file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
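Each record below splits one source file into a prefix, a suffix, and a held-out middle that a fill-in-the-middle model must reconstruct; `fim_type` labels the shape of the hole (conditional_block, identifier_body, identifier_name, or random_line_split). A minimal sketch of how a row reassembles into the original file — the struct and field names here are illustrative, not the dataset loader's API:

```go
package main

import "fmt"

// fimRow mirrors the five columns of this dataset; names are assumed for illustration.
type fimRow struct {
	FileName string
	Prefix   string // code before the hole
	Suffix   string // code after the hole
	Middle   string // the held-out span a model must predict
	FimType  string // e.g. "conditional_block"
}

func main() {
	// Values taken from the first row below.
	row := fimRow{
		FileName: "resp.go",
		Prefix:   "if err != nil ",
		Middle:   "{\n\treturn\n}",
		Suffix:   "\nreturn string(buf[1]), _offset",
		FimType:  "conditional_block",
	}
	// Concatenating the three columns restores the original source.
	fmt.Println(row.Prefix + row.Middle + row.Suffix)
}
```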
resp.go | /*
Copyright 2019 yametech.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package canal
import (
"bytes"
"errors"
"fmt"
"io"
"strconv"
"strings"
)
//const bufsz = 4096
// Type represents a Value type
type Type byte
const (
SimpleString Type = '+'
Error Type = '-'
Integer Type = ':'
BulkString Type = '$'
Array Type = '*'
Rdb Type = 'R'
)
// String returns the name of the underlying RESP type.
func (t Type) String() string {
switch t {
default:
return "Unknown"
case '+':
return "SimpleString"
case '-':
return "Error"
case ':':
return "Integer"
case '$':
return "BulkString"
case '*':
return "Array"
case 'R':
return "RDB"
}
}
// Value represents the data of a valid RESP type.
type Value struct {
Typ Type
IntegerV int
Str []byte
ArrayV []Value
Null bool
RDB bool
Size int
}
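// ReplInfo extracts the replication run ID and offset from an RDB value's space-separated payload.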
func (v Value) ReplInfo() (runID string, offset int64) {
if v.Type() != Rdb {
return
}
buf := bytes.Split(v.Str, []byte(" "))
if len(buf) < 3 {
return
}
_offset, err := strconv.ParseInt(string(buf[2]), 10, 64)
if err != nil |
return string(buf[1]), _offset
}
// Integer converts Value to an int. If Value cannot be converted, zero is returned.
func (v Value) Integer() int {
switch v.Typ {
default:
n, _ := strconv.ParseInt(v.String(), 10, 64)
return int(n)
case ':':
return v.IntegerV
}
}
// String converts Value to a string.
func (v Value) String() string {
if v.Typ == '$' {
return string(v.Str)
}
switch v.Typ {
case '+', '-':
return string(v.Str)
case ':':
return strconv.FormatInt(int64(v.IntegerV), 10)
case '*':
buf := bytes.NewBuffer(nil)
concatArray(buf, v.ArrayV...)
return strings.TrimSuffix(buf.String(), " ")
case '\r':
return "\r\n"
}
return ""
}
func concatArray(wr io.Writer, vs ...Value) {
for i := range vs {
_, err := wr.Write([]byte(vs[i].String()))
if err != nil {
panic(err)
}
_, err = wr.Write([]byte("\r\n"))
if err != nil {
panic(err)
}
concatArray(wr, vs[i].Array()...)
}
}
// Bytes converts the Value to a byte array. An empty string is converted to a non-nil empty byte array.
// If it's a RESP Null value, nil is returned.
func (v Value) Bytes() []byte {
switch v.Typ {
default:
return []byte(v.String())
case '$', '+', '-':
return v.Str
}
}
// Float converts Value to a float64. If Value cannot be converted,
// zero is returned.
func (v Value) Float() float64 {
switch v.Typ {
default:
f, _ := strconv.ParseFloat(v.String(), 64)
return f
case ':':
return float64(v.IntegerV)
}
}
// IsNull indicates whether or not the base value is null.
func (v Value) IsNull() bool {
return v.Null
}
// Bool converts Value to a bool. If Value cannot be converted, false is returned.
func (v Value) Bool() bool {
return v.Integer() != 0
}
// Error converts the Value to an error. If Value is not an error, nil is returned.
func (v Value) Error() error {
switch v.Typ {
case '-':
return errors.New(string(v.Str))
}
return nil
}
// Array converts the Value to an array.
// If Value is not an array or when it is a RESP Null value, nil is returned.
func (v Value) Array() []Value {
if v.Typ == '*' && !v.Null {
return v.ArrayV
}
return nil
}
// Type returns the underlying RESP type.
// The following types represent valid RESP values.
func (v Value) Type() Type {
return v.Typ
}
func marshalSimpleRESP(typ Type, b []byte) ([]byte, error) {
bb := make([]byte, 3+len(b))
bb[0] = byte(typ)
copy(bb[1:], b)
bb[1+len(b)+0] = '\r'
bb[1+len(b)+1] = '\n'
return bb, nil
}
func marshalBulkRESP(v Value) ([]byte, error) {
if v.Null {
return []byte("$-1\r\n"), nil
}
szb := []byte(strconv.FormatInt(int64(len(v.Str)), 10))
bb := make([]byte, 5+len(szb)+len(v.Str))
bb[0] = '$'
copy(bb[1:], szb)
bb[1+len(szb)+0] = '\r'
bb[1+len(szb)+1] = '\n'
copy(bb[1+len(szb)+2:], v.Str)
bb[1+len(szb)+2+len(v.Str)+0] = '\r'
bb[1+len(szb)+2+len(v.Str)+1] = '\n'
return bb, nil
}
func marshalArrayRESP(v Value) ([]byte, error) {
if v.Null {
return []byte("*-1\r\n"), nil
}
szb := []byte(strconv.FormatInt(int64(len(v.ArrayV)), 10))
var buf bytes.Buffer
buf.Grow(3 + len(szb) + 16*len(v.ArrayV)) // prime the buffer
buf.WriteByte('*')
buf.Write(szb)
buf.WriteByte('\r')
buf.WriteByte('\n')
for i := 0; i < len(v.ArrayV); i++ {
data, err := v.ArrayV[i].MarshalRESP()
if err != nil {
return nil, err
}
buf.Write(data)
}
return buf.Bytes(), nil
}
func marshalAnyRESP(v Value) ([]byte, error) {
switch v.Typ {
default:
if v.Typ == 0 && v.Null {
return []byte("$-1\r\n"), nil
}
return nil, errors.New("unknown resp type encountered")
case '-', '+':
return marshalSimpleRESP(v.Typ, v.Str)
case ':':
return marshalSimpleRESP(v.Typ, []byte(strconv.FormatInt(int64(v.IntegerV), 10)))
case '$':
return marshalBulkRESP(v)
case '*':
return marshalArrayRESP(v)
}
}
// Equals compares one value to another value.
func (v Value) Equals(value Value) bool {
data1, err := v.MarshalRESP()
if err != nil {
return false
}
data2, err := value.MarshalRESP()
if err != nil {
return false
}
return string(data1) == string(data2)
}
// MarshalRESP returns the original serialized byte representation of Value.
// For more information on this format please see http://redis.io/topics/protocol.
func (v Value) MarshalRESP() ([]byte, error) {
return marshalAnyRESP(v)
}
var NilValue = Value{Null: true}
type ErrProtocol struct{ Msg string }
func (err ErrProtocol) Error() string {
return "Protocol error: " + err.Msg
}
// AnyValue returns a RESP value from an interface.
// This function infers the types. Arrays are not allowed.
func AnyValue(v interface{}) Value {
switch v := v.(type) {
default:
return StringValue(fmt.Sprintf("%v", v))
case nil:
return NullValue()
case int:
return IntegerValue(int(v))
case uint:
return IntegerValue(int(v))
case int8:
return IntegerValue(int(v))
case uint8:
return IntegerValue(int(v))
case int16:
return IntegerValue(int(v))
case uint16:
return IntegerValue(int(v))
case int32:
return IntegerValue(int(v))
case uint32:
return IntegerValue(int(v))
case int64:
return IntegerValue(int(v))
case uint64:
return IntegerValue(int(v))
case bool:
return BoolValue(v)
case float32:
return FloatValue(float64(v))
case float64:
return FloatValue(float64(v))
case []byte:
return BytesValue(v)
case string:
return StringValue(v)
}
}
// SimpleStringValue returns a RESP simple string. A simple string has no new lines. The carriage return and new line characters are replaced with spaces.
func SimpleStringValue(s string) Value { return Value{Typ: '+', Str: []byte(formSingleLine(s))} }
// BytesValue returns a RESP bulk string. A bulk string can represent any data.
func BytesValue(b []byte) Value { return Value{Typ: '$', Str: b} }
// StringValue returns a RESP bulk string. A bulk string can represent any data.
func StringValue(s string) Value { return Value{Typ: '$', Str: []byte(s)} }
// NullValue returns a RESP null bulk string.
func NullValue() Value { return Value{Typ: '$', Null: true} }
// ErrorValue returns a RESP error.
func ErrorValue(err error) Value {
if err == nil {
return Value{Typ: '-'}
}
return Value{Typ: '-', Str: []byte(err.Error())}
}
// IntegerValue returns a RESP integer.
func IntegerValue(i int) Value { return Value{Typ: ':', IntegerV: i} }
// BoolValue returns a RESP integer representation of a bool.
func BoolValue(t bool) Value {
if t {
return Value{Typ: ':', IntegerV: 1}
}
return Value{Typ: ':', IntegerV: 0}
}
// FloatValue returns a RESP bulk string representation of a float.
func FloatValue(f float64) Value { return StringValue(strconv.FormatFloat(f, 'f', -1, 64)) }
// ArrayValue returns a RESP array.
func ArrayValue(vals []Value) Value { return Value{Typ: '*', ArrayV: vals} }
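// formSingleLine replaces any carriage return or newline in s with a space, copying only when a change is needed.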
func formSingleLine(s string) string {
bs1 := []byte(s)
for i := 0; i < len(bs1); i++ {
switch bs1[i] {
case '\r', '\n':
bs2 := make([]byte, len(bs1))
copy(bs2, bs1)
bs2[i] = ' '
i++
for ; i < len(bs2); i++ {
switch bs1[i] {
case '\r', '\n':
bs2[i] = ' '
}
}
return string(bs2)
}
}
return s
}
// MultiBulkValue returns a RESP array which contains one or more bulk strings.
// For more information on RESP arrays and strings please see http://redis.io/topics/protocol.
func MultiBulkValue(commandName string, args ...interface{}) Value {
vals := make([]Value, len(args)+1)
vals[0] = StringValue(commandName)
for i, arg := range args {
if rval, ok := arg.(Value); ok && rval.Type() == BulkString {
vals[i+1] = rval
continue
}
switch arg := arg.(type) {
default:
vals[i+1] = StringValue(fmt.Sprintf("%v", arg))
case []byte:
vals[i+1] = StringValue(string(arg))
case string:
vals[i+1] = StringValue(arg)
case nil:
vals[i+1] = NullValue()
}
}
return ArrayValue(vals)
}
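// MultiBulkBytes serializes val to RESP-style wire bytes and returns them with their length; note that ':' values are length-prefixed here like bulk strings.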
func MultiBulkBytes(val Value) ([]byte, int) {
buf := bytes.NewBuffer(nil)
switch val.Typ {
case '+', '-':
buf.WriteByte(byte(val.Typ))
buf.WriteString(val.String())
buf.Write([]byte{'\r', '\n'})
return buf.Bytes(), len(buf.Bytes())
case '$', ':':
buf.WriteByte(byte(val.Typ))
buf.WriteString(fmt.Sprintf("%d", len(val.String())))
buf.Write([]byte{'\r', '\n'})
buf.WriteString(val.String())
buf.Write([]byte{'\r', '\n'})
return buf.Bytes(), len(buf.Bytes())
case '*':
buf.WriteByte(byte(val.Typ))
length := len(val.ArrayV)
buf.WriteString(fmt.Sprintf("%d", length))
buf.Write([]byte{'\r', '\n'})
for i := range val.ArrayV {
bs, _ := MultiBulkBytes(val.ArrayV[i])
buf.Write(bs)
}
return buf.Bytes(), buf.Len()
}
return []byte{}, 0
}
| {
return
} | conditional_block |
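The row above holds out the error-check block in ReplInfo (the middle column restores it). With that hole filled, the Value API is self-contained; a small sketch of how it might be exercised, assuming the code compiles as package canal alongside a test file:

```go
package canal

import (
	"fmt"
	"testing"
)

// TestMarshalSketch shows the wire bytes MarshalRESP produces for a small command.
func TestMarshalSketch(t *testing.T) {
	v := ArrayValue([]Value{
		StringValue("SET"), // bulk string -> $3\r\nSET\r\n
		StringValue("key"), // bulk string -> $3\r\nkey\r\n
		IntegerValue(42),   // integer     -> :42\r\n
	})
	data, err := v.MarshalRESP()
	if err != nil {
		t.Fatal(err)
	}
	// Expected: "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n:42\r\n"
	fmt.Printf("%q\n", data)
}
```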
resp.go | /*
Copyright 2019 yametech.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package canal
import (
"bytes"
"errors"
"fmt"
"io"
"strconv"
"strings"
)
//const bufsz = 4096
// Type represents a Value type
type Type byte
const (
SimpleString Type = '+'
Error Type = '-'
Integer Type = ':'
BulkString Type = '$'
Array Type = '*'
Rdb Type = 'R'
)
// String returns the name of the underlying RESP type.
func (t Type) String() string {
switch t {
default:
return "Unknown"
case '+':
return "SimpleString"
case '-':
return "Error"
case ':':
return "Integer"
case '$':
return "BulkString"
case '*':
return "Array"
case 'R':
return "RDB"
}
}
// Value represents the data of a valid RESP type.
type Value struct {
Typ Type
IntegerV int
Str []byte
ArrayV []Value
Null bool
RDB bool
Size int
}
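// ReplInfo extracts the replication run ID and offset from an RDB value's space-separated payload.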
func (v Value) ReplInfo() (runID string, offset int64) {
if v.Type() != Rdb {
return
}
buf := bytes.Split(v.Str, []byte(" "))
if len(buf) < 3 {
return
}
_offset, err := strconv.ParseInt(string(buf[2]), 10, 64)
if err != nil {
return
}
return string(buf[1]), _offset
}
// Integer converts Value to an int. If Value cannot be converted, zero is returned.
func (v Value) Integer() int {
switch v.Typ {
default:
n, _ := strconv.ParseInt(v.String(), 10, 64)
return int(n)
case ':':
return v.IntegerV
}
}
// String converts Value to a string.
func (v Value) String() string {
if v.Typ == '$' {
return string(v.Str)
}
switch v.Typ {
case '+', '-':
return string(v.Str)
case ':':
return strconv.FormatInt(int64(v.IntegerV), 10)
case '*':
buf := bytes.NewBuffer(nil)
concatArray(buf, v.ArrayV...)
return strings.TrimSuffix(buf.String(), " ")
case '\r':
return "\r\n"
}
return ""
}
func concatArray(wr io.Writer, vs ...Value) {
for i := range vs {
_, err := wr.Write([]byte(vs[i].String()))
if err != nil {
panic(err)
}
_, err = wr.Write([]byte("\r\n"))
if err != nil {
panic(err)
}
concatArray(wr, vs[i].Array()...)
}
}
// Bytes converts the Value to a byte array. An empty string is converted to a non-nil empty byte array.
// If it's a RESP Null value, nil is returned.
func (v Value) Bytes() []byte {
switch v.Typ {
default:
return []byte(v.String())
case '$', '+', '-':
return v.Str
}
}
// Float converts Value to a float64. If Value cannot be converted,
// zero is returned.
func (v Value) Float() float64 {
switch v.Typ {
default:
f, _ := strconv.ParseFloat(v.String(), 64)
return f
case ':':
return float64(v.IntegerV)
}
}
// IsNull indicates whether or not the base value is null.
func (v Value) IsNull() bool {
return v.Null
}
// Bool converts Value to a bool. If Value cannot be converted, false is returned.
func (v Value) Bool() bool {
return v.Integer() != 0
}
// Error converts the Value to an error. If Value is not an error, nil is returned.
func (v Value) Error() error {
switch v.Typ {
case '-':
return errors.New(string(v.Str))
}
return nil
}
// Array converts the Value to an array.
// If Value is not an array or when it is a RESP Null value, nil is returned.
func (v Value) Array() []Value {
if v.Typ == '*' && !v.Null {
return v.ArrayV
}
return nil
}
// Type returns the underlying RESP type.
// The following types represent valid RESP values.
func (v Value) Type() Type {
return v.Typ
}
func marshalSimpleRESP(typ Type, b []byte) ([]byte, error) {
bb := make([]byte, 3+len(b))
bb[0] = byte(typ)
copy(bb[1:], b)
bb[1+len(b)+0] = '\r'
bb[1+len(b)+1] = '\n'
return bb, nil
}
func marshalBulkRESP(v Value) ([]byte, error) {
if v.Null {
return []byte("$-1\r\n"), nil
}
szb := []byte(strconv.FormatInt(int64(len(v.Str)), 10))
bb := make([]byte, 5+len(szb)+len(v.Str))
bb[0] = '$'
copy(bb[1:], szb)
bb[1+len(szb)+0] = '\r'
bb[1+len(szb)+1] = '\n'
copy(bb[1+len(szb)+2:], v.Str)
bb[1+len(szb)+2+len(v.Str)+0] = '\r'
bb[1+len(szb)+2+len(v.Str)+1] = '\n'
return bb, nil
}
func marshalArrayRESP(v Value) ([]byte, error) {
if v.Null {
return []byte("*-1\r\n"), nil
}
szb := []byte(strconv.FormatInt(int64(len(v.ArrayV)), 10))
var buf bytes.Buffer
buf.Grow(3 + len(szb) + 16*len(v.ArrayV)) // prime the buffer
buf.WriteByte('*')
buf.Write(szb)
buf.WriteByte('\r')
buf.WriteByte('\n')
for i := 0; i < len(v.ArrayV); i++ {
data, err := v.ArrayV[i].MarshalRESP()
if err != nil {
return nil, err
}
buf.Write(data)
}
return buf.Bytes(), nil
}
func marshalAnyRESP(v Value) ([]byte, error) {
switch v.Typ {
default:
if v.Typ == 0 && v.Null {
return []byte("$-1\r\n"), nil
}
return nil, errors.New("unknown resp type encountered")
case '-', '+':
return marshalSimpleRESP(v.Typ, v.Str)
case ':':
return marshalSimpleRESP(v.Typ, []byte(strconv.FormatInt(int64(v.IntegerV), 10)))
case '$':
return marshalBulkRESP(v)
case '*':
return marshalArrayRESP(v)
}
}
// Equals compares one value to another value.
func (v Value) Equals(value Value) bool {
data1, err := v.MarshalRESP()
if err != nil {
return false
}
data2, err := value.MarshalRESP()
if err != nil {
return false
}
return string(data1) == string(data2)
}
// MarshalRESP returns the original serialized byte representation of Value.
// For more information on this format please see http://redis.io/topics/protocol.
func (v Value) MarshalRESP() ([]byte, error) {
return marshalAnyRESP(v)
}
var NilValue = Value{Null: true}
type ErrProtocol struct{ Msg string }
func (err ErrProtocol) Error() string {
return "Protocol error: " + err.Msg
}
// AnyValue returns a RESP value from an interface.
// This function infers the types. Arrays are not allowed.
func AnyValue(v interface{}) Value {
switch v := v.(type) {
default:
return StringValue(fmt.Sprintf("%v", v))
case nil:
return NullValue()
case int:
return IntegerValue(int(v))
case uint:
return IntegerValue(int(v))
case int8:
return IntegerValue(int(v))
case uint8:
return IntegerValue(int(v))
case int16:
return IntegerValue(int(v))
case uint16:
return IntegerValue(int(v))
case int32:
return IntegerValue(int(v))
case uint32:
return IntegerValue(int(v))
case int64:
return IntegerValue(int(v))
case uint64:
return IntegerValue(int(v))
case bool:
return BoolValue(v)
case float32:
return FloatValue(float64(v))
case float64:
return FloatValue(float64(v))
case []byte:
return BytesValue(v)
case string:
return StringValue(v)
}
}
// SimpleStringValue returns a RESP simple string. A simple string has no new lines. The carriage return and new line characters are replaced with spaces.
func SimpleStringValue(s string) Value { return Value{Typ: '+', Str: []byte(formSingleLine(s))} }
// BytesValue returns a RESP bulk string. A bulk string can represent any data.
func BytesValue(b []byte) Value { return Value{Typ: '$', Str: b} }
// StringValue returns a RESP bulk string. A bulk string can represent any data.
func StringValue(s string) Value |
// NullValue returns a RESP null bulk string.
func NullValue() Value { return Value{Typ: '$', Null: true} }
// ErrorValue returns a RESP error.
func ErrorValue(err error) Value {
if err == nil {
return Value{Typ: '-'}
}
return Value{Typ: '-', Str: []byte(err.Error())}
}
// IntegerValue returns a RESP integer.
func IntegerValue(i int) Value { return Value{Typ: ':', IntegerV: i} }
// BoolValue returns a RESP integer representation of a bool.
func BoolValue(t bool) Value {
if t {
return Value{Typ: ':', IntegerV: 1}
}
return Value{Typ: ':', IntegerV: 0}
}
// FloatValue returns a RESP bulk string representation of a float.
func FloatValue(f float64) Value { return StringValue(strconv.FormatFloat(f, 'f', -1, 64)) }
// ArrayValue returns a RESP array.
func ArrayValue(vals []Value) Value { return Value{Typ: '*', ArrayV: vals} }
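// formSingleLine replaces any carriage return or newline in s with a space, copying only when a change is needed.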
func formSingleLine(s string) string {
bs1 := []byte(s)
for i := 0; i < len(bs1); i++ {
switch bs1[i] {
case '\r', '\n':
bs2 := make([]byte, len(bs1))
copy(bs2, bs1)
bs2[i] = ' '
i++
for ; i < len(bs2); i++ {
switch bs1[i] {
case '\r', '\n':
bs2[i] = ' '
}
}
return string(bs2)
}
}
return s
}
// MultiBulkValue returns a RESP array which contains one or more bulk strings.
// For more information on RESP arrays and strings please see http://redis.io/topics/protocol.
func MultiBulkValue(commandName string, args ...interface{}) Value {
vals := make([]Value, len(args)+1)
vals[0] = StringValue(commandName)
for i, arg := range args {
if rval, ok := arg.(Value); ok && rval.Type() == BulkString {
vals[i+1] = rval
continue
}
switch arg := arg.(type) {
default:
vals[i+1] = StringValue(fmt.Sprintf("%v", arg))
case []byte:
vals[i+1] = StringValue(string(arg))
case string:
vals[i+1] = StringValue(arg)
case nil:
vals[i+1] = NullValue()
}
}
return ArrayValue(vals)
}
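// MultiBulkBytes serializes val to RESP-style wire bytes and returns them with their length; note that ':' values are length-prefixed here like bulk strings.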
func MultiBulkBytes(val Value) ([]byte, int) {
buf := bytes.NewBuffer(nil)
switch val.Typ {
case '+', '-':
buf.WriteByte(byte(val.Typ))
buf.WriteString(val.String())
buf.Write([]byte{'\r', '\n'})
return buf.Bytes(), len(buf.Bytes())
case '$', ':':
buf.WriteByte(byte(val.Typ))
buf.WriteString(fmt.Sprintf("%d", len(val.String())))
buf.Write([]byte{'\r', '\n'})
buf.WriteString(val.String())
buf.Write([]byte{'\r', '\n'})
return buf.Bytes(), len(buf.Bytes())
case '*':
buf.WriteByte(byte(val.Typ))
length := len(val.ArrayV)
buf.WriteString(fmt.Sprintf("%d", length))
buf.Write([]byte{'\r', '\n'})
for i := range val.ArrayV {
bs, _ := MultiBulkBytes(val.ArrayV[i])
buf.Write(bs)
}
return buf.Bytes(), buf.Len()
}
return []byte{}, 0
}
| { return Value{Typ: '$', Str: []byte(s)} } | identifier_body |
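This second resp.go row instead blanks the body of StringValue (identifier_body). As a usage note for the helpers it restores, MultiBulkValue builds the array form in which Redis commands travel; a short sketch, again assuming package canal:

```go
package canal

import "fmt"

// ExampleMultiBulkValue sketches the command encoding produced by the helpers above.
func ExampleMultiBulkValue() {
	cmd := MultiBulkValue("GET", "mykey")
	data, _ := cmd.MarshalRESP()
	fmt.Printf("%q\n", data)
	// Output: "*2\r\n$3\r\nGET\r\n$5\r\nmykey\r\n"
}
```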
agent.go | // Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
The agent handles local execution of actions triggered remotely.
It has two execution models:
- listening on an action path for ActionNode objects. When receiving
an action, it will forward it to vtaction to perform it (vtaction
uses the actor code). We usually use this model for long-running
queries where an RPC would time out.
All vtaction calls lock the actionMutex.
After executing vtaction, we always call agent.changeCallback.
Additionally, for TABLET_ACTION_APPLY_SCHEMA, we will force a schema
reload.
- listening as an RPC server. The agent performs the action itself,
calling the actor code directly. We use this for short-lived actions.
Most RPC calls lock the actionMutex, except the easy read-only ones.
We will not call changeCallback for all actions, just for the ones
that are relevant. Same for schema reload.
See rpc_server.go for all cases, and which action takes the actionMutex,
runs changeCallback, and reloads the schema.
*/
package tabletmanager
import (
"encoding/json"
"flag"
"fmt"
"net"
"os"
"os/exec"
"path"
"sync"
"time"
log "github.com/golang/glog"
"github.com/youtube/vitess/go/history"
"github.com/youtube/vitess/go/jscfg"
"github.com/youtube/vitess/go/netutil"
"github.com/youtube/vitess/go/vt/dbconfigs"
"github.com/youtube/vitess/go/vt/env"
"github.com/youtube/vitess/go/vt/logutil"
"github.com/youtube/vitess/go/vt/mysqlctl"
"github.com/youtube/vitess/go/vt/tabletmanager/actionnode"
"github.com/youtube/vitess/go/vt/tabletmanager/actor"
"github.com/youtube/vitess/go/vt/tabletserver"
"github.com/youtube/vitess/go/vt/topo"
)
var (
vtactionBinaryPath = flag.String("vtaction_binary_path", "", "Full path (including filename) to vtaction binary. If not set, tries VTROOT/bin/vtaction.")
)
type tabletChangeItem struct {
oldTablet topo.Tablet
newTablet topo.Tablet
context string
queuedTime time.Time
}
// ActionAgent is the main class for the agent.
type ActionAgent struct {
// The following fields are set during creation
TopoServer topo.Server
TabletAlias topo.TabletAlias
Mysqld *mysqlctl.Mysqld
DBConfigs *dbconfigs.DBConfigs
SchemaOverrides []tabletserver.SchemaOverride
BinlogPlayerMap *BinlogPlayerMap
// Internal variables
vtActionBinFile string // path to vtaction binary
done chan struct{} // closed when we are done.
// This is the History of the health checks, public so status
// pages can display it
History *history.History
// actionMutex is there to run only one action at a time. If
// both agent.actionMutex and agent.mutex needs to be taken,
// take actionMutex first.
actionMutex sync.Mutex // to run only one action at a time
// mutex is protecting the rest of the members
mutex sync.Mutex
changeItems chan tabletChangeItem
_tablet *topo.TabletInfo
}
func loadSchemaOverrides(overridesFile string) []tabletserver.SchemaOverride {
var schemaOverrides []tabletserver.SchemaOverride
if overridesFile == "" {
return schemaOverrides
}
if err := jscfg.ReadJson(overridesFile, &schemaOverrides); err != nil {
log.Warningf("can't read overridesFile %v: %v", overridesFile, err)
} else {
data, _ := json.MarshalIndent(schemaOverrides, "", " ")
log.Infof("schemaOverrides: %s\n", data)
}
return schemaOverrides
}
// NewActionAgent creates a new ActionAgent and registers all the
// associated services
func NewActionAgent(
tabletAlias topo.TabletAlias,
dbcfgs *dbconfigs.DBConfigs,
mycnf *mysqlctl.Mycnf,
port, securePort int,
overridesFile string,
) (agent *ActionAgent, err error) {
schemaOverrides := loadSchemaOverrides(overridesFile)
topoServer := topo.GetServer()
mysqld := mysqlctl.NewMysqld("Dba", mycnf, &dbcfgs.Dba, &dbcfgs.Repl)
agent = &ActionAgent{
TopoServer: topoServer,
TabletAlias: tabletAlias,
Mysqld: mysqld,
DBConfigs: dbcfgs,
SchemaOverrides: schemaOverrides,
done: make(chan struct{}),
History: history.New(historyLength),
changeItems: make(chan tabletChangeItem, 100),
}
// Start the binlog player services, not playing at start.
agent.BinlogPlayerMap = NewBinlogPlayerMap(topoServer, &dbcfgs.App.ConnectionParams, mysqld)
RegisterBinlogPlayerMap(agent.BinlogPlayerMap)
// try to figure out the mysql port
mysqlPort := mycnf.MysqlPort
if mysqlPort == 0 {
// we don't know the port, try to get it from mysqld
var err error
mysqlPort, err = mysqld.GetMysqlPort()
if err != nil {
log.Warningf("Cannot get current mysql port, will use 0 for now: %v", err)
}
}
if err := agent.Start(mysqlPort, port, securePort); err != nil {
return nil, err
}
// register the RPC services from the agent
agent.registerQueryService()
// start health check if needed
agent.initHeathCheck()
return agent, nil
}
func (agent *ActionAgent) runChangeCallback(oldTablet *topo.Tablet, context string) {
agent.mutex.Lock()
// Access directly since we have the lock.
newTablet := agent._tablet.Tablet
agent.changeItems <- tabletChangeItem{oldTablet: *oldTablet, newTablet: *newTablet, context: context, queuedTime: time.Now()}
log.Infof("Queued tablet callback: %v", context)
agent.mutex.Unlock()
}
func (agent *ActionAgent) executeCallbacksLoop() {
for {
select {
case changeItem := <-agent.changeItems:
wg := sync.WaitGroup{}
agent.mutex.Lock()
log.Infof("Running tablet callback after %v: %v", time.Now().Sub(changeItem.queuedTime), changeItem.context)
wg.Add(1)
go func() {
defer wg.Done()
agent.changeCallback(changeItem.oldTablet, changeItem.newTablet)
}()
agent.mutex.Unlock()
wg.Wait()
case <-agent.done:
return
}
}
}
func (agent *ActionAgent) readTablet() error {
tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias)
if err != nil {
return err
}
agent.mutex.Lock()
agent._tablet = tablet
agent.mutex.Unlock()
return nil
}
func (agent *ActionAgent) Tablet() *topo.TabletInfo |
func (agent *ActionAgent) resolvePaths() error {
var p string
if *vtactionBinaryPath != "" {
p = *vtactionBinaryPath
} else {
vtroot, err := env.VtRoot()
if err != nil {
return err
}
p = path.Join(vtroot, "bin/vtaction")
}
if _, err := os.Stat(p); err != nil {
return fmt.Errorf("vtaction binary %s not found: %v", p, err)
}
agent.vtActionBinFile = p
return nil
}
// A non-nil return signals that event processing should stop.
func (agent *ActionAgent) dispatchAction(actionPath, data string) error {
agent.actionMutex.Lock()
defer agent.actionMutex.Unlock()
log.Infof("action dispatch %v", actionPath)
actionNode, err := actionnode.ActionNodeFromJson(data, actionPath)
if err != nil {
log.Errorf("action decode failed: %v %v", actionPath, err)
return nil
}
cmd := []string{
agent.vtActionBinFile,
"-action", actionNode.Action,
"-action-node", actionPath,
"-action-guid", actionNode.ActionGuid,
}
cmd = append(cmd, logutil.GetSubprocessFlags()...)
cmd = append(cmd, topo.GetSubprocessFlags()...)
cmd = append(cmd, dbconfigs.GetSubprocessFlags()...)
cmd = append(cmd, mysqlctl.GetSubprocessFlags()...)
log.Infof("action launch %v", cmd)
vtActionCmd := exec.Command(cmd[0], cmd[1:]...)
stdOut, vtActionErr := vtActionCmd.CombinedOutput()
if vtActionErr != nil {
log.Errorf("agent action failed: %v %v\n%s", actionPath, vtActionErr, stdOut)
// If the action failed, preserve single execution path semantics.
return vtActionErr
}
log.Infof("Agent action completed %v %s", actionPath, stdOut)
agent.afterAction(actionPath, actionNode.Action == actionnode.TABLET_ACTION_APPLY_SCHEMA)
return nil
}
// afterAction needs to be run after an action may have changed the current
// state of the tablet.
func (agent *ActionAgent) afterAction(context string, reloadSchema bool) {
log.Infof("Executing post-action change callbacks")
// Save the old tablet so callbacks can have a better idea of
// the precise nature of the transition.
oldTablet := agent.Tablet().Tablet
// Actions should have side effects on the tablet, so reload the data.
if err := agent.readTablet(); err != nil {
log.Warningf("Failed rereading tablet after %v - services may be inconsistent: %v", context, err)
} else {
if updatedTablet := actor.CheckTabletMysqlPort(agent.TopoServer, agent.Mysqld, agent.Tablet()); updatedTablet != nil {
agent.mutex.Lock()
agent._tablet = updatedTablet
agent.mutex.Unlock()
}
agent.runChangeCallback(oldTablet, context)
}
// Maybe invalidate the schema.
// This adds a dependency between tabletmanager and tabletserver,
// so it's not ideal. But I (alainjobart) think it's better
// to have up to date schema in vtocc.
if reloadSchema {
tabletserver.ReloadSchema()
}
log.Infof("Done with post-action change callbacks")
}
func (agent *ActionAgent) verifyTopology() error {
tablet := agent.Tablet()
if tablet == nil {
return fmt.Errorf("agent._tablet is nil")
}
if err := topo.Validate(agent.TopoServer, agent.TabletAlias); err != nil {
// Don't stop, it's not serious enough, this is likely transient.
log.Warningf("tablet validate failed: %v %v", agent.TabletAlias, err)
}
return agent.TopoServer.ValidateTabletActions(agent.TabletAlias)
}
func (agent *ActionAgent) verifyServingAddrs() error {
if !agent.Tablet().IsRunningQueryService() {
return nil
}
// Check to see our address is registered in the right place.
addr, err := agent.Tablet().Tablet.EndPoint()
if err != nil {
return err
}
return agent.TopoServer.UpdateTabletEndpoint(agent.Tablet().Tablet.Alias.Cell, agent.Tablet().Keyspace, agent.Tablet().Shard, agent.Tablet().Type, addr)
}
// Start registers the agent's serving addresses in the tablet record and starts its event loops.
func (agent *ActionAgent) Start(mysqlPort, vtPort, vtsPort int) error {
var err error
if err = agent.readTablet(); err != nil {
return err
}
if err = agent.resolvePaths(); err != nil {
return err
}
// find our hostname as fully qualified, and IP
hostname, err := netutil.FullyQualifiedHostname()
if err != nil {
return err
}
ipAddrs, err := net.LookupHost(hostname)
if err != nil {
return err
}
ipAddr := ipAddrs[0]
// Update bind addr for mysql and query service in the tablet node.
f := func(tablet *topo.Tablet) error {
tablet.Hostname = hostname
tablet.IPAddr = ipAddr
if tablet.Portmap == nil {
tablet.Portmap = make(map[string]int)
}
if mysqlPort != 0 {
// only overwrite mysql port if we know it, otherwise
// leave it as is.
tablet.Portmap["mysql"] = mysqlPort
}
tablet.Portmap["vt"] = vtPort
if vtsPort != 0 {
tablet.Portmap["vts"] = vtsPort
} else {
delete(tablet.Portmap, "vts")
}
return nil
}
if err := agent.TopoServer.UpdateTabletFields(agent.Tablet().Alias, f); err != nil {
return err
}
// Reread to get the changes we just made
if err := agent.readTablet(); err != nil {
return err
}
data := fmt.Sprintf("host:%v\npid:%v\n", hostname, os.Getpid())
if err := agent.TopoServer.CreateTabletPidNode(agent.TabletAlias, data, agent.done); err != nil {
return err
}
if err = agent.verifyTopology(); err != nil {
return err
}
if err = agent.verifyServingAddrs(); err != nil {
return err
}
oldTablet := &topo.Tablet{}
agent.runChangeCallback(oldTablet, "Start")
go agent.actionEventLoop()
go agent.executeCallbacksLoop()
return nil
}
func (agent *ActionAgent) Stop() {
close(agent.done)
agent.BinlogPlayerMap.StopAllPlayersAndReset()
agent.Mysqld.Close()
}
func (agent *ActionAgent) actionEventLoop() {
f := func(actionPath, data string) error {
return agent.dispatchAction(actionPath, data)
}
agent.TopoServer.ActionEventLoop(agent.TabletAlias, f, agent.done)
}
| {
agent.mutex.Lock()
tablet := agent._tablet
agent.mutex.Unlock()
return tablet
} | identifier_body |
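The agent.go row above holds out the body of Tablet(), a mutex-guarded read of the cached record. Its changeItems channel illustrates a queued-callback pattern — enqueue under the lock, drain in a single goroutine. A stripped-down sketch of the same pattern, with illustrative names rather than the Vitess types:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type changeItem struct {
	context    string
	queuedTime time.Time
}

type agent struct {
	mu    sync.Mutex
	items chan changeItem
	done  chan struct{}
}

// enqueue mirrors runChangeCallback: publish under the lock, run later.
func (a *agent) enqueue(context string) {
	a.mu.Lock()
	a.items <- changeItem{context: context, queuedTime: time.Now()}
	a.mu.Unlock()
}

// loop mirrors executeCallbacksLoop: one goroutine serializes the callbacks.
func (a *agent) loop() {
	for {
		select {
		case it := <-a.items:
			fmt.Printf("callback after %v: %v\n", time.Since(it.queuedTime), it.context)
		case <-a.done:
			return
		}
	}
}

func main() {
	a := &agent{items: make(chan changeItem, 100), done: make(chan struct{})}
	go a.loop()
	a.enqueue("Start")
	time.Sleep(10 * time.Millisecond)
	close(a.done)
}
```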
agent.go | // Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
The agent handles local execution of actions triggered remotely.
It has two execution models:
- listening on an action path for ActionNode objects. When receiving
an action, it will forward it to vtaction to perform it (vtaction
uses the actor code). We usually use this model for long-running
queries where an RPC would time out.
All vtaction calls lock the actionMutex.
After executing vtaction, we always call agent.changeCallback.
Additionally, for TABLET_ACTION_APPLY_SCHEMA, we will force a schema
reload.
- listening as an RPC server. The agent performs the action itself,
calling the actor code directly. We use this for short-lived actions.
Most RPC calls lock the actionMutex, except the easy read-only ones.
We will not call changeCallback for all actions, just for the ones
that are relevant. Same for schema reload.
See rpc_server.go for all cases, and which action takes the actionMutex,
runs changeCallback, and reloads the schema.
*/
package tabletmanager
import (
"encoding/json"
"flag"
"fmt"
"net"
"os"
"os/exec"
"path"
"sync"
"time"
log "github.com/golang/glog"
"github.com/youtube/vitess/go/history"
"github.com/youtube/vitess/go/jscfg"
"github.com/youtube/vitess/go/netutil"
"github.com/youtube/vitess/go/vt/dbconfigs"
"github.com/youtube/vitess/go/vt/env"
"github.com/youtube/vitess/go/vt/logutil"
"github.com/youtube/vitess/go/vt/mysqlctl"
"github.com/youtube/vitess/go/vt/tabletmanager/actionnode"
"github.com/youtube/vitess/go/vt/tabletmanager/actor"
"github.com/youtube/vitess/go/vt/tabletserver"
"github.com/youtube/vitess/go/vt/topo"
)
var (
vtactionBinaryPath = flag.String("vtaction_binary_path", "", "Full path (including filename) to vtaction binary. If not set, tries VTROOT/bin/vtaction.")
)
type tabletChangeItem struct {
oldTablet topo.Tablet
newTablet topo.Tablet
context string
queuedTime time.Time
}
// ActionAgent is the main class for the agent.
type ActionAgent struct {
// The following fields are set during creation
TopoServer topo.Server
TabletAlias topo.TabletAlias
Mysqld *mysqlctl.Mysqld
DBConfigs *dbconfigs.DBConfigs
SchemaOverrides []tabletserver.SchemaOverride
BinlogPlayerMap *BinlogPlayerMap
// Internal variables
vtActionBinFile string // path to vtaction binary
done chan struct{} // closed when we are done.
// This is the History of the health checks, public so status
// pages can display it
History *history.History
// actionMutex is there to run only one action at a time. If
// both agent.actionMutex and agent.mutex needs to be taken,
// take actionMutex first.
actionMutex sync.Mutex // to run only one action at a time
// mutex is protecting the rest of the members
mutex sync.Mutex
changeItems chan tabletChangeItem
_tablet *topo.TabletInfo
}
func loadSchemaOverrides(overridesFile string) []tabletserver.SchemaOverride {
var schemaOverrides []tabletserver.SchemaOverride
if overridesFile == "" {
return schemaOverrides
}
if err := jscfg.ReadJson(overridesFile, &schemaOverrides); err != nil {
log.Warningf("can't read overridesFile %v: %v", overridesFile, err)
} else {
data, _ := json.MarshalIndent(schemaOverrides, "", " ")
log.Infof("schemaOverrides: %s\n", data)
}
return schemaOverrides
}
// NewActionAgent creates a new ActionAgent and registers all the
// associated services
func NewActionAgent(
tabletAlias topo.TabletAlias,
dbcfgs *dbconfigs.DBConfigs,
mycnf *mysqlctl.Mycnf,
port, securePort int,
overridesFile string,
) (agent *ActionAgent, err error) {
schemaOverrides := loadSchemaOverrides(overridesFile)
topoServer := topo.GetServer()
mysqld := mysqlctl.NewMysqld("Dba", mycnf, &dbcfgs.Dba, &dbcfgs.Repl)
agent = &ActionAgent{
TopoServer: topoServer,
TabletAlias: tabletAlias,
Mysqld: mysqld,
DBConfigs: dbcfgs,
SchemaOverrides: schemaOverrides,
done: make(chan struct{}),
History: history.New(historyLength),
changeItems: make(chan tabletChangeItem, 100),
}
// Start the binlog player services, not playing at start.
agent.BinlogPlayerMap = NewBinlogPlayerMap(topoServer, &dbcfgs.App.ConnectionParams, mysqld)
RegisterBinlogPlayerMap(agent.BinlogPlayerMap)
// try to figure out the mysql port
mysqlPort := mycnf.MysqlPort
if mysqlPort == 0 {
// we don't know the port, try to get it from mysqld
var err error
mysqlPort, err = mysqld.GetMysqlPort()
if err != nil {
log.Warningf("Cannot get current mysql port, will use 0 for now: %v", err)
}
}
if err := agent.Start(mysqlPort, port, securePort); err != nil {
return nil, err
}
// register the RPC services from the agent
agent.registerQueryService()
// start health check if needed
agent.initHeathCheck()
return agent, nil
}
func (agent *ActionAgent) runChangeCallback(oldTablet *topo.Tablet, context string) {
agent.mutex.Lock()
// Access directly since we have the lock.
newTablet := agent._tablet.Tablet
agent.changeItems <- tabletChangeItem{oldTablet: *oldTablet, newTablet: *newTablet, context: context, queuedTime: time.Now()}
log.Infof("Queued tablet callback: %v", context)
agent.mutex.Unlock()
}
func (agent *ActionAgent) executeCallbacksLoop() {
for {
select {
case changeItem := <-agent.changeItems:
wg := sync.WaitGroup{}
agent.mutex.Lock()
log.Infof("Running tablet callback after %v: %v", time.Now().Sub(changeItem.queuedTime), changeItem.context)
wg.Add(1)
go func() {
defer wg.Done()
agent.changeCallback(changeItem.oldTablet, changeItem.newTablet)
}()
agent.mutex.Unlock()
wg.Wait()
case <-agent.done:
return
}
}
}
func (agent *ActionAgent) | () error {
tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias)
if err != nil {
return err
}
agent.mutex.Lock()
agent._tablet = tablet
agent.mutex.Unlock()
return nil
}
func (agent *ActionAgent) Tablet() *topo.TabletInfo {
agent.mutex.Lock()
tablet := agent._tablet
agent.mutex.Unlock()
return tablet
}
func (agent *ActionAgent) resolvePaths() error {
var p string
if *vtactionBinaryPath != "" {
p = *vtactionBinaryPath
} else {
vtroot, err := env.VtRoot()
if err != nil {
return err
}
p = path.Join(vtroot, "bin/vtaction")
}
if _, err := os.Stat(p); err != nil {
return fmt.Errorf("vtaction binary %s not found: %v", p, err)
}
agent.vtActionBinFile = p
return nil
}
// A non-nil return signals that event processing should stop.
func (agent *ActionAgent) dispatchAction(actionPath, data string) error {
agent.actionMutex.Lock()
defer agent.actionMutex.Unlock()
log.Infof("action dispatch %v", actionPath)
actionNode, err := actionnode.ActionNodeFromJson(data, actionPath)
if err != nil {
log.Errorf("action decode failed: %v %v", actionPath, err)
return nil
}
cmd := []string{
agent.vtActionBinFile,
"-action", actionNode.Action,
"-action-node", actionPath,
"-action-guid", actionNode.ActionGuid,
}
cmd = append(cmd, logutil.GetSubprocessFlags()...)
cmd = append(cmd, topo.GetSubprocessFlags()...)
cmd = append(cmd, dbconfigs.GetSubprocessFlags()...)
cmd = append(cmd, mysqlctl.GetSubprocessFlags()...)
log.Infof("action launch %v", cmd)
vtActionCmd := exec.Command(cmd[0], cmd[1:]...)
stdOut, vtActionErr := vtActionCmd.CombinedOutput()
if vtActionErr != nil {
log.Errorf("agent action failed: %v %v\n%s", actionPath, vtActionErr, stdOut)
// If the action failed, preserve single execution path semantics.
return vtActionErr
}
log.Infof("Agent action completed %v %s", actionPath, stdOut)
agent.afterAction(actionPath, actionNode.Action == actionnode.TABLET_ACTION_APPLY_SCHEMA)
return nil
}
// afterAction needs to be run after an action may have changed the current
// state of the tablet.
func (agent *ActionAgent) afterAction(context string, reloadSchema bool) {
log.Infof("Executing post-action change callbacks")
// Save the old tablet so callbacks can have a better idea of
// the precise nature of the transition.
oldTablet := agent.Tablet().Tablet
// Actions should have side effects on the tablet, so reload the data.
if err := agent.readTablet(); err != nil {
log.Warningf("Failed rereading tablet after %v - services may be inconsistent: %v", context, err)
} else {
if updatedTablet := actor.CheckTabletMysqlPort(agent.TopoServer, agent.Mysqld, agent.Tablet()); updatedTablet != nil {
agent.mutex.Lock()
agent._tablet = updatedTablet
agent.mutex.Unlock()
}
agent.runChangeCallback(oldTablet, context)
}
// Maybe invalidate the schema.
// This adds a dependency between tabletmanager and tabletserver,
// so it's not ideal. But I (alainjobart) think it's better
// to have up to date schema in vtocc.
if reloadSchema {
tabletserver.ReloadSchema()
}
log.Infof("Done with post-action change callbacks")
}
func (agent *ActionAgent) verifyTopology() error {
tablet := agent.Tablet()
if tablet == nil {
return fmt.Errorf("agent._tablet is nil")
}
if err := topo.Validate(agent.TopoServer, agent.TabletAlias); err != nil {
// Don't stop, it's not serious enough, this is likely transient.
log.Warningf("tablet validate failed: %v %v", agent.TabletAlias, err)
}
return agent.TopoServer.ValidateTabletActions(agent.TabletAlias)
}
func (agent *ActionAgent) verifyServingAddrs() error {
if !agent.Tablet().IsRunningQueryService() {
return nil
}
// Check to see our address is registered in the right place.
addr, err := agent.Tablet().Tablet.EndPoint()
if err != nil {
return err
}
return agent.TopoServer.UpdateTabletEndpoint(agent.Tablet().Tablet.Alias.Cell, agent.Tablet().Keyspace, agent.Tablet().Shard, agent.Tablet().Type, addr)
}
// Start registers the agent's serving addresses in the tablet record and starts its event loops.
func (agent *ActionAgent) Start(mysqlPort, vtPort, vtsPort int) error {
var err error
if err = agent.readTablet(); err != nil {
return err
}
if err = agent.resolvePaths(); err != nil {
return err
}
// find our hostname as fully qualified, and IP
hostname, err := netutil.FullyQualifiedHostname()
if err != nil {
return err
}
ipAddrs, err := net.LookupHost(hostname)
if err != nil {
return err
}
ipAddr := ipAddrs[0]
// Update bind addr for mysql and query service in the tablet node.
f := func(tablet *topo.Tablet) error {
tablet.Hostname = hostname
tablet.IPAddr = ipAddr
if tablet.Portmap == nil {
tablet.Portmap = make(map[string]int)
}
if mysqlPort != 0 {
// only overwrite mysql port if we know it, otherwise
// leave it as is.
tablet.Portmap["mysql"] = mysqlPort
}
tablet.Portmap["vt"] = vtPort
if vtsPort != 0 {
tablet.Portmap["vts"] = vtsPort
} else {
delete(tablet.Portmap, "vts")
}
return nil
}
if err := agent.TopoServer.UpdateTabletFields(agent.Tablet().Alias, f); err != nil {
return err
}
// Reread to get the changes we just made
if err := agent.readTablet(); err != nil {
return err
}
data := fmt.Sprintf("host:%v\npid:%v\n", hostname, os.Getpid())
if err := agent.TopoServer.CreateTabletPidNode(agent.TabletAlias, data, agent.done); err != nil {
return err
}
if err = agent.verifyTopology(); err != nil {
return err
}
if err = agent.verifyServingAddrs(); err != nil {
return err
}
oldTablet := &topo.Tablet{}
agent.runChangeCallback(oldTablet, "Start")
go agent.actionEventLoop()
go agent.executeCallbacksLoop()
return nil
}
func (agent *ActionAgent) Stop() {
close(agent.done)
agent.BinlogPlayerMap.StopAllPlayersAndReset()
agent.Mysqld.Close()
}
func (agent *ActionAgent) actionEventLoop() {
f := func(actionPath, data string) error {
return agent.dispatchAction(actionPath, data)
}
agent.TopoServer.ActionEventLoop(agent.TabletAlias, f, agent.done)
}
| readTablet | identifier_name |
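Here the held-out middle is just the method name readTablet (identifier_name). A related piece worth isolating is dispatchAction's launch-and-capture of the vtaction subprocess; a reduced sketch of that pattern using os/exec, where the binary name and flags below are placeholders rather than the real vtaction interface:

```go
package main

import (
	"fmt"
	"os/exec"
)

// runAction launches a helper binary and returns its combined output,
// mirroring how dispatchAction builds argv and calls CombinedOutput.
func runAction(bin, action, node, guid string) ([]byte, error) {
	cmd := []string{bin, "-action", action, "-action-node", node, "-action-guid", guid}
	out, err := exec.Command(cmd[0], cmd[1:]...).CombinedOutput()
	if err != nil {
		return out, fmt.Errorf("action %s failed: %w\n%s", action, err, out)
	}
	return out, nil
}

func main() {
	// "echo" stands in for the vtaction binary in this sketch.
	out, err := runAction("echo", "Ping", "/zk/test/action/1", "guid-123")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("output: %s", out)
}
```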
agent.go | // Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
The agent handles local execution of actions triggered remotely.
It has two execution models:
- listening on an action path for ActionNode objects. When receiving
an action, it will forward it to vtaction to perform it (vtaction
uses the actor code). We usually use this model for long-running
queries where an RPC would time out.
All vtaction calls lock the actionMutex.
After executing vtaction, we always call agent.changeCallback.
Additionally, for TABLET_ACTION_APPLY_SCHEMA, we will force a schema
reload.
- listening as an RPC server. The agent performs the action itself,
calling the actor code directly. We use this for short-lived actions.
Most RPC calls lock the actionMutex, except the easy read-only ones.
We will not call changeCallback for all actions, just for the ones
that are relevant. Same for schema reload.
See rpc_server.go for all cases, and which action takes the actionMutex,
runs changeCallback, and reloads the schema.
*/
package tabletmanager
import (
"encoding/json"
"flag"
"fmt"
"net"
"os"
"os/exec"
"path"
"sync"
"time"
log "github.com/golang/glog"
"github.com/youtube/vitess/go/history"
"github.com/youtube/vitess/go/jscfg"
"github.com/youtube/vitess/go/netutil"
"github.com/youtube/vitess/go/vt/dbconfigs"
"github.com/youtube/vitess/go/vt/env"
"github.com/youtube/vitess/go/vt/logutil"
"github.com/youtube/vitess/go/vt/mysqlctl"
"github.com/youtube/vitess/go/vt/tabletmanager/actionnode"
"github.com/youtube/vitess/go/vt/tabletmanager/actor"
"github.com/youtube/vitess/go/vt/tabletserver"
"github.com/youtube/vitess/go/vt/topo"
)
var (
vtactionBinaryPath = flag.String("vtaction_binary_path", "", "Full path (including filename) to vtaction binary. If not set, tries VTROOT/bin/vtaction.")
)
type tabletChangeItem struct {
oldTablet topo.Tablet
newTablet topo.Tablet
context string
queuedTime time.Time
}
// ActionAgent is the main class for the agent.
type ActionAgent struct {
// The following fields are set during creation
TopoServer topo.Server
TabletAlias topo.TabletAlias
Mysqld *mysqlctl.Mysqld
DBConfigs *dbconfigs.DBConfigs
SchemaOverrides []tabletserver.SchemaOverride
BinlogPlayerMap *BinlogPlayerMap
// Internal variables
vtActionBinFile string // path to vtaction binary
done chan struct{} // closed when we are done.
// This is the History of the health checks, public so status
// pages can display it
History *history.History
// actionMutex is there to run only one action at a time. If
// both agent.actionMutex and agent.mutex needs to be taken,
// take actionMutex first.
actionMutex sync.Mutex // to run only one action at a time
// mutex is protecting the rest of the members
mutex sync.Mutex
changeItems chan tabletChangeItem
_tablet *topo.TabletInfo
}
func loadSchemaOverrides(overridesFile string) []tabletserver.SchemaOverride {
var schemaOverrides []tabletserver.SchemaOverride
if overridesFile == "" {
return schemaOverrides
}
if err := jscfg.ReadJson(overridesFile, &schemaOverrides); err != nil {
log.Warningf("can't read overridesFile %v: %v", overridesFile, err)
} else {
data, _ := json.MarshalIndent(schemaOverrides, "", " ")
log.Infof("schemaOverrides: %s\n", data)
}
return schemaOverrides
}
// NewActionAgent creates a new ActionAgent and registers all the
// associated services
func NewActionAgent(
tabletAlias topo.TabletAlias,
dbcfgs *dbconfigs.DBConfigs,
mycnf *mysqlctl.Mycnf,
port, securePort int,
overridesFile string,
) (agent *ActionAgent, err error) {
schemaOverrides := loadSchemaOverrides(overridesFile)
topoServer := topo.GetServer()
mysqld := mysqlctl.NewMysqld("Dba", mycnf, &dbcfgs.Dba, &dbcfgs.Repl)
agent = &ActionAgent{
TopoServer: topoServer,
TabletAlias: tabletAlias,
Mysqld: mysqld,
DBConfigs: dbcfgs,
SchemaOverrides: schemaOverrides,
done: make(chan struct{}),
History: history.New(historyLength),
changeItems: make(chan tabletChangeItem, 100),
}
// Start the binlog player services, not playing at start.
agent.BinlogPlayerMap = NewBinlogPlayerMap(topoServer, &dbcfgs.App.ConnectionParams, mysqld)
RegisterBinlogPlayerMap(agent.BinlogPlayerMap)
// try to figure out the mysql port
mysqlPort := mycnf.MysqlPort
if mysqlPort == 0 {
// we don't know the port, try to get it from mysqld
var err error
mysqlPort, err = mysqld.GetMysqlPort()
if err != nil {
log.Warningf("Cannot get current mysql port, will use 0 for now: %v", err)
}
}
if err := agent.Start(mysqlPort, port, securePort); err != nil {
return nil, err
}
// register the RPC services from the agent
agent.registerQueryService()
// start health check if needed
agent.initHeathCheck()
return agent, nil
}
func (agent *ActionAgent) runChangeCallback(oldTablet *topo.Tablet, context string) {
agent.mutex.Lock()
// Access directly since we have the lock.
newTablet := agent._tablet.Tablet
agent.changeItems <- tabletChangeItem{oldTablet: *oldTablet, newTablet: *newTablet, context: context, queuedTime: time.Now()}
log.Infof("Queued tablet callback: %v", context)
agent.mutex.Unlock()
}
func (agent *ActionAgent) executeCallbacksLoop() {
for {
select {
case changeItem := <-agent.changeItems:
wg := sync.WaitGroup{}
agent.mutex.Lock()
log.Infof("Running tablet callback after %v: %v", time.Now().Sub(changeItem.queuedTime), changeItem.context)
wg.Add(1)
go func() {
defer wg.Done()
agent.changeCallback(changeItem.oldTablet, changeItem.newTablet)
}()
agent.mutex.Unlock()
wg.Wait()
case <-agent.done:
return
}
}
}
func (agent *ActionAgent) readTablet() error {
tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias)
if err != nil {
return err
}
agent.mutex.Lock()
agent._tablet = tablet
agent.mutex.Unlock()
return nil
}
func (agent *ActionAgent) Tablet() *topo.TabletInfo {
agent.mutex.Lock()
tablet := agent._tablet
agent.mutex.Unlock()
return tablet
}
func (agent *ActionAgent) resolvePaths() error {
var p string
if *vtactionBinaryPath != "" {
p = *vtactionBinaryPath
} else {
vtroot, err := env.VtRoot()
if err != nil {
return err
}
p = path.Join(vtroot, "bin/vtaction")
}
if _, err := os.Stat(p); err != nil {
return fmt.Errorf("vtaction binary %s not found: %v", p, err)
}
agent.vtActionBinFile = p
return nil
}
// A non-nil return signals that event processing should stop.
func (agent *ActionAgent) dispatchAction(actionPath, data string) error {
agent.actionMutex.Lock()
defer agent.actionMutex.Unlock()
log.Infof("action dispatch %v", actionPath)
actionNode, err := actionnode.ActionNodeFromJson(data, actionPath)
if err != nil {
log.Errorf("action decode failed: %v %v", actionPath, err)
return nil
}
cmd := []string{
agent.vtActionBinFile,
"-action", actionNode.Action,
"-action-node", actionPath,
"-action-guid", actionNode.ActionGuid,
}
cmd = append(cmd, logutil.GetSubprocessFlags()...)
cmd = append(cmd, topo.GetSubprocessFlags()...)
cmd = append(cmd, dbconfigs.GetSubprocessFlags()...)
cmd = append(cmd, mysqlctl.GetSubprocessFlags()...)
log.Infof("action launch %v", cmd)
vtActionCmd := exec.Command(cmd[0], cmd[1:]...)
stdOut, vtActionErr := vtActionCmd.CombinedOutput()
if vtActionErr != nil {
log.Errorf("agent action failed: %v %v\n%s", actionPath, vtActionErr, stdOut)
// If the action failed, preserve single execution path semantics.
return vtActionErr
}
log.Infof("Agent action completed %v %s", actionPath, stdOut)
agent.afterAction(actionPath, actionNode.Action == actionnode.TABLET_ACTION_APPLY_SCHEMA)
return nil
}
// afterAction needs to be run after an action may have changed the current
// state of the tablet.
func (agent *ActionAgent) afterAction(context string, reloadSchema bool) {
log.Infof("Executing post-action change callbacks")
// Save the old tablet so callbacks can have a better idea of
// the precise nature of the transition.
oldTablet := agent.Tablet().Tablet
// Actions should have side effects on the tablet, so reload the data.
if err := agent.readTablet(); err != nil {
log.Warningf("Failed rereading tablet after %v - services may be inconsistent: %v", context, err) | } else {
if updatedTablet := actor.CheckTabletMysqlPort(agent.TopoServer, agent.Mysqld, agent.Tablet()); updatedTablet != nil {
agent.mutex.Lock()
agent._tablet = updatedTablet
agent.mutex.Unlock()
}
agent.runChangeCallback(oldTablet, context)
}
// Maybe invalidate the schema.
// This adds a dependency between tabletmanager and tabletserver,
// so it's not ideal. But I (alainjobart) think it's better
// to have up to date schema in vtocc.
if reloadSchema {
tabletserver.ReloadSchema()
}
log.Infof("Done with post-action change callbacks")
}
func (agent *ActionAgent) verifyTopology() error {
tablet := agent.Tablet()
if tablet == nil {
return fmt.Errorf("agent._tablet is nil")
}
if err := topo.Validate(agent.TopoServer, agent.TabletAlias); err != nil {
// Don't stop, it's not serious enough, this is likely transient.
log.Warningf("tablet validate failed: %v %v", agent.TabletAlias, err)
}
return agent.TopoServer.ValidateTabletActions(agent.TabletAlias)
}
func (agent *ActionAgent) verifyServingAddrs() error {
if !agent.Tablet().IsRunningQueryService() {
return nil
}
// Check to see our address is registered in the right place.
addr, err := agent.Tablet().Tablet.EndPoint()
if err != nil {
return err
}
return agent.TopoServer.UpdateTabletEndpoint(agent.Tablet().Tablet.Alias.Cell, agent.Tablet().Keyspace, agent.Tablet().Shard, agent.Tablet().Type, addr)
}
// Start registers the agent's serving addresses in the tablet record and starts its event loops.
func (agent *ActionAgent) Start(mysqlPort, vtPort, vtsPort int) error {
var err error
if err = agent.readTablet(); err != nil {
return err
}
if err = agent.resolvePaths(); err != nil {
return err
}
// find our hostname as fully qualified, and IP
hostname, err := netutil.FullyQualifiedHostname()
if err != nil {
return err
}
ipAddrs, err := net.LookupHost(hostname)
if err != nil {
return err
}
ipAddr := ipAddrs[0]
// Update bind addr for mysql and query service in the tablet node.
f := func(tablet *topo.Tablet) error {
tablet.Hostname = hostname
tablet.IPAddr = ipAddr
if tablet.Portmap == nil {
tablet.Portmap = make(map[string]int)
}
if mysqlPort != 0 {
// only overwrite mysql port if we know it, otherwise
// leave it as is.
tablet.Portmap["mysql"] = mysqlPort
}
tablet.Portmap["vt"] = vtPort
if vtsPort != 0 {
tablet.Portmap["vts"] = vtsPort
} else {
delete(tablet.Portmap, "vts")
}
return nil
}
if err := agent.TopoServer.UpdateTabletFields(agent.Tablet().Alias, f); err != nil {
return err
}
// Reread to get the changes we just made
if err := agent.readTablet(); err != nil {
return err
}
data := fmt.Sprintf("host:%v\npid:%v\n", hostname, os.Getpid())
if err := agent.TopoServer.CreateTabletPidNode(agent.TabletAlias, data, agent.done); err != nil {
return err
}
if err = agent.verifyTopology(); err != nil {
return err
}
if err = agent.verifyServingAddrs(); err != nil {
return err
}
oldTablet := &topo.Tablet{}
agent.runChangeCallback(oldTablet, "Start")
go agent.actionEventLoop()
go agent.executeCallbacksLoop()
return nil
}
func (agent *ActionAgent) Stop() {
close(agent.done)
agent.BinlogPlayerMap.StopAllPlayersAndReset()
agent.Mysqld.Close()
}
func (agent *ActionAgent) actionEventLoop() {
f := func(actionPath, data string) error {
return agent.dispatchAction(actionPath, data)
}
agent.TopoServer.ActionEventLoop(agent.TabletAlias, f, agent.done)
} | random_line_split |
|
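The row above splits agent.go at a random line inside afterAction (random_line_split). Its Start method updates the tablet record through a mutator closure passed to UpdateTabletFields, a read-modify-write idiom for topology records; a generic sketch of the idiom with simplified types (not the topo.Server API):

```go
package main

import "fmt"

type tablet struct {
	Hostname string
	Portmap  map[string]int
}

// updateFields applies a mutator to the record, the way
// topo.Server.UpdateTabletFields applies f under the server's own locking.
func updateFields(t *tablet, f func(*tablet) error) error {
	return f(t)
}

func main() {
	t := &tablet{}
	err := updateFields(t, func(t *tablet) error {
		t.Hostname = "host.example.com" // illustrative value
		if t.Portmap == nil {
			t.Portmap = make(map[string]int)
		}
		t.Portmap["vt"] = 15000
		return nil
	})
	if err != nil {
		fmt.Println("update failed:", err)
		return
	}
	fmt.Printf("%+v\n", *t)
}
```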
agent.go | // Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
The agent handles local execution of actions triggered remotely.
It has two execution models:
- listening on an action path for ActionNode objects. When receiving
an action, it will forward it to vtaction to perform it (vtaction
uses the actor code). We usually use this model for long-running
queries where an RPC would time out.
All vtaction calls lock the actionMutex.
After executing vtaction, we always call agent.changeCallback.
Additionally, for TABLET_ACTION_APPLY_SCHEMA, we will force a schema
reload.
- listening as an RPC server. The agent performs the action itself,
calling the actor code directly. We use this for short-lived actions.
Most RPC calls lock the actionMutex, except the easy read-only ones.
We will not call changeCallback for all actions, just for the ones
that are relevant. Same for schema reload.
See rpc_server.go for all cases, and which action takes the actionMutex,
runs changeCallback, and reloads the schema.
*/
package tabletmanager
import (
"encoding/json"
"flag"
"fmt"
"net"
"os"
"os/exec"
"path"
"sync"
"time"
log "github.com/golang/glog"
"github.com/youtube/vitess/go/history"
"github.com/youtube/vitess/go/jscfg"
"github.com/youtube/vitess/go/netutil"
"github.com/youtube/vitess/go/vt/dbconfigs"
"github.com/youtube/vitess/go/vt/env"
"github.com/youtube/vitess/go/vt/logutil"
"github.com/youtube/vitess/go/vt/mysqlctl"
"github.com/youtube/vitess/go/vt/tabletmanager/actionnode"
"github.com/youtube/vitess/go/vt/tabletmanager/actor"
"github.com/youtube/vitess/go/vt/tabletserver"
"github.com/youtube/vitess/go/vt/topo"
)
var (
vtactionBinaryPath = flag.String("vtaction_binary_path", "", "Full path (including filename) to vtaction binary. If not set, tries VTROOT/bin/vtaction.")
)
type tabletChangeItem struct {
oldTablet topo.Tablet
newTablet topo.Tablet
context string
queuedTime time.Time
}
// ActionAgent is the main class for the agent.
type ActionAgent struct {
// The following fields are set during creation
TopoServer topo.Server
TabletAlias topo.TabletAlias
Mysqld *mysqlctl.Mysqld
DBConfigs *dbconfigs.DBConfigs
SchemaOverrides []tabletserver.SchemaOverride
BinlogPlayerMap *BinlogPlayerMap
// Internal variables
vtActionBinFile string // path to vtaction binary
done chan struct{} // closed when we are done.
// This is the History of the health checks, public so status
// pages can display it
History *history.History
// actionMutex is there to run only one action at a time. If
// both agent.actionMutex and agent.mutex needs to be taken,
// take actionMutex first.
actionMutex sync.Mutex // to run only one action at a time
// mutex is protecting the rest of the members
mutex sync.Mutex
changeItems chan tabletChangeItem
_tablet *topo.TabletInfo
}
func loadSchemaOverrides(overridesFile string) []tabletserver.SchemaOverride {
var schemaOverrides []tabletserver.SchemaOverride
if overridesFile == "" {
return schemaOverrides
}
if err := jscfg.ReadJson(overridesFile, &schemaOverrides); err != nil {
log.Warningf("can't read overridesFile %v: %v", overridesFile, err)
} else {
data, _ := json.MarshalIndent(schemaOverrides, "", " ")
log.Infof("schemaOverrides: %s\n", data)
}
return schemaOverrides
}
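// A hedged illustration (not in the original file) of what an overridesFile
// might contain: a JSON array that decodes into []tabletserver.SchemaOverride.
// The field names below are placeholders; see tabletserver.SchemaOverride for
// the real schema.
//
//	[
//	  {"Name": "my_keyspace_table", "PKColumns": ["id"]}
//	]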
// NewActionAgent creates a new ActionAgent and registers all the
// associated services
func NewActionAgent(
tabletAlias topo.TabletAlias,
dbcfgs *dbconfigs.DBConfigs,
mycnf *mysqlctl.Mycnf,
port, securePort int,
overridesFile string,
) (agent *ActionAgent, err error) {
schemaOverrides := loadSchemaOverrides(overridesFile)
topoServer := topo.GetServer()
mysqld := mysqlctl.NewMysqld("Dba", mycnf, &dbcfgs.Dba, &dbcfgs.Repl)
agent = &ActionAgent{
TopoServer: topoServer,
TabletAlias: tabletAlias,
Mysqld: mysqld,
DBConfigs: dbcfgs,
SchemaOverrides: schemaOverrides,
done: make(chan struct{}),
History: history.New(historyLength),
changeItems: make(chan tabletChangeItem, 100),
}
// Start the binlog player services, not playing at start.
agent.BinlogPlayerMap = NewBinlogPlayerMap(topoServer, &dbcfgs.App.ConnectionParams, mysqld)
RegisterBinlogPlayerMap(agent.BinlogPlayerMap)
// try to figure out the mysql port
mysqlPort := mycnf.MysqlPort
if mysqlPort == 0 {
// we don't know the port, try to get it from mysqld
var err error
mysqlPort, err = mysqld.GetMysqlPort()
if err != nil {
log.Warningf("Cannot get current mysql port, will use 0 for now: %v", err)
}
}
	if err := agent.Start(mysqlPort, port, securePort); err != nil {
		return nil, err
	}
// register the RPC services from the agent
agent.registerQueryService()
// start health check if needed
agent.initHeathCheck()
return agent, nil
}
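// A minimal sketch (not part of the original file) of how a tablet binary is
// assumed to construct and tear down the agent; the alias, ports and empty
// overrides path are placeholders.
//
//	agent, err := NewActionAgent(tabletAlias, dbcfgs, mycnf, 6700, 6701, "")
//	if err != nil {
//		log.Fatalf("agent init failed: %v", err)
//	}
//	defer agent.Stop()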
func (agent *ActionAgent) runChangeCallback(oldTablet *topo.Tablet, context string) {
agent.mutex.Lock()
// Access directly since we have the lock.
newTablet := agent._tablet.Tablet
agent.changeItems <- tabletChangeItem{oldTablet: *oldTablet, newTablet: *newTablet, context: context, queuedTime: time.Now()}
log.Infof("Queued tablet callback: %v", context)
agent.mutex.Unlock()
}
func (agent *ActionAgent) executeCallbacksLoop() {
for {
select {
case changeItem := <-agent.changeItems:
wg := sync.WaitGroup{}
agent.mutex.Lock()
log.Infof("Running tablet callback after %v: %v", time.Now().Sub(changeItem.queuedTime), changeItem.context)
wg.Add(1)
go func() {
defer wg.Done()
agent.changeCallback(changeItem.oldTablet, changeItem.newTablet)
}()
agent.mutex.Unlock()
wg.Wait()
case <-agent.done:
return
}
}
}
func (agent *ActionAgent) readTablet() error {
tablet, err := agent.TopoServer.GetTablet(agent.TabletAlias)
if err != nil {
return err
}
agent.mutex.Lock()
agent._tablet = tablet
agent.mutex.Unlock()
return nil
}
func (agent *ActionAgent) Tablet() *topo.TabletInfo {
agent.mutex.Lock()
tablet := agent._tablet
agent.mutex.Unlock()
return tablet
}
func (agent *ActionAgent) resolvePaths() error {
var p string
if *vtactionBinaryPath != "" {
p = *vtactionBinaryPath
} else {
vtroot, err := env.VtRoot()
if err != nil {
return err
}
p = path.Join(vtroot, "bin/vtaction")
}
if _, err := os.Stat(p); err != nil {
return fmt.Errorf("vtaction binary %s not found: %v", p, err)
}
agent.vtActionBinFile = p
return nil
}
// A non-nil return signals that event processing should stop.
func (agent *ActionAgent) dispatchAction(actionPath, data string) error {
agent.actionMutex.Lock()
defer agent.actionMutex.Unlock()
log.Infof("action dispatch %v", actionPath)
actionNode, err := actionnode.ActionNodeFromJson(data, actionPath)
if err != nil {
log.Errorf("action decode failed: %v %v", actionPath, err)
return nil
}
cmd := []string{
agent.vtActionBinFile,
"-action", actionNode.Action,
"-action-node", actionPath,
"-action-guid", actionNode.ActionGuid,
}
cmd = append(cmd, logutil.GetSubprocessFlags()...)
cmd = append(cmd, topo.GetSubprocessFlags()...)
cmd = append(cmd, dbconfigs.GetSubprocessFlags()...)
cmd = append(cmd, mysqlctl.GetSubprocessFlags()...)
log.Infof("action launch %v", cmd)
vtActionCmd := exec.Command(cmd[0], cmd[1:]...)
stdOut, vtActionErr := vtActionCmd.CombinedOutput()
if vtActionErr != nil {
log.Errorf("agent action failed: %v %v\n%s", actionPath, vtActionErr, stdOut)
// If the action failed, preserve single execution path semantics.
return vtActionErr
}
log.Infof("Agent action completed %v %s", actionPath, stdOut)
agent.afterAction(actionPath, actionNode.Action == actionnode.TABLET_ACTION_APPLY_SCHEMA)
return nil
}
// afterAction needs to be run after an action may have changed the current
// state of the tablet.
func (agent *ActionAgent) afterAction(context string, reloadSchema bool) {
log.Infof("Executing post-action change callbacks")
// Save the old tablet so callbacks can have a better idea of
// the precise nature of the transition.
oldTablet := agent.Tablet().Tablet
// Actions should have side effects on the tablet, so reload the data.
if err := agent.readTablet(); err != nil {
log.Warningf("Failed rereading tablet after %v - services may be inconsistent: %v", context, err)
} else {
if updatedTablet := actor.CheckTabletMysqlPort(agent.TopoServer, agent.Mysqld, agent.Tablet()); updatedTablet != nil {
agent.mutex.Lock()
agent._tablet = updatedTablet
agent.mutex.Unlock()
}
agent.runChangeCallback(oldTablet, context)
}
// Maybe invalidate the schema.
// This adds a dependency between tabletmanager and tabletserver,
// so it's not ideal. But I (alainjobart) think it's better
// to have up to date schema in vtocc.
if reloadSchema {
tabletserver.ReloadSchema()
}
log.Infof("Done with post-action change callbacks")
}
func (agent *ActionAgent) verifyTopology() error {
tablet := agent.Tablet()
if tablet == nil {
return fmt.Errorf("agent._tablet is nil")
}
if err := topo.Validate(agent.TopoServer, agent.TabletAlias); err != nil {
// Don't stop, it's not serious enough, this is likely transient.
log.Warningf("tablet validate failed: %v %v", agent.TabletAlias, err)
}
return agent.TopoServer.ValidateTabletActions(agent.TabletAlias)
}
func (agent *ActionAgent) verifyServingAddrs() error {
if !agent.Tablet().IsRunningQueryService() {
return nil
}
// Check to see our address is registered in the right place.
addr, err := agent.Tablet().Tablet.EndPoint()
if err != nil {
return err
}
return agent.TopoServer.UpdateTabletEndpoint(agent.Tablet().Tablet.Alias.Cell, agent.Tablet().Keyspace, agent.Tablet().Shard, agent.Tablet().Type, addr)
}
// bindAddr: the address for the query service advertised by this agent
func (agent *ActionAgent) Start(mysqlPort, vtPort, vtsPort int) error {
var err error
if err = agent.readTablet(); err != nil {
return err
}
if err = agent.resolvePaths(); err != nil {
return err
}
// find our hostname as fully qualified, and IP
hostname, err := netutil.FullyQualifiedHostname()
if err != nil {
return err
}
ipAddrs, err := net.LookupHost(hostname)
if err != nil {
return err
}
ipAddr := ipAddrs[0]
// Update bind addr for mysql and query service in the tablet node.
f := func(tablet *topo.Tablet) error {
tablet.Hostname = hostname
tablet.IPAddr = ipAddr
if tablet.Portmap == nil {
tablet.Portmap = make(map[string]int)
}
if mysqlPort != 0 {
// only overwrite mysql port if we know it, otherwise
// leave it as is.
tablet.Portmap["mysql"] = mysqlPort
}
tablet.Portmap["vt"] = vtPort
if vtsPort != 0 {
tablet.Portmap["vts"] = vtsPort
} else {
delete(tablet.Portmap, "vts")
}
return nil
}
if err := agent.TopoServer.UpdateTabletFields(agent.Tablet().Alias, f); err != nil {
return err
}
// Reread to get the changes we just made
if err := agent.readTablet(); err != nil {
return err
}
data := fmt.Sprintf("host:%v\npid:%v\n", hostname, os.Getpid())
if err := agent.TopoServer.CreateTabletPidNode(agent.TabletAlias, data, agent.done); err != nil {
return err
}
if err = agent.verifyTopology(); err != nil {
return err
}
if err = agent.verifyServingAddrs(); err != nil {
return err
}
oldTablet := &topo.Tablet{}
agent.runChangeCallback(oldTablet, "Start")
go agent.actionEventLoop()
go agent.executeCallbacksLoop()
return nil
}
func (agent *ActionAgent) Stop() {
close(agent.done)
agent.BinlogPlayerMap.StopAllPlayersAndReset()
agent.Mysqld.Close()
}
func (agent *ActionAgent) actionEventLoop() {
f := func(actionPath, data string) error {
return agent.dispatchAction(actionPath, data)
}
agent.TopoServer.ActionEventLoop(agent.TabletAlias, f, agent.done)
}
mpsse.go

// Copyright 2017 The Periph Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
// MPSSE is Multi-Protocol Synchronous Serial Engine
//
// MPSSE basics:
// http://www.ftdichip.com/Support/Documents/AppNotes/AN_135_MPSSE_Basics.pdf
//
// MPSSE and MCU emulation modes:
// http://www.ftdichip.com/Support/Documents/AppNotes/AN_108_Command_Processor_for_MPSSE_and_MCU_Host_Bus_Emulation_Modes.pdf
package ftdi
import (
"context"
"errors"
"fmt"
"time"
"periph.io/x/conn/v3/gpio"
"periph.io/x/conn/v3/physic"
)
const (
// TDI/TDO serial operation synchronised on clock edges.
//
// Long streams (default):
// - [1, 65536] bytes (length is sent minus one, requires 8 bits multiple)
// <op>, <LengthLow-1>, <LengthHigh-1>, <byte0>, ..., <byteN>
//
// Short streams (dataBit is specified):
// - [1, 8] bits
// <op>, <Length-1>, <byte>
//
// When both dataOut and dataIn are specified, one of dataOutFall or
// dataInFall should be specified, at least for most sane protocols.
//
// Flags:
dataOut byte = 0x10 // Enable output, default on +VE (Rise)
dataIn byte = 0x20 // Enable input, default on +VE (Rise)
dataOutFall byte = 0x01 // instead of Rise
dataInFall byte = 0x04 // instead of Rise
dataLSBF byte = 0x08 // instead of MSBF
dataBit byte = 0x02 // instead of Byte
// Data line drives low when the data is 0 and tristates on data 1. This is
// used with I²C.
// <op>, <ADBus pins>, <ACBus pins>
dataTristate byte = 0x9E
// TSM operation (for JTAG).
//
// - Send bits 6 to 0 to the TMS pin using LSB or MSB.
// - Bit 7 is passed to TDI/DO before the first clock of TMS and is held
// static for the duration of TMS clocking.
//
// <op>, <Length>, <byte>
tmsOutLSBFRise byte = 0x4A
tmsOutLSBFFall byte = 0x4B
tmsIOLSBInRise byte = 0x6A
tmsIOLSBInFall byte = 0x6B
// Unclear: 0x6E and 0x6F
// GPIO operation.
//
// - Operates on 8 GPIOs at a time, e.g. C0~C7 or D0~D7.
// - Direction 1 means output, 0 means input.
//
// <op>, <value>, <direction>
gpioSetD byte = 0x80
gpioSetC byte = 0x82
// <op>, returns <value>
gpioReadD byte = 0x81
gpioReadC byte = 0x83
// Internal loopback.
//
// Connects TDI and TDO together.
internalLoopbackEnable byte = 0x84
internalLoopbackDisable byte = 0x85
// Clock.
//
// The TCK/SK has a 50% duty cycle.
//
// The inactive clock state can be set via the gpioSetD command and control
// bit 0.
//
// By default, the base clock is 6MHz via a 5x divisor. On
// FT232H/FT2232H/FT4232H, the 5x divisor can be disabled.
clock30MHz byte = 0x8A
clock6MHz byte = 0x8B
// Sets clock divisor.
//
// The effective value depends if clock30MHz was sent or not.
//
// - 0(1) 6MHz / 30MHz
// - 1(2) 3MHz / 15MHz
// - 2(3) 2MHz / 10MHz
// - 3(4) 1.5MHz / 7.5MHz
	// - 4(5)      1.2MHz  /   6MHz
// - ...
// - 0xFFFF(65536) 91.553Hz / 457.763Hz
//
// <op>, <valueL-1>, <valueH-1>
clockSetDivisor byte = 0x86
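	// Worked example of the divisor math above: freq = base/(value+1), so a
	// divisor value of 2 gives 30MHz/(2+1) = 10MHz with the 5x divisor
	// disabled, or 6MHz/(2+1) = 2MHz with it enabled, matching the table.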
// Uses 3 phases data clocking: data is valid on both clock edges. Needed
// for I²C.
clock3Phase byte = 0x8C
// Uses normal 2 phases data clocking.
clock2Phase byte = 0x8D
// Enables clock even while not doing any operation. Used with JTAG.
// Enables the clock between [1, 8] pulses.
// <op>, <length-1>
clockOnShort byte = 0x8E
// Enables the clock between [8, 524288] pulses in 8 multiples.
// <op>, <lengthL-1>, <lengthH-1>
clockOnLong byte = 0x8F
// Enables clock until D5 is high or low. Used with JTAG.
clockUntilHigh byte = 0x94
clockUntilLow byte = 0x95
// <op>, <lengthL-1>, <lengthH-1> in 8 multiples.
clockUntilHighLong byte = 0x9C
clockUntilLowLong byte = 0x9D
// Enables adaptive clocking. Used with JTAG.
//
// This causes the controller to wait for D7 signal state as an ACK.
clockAdaptive byte = 0x96
// Disables adaptive clocking.
clockNormal byte = 0x97
// CPU mode.
//
// Access the device registers like a memory mapped device.
//
// <op>, <addrLow>
cpuReadShort byte = 0x90
// <op>, <addrHi>, <addrLow>
cpuReadFar byte = 0x91
// <op>, <addrLow>, <data>
cpuWriteShort byte = 0x92
// <op>, <addrHi>, <addrLow>, <data>
	cpuWriteFar byte = 0x93
// Buffer operations.
//
// Flush the buffer back to the host.
flush byte = 0x87
// Wait until D5 (JTAG) or I/O1 (CPU) is high. Once it is detected as
// high, the MPSSE engine moves on to process the next instruction.
waitHigh byte = 0x88
waitLow byte = 0x89
)
// InitMPSSE sets the device into MPSSE mode.
//
// This requires an ft232h, ft2232, ft2232h or a ft4232h.
//
// Use only one of Init or InitMPSSE.
func (h *handle) InitMPSSE() error {
// http://www.ftdichip.com/Support/Documents/AppNotes/AN_255_USB%20to%20I2C%20Example%20using%20the%20FT232H%20and%20FT201X%20devices.pdf
// Pre-state:
// - Write EEPROM i.IsFifo = true so the device DBus is started in tristate.
// Try to verify the MPSSE controller without initializing it first. This is
// the 'happy path', which enables reusing the device in its current state
// without affecting current GPIO state.
if h.mpsseVerify() != nil {
// Do a full reset. Just trying to set the MPSSE controller will
// likely not work. That's a layering violation (since the retry with reset
// is done in driver.go) but we've survived worse things...
//
// TODO(maruel): This is not helping in practice, this needs to be fine
// tuned.
if err := h.Reset(); err != nil {
return err
}
if err := h.Init(); err != nil {
return err
}
// That does the magic thing.
if err := h.SetBitMode(0, bitModeMpsse); err != nil {
return err
}
if err := h.mpsseVerify(); err != nil {
return err
}
}
// Initialize MPSSE to a known state.
// Reset the clock since it is impossible to read back the current clock rate.
// Reset all the GPIOs as inputs since it is impossible to read back the
// state of each GPIO (if they are input or output).
cmd := []byte{
clock30MHz, clockNormal, clock2Phase, internalLoopbackDisable,
gpioSetC, 0x00, 0x00,
gpioSetD, 0x00, 0x00,
}
if _, err := h.Write(cmd); err != nil {
return err
}
// Success!!
return nil
}
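// A hedged usage sketch (not part of the driver) of the typical call order
// once a handle is opened; the 1MHz clock and the 0x9F payload are
// illustrative only.
//
//	if err := h.InitMPSSE(); err != nil {
//		return err
//	}
//	if _, err := h.MPSSEClock(1 * physic.MegaHertz); err != nil {
//		return err
//	}
//	// Full-duplex transfer: send one byte, read one byte back (SPI-style).
//	var rx [1]byte
//	if err := h.MPSSETx([]byte{0x9F}, rx[:], gpio.FallingEdge, gpio.RisingEdge, false); err != nil {
//		return err
//	}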
// mpsseVerify sends an invalid MPSSE command and verifies that the device
// flags it as a bad command by echoing it back after an 0xFA error marker.
//
// In practice this takes around 2ms.
func (h *handle) mpsseVerify() error {
var b [2]byte
for _, v := range []byte{0xAA, 0xAB} {
// Write a bad command and ensure it returned correctly.
// Unlike what the application note proposes, include a flush op right
// after. Without the flush, the device will only flush after the delay
// specified to SetLatencyTimer. The flush removes this unneeded wait,
// which enables increasing the delay specified to SetLatencyTimer.
b[0] = v
b[1] = flush
if _, err := h.Write(b[:]); err != nil {
return fmt.Errorf("ftdi: MPSSE verification failed: %w", err)
}
p, e := h.h.GetQueueStatus()
if e != 0 {
return toErr("Read/GetQueueStatus", e)
}
if p != 2 {
return fmt.Errorf("ftdi: MPSSE verification failed: expected 2 bytes reply, got %d bytes", p)
}
ctx, cancel := context200ms()
defer cancel()
if _, err := h.ReadAll(ctx, b[:]); err != nil {
return fmt.Errorf("ftdi: MPSSE verification failed: %w", err)
}
// 0xFA means invalid command, 0xAA is the command echoed back.
if b[0] != 0xFA || b[1] != v {
return fmt.Errorf("ftdi: MPSSE verification failed test for byte %#x: %#x", v, b)
}
}
return nil
}
// MPSSERegRead reads the memory mapped registers from the device.
func (h *handle) MPSSERegRead(addr uint16) (byte, error) {
// Unlike most other operations, the uint16 byte order is <hi>, <lo>.
b := [...]byte{cpuReadFar, byte(addr >> 8), byte(addr), flush}
if _, err := h.Write(b[:]); err != nil {
return 0, err
}
ctx, cancel := context200ms()
defer cancel()
_, err := h.ReadAll(ctx, b[:1])
return b[0], err
}
// MPSSEClock sets the clock at the closest value and returns it.
func (h *handle) MPSSEClock(f physic.Frequency) (physic.Frequency, error) {
// TODO(maruel): Memory clock and skip if the same value.
clk := clock30MHz
base := 30 * physic.MegaHertz
div := base / f
if div >= 65536 {
clk = clock6MHz
base /= 5
div = base / f
if div >= 65536 {
return 0, errors.New("ftdi: clock frequency is too low")
}
}
b := [...]byte{clk, clockSetDivisor, byte(div - 1), byte((div - 1) >> 8)}
_, err := h.Write(b[:])
return base / div, err
}
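// Worked example, assuming a request of 100kHz: div = 30MHz/100kHz = 300,
// which fits in 16 bits, so the 30MHz base is kept, the device is programmed
// with div-1 = 299, and exactly 100kHz is returned to the caller.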
// mpsseTxOp returns the right MPSSE command byte for the stream.
func mpsseTxOp(w, r bool, ew, er gpio.Edge, lsbf bool) byte {
op := byte(0)
if lsbf {
op |= dataLSBF
}
if w {
op |= dataOut
if ew == gpio.FallingEdge {
op |= dataOutFall
}
}
if r {
op |= dataIn
if er == gpio.FallingEdge {
op |= dataInFall
}
}
return op
}
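// For instance (an illustration, not original code), a full-duplex MSB-first
// transfer writing on the falling edge and reading on the rising edge:
//
//	op := mpsseTxOp(true, true, gpio.FallingEdge, gpio.RisingEdge, false)
//	// op == dataOut|dataOutFall|dataIn == 0x10|0x01|0x20 == 0x31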
// MPSSETx runs a transaction on the clock on pins D0, D1 and D2.
//
// It can only do it on a multiple of 8 bits.
func (h *handle) MPSSETx(w, r []byte, ew, er gpio.Edge, lsbf bool) error {
l := len(w)
if len(w) != 0 {
// TODO(maruel): This is easy to fix by daisy chaining operations.
if len(w) > 65536 {
return errors.New("ftdi: write buffer too long; max 65536")
}
}
if len(r) != 0 {
if len(r) > 65536 {
return errors.New("ftdi: read buffer too long; max 65536")
}
if l != 0 && len(r) != l {
return errors.New("ftdi: mismatched buffer lengths")
}
l = len(r)
}
// The FT232H has 1Kb Tx and Rx buffers. So partial writes should be done.
// TODO(maruel): Test.
// Flush can be useful if rbits != 0.
op := mpsseTxOp(len(w) != 0, len(r) != 0, ew, er, lsbf)
cmd := []byte{op, byte(l - 1), byte((l - 1) >> 8)}
cmd = append(cmd, w...)
cmd = append(cmd, flush)
if _, err := h.Write(cmd); err != nil {
return err
}
if len(r) != 0 {
ctx, cancel := context200ms()
defer cancel()
_, err := h.ReadAll(ctx, r)
return err
}
return nil
}
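// A hedged example: clock 4 bytes out of the device without writing anything,
// sampling on the rising edge, MSB first.
//
//	var buf [4]byte
//	if err := h.MPSSETx(nil, buf[:], gpio.RisingEdge, gpio.RisingEdge, false); err != nil {
//		// handle error
//	}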
// MPSSETxShort runs a transaction on the clock pins D0, D1 and D2 for a byte
// or less: between 1 and 8 bits.
func (h *handle) MPSSETxShort(w byte, wbits, rbits int, ew, er gpio.Edge, lsbf bool) (byte, error) {
op := byte(dataBit)
if lsbf {
op |= dataLSBF
}
l := wbits
if wbits != 0 {
if wbits > 8 {
return 0, errors.New("ftdi: write buffer too long; max 8")
}
op |= dataOut
if ew == gpio.FallingEdge {
op |= dataOutFall
}
}
if rbits != 0 {
if rbits > 8 {
return 0, errors.New("ftdi: read buffer too long; max 8")
}
op |= dataIn
if er == gpio.FallingEdge {
op |= dataInFall
}
if l != 0 && rbits != l {
return 0, errors.New("ftdi: mismatched buffer lengths")
}
l = rbits
}
b := [3]byte{op, byte(l - 1)}
cmd := b[:2]
if wbits != 0 {
cmd = append(cmd, w)
}
if rbits != 0 {
cmd = append(cmd, flush)
}
if _, err := h.Write(cmd); err != nil {
return 0, err
}
if rbits != 0 {
ctx, cancel := context200ms()
defer cancel()
_, err := h.ReadAll(ctx, b[:1])
return b[0], err
}
return 0, nil
}
// MPSSECBus operates on 8 GPIOs at a time C0~C7.
//
// Direction 1 means output, 0 means input.
func (h *handle) MPSSECBus(mask, value byte) error {
b := [...]byte{gpioSetC, value, mask}
_, err := h.Write(b[:])
return err
}
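// Illustrative use: make C0 and C1 outputs (mask 0x03) and drive C0 high:
//
//	_ = h.MPSSECBus(0x03, 0x01)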
// MPSSEDBus operates on 8 GPIOs at a time D0~D7.
//
// Direction 1 means output, 0 means input.
func (h *handle) MPSSEDBus(mask, value byte) error {
b := [...]byte{gpioSetD, value, mask}
_, err := h.Write(b[:])
return err
}
// MPSSECBusRead reads all the CBus pins C0~C7.
func (h *handle) MPSSECBusRead() (byte, error) {
	b := [...]byte{gpioReadC, flush}
	if _, err := h.Write(b[:]); err != nil {
		return 0, err
	}
	ctx, cancel := context200ms()
	defer cancel()
	if _, err := h.ReadAll(ctx, b[:1]); err != nil {
		return 0, err
	}
	return b[0], nil
}

// MPSSEDBusRead reads all the DBus pins D0~D7.
func (h *handle) MPSSEDBusRead() (byte, error) {
b := [...]byte{gpioReadD, flush}
if _, err := h.Write(b[:]); err != nil {
return 0, err
}
ctx, cancel := context200ms()
defer cancel()
if _, err := h.ReadAll(ctx, b[:1]); err != nil {
return 0, err
}
return b[0], nil
}
func context200ms() (context.Context, func()) {
return context.WithTimeout(context.Background(), 200*time.Millisecond)
}
subscription_group.go

/*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package control
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"sync"
"github.com/intelsdi-x/snap/control/plugin"
"github.com/intelsdi-x/snap/core"
"github.com/intelsdi-x/snap/core/cdata"
"github.com/intelsdi-x/snap/core/control_event"
"github.com/intelsdi-x/snap/core/serror"
log "github.com/sirupsen/logrus"
)
var (
// ErrSubscriptionGroupAlreadyExists - error message when the subscription
// group already exists
ErrSubscriptionGroupAlreadyExists = core.ErrSubscriptionGroupAlreadyExists
// ErrSubscriptionGroupDoesNotExist - error message when the subscription
// group does not exist
ErrSubscriptionGroupDoesNotExist = core.ErrSubscriptionGroupDoesNotExist
	// ErrConfigRequiredForMetric - error returned when a metric requires
	// config and none was provided
	ErrConfigRequiredForMetric = errors.New("config required")
)
// ManagesSubscriptionGroups is the interface implemented by an object that can
// manage subscription groups.
type ManagesSubscriptionGroups interface {
Process() (errs []serror.SnapError)
Add(id string, requested []core.RequestedMetric,
configTree *cdata.ConfigDataTree,
plugins []core.SubscribedPlugin) []serror.SnapError
Get(id string) (map[string]metricTypes, []serror.SnapError, error)
Remove(id string) []serror.SnapError
ValidateDeps(requested []core.RequestedMetric,
plugins []core.SubscribedPlugin,
configTree *cdata.ConfigDataTree, asserts ...core.SubscribedPluginAssert) (serrs []serror.SnapError)
validateMetric(metric core.Metric) (serrs []serror.SnapError)
validatePluginUnloading(*loadedPlugin) (errs []serror.SnapError)
}
type subscriptionGroup struct {
*pluginControl
// requested metrics - never updated
requestedMetrics []core.RequestedMetric
// requested plugins - contains only processors and publishers;
// never updated
requestedPlugins []core.SubscribedPlugin
// config from request - never updated
configTree *cdata.ConfigDataTree
// resulting metrics - updated after plugin load/unload events; they are grouped by plugin
metrics map[string]metricTypes
// resulting plugins - updated after plugin load/unload events
plugins []core.SubscribedPlugin
// errors generated the last time the subscription was processed
// subscription groups are processed when the subscription group is added
// and when plugins are loaded/unloaded
errors []serror.SnapError
}
type subscriptionMap map[string]*subscriptionGroup
type subscriptionGroups struct {
subscriptionMap
*sync.Mutex
*pluginControl
}
func newSubscriptionGroups(control *pluginControl) *subscriptionGroups {
return &subscriptionGroups{
make(map[string]*subscriptionGroup),
&sync.Mutex{},
control,
}
}
// Add adds a subscription group provided a subscription group id, requested
// metrics, config tree and plugins. The requested metrics are mapped to
// collector plugins which are then combined with the provided (processor and
// publisher) plugins. The provided config map is used to construct the
// []core.Metric which will be used during collect calls made against the
// subscription group.
// Returns an array of errors ([]serror.SnapError).
// `ErrSubscriptionGroupAlreadyExists` is returned if the subscription already
// exists. Also, if there are errors mapping the requested metrics to plugins
// those are returned.
func (s subscriptionGroups) Add(id string, requested []core.RequestedMetric,
configTree *cdata.ConfigDataTree,
plugins []core.SubscribedPlugin) []serror.SnapError {
s.Lock()
defer s.Unlock()
errs := s.add(id, requested, configTree, plugins)
return errs
}
func (s subscriptionGroups) add(id string, requested []core.RequestedMetric,
configTree *cdata.ConfigDataTree,
plugins []core.SubscribedPlugin) []serror.SnapError {
if _, ok := s.subscriptionMap[id]; ok {
return []serror.SnapError{serror.New(ErrSubscriptionGroupAlreadyExists)}
}
subscriptionGroup := &subscriptionGroup{
requestedMetrics: requested,
requestedPlugins: plugins,
configTree: configTree,
pluginControl: s.pluginControl,
}
errs := subscriptionGroup.process(id)
if errs != nil {
return errs
}
s.subscriptionMap[id] = subscriptionGroup
return nil
}
// Remove removes a subscription group given a subscription group ID.
func (s subscriptionGroups) Remove(id string) []serror.SnapError {
s.Lock()
defer s.Unlock()
return s.remove(id)
}
func (s subscriptionGroups) remove(id string) []serror.SnapError {
subscriptionGroup, ok := s.subscriptionMap[id]
if !ok {
return []serror.SnapError{serror.New(ErrSubscriptionGroupDoesNotExist)}
}
serrs := subscriptionGroup.unsubscribePlugins(id, s.subscriptionMap[id].plugins)
delete(s.subscriptionMap, id)
return serrs
}
// Get returns the metrics (core.Metric) and an array of serror.SnapError when
// provided a subscription ID. The array of serror.SnapError returned was
// produced the last time `process` was run, which is important since
// unloading/loading a plugin may produce errors when the requested metrics
// are looked up in the metric catalog. Those errors will be provided back to
// the caller of the subscription group on the next `CollectMetrics`.
// Returns `ErrSubscriptionGroupDoesNotExist` when the subscription group
// does not exist.
func (s subscriptionGroups) Get(id string) (map[string]metricTypes, []serror.SnapError, error) {
s.Lock()
defer s.Unlock()
return s.get(id)
}
func (s subscriptionGroups) get(id string) (map[string]metricTypes, []serror.SnapError, error) {
if _, ok := s.subscriptionMap[id]; !ok {
return nil, nil, ErrSubscriptionGroupDoesNotExist
}
sg := s.subscriptionMap[id]
return sg.metrics, sg.errors, nil
}
// Process compares the new set of plugins with the previous set of plugins
// for the given subscription group, subscribing to plugins that were added
// and unsubscribing from those that were removed since the last time the
// subscription group was processed.
// Returns an array of errors ([]serror.SnapError) which can occur when
// mapping requested metrics to collector plugins and getting a core.Plugin
// from a core.Requested.Plugin.
// When processing a subscription group the resulting metrics grouped by plugin
// (subscriptionGroup.metrics) for all subscription groups are updated based
// on the requested metrics (subscriptionGroup.requestedMetrics). Similarly
// the required plugins (subscriptionGroup.plugins) are also updated.
func (s *subscriptionGroups) Process() (errs []serror.SnapError) {
s.Lock()
defer s.Unlock()
for id, group := range s.subscriptionMap {
if serrs := group.process(id); serrs != nil {
errs = append(errs, serrs...)
}
}
return errs
}
func (s *subscriptionGroups) ValidateDeps(requested []core.RequestedMetric,
plugins []core.SubscribedPlugin,
configTree *cdata.ConfigDataTree, asserts ...core.SubscribedPluginAssert) (serrs []serror.SnapError) {
// resolve requested metrics and map to collectors
pluginToMetricMap, collectors, errs := s.getMetricsAndCollectors(requested, configTree)
if errs != nil {
serrs = append(serrs, errs...)
}
// Validate if schedule type is streaming and we have a non-streaming plugin or vice versa
for _, assert := range asserts {
if serr := assert(collectors); serr != nil {
serrs = append(serrs, serr)
}
}
if len(serrs) > 0 {
return serrs
}
// validateMetricsTypes
for _, pmt := range pluginToMetricMap {
for _, mt := range pmt.Metrics() {
errs := s.validateMetric(mt)
if len(errs) > 0 {
serrs = append(serrs, errs...)
}
}
}
// add collectors to plugins (processors and publishers)
for _, collector := range collectors {
plugins = append(plugins, collector)
}
// validate plugins
for _, plg := range plugins {
typ, err := core.ToPluginType(plg.TypeName())
if err != nil {
return []serror.SnapError{serror.New(err)}
}
mergedConfig := plg.Config().ReverseMerge(
s.Config.Plugins.getPluginConfigDataNode(
typ, plg.Name(), plg.Version()))
errs := s.validatePluginSubscription(plg, mergedConfig)
if len(errs) > 0 {
serrs = append(serrs, errs...)
return serrs
}
}
return
}
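// Example (not part of the original source): a sketch of the precedence
// ReverseMerge gives the task-level config over the global plugin config, as
// used in ValidateDeps above. It assumes the ctypes package
// ("github.com/intelsdi-x/snap/core/ctypes") is imported.
func exampleReverseMerge() {
	taskCfg := cdata.NewNode()
	taskCfg.AddItem("interval", ctypes.ConfigValueStr{Value: "1s"})
	globalCfg := cdata.NewNode()
	globalCfg.AddItem("interval", ctypes.ConfigValueStr{Value: "10s"})
	globalCfg.AddItem("timeout", ctypes.ConfigValueStr{Value: "5s"})
	// The receiver's values win on conflict: "interval" stays "1s" while
	// "timeout" is inherited from the global config.
	merged := taskCfg.ReverseMerge(globalCfg)
	_ = merged
}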
// validatePluginUnloading checks whether unloading the plugin is safe for existing running tasks.
// If the plugin is used by a running task and there is no replacement, an error is returned whose message
// contains the IDs of the tasks that use the plugin; this blocks the unloading process until those tasks are stopped.
func (s *subscriptionGroups) validatePluginUnloading(pluginToUnload *loadedPlugin) (errs []serror.SnapError) {
s.Lock()
defer s.Unlock()
for id, group := range s.subscriptionMap {
if err := group.validatePluginUnloading(id, pluginToUnload); err != nil {
errs = append(errs, err)
}
}
return errs
}
func (p *subscriptionGroups) validatePluginSubscription(pl core.SubscribedPlugin, mergedConfig *cdata.ConfigDataNode) []serror.SnapError {
var serrs = []serror.SnapError{}
controlLogger.WithFields(log.Fields{
"_block": "validate-plugin-subscription",
"plugin": fmt.Sprintf("%s:%d", pl.Name(), pl.Version()),
}).Info(fmt.Sprintf("validating dependencies for plugin %s:%d", pl.Name(), pl.Version()))
lp, err := p.pluginManager.get(key(pl))
if err != nil {
serrs = append(serrs, pluginNotFoundError(pl))
return serrs
}
if lp.ConfigPolicy != nil {
ncd := lp.ConfigPolicy.Get([]string{""})
_, errs := ncd.Process(mergedConfig.Table())
if errs != nil && errs.HasErrors() {
for _, e := range errs.Errors() {
se := serror.New(e)
se.SetFields(map[string]interface{}{"name": pl.Name(), "version": pl.Version()})
serrs = append(serrs, se)
}
}
}
return serrs
}
func (s *subscriptionGroups) validateMetric(
metric core.Metric) (serrs []serror.SnapError) {
mts, err := s.metricCatalog.GetMetrics(metric.Namespace(), metric.Version())
if err != nil {
serrs = append(serrs, serror.New(err, map[string]interface{}{
"name": metric.Namespace().String(),
"version": metric.Version(),
}))
return serrs
}
for _, m := range mts {
// No metric found; return an error.
if m == nil {
serrs = append(
serrs, serror.New(
fmt.Errorf("no metric found cannot subscribe: (%s) version(%d)",
metric.Namespace(), metric.Version())))
continue
}
m.config = metric.Config()
typ, serr := core.ToPluginType(m.Plugin.TypeName())
if serr != nil {
serrs = append(serrs, serror.New(serr))
continue
}
// merge global plugin config
if m.config != nil {
m.config.ReverseMergeInPlace(
s.Config.Plugins.getPluginConfigDataNode(typ,
m.Plugin.Name(), m.Plugin.Version()))
} else {
m.config = s.Config.Plugins.getPluginConfigDataNode(typ,
m.Plugin.Name(), m.Plugin.Version())
}
// When a metric is added to the MetricCatalog, the policy of rules defined by the plugin is added to the metric's policy.
// If no rules are defined for a metric, we set the metric's policy to an empty ConfigPolicyNode.
// Checking m.policy for nil will not work; we need to check whether the rules are nil.
if m.policy.HasRules() {
if m.Config() == nil {
fields := log.Fields{
"metric": m.Namespace(),
"version": m.Version(),
"plugin": m.Plugin.Name(),
}
serrs = append(serrs, serror.New(ErrConfigRequiredForMetric, fields))
continue
}
ncdTable, errs := m.policy.Process(m.Config().Table())
if errs != nil && errs.HasErrors() {
for _, e := range errs.Errors() {
serrs = append(serrs, serror.New(e))
}
continue
}
m.config = cdata.FromTable(*ncdTable)
}
}
return serrs
}
// pluginIsSubscribed returns true if the provided plugin is found among the subscribed plugins
// in the given subscription group
func (s *subscriptionGroup) pluginIsSubscribed(plugin *loadedPlugin) bool {
// range over subscribed plugins to find if the plugin is there
for _, sp := range s.plugins {
if sp.TypeName() == plugin.TypeName() && sp.Name() == plugin.Name() && sp.Version() == plugin.Version() {
return true
}
}
return false
}
// validatePluginUnloading verifies if a given plugin might be unloaded without causing running task failures
func (s *subscriptionGroup) validatePluginUnloading(id string, plgToUnload *loadedPlugin) (serr serror.SnapError) {
impacted := false
if !s.pluginIsSubscribed(plgToUnload) {
// the plugin is not subscribed, so the task is not impacted by its unloading
return nil
}
controlLogger.WithFields(log.Fields{
"_block": "subscriptionGroup.validatePluginUnloading",
"task-id": id,
"plugin-to-unload": plgToUnload.Key(),
}).Debug("validating impact of unloading the plugin")
for _, requestedMetric := range s.requestedMetrics {
// get all plugins exposing the requested metric
plgs, _ := s.GetPlugins(requestedMetric.Namespace())
// when requested version is fixed (greater than 0), take into account only plugins in the requested version
if requestedMetric.Version() > 0 {
// skip those which are not impacted by unloading (version different than plgToUnload.Version())
if requestedMetric.Version() == plgToUnload.Version() {
plgsInVer := []core.CatalogedPlugin{}
for _, plg := range plgs {
if plg.Version() == requestedMetric.Version() {
plgsInVer = append(plgsInVer, plg)
}
}
// set plugins only in the requested version
plgs = plgsInVer
}
}
if len(plgs) == 1 && plgs[0].Key() == plgToUnload.Key() {
// the requested metric is exposed only by the single plugin and there is no replacement
impacted = true
controlLogger.WithFields(log.Fields{
"_block": "subscriptionGroup.validatePluginUnloading",
"task-id": id,
"plugin-to-unload": plgToUnload.Key(),
"requested-metric": fmt.Sprintf("%s:%d", requestedMetric.Namespace(), requestedMetric.Version()),
}).Errorf("unloading the plugin would cause missing in collection the requested metric")
}
}
if impacted {
serr = serror.New(ErrPluginCannotBeUnloaded, map[string]interface{}{
"task-id": id,
"plugin-to-unload": plgToUnload.Key(),
})
}
return serr
}
func (s *subscriptionGroup) process(id string) (serrs []serror.SnapError) {
// gathers collectors based on requested metrics
pluginToMetricMap, plugins, serrs := s.getMetricsAndCollectors(s.requestedMetrics, s.configTree)
controlLogger.WithFields(log.Fields{
"collectors": fmt.Sprintf("%+v", plugins),
"metrics": fmt.Sprintf("%+v", s.requestedMetrics),
}).Debug("gathered collectors")
// note that the requested plugins contain only processors and publishers
for _, plugin := range s.requestedPlugins {
// add defaults to plugins (exposed in a plugins ConfigPolicy)
if lp, err := s.pluginManager.get(
fmt.Sprintf("%s"+core.Separator+"%s"+core.Separator+"%d",
plugin.TypeName(),
plugin.Name(),
plugin.Version())); err == nil && lp.ConfigPolicy != nil {
if policy := lp.ConfigPolicy.Get([]string{""}); policy != nil && len(policy.Defaults()) > 0 {
// set defaults to plugin config
plugin.Config().ApplyDefaults(policy.Defaults())
}
// update version info for subscribed processor or publisher
version := plugin.Version()
if version < 1 {
version = lp.Version()
}
s := subscribedPlugin{
name: plugin.Name(),
typeName: plugin.TypeName(),
version: version,
config: plugin.Config(),
}
// add processors and publishers to collectors just gathered
plugins = append(plugins, s)
}
}
// calculates those plugins that need to be subscribed and unsubscribed to
subs, unsubs := comparePlugins(plugins, s.plugins)
controlLogger.WithFields(log.Fields{
"subs": fmt.Sprintf("%+v", subs),
"unsubs": fmt.Sprintf("%+v", unsubs),
}).Debug("subscriptions")
if len(subs) > 0 {
if errs := s.subscribePlugins(id, subs); errs != nil {
serrs = append(serrs, errs...)
}
}
if len(unsubs) > 0 {
if errs := s.unsubscribePlugins(id, unsubs); errs != nil {
serrs = append(serrs, errs...)
}
}
// updating view
// metrics are grouped by plugin
s.metrics = pluginToMetricMap
s.plugins = plugins
s.errors = serrs
return serrs
}
func (s *subscriptionGroup) subscribePlugins(id string,
plugins []core.SubscribedPlugin) (serrs []serror.SnapError) {
plgs := make([]*loadedPlugin, len(plugins))
// First range through plugins to verify if all required plugins
// are available
for i, sub := range plugins {
plg, err := s.pluginManager.get(key(sub))
if err != nil {
serrs = append(serrs, pluginNotFoundError(sub))
return serrs
}
plgs[i] = plg
}
// If all plugins are available, subscribe to pools and start
// plugins as needed
for _, plg := range plgs {
controlLogger.WithFields(log.Fields{
"name": plg.Name(),
"type": plg.TypeName(),
"version": plg.Version(),
"_block": "subscriptionGroup.subscribePlugins",
}).Debug("plugin subscription")
if plg.Details.Uri != nil {
// this is a remote plugin
pool, err := s.pluginRunner.AvailablePlugins().getOrCreatePool(plg.Key())
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
if pool.Count() < 1 {
var resp plugin.Response
res, err := http.Get(plg.Details.Uri.String())
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
err = json.Unmarshal(body, &resp)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
ap, err := newAvailablePlugin(resp, s.eventManager, nil, s.grpcSecurity)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
ap.SetIsRemote(true)
err = pool.Insert(ap)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
}
} else {
pool, err := s.pluginRunner.AvailablePlugins().getOrCreatePool(plg.Key())
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
pool.Subscribe(id)
if pool.Eligible() {
err = s.verifyPlugin(plg)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
err = s.pluginRunner.runPlugin(plg.Name(), plg.Details)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
}
}
serr := s.sendPluginSubscriptionEvent(id, plg)
if serr != nil {
serrs = append(serrs, serr)
return serrs
}
}
return serrs
}
func (p *subscriptionGroup) unsubscribePlugins(id string,
plugins []core.SubscribedPlugin) (serrs []serror.SnapError) {
for _, plugin := range plugins {
controlLogger.WithFields(log.Fields{
"name": plugin.Name(),
"type": plugin.TypeName(),
"version": plugin.Version(),
"_block": "subscriptionGroup.unsubscribePlugins",
}).Debug("plugin unsubscription")
pool, err := p.pluginRunner.AvailablePlugins().getPool(key(plugin))
if err != nil {
serrs = append(serrs, err)
return serrs
}
if pool != nil {
pool.Unsubscribe(id)
}
serr := p.sendPluginUnsubscriptionEvent(id, plugin)
if serr != nil {
serrs = append(serrs, serr)
}
}
return
}
func (p *subscriptionGroup) sendPluginSubscriptionEvent(taskID string,
pl core.Plugin) serror.SnapError {
pt, err := core.ToPluginType(pl.TypeName())
if err != nil {
return serror.New(err)
}
e := &control_event.PluginSubscriptionEvent{
TaskId: taskID,
PluginType: int(pt),
PluginName: pl.Name(),
PluginVersion: pl.Version(),
}
if _, err := p.eventManager.Emit(e); err != nil {
return serror.New(err)
}
return nil
}
func (p *subscriptionGroup) sendPluginUnsubscriptionEvent(taskID string,
pl core.Plugin) serror.SnapError {
pt, err := core.ToPluginType(pl.TypeName())
if err != nil {
return serror.New(err)
}
e := &control_event.PluginUnsubscriptionEvent{
TaskId: taskID,
PluginType: int(pt),
PluginName: pl.Name(),
PluginVersion: pl.Version(),
}
if _, err := p.eventManager.Emit(e); err != nil {
return serror.New(err)
}
return nil
}
// comparePlugins compares the new state of plugins with the previous state.
// It returns an array of plugins that need to be subscribed and an array of
// plugins that need to be unsubscribed.
func comparePlugins(newPlugins,
oldPlugins []core.SubscribedPlugin) (adds,
removes []core.SubscribedPlugin) {
newMap := make(map[string]int)
oldMap := make(map[string]int)
for _, n := range newPlugins {
newMap[key(n)]++
}
for _, o := range oldPlugins {
oldMap[key(o)]++
}
for _, n := range newPlugins {
if oldMap[key(n)] > 0 {
oldMap[key(n)]--
continue
}
adds = append(adds, n)
}
for _, o := range oldPlugins {
if newMap[key(o)] > 0 {
newMap[key(o)]--
continue
}
removes = append(removes, o)
}
return
}
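// Example (not part of the original source): a sketch of comparePlugins'
// multiset semantics using the package's subscribedPlugin type. Duplicate
// subscriptions are counted, so two identical entries in the new state
// against one in the old state yield exactly one plugin to subscribe.
func exampleComparePlugins() {
	mk := func(name string, version int) core.SubscribedPlugin {
		return subscribedPlugin{name: name, typeName: "collector", version: version}
	}
	previous := []core.SubscribedPlugin{mk("cpu", 1)}
	desired := []core.SubscribedPlugin{mk("cpu", 1), mk("cpu", 1), mk("mem", 2)}
	adds, removes := comparePlugins(desired, previous)
	fmt.Printf("adds=%d removes=%d\n", len(adds), len(removes)) // adds=2 removes=0
}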
func pluginNotFoundError(pl core.SubscribedPlugin) serror.SnapError {
se := serror.New(fmt.Errorf("Plugin not found: type(%s) name(%s) version(%d)", pl.TypeName(), pl.Name(), pl.Version()))
se.SetFields(map[string]interface{}{
"name": pl.Name(),
"version": pl.Version(),
"type": pl.TypeName(),
})
return se
}
func key(p core.SubscribedPlugin) string | {
return fmt.Sprintf("%v"+core.Separator+"%v"+core.Separator+"%v", p.TypeName(), p.Name(), p.Version())
} | identifier_body |
|
subscription_group.go | /*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package control
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"sync"
"github.com/intelsdi-x/snap/control/plugin"
"github.com/intelsdi-x/snap/core"
"github.com/intelsdi-x/snap/core/cdata"
"github.com/intelsdi-x/snap/core/control_event"
"github.com/intelsdi-x/snap/core/serror"
log "github.com/sirupsen/logrus"
)
var (
// ErrSubscriptionGroupAlreadyExists - error message when the subscription
// group already exists
ErrSubscriptionGroupAlreadyExists = core.ErrSubscriptionGroupAlreadyExists
// ErrSubscriptionGroupDoesNotExist - error message when the subscription
// group does not exist
ErrSubscriptionGroupDoesNotExist = core.ErrSubscriptionGroupDoesNotExist
ErrConfigRequiredForMetric = errors.New("config required")
)
// ManagesSubscriptionGroups is the interface implemented by an object that can
// manage subscription groups.
type ManagesSubscriptionGroups interface {
Process() (errs []serror.SnapError)
Add(id string, requested []core.RequestedMetric,
configTree *cdata.ConfigDataTree,
plugins []core.SubscribedPlugin) []serror.SnapError
Get(id string) (map[string]metricTypes, []serror.SnapError, error)
Remove(id string) []serror.SnapError
ValidateDeps(requested []core.RequestedMetric,
plugins []core.SubscribedPlugin,
configTree *cdata.ConfigDataTree, asserts ...core.SubscribedPluginAssert) (serrs []serror.SnapError)
validateMetric(metric core.Metric) (serrs []serror.SnapError)
validatePluginUnloading(*loadedPlugin) (errs []serror.SnapError)
}
type subscriptionGroup struct {
*pluginControl
// requested metrics - never updated
requestedMetrics []core.RequestedMetric
// requested plugins - contains only processors and publishers;
// never updated
requestedPlugins []core.SubscribedPlugin
// config from request - never updated
configTree *cdata.ConfigDataTree
// resulting metrics - updated after plugin load/unload events; they are grouped by plugin
metrics map[string]metricTypes
// resulting plugins - updated after plugin load/unload events
plugins []core.SubscribedPlugin
// errors generated the last time the subscription was processed
// subscription groups are processed when the subscription group is added
// and when plugins are loaded/unloaded
errors []serror.SnapError
}
type subscriptionMap map[string]*subscriptionGroup
type subscriptionGroups struct {
subscriptionMap
*sync.Mutex
*pluginControl
}
func newSubscriptionGroups(control *pluginControl) *subscriptionGroups {
return &subscriptionGroups{
make(map[string]*subscriptionGroup),
&sync.Mutex{},
control,
}
}
// Add adds a subscription group provided a subscription group id, requested
// metrics, config tree and plugins. The requested metrics are mapped to
// collector plugins which are then combined with the provided (processor and
// publisher) plugins. The provided config map is used to construct the
// []core.Metric which will be used during collect calls made against the
// subscription group.
// Returns an array of errors ([]serror.SnapError).
// `ErrSubscriptionGroupAlreadyExists` is returned if the subscription already
// exists. Also, if there are errors mapping the requested metrics to plugins
// those are returned.
func (s subscriptionGroups) Add(id string, requested []core.RequestedMetric,
configTree *cdata.ConfigDataTree,
plugins []core.SubscribedPlugin) []serror.SnapError {
s.Lock()
defer s.Unlock()
errs := s.add(id, requested, configTree, plugins)
return errs
}
func (s subscriptionGroups) add(id string, requested []core.RequestedMetric,
configTree *cdata.ConfigDataTree,
plugins []core.SubscribedPlugin) []serror.SnapError {
if _, ok := s.subscriptionMap[id]; ok {
return []serror.SnapError{serror.New(ErrSubscriptionGroupAlreadyExists)}
}
subscriptionGroup := &subscriptionGroup{
requestedMetrics: requested,
requestedPlugins: plugins,
configTree: configTree,
pluginControl: s.pluginControl,
}
errs := subscriptionGroup.process(id)
if errs != nil {
return errs
}
s.subscriptionMap[id] = subscriptionGroup
return nil
}
// Remove removes a subscription group given a subscription group ID.
func (s subscriptionGroups) Remove(id string) []serror.SnapError {
s.Lock()
defer s.Unlock()
return s.remove(id)
}
func (s subscriptionGroups) remove(id string) []serror.SnapError {
subscriptionGroup, ok := s.subscriptionMap[id]
if !ok {
return []serror.SnapError{serror.New(ErrSubscriptionGroupDoesNotExist)}
}
serrs := subscriptionGroup.unsubscribePlugins(id, s.subscriptionMap[id].plugins)
delete(s.subscriptionMap, id)
return serrs
}
// Get returns the metrics (core.Metric) and an array of serror.SnapError when
// provided a subscription ID. The array of serror.SnapError returned was
// produced the last time `process` was run, which is important since
// unloading/loading a plugin may produce errors when the requested metrics
// are looked up in the metric catalog. Those errors will be provided back to
// the caller of the subscription group on the next `CollectMetrics`.
// Returns `ErrSubscriptionGroupDoesNotExist` when the subscription group
// does not exist.
func (s subscriptionGroups) Get(id string) (map[string]metricTypes, []serror.SnapError, error) {
s.Lock()
defer s.Unlock()
return s.get(id)
}
func (s subscriptionGroups) get(id string) (map[string]metricTypes, []serror.SnapError, error) {
if _, ok := s.subscriptionMap[id]; !ok {
return nil, nil, ErrSubscriptionGroupDoesNotExist
}
sg := s.subscriptionMap[id]
return sg.metrics, sg.errors, nil
}
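// Example (not part of the original source): a high-level sketch of the
// subscription-group lifecycle driven by the exported API above. The
// pluginControl and the requested metrics/plugins are assumed to come from
// task creation elsewhere; error handling is elided.
func exampleLifecycle(c *pluginControl, id string,
	requested []core.RequestedMetric, tree *cdata.ConfigDataTree,
	plugins []core.SubscribedPlugin) {
	groups := newSubscriptionGroups(c)
	// Add maps the requested metrics to collectors and subscribes every plugin.
	if errs := groups.Add(id, requested, tree, plugins); len(errs) > 0 {
		return
	}
	// After a plugin load/unload event, Process refreshes all groups.
	groups.Process()
	// Remove unsubscribes the group's plugins and drops the group.
	groups.Remove(id)
}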
// Process compares the new set of plugins with the previous set of plugins
// for the given subscription group, subscribing to plugins that were added
// and unsubscribing from those that were removed since the last time the
// subscription group was processed.
// Returns an array of errors ([]serror.SnapError) which can occur when
// mapping requested metrics to collector plugins and getting a core.Plugin
// from a core.Requested.Plugin.
// When processing a subscription group the resulting metrics grouped by plugin
// (subscriptionGroup.metrics) for all subscription groups are updated based
// on the requested metrics (subscriptionGroup.requestedMetrics). Similarly
// the required plugins (subscriptionGroup.plugins) are also updated.
func (s *subscriptionGroups) Process() (errs []serror.SnapError) {
s.Lock()
defer s.Unlock()
for id, group := range s.subscriptionMap {
if serrs := group.process(id); serrs != nil {
errs = append(errs, serrs...)
}
}
return errs
}
func (s *subscriptionGroups) ValidateDeps(requested []core.RequestedMetric,
plugins []core.SubscribedPlugin,
configTree *cdata.ConfigDataTree, asserts ...core.SubscribedPluginAssert) (serrs []serror.SnapError) {
// resolve requested metrics and map to collectors
pluginToMetricMap, collectors, errs := s.getMetricsAndCollectors(requested, configTree)
if errs != nil {
serrs = append(serrs, errs...)
}
// Validate if schedule type is streaming and we have a non-streaming plugin or vice versa
for _, assert := range asserts {
if serr := assert(collectors); serr != nil {
serrs = append(serrs, serr)
}
}
if len(serrs) > 0 {
return serrs
}
// validateMetricsTypes
for _, pmt := range pluginToMetricMap {
for _, mt := range pmt.Metrics() {
errs := s.validateMetric(mt)
if len(errs) > 0 {
serrs = append(serrs, errs...)
}
}
}
// add collectors to plugins (processors and publishers)
for _, collector := range collectors {
plugins = append(plugins, collector)
}
// validate plugins
for _, plg := range plugins {
typ, err := core.ToPluginType(plg.TypeName())
if err != nil {
return []serror.SnapError{serror.New(err)}
}
mergedConfig := plg.Config().ReverseMerge(
s.Config.Plugins.getPluginConfigDataNode(
typ, plg.Name(), plg.Version()))
errs := s.validatePluginSubscription(plg, mergedConfig)
if len(errs) > 0 {
serrs = append(serrs, errs...)
return serrs
}
}
return
}
// validatePluginUnloading checks whether unloading the plugin is safe for existing running tasks.
// If the plugin is used by a running task and there is no replacement, an error is returned whose message
// contains the IDs of the tasks that use the plugin; this blocks the unloading process until those tasks are stopped.
func (s *subscriptionGroups) validatePluginUnloading(pluginToUnload *loadedPlugin) (errs []serror.SnapError) {
s.Lock()
defer s.Unlock()
for id, group := range s.subscriptionMap {
if err := group.validatePluginUnloading(id, pluginToUnload); err != nil {
errs = append(errs, err)
}
}
return errs
}
func (p *subscriptionGroups) validatePluginSubscription(pl core.SubscribedPlugin, mergedConfig *cdata.ConfigDataNode) []serror.SnapError {
var serrs = []serror.SnapError{}
controlLogger.WithFields(log.Fields{
"_block": "validate-plugin-subscription",
"plugin": fmt.Sprintf("%s:%d", pl.Name(), pl.Version()),
}).Info(fmt.Sprintf("validating dependencies for plugin %s:%d", pl.Name(), pl.Version()))
lp, err := p.pluginManager.get(key(pl))
if err != nil {
serrs = append(serrs, pluginNotFoundError(pl))
return serrs
}
if lp.ConfigPolicy != nil {
ncd := lp.ConfigPolicy.Get([]string{""})
_, errs := ncd.Process(mergedConfig.Table())
if errs != nil && errs.HasErrors() {
for _, e := range errs.Errors() {
se := serror.New(e)
se.SetFields(map[string]interface{}{"name": pl.Name(), "version": pl.Version()})
serrs = append(serrs, se)
}
}
}
return serrs
}
func (s *subscriptionGroups) validateMetric(
metric core.Metric) (serrs []serror.SnapError) {
mts, err := s.metricCatalog.GetMetrics(metric.Namespace(), metric.Version())
if err != nil {
serrs = append(serrs, serror.New(err, map[string]interface{}{
"name": metric.Namespace().String(),
"version": metric.Version(),
}))
return serrs
}
for _, m := range mts {
// No metric found; return an error.
if m == nil {
serrs = append(
serrs, serror.New(
fmt.Errorf("no metric found cannot subscribe: (%s) version(%d)",
metric.Namespace(), metric.Version())))
continue
}
m.config = metric.Config()
typ, serr := core.ToPluginType(m.Plugin.TypeName())
if serr != nil {
serrs = append(serrs, serror.New(serr))
continue
}
// merge global plugin config
if m.config != nil {
m.config.ReverseMergeInPlace(
s.Config.Plugins.getPluginConfigDataNode(typ,
m.Plugin.Name(), m.Plugin.Version()))
} else {
m.config = s.Config.Plugins.getPluginConfigDataNode(typ,
m.Plugin.Name(), m.Plugin.Version())
}
// When a metric is added to the MetricCatalog, the policy of rules defined by the plugin is added to the metric's policy.
// If no rules are defined for a metric, we set the metric's policy to an empty ConfigPolicyNode.
// Checking m.policy for nil will not work; we need to check whether the rules are nil.
if m.policy.HasRules() {
if m.Config() == nil {
fields := log.Fields{
"metric": m.Namespace(),
"version": m.Version(),
"plugin": m.Plugin.Name(),
}
serrs = append(serrs, serror.New(ErrConfigRequiredForMetric, fields))
continue
}
ncdTable, errs := m.policy.Process(m.Config().Table())
if errs != nil && errs.HasErrors() {
for _, e := range errs.Errors() {
serrs = append(serrs, serror.New(e))
}
continue
}
m.config = cdata.FromTable(*ncdTable) | }
// pluginIsSubscribed returns true if the provided plugin is found among the subscribed plugins
// in the given subscription group
func (s *subscriptionGroup) pluginIsSubscribed(plugin *loadedPlugin) bool {
// range over subscribed plugins to find if the plugin is there
for _, sp := range s.plugins {
if sp.TypeName() == plugin.TypeName() && sp.Name() == plugin.Name() && sp.Version() == plugin.Version() {
return true
}
}
return false
}
// validatePluginUnloading verifies if a given plugin might be unloaded without causing running task failures
func (s *subscriptionGroup) validatePluginUnloading(id string, plgToUnload *loadedPlugin) (serr serror.SnapError) {
impacted := false
if !s.pluginIsSubscribed(plgToUnload) {
// the plugin is not subscribed, so the task is not impacted by its unloading
return nil
}
controlLogger.WithFields(log.Fields{
"_block": "subscriptionGroup.validatePluginUnloading",
"task-id": id,
"plugin-to-unload": plgToUnload.Key(),
}).Debug("validating impact of unloading the plugin")
for _, requestedMetric := range s.requestedMetrics {
// get all plugins exposing the requested metric
plgs, _ := s.GetPlugins(requestedMetric.Namespace())
// when requested version is fixed (greater than 0), take into account only plugins in the requested version
if requestedMetric.Version() > 0 {
// skip those which are not impacted by unloading (version different than plgToUnload.Version())
if requestedMetric.Version() == plgToUnload.Version() {
plgsInVer := []core.CatalogedPlugin{}
for _, plg := range plgs {
if plg.Version() == requestedMetric.Version() {
plgsInVer = append(plgsInVer, plg)
}
}
// set plugins only in the requested version
plgs = plgsInVer
}
}
if len(plgs) == 1 && plgs[0].Key() == plgToUnload.Key() {
// the requested metric is exposed only by the single plugin and there is no replacement
impacted = true
controlLogger.WithFields(log.Fields{
"_block": "subscriptionGroup.validatePluginUnloading",
"task-id": id,
"plugin-to-unload": plgToUnload.Key(),
"requested-metric": fmt.Sprintf("%s:%d", requestedMetric.Namespace(), requestedMetric.Version()),
}).Errorf("unloading the plugin would cause missing in collection the requested metric")
}
}
if impacted {
serr = serror.New(ErrPluginCannotBeUnloaded, map[string]interface{}{
"task-id": id,
"plugin-to-unload": plgToUnload.Key(),
})
}
return serr
}
func (s *subscriptionGroup) process(id string) (serrs []serror.SnapError) {
// gathers collectors based on requested metrics
pluginToMetricMap, plugins, serrs := s.getMetricsAndCollectors(s.requestedMetrics, s.configTree)
controlLogger.WithFields(log.Fields{
"collectors": fmt.Sprintf("%+v", plugins),
"metrics": fmt.Sprintf("%+v", s.requestedMetrics),
}).Debug("gathered collectors")
// note that the requested plugins contain only processors and publishers
for _, plugin := range s.requestedPlugins {
// add defaults to plugins (exposed in a plugins ConfigPolicy)
if lp, err := s.pluginManager.get(
fmt.Sprintf("%s"+core.Separator+"%s"+core.Separator+"%d",
plugin.TypeName(),
plugin.Name(),
plugin.Version())); err == nil && lp.ConfigPolicy != nil {
if policy := lp.ConfigPolicy.Get([]string{""}); policy != nil && len(policy.Defaults()) > 0 {
// set defaults to plugin config
plugin.Config().ApplyDefaults(policy.Defaults())
}
// update version info for subscribed processor or publisher
version := plugin.Version()
if version < 1 {
version = lp.Version()
}
s := subscribedPlugin{
name: plugin.Name(),
typeName: plugin.TypeName(),
version: version,
config: plugin.Config(),
}
// add processors and publishers to collectors just gathered
plugins = append(plugins, s)
}
}
// calculates those plugins that need to be subscribed and unsubscribed to
subs, unsubs := comparePlugins(plugins, s.plugins)
controlLogger.WithFields(log.Fields{
"subs": fmt.Sprintf("%+v", subs),
"unsubs": fmt.Sprintf("%+v", unsubs),
}).Debug("subscriptions")
if len(subs) > 0 {
if errs := s.subscribePlugins(id, subs); errs != nil {
serrs = append(serrs, errs...)
}
}
if len(unsubs) > 0 {
if errs := s.unsubscribePlugins(id, unsubs); errs != nil {
serrs = append(serrs, errs...)
}
}
// updating view
// metrics are grouped by plugin
s.metrics = pluginToMetricMap
s.plugins = plugins
s.errors = serrs
return serrs
}
func (s *subscriptionGroup) subscribePlugins(id string,
plugins []core.SubscribedPlugin) (serrs []serror.SnapError) {
plgs := make([]*loadedPlugin, len(plugins))
// First range through plugins to verify if all required plugins
// are available
for i, sub := range plugins {
plg, err := s.pluginManager.get(key(sub))
if err != nil {
serrs = append(serrs, pluginNotFoundError(sub))
return serrs
}
plgs[i] = plg
}
// If all plugins are available, subscribe to pools and start
// plugins as needed
for _, plg := range plgs {
controlLogger.WithFields(log.Fields{
"name": plg.Name(),
"type": plg.TypeName(),
"version": plg.Version(),
"_block": "subscriptionGroup.subscribePlugins",
}).Debug("plugin subscription")
if plg.Details.Uri != nil {
// this is a remote plugin
pool, err := s.pluginRunner.AvailablePlugins().getOrCreatePool(plg.Key())
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
if pool.Count() < 1 {
var resp plugin.Response
res, err := http.Get(plg.Details.Uri.String())
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
err = json.Unmarshal(body, &resp)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
ap, err := newAvailablePlugin(resp, s.eventManager, nil, s.grpcSecurity)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
ap.SetIsRemote(true)
err = pool.Insert(ap)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
}
} else {
pool, err := s.pluginRunner.AvailablePlugins().getOrCreatePool(plg.Key())
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
pool.Subscribe(id)
if pool.Eligible() {
err = s.verifyPlugin(plg)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
err = s.pluginRunner.runPlugin(plg.Name(), plg.Details)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
}
}
serr := s.sendPluginSubscriptionEvent(id, plg)
if serr != nil {
serrs = append(serrs, serr)
return serrs
}
}
return serrs
}
func (p *subscriptionGroup) unsubscribePlugins(id string,
plugins []core.SubscribedPlugin) (serrs []serror.SnapError) {
for _, plugin := range plugins {
controlLogger.WithFields(log.Fields{
"name": plugin.Name(),
"type": plugin.TypeName(),
"version": plugin.Version(),
"_block": "subscriptionGroup.unsubscribePlugins",
}).Debug("plugin unsubscription")
pool, err := p.pluginRunner.AvailablePlugins().getPool(key(plugin))
if err != nil {
serrs = append(serrs, err)
return serrs
}
if pool != nil {
pool.Unsubscribe(id)
}
serr := p.sendPluginUnsubscriptionEvent(id, plugin)
if serr != nil {
serrs = append(serrs, serr)
}
}
return
}
func (p *subscriptionGroup) sendPluginSubscriptionEvent(taskID string,
pl core.Plugin) serror.SnapError {
pt, err := core.ToPluginType(pl.TypeName())
if err != nil {
return serror.New(err)
}
e := &control_event.PluginSubscriptionEvent{
TaskId: taskID,
PluginType: int(pt),
PluginName: pl.Name(),
PluginVersion: pl.Version(),
}
if _, err := p.eventManager.Emit(e); err != nil {
return serror.New(err)
}
return nil
}
func (p *subscriptionGroup) sendPluginUnsubscriptionEvent(taskID string,
pl core.Plugin) serror.SnapError {
pt, err := core.ToPluginType(pl.TypeName())
if err != nil {
return serror.New(err)
}
e := &control_event.PluginUnsubscriptionEvent{
TaskId: taskID,
PluginType: int(pt),
PluginName: pl.Name(),
PluginVersion: pl.Version(),
}
if _, err := p.eventManager.Emit(e); err != nil {
return serror.New(err)
}
return nil
}
// comparePlugins compares the new state of plugins with the previous state.
// It returns an array of plugins that need to be subscribed and an array of
// plugins that need to be unsubscribed.
func comparePlugins(newPlugins,
oldPlugins []core.SubscribedPlugin) (adds,
removes []core.SubscribedPlugin) {
newMap := make(map[string]int)
oldMap := make(map[string]int)
for _, n := range newPlugins {
newMap[key(n)]++
}
for _, o := range oldPlugins {
oldMap[key(o)]++
}
for _, n := range newPlugins {
if oldMap[key(n)] > 0 {
oldMap[key(n)]--
continue
}
adds = append(adds, n)
}
for _, o := range oldPlugins {
if newMap[key(o)] > 0 {
newMap[key(o)]--
continue
}
removes = append(removes, o)
}
return
}
func pluginNotFoundError(pl core.SubscribedPlugin) serror.SnapError {
se := serror.New(fmt.Errorf("Plugin not found: type(%s) name(%s) version(%d)", pl.TypeName(), pl.Name(), pl.Version()))
se.SetFields(map[string]interface{}{
"name": pl.Name(),
"version": pl.Version(),
"type": pl.TypeName(),
})
return se
}
func key(p core.SubscribedPlugin) string {
return fmt.Sprintf("%v"+core.Separator+"%v"+core.Separator+"%v", p.TypeName(), p.Name(), p.Version())
} | }
}
return serrs | random_line_split |
subscription_group.go | /*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package control
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"sync"
"github.com/intelsdi-x/snap/control/plugin"
"github.com/intelsdi-x/snap/core"
"github.com/intelsdi-x/snap/core/cdata"
"github.com/intelsdi-x/snap/core/control_event"
"github.com/intelsdi-x/snap/core/serror"
log "github.com/sirupsen/logrus"
)
var (
// ErrSubscriptionGroupAlreadyExists - error message when the subscription
// group already exists
ErrSubscriptionGroupAlreadyExists = core.ErrSubscriptionGroupAlreadyExists
// ErrSubscriptionGroupDoesNotExist - error message when the subscription
// group does not exist
ErrSubscriptionGroupDoesNotExist = core.ErrSubscriptionGroupDoesNotExist
ErrConfigRequiredForMetric = errors.New("config required")
)
// ManagesSubscriptionGroups is the interface implemented by an object that can
// manage subscription groups.
type ManagesSubscriptionGroups interface {
Process() (errs []serror.SnapError)
Add(id string, requested []core.RequestedMetric,
configTree *cdata.ConfigDataTree,
plugins []core.SubscribedPlugin) []serror.SnapError
Get(id string) (map[string]metricTypes, []serror.SnapError, error)
Remove(id string) []serror.SnapError
ValidateDeps(requested []core.RequestedMetric,
plugins []core.SubscribedPlugin,
configTree *cdata.ConfigDataTree, asserts ...core.SubscribedPluginAssert) (serrs []serror.SnapError)
validateMetric(metric core.Metric) (serrs []serror.SnapError)
validatePluginUnloading(*loadedPlugin) (errs []serror.SnapError)
}
type subscriptionGroup struct {
*pluginControl
// requested metrics - never updated
requestedMetrics []core.RequestedMetric
// requested plugins - contains only processors and publishers;
// never updated
requestedPlugins []core.SubscribedPlugin
// config from request - never updated
configTree *cdata.ConfigDataTree
// resulting metrics - updated after plugin load/unload events; they are grouped by plugin
metrics map[string]metricTypes
// resulting plugins - updated after plugin load/unload events
plugins []core.SubscribedPlugin
// errors generated the last time the subscription was processed
// subscription groups are processed when the subscription group is added
// and when plugins are loaded/unloaded
errors []serror.SnapError
}
type subscriptionMap map[string]*subscriptionGroup
type subscriptionGroups struct {
subscriptionMap
*sync.Mutex
*pluginControl
}
func newSubscriptionGroups(control *pluginControl) *subscriptionGroups {
return &subscriptionGroups{
make(map[string]*subscriptionGroup),
&sync.Mutex{},
control,
}
}
// Add adds a subscription group provided a subscription group id, requested
// metrics, config tree and plugins. The requested metrics are mapped to
// collector plugins which are then combined with the provided (processor and
// publisher) plugins. The provided config map is used to construct the
// []core.Metric which will be used during collect calls made against the
// subscription group.
// Returns an array of errors ([]serror.SnapError).
// `ErrSubscriptionGroupAlreadyExists` is returned if the subscription already
// exists. Also, if there are errors mapping the requested metrics to plugins
// those are returned.
func (s subscriptionGroups) Add(id string, requested []core.RequestedMetric,
configTree *cdata.ConfigDataTree,
plugins []core.SubscribedPlugin) []serror.SnapError {
s.Lock()
defer s.Unlock()
errs := s.add(id, requested, configTree, plugins)
return errs
}
func (s subscriptionGroups) add(id string, requested []core.RequestedMetric,
configTree *cdata.ConfigDataTree,
plugins []core.SubscribedPlugin) []serror.SnapError {
if _, ok := s.subscriptionMap[id]; ok {
return []serror.SnapError{serror.New(ErrSubscriptionGroupAlreadyExists)}
}
subscriptionGroup := &subscriptionGroup{
requestedMetrics: requested,
requestedPlugins: plugins,
configTree: configTree,
pluginControl: s.pluginControl,
}
errs := subscriptionGroup.process(id)
if errs != nil {
return errs
}
s.subscriptionMap[id] = subscriptionGroup
return nil
}
// Remove removes a subscription group given a subscription group ID.
func (s subscriptionGroups) Remove(id string) []serror.SnapError {
s.Lock()
defer s.Unlock()
return s.remove(id)
}
func (s subscriptionGroups) remove(id string) []serror.SnapError {
subscriptionGroup, ok := s.subscriptionMap[id]
if !ok {
return []serror.SnapError{serror.New(ErrSubscriptionGroupDoesNotExist)}
}
serrs := subscriptionGroup.unsubscribePlugins(id, s.subscriptionMap[id].plugins)
delete(s.subscriptionMap, id)
return serrs
}
// Get returns the metrics (core.Metric) and an array of serror.SnapError when
// provided a subscription ID. The array of serror.SnapError returned was
// produced the last time `process` was run, which is important since
// unloading/loading a plugin may produce errors when the requested metrics
// are looked up in the metric catalog. Those errors will be provided back to
// the caller of the subscription group on the next `CollectMetrics`.
// Returns `ErrSubscriptionGroupDoesNotExist` when the subscription group
// does not exist.
func (s subscriptionGroups) Get(id string) (map[string]metricTypes, []serror.SnapError, error) {
s.Lock()
defer s.Unlock()
return s.get(id)
}
func (s subscriptionGroups) get(id string) (map[string]metricTypes, []serror.SnapError, error) {
if _, ok := s.subscriptionMap[id]; !ok {
return nil, nil, ErrSubscriptionGroupDoesNotExist
}
sg := s.subscriptionMap[id]
return sg.metrics, sg.errors, nil
}
// Process compares the new set of plugins with the previous set of plugins
// for the given subscription group, subscribing to plugins that were added
// and unsubscribing from those that were removed since the last time the
// subscription group was processed.
// Returns an array of errors ([]serror.SnapError) which can occur when
// mapping requested metrics to collector plugins and getting a core.Plugin
// from a core.Requested.Plugin.
// When processing a subscription group the resulting metrics grouped by plugin
// (subscriptionGroup.metrics) for all subscription groups are updated based
// on the requested metrics (subscriptionGroup.requestedMetrics). Similarly
// the required plugins (subscriptionGroup.plugins) are also updated.
func (s *subscriptionGroups) Process() (errs []serror.SnapError) {
s.Lock()
defer s.Unlock()
for id, group := range s.subscriptionMap {
if serrs := group.process(id); serrs != nil {
errs = append(errs, serrs...)
}
}
return errs
}
func (s *subscriptionGroups) ValidateDeps(requested []core.RequestedMetric,
plugins []core.SubscribedPlugin,
configTree *cdata.ConfigDataTree, asserts ...core.SubscribedPluginAssert) (serrs []serror.SnapError) {
// resolve requested metrics and map to collectors
pluginToMetricMap, collectors, errs := s.getMetricsAndCollectors(requested, configTree)
if errs != nil {
serrs = append(serrs, errs...)
}
// Validate if schedule type is streaming and we have a non-streaming plugin or vice versa
for _, assert := range asserts {
if serr := assert(collectors); serr != nil {
serrs = append(serrs, serr)
}
}
if len(serrs) > 0 {
return serrs
}
// validateMetricsTypes
for _, pmt := range pluginToMetricMap {
for _, mt := range pmt.Metrics() {
errs := s.validateMetric(mt)
if len(errs) > 0 {
serrs = append(serrs, errs...)
}
}
}
// add collectors to plugins (processors and publishers)
for _, collector := range collectors {
plugins = append(plugins, collector)
}
// validate plugins
for _, plg := range plugins {
typ, err := core.ToPluginType(plg.TypeName())
if err != nil {
return []serror.SnapError{serror.New(err)}
}
mergedConfig := plg.Config().ReverseMerge(
s.Config.Plugins.getPluginConfigDataNode(
typ, plg.Name(), plg.Version()))
errs := s.validatePluginSubscription(plg, mergedConfig)
if len(errs) > 0 {
serrs = append(serrs, errs...)
return serrs
}
}
return
}
// validatePluginUnloading checks whether unloading the plugin is safe for existing running tasks.
// If the plugin is used by a running task and there is no replacement, an error is returned whose message
// contains the IDs of the tasks that use the plugin; this blocks the unloading process until those tasks are stopped.
func (s *subscriptionGroups) validatePluginUnloading(pluginToUnload *loadedPlugin) (errs []serror.SnapError) {
s.Lock()
defer s.Unlock()
for id, group := range s.subscriptionMap {
if err := group.validatePluginUnloading(id, pluginToUnload); err != nil {
errs = append(errs, err)
}
}
return errs
}
func (p *subscriptionGroups) validatePluginSubscription(pl core.SubscribedPlugin, mergedConfig *cdata.ConfigDataNode) []serror.SnapError {
var serrs = []serror.SnapError{}
controlLogger.WithFields(log.Fields{
"_block": "validate-plugin-subscription",
"plugin": fmt.Sprintf("%s:%d", pl.Name(), pl.Version()),
}).Info(fmt.Sprintf("validating dependencies for plugin %s:%d", pl.Name(), pl.Version()))
lp, err := p.pluginManager.get(key(pl))
if err != nil {
serrs = append(serrs, pluginNotFoundError(pl))
return serrs
}
if lp.ConfigPolicy != nil {
ncd := lp.ConfigPolicy.Get([]string{""})
_, errs := ncd.Process(mergedConfig.Table())
if errs != nil && errs.HasErrors() {
for _, e := range errs.Errors() {
se := serror.New(e)
se.SetFields(map[string]interface{}{"name": pl.Name(), "version": pl.Version()})
serrs = append(serrs, se)
}
}
}
return serrs
}
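// Example (not part of the original source): a sketch of how a plugin's
// ConfigPolicy rules reject an incomplete config, which is the failure mode
// validatePluginSubscription reports above. It assumes snap's cpolicy
// ("github.com/intelsdi-x/snap/control/plugin/cpolicy") and ctypes packages
// are imported.
func examplePolicyCheck() {
	node := cpolicy.NewPolicyNode()
	rule, _ := cpolicy.NewStringRule("password", true) // a required key
	node.Add(rule)
	// An empty table is missing the required key, so Process reports errors
	// that this file would wrap into serror.SnapError values.
	if _, errs := node.Process(map[string]ctypes.ConfigValue{}); errs != nil && errs.HasErrors() {
		fmt.Println(errs.Errors())
	}
}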
func (s *subscriptionGroups) validateMetric(
metric core.Metric) (serrs []serror.SnapError) {
mts, err := s.metricCatalog.GetMetrics(metric.Namespace(), metric.Version())
if err != nil {
serrs = append(serrs, serror.New(err, map[string]interface{}{
"name": metric.Namespace().String(),
"version": metric.Version(),
}))
return serrs
}
for _, m := range mts {
// No metric found; return an error.
if m == nil {
serrs = append(
serrs, serror.New(
fmt.Errorf("no metric found cannot subscribe: (%s) version(%d)",
metric.Namespace(), metric.Version())))
continue
}
m.config = metric.Config()
typ, serr := core.ToPluginType(m.Plugin.TypeName())
if serr != nil {
serrs = append(serrs, serror.New(serr))
continue
}
// merge global plugin config
if m.config != nil {
m.config.ReverseMergeInPlace(
s.Config.Plugins.getPluginConfigDataNode(typ,
m.Plugin.Name(), m.Plugin.Version()))
} else {
m.config = s.Config.Plugins.getPluginConfigDataNode(typ,
m.Plugin.Name(), m.Plugin.Version())
}
// When a metric is added to the MetricCatalog, the policy of rules defined by the plugin is added to the metric's policy.
// If no rules are defined for a metric, we set the metric's policy to an empty ConfigPolicyNode.
// Checking m.policy for nil will not work; we need to check whether the rules are nil.
if m.policy.HasRules() {
if m.Config() == nil {
fields := log.Fields{
"metric": m.Namespace(),
"version": m.Version(),
"plugin": m.Plugin.Name(),
}
serrs = append(serrs, serror.New(ErrConfigRequiredForMetric, fields))
continue
}
ncdTable, errs := m.policy.Process(m.Config().Table())
if errs != nil && errs.HasErrors() {
for _, e := range errs.Errors() {
serrs = append(serrs, serror.New(e))
}
continue
}
m.config = cdata.FromTable(*ncdTable)
}
}
return serrs
}
// pluginIsSubscribed returns true if the provided plugin is found among the subscribed plugins
// in the given subscription group
func (s *subscriptionGroup) pluginIsSubscribed(plugin *loadedPlugin) bool {
// range over subscribed plugins to find if the plugin is there
for _, sp := range s.plugins {
if sp.TypeName() == plugin.TypeName() && sp.Name() == plugin.Name() && sp.Version() == plugin.Version() {
return true
}
}
return false
}
// validatePluginUnloading verifies if a given plugin might be unloaded without causing running task failures
func (s *subscriptionGroup) validatePluginUnloading(id string, plgToUnload *loadedPlugin) (serr serror.SnapError) {
impacted := false
if !s.pluginIsSubscribed(plgToUnload) {
// the plugin is not subscribed, so the task is not impacted by its unloading
return nil
}
controlLogger.WithFields(log.Fields{
"_block": "subscriptionGroup.validatePluginUnloading",
"task-id": id,
"plugin-to-unload": plgToUnload.Key(),
}).Debug("validating impact of unloading the plugin")
for _, requestedMetric := range s.requestedMetrics {
// get all plugins exposing the requested metric
plgs, _ := s.GetPlugins(requestedMetric.Namespace())
// when requested version is fixed (greater than 0), take into account only plugins in the requested version
if requestedMetric.Version() > 0 {
// skip those which are not impacted by unloading (version different than plgToUnload.Version())
if requestedMetric.Version() == plgToUnload.Version() {
plgsInVer := []core.CatalogedPlugin{}
for _, plg := range plgs {
if plg.Version() == requestedMetric.Version() {
plgsInVer = append(plgsInVer, plg)
}
}
// set plugins only in the requested version
plgs = plgsInVer
}
}
if len(plgs) == 1 && plgs[0].Key() == plgToUnload.Key() {
// the requested metric is exposed only by the single plugin and there is no replacement
impacted = true
controlLogger.WithFields(log.Fields{
"_block": "subscriptionGroup.validatePluginUnloading",
"task-id": id,
"plugin-to-unload": plgToUnload.Key(),
"requested-metric": fmt.Sprintf("%s:%d", requestedMetric.Namespace(), requestedMetric.Version()),
}).Errorf("unloading the plugin would cause missing in collection the requested metric")
}
}
if impacted {
serr = serror.New(ErrPluginCannotBeUnloaded, map[string]interface{}{
"task-id": id,
"plugin-to-unload": plgToUnload.Key(),
})
}
return serr
}
func (s *subscriptionGroup) process(id string) (serrs []serror.SnapError) {
// gathers collectors based on requested metrics
pluginToMetricMap, plugins, serrs := s.getMetricsAndCollectors(s.requestedMetrics, s.configTree)
controlLogger.WithFields(log.Fields{
"collectors": fmt.Sprintf("%+v", plugins),
"metrics": fmt.Sprintf("%+v", s.requestedMetrics),
}).Debug("gathered collectors")
// note that the requested plugins contain only processors and publishers
for _, plugin := range s.requestedPlugins {
// add defaults to plugins (exposed in a plugins ConfigPolicy)
if lp, err := s.pluginManager.get(
fmt.Sprintf("%s"+core.Separator+"%s"+core.Separator+"%d",
plugin.TypeName(),
plugin.Name(),
plugin.Version())); err == nil && lp.ConfigPolicy != nil {
if policy := lp.ConfigPolicy.Get([]string{""}); policy != nil && len(policy.Defaults()) > 0 {
// set defaults to plugin config
plugin.Config().ApplyDefaults(policy.Defaults())
}
// update version info for subscribed processor or publisher
version := plugin.Version()
if version < 1 {
version = lp.Version()
}
s := subscribedPlugin{
name: plugin.Name(),
typeName: plugin.TypeName(),
version: version,
config: plugin.Config(),
}
// add processors and publishers to collectors just gathered
plugins = append(plugins, s)
}
}
// calculates those plugins that need to be subscribed and unsubscribed to
subs, unsubs := comparePlugins(plugins, s.plugins)
controlLogger.WithFields(log.Fields{
"subs": fmt.Sprintf("%+v", subs),
"unsubs": fmt.Sprintf("%+v", unsubs),
}).Debug("subscriptions")
if len(subs) > 0 {
if errs := s.subscribePlugins(id, subs); errs != nil {
serrs = append(serrs, errs...)
}
}
if len(unsubs) > 0 {
if errs := s.unsubscribePlugins(id, unsubs); errs != nil {
serrs = append(serrs, errs...)
}
}
// updating view
// metrics are grouped by plugin
s.metrics = pluginToMetricMap
s.plugins = plugins
s.errors = serrs
return serrs
}
func (s *subscriptionGroup) subscribePlugins(id string,
plugins []core.SubscribedPlugin) (serrs []serror.SnapError) {
plgs := make([]*loadedPlugin, len(plugins))
// First range through plugins to verify if all required plugins
// are available
for i, sub := range plugins {
plg, err := s.pluginManager.get(key(sub))
if err != nil {
serrs = append(serrs, pluginNotFoundError(sub))
return serrs
}
plgs[i] = plg
}
// If all plugins are available, subscribe to pools and start
// plugins as needed
for _, plg := range plgs {
controlLogger.WithFields(log.Fields{
"name": plg.Name(),
"type": plg.TypeName(),
"version": plg.Version(),
"_block": "subscriptionGroup.subscribePlugins",
}).Debug("plugin subscription")
if plg.Details.Uri != nil {
// this is a remote plugin
pool, err := s.pluginRunner.AvailablePlugins().getOrCreatePool(plg.Key())
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
if pool.Count() < 1 {
var resp plugin.Response
res, err := http.Get(plg.Details.Uri.String())
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
err = json.Unmarshal(body, &resp)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
ap, err := newAvailablePlugin(resp, s.eventManager, nil, s.grpcSecurity)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
ap.SetIsRemote(true)
err = pool.Insert(ap)
if err != nil |
}
} else {
pool, err := s.pluginRunner.AvailablePlugins().getOrCreatePool(plg.Key())
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
pool.Subscribe(id)
if pool.Eligible() {
err = s.verifyPlugin(plg)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
err = s.pluginRunner.runPlugin(plg.Name(), plg.Details)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
}
}
serr := s.sendPluginSubscriptionEvent(id, plg)
if serr != nil {
serrs = append(serrs, serr)
return serrs
}
}
return serrs
}
func (p *subscriptionGroup) unsubscribePlugins(id string,
plugins []core.SubscribedPlugin) (serrs []serror.SnapError) {
for _, plugin := range plugins {
controlLogger.WithFields(log.Fields{
"name": plugin.Name(),
"type": plugin.TypeName(),
"version": plugin.Version(),
"_block": "subscriptionGroup.unsubscribePlugins",
}).Debug("plugin unsubscription")
pool, err := p.pluginRunner.AvailablePlugins().getPool(key(plugin))
if err != nil {
serrs = append(serrs, err)
return serrs
}
if pool != nil {
pool.Unsubscribe(id)
}
serr := p.sendPluginUnsubscriptionEvent(id, plugin)
if serr != nil {
serrs = append(serrs, serr)
}
}
return
}
func (p *subscriptionGroup) sendPluginSubscriptionEvent(taskID string,
pl core.Plugin) serror.SnapError {
pt, err := core.ToPluginType(pl.TypeName())
if err != nil {
return serror.New(err)
}
e := &control_event.PluginSubscriptionEvent{
TaskId: taskID,
PluginType: int(pt),
PluginName: pl.Name(),
PluginVersion: pl.Version(),
}
if _, err := p.eventManager.Emit(e); err != nil {
return serror.New(err)
}
return nil
}
func (p *subscriptionGroup) sendPluginUnsubscriptionEvent(taskID string,
pl core.Plugin) serror.SnapError {
pt, err := core.ToPluginType(pl.TypeName())
if err != nil {
return serror.New(err)
}
e := &control_event.PluginUnsubscriptionEvent{
TaskId: taskID,
PluginType: int(pt),
PluginName: pl.Name(),
PluginVersion: pl.Version(),
}
if _, err := p.eventManager.Emit(e); err != nil {
return serror.New(err)
}
return nil
}
// comparePlugins compares the new state of plugins with the previous state.
// It returns an array of plugins that need to be subscribed and an array of
// plugins that need to be unsubscribed.
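// For example, given newPlugins = [A, A, B] and oldPlugins = [A, C], adds = [A, B]
// and removes = [C]; occurrences are counted, so duplicate subscriptions are kept.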
func comparePlugins(newPlugins,
oldPlugins []core.SubscribedPlugin) (adds,
removes []core.SubscribedPlugin) {
newMap := make(map[string]int)
oldMap := make(map[string]int)
for _, n := range newPlugins {
newMap[key(n)]++
}
for _, o := range oldPlugins {
oldMap[key(o)]++
}
for _, n := range newPlugins {
if oldMap[key(n)] > 0 {
oldMap[key(n)]--
continue
}
adds = append(adds, n)
}
for _, o := range oldPlugins {
if newMap[key(o)] > 0 {
newMap[key(o)]--
continue
}
removes = append(removes, o)
}
return
}
func pluginNotFoundError(pl core.SubscribedPlugin) serror.SnapError {
se := serror.New(fmt.Errorf("Plugin not found: type(%s) name(%s) version(%d)", pl.TypeName(), pl.Name(), pl.Version()))
se.SetFields(map[string]interface{}{
"name": pl.Name(),
"version": pl.Version(),
"type": pl.TypeName(),
})
return se
}
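// key builds the catalog lookup key for a subscribed plugin from its type name,
// plugin name, and version, joined by core.Separator.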
func key(p core.SubscribedPlugin) string {
return fmt.Sprintf("%v"+core.Separator+"%v"+core.Separator+"%v", p.TypeName(), p.Name(), p.Version())
}
| {
serrs = append(serrs, serror.New(err))
return serrs
} | conditional_block |
subscription_group.go | /*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package control
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"sync"
"github.com/intelsdi-x/snap/control/plugin"
"github.com/intelsdi-x/snap/core"
"github.com/intelsdi-x/snap/core/cdata"
"github.com/intelsdi-x/snap/core/control_event"
"github.com/intelsdi-x/snap/core/serror"
log "github.com/sirupsen/logrus"
)
var (
// ErrSubscriptionGroupAlreadyExists - error message when the subscription
// group already exists
ErrSubscriptionGroupAlreadyExists = core.ErrSubscriptionGroupAlreadyExists
// ErrSubscriptionGroupDoesNotExist - error message when the subscription
// group does not exist
ErrSubscriptionGroupDoesNotExist = core.ErrSubscriptionGroupDoesNotExist
ErrConfigRequiredForMetric = errors.New("config required")
)
// ManagesSubscriptionGroups is the interface implemented by an object that can
// manage subscription groups.
type ManagesSubscriptionGroups interface {
Process() (errs []serror.SnapError)
Add(id string, requested []core.RequestedMetric,
configTree *cdata.ConfigDataTree,
plugins []core.SubscribedPlugin) []serror.SnapError
Get(id string) (map[string]metricTypes, []serror.SnapError, error)
Remove(id string) []serror.SnapError
ValidateDeps(requested []core.RequestedMetric,
plugins []core.SubscribedPlugin,
configTree *cdata.ConfigDataTree, asserts ...core.SubscribedPluginAssert) (serrs []serror.SnapError)
validateMetric(metric core.Metric) (serrs []serror.SnapError)
validatePluginUnloading(*loadedPlugin) (errs []serror.SnapError)
}
type subscriptionGroup struct {
*pluginControl
// requested metrics - never updated
requestedMetrics []core.RequestedMetric
// requested plugins - contains only processors and publishers;
// never updated
requestedPlugins []core.SubscribedPlugin
// config from request - never updated
configTree *cdata.ConfigDataTree
// resulting metrics - updated after plugin load/unload events; they are grouped by plugin
metrics map[string]metricTypes
// resulting plugins - updated after plugin load/unload events
plugins []core.SubscribedPlugin
// errors generated the last time the subscription was processed
// subscription groups are processed when the subscription group is added
// and when plugins are loaded/unloaded
errors []serror.SnapError
}
type subscriptionMap map[string]*subscriptionGroup
type subscriptionGroups struct {
subscriptionMap
*sync.Mutex
*pluginControl
}
func newSubscriptionGroups(control *pluginControl) *subscriptionGroups {
return &subscriptionGroups{
make(map[string]*subscriptionGroup),
&sync.Mutex{},
control,
}
}
// Add adds a subscription group provided a subscription group id, requested
// metrics, config tree and plugins. The requested metrics are mapped to
// collector plugins which are then combined with the provided (processor and
// publisher) plugins. The provided config map is used to construct the
// []core.Metric which will be used during collect calls made against the
// subscription group.
// Returns an array of errors ([]serror.SnapError).
// `ErrSubscriptionGroupAlreadyExists` is returned if the subscription already
// exists. Also, if there are errors mapping the requested metrics to plugins
// those are returned.
func (s subscriptionGroups) Add(id string, requested []core.RequestedMetric,
configTree *cdata.ConfigDataTree,
plugins []core.SubscribedPlugin) []serror.SnapError {
s.Lock()
defer s.Unlock()
errs := s.add(id, requested, configTree, plugins)
return errs
}
func (s subscriptionGroups) add(id string, requested []core.RequestedMetric,
configTree *cdata.ConfigDataTree,
plugins []core.SubscribedPlugin) []serror.SnapError {
if _, ok := s.subscriptionMap[id]; ok {
return []serror.SnapError{serror.New(ErrSubscriptionGroupAlreadyExists)}
}
subscriptionGroup := &subscriptionGroup{
requestedMetrics: requested,
requestedPlugins: plugins,
configTree: configTree,
pluginControl: s.pluginControl,
}
errs := subscriptionGroup.process(id)
if errs != nil {
return errs
}
s.subscriptionMap[id] = subscriptionGroup
return nil
}
// Remove removes a subscription group given a subscription group ID.
func (s subscriptionGroups) Remove(id string) []serror.SnapError {
s.Lock()
defer s.Unlock()
return s.remove(id)
}
func (s subscriptionGroups) remove(id string) []serror.SnapError {
subscriptionGroup, ok := s.subscriptionMap[id]
if !ok {
return []serror.SnapError{serror.New(ErrSubscriptionGroupDoesNotExist)}
}
serrs := subscriptionGroup.unsubscribePlugins(id, s.subscriptionMap[id].plugins)
delete(s.subscriptionMap, id)
return serrs
}
// Get returns the metrics (core.Metric) and an array of serror.SnapError when
// provided a subscription ID. The array of serror.SnapError returned was
// produced the last time `process` was run, which is important since
// unloading/loading a plugin may produce errors when the requested metrics
// are looked up in the metric catalog. Those errors will be provided back to
// the caller of the subscription group on the next `CollectMetrics`.
// Returns `ErrSubscriptionGroupDoesNotExist` when the subscription group
// does not exist.
func (s subscriptionGroups) Get(id string) (map[string]metricTypes, []serror.SnapError, error) {
s.Lock()
defer s.Unlock()
return s.get(id)
}
func (s subscriptionGroups) get(id string) (map[string]metricTypes, []serror.SnapError, error) {
if _, ok := s.subscriptionMap[id]; !ok {
return nil, nil, ErrSubscriptionGroupDoesNotExist
}
sg := s.subscriptionMap[id]
return sg.metrics, sg.errors, nil
}
// Process compares the new set of plugins with the previous set of plugins
// for the given subscription group, subscribing to plugins that were added
// and unsubscribing from those that were removed since the last time the
// subscription group was processed.
// Returns an array of errors ([]serror.SnapError) which can occur when
// mapping requested metrics to collector plugins and getting a core.Plugin
// from a core.Requested.Plugin.
// When processing a subscription group the resulting metrics grouped by plugin
// (subscriptionGroup.metrics) for all subscription groups are updated based
// on the requested metrics (subscriptionGroup.requestedMetrics). Similarly
// the required plugins (subscriptionGroup.plugins) are also updated.
func (s *subscriptionGroups) Process() (errs []serror.SnapError) {
s.Lock()
defer s.Unlock()
for id, group := range s.subscriptionMap {
if serrs := group.process(id); serrs != nil {
errs = append(errs, serrs...)
}
}
return errs
}
func (s *subscriptionGroups) ValidateDeps(requested []core.RequestedMetric,
plugins []core.SubscribedPlugin,
configTree *cdata.ConfigDataTree, asserts ...core.SubscribedPluginAssert) (serrs []serror.SnapError) {
// resolve requested metrics and map to collectors
pluginToMetricMap, collectors, errs := s.getMetricsAndCollectors(requested, configTree)
if errs != nil {
serrs = append(serrs, errs...)
}
// Validate if schedule type is streaming and we have a non-streaming plugin or vice versa
for _, assert := range asserts {
if serr := assert(collectors); serr != nil {
serrs = append(serrs, serr)
}
}
if len(serrs) > 0 {
return serrs
}
// validate metric types
for _, pmt := range pluginToMetricMap {
for _, mt := range pmt.Metrics() {
errs := s.validateMetric(mt)
if len(errs) > 0 {
serrs = append(serrs, errs...)
}
}
}
// add collectors to plugins (processors and publishers)
for _, collector := range collectors {
plugins = append(plugins, collector)
}
// validate plugins
for _, plg := range plugins {
typ, err := core.ToPluginType(plg.TypeName())
if err != nil {
return []serror.SnapError{serror.New(err)}
}
mergedConfig := plg.Config().ReverseMerge(
s.Config.Plugins.getPluginConfigDataNode(
typ, plg.Name(), plg.Version()))
errs := s.validatePluginSubscription(plg, mergedConfig)
if len(errs) > 0 {
serrs = append(serrs, errs...)
return serrs
}
}
return
}
// validatePluginUnloading checks whether unloading the plugin is safe for existing running tasks.
// If the plugin is used by a running task and there is no replacement, it returns an error whose
// message contains the IDs of the tasks using the plugin; this blocks the unloading process
// until those tasks are stopped
func (s *subscriptionGroups) validatePluginUnloading(pluginToUnload *loadedPlugin) (errs []serror.SnapError) {
s.Lock()
defer s.Unlock()
for id, group := range s.subscriptionMap {
if err := group.validatePluginUnloading(id, pluginToUnload); err != nil {
errs = append(errs, err)
}
}
return errs
}
func (p *subscriptionGroups) validatePluginSubscription(pl core.SubscribedPlugin, mergedConfig *cdata.ConfigDataNode) []serror.SnapError {
var serrs = []serror.SnapError{}
controlLogger.WithFields(log.Fields{
"_block": "validate-plugin-subscription",
"plugin": fmt.Sprintf("%s:%d", pl.Name(), pl.Version()),
}).Info(fmt.Sprintf("validating dependencies for plugin %s:%d", pl.Name(), pl.Version()))
lp, err := p.pluginManager.get(key(pl))
if err != nil {
serrs = append(serrs, pluginNotFoundError(pl))
return serrs
}
if lp.ConfigPolicy != nil {
ncd := lp.ConfigPolicy.Get([]string{""})
_, errs := ncd.Process(mergedConfig.Table())
if errs != nil && errs.HasErrors() {
for _, e := range errs.Errors() {
se := serror.New(e)
se.SetFields(map[string]interface{}{"name": pl.Name(), "version": pl.Version()})
serrs = append(serrs, se)
}
}
}
return serrs
}
func (s *subscriptionGroups) validateMetric(
metric core.Metric) (serrs []serror.SnapError) {
mts, err := s.metricCatalog.GetMetrics(metric.Namespace(), metric.Version())
if err != nil {
serrs = append(serrs, serror.New(err, map[string]interface{}{
"name": metric.Namespace().String(),
"version": metric.Version(),
}))
return serrs
}
for _, m := range mts {
// No metric found; return an error.
if m == nil {
serrs = append(
serrs, serror.New(
fmt.Errorf("no metric found cannot subscribe: (%s) version(%d)",
metric.Namespace(), metric.Version())))
continue
}
m.config = metric.Config()
typ, serr := core.ToPluginType(m.Plugin.TypeName())
if serr != nil {
serrs = append(serrs, serror.New(serr))
continue
}
// merge global plugin config
if m.config != nil {
m.config.ReverseMergeInPlace(
s.Config.Plugins.getPluginConfigDataNode(typ,
m.Plugin.Name(), m.Plugin.Version()))
} else {
m.config = s.Config.Plugins.getPluginConfigDataNode(typ,
m.Plugin.Name(), m.Plugin.Version())
}
// When a metric is added to the MetricCatalog, the policy of rules defined by the plugin is added to the metric's policy.
// If no rules are defined for a metric, we set the metric's policy to an empty ConfigPolicyNode.
// Checking m.policy for nil will not work; we need to check whether it has rules.
if m.policy.HasRules() {
if m.Config() == nil {
fields := log.Fields{
"metric": m.Namespace(),
"version": m.Version(),
"plugin": m.Plugin.Name(),
}
serrs = append(serrs, serror.New(ErrConfigRequiredForMetric, fields))
continue
}
ncdTable, errs := m.policy.Process(m.Config().Table())
if errs != nil && errs.HasErrors() {
for _, e := range errs.Errors() {
serrs = append(serrs, serror.New(e))
}
continue
}
m.config = cdata.FromTable(*ncdTable)
}
}
return serrs
}
// pluginIsSubscribed returns true if the provided plugin is found among the plugins
// subscribed in this subscription group
func (s *subscriptionGroup) | (plugin *loadedPlugin) bool {
// range over subscribed plugins to find if the plugin is there
for _, sp := range s.plugins {
if sp.TypeName() == plugin.TypeName() && sp.Name() == plugin.Name() && sp.Version() == plugin.Version() {
return true
}
}
return false
}
// validatePluginUnloading verifies if a given plugin might be unloaded without causing running task failures
func (s *subscriptionGroup) validatePluginUnloading(id string, plgToUnload *loadedPlugin) (serr serror.SnapError) {
impacted := false
if !s.pluginIsSubscribed(plgToUnload) {
// the plugin is not subscribed, so the task is not impacted by its unloading
return nil
}
controlLogger.WithFields(log.Fields{
"_block": "subscriptionGroup.validatePluginUnloading",
"task-id": id,
"plugin-to-unload": plgToUnload.Key(),
}).Debug("validating impact of unloading the plugin")
for _, requestedMetric := range s.requestedMetrics {
// get all plugins exposing the requested metric
plgs, _ := s.GetPlugins(requestedMetric.Namespace())
// when requested version is fixed (greater than 0), take into account only plugins in the requested version
if requestedMetric.Version() > 0 {
// skip those which are not impacted by unloading (version different than plgToUnload.Version())
if requestedMetric.Version() == plgToUnload.Version() {
plgsInVer := []core.CatalogedPlugin{}
for _, plg := range plgs {
if plg.Version() == requestedMetric.Version() {
plgsInVer = append(plgsInVer, plg)
}
}
// set plugins only in the requested version
plgs = plgsInVer
}
}
if len(plgs) == 1 && plgs[0].Key() == plgToUnload.Key() {
// the requested metric is exposed by only a single plugin and there is no replacement
impacted = true
controlLogger.WithFields(log.Fields{
"_block": "subscriptionGroup.validatePluginUnloading",
"task-id": id,
"plugin-to-unload": plgToUnload.Key(),
"requested-metric": fmt.Sprintf("%s:%d", requestedMetric.Namespace(), requestedMetric.Version()),
}).Errorf("unloading the plugin would cause missing in collection the requested metric")
}
}
if impacted {
serr = serror.New(ErrPluginCannotBeUnloaded, map[string]interface{}{
"task-id": id,
"plugin-to-unload": plgToUnload.Key(),
})
}
return serr
}
func (s *subscriptionGroup) process(id string) (serrs []serror.SnapError) {
// gathers collectors based on requested metrics
pluginToMetricMap, plugins, serrs := s.getMetricsAndCollectors(s.requestedMetrics, s.configTree)
controlLogger.WithFields(log.Fields{
"collectors": fmt.Sprintf("%+v", plugins),
"metrics": fmt.Sprintf("%+v", s.requestedMetrics),
}).Debug("gathered collectors")
// note that the requested plugins contain only processors and publishers
for _, plugin := range s.requestedPlugins {
// add defaults to plugins (exposed in a plugins ConfigPolicy)
if lp, err := s.pluginManager.get(
fmt.Sprintf("%s"+core.Separator+"%s"+core.Separator+"%d",
plugin.TypeName(),
plugin.Name(),
plugin.Version())); err == nil && lp.ConfigPolicy != nil {
if policy := lp.ConfigPolicy.Get([]string{""}); policy != nil && len(policy.Defaults()) > 0 {
// set defaults to plugin config
plugin.Config().ApplyDefaults(policy.Defaults())
}
// update version info for subscribed processor or publisher
version := plugin.Version()
if version < 1 {
version = lp.Version()
}
s := subscribedPlugin{
name: plugin.Name(),
typeName: plugin.TypeName(),
version: version,
config: plugin.Config(),
}
// add processors and publishers to collectors just gathered
plugins = append(plugins, s)
}
}
// calculate which plugins need to be subscribed and which need to be unsubscribed
subs, unsubs := comparePlugins(plugins, s.plugins)
controlLogger.WithFields(log.Fields{
"subs": fmt.Sprintf("%+v", subs),
"unsubs": fmt.Sprintf("%+v", unsubs),
}).Debug("subscriptions")
if len(subs) > 0 {
if errs := s.subscribePlugins(id, subs); errs != nil {
serrs = append(serrs, errs...)
}
}
if len(unsubs) > 0 {
if errs := s.unsubscribePlugins(id, unsubs); errs != nil {
serrs = append(serrs, errs...)
}
}
// updating view
// metrics are grouped by plugin
s.metrics = pluginToMetricMap
s.plugins = plugins
s.errors = serrs
return serrs
}
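// subscribePlugins verifies that every requested plugin is loaded, prepares an
// available-plugin pool for each one (fetching remote plugins over HTTP or starting
// local ones as needed), and emits a subscription event per plugin.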
func (s *subscriptionGroup) subscribePlugins(id string,
plugins []core.SubscribedPlugin) (serrs []serror.SnapError) {
plgs := make([]*loadedPlugin, len(plugins))
// First range through plugins to verify if all required plugins
// are available
for i, sub := range plugins {
plg, err := s.pluginManager.get(key(sub))
if err != nil {
serrs = append(serrs, pluginNotFoundError(sub))
return serrs
}
plgs[i] = plg
}
// If all plugins are available, subscribe to pools and start
// plugins as needed
for _, plg := range plgs {
controlLogger.WithFields(log.Fields{
"name": plg.Name(),
"type": plg.TypeName(),
"version": plg.Version(),
"_block": "subscriptionGroup.subscribePlugins",
}).Debug("plugin subscription")
if plg.Details.Uri != nil {
// this is a remote plugin
pool, err := s.pluginRunner.AvailablePlugins().getOrCreatePool(plg.Key())
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
if pool.Count() < 1 {
var resp plugin.Response
res, err := http.Get(plg.Details.Uri.String())
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
body, err := ioutil.ReadAll(res.Body)
res.Body.Close() // close the body to avoid leaking the connection
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
err = json.Unmarshal(body, &resp)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
ap, err := newAvailablePlugin(resp, s.eventManager, nil, s.grpcSecurity)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
ap.SetIsRemote(true)
err = pool.Insert(ap)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
}
} else {
pool, err := s.pluginRunner.AvailablePlugins().getOrCreatePool(plg.Key())
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
pool.Subscribe(id)
if pool.Eligible() {
err = s.verifyPlugin(plg)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
err = s.pluginRunner.runPlugin(plg.Name(), plg.Details)
if err != nil {
serrs = append(serrs, serror.New(err))
return serrs
}
}
}
serr := s.sendPluginSubscriptionEvent(id, plg)
if serr != nil {
serrs = append(serrs, serr)
return serrs
}
}
return serrs
}
func (p *subscriptionGroup) unsubscribePlugins(id string,
plugins []core.SubscribedPlugin) (serrs []serror.SnapError) {
for _, plugin := range plugins {
controlLogger.WithFields(log.Fields{
"name": plugin.Name(),
"type": plugin.TypeName(),
"version": plugin.Version(),
"_block": "subscriptionGroup.unsubscribePlugins",
}).Debug("plugin unsubscription")
pool, err := p.pluginRunner.AvailablePlugins().getPool(key(plugin))
if err != nil {
serrs = append(serrs, err)
return serrs
}
if pool != nil {
pool.Unsubscribe(id)
}
serr := p.sendPluginUnsubscriptionEvent(id, plugin)
if serr != nil {
serrs = append(serrs, serr)
}
}
return
}
func (p *subscriptionGroup) sendPluginSubscriptionEvent(taskID string,
pl core.Plugin) serror.SnapError {
pt, err := core.ToPluginType(pl.TypeName())
if err != nil {
return serror.New(err)
}
e := &control_event.PluginSubscriptionEvent{
TaskId: taskID,
PluginType: int(pt),
PluginName: pl.Name(),
PluginVersion: pl.Version(),
}
if _, err := p.eventManager.Emit(e); err != nil {
return serror.New(err)
}
return nil
}
func (p *subscriptionGroup) sendPluginUnsubscriptionEvent(taskID string,
pl core.Plugin) serror.SnapError {
pt, err := core.ToPluginType(pl.TypeName())
if err != nil {
return serror.New(err)
}
e := &control_event.PluginUnsubscriptionEvent{
TaskId: taskID,
PluginType: int(pt),
PluginName: pl.Name(),
PluginVersion: pl.Version(),
}
if _, err := p.eventManager.Emit(e); err != nil {
return serror.New(err)
}
return nil
}
// comparePlugins compares the new state of plugins with the previous state.
// It returns an array of plugins that need to be subscribed and an array of
// plugins that need to be unsubscribed.
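// For example, given newPlugins = [A, A, B] and oldPlugins = [A, C], adds = [A, B]
// and removes = [C]; occurrences are counted, so duplicate subscriptions are kept.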
func comparePlugins(newPlugins,
oldPlugins []core.SubscribedPlugin) (adds,
removes []core.SubscribedPlugin) {
newMap := make(map[string]int)
oldMap := make(map[string]int)
for _, n := range newPlugins {
newMap[key(n)]++
}
for _, o := range oldPlugins {
oldMap[key(o)]++
}
for _, n := range newPlugins {
if oldMap[key(n)] > 0 {
oldMap[key(n)]--
continue
}
adds = append(adds, n)
}
for _, o := range oldPlugins {
if newMap[key(o)] > 0 {
newMap[key(o)]--
continue
}
removes = append(removes, o)
}
return
}
func pluginNotFoundError(pl core.SubscribedPlugin) serror.SnapError {
se := serror.New(fmt.Errorf("Plugin not found: type(%s) name(%s) version(%d)", pl.TypeName(), pl.Name(), pl.Version()))
se.SetFields(map[string]interface{}{
"name": pl.Name(),
"version": pl.Version(),
"type": pl.TypeName(),
})
return se
}
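// key builds the catalog lookup key for a subscribed plugin from its type name,
// plugin name, and version, joined by core.Separator.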
func key(p core.SubscribedPlugin) string {
return fmt.Sprintf("%v"+core.Separator+"%v"+core.Separator+"%v", p.TypeName(), p.Name(), p.Version())
}
| pluginIsSubscribed | identifier_name |
prod.go | package env
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"fmt"
"net"
"os"
"strings"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/deploy/generator"
basekeyvault "github.com/Azure/ARO-RP/pkg/util/azureclient/keyvault"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/compute"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/dns"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/documentdb"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/keyvault"
"github.com/Azure/ARO-RP/pkg/util/clientauthorizer"
"github.com/Azure/ARO-RP/pkg/util/instancemetadata"
"github.com/Azure/ARO-RP/pkg/util/pem"
"github.com/Azure/ARO-RP/pkg/util/refreshable"
"github.com/Azure/ARO-RP/pkg/util/version"
)
type prod struct {
instancemetadata.InstanceMetadata
armClientAuthorizer clientauthorizer.ClientAuthorizer
adminClientAuthorizer clientauthorizer.ClientAuthorizer
keyvault basekeyvault.BaseClient
acrName string
clustersKeyvaultURI string
cosmosDBAccountName string
cosmosDBPrimaryMasterKey string
domain string
serviceKeyvaultURI string
zones map[string][]string
fpCertificate *x509.Certificate
fpPrivateKey *rsa.PrivateKey
fpServicePrincipalID string
clustersGenevaLoggingCertificate *x509.Certificate
clustersGenevaLoggingPrivateKey *rsa.PrivateKey
clustersGenevaLoggingConfigVersion string
clustersGenevaLoggingEnvironment string
e2eStorageAccountName string
e2eStorageAccountRGName string
e2eStorageAccountSubID string
log *logrus.Entry
envType environmentType
}
func newProd(ctx context.Context, log *logrus.Entry, instancemetadata instancemetadata.InstanceMetadata, rpAuthorizer, kvAuthorizer autorest.Authorizer) (*prod, error) {
p := &prod{
InstanceMetadata: instancemetadata,
keyvault: basekeyvault.New(kvAuthorizer),
clustersGenevaLoggingEnvironment: "DiagnosticsProd",
clustersGenevaLoggingConfigVersion: "2.2",
log: log,
envType: environmentTypeProduction,
}
err := p.populateCosmosDB(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
err = p.populateDomain(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
err = p.populateVaultURIs(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
err = p.populateZones(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
fpPrivateKey, fpCertificates, err := p.GetCertificateSecret(ctx, RPFirstPartySecretName)
if err != nil {
return nil, err
}
p.fpPrivateKey = fpPrivateKey
p.fpCertificate = fpCertificates[0]
p.fpServicePrincipalID = "f1dd0a37-89c6-4e07-bcd1-ffd3d43d8875"
clustersGenevaLoggingPrivateKey, clustersGenevaLoggingCertificates, err := p.GetCertificateSecret(ctx, ClusterLoggingSecretName)
if err != nil {
return nil, err
}
p.clustersGenevaLoggingPrivateKey = clustersGenevaLoggingPrivateKey
p.clustersGenevaLoggingCertificate = clustersGenevaLoggingCertificates[0]
p.e2eStorageAccountName = "arov4e2e"
p.e2eStorageAccountRGName = "global"
p.e2eStorageAccountSubID = "0923c7de-9fca-4d9e-baf3-131d0c5b2ea4"
if p.ACRResourceID() != "" { // TODO: ugh!
acrResource, err := azure.ParseResourceID(p.ACRResourceID())
if err != nil {
return nil, err
}
p.acrName = acrResource.ResourceName
} else {
p.acrName = "arointsvc"
}
return p, nil
}
func (p *prod) InitializeAuthorizers() error {
p.armClientAuthorizer = clientauthorizer.NewARM(p.log)
adminClientAuthorizer, err := clientauthorizer.NewAdmin(
p.log,
"/etc/aro-rp/admin-ca-bundle.pem",
os.Getenv("ADMIN_API_CLIENT_CERT_COMMON_NAME"),
)
if err != nil {
return err
}
p.adminClientAuthorizer = adminClientAuthorizer
return nil
}
func (p *prod) ArmClientAuthorizer() clientauthorizer.ClientAuthorizer {
return p.armClientAuthorizer
}
func (p *prod) AdminClientAuthorizer() clientauthorizer.ClientAuthorizer {
return p.adminClientAuthorizer
}
func (p *prod) ACRResourceID() string {
return os.Getenv("ACR_RESOURCE_ID")
}
func (p *prod) ACRName() string {
return p.acrName
}
func (p *prod) AROOperatorImage() string {
return fmt.Sprintf("%s.azurecr.io/aro:%s", p.acrName, version.GitCommit)
}
func (p *prod) populateCosmosDB(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
databaseaccounts := documentdb.NewDatabaseAccountsClient(p.SubscriptionID(), rpAuthorizer)
accts, err := databaseaccounts.ListByResourceGroup(ctx, p.ResourceGroup())
if err != nil {
return err
}
if len(*accts.Value) != 1 {
return fmt.Errorf("found %d database accounts, expected 1", len(*accts.Value))
}
keys, err := databaseaccounts.ListKeys(ctx, p.ResourceGroup(), *(*accts.Value)[0].Name)
if err != nil {
return err
}
p.cosmosDBAccountName = *(*accts.Value)[0].Name
p.cosmosDBPrimaryMasterKey = *keys.PrimaryMasterKey
return nil
}
func (p *prod) populateDomain(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
zones := dns.NewZonesClient(p.SubscriptionID(), rpAuthorizer)
zs, err := zones.ListByResourceGroup(ctx, p.ResourceGroup(), nil)
if err != nil {
return err
}
if len(zs) != 1 {
return fmt.Errorf("found %d zones, expected 1", len(zs))
}
p.domain = *zs[0].Name
return nil
}
func (p *prod) populateVaultURIs(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
vaults := keyvault.NewVaultsClient(p.SubscriptionID(), rpAuthorizer)
vs, err := vaults.ListByResourceGroup(ctx, p.ResourceGroup(), nil)
if err != nil {
return err
}
for _, v := range vs {
if v.Tags[generator.KeyVaultTagName] != nil {
switch *v.Tags[generator.KeyVaultTagName] {
case generator.ClustersKeyVaultTagValue:
p.clustersKeyvaultURI = *v.Properties.VaultURI
case generator.ServiceKeyVaultTagValue:
p.serviceKeyvaultURI = *v.Properties.VaultURI
}
}
}
if p.clustersKeyvaultURI == "" {
return fmt.Errorf("clusters key vault not found")
}
if p.serviceKeyvaultURI == "" {
return fmt.Errorf("service key vault not found")
}
return nil
}
func (p *prod) populateZones(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
c := compute.NewResourceSkusClient(p.SubscriptionID(), rpAuthorizer)
skus, err := c.List(ctx, "")
if err != nil {
return err
}
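// zones maps a VM size name to the availability zones that support it in this
// region, e.g. (illustrative values) "Standard_D2s_v3" -> ["1", "2", "3"].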
p.zones = map[string][]string{}
for _, sku := range skus {
if !strings.EqualFold((*sku.Locations)[0], p.Location()) ||
*sku.ResourceType != "virtualMachines" {
continue
}
p.zones[*sku.Name] = *(*sku.LocationInfo)[0].Zones
}
return nil
}
func (p *prod) ClustersGenevaLoggingConfigVersion() string {
return p.clustersGenevaLoggingConfigVersion
}
func (p *prod) ClustersGenevaLoggingEnvironment() string {
return p.clustersGenevaLoggingEnvironment
}
func (p *prod) ClustersGenevaLoggingSecret() (*rsa.PrivateKey, *x509.Certificate) {
return p.clustersGenevaLoggingPrivateKey, p.clustersGenevaLoggingCertificate
}
func (p *prod) ClustersKeyvaultURI() string {
return p.clustersKeyvaultURI
}
func (p *prod) CosmosDB() (string, string) {
return p.cosmosDBAccountName, p.cosmosDBPrimaryMasterKey
}
func (p *prod) DatabaseName() string {
return "ARO"
}
func (p *prod) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext(ctx, network, address)
}
func (p *prod) Domain() string {
return p.domain
}
func (p *prod) FPAuthorizer(tenantID, resource string) (refreshable.Authorizer, error) {
oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID)
if err != nil {
return nil, err
}
sp, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, p.fpServicePrincipalID, p.fpCertificate, p.fpPrivateKey, resource)
if err != nil {
return nil, err
}
return refreshable.NewAuthorizer(sp), nil
}
func (p *prod) GetCertificateSecret(ctx context.Context, secretName string) (*rsa.PrivateKey, []*x509.Certificate, error) {
bundle, err := p.keyvault.GetSecret(ctx, p.serviceKeyvaultURI, secretName, "")
if err != nil {
return nil, nil, err
}
key, certs, err := pem.Parse([]byte(*bundle.Value))
if err != nil {
return nil, nil, err
}
if key == nil {
return nil, nil, fmt.Errorf("no private key found")
}
if len(certs) == 0 {
return nil, nil, fmt.Errorf("no certificate found")
}
return key, certs, nil
}
func (p *prod) GetSecret(ctx context.Context, secretName string) ([]byte, error) {
bundle, err := p.keyvault.GetSecret(ctx, p.serviceKeyvaultURI, secretName, "")
if err != nil {
return nil, err
}
return base64.StdEncoding.DecodeString(*bundle.Value)
}
func (p *prod) Listen() (net.Listener, error) {
return net.Listen("tcp", ":8443")
}
// ManagedDomain returns the fully qualified domain of a cluster if we manage
// it. If we don't, it returns the empty string. We manage only domains of the
// form "foo.$LOCATION.aroapp.io" and "foo" (we consider this a short form of
// the former).
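// For example, assuming Domain() returns "eastus.aroapp.io" (illustrative value):
// ManagedDomain("foo") and ManagedDomain("foo.eastus.aroapp.io") both return
// "foo.eastus.aroapp.io", while ManagedDomain("bar.example.com") returns "".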
func (p *prod) ManagedDomain(domain string) (string, error) {
if domain == "" ||
strings.HasPrefix(domain, ".") ||
strings.HasSuffix(domain, ".") {
// belt and braces: validation should already prevent this
return "", fmt.Errorf("invalid domain %q", domain)
}
domain = strings.TrimSuffix(domain, "."+p.Domain())
if strings.ContainsRune(domain, '.') {
return "", nil
}
return domain + "." + p.Domain(), nil
}
func (p *prod) MetricsSocketPath() string {
return "/var/etw/mdm_statsd.socket"
}
func (p *prod) Zones(vmSize string) ([]string, error) {
zones, found := p.zones[vmSize]
if !found {
return nil, fmt.Errorf("zone information not found for vm size %q", vmSize)
}
return zones, nil
}
func (p *prod) CreateARMResourceGroupRoleAssignment(ctx context.Context, fpAuthorizer refreshable.Authorizer, resourceGroup string) error |
func (p *prod) E2EStorageAccountName() string {
return p.e2eStorageAccountName
}
func (p *prod) E2EStorageAccountRGName() string {
return p.e2eStorageAccountRGName
}
func (p *prod) E2EStorageAccountSubID() string {
return p.e2eStorageAccountSubID
}
func (p *prod) ShouldDeployDenyAssignment() bool {
return p.envType == environmentTypeProduction
}
func (p *prod) IsDevelopment() bool {
return p.envType == environmentTypeDevelopment
}
| {
// ARM ResourceGroup role assignments are not required in production.
return nil
} | identifier_body |
prod.go | package env
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"fmt"
"net"
"os"
"strings"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/deploy/generator"
basekeyvault "github.com/Azure/ARO-RP/pkg/util/azureclient/keyvault"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/compute"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/dns"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/documentdb"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/keyvault"
"github.com/Azure/ARO-RP/pkg/util/clientauthorizer"
"github.com/Azure/ARO-RP/pkg/util/instancemetadata"
"github.com/Azure/ARO-RP/pkg/util/pem"
"github.com/Azure/ARO-RP/pkg/util/refreshable"
"github.com/Azure/ARO-RP/pkg/util/version"
)
type prod struct {
instancemetadata.InstanceMetadata
armClientAuthorizer clientauthorizer.ClientAuthorizer
adminClientAuthorizer clientauthorizer.ClientAuthorizer
keyvault basekeyvault.BaseClient
acrName string
clustersKeyvaultURI string
cosmosDBAccountName string
cosmosDBPrimaryMasterKey string
domain string
serviceKeyvaultURI string
zones map[string][]string
fpCertificate *x509.Certificate
fpPrivateKey *rsa.PrivateKey
fpServicePrincipalID string
clustersGenevaLoggingCertificate *x509.Certificate
clustersGenevaLoggingPrivateKey *rsa.PrivateKey
clustersGenevaLoggingConfigVersion string
clustersGenevaLoggingEnvironment string
e2eStorageAccountName string
e2eStorageAccountRGName string
e2eStorageAccountSubID string
log *logrus.Entry
envType environmentType
}
func newProd(ctx context.Context, log *logrus.Entry, instancemetadata instancemetadata.InstanceMetadata, rpAuthorizer, kvAuthorizer autorest.Authorizer) (*prod, error) {
p := &prod{
InstanceMetadata: instancemetadata,
keyvault: basekeyvault.New(kvAuthorizer),
clustersGenevaLoggingEnvironment: "DiagnosticsProd",
clustersGenevaLoggingConfigVersion: "2.2",
log: log,
envType: environmentTypeProduction,
}
err := p.populateCosmosDB(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
err = p.populateDomain(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
err = p.populateVaultURIs(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
err = p.populateZones(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
fpPrivateKey, fpCertificates, err := p.GetCertificateSecret(ctx, RPFirstPartySecretName)
if err != nil {
return nil, err
}
p.fpPrivateKey = fpPrivateKey
p.fpCertificate = fpCertificates[0]
p.fpServicePrincipalID = "f1dd0a37-89c6-4e07-bcd1-ffd3d43d8875"
clustersGenevaLoggingPrivateKey, clustersGenevaLoggingCertificates, err := p.GetCertificateSecret(ctx, ClusterLoggingSecretName)
if err != nil {
return nil, err
}
p.clustersGenevaLoggingPrivateKey = clustersGenevaLoggingPrivateKey
p.clustersGenevaLoggingCertificate = clustersGenevaLoggingCertificates[0]
p.e2eStorageAccountName = "arov4e2e"
p.e2eStorageAccountRGName = "global"
p.e2eStorageAccountSubID = "0923c7de-9fca-4d9e-baf3-131d0c5b2ea4"
if p.ACRResourceID() != "" { // TODO: ugh!
acrResource, err := azure.ParseResourceID(p.ACRResourceID())
if err != nil {
return nil, err
}
p.acrName = acrResource.ResourceName
} else {
p.acrName = "arointsvc"
}
return p, nil
}
func (p *prod) InitializeAuthorizers() error {
p.armClientAuthorizer = clientauthorizer.NewARM(p.log)
adminClientAuthorizer, err := clientauthorizer.NewAdmin(
p.log,
"/etc/aro-rp/admin-ca-bundle.pem",
os.Getenv("ADMIN_API_CLIENT_CERT_COMMON_NAME"),
)
if err != nil {
return err
}
p.adminClientAuthorizer = adminClientAuthorizer
return nil
}
func (p *prod) ArmClientAuthorizer() clientauthorizer.ClientAuthorizer {
return p.armClientAuthorizer
}
func (p *prod) AdminClientAuthorizer() clientauthorizer.ClientAuthorizer {
return p.adminClientAuthorizer
}
func (p *prod) ACRResourceID() string {
return os.Getenv("ACR_RESOURCE_ID")
}
func (p *prod) ACRName() string {
return p.acrName
}
func (p *prod) AROOperatorImage() string {
return fmt.Sprintf("%s.azurecr.io/aro:%s", p.acrName, version.GitCommit)
}
func (p *prod) populateCosmosDB(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
databaseaccounts := documentdb.NewDatabaseAccountsClient(p.SubscriptionID(), rpAuthorizer)
accts, err := databaseaccounts.ListByResourceGroup(ctx, p.ResourceGroup())
if err != nil {
return err
}
if len(*accts.Value) != 1 |
keys, err := databaseaccounts.ListKeys(ctx, p.ResourceGroup(), *(*accts.Value)[0].Name)
if err != nil {
return err
}
p.cosmosDBAccountName = *(*accts.Value)[0].Name
p.cosmosDBPrimaryMasterKey = *keys.PrimaryMasterKey
return nil
}
func (p *prod) populateDomain(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
zones := dns.NewZonesClient(p.SubscriptionID(), rpAuthorizer)
zs, err := zones.ListByResourceGroup(ctx, p.ResourceGroup(), nil)
if err != nil {
return err
}
if len(zs) != 1 {
return fmt.Errorf("found %d zones, expected 1", len(zs))
}
p.domain = *zs[0].Name
return nil
}
func (p *prod) populateVaultURIs(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
vaults := keyvault.NewVaultsClient(p.SubscriptionID(), rpAuthorizer)
vs, err := vaults.ListByResourceGroup(ctx, p.ResourceGroup(), nil)
if err != nil {
return err
}
for _, v := range vs {
if v.Tags[generator.KeyVaultTagName] != nil {
switch *v.Tags[generator.KeyVaultTagName] {
case generator.ClustersKeyVaultTagValue:
p.clustersKeyvaultURI = *v.Properties.VaultURI
case generator.ServiceKeyVaultTagValue:
p.serviceKeyvaultURI = *v.Properties.VaultURI
}
}
}
if p.clustersKeyvaultURI == "" {
return fmt.Errorf("clusters key vault not found")
}
if p.serviceKeyvaultURI == "" {
return fmt.Errorf("service key vault not found")
}
return nil
}
func (p *prod) populateZones(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
c := compute.NewResourceSkusClient(p.SubscriptionID(), rpAuthorizer)
skus, err := c.List(ctx, "")
if err != nil {
return err
}
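// zones maps a VM size name to the availability zones that support it in this
// region, e.g. (illustrative values) "Standard_D2s_v3" -> ["1", "2", "3"].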
p.zones = map[string][]string{}
for _, sku := range skus {
if !strings.EqualFold((*sku.Locations)[0], p.Location()) ||
*sku.ResourceType != "virtualMachines" {
continue
}
p.zones[*sku.Name] = *(*sku.LocationInfo)[0].Zones
}
return nil
}
func (p *prod) ClustersGenevaLoggingConfigVersion() string {
return p.clustersGenevaLoggingConfigVersion
}
func (p *prod) ClustersGenevaLoggingEnvironment() string {
return p.clustersGenevaLoggingEnvironment
}
func (p *prod) ClustersGenevaLoggingSecret() (*rsa.PrivateKey, *x509.Certificate) {
return p.clustersGenevaLoggingPrivateKey, p.clustersGenevaLoggingCertificate
}
func (p *prod) ClustersKeyvaultURI() string {
return p.clustersKeyvaultURI
}
func (p *prod) CosmosDB() (string, string) {
return p.cosmosDBAccountName, p.cosmosDBPrimaryMasterKey
}
func (p *prod) DatabaseName() string {
return "ARO"
}
func (p *prod) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext(ctx, network, address)
}
func (p *prod) Domain() string {
return p.domain
}
func (p *prod) FPAuthorizer(tenantID, resource string) (refreshable.Authorizer, error) {
oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID)
if err != nil {
return nil, err
}
sp, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, p.fpServicePrincipalID, p.fpCertificate, p.fpPrivateKey, resource)
if err != nil {
return nil, err
}
return refreshable.NewAuthorizer(sp), nil
}
func (p *prod) GetCertificateSecret(ctx context.Context, secretName string) (*rsa.PrivateKey, []*x509.Certificate, error) {
bundle, err := p.keyvault.GetSecret(ctx, p.serviceKeyvaultURI, secretName, "")
if err != nil {
return nil, nil, err
}
key, certs, err := pem.Parse([]byte(*bundle.Value))
if err != nil {
return nil, nil, err
}
if key == nil {
return nil, nil, fmt.Errorf("no private key found")
}
if len(certs) == 0 {
return nil, nil, fmt.Errorf("no certificate found")
}
return key, certs, nil
}
func (p *prod) GetSecret(ctx context.Context, secretName string) ([]byte, error) {
bundle, err := p.keyvault.GetSecret(ctx, p.serviceKeyvaultURI, secretName, "")
if err != nil {
return nil, err
}
return base64.StdEncoding.DecodeString(*bundle.Value)
}
func (p *prod) Listen() (net.Listener, error) {
return net.Listen("tcp", ":8443")
}
// ManagedDomain returns the fully qualified domain of a cluster if we manage
// it. If we don't, it returns the empty string. We manage only domains of the
// form "foo.$LOCATION.aroapp.io" and "foo" (we consider this a short form of
// the former).
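// For example, assuming Domain() returns "eastus.aroapp.io" (illustrative value):
// ManagedDomain("foo") and ManagedDomain("foo.eastus.aroapp.io") both return
// "foo.eastus.aroapp.io", while ManagedDomain("bar.example.com") returns "".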
func (p *prod) ManagedDomain(domain string) (string, error) {
if domain == "" ||
strings.HasPrefix(domain, ".") ||
strings.HasSuffix(domain, ".") {
// belt and braces: validation should already prevent this
return "", fmt.Errorf("invalid domain %q", domain)
}
domain = strings.TrimSuffix(domain, "."+p.Domain())
if strings.ContainsRune(domain, '.') {
return "", nil
}
return domain + "." + p.Domain(), nil
}
func (p *prod) MetricsSocketPath() string {
return "/var/etw/mdm_statsd.socket"
}
func (p *prod) Zones(vmSize string) ([]string, error) {
zones, found := p.zones[vmSize]
if !found {
return nil, fmt.Errorf("zone information not found for vm size %q", vmSize)
}
return zones, nil
}
func (p *prod) CreateARMResourceGroupRoleAssignment(ctx context.Context, fpAuthorizer refreshable.Authorizer, resourceGroup string) error {
// ARM ResourceGroup role assignments are not required in production.
return nil
}
func (p *prod) E2EStorageAccountName() string {
return p.e2eStorageAccountName
}
func (p *prod) E2EStorageAccountRGName() string {
return p.e2eStorageAccountRGName
}
func (p *prod) E2EStorageAccountSubID() string {
return p.e2eStorageAccountSubID
}
func (p *prod) ShouldDeployDenyAssignment() bool {
return p.envType == environmentTypeProduction
}
func (p *prod) IsDevelopment() bool {
return p.envType == environmentTypeDevelopment
}
| {
return fmt.Errorf("found %d database accounts, expected 1", len(*accts.Value))
} | conditional_block |
prod.go | package env
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"fmt"
"net"
"os"
"strings"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/deploy/generator"
basekeyvault "github.com/Azure/ARO-RP/pkg/util/azureclient/keyvault"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/compute"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/dns"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/documentdb"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/keyvault"
"github.com/Azure/ARO-RP/pkg/util/clientauthorizer"
"github.com/Azure/ARO-RP/pkg/util/instancemetadata"
"github.com/Azure/ARO-RP/pkg/util/pem"
"github.com/Azure/ARO-RP/pkg/util/refreshable"
"github.com/Azure/ARO-RP/pkg/util/version"
)
type prod struct {
instancemetadata.InstanceMetadata
armClientAuthorizer clientauthorizer.ClientAuthorizer
adminClientAuthorizer clientauthorizer.ClientAuthorizer
keyvault basekeyvault.BaseClient
acrName string
clustersKeyvaultURI string
cosmosDBAccountName string
cosmosDBPrimaryMasterKey string
domain string
serviceKeyvaultURI string
zones map[string][]string
fpCertificate *x509.Certificate
fpPrivateKey *rsa.PrivateKey
fpServicePrincipalID string
clustersGenevaLoggingCertificate *x509.Certificate
clustersGenevaLoggingPrivateKey *rsa.PrivateKey
clustersGenevaLoggingConfigVersion string
clustersGenevaLoggingEnvironment string
e2eStorageAccountName string
e2eStorageAccountRGName string
e2eStorageAccountSubID string
log *logrus.Entry
envType environmentType
}
func newProd(ctx context.Context, log *logrus.Entry, instancemetadata instancemetadata.InstanceMetadata, rpAuthorizer, kvAuthorizer autorest.Authorizer) (*prod, error) {
p := &prod{
InstanceMetadata: instancemetadata,
keyvault: basekeyvault.New(kvAuthorizer),
clustersGenevaLoggingEnvironment: "DiagnosticsProd",
clustersGenevaLoggingConfigVersion: "2.2",
log: log,
envType: environmentTypeProduction,
}
err := p.populateCosmosDB(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
err = p.populateDomain(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
err = p.populateVaultURIs(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
err = p.populateZones(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
fpPrivateKey, fpCertificates, err := p.GetCertificateSecret(ctx, RPFirstPartySecretName)
if err != nil {
return nil, err
}
p.fpPrivateKey = fpPrivateKey
p.fpCertificate = fpCertificates[0]
p.fpServicePrincipalID = "f1dd0a37-89c6-4e07-bcd1-ffd3d43d8875"
clustersGenevaLoggingPrivateKey, clustersGenevaLoggingCertificates, err := p.GetCertificateSecret(ctx, ClusterLoggingSecretName)
if err != nil {
return nil, err
}
p.clustersGenevaLoggingPrivateKey = clustersGenevaLoggingPrivateKey
p.clustersGenevaLoggingCertificate = clustersGenevaLoggingCertificates[0]
p.e2eStorageAccountName = "arov4e2e"
p.e2eStorageAccountRGName = "global"
p.e2eStorageAccountSubID = "0923c7de-9fca-4d9e-baf3-131d0c5b2ea4"
if p.ACRResourceID() != "" { // TODO: ugh!
acrResource, err := azure.ParseResourceID(p.ACRResourceID())
if err != nil {
return nil, err
}
p.acrName = acrResource.ResourceName
} else {
p.acrName = "arointsvc"
}
return p, nil
}
func (p *prod) InitializeAuthorizers() error {
p.armClientAuthorizer = clientauthorizer.NewARM(p.log)
adminClientAuthorizer, err := clientauthorizer.NewAdmin(
p.log,
"/etc/aro-rp/admin-ca-bundle.pem",
os.Getenv("ADMIN_API_CLIENT_CERT_COMMON_NAME"),
)
if err != nil {
return err
}
p.adminClientAuthorizer = adminClientAuthorizer
return nil
}
func (p *prod) ArmClientAuthorizer() clientauthorizer.ClientAuthorizer {
return p.armClientAuthorizer
}
func (p *prod) AdminClientAuthorizer() clientauthorizer.ClientAuthorizer {
return p.adminClientAuthorizer
}
| func (p *prod) ACRResourceID() string {
return os.Getenv("ACR_RESOURCE_ID")
}
func (p *prod) ACRName() string {
return p.acrName
}
func (p *prod) AROOperatorImage() string {
return fmt.Sprintf("%s.azurecr.io/aro:%s", p.acrName, version.GitCommit)
}
func (p *prod) populateCosmosDB(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
databaseaccounts := documentdb.NewDatabaseAccountsClient(p.SubscriptionID(), rpAuthorizer)
accts, err := databaseaccounts.ListByResourceGroup(ctx, p.ResourceGroup())
if err != nil {
return err
}
if len(*accts.Value) != 1 {
return fmt.Errorf("found %d database accounts, expected 1", len(*accts.Value))
}
keys, err := databaseaccounts.ListKeys(ctx, p.ResourceGroup(), *(*accts.Value)[0].Name)
if err != nil {
return err
}
p.cosmosDBAccountName = *(*accts.Value)[0].Name
p.cosmosDBPrimaryMasterKey = *keys.PrimaryMasterKey
return nil
}
func (p *prod) populateDomain(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
zones := dns.NewZonesClient(p.SubscriptionID(), rpAuthorizer)
zs, err := zones.ListByResourceGroup(ctx, p.ResourceGroup(), nil)
if err != nil {
return err
}
if len(zs) != 1 {
return fmt.Errorf("found %d zones, expected 1", len(zs))
}
p.domain = *zs[0].Name
return nil
}
func (p *prod) populateVaultURIs(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
vaults := keyvault.NewVaultsClient(p.SubscriptionID(), rpAuthorizer)
vs, err := vaults.ListByResourceGroup(ctx, p.ResourceGroup(), nil)
if err != nil {
return err
}
for _, v := range vs {
if v.Tags[generator.KeyVaultTagName] != nil {
switch *v.Tags[generator.KeyVaultTagName] {
case generator.ClustersKeyVaultTagValue:
p.clustersKeyvaultURI = *v.Properties.VaultURI
case generator.ServiceKeyVaultTagValue:
p.serviceKeyvaultURI = *v.Properties.VaultURI
}
}
}
if p.clustersKeyvaultURI == "" {
return fmt.Errorf("clusters key vault not found")
}
if p.serviceKeyvaultURI == "" {
return fmt.Errorf("service key vault not found")
}
return nil
}
func (p *prod) populateZones(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
c := compute.NewResourceSkusClient(p.SubscriptionID(), rpAuthorizer)
skus, err := c.List(ctx, "")
if err != nil {
return err
}
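// zones maps a VM size name to the availability zones that support it in this
// region, e.g. (illustrative values) "Standard_D2s_v3" -> ["1", "2", "3"].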
p.zones = map[string][]string{}
for _, sku := range skus {
if !strings.EqualFold((*sku.Locations)[0], p.Location()) ||
*sku.ResourceType != "virtualMachines" {
continue
}
p.zones[*sku.Name] = *(*sku.LocationInfo)[0].Zones
}
return nil
}
func (p *prod) ClustersGenevaLoggingConfigVersion() string {
return p.clustersGenevaLoggingConfigVersion
}
func (p *prod) ClustersGenevaLoggingEnvironment() string {
return p.clustersGenevaLoggingEnvironment
}
func (p *prod) ClustersGenevaLoggingSecret() (*rsa.PrivateKey, *x509.Certificate) {
return p.clustersGenevaLoggingPrivateKey, p.clustersGenevaLoggingCertificate
}
func (p *prod) ClustersKeyvaultURI() string {
return p.clustersKeyvaultURI
}
func (p *prod) CosmosDB() (string, string) {
return p.cosmosDBAccountName, p.cosmosDBPrimaryMasterKey
}
func (p *prod) DatabaseName() string {
return "ARO"
}
func (p *prod) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext(ctx, network, address)
}
func (p *prod) Domain() string {
return p.domain
}
func (p *prod) FPAuthorizer(tenantID, resource string) (refreshable.Authorizer, error) {
oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID)
if err != nil {
return nil, err
}
sp, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, p.fpServicePrincipalID, p.fpCertificate, p.fpPrivateKey, resource)
if err != nil {
return nil, err
}
return refreshable.NewAuthorizer(sp), nil
}
func (p *prod) GetCertificateSecret(ctx context.Context, secretName string) (*rsa.PrivateKey, []*x509.Certificate, error) {
bundle, err := p.keyvault.GetSecret(ctx, p.serviceKeyvaultURI, secretName, "")
if err != nil {
return nil, nil, err
}
key, certs, err := pem.Parse([]byte(*bundle.Value))
if err != nil {
return nil, nil, err
}
if key == nil {
return nil, nil, fmt.Errorf("no private key found")
}
if len(certs) == 0 {
return nil, nil, fmt.Errorf("no certificate found")
}
return key, certs, nil
}
func (p *prod) GetSecret(ctx context.Context, secretName string) ([]byte, error) {
bundle, err := p.keyvault.GetSecret(ctx, p.serviceKeyvaultURI, secretName, "")
if err != nil {
return nil, err
}
return base64.StdEncoding.DecodeString(*bundle.Value)
}
func (p *prod) Listen() (net.Listener, error) {
return net.Listen("tcp", ":8443")
}
// ManagedDomain returns the fully qualified domain of a cluster if we manage
// it. If we don't, it returns the empty string. We manage only domains of the
// form "foo.$LOCATION.aroapp.io" and "foo" (we consider this a short form of
// the former).
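// For example, assuming Domain() returns "eastus.aroapp.io" (illustrative value):
// ManagedDomain("foo") and ManagedDomain("foo.eastus.aroapp.io") both return
// "foo.eastus.aroapp.io", while ManagedDomain("bar.example.com") returns "".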
func (p *prod) ManagedDomain(domain string) (string, error) {
if domain == "" ||
strings.HasPrefix(domain, ".") ||
strings.HasSuffix(domain, ".") {
// belt and braces: validation should already prevent this
return "", fmt.Errorf("invalid domain %q", domain)
}
domain = strings.TrimSuffix(domain, "."+p.Domain())
if strings.ContainsRune(domain, '.') {
return "", nil
}
return domain + "." + p.Domain(), nil
}
func (p *prod) MetricsSocketPath() string {
return "/var/etw/mdm_statsd.socket"
}
func (p *prod) Zones(vmSize string) ([]string, error) {
zones, found := p.zones[vmSize]
if !found {
return nil, fmt.Errorf("zone information not found for vm size %q", vmSize)
}
return zones, nil
}
func (p *prod) CreateARMResourceGroupRoleAssignment(ctx context.Context, fpAuthorizer refreshable.Authorizer, resourceGroup string) error {
// ARM ResourceGroup role assignments are not required in production.
return nil
}
func (p *prod) E2EStorageAccountName() string {
return p.e2eStorageAccountName
}
func (p *prod) E2EStorageAccountRGName() string {
return p.e2eStorageAccountRGName
}
func (p *prod) E2EStorageAccountSubID() string {
return p.e2eStorageAccountSubID
}
func (p *prod) ShouldDeployDenyAssignment() bool {
return p.envType == environmentTypeProduction
}
func (p *prod) IsDevelopment() bool {
return p.envType == environmentTypeDevelopment
} | random_line_split |
|
prod.go | package env
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"fmt"
"net"
"os"
"strings"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/deploy/generator"
basekeyvault "github.com/Azure/ARO-RP/pkg/util/azureclient/keyvault"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/compute"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/dns"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/documentdb"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/keyvault"
"github.com/Azure/ARO-RP/pkg/util/clientauthorizer"
"github.com/Azure/ARO-RP/pkg/util/instancemetadata"
"github.com/Azure/ARO-RP/pkg/util/pem"
"github.com/Azure/ARO-RP/pkg/util/refreshable"
"github.com/Azure/ARO-RP/pkg/util/version"
)
type prod struct {
instancemetadata.InstanceMetadata
armClientAuthorizer clientauthorizer.ClientAuthorizer
adminClientAuthorizer clientauthorizer.ClientAuthorizer
keyvault basekeyvault.BaseClient
acrName string
clustersKeyvaultURI string
cosmosDBAccountName string
cosmosDBPrimaryMasterKey string
domain string
serviceKeyvaultURI string
zones map[string][]string
fpCertificate *x509.Certificate
fpPrivateKey *rsa.PrivateKey
fpServicePrincipalID string
clustersGenevaLoggingCertificate *x509.Certificate
clustersGenevaLoggingPrivateKey *rsa.PrivateKey
clustersGenevaLoggingConfigVersion string
clustersGenevaLoggingEnvironment string
e2eStorageAccountName string
e2eStorageAccountRGName string
e2eStorageAccountSubID string
log *logrus.Entry
envType environmentType
}
func newProd(ctx context.Context, log *logrus.Entry, instancemetadata instancemetadata.InstanceMetadata, rpAuthorizer, kvAuthorizer autorest.Authorizer) (*prod, error) {
p := &prod{
InstanceMetadata: instancemetadata,
keyvault: basekeyvault.New(kvAuthorizer),
clustersGenevaLoggingEnvironment: "DiagnosticsProd",
clustersGenevaLoggingConfigVersion: "2.2",
log: log,
envType: environmentTypeProduction,
}
err := p.populateCosmosDB(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
err = p.populateDomain(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
err = p.populateVaultURIs(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
err = p.populateZones(ctx, rpAuthorizer)
if err != nil {
return nil, err
}
fpPrivateKey, fpCertificates, err := p.GetCertificateSecret(ctx, RPFirstPartySecretName)
if err != nil {
return nil, err
}
p.fpPrivateKey = fpPrivateKey
p.fpCertificate = fpCertificates[0]
p.fpServicePrincipalID = "f1dd0a37-89c6-4e07-bcd1-ffd3d43d8875"
clustersGenevaLoggingPrivateKey, clustersGenevaLoggingCertificates, err := p.GetCertificateSecret(ctx, ClusterLoggingSecretName)
if err != nil {
return nil, err
}
p.clustersGenevaLoggingPrivateKey = clustersGenevaLoggingPrivateKey
p.clustersGenevaLoggingCertificate = clustersGenevaLoggingCertificates[0]
p.e2eStorageAccountName = "arov4e2e"
p.e2eStorageAccountRGName = "global"
p.e2eStorageAccountSubID = "0923c7de-9fca-4d9e-baf3-131d0c5b2ea4"
if p.ACRResourceID() != "" { // TODO: ugh!
acrResource, err := azure.ParseResourceID(p.ACRResourceID())
if err != nil {
return nil, err
}
p.acrName = acrResource.ResourceName
} else {
p.acrName = "arointsvc"
}
return p, nil
}
func (p *prod) InitializeAuthorizers() error {
p.armClientAuthorizer = clientauthorizer.NewARM(p.log)
adminClientAuthorizer, err := clientauthorizer.NewAdmin(
p.log,
"/etc/aro-rp/admin-ca-bundle.pem",
os.Getenv("ADMIN_API_CLIENT_CERT_COMMON_NAME"),
)
if err != nil {
return err
}
p.adminClientAuthorizer = adminClientAuthorizer
return nil
}
func (p *prod) ArmClientAuthorizer() clientauthorizer.ClientAuthorizer {
return p.armClientAuthorizer
}
func (p *prod) AdminClientAuthorizer() clientauthorizer.ClientAuthorizer {
return p.adminClientAuthorizer
}
func (p *prod) ACRResourceID() string {
return os.Getenv("ACR_RESOURCE_ID")
}
func (p *prod) ACRName() string {
return p.acrName
}
func (p *prod) AROOperatorImage() string {
return fmt.Sprintf("%s.azurecr.io/aro:%s", p.acrName, version.GitCommit)
}
func (p *prod) populateCosmosDB(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
databaseaccounts := documentdb.NewDatabaseAccountsClient(p.SubscriptionID(), rpAuthorizer)
accts, err := databaseaccounts.ListByResourceGroup(ctx, p.ResourceGroup())
if err != nil {
return err
}
if len(*accts.Value) != 1 {
return fmt.Errorf("found %d database accounts, expected 1", len(*accts.Value))
}
keys, err := databaseaccounts.ListKeys(ctx, p.ResourceGroup(), *(*accts.Value)[0].Name)
if err != nil {
return err
}
p.cosmosDBAccountName = *(*accts.Value)[0].Name
p.cosmosDBPrimaryMasterKey = *keys.PrimaryMasterKey
return nil
}
func (p *prod) populateDomain(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
zones := dns.NewZonesClient(p.SubscriptionID(), rpAuthorizer)
zs, err := zones.ListByResourceGroup(ctx, p.ResourceGroup(), nil)
if err != nil {
return err
}
if len(zs) != 1 {
return fmt.Errorf("found %d zones, expected 1", len(zs))
}
p.domain = *zs[0].Name
return nil
}
func (p *prod) populateVaultURIs(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
vaults := keyvault.NewVaultsClient(p.SubscriptionID(), rpAuthorizer)
vs, err := vaults.ListByResourceGroup(ctx, p.ResourceGroup(), nil)
if err != nil {
return err
}
for _, v := range vs {
if v.Tags[generator.KeyVaultTagName] != nil {
switch *v.Tags[generator.KeyVaultTagName] {
case generator.ClustersKeyVaultTagValue:
p.clustersKeyvaultURI = *v.Properties.VaultURI
case generator.ServiceKeyVaultTagValue:
p.serviceKeyvaultURI = *v.Properties.VaultURI
}
}
}
if p.clustersKeyvaultURI == "" {
return fmt.Errorf("clusters key vault not found")
}
if p.serviceKeyvaultURI == "" {
return fmt.Errorf("service key vault not found")
}
return nil
}
func (p *prod) populateZones(ctx context.Context, rpAuthorizer autorest.Authorizer) error {
c := compute.NewResourceSkusClient(p.SubscriptionID(), rpAuthorizer)
skus, err := c.List(ctx, "")
if err != nil {
return err
}
p.zones = map[string][]string{}
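// Index the availability zones by VM size, keeping only virtualMachines SKUs
// available in the RP's location.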
for _, sku := range skus {
if !strings.EqualFold((*sku.Locations)[0], p.Location()) ||
*sku.ResourceType != "virtualMachines" {
continue
}
p.zones[*sku.Name] = *(*sku.LocationInfo)[0].Zones
}
return nil
}
func (p *prod) ClustersGenevaLoggingConfigVersion() string {
return p.clustersGenevaLoggingConfigVersion
}
func (p *prod) ClustersGenevaLoggingEnvironment() string {
return p.clustersGenevaLoggingEnvironment
}
func (p *prod) ClustersGenevaLoggingSecret() (*rsa.PrivateKey, *x509.Certificate) {
return p.clustersGenevaLoggingPrivateKey, p.clustersGenevaLoggingCertificate
}
func (p *prod) ClustersKeyvaultURI() string {
return p.clustersKeyvaultURI
}
func (p *prod) CosmosDB() (string, string) {
return p.cosmosDBAccountName, p.cosmosDBPrimaryMasterKey
}
func (p *prod) DatabaseName() string {
return "ARO"
}
func (p *prod) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext(ctx, network, address)
}
func (p *prod) Domain() string {
return p.domain
}
func (p *prod) FPAuthorizer(tenantID, resource string) (refreshable.Authorizer, error) {
oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID)
if err != nil {
return nil, err
}
sp, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, p.fpServicePrincipalID, p.fpCertificate, p.fpPrivateKey, resource)
if err != nil {
return nil, err
}
return refreshable.NewAuthorizer(sp), nil
}
func (p *prod) GetCertificateSecret(ctx context.Context, secretName string) (*rsa.PrivateKey, []*x509.Certificate, error) {
bundle, err := p.keyvault.GetSecret(ctx, p.serviceKeyvaultURI, secretName, "")
if err != nil {
return nil, nil, err
}
key, certs, err := pem.Parse([]byte(*bundle.Value))
if err != nil {
return nil, nil, err
}
if key == nil {
return nil, nil, fmt.Errorf("no private key found")
}
if len(certs) == 0 {
return nil, nil, fmt.Errorf("no certificate found")
}
return key, certs, nil
}
func (p *prod) GetSecret(ctx context.Context, secretName string) ([]byte, error) {
bundle, err := p.keyvault.GetSecret(ctx, p.serviceKeyvaultURI, secretName, "")
if err != nil {
return nil, err
}
return base64.StdEncoding.DecodeString(*bundle.Value)
}
func (p *prod) Listen() (net.Listener, error) {
return net.Listen("tcp", ":8443")
}
// ManagedDomain returns the fully qualified domain of a cluster if we manage
// it. If we don't, it returns the empty string. We manage only domains of the
// form "foo.$LOCATION.aroapp.io" and "foo" (we consider this a short form of
// the former).
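// For example, if Domain() were "eastus.aroapp.io" (hypothetical):
//   ManagedDomain("foo") -> "foo.eastus.aroapp.io"
//   ManagedDomain("foo.eastus.aroapp.io") -> "foo.eastus.aroapp.io"
//   ManagedDomain("bar.example.com") -> "" (not managed)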
func (p *prod) ManagedDomain(domain string) (string, error) {
if domain == "" ||
strings.HasPrefix(domain, ".") ||
strings.HasSuffix(domain, ".") {
// belt and braces: validation should already prevent this
return "", fmt.Errorf("invalid domain %q", domain)
}
domain = strings.TrimSuffix(domain, "."+p.Domain())
if strings.ContainsRune(domain, '.') {
return "", nil
}
return domain + "." + p.Domain(), nil
}
func (p *prod) MetricsSocketPath() string {
return "/var/etw/mdm_statsd.socket"
}
func (p *prod) Zones(vmSize string) ([]string, error) {
zones, found := p.zones[vmSize]
if !found {
return nil, fmt.Errorf("zone information not found for vm size %q", vmSize)
}
return zones, nil
}
func (d *prod) CreateARMResourceGroupRoleAssignment(ctx context.Context, fpAuthorizer refreshable.Authorizer, resourceGroup string) error {
// ARM ResourceGroup role assignments are not required in production.
return nil
}
func (p *prod) E2EStorageAccountName() string {
return p.e2eStorageAccountName
}
func (p *prod) E2EStorageAccountRGName() string {
return p.e2eStorageAccountRGName
}
func (p *prod) E2EStorageAccountSubID() string {
return p.e2eStorageAccountSubID
}
func (p *prod) | () bool {
return p.envType == environmentTypeProduction
}
func (p *prod) IsDevelopment() bool {
return p.envType == environmentTypeDevelopment
}
| ShouldDeployDenyAssignment | identifier_name |
pathy.go | package main
import (
"fmt"
"os"
"strings"
"path/filepath"
"strconv"
"time"
"math"
)
type PathyMode int
const (
Draw PathyMode = iota
BenchSingle
BenchAndDrawSingle
BenchMultiple
BenchAndDrawMultiple
)
type PathyParameters struct {
Mode PathyMode
InPath string
OutPath string
Scale int
Algo func(Node, Node) []Node
N int
Trials int
StartX, StartY, GoalX, GoalY int
}
var counter = 0
func readNextArg() string {
arg := os.Args[counter]
counter++
return arg
}
func main() {
// Print help
if len(os.Args) < 2 {
fmt.Printf("%s is a tool for visualization and benchmarking of pathfinding algorithms.\n\n", os.Args[0])
fmt.Println("To draw a map:")
fmt.Printf(" %s draw map_file output_jpg scale\n", os.Args[0])
fmt.Println("To benchmark one scenario:")
fmt.Printf(" %s single map_file start_x start_y goal_x goal_y algorithm trials\n", os.Args[0])
fmt.Println("To benchmark one scenario and draw its path:")
fmt.Printf(" %s single map_file start_x start_y goal_x goal_y algorithm trials output_jpg scale\n", os.Args[0])
fmt.Println("To benchmark multiple scenarios:")
fmt.Printf(" %s multiple scenarios_file algorithm n trials\n", os.Args[0])
fmt.Println("To benchmark multiple scenarios and draw their paths:")
fmt.Printf(" %s multiple scenarios_file algorithm n trials output_dir scale\n\n", os.Args[0])
fmt.Println("Accepted algorithms are \"dijkstra\", \"astar\", \"astar-ps\" and \"thetastar\". N is the amount of scenarios to pick from the file. They are evenly spread out in terms of problem size.")
os.Exit(0)
}
readNextArg() // Skip program name
modeString := readNextArg()
var p PathyParameters
// Read command-line arguments
switch (strings.ToLower(modeString)) {
case "draw":
p = getDrawModeParameters()
case "single":
p = getSingleModeParameters()
case "multiple":
p = getMultipleModeParameters()
default:
fmt.Printf("Unknown mode \"%s\", accepted modes are \"draw\", \"single\" and \"multiple\"\n", modeString)
os.Exit(1)
}
// Check some of the arguments
if (p.Mode == Draw || p.Mode == BenchAndDrawSingle || p.Mode == BenchAndDrawMultiple) && p.Scale < 1 {
fmt.Println("Scale must be a positive integer.")
os.Exit(1)
}
if (p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.N < 1 {
fmt.Println("N must be a positive integer.")
os.Exit(1)
}
if (p.Mode == BenchSingle || p.Mode == BenchAndDrawSingle || p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.Trials < 1 {
fmt.Println("Trials must be a positive integer.")
os.Exit(1)
}
// Run the appropriate mode
switch (p.Mode) {
case Draw:
runDrawMode(p)
case BenchSingle, BenchAndDrawSingle:
runSingleMode(p)
case BenchMultiple, BenchAndDrawMultiple:
runMultipleMode(p)
default:
panic("Assertion failed: unexpected mode")
}
fmt.Println("Success")
}
func getDrawModeParameters() PathyParameters {
if len(os.Args) != 5 {
fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0])
os.Exit(1)
}
p := PathyParameters{}
p.Mode = Draw
p.InPath = readNextArg()
p.OutPath = readNextArg()
p.Scale = MustParseInt(readNextArg())
return p
}
func getSingleModeParameters() PathyParameters {
if len(os.Args) != 9 && len(os.Args) != 11 {
fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0])
os.Exit(1)
}
p := PathyParameters{}
p.InPath = readNextArg()
p.StartX = MustParseInt(readNextArg())
p.StartY = MustParseInt(readNextArg())
p.GoalX = MustParseInt(readNextArg())
p.GoalY = MustParseInt(readNextArg())
p.Algo = MustParsePathfindingFunction(readNextArg())
p.Trials = MustParseInt(readNextArg())
if len(os.Args) == 11 {
p.Mode = BenchAndDrawSingle
p.OutPath = readNextArg()
p.Scale = MustParseInt(readNextArg())
} else {
p.Mode = BenchSingle
}
return p
}
func getMultipleModeParameters() PathyParameters {
if len(os.Args) != 6 && len(os.Args) != 8 {
fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0])
os.Exit(1)
}
p := PathyParameters{}
p.InPath = readNextArg()
p.Algo = MustParsePathfindingFunction(readNextArg())
p.N = MustParseInt(readNextArg())
p.Trials = MustParseInt(readNextArg())
if len(os.Args) == 8 | else {
p.Mode = BenchMultiple
}
return p
}
func runDrawMode(p PathyParameters) {
if p.Mode != Draw {
panic("Assertion failed: unexpected mode")
}
var err error
grid, err = LoadMap(p.InPath)
if err != nil {
fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error())
os.Exit(1)
}
img := MakeMapImage(p.Scale)
err = SaveImage(img, p.OutPath)
if err != nil {
fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error())
os.Exit(1)
}
}
func runSingleMode(p PathyParameters) {
if p.Mode != BenchSingle && p.Mode != BenchAndDrawSingle {
panic("Assertion failed: unexpected mode")
}
var err error
grid, err = LoadMap(p.InPath)
if err != nil {
fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error())
os.Exit(1)
}
start := NewNode(p.StartX, p.StartY)
goal := NewNode(p.GoalX, p.GoalY)
path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials)
fmt.Printf("Stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime)
if p.Mode == BenchAndDrawSingle {
img := MakeMapImage(p.Scale)
img = DrawPath(img, path, p.Scale)
err = SaveImage(img, p.OutPath)
if err != nil {
fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error())
os.Exit(1)
}
}
}
func runMultipleMode(p PathyParameters) {
if p.Mode != BenchMultiple && p.Mode != BenchAndDrawMultiple {
panic("Assertion failed: unexpected mode")
}
// Load scenarios
scenarios, err := LoadScenarios(p.InPath)
if err != nil {
fmt.Printf("Error loading scenarios file \"%s\": %s\n", p.InPath, err.Error())
os.Exit(1)
}
// Load map
mapPath := filepath.Join(filepath.Dir(p.InPath), scenarios[0].MapName)
grid, err = LoadMap(mapPath)
if err != nil {
fmt.Printf("Error reading map file \"%s\": %s\n", mapPath, err.Error())
os.Exit(1)
}
// If needed, create an output directory for images
if p.Mode == BenchAndDrawMultiple {
// Create the output directory if it doesn't exist
_, err := os.Stat(p.OutPath)
if os.IsNotExist(err) {
err = os.Mkdir(p.OutPath, os.ModeDir)
if err != nil {
fmt.Printf("Error creating output directory: %s\n", err.Error())
os.Exit(1)
}
}
}
// Select n evenly spread out scenarios
selectedScenarios := []Scenario{}
var inc float64
if p.N >= len(scenarios) {
inc = 1
p.N = len(scenarios)
} else {
inc = float64(len(scenarios)-1) / float64(p.N-1)
}
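// Walk the list with a fractional stride so the picks span the whole
// scenarios file rather than clustering at the start.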
for i := 0.0; i < float64(len(scenarios)); i += inc {
index := int(i)
selectedScenarios = append(selectedScenarios, scenarios[index])
}
// Assertion
if len(selectedScenarios) != p.N {
panic("Assertion failed: unexpected number of selected scenarios")
}
// Benchmark and draw scenarios
sumTurnCount := 0.0
sumPathLen := 0.0
sumAvgAngle := 0.0
sumAvgRuntime := 0
for _, scenario := range selectedScenarios {
// Assertion
if scenario.MapName != scenarios[0].MapName {
panic("Assertion failed: scenarios file referred to multiple map files")
}
sx, sy, gx, gy := scenario.Start.X, scenario.Start.Y, scenario.Goal.X, scenario.Goal.Y
start := NewNode(sx,sy)
goal := NewNode(gx,gy)
path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials)
fmt.Printf("(%d,%d) -> (%d,%d) stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", sx, sy, gx, gy, turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime)
sumTurnCount += float64(turns)
sumPathLen += pathLen
sumAvgAngle += avgAngle
sumAvgRuntime += avgRuntime
if p.Mode == BenchAndDrawMultiple {
// Create a nice name for the image
ext := filepath.Ext(scenario.MapName)
fname := scenario.MapName[0:len(scenario.MapName)-len(ext)]
fname = fmt.Sprintf("%s_%d_%d_%d_%d.jpg", fname, sx, sy, gx, gy)
out := filepath.Join(p.OutPath, fname)
img := MakeMapImage(p.Scale)
img = DrawPath(img, path, p.Scale)
err = SaveImage(img, out)
if err != nil {
fmt.Printf("Error writing image \"%s\": %s\n", out, err.Error())
os.Exit(1)
}
}
}
// Stats are the average across all selected scenarios
overallTurnCount := sumTurnCount / float64(p.N)
overallPathLen := sumPathLen / float64(p.N)
overallAvgAngle := sumAvgAngle / float64(p.N)
overallAvgRuntime := sumAvgRuntime / p.N
fmt.Printf("\nAvg stats: %f turn(s), length %f, avg angle %f rad (%.1f deg), runtime %dms\n", overallTurnCount, overallPathLen, overallAvgAngle, overallAvgAngle*radToDeg, overallAvgRuntime)
}
/*
* Performs test runs of the scenario. Returns the following things:
* the computed path
* turn count
* path length
* average angle of turns (radians)
* average runtime (ms)
*/
func testOneScenario(start, goal Node, algo func (start, goal Node) []Node, trials int) ([]Node, int, float64, float64, int) {
var path []Node
// Get path and average runtime
totalRuntime := 0.0
for i := 0; i < trials; i++ {
before := time.Now()
path = algo(start, goal)
after := time.Now()
elapsed := after.Sub(before)
totalRuntime += float64(elapsed.Milliseconds())
}
avgRuntime := int(math.Round(totalRuntime/float64(trials)))
// Calculate path length
pathLen := 0.0
for i := 0; i < len(path)-1; i++ {
n1 := path[i]
n2 := path[i+1]
pathLen += StraightLineDist(n1, n2)
}
// Calculate turn count and average angle of turns
turns := 0
avgAngle := 0.0
for i := 0; i < len(path)-2; i++ {
n1 := path[i]
n2 := path[i+1]
n3 := path[i+2]
// There are two vectors (n1,n2) and (n2,n3)
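// The turn angle at n2 follows from the dot product: angle = acos(dot(v1, v2) / (|v1| * |v2|)).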
v1_x, v1_y := float64(n2.X - n1.X), float64(n2.Y - n1.Y)
v2_x, v2_y := float64(n3.X - n2.X), float64(n3.Y - n2.Y)
dot := v1_x * v2_x + v1_y * v2_y
v1_len := math.Sqrt(v1_x * v1_x + v1_y * v1_y)
v2_len := math.Sqrt(v2_x * v2_x + v2_y * v2_y)
a := dot / (v1_len * v2_len)
a = math.Max(-1, math.Min(1, a)) // Rounding errors may produce values outside [-1,1] so clamp it.
angle := math.Acos(a)
if angle >= 0.001 {
// We are turning at node n2, where the two segments meet
avgAngle += angle
turns++
}
}
if turns > 0 {
avgAngle /= float64(turns)
}
return path, turns, pathLen, avgAngle, avgRuntime
}
func MustParsePathfindingFunction(algoName string) func(Node, Node) []Node {
switch strings.ToLower(algoName) {
case "dijkstra":
return Dijkstra
case "astar":
return AStar
case "astar-ps":
return AStarPs
case "thetastar":
return ThetaStar
// case "ap-thetastar":
// return true, ApThetaStar
}
fmt.Printf("Unknown algorithm \"%s\"\n", algoName)
os.Exit(1)
return func(Node, Node) []Node { return []Node{} }
}
func MustParseInt(arg string) int {
n, err := strconv.Atoi(arg)
if err != nil {
fmt.Printf("Non-int argument \"%s\"\n", arg)
os.Exit(1)
}
return n
}
| {
p.Mode = BenchAndDrawMultiple
p.OutPath = readNextArg()
p.Scale = MustParseInt(readNextArg())
} | conditional_block |
pathy.go | package main
import (
"fmt"
"os"
"strings"
"path/filepath"
"strconv"
"time"
"math"
)
type PathyMode int
const (
Draw PathyMode = iota
BenchSingle
BenchAndDrawSingle
BenchMultiple
BenchAndDrawMultiple
)
type PathyParameters struct {
Mode PathyMode
InPath string
OutPath string
Scale int
Algo func(Node, Node) []Node
N int
Trials int
StartX, StartY, GoalX, GoalY int
}
var counter = 0
func readNextArg() string {
arg := os.Args[counter]
counter++
return arg
}
func main() {
// Print help
if len(os.Args) < 2 {
fmt.Printf("%s is a tool for visualization and benchmarking of pathfinding algorithms.\n\n", os.Args[0])
fmt.Println("To draw a map:")
fmt.Printf(" %s draw map_file output_jpg scale\n", os.Args[0])
fmt.Println("To benchmark one scenario:")
fmt.Printf(" %s single map_file start_x start_y goal_x goal_y algorithm trials\n", os.Args[0])
fmt.Println("To benchmark one scenario and draw its path:")
fmt.Printf(" %s single map_file start_x start_y goal_x goal_y algorithm trials output_jpg scale\n", os.Args[0])
fmt.Println("To benchmark multiple scenarios:")
fmt.Printf(" %s multiple scenarios_file algorithm n trials\n", os.Args[0])
fmt.Println("To benchmark multiple scenarios and draw their paths:")
fmt.Printf(" %s multiple scenarios_file algorithm n trials output_dir scale\n\n", os.Args[0])
fmt.Println("Accepted algorithms are \"dijkstra\", \"astar\", \"astar-ps\" and \"thetastar\". N is the amount of scenarios to pick from the file. They are evenly spread out in terms of problem size.")
os.Exit(0)
}
readNextArg() // Skip program name
modeString := readNextArg()
var p PathyParameters
// Read command-line arguments
switch (strings.ToLower(modeString)) {
case "draw":
p = getDrawModeParameters()
case "single":
p = getSingleModeParameters()
case "multiple":
p = getMultipleModeParameters()
default:
fmt.Printf("Unknown mode \"%s\", accepted modes are \"draw\", \"single\" and \"multiple\"\n", modeString)
os.Exit(1)
}
// Check some of the arguments
if (p.Mode == Draw || p.Mode == BenchAndDrawSingle || p.Mode == BenchAndDrawMultiple) && p.Scale < 1 {
fmt.Println("Scale must be a positive integer.")
os.Exit(1)
}
if (p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.N < 1 {
fmt.Println("N must be a positive integer.")
os.Exit(1)
}
if (p.Mode == BenchSingle || p.Mode == BenchAndDrawSingle || p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.Trials < 1 {
fmt.Println("Trials must be a positive integer.")
os.Exit(1)
}
// Run the appropriate mode
switch (p.Mode) {
case Draw:
runDrawMode(p)
case BenchSingle, BenchAndDrawSingle:
runSingleMode(p)
case BenchMultiple, BenchAndDrawMultiple:
runMultipleMode(p)
default:
panic("Assertion failed: unexpected mode")
}
fmt.Println("Success")
}
func | () PathyParameters {
if len(os.Args) != 5 {
fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0])
os.Exit(1)
}
p := PathyParameters{}
p.Mode = Draw
p.InPath = readNextArg()
p.OutPath = readNextArg()
p.Scale = MustParseInt(readNextArg())
return p
}
func getSingleModeParameters() PathyParameters {
if len(os.Args) != 9 && len(os.Args) != 11 {
fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0])
os.Exit(1)
}
p := PathyParameters{}
p.InPath = readNextArg()
p.StartX = MustParseInt(readNextArg())
p.StartY = MustParseInt(readNextArg())
p.GoalX = MustParseInt(readNextArg())
p.GoalY = MustParseInt(readNextArg())
p.Algo = MustParsePathfindingFunction(readNextArg())
p.Trials = MustParseInt(readNextArg())
if len(os.Args) == 11 {
p.Mode = BenchAndDrawSingle
p.OutPath = readNextArg()
p.Scale = MustParseInt(readNextArg())
} else {
p.Mode = BenchSingle
}
return p
}
func getMultipleModeParameters() PathyParameters {
if len(os.Args) != 6 && len(os.Args) != 8 {
fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0])
os.Exit(1)
}
p := PathyParameters{}
p.InPath = readNextArg()
p.Algo = MustParsePathfindingFunction(readNextArg())
p.N = MustParseInt(readNextArg())
p.Trials = MustParseInt(readNextArg())
if len(os.Args) == 8 {
p.Mode = BenchAndDrawMultiple
p.OutPath = readNextArg()
p.Scale = MustParseInt(readNextArg())
} else {
p.Mode = BenchMultiple
}
return p
}
func runDrawMode(p PathyParameters) {
if p.Mode != Draw {
panic("Assertion failed: unexpected mode")
}
var err error
grid, err = LoadMap(p.InPath)
if err != nil {
fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error())
os.Exit(1)
}
img := MakeMapImage(p.Scale)
err = SaveImage(img, p.OutPath)
if err != nil {
fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error())
os.Exit(1)
}
}
func runSingleMode(p PathyParameters) {
if p.Mode != BenchSingle && p.Mode != BenchAndDrawSingle {
panic("Assertion failed: unexpected mode")
}
var err error
grid, err = LoadMap(p.InPath)
if err != nil {
fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error())
os.Exit(1)
}
start := NewNode(p.StartX, p.StartY)
goal := NewNode(p.GoalX, p.GoalY)
path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials)
fmt.Printf("Stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime)
if p.Mode == BenchAndDrawSingle {
img := MakeMapImage(p.Scale)
img = DrawPath(img, path, p.Scale)
err = SaveImage(img, p.OutPath)
if err != nil {
fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error())
os.Exit(1)
}
}
}
func runMultipleMode(p PathyParameters) {
if p.Mode != BenchMultiple && p.Mode != BenchAndDrawMultiple {
panic("Assertion failed: unexpected mode")
}
// Load scenarios
scenarios, err := LoadScenarios(p.InPath)
if err != nil {
fmt.Printf("Error loading scenarios file \"%s\": %s\n", p.InPath, err.Error())
os.Exit(1)
}
// Load map
mapPath := filepath.Join(filepath.Dir(p.InPath), scenarios[0].MapName)
grid, err = LoadMap(mapPath)
if err != nil {
fmt.Printf("Error reading map file \"%s\": %s\n", mapPath, err.Error())
os.Exit(1)
}
// If needed, create an output directory for images
if p.Mode == BenchAndDrawMultiple {
// Create the output directory if it doesn't exist
_, err := os.Stat(p.OutPath)
if os.IsNotExist(err) {
err = os.Mkdir(p.OutPath, os.ModeDir)
if err != nil {
fmt.Printf("Error creating output directory: %s\n", err.Error())
os.Exit(1)
}
}
}
// Select n evenly spread out scenarios
selectedScenarios := []Scenario{}
var inc float64
if p.N >= len(scenarios) {
inc = 1
p.N = len(scenarios)
} else {
inc = float64(len(scenarios)-1) / float64(p.N-1)
}
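// Walk the list with a fractional stride so the picks span the whole
// scenarios file rather than clustering at the start.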
for i := 0.0; i < float64(len(scenarios)); i += inc {
index := int(i)
selectedScenarios = append(selectedScenarios, scenarios[index])
}
// Assertion
if len(selectedScenarios) != p.N {
panic("Assertion failed: unexpected number of selected scenarios")
}
// Benchmark and draw scenarios
sumTurnCount := 0.0
sumPathLen := 0.0
sumAvgAngle := 0.0
sumAvgRuntime := 0
for _, scenario := range selectedScenarios {
// Assertion
if scenario.MapName != scenarios[0].MapName {
panic("Assertion failed: scenarios file referred to multiple map files")
}
sx, sy, gx, gy := scenario.Start.X, scenario.Start.Y, scenario.Goal.X, scenario.Goal.Y
start := NewNode(sx,sy)
goal := NewNode(gx,gy)
path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials)
fmt.Printf("(%d,%d) -> (%d,%d) stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", sx, sy, gx, gy, turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime)
sumTurnCount += float64(turns)
sumPathLen += pathLen
sumAvgAngle += avgAngle
sumAvgRuntime += avgRuntime
if p.Mode == BenchAndDrawMultiple {
// Create a nice name for the image
ext := filepath.Ext(scenario.MapName)
fname := scenario.MapName[0:len(scenario.MapName)-len(ext)]
fname = fmt.Sprintf("%s_%d_%d_%d_%d.jpg", fname, sx, sy, gx, gy)
out := filepath.Join(p.OutPath, fname)
img := MakeMapImage(p.Scale)
img = DrawPath(img, path, p.Scale)
err = SaveImage(img, out)
if err != nil {
fmt.Printf("Error writing image \"%s\": %s\n", out, err.Error())
os.Exit(1)
}
}
}
// Stats are the average across all selected scenarios
overallTurnCount := sumTurnCount / float64(p.N)
overallPathLen := sumPathLen / float64(p.N)
overallAvgAngle := sumAvgAngle / float64(p.N)
overallAvgRuntime := sumAvgRuntime / p.N
fmt.Printf("\nAvg stats: %f turn(s), length %f, avg angle %f rad (%.1f deg), runtime %dms\n", overallTurnCount, overallPathLen, overallAvgAngle, overallAvgAngle*radToDeg, overallAvgRuntime)
}
/*
* Performs test runs of the scenario. Returns the following things:
* the computed path
* turn count
* path length
* average angle of turns (radians)
* average runtime (ms)
*/
func testOneScenario(start, goal Node, algo func (start, goal Node) []Node, trials int) ([]Node, int, float64, float64, int) {
var path []Node
// Get path and average runtime
totalRuntime := 0.0
for i := 0; i < trials; i++ {
before := time.Now()
path = algo(start, goal)
after := time.Now()
elapsed := after.Sub(before)
totalRuntime += float64(elapsed.Milliseconds())
}
avgRuntime := int(math.Round(totalRuntime/float64(trials)))
// Calculate path length
pathLen := 0.0
for i := 0; i < len(path)-1; i++ {
n1 := path[i]
n2 := path[i+1]
pathLen += StraightLineDist(n1, n2)
}
// Calculate turn count and average angle of turns
turns := 0
avgAngle := 0.0
for i := 0; i < len(path)-2; i++ {
n1 := path[i]
n2 := path[i+1]
n3 := path[i+2]
// There are two vectors (n1,n2) and (n2,n3)
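// The turn angle at n2 follows from the dot product: angle = acos(dot(v1, v2) / (|v1| * |v2|)).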
v1_x, v1_y := float64(n2.X - n1.X), float64(n2.Y - n1.Y)
v2_x, v2_y := float64(n3.X - n2.X), float64(n3.Y - n2.Y)
dot := v1_x * v2_x + v1_y * v2_y
v1_len := math.Sqrt(v1_x * v1_x + v1_y * v1_y)
v2_len := math.Sqrt(v2_x * v2_x + v2_y * v2_y)
a := dot / (v1_len * v2_len)
a = math.Max(-1, math.Min(1, a)) // Rounding errors may produce values outside [-1,1] so clamp it.
angle := math.Acos(a)
if angle >= 0.001 {
// We are turning at node n2, where the two segments meet
avgAngle += angle
turns++
}
}
if turns > 0 {
avgAngle /= float64(turns)
}
return path, turns, pathLen, avgAngle, avgRuntime
}
func MustParsePathfindingFunction(algoName string) func(Node, Node) []Node {
switch strings.ToLower(algoName) {
case "dijkstra":
return Dijkstra
case "astar":
return AStar
case "astar-ps":
return AStarPs
case "thetastar":
return ThetaStar
// case "ap-thetastar":
// return true, ApThetaStar
}
fmt.Printf("Unknown algorithm \"%s\"\n", algoName)
os.Exit(1)
return func(Node, Node) []Node { return []Node{} }
}
func MustParseInt(arg string) int {
n, err := strconv.Atoi(arg)
if err != nil {
fmt.Printf("Non-int argument \"%s\"\n", arg)
os.Exit(1)
}
return n
}
| getDrawModeParameters | identifier_name |
pathy.go | package main
import (
"fmt"
"os"
"strings"
"path/filepath"
"strconv"
"time"
"math"
)
type PathyMode int
const (
Draw PathyMode = iota
BenchSingle
BenchAndDrawSingle
BenchMultiple
BenchAndDrawMultiple
)
type PathyParameters struct {
Mode PathyMode
InPath string
OutPath string
Scale int
Algo func(Node, Node) []Node
N int
Trials int
StartX, StartY, GoalX, GoalY int
}
var counter = 0
func readNextArg() string {
arg := os.Args[counter]
counter++
return arg
}
func main() {
// Print help
if len(os.Args) < 2 {
fmt.Printf("%s is a tool for visualization and benchmarking of pathfinding algorithms.\n\n", os.Args[0])
fmt.Println("To draw a map:")
fmt.Printf(" %s draw map_file output_jpg scale\n", os.Args[0])
fmt.Println("To benchmark one scenario:")
fmt.Printf(" %s single map_file start_x start_y goal_x goal_y algorithm trials\n", os.Args[0])
fmt.Println("To benchmark one scenario and draw its path:")
fmt.Printf(" %s single map_file start_x start_y goal_x goal_y algorithm trials output_jpg scale\n", os.Args[0])
fmt.Println("To benchmark multiple scenarios:")
fmt.Printf(" %s multiple scenarios_file algorithm n trials\n", os.Args[0])
fmt.Println("To benchmark multiple scenarios and draw their paths:")
fmt.Printf(" %s multiple scenarios_file algorithm n trials output_dir scale\n\n", os.Args[0])
fmt.Println("Accepted algorithms are \"dijkstra\", \"astar\", \"astar-ps\" and \"thetastar\". N is the amount of scenarios to pick from the file. They are evenly spread out in terms of problem size.")
os.Exit(0)
}
readNextArg() // Skip program name
modeString := readNextArg()
var p PathyParameters
// Read command-line arguments
switch (strings.ToLower(modeString)) {
case "draw":
p = getDrawModeParameters()
case "single":
p = getSingleModeParameters()
case "multiple":
p = getMultipleModeParameters()
default:
fmt.Printf("Unknown mode \"%s\", accepted modes are \"draw\", \"single\" and \"multiple\"\n", modeString)
os.Exit(1)
}
// Check some of the arguments
if (p.Mode == Draw || p.Mode == BenchAndDrawSingle || p.Mode == BenchAndDrawMultiple) && p.Scale < 1 {
fmt.Println("Scale must be a positive integer.")
os.Exit(1)
}
if (p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.N < 1 {
fmt.Println("N must be a positive integer.")
os.Exit(1)
}
if (p.Mode == BenchSingle || p.Mode == BenchAndDrawSingle || p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.Trials < 1 {
fmt.Println("Trials must be a positive integer.")
os.Exit(1)
}
// Run the appropriate mode
switch (p.Mode) {
case Draw:
runDrawMode(p)
case BenchSingle, BenchAndDrawSingle:
runSingleMode(p)
case BenchMultiple, BenchAndDrawMultiple:
runMultipleMode(p)
default:
panic("Assertion failed: unexpected mode")
}
fmt.Println("Success")
}
func getDrawModeParameters() PathyParameters {
if len(os.Args) != 5 {
fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0])
os.Exit(1)
}
p := PathyParameters{}
p.Mode = Draw
p.InPath = readNextArg()
p.OutPath = readNextArg()
p.Scale = MustParseInt(readNextArg())
return p
}
func getSingleModeParameters() PathyParameters {
if len(os.Args) != 9 && len(os.Args) != 11 {
fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0])
os.Exit(1)
}
p := PathyParameters{}
p.InPath = readNextArg()
p.StartX = MustParseInt(readNextArg())
p.StartY = MustParseInt(readNextArg())
p.GoalX = MustParseInt(readNextArg())
p.GoalY = MustParseInt(readNextArg())
p.Algo = MustParsePathfindingFunction(readNextArg())
p.Trials = MustParseInt(readNextArg())
if len(os.Args) == 11 {
p.Mode = BenchAndDrawSingle
p.OutPath = readNextArg()
p.Scale = MustParseInt(readNextArg())
} else {
p.Mode = BenchSingle
}
return p
}
func getMultipleModeParameters() PathyParameters {
if len(os.Args) != 6 && len(os.Args) != 8 {
fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0])
os.Exit(1)
}
p := PathyParameters{}
p.InPath = readNextArg()
p.Algo = MustParsePathfindingFunction(readNextArg())
p.N = MustParseInt(readNextArg())
p.Trials = MustParseInt(readNextArg())
if len(os.Args) == 8 {
p.Mode = BenchAndDrawMultiple
p.OutPath = readNextArg()
p.Scale = MustParseInt(readNextArg())
} else {
p.Mode = BenchMultiple
}
return p
}
func runDrawMode(p PathyParameters) {
if p.Mode != Draw {
panic("Assertion failed: unexpected mode")
}
var err error
grid, err = LoadMap(p.InPath)
if err != nil {
fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error())
os.Exit(1)
}
img := MakeMapImage(p.Scale)
err = SaveImage(img, p.OutPath)
if err != nil {
fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error())
os.Exit(1)
}
}
func runSingleMode(p PathyParameters) {
if p.Mode != BenchSingle && p.Mode != BenchAndDrawSingle {
panic("Assertion failed: unexpected mode")
}
var err error
grid, err = LoadMap(p.InPath)
if err != nil {
fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error())
os.Exit(1)
}
start := NewNode(p.StartX, p.StartY)
goal := NewNode(p.GoalX, p.GoalY)
path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials)
fmt.Printf("Stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime) | img = DrawPath(img, path, p.Scale)
err = SaveImage(img, p.OutPath)
if err != nil {
fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error())
os.Exit(1)
}
}
}
func runMultipleMode(p PathyParameters) {
if p.Mode != BenchMultiple && p.Mode != BenchAndDrawMultiple {
panic("Assertion failed: unexpected mode")
}
// Load scenarios
scenarios, err := LoadScenarios(p.InPath)
if err != nil {
fmt.Printf("Error loading scenarios file \"%s\": %s\n", p.InPath, err.Error())
os.Exit(1)
}
// Load map
mapPath := filepath.Join(filepath.Dir(p.InPath), scenarios[0].MapName)
grid, err = LoadMap(mapPath)
if err != nil {
fmt.Printf("Error reading map file \"%s\": %s\n", mapPath, err.Error())
os.Exit(1)
}
// If needed, create an output directory for images
if p.Mode == BenchAndDrawMultiple {
// Create the output directory if it doesn't exist
_, err := os.Stat(p.OutPath)
if os.IsNotExist(err) {
err = os.Mkdir(p.OutPath, os.ModeDir)
if err != nil {
fmt.Printf("Error creating output directory: %s\n", err.Error())
os.Exit(1)
}
}
}
// Select n evenly spread out scenarios
selectedScenarios := []Scenario{}
var inc float64
if p.N >= len(scenarios) {
inc = 1
p.N = len(scenarios)
} else {
inc = float64(len(scenarios)-1) / float64(p.N-1)
}
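// Walk the list with a fractional stride so the picks span the whole
// scenarios file rather than clustering at the start.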
for i := 0.0; i < float64(len(scenarios)); i += inc {
index := int(i)
selectedScenarios = append(selectedScenarios, scenarios[index])
}
// Assertion
if len(selectedScenarios) != p.N {
panic("Assertion failed: unexpected number of selected scenarios")
}
// Benchmark and draw scenarios
sumTurnCount := 0.0
sumPathLen := 0.0
sumAvgAngle := 0.0
sumAvgRuntime := 0
for _, scenario := range selectedScenarios {
// Assertion
if scenario.MapName != scenarios[0].MapName {
panic("Assertion failed: scenarios file referred to multiple map files")
}
sx, sy, gx, gy := scenario.Start.X, scenario.Start.Y, scenario.Goal.X, scenario.Goal.Y
start := NewNode(sx,sy)
goal := NewNode(gx,gy)
path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials)
fmt.Printf("(%d,%d) -> (%d,%d) stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", sx, sy, gx, gy, turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime)
sumTurnCount += float64(turns)
sumPathLen += pathLen
sumAvgAngle += avgAngle
sumAvgRuntime += avgRuntime
if p.Mode == BenchAndDrawMultiple {
// Create a nice name for the image
ext := filepath.Ext(scenario.MapName)
fname := scenario.MapName[0:len(scenario.MapName)-len(ext)]
fname = fmt.Sprintf("%s_%d_%d_%d_%d.jpg", fname, sx, sy, gx, gy)
out := filepath.Join(p.OutPath, fname)
img := MakeMapImage(p.Scale)
img = DrawPath(img, path, p.Scale)
err = SaveImage(img, out)
if err != nil {
fmt.Printf("Error writing image \"%s\": %s\n", out, err.Error())
os.Exit(1)
}
}
}
// Stats are the average across all selected scenarios
overallTurnCount := sumTurnCount / float64(p.N)
overallPathLen := sumPathLen / float64(p.N)
overallAvgAngle := sumAvgAngle / float64(p.N)
overallAvgRuntime := sumAvgRuntime / p.N
fmt.Printf("\nAvg stats: %f turn(s), length %f, avg angle %f rad (%.1f deg), runtime %dms\n", overallTurnCount, overallPathLen, overallAvgAngle, overallAvgAngle*radToDeg, overallAvgRuntime)
}
/*
* Performs test runs of the scenario. Returns the following things:
* the computed path
* turn count
* path length
* average angle of turns (radians)
* average runtime (ms)
*/
func testOneScenario(start, goal Node, algo func (start, goal Node) []Node, trials int) ([]Node, int, float64, float64, int) {
var path []Node
// Get path and average runtime
totalRuntime := 0.0
for i := 0; i < trials; i++ {
before := time.Now()
path = algo(start, goal)
after := time.Now()
elapsed := after.Sub(before)
totalRuntime += float64(elapsed.Milliseconds())
}
avgRuntime := int(math.Round(totalRuntime/float64(trials)))
// Calculate path length
pathLen := 0.0
for i := 0; i < len(path)-1; i++ {
n1 := path[i]
n2 := path[i+1]
pathLen += StraightLineDist(n1, n2)
}
// Calculate turn count and average angle of turns
turns := 0
avgAngle := 0.0
for i := 0; i < len(path)-2; i++ {
n1 := path[i]
n2 := path[i+1]
n3 := path[i+2]
// There are two vectors (n1,n2) and (n2,n3)
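// The turn angle at n2 follows from the dot product: angle = acos(dot(v1, v2) / (|v1| * |v2|)).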
v1_x, v1_y := float64(n2.X - n1.X), float64(n2.Y - n1.Y)
v2_x, v2_y := float64(n3.X - n2.X), float64(n3.Y - n2.Y)
dot := v1_x * v2_x + v1_y * v2_y
v1_len := math.Sqrt(v1_x * v1_x + v1_y * v1_y)
v2_len := math.Sqrt(v2_x * v2_x + v2_y * v2_y)
a := dot / (v1_len * v2_len)
a = math.Max(-1, math.Min(1, a)) // Rounding errors may produce values outside [-1,1] so clamp it.
angle := math.Acos(a)
if angle >= 0.001 {
// We are turning at node n2, where the two segments meet
avgAngle += angle
turns++
}
}
if turns > 0 {
avgAngle /= float64(turns)
}
return path, turns, pathLen, avgAngle, avgRuntime
}
func MustParsePathfindingFunction(algoName string) func(Node, Node) []Node {
switch strings.ToLower(algoName) {
case "dijkstra":
return Dijkstra
case "astar":
return AStar
case "astar-ps":
return AStarPs
case "thetastar":
return ThetaStar
// case "ap-thetastar":
// return true, ApThetaStar
}
fmt.Printf("Unknown algorithm \"%s\"\n", algoName)
os.Exit(1)
return func(Node, Node) []Node { return []Node{} }
}
func MustParseInt(arg string) int {
n, err := strconv.Atoi(arg)
if err != nil {
fmt.Printf("Non-int argument \"%s\"\n", arg)
os.Exit(1)
}
return n
} |
if p.Mode == BenchAndDrawSingle {
img := MakeMapImage(p.Scale) | random_line_split |
pathy.go | package main
import (
"fmt"
"os"
"strings"
"path/filepath"
"strconv"
"time"
"math"
)
type PathyMode int
const (
Draw PathyMode = iota
BenchSingle
BenchAndDrawSingle
BenchMultiple
BenchAndDrawMultiple
)
type PathyParameters struct {
Mode PathyMode
InPath string
OutPath string
Scale int
Algo func(Node, Node) []Node
N int
Trials int
StartX, StartY, GoalX, GoalY int
}
var counter = 0
func readNextArg() string {
arg := os.Args[counter]
counter++
return arg
}
func main() {
// Print help
if len(os.Args) < 2 {
fmt.Printf("%s is a tool for visualization and benchmarking of pathfinding algorithms.\n\n", os.Args[0])
fmt.Println("To draw a map:")
fmt.Printf(" %s draw map_file output_jpg scale\n", os.Args[0])
fmt.Println("To benchmark one scenario:")
fmt.Printf(" %s single map_file start_x start_y goal_x goal_y algorithm trials\n", os.Args[0])
fmt.Println("To benchmark one scenario and draw its path:")
fmt.Printf(" %s single map_file start_x start_y goal_x goal_y algorithm trials output_jpg scale\n", os.Args[0])
fmt.Println("To benchmark multiple scenarios:")
fmt.Printf(" %s multiple scenarios_file algorithm n trials\n", os.Args[0])
fmt.Println("To benchmark multiple scenarios and draw their paths:")
fmt.Printf(" %s multiple scenarios_file algorithm n trials output_dir scale\n\n", os.Args[0])
fmt.Println("Accepted algorithms are \"dijkstra\", \"astar\", \"astar-ps\" and \"thetastar\". N is the amount of scenarios to pick from the file. They are evenly spread out in terms of problem size.")
os.Exit(0)
}
readNextArg() // Skip program name
modeString := readNextArg()
var p PathyParameters
// Read command-line arguments
switch (strings.ToLower(modeString)) {
case "draw":
p = getDrawModeParameters()
case "single":
p = getSingleModeParameters()
case "multiple":
p = getMultipleModeParameters()
default:
fmt.Printf("Unknown mode \"%s\", accepted modes are \"draw\", \"single\" and \"multiple\"\n", modeString)
os.Exit(1)
}
// Check some of the arguments
if (p.Mode == Draw || p.Mode == BenchAndDrawSingle || p.Mode == BenchAndDrawMultiple) && p.Scale < 1 {
fmt.Println("Scale must be a positive integer.")
os.Exit(1)
}
if (p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.N < 1 {
fmt.Println("N must be a positive integer.")
os.Exit(1)
}
if (p.Mode == BenchSingle || p.Mode == BenchAndDrawSingle || p.Mode == BenchMultiple || p.Mode == BenchAndDrawMultiple) && p.Trials < 1 {
fmt.Println("Trials must be a positive integer.")
os.Exit(1)
}
// Run the appropriate mode
switch (p.Mode) {
case Draw:
runDrawMode(p)
case BenchSingle, BenchAndDrawSingle:
runSingleMode(p)
case BenchMultiple, BenchAndDrawMultiple:
runMultipleMode(p)
default:
panic("Assertion failed: unexpected mode")
}
fmt.Println("Success")
}
func getDrawModeParameters() PathyParameters {
if len(os.Args) != 5 {
fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0])
os.Exit(1)
}
p := PathyParameters{}
p.Mode = Draw
p.InPath = readNextArg()
p.OutPath = readNextArg()
p.Scale = MustParseInt(readNextArg())
return p
}
func getSingleModeParameters() PathyParameters {
if len(os.Args) != 9 && len(os.Args) != 11 {
fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0])
os.Exit(1)
}
p := PathyParameters{}
p.InPath = readNextArg()
p.StartX = MustParseInt(readNextArg())
p.StartY = MustParseInt(readNextArg())
p.GoalX = MustParseInt(readNextArg())
p.GoalY = MustParseInt(readNextArg())
p.Algo = MustParsePathfindingFunction(readNextArg())
p.Trials = MustParseInt(readNextArg())
if len(os.Args) == 11 {
p.Mode = BenchAndDrawSingle
p.OutPath = readNextArg()
p.Scale = MustParseInt(readNextArg())
} else {
p.Mode = BenchSingle
}
return p
}
func getMultipleModeParameters() PathyParameters {
if len(os.Args) != 6 && len(os.Args) != 8 {
fmt.Printf("Wrong number of arguments. Run %s without parameters for more info.\n", os.Args[0])
os.Exit(1)
}
p := PathyParameters{}
p.InPath = readNextArg()
p.Algo = MustParsePathfindingFunction(readNextArg())
p.N = MustParseInt(readNextArg())
p.Trials = MustParseInt(readNextArg())
if len(os.Args) == 8 {
p.Mode = BenchAndDrawMultiple
p.OutPath = readNextArg()
p.Scale = MustParseInt(readNextArg())
} else {
p.Mode = BenchMultiple
}
return p
}
func runDrawMode(p PathyParameters) {
if p.Mode != Draw {
panic("Assertion failed: unexpected mode")
}
var err error
grid, err = LoadMap(p.InPath)
if err != nil {
fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error())
os.Exit(1)
}
img := MakeMapImage(p.Scale)
err = SaveImage(img, p.OutPath)
if err != nil {
fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error())
os.Exit(1)
}
}
func runSingleMode(p PathyParameters) |
func runMultipleMode(p PathyParameters) {
if p.Mode != BenchMultiple && p.Mode != BenchAndDrawMultiple {
panic("Assertion failed: unexpected mode")
}
// Load scenarios
scenarios, err := LoadScenarios(p.InPath)
if err != nil {
fmt.Printf("Error loading scenarios file \"%s\": %s\n", p.InPath, err.Error())
os.Exit(1)
}
// Load map
mapPath := filepath.Join(filepath.Dir(p.InPath), scenarios[0].MapName)
grid, err = LoadMap(mapPath)
if err != nil {
fmt.Printf("Error reading map file \"%s\": %s\n", mapPath, err.Error())
os.Exit(1)
}
// If needed, create an output directory for images
if p.Mode == BenchAndDrawMultiple {
// Create the output directory if it doesn't exist
_, err := os.Stat(p.OutPath)
if os.IsNotExist(err) {
err = os.Mkdir(p.OutPath, os.ModeDir)
if err != nil {
fmt.Printf("Error creating output directory: %s\n", err.Error())
os.Exit(1)
}
}
}
// Select n evenly spread out scenarios
selectedScenarios := []Scenario{}
var inc float64
if p.N >= len(scenarios) {
inc = 1
p.N = len(scenarios)
} else {
inc = float64(len(scenarios)-1) / float64(p.N-1)
}
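// Walk the list with a fractional stride so the picks span the whole
// scenarios file rather than clustering at the start.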
for i := 0.0; i < float64(len(scenarios)); i += inc {
index := int(i)
selectedScenarios = append(selectedScenarios, scenarios[index])
}
// Assertion
if len(selectedScenarios) != p.N {
panic("Assertion failed: unexpected number of selected scenarios")
}
// Benchmark and draw scenarios
sumTurnCount := 0.0
sumPathLen := 0.0
sumAvgAngle := 0.0
sumAvgRuntime := 0
for _, scenario := range selectedScenarios {
// Assertion
if scenario.MapName != scenarios[0].MapName {
panic("Assertion failed: scenarios file referred to multiple map files")
}
sx, sy, gx, gy := scenario.Start.X, scenario.Start.Y, scenario.Goal.X, scenario.Goal.Y
start := NewNode(sx,sy)
goal := NewNode(gx,gy)
path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials)
fmt.Printf("(%d,%d) -> (%d,%d) stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", sx, sy, gx, gy, turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime)
sumTurnCount += float64(turns)
sumPathLen += pathLen
sumAvgAngle += avgAngle
sumAvgRuntime += avgRuntime
if p.Mode == BenchAndDrawMultiple {
// Create a nice name for the image
ext := filepath.Ext(scenario.MapName)
fname := scenario.MapName[0:len(scenario.MapName)-len(ext)]
fname = fmt.Sprintf("%s_%d_%d_%d_%d.jpg", fname, sx, sy, gx, gy)
out := filepath.Join(p.OutPath, fname)
img := MakeMapImage(p.Scale)
img = DrawPath(img, path, p.Scale)
err = SaveImage(img, out)
if err != nil {
fmt.Printf("Error writing image \"%s\": %s\n", out, err.Error())
os.Exit(1)
}
}
}
// Stats are the average across all selected scenarios
overallTurnCount := sumTurnCount / float64(p.N)
overallPathLen := sumPathLen / float64(p.N)
overallAvgAngle := sumAvgAngle / float64(p.N)
overallAvgRuntime := sumAvgRuntime / p.N
fmt.Printf("\nAvg stats: %f turn(s), length %f, avg angle %f rad (%.1f deg), runtime %dms\n", overallTurnCount, overallPathLen, overallAvgAngle, overallAvgAngle*radToDeg, overallAvgRuntime)
}
/*
* Performs test runs of the scenario. Returns the following things:
* the computed path
* turn count
* path length
* average angle of turns (radians)
* average runtime (ms)
*/
func testOneScenario(start, goal Node, algo func (start, goal Node) []Node, trials int) ([]Node, int, float64, float64, int) {
var path []Node
// Get path and average runtime
totalRuntime := 0.0
for i := 0; i < trials; i++ {
before := time.Now()
path = algo(start, goal)
after := time.Now()
elapsed := after.Sub(before)
totalRuntime += float64(elapsed.Milliseconds())
}
avgRuntime := int(math.Round(totalRuntime/float64(trials)))
// Calculate path length
pathLen := 0.0
for i := 0; i < len(path)-1; i++ {
n1 := path[i]
n2 := path[i+1]
pathLen += StraightLineDist(n1, n2)
}
// Calculate turn count and average angle of turns
turns := 0
avgAngle := 0.0
for i := 0; i < len(path)-2; i++ {
n1 := path[i]
n2 := path[i+1]
n3 := path[i+2]
// There are two vectors (n1,n2) and (n2,n3)
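// The turn angle at n2 follows from the dot product: angle = acos(dot(v1, v2) / (|v1| * |v2|)).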
v1_x, v1_y := float64(n2.X - n1.X), float64(n2.Y - n1.Y)
v2_x, v2_y := float64(n3.X - n2.X), float64(n3.Y - n2.Y)
dot := v1_x * v2_x + v1_y * v2_y
v1_len := math.Sqrt(v1_x * v1_x + v1_y * v1_y)
v2_len := math.Sqrt(v2_x * v2_x + v2_y * v2_y)
a := dot / (v1_len * v2_len)
a = math.Max(-1, math.Min(1, a)) // Rounding errors may produce values outside [-1,1] so clamp it.
angle := math.Acos(a)
if angle >= 0.001 {
// We are turning at node n2, where the two segments meet
avgAngle += angle
turns++
}
}
if turns > 0 {
avgAngle /= float64(turns)
}
return path, turns, pathLen, avgAngle, avgRuntime
}
func MustParsePathfindingFunction(algoName string) func(Node, Node) []Node {
switch strings.ToLower(algoName) {
case "dijkstra":
return Dijkstra
case "astar":
return AStar
case "astar-ps":
return AStarPs
case "thetastar":
return ThetaStar
// case "ap-thetastar":
// return true, ApThetaStar
}
fmt.Printf("Unknown algorithm \"%s\"\n", algoName)
os.Exit(1)
return func(Node, Node) []Node { return []Node{} }
}
func MustParseInt(arg string) int {
n, err := strconv.Atoi(arg)
if err != nil {
fmt.Printf("Non-int argument \"%s\"\n", arg)
os.Exit(1)
}
return n
}
| {
if p.Mode != BenchSingle && p.Mode != BenchAndDrawSingle {
panic("Assertion failed: unexpected mode")
}
var err error
grid, err = LoadMap(p.InPath)
if err != nil {
fmt.Printf("Error reading file \"%s\": %s\n", p.InPath, err.Error())
os.Exit(1)
}
start := NewNode(p.StartX, p.StartY)
goal := NewNode(p.GoalX, p.GoalY)
path, turns, pathLen, avgAngle, avgRuntime := testOneScenario(start, goal, p.Algo, p.Trials)
fmt.Printf("Stats: %d turn(s), length %.1f, avg angle %.1f rad (%.1f deg), runtime %dms\n", turns, pathLen, avgAngle, avgAngle*radToDeg, avgRuntime)
if p.Mode == BenchAndDrawSingle {
img := MakeMapImage(p.Scale)
img = DrawPath(img, path, p.Scale)
err = SaveImage(img, p.OutPath)
if err != nil {
fmt.Printf("Error writing image \"%s\": %s\n", p.OutPath, err.Error())
os.Exit(1)
}
}
} | identifier_body |
dac.rs | //! Stabilizer DAC management interface
//!
//! # Design
//!
//! Stabilizer DACs are connected to the MCU via a simplex, SPI-compatible interface. Each DAC
//! accepts a 16-bit output code.
//!
//! In order to maximize CPU processing time, the DAC code updates are offloaded to hardware using
//! a timer compare channel, DMA stream, and the DAC SPI interface.
//!
//! The timer comparison channel is configured to generate a DMA request whenever the comparison
//! occurs. Thus, whenever a comparison happens, a single DAC code can be written to the output. By
//! configuring a DMA stream for a number of successive DAC codes, hardware can regularly update
//! the DAC without requiring the CPU.
//!
//! In order to ensure alignment between the ADC sample batches and DAC output code batches, a DAC
//! output batch is always exactly 3 batches after the ADC batch that generated it.
//!
//! The DMA transfer for the DAC output codes utilizes a double-buffer mode to avoid losing any
//! transfer events generated by the timer (for example, when 2 update cycles occur before the DMA
//! transfer completion is handled). In this mode, by the time DMA swaps buffers, there is always a valid buffer in the
//! "next-transfer" double-buffer location for the DMA transfer. Once a transfer completes,
//! software then has exactly one batch duration to fill the next buffer before its
//! transfer begins. If software does not meet this deadline, old data will be repeatedly generated
//! on the output and the output will be shifted by one batch.
//!
//! ## Multiple Samples to Single DAC Codes
//!
//! For some applications, it may be desirable to generate a single DAC code from multiple ADC
//! samples. In order to maintain timing characteristics between ADC samples and DAC code outputs,
//! applications are required to generate one DAC code for each ADC sample. To accommodate mapping
//! multiple inputs to a single output, the output code can be repeated a number of times in the
//! output buffer corresponding with the number of input samples that were used to generate it.
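//!
//! As a hypothetical sketch (the names and the decimation factor are illustrative, not part of
//! this module), a filter producing one code per four input samples could populate its output
//! buffer like so:
//!
//! ```ignore
//! // `decimated` holds one DacCode per four ADC samples.
//! for (i, code) in decimated.iter().enumerate() {
//!     // Repeat each code so the output buffer length still matches the batch size.
//!     dac_buf[4 * i..4 * (i + 1)].fill(code.0);
//! }
//! ```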
//!
//!
//! # Note
//!
//! There is a very small amount of latency between updating the two DACs due to bus matrix
//! priority. As such, one of the DACs will be updated marginally earlier before the other because
//! the DMA requests are generated simultaneously. This can be avoided by providing a known offset
//! to other DMA requests, which can be completed by setting e.g. DAC0's comparison to a
//! counter value of 2 and DAC1's comparison to a counter value of 3. This will have the effect of
//! generating the DAC updates with a known latency of 1 timer tick to each other and prevent the
//! DMAs from racing for the bus. As implemented, the DMA channels utilize natural priority of the
//! DMA channels to arbitrate which transfer occurs first.
//!
//!
//! # Limitations
//!
//! While double-buffered mode is used for DMA to avoid lost DAC-update events, there is no check
//! for re-use of a previously provided DAC output buffer. It is assumed that the DMA request is
//! served promptly after the transfer completes.
use stm32h7xx_hal as hal;
use mutex_trait::Mutex;
use super::design_parameters::{SampleBuffer, MAX_SAMPLE_BUFFER_SIZE};
use super::timers;
use core::convert::TryFrom;
use hal::{
dma::{
dma::{DMAReq, DmaConfig},
traits::TargetAddress,
DMAError, MemoryToPeripheral, Transfer,
},
spi::{HalDisabledSpi, HalEnabledSpi, HalSpi},
};
// The following global buffers are used for the DAC code DMA transfers. Two buffers are used for
// each transfer in a ping-pong buffer configuration (one is being prepared while the other is being
// processed). Note that the contents of AXI SRAM is uninitialized, so the buffer contents on
// startup are undefined. The dimensions are `DAC_BUF[dac_index][ping_pong_index][sample_index]`.
#[link_section = ".axisram.buffers"]
static mut DAC_BUF: [[SampleBuffer; 2]; 2] =
[[[0; MAX_SAMPLE_BUFFER_SIZE]; 2]; 2];
/// Custom type for referencing DAC output codes.
/// The internal integer is the raw code written to the DAC output register.
#[derive(Copy, Clone)]
pub struct DacCode(pub u16);
impl DacCode {
// The DAC output range in bipolar mode (including the external output op-amp) is +/- 4.096
// V with 16-bit resolution. The anti-aliasing filter has an additional gain of 2.5.
pub const FULL_SCALE: f32 = 4.096 * 2.5;
pub const VOLT_PER_LSB: f32 = -Self::FULL_SCALE / i16::MIN as f32;
pub const LSB_PER_VOLT: f32 = 1. / Self::VOLT_PER_LSB;
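    // Note: one LSB therefore corresponds to 10.24 V / 32768 = 312.5 uV at the
    // output, i.e. 3200 LSB per volt.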
}
impl TryFrom<f32> for DacCode {
type Error = ();
fn try_from(voltage: f32) -> Result<DacCode, ()> {
let code = voltage * Self::LSB_PER_VOLT;
if !(i16::MIN as f32..=i16::MAX as f32).contains(&code) {
Err(())
} else {
Ok(DacCode::from(code as i16))
}
}
}
impl From<DacCode> for f32 {
fn from(code: DacCode) -> f32 {
i16::from(code) as f32 * DacCode::VOLT_PER_LSB
}
}
impl From<DacCode> for i16 {
fn from(code: DacCode) -> i16 {
(code.0 as i16).wrapping_sub(i16::MIN)
}
}
impl From<i16> for DacCode {
/// Encode signed 16-bit values into DAC offset binary for a bipolar output configuration.
fn from(value: i16) -> Self {
Self(value.wrapping_add(i16::MIN) as u16)
}
}
impl From<u16> for DacCode {
/// Create a dac code from the provided DAC output code.
fn | (value: u16) -> Self {
Self(value)
}
}
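// Illustrative round-trip (a sketch, not part of the API): a 1 V setpoint encodes to
// 3200 LSB above mid-scale and decodes back to (approximately) the same voltage.
//
//     let code = DacCode::try_from(1.0f32).unwrap();
//     assert_eq!(i16::from(code), 3200);
//     let volts = f32::from(code); // ~1.0 V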
macro_rules! dac_output {
($name:ident, $index:literal, $data_stream:ident,
$spi:ident, $trigger_channel:ident, $dma_req:ident) => {
/// $spi is used as a type for indicating a DMA transfer into the SPI TX FIFO
struct $spi {
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
_channel: timers::tim2::$trigger_channel,
}
impl $spi {
pub fn new(
_channel: timers::tim2::$trigger_channel,
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
) -> Self {
Self { spi, _channel }
}
/// Start the SPI and begin operating in a DMA-driven transfer mode.
pub fn start_dma(&mut self) {
// Allow the SPI FIFOs to operate using only DMA data channels.
self.spi.enable_dma_tx();
// Enable SPI and start it in infinite transaction mode.
self.spi.inner().cr1.modify(|_, w| w.spe().set_bit());
self.spi.inner().cr1.modify(|_, w| w.cstart().started());
}
}
// Note(unsafe): This is safe because the DMA request line is logically owned by this module.
// Additionally, the SPI is owned by this structure and is known to be configured for u16 word
// sizes.
unsafe impl TargetAddress<MemoryToPeripheral> for $spi {
/// SPI is configured to operate using 16-bit transfer words.
type MemSize = u16;
/// SPI DMA requests are generated whenever TIM2 CHx ($dma_req) comparison occurs.
const REQUEST_LINE: Option<u8> = Some(DMAReq::$dma_req as u8);
/// Whenever the DMA request occurs, it should write into SPI's TX FIFO.
fn address(&self) -> usize {
&self.spi.inner().txdr as *const _ as usize
}
}
/// Represents data associated with DAC.
pub struct $name {
// Note: SPI TX functionality may not be used from this structure to ensure safety with DMA.
transfer: Transfer<
hal::dma::dma::$data_stream<hal::stm32::DMA1>,
$spi,
MemoryToPeripheral,
&'static mut [u16],
hal::dma::DBTransfer,
>,
}
impl $name {
/// Construct the DAC output channel.
///
/// # Args
/// * `spi` - The SPI interface used to communicate with the ADC.
/// * `stream` - The DMA stream used to write DAC codes over SPI.
/// * `trigger_channel` - The sampling timer output compare channel for update triggers.
pub fn new(
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Enabled, u16>,
stream: hal::dma::dma::$data_stream<hal::stm32::DMA1>,
trigger_channel: timers::tim2::$trigger_channel,
batch_size: usize,
) -> Self {
// Generate a DMA request whenever the output compare for this timer channel
// matches.
trigger_channel.listen_dma();
trigger_channel.to_output_compare(4 + $index);
// The stream constantly writes to the TX FIFO to write new update codes.
let trigger_config = DmaConfig::default()
.memory_increment(true)
.double_buffer(true)
.peripheral_increment(false);
// Listen for any potential SPI error signals, which may indicate that we are not generating
// update codes.
let mut spi = spi.disable();
spi.listen(hal::spi::Event::Error);
// AXISRAM is uninitialized. As such, we manually initialize it for a 0V DAC output
// here before starting the transfer.
// Note(unsafe): We currently own all DAC_BUF[index] buffers and are not using them
// elsewhere, so it is safe to access them here.
for buf in unsafe { DAC_BUF[$index].iter_mut() } {
for byte in buf.iter_mut() {
*byte = DacCode::try_from(0.0f32).unwrap().0;
}
}
// Construct the trigger stream to write from memory to the peripheral.
let transfer: Transfer<_, _, MemoryToPeripheral, _, _> =
Transfer::init(
stream,
$spi::new(trigger_channel, spi),
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { &mut DAC_BUF[$index][0][..batch_size] },
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { Some(&mut DAC_BUF[$index][1][..batch_size]) },
trigger_config,
);
Self { transfer }
}
pub fn start(&mut self) {
self.transfer.start(|spi| spi.start_dma());
}
/// Wait for the transfer of the currently active buffer to complete,
/// then call a function on the now inactive buffer and acknowledge the
/// transfer complete flag.
///
/// NOTE(unsafe): Memory safety and access ordering is not guaranteed
/// (see the HAL DMA docs).
pub fn with_buffer<F, R>(&mut self, f: F) -> Result<R, DMAError>
where
F: FnOnce(&mut &'static mut [u16]) -> R,
{
unsafe {
self.transfer.next_dbm_transfer_with(|buf, _current| f(buf))
}
}
}
// This is not actually a Mutex. It only re-uses the semantics and macros of mutex-trait
// to reduce rightward drift when jointly calling `with_buffer(f)` on multiple DAC/ADCs.
impl Mutex for $name {
type Data = &'static mut [u16];
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
self.with_buffer(f).unwrap()
}
}
};
}
dac_output!(Dac0Output, 0, Stream6, SPI4, Channel3, Tim2Ch3);
dac_output!(Dac1Output, 1, Stream7, SPI5, Channel4, Tim2Ch4);
| from | identifier_name |
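A quick numeric cross-check, in Python, of the DacCode scaling constants in the row above; the constant values come straight from the Rust source, while the helper name is an invented stand-in and rounding is used where the Rust cast truncates.

# Mirrors DacCode::FULL_SCALE, VOLT_PER_LSB and LSB_PER_VOLT from dac.rs above.
FULL_SCALE = 4.096 * 2.5                 # +/- 10.24 V after the 2.5x output stage
VOLT_PER_LSB = -FULL_SCALE / -32768.0    # i16::MIN as f32 == -32768.0
LSB_PER_VOLT = 1.0 / VOLT_PER_LSB

def dac_code_try_from(voltage):
    # Python analogue of `TryFrom<f32> for DacCode` (illustrative only).
    code = voltage * LSB_PER_VOLT
    if not (-32768.0 <= code <= 32767.0):
        raise ValueError('voltage exceeds the representable output range')
    return int(round(code))

assert dac_code_try_from(0.0) == 0
assert dac_code_try_from(1.0) == 3200    # 1 V corresponds to 3200 code steps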
dac.rs | //! Stabilizer DAC management interface
//!
//! # Design
//!
//! Stabilizer DACs are connected to the MCU via a simplex, SPI-compatible interface. Each DAC
//! accepts a 16-bit output code.
//!
//! In order to maximize CPU processing time, the DAC code updates are offloaded to hardware using
//! a timer compare channel, DMA stream, and the DAC SPI interface.
//!
//! The timer comparison channel is configured to generate a DMA request whenever the comparison
//! occurs. Thus, whenever a comparison happens, a single DAC code can be written to the output. By
//! configuring a DMA stream for a number of successive DAC codes, hardware can regularly update
//! the DAC without requiring the CPU.
//!
//! In order to ensure alignment between the ADC sample batches and DAC output code batches, a DAC
//! output batch is always exactly 3 batches after the ADC batch that generated it.
//!
//! The DMA transfer for the DAC output codes utilizes a double-buffer mode to avoid losing any
//! transfer events generated by the timer (for example, when 2 update cycles occur before the DMA
//! transfer completion is handled). In this mode, by the time DMA swaps buffers, there is always a valid buffer in the
//! "next-transfer" double-buffer location for the DMA transfer. Once a transfer completes,
//! software then has exactly one batch duration to fill the next buffer before its
//! transfer begins. If software does not meet this deadline, old data will be repeatedly generated
//! on the output, and the output will be shifted by one batch.
//!
//! ## Multiple Samples to Single DAC Codes
//!
//! For some applications, it may be desirable to generate a single DAC code from multiple ADC
//! samples. In order to maintain timing characteristics between ADC samples and DAC code outputs,
//! applications are required to generate one DAC code for each ADC sample. To accommodate mapping
//! multiple inputs to a single output, the output code can be repeated a number of times in the
//! output buffer corresponding to the number of input samples that were used to generate it.
//!
//!
//! # Note
//!
//! There is a very small amount of latency between updating the two DACs due to bus matrix
//! priority. As such, one of the DACs will be updated marginally earlier than the other because
//! the DMA requests are generated simultaneously. This can be avoided by providing a known offset
//! between the DMA requests, which can be accomplished by setting e.g. DAC0's comparison to a
//! counter value of 2 and DAC1's comparison to a counter value of 3. This will have the effect of
//! generating the DAC updates with a known latency of 1 timer tick to each other and prevent the
//! DMAs from racing for the bus. As implemented, the DMA channels utilize natural priority of the
//! DMA channels to arbitrate which transfer occurs first.
//!
//!
//! # Limitations
//!
//! While double-buffered mode is used for DMA to avoid lost DAC-update events, there is no check
//! for re-use of a previously provided DAC output buffer. It is assumed that the DMA request is
//! served promptly after the transfer completes.
use stm32h7xx_hal as hal;
use mutex_trait::Mutex;
use super::design_parameters::{SampleBuffer, MAX_SAMPLE_BUFFER_SIZE};
use super::timers;
use core::convert::TryFrom;
use hal::{
dma::{
dma::{DMAReq, DmaConfig},
traits::TargetAddress,
DMAError, MemoryToPeripheral, Transfer,
},
spi::{HalDisabledSpi, HalEnabledSpi, HalSpi},
};
// The following global buffers are used for the DAC code DMA transfers. Two buffers are used for
// each transfer in a ping-pong buffer configuration (one is being prepared while the other is being
// processed). Note that the contents of AXI SRAM are uninitialized, so the buffer contents on
// startup are undefined. The dimensions are `DAC_BUF[dac_index][ping_pong_index][sample_index]`.
#[link_section = ".axisram.buffers"]
static mut DAC_BUF: [[SampleBuffer; 2]; 2] =
[[[0; MAX_SAMPLE_BUFFER_SIZE]; 2]; 2];
/// Custom type for referencing DAC output codes.
/// The internal integer is the raw code written to the DAC output register.
#[derive(Copy, Clone)]
pub struct DacCode(pub u16);
impl DacCode {
// The DAC output range in bipolar mode (including the external output op-amp) is +/- 4.096
// V with 16-bit resolution. The anti-aliasing filter has an additional gain of 2.5.
pub const FULL_SCALE: f32 = 4.096 * 2.5;
pub const VOLT_PER_LSB: f32 = -Self::FULL_SCALE / i16::MIN as f32;
pub const LSB_PER_VOLT: f32 = 1. / Self::VOLT_PER_LSB;
}
impl TryFrom<f32> for DacCode {
type Error = ();
fn try_from(voltage: f32) -> Result<DacCode, ()> {
let code = voltage * Self::LSB_PER_VOLT;
if !(i16::MIN as f32..=i16::MAX as f32).contains(&code) {
Err(())
} else {
Ok(DacCode::from(code as i16))
}
}
}
impl From<DacCode> for f32 {
fn from(code: DacCode) -> f32 {
i16::from(code) as f32 * DacCode::VOLT_PER_LSB
}
}
impl From<DacCode> for i16 {
fn from(code: DacCode) -> i16 {
(code.0 as i16).wrapping_sub(i16::MIN)
}
}
impl From<i16> for DacCode {
/// Encode signed 16-bit values into DAC offset binary for a bipolar output configuration.
fn from(value: i16) -> Self {
Self(value.wrapping_add(i16::MIN) as u16)
}
}
impl From<u16> for DacCode {
/// Create a DAC code from the provided DAC output code.
fn from(value: u16) -> Self |
}
macro_rules! dac_output {
($name:ident, $index:literal, $data_stream:ident,
$spi:ident, $trigger_channel:ident, $dma_req:ident) => {
/// $spi is used as a type for indicating a DMA transfer into the SPI TX FIFO
struct $spi {
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
_channel: timers::tim2::$trigger_channel,
}
impl $spi {
pub fn new(
_channel: timers::tim2::$trigger_channel,
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
) -> Self {
Self { spi, _channel }
}
/// Start the SPI and begin operating in a DMA-driven transfer mode.
pub fn start_dma(&mut self) {
// Allow the SPI FIFOs to operate using only DMA data channels.
self.spi.enable_dma_tx();
// Enable SPI and start it in infinite transaction mode.
self.spi.inner().cr1.modify(|_, w| w.spe().set_bit());
self.spi.inner().cr1.modify(|_, w| w.cstart().started());
}
}
// Note(unsafe): This is safe because the DMA request line is logically owned by this module.
// Additionally, the SPI is owned by this structure and is known to be configured for u16 word
// sizes.
unsafe impl TargetAddress<MemoryToPeripheral> for $spi {
/// SPI is configured to operate using 16-bit transfer words.
type MemSize = u16;
/// SPI DMA requests are generated whenever TIM2 CHx ($dma_req) comparison occurs.
const REQUEST_LINE: Option<u8> = Some(DMAReq::$dma_req as u8);
/// Whenever the DMA request occurs, it should write into SPI's TX FIFO.
fn address(&self) -> usize {
&self.spi.inner().txdr as *const _ as usize
}
}
/// Represents data associated with DAC.
pub struct $name {
// Note: SPI TX functionality may not be used from this structure to ensure safety with DMA.
transfer: Transfer<
hal::dma::dma::$data_stream<hal::stm32::DMA1>,
$spi,
MemoryToPeripheral,
&'static mut [u16],
hal::dma::DBTransfer,
>,
}
impl $name {
/// Construct the DAC output channel.
///
/// # Args
/// * `spi` - The SPI interface used to communicate with the DAC.
/// * `stream` - The DMA stream used to write DAC codes over SPI.
/// * `trigger_channel` - The sampling timer output compare channel for update triggers.
pub fn new(
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Enabled, u16>,
stream: hal::dma::dma::$data_stream<hal::stm32::DMA1>,
trigger_channel: timers::tim2::$trigger_channel,
batch_size: usize,
) -> Self {
// Generate DMA events when an output compare of the timer occurs as the timer hits
// zero (timer rollover).
trigger_channel.listen_dma();
trigger_channel.to_output_compare(4 + $index);
// The stream constantly writes to the TX FIFO to write new update codes.
let trigger_config = DmaConfig::default()
.memory_increment(true)
.double_buffer(true)
.peripheral_increment(false);
// Listen for any potential SPI error signals, which may indicate that we are not generating
// update codes.
let mut spi = spi.disable();
spi.listen(hal::spi::Event::Error);
// AXISRAM is uninitialized. As such, we manually initialize it for a 0V DAC output
// here before starting the transfer.
// Note(unsafe): We currently own all DAC_BUF[index] buffers and are not using them
// elsewhere, so it is safe to access them here.
for buf in unsafe { DAC_BUF[$index].iter_mut() } {
for byte in buf.iter_mut() {
*byte = DacCode::try_from(0.0f32).unwrap().0;
}
}
// Construct the trigger stream to write from memory to the peripheral.
let transfer: Transfer<_, _, MemoryToPeripheral, _, _> =
Transfer::init(
stream,
$spi::new(trigger_channel, spi),
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { &mut DAC_BUF[$index][0][..batch_size] },
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { Some(&mut DAC_BUF[$index][1][..batch_size]) },
trigger_config,
);
Self { transfer }
}
pub fn start(&mut self) {
self.transfer.start(|spi| spi.start_dma());
}
/// Wait for the transfer of the currently active buffer to complete,
/// then call a function on the now inactive buffer and acknowledge the
/// transfer complete flag.
///
/// NOTE(unsafe): Memory safety and access ordering is not guaranteed
/// (see the HAL DMA docs).
pub fn with_buffer<F, R>(&mut self, f: F) -> Result<R, DMAError>
where
F: FnOnce(&mut &'static mut [u16]) -> R,
{
unsafe {
self.transfer.next_dbm_transfer_with(|buf, _current| f(buf))
}
}
}
// This is not actually a Mutex. It only re-uses the semantics and macros of mutex-trait
// to reduce rightward drift when jointly calling `with_buffer(f)` on multiple DAC/ADCs.
impl Mutex for $name {
type Data = &'static mut [u16];
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
self.with_buffer(f).unwrap()
}
}
};
}
dac_output!(Dac0Output, 0, Stream6, SPI4, Channel3, Tim2Ch3);
dac_output!(Dac1Output, 1, Stream7, SPI5, Channel4, Tim2Ch4);
| {
Self(value)
} | identifier_body |
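The recovered middle above is the trivial body of `From<u16> for DacCode`; the neighbouring impls carry the offset-binary trick. Below is a minimal Python model of that encoding, assuming two's-complement wrapping as in the Rust source (the function names are invented):

def to_offset_binary(value):
    # i16 -> u16 DAC code; same effect as `value.wrapping_add(i16::MIN) as u16`.
    assert -32768 <= value <= 32767
    return (value + 0x8000) & 0xFFFF

def from_offset_binary(code):
    # u16 DAC code -> i16; same effect as `(code.0 as i16).wrapping_sub(i16::MIN)`.
    assert 0 <= code <= 0xFFFF
    return code - 0x8000

# Round trip across the edge cases: most negative, midscale, most positive.
for v in (-32768, -1, 0, 1, 32767):
    assert from_offset_binary(to_offset_binary(v)) == v
assert to_offset_binary(-32768) == 0x0000    # negative full scale -> lowest code
assert to_offset_binary(0) == 0x8000         # 0 V sits at midscale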
dac.rs | //! Stabilizer DAC management interface
//!
//! # Design
//!
//! Stabilizer DACs are connected to the MCU via a simplex, SPI-compatible interface. Each DAC
//! accepts a 16-bit output code.
//!
//! In order to maximize CPU processing time, the DAC code updates are offloaded to hardware using
//! a timer compare channel, DMA stream, and the DAC SPI interface.
//!
//! The timer comparison channel is configured to generate a DMA request whenever the comparison
//! occurs. Thus, whenever a comparison happens, a single DAC code can be written to the output. By
//! configuring a DMA stream for a number of successive DAC codes, hardware can regularly update
//! the DAC without requiring the CPU.
//!
//! In order to ensure alignment between the ADC sample batches and DAC output code batches, a DAC
//! output batch is always exactly 3 batches after the ADC batch that generated it.
//!
//! The DMA transfer for the DAC output codes utilizes a double-buffer mode to avoid losing any
//! transfer events generated by the timer (for example, when 2 update cycles occur before the DMA
//! transfer completion is handled). In this mode, by the time DMA swaps buffers, there is always a valid buffer in the
//! "next-transfer" double-buffer location for the DMA transfer. Once a transfer completes,
//! software then has exactly one batch duration to fill the next buffer before its
//! transfer begins. If software does not meet this deadline, old data will be repeatedly generated
//! on the output, and the output will be shifted by one batch.
//!
//! ## Multiple Samples to Single DAC Codes
//!
//! For some applications, it may be desirable to generate a single DAC code from multiple ADC
//! samples. In order to maintain timing characteristics between ADC samples and DAC code outputs,
//! applications are required to generate one DAC code for each ADC sample. To accommodate mapping
//! multiple inputs to a single output, the output code can be repeated a number of times in the
//! output buffer corresponding to the number of input samples that were used to generate it.
//!
//!
//! # Note
//!
//! There is a very small amount of latency between updating the two DACs due to bus matrix
//! priority. As such, one of the DACs will be updated marginally earlier than the other because
//! the DMA requests are generated simultaneously. This can be avoided by providing a known offset
//! between the DMA requests, which can be accomplished by setting e.g. DAC0's comparison to a
//! counter value of 2 and DAC1's comparison to a counter value of 3. This will have the effect of
//! generating the DAC updates with a known latency of 1 timer tick to each other and prevent the
//! DMAs from racing for the bus. As implemented, the DMA channels utilize natural priority of the
//! DMA channels to arbitrate which transfer occurs first.
//!
//!
//! # Limitations
//!
//! While double-buffered mode is used for DMA to avoid lost DAC-update events, there is no check
//! for re-use of a previously provided DAC output buffer. It is assumed that the DMA request is
//! served promptly after the transfer completes.
use stm32h7xx_hal as hal;
use mutex_trait::Mutex;
use super::design_parameters::{SampleBuffer, MAX_SAMPLE_BUFFER_SIZE};
use super::timers;
use core::convert::TryFrom;
use hal::{
dma::{
dma::{DMAReq, DmaConfig},
traits::TargetAddress,
DMAError, MemoryToPeripheral, Transfer,
},
spi::{HalDisabledSpi, HalEnabledSpi, HalSpi},
};
// The following global buffers are used for the DAC code DMA transfers. Two buffers are used for
// each transfer in a ping-pong buffer configuration (one is being prepared while the other is being
// processed). Note that the contents of AXI SRAM are uninitialized, so the buffer contents on
// startup are undefined. The dimensions are `DAC_BUF[dac_index][ping_pong_index][sample_index]`.
#[link_section = ".axisram.buffers"]
static mut DAC_BUF: [[SampleBuffer; 2]; 2] =
[[[0; MAX_SAMPLE_BUFFER_SIZE]; 2]; 2];
/// Custom type for referencing DAC output codes.
/// The internal integer is the raw code written to the DAC output register.
#[derive(Copy, Clone)]
pub struct DacCode(pub u16);
impl DacCode {
// The DAC output range in bipolar mode (including the external output op-amp) is +/- 4.096
// V with 16-bit resolution. The anti-aliasing filter has an additional gain of 2.5.
pub const FULL_SCALE: f32 = 4.096 * 2.5;
pub const VOLT_PER_LSB: f32 = -Self::FULL_SCALE / i16::MIN as f32;
pub const LSB_PER_VOLT: f32 = 1. / Self::VOLT_PER_LSB;
}
impl TryFrom<f32> for DacCode {
type Error = ();
fn try_from(voltage: f32) -> Result<DacCode, ()> {
let code = voltage * Self::LSB_PER_VOLT;
if !(i16::MIN as f32..=i16::MAX as f32).contains(&code) {
Err(())
} else |
}
}
impl From<DacCode> for f32 {
fn from(code: DacCode) -> f32 {
i16::from(code) as f32 * DacCode::VOLT_PER_LSB
}
}
impl From<DacCode> for i16 {
fn from(code: DacCode) -> i16 {
(code.0 as i16).wrapping_sub(i16::MIN)
}
}
impl From<i16> for DacCode {
/// Encode signed 16-bit values into DAC offset binary for a bipolar output configuration.
fn from(value: i16) -> Self {
Self(value.wrapping_add(i16::MIN) as u16)
}
}
impl From<u16> for DacCode {
/// Create a DAC code from the provided DAC output code.
fn from(value: u16) -> Self {
Self(value)
}
}
macro_rules! dac_output {
($name:ident, $index:literal, $data_stream:ident,
$spi:ident, $trigger_channel:ident, $dma_req:ident) => {
/// $spi is used as a type for indicating a DMA transfer into the SPI TX FIFO
struct $spi {
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
_channel: timers::tim2::$trigger_channel,
}
impl $spi {
pub fn new(
_channel: timers::tim2::$trigger_channel,
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
) -> Self {
Self { spi, _channel }
}
/// Start the SPI and begin operating in a DMA-driven transfer mode.
pub fn start_dma(&mut self) {
// Allow the SPI FIFOs to operate using only DMA data channels.
self.spi.enable_dma_tx();
// Enable SPI and start it in infinite transaction mode.
self.spi.inner().cr1.modify(|_, w| w.spe().set_bit());
self.spi.inner().cr1.modify(|_, w| w.cstart().started());
}
}
// Note(unsafe): This is safe because the DMA request line is logically owned by this module.
// Additionally, the SPI is owned by this structure and is known to be configured for u16 word
// sizes.
unsafe impl TargetAddress<MemoryToPeripheral> for $spi {
/// SPI is configured to operate using 16-bit transfer words.
type MemSize = u16;
/// SPI DMA requests are generated whenever TIM2 CHx ($dma_req) comparison occurs.
const REQUEST_LINE: Option<u8> = Some(DMAReq::$dma_req as u8);
/// Whenever the DMA request occurs, it should write into SPI's TX FIFO.
fn address(&self) -> usize {
&self.spi.inner().txdr as *const _ as usize
}
}
/// Represents data associated with DAC.
pub struct $name {
// Note: SPI TX functionality may not be used from this structure to ensure safety with DMA.
transfer: Transfer<
hal::dma::dma::$data_stream<hal::stm32::DMA1>,
$spi,
MemoryToPeripheral,
&'static mut [u16],
hal::dma::DBTransfer,
>,
}
impl $name {
/// Construct the DAC output channel.
///
/// # Args
/// * `spi` - The SPI interface used to communicate with the DAC.
/// * `stream` - The DMA stream used to write DAC codes over SPI.
/// * `trigger_channel` - The sampling timer output compare channel for update triggers.
pub fn new(
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Enabled, u16>,
stream: hal::dma::dma::$data_stream<hal::stm32::DMA1>,
trigger_channel: timers::tim2::$trigger_channel,
batch_size: usize,
) -> Self {
// Generate DMA events when an output compare of the timer occurs as the timer hits
// zero (timer rollover).
trigger_channel.listen_dma();
trigger_channel.to_output_compare(4 + $index);
// The stream constantly writes to the TX FIFO to write new update codes.
let trigger_config = DmaConfig::default()
.memory_increment(true)
.double_buffer(true)
.peripheral_increment(false);
// Listen for any potential SPI error signals, which may indicate that we are not generating
// update codes.
let mut spi = spi.disable();
spi.listen(hal::spi::Event::Error);
// AXISRAM is uninitialized. As such, we manually initialize it for a 0V DAC output
// here before starting the transfer.
// Note(unsafe): We currently own all DAC_BUF[index] buffers and are not using them
// elsewhere, so it is safe to access them here.
for buf in unsafe { DAC_BUF[$index].iter_mut() } {
for byte in buf.iter_mut() {
*byte = DacCode::try_from(0.0f32).unwrap().0;
}
}
// Construct the trigger stream to write from memory to the peripheral.
let transfer: Transfer<_, _, MemoryToPeripheral, _, _> =
Transfer::init(
stream,
$spi::new(trigger_channel, spi),
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { &mut DAC_BUF[$index][0][..batch_size] },
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { Some(&mut DAC_BUF[$index][1][..batch_size]) },
trigger_config,
);
Self { transfer }
}
pub fn start(&mut self) {
self.transfer.start(|spi| spi.start_dma());
}
/// Wait for the transfer of the currently active buffer to complete,
/// then call a function on the now inactive buffer and acknowledge the
/// transfer complete flag.
///
/// NOTE(unsafe): Memory safety and access ordering is not guaranteed
/// (see the HAL DMA docs).
pub fn with_buffer<F, R>(&mut self, f: F) -> Result<R, DMAError>
where
F: FnOnce(&mut &'static mut [u16]) -> R,
{
unsafe {
self.transfer.next_dbm_transfer_with(|buf, _current| f(buf))
}
}
}
// This is not actually a Mutex. It only re-uses the semantics and macros of mutex-trait
// to reduce rightward drift when jointly calling `with_buffer(f)` on multiple DAC/ADCs.
impl Mutex for $name {
type Data = &'static mut [u16];
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
self.with_buffer(f).unwrap()
}
}
};
}
dac_output!(Dac0Output, 0, Stream6, SPI4, Channel3, Tim2Ch3);
dac_output!(Dac1Output, 1, Stream7, SPI5, Channel4, Tim2Ch4);
| {
Ok(DacCode::from(code as i16))
} | conditional_block |
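The masked span above is the `Ok(...)` branch that the bounds check guards. As a worked check of the output range that guard admits, assuming the FULL_SCALE of 10.24 V shown in the row (variable names are ours):

# Output range implied by a 10.24 V full scale and 16-bit offset-binary codes.
VOLT_PER_LSB = 10.24 / 32768             # = -FULL_SCALE / i16::MIN, ~312.5 uV/step
v_min = -32768 * VOLT_PER_LSB            # lowest code (i16::MIN) -> -10.24 V
v_max = 32767 * VOLT_PER_LSB             # highest code (i16::MAX)
assert abs(v_min + 10.24) < 1e-9
assert abs((10.24 - v_max) - VOLT_PER_LSB) < 1e-9   # one LSB short of +full scale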
dac.rs | //! Stabilizer DAC management interface
//!
//! # Design
//!
//! Stabilizer DACs are connected to the MCU via a simplex, SPI-compatible interface. Each DAC
//! accepts a 16-bit output code.
//!
//! In order to maximize CPU processing time, the DAC code updates are offloaded to hardware using
//! a timer compare channel, DMA stream, and the DAC SPI interface.
//!
//! The timer comparison channel is configured to generate a DMA request whenever the comparison
//! occurs. Thus, whenever a comparison happens, a single DAC code can be written to the output. By
//! configuring a DMA stream for a number of successive DAC codes, hardware can regularly update
//! the DAC without requiring the CPU.
//!
//! In order to ensure alignment between the ADC sample batches and DAC output code batches, a DAC
//! output batch is always exactly 3 batches after the ADC batch that generated it.
//!
//! The DMA transfer for the DAC output codes utilizes a double-buffer mode to avoid losing any
//! transfer events generated by the timer (for example, when 2 update cycles occur before the DMA
//! transfer completion is handled). In this mode, by the time DMA swaps buffers, there is always a valid buffer in the
//! "next-transfer" double-buffer location for the DMA transfer. Once a transfer completes,
//! software then has exactly one batch duration to fill the next buffer before its
//! transfer begins. If software does not meet this deadline, old data will be repeatedly generated
//! on the output, and the output will be shifted by one batch.
//!
//! ## Multiple Samples to Single DAC Codes
//!
//! For some applications, it may be desirable to generate a single DAC code from multiple ADC
//! samples. In order to maintain timing characteristics between ADC samples and DAC code outputs,
//! applications are required to generate one DAC code for each ADC sample. To accommodate mapping
//! multiple inputs to a single output, the output code can be repeated a number of times in the
//! output buffer corresponding to the number of input samples that were used to generate it.
//!
//!
//! # Note
//!
//! There is a very small amount of latency between updating the two DACs due to bus matrix
//! priority. As such, one of the DACs will be updated marginally earlier than the other because
//! the DMA requests are generated simultaneously. This can be avoided by providing a known offset
//! between the DMA requests, which can be accomplished by setting e.g. DAC0's comparison to a
//! counter value of 2 and DAC1's comparison to a counter value of 3. This will have the effect of
//! generating the DAC updates with a known latency of 1 timer tick to each other and prevent the
//! DMAs from racing for the bus. As implemented, the DMA channels utilize natural priority of the
//! DMA channels to arbitrate which transfer occurs first.
//!
//!
//! # Limitations
//!
//! While double-buffered mode is used for DMA to avoid lost DAC-update events, there is no check
//! for re-use of a previously provided DAC output buffer. It is assumed that the DMA request is
//! served promptly after the transfer completes.
use stm32h7xx_hal as hal;
use mutex_trait::Mutex;
use super::design_parameters::{SampleBuffer, MAX_SAMPLE_BUFFER_SIZE};
use super::timers;
use core::convert::TryFrom;
use hal::{
dma::{
dma::{DMAReq, DmaConfig},
traits::TargetAddress,
DMAError, MemoryToPeripheral, Transfer,
},
spi::{HalDisabledSpi, HalEnabledSpi, HalSpi},
};
// The following global buffers are used for the DAC code DMA transfers. Two buffers are used for
// each transfer in a ping-pong buffer configuration (one is being prepared while the other is being
// processed). Note that the contents of AXI SRAM are uninitialized, so the buffer contents on
// startup are undefined. The dimensions are `DAC_BUF[dac_index][ping_pong_index][sample_index]`.
#[link_section = ".axisram.buffers"]
static mut DAC_BUF: [[SampleBuffer; 2]; 2] =
[[[0; MAX_SAMPLE_BUFFER_SIZE]; 2]; 2];
/// Custom type for referencing DAC output codes.
/// The internal integer is the raw code written to the DAC output register.
#[derive(Copy, Clone)]
pub struct DacCode(pub u16);
impl DacCode {
// The DAC output range in bipolar mode (including the external output op-amp) is +/- 4.096
// V with 16-bit resolution. The anti-aliasing filter has an additional gain of 2.5.
pub const FULL_SCALE: f32 = 4.096 * 2.5;
pub const VOLT_PER_LSB: f32 = -Self::FULL_SCALE / i16::MIN as f32;
pub const LSB_PER_VOLT: f32 = 1. / Self::VOLT_PER_LSB;
}
impl TryFrom<f32> for DacCode {
type Error = ();
fn try_from(voltage: f32) -> Result<DacCode, ()> {
let code = voltage * Self::LSB_PER_VOLT;
if !(i16::MIN as f32..=i16::MAX as f32).contains(&code) {
Err(())
} else {
Ok(DacCode::from(code as i16))
}
}
}
impl From<DacCode> for f32 {
fn from(code: DacCode) -> f32 {
i16::from(code) as f32 * DacCode::VOLT_PER_LSB
}
}
impl From<DacCode> for i16 {
fn from(code: DacCode) -> i16 {
(code.0 as i16).wrapping_sub(i16::MIN)
}
}
impl From<i16> for DacCode {
/// Encode signed 16-bit values into DAC offset binary for a bipolar output configuration.
fn from(value: i16) -> Self {
Self(value.wrapping_add(i16::MIN) as u16)
}
}
impl From<u16> for DacCode {
/// Create a DAC code from the provided DAC output code.
fn from(value: u16) -> Self {
Self(value)
}
}
macro_rules! dac_output {
($name:ident, $index:literal, $data_stream:ident,
$spi:ident, $trigger_channel:ident, $dma_req:ident) => {
/// $spi is used as a type for indicating a DMA transfer into the SPI TX FIFO
struct $spi {
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
_channel: timers::tim2::$trigger_channel,
}
impl $spi {
pub fn new(
_channel: timers::tim2::$trigger_channel,
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
) -> Self {
Self { spi, _channel }
}
/// Start the SPI and begin operating in a DMA-driven transfer mode.
pub fn start_dma(&mut self) {
// Allow the SPI FIFOs to operate using only DMA data channels.
self.spi.enable_dma_tx();
// Enable SPI and start it in infinite transaction mode.
self.spi.inner().cr1.modify(|_, w| w.spe().set_bit());
self.spi.inner().cr1.modify(|_, w| w.cstart().started());
}
}
// Note(unsafe): This is safe because the DMA request line is logically owned by this module.
// Additionally, the SPI is owned by this structure and is known to be configured for u16 word
// sizes.
unsafe impl TargetAddress<MemoryToPeripheral> for $spi {
/// SPI is configured to operate using 16-bit transfer words.
type MemSize = u16;
/// SPI DMA requests are generated whenever TIM2 CHx ($dma_req) comparison occurs.
const REQUEST_LINE: Option<u8> = Some(DMAReq::$dma_req as u8);
/// Whenever the DMA request occurs, it should write into SPI's TX FIFO.
fn address(&self) -> usize {
&self.spi.inner().txdr as *const _ as usize
}
}
/// Represents data associated with DAC.
pub struct $name {
// Note: SPI TX functionality may not be used from this structure to ensure safety with DMA.
transfer: Transfer<
hal::dma::dma::$data_stream<hal::stm32::DMA1>,
$spi,
MemoryToPeripheral,
&'static mut [u16],
hal::dma::DBTransfer, | ///
/// # Args
/// * `spi` - The SPI interface used to communicate with the DAC.
/// * `stream` - The DMA stream used to write DAC codes over SPI.
/// * `trigger_channel` - The sampling timer output compare channel for update triggers.
pub fn new(
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Enabled, u16>,
stream: hal::dma::dma::$data_stream<hal::stm32::DMA1>,
trigger_channel: timers::tim2::$trigger_channel,
batch_size: usize,
) -> Self {
// Generate DMA events when an output compare of the timer occurs as the timer hits
// zero (timer rollover).
trigger_channel.listen_dma();
trigger_channel.to_output_compare(4 + $index);
// The stream constantly writes to the TX FIFO to write new update codes.
let trigger_config = DmaConfig::default()
.memory_increment(true)
.double_buffer(true)
.peripheral_increment(false);
// Listen for any potential SPI error signals, which may indicate that we are not generating
// update codes.
let mut spi = spi.disable();
spi.listen(hal::spi::Event::Error);
// AXISRAM is uninitialized. As such, we manually initialize it for a 0V DAC output
// here before starting the transfer.
// Note(unsafe): We currently own all DAC_BUF[index] buffers and are not using them
// elsewhere, so it is safe to access them here.
for buf in unsafe { DAC_BUF[$index].iter_mut() } {
for byte in buf.iter_mut() {
*byte = DacCode::try_from(0.0f32).unwrap().0;
}
}
// Construct the trigger stream to write from memory to the peripheral.
let transfer: Transfer<_, _, MemoryToPeripheral, _, _> =
Transfer::init(
stream,
$spi::new(trigger_channel, spi),
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { &mut DAC_BUF[$index][0][..batch_size] },
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { Some(&mut DAC_BUF[$index][1][..batch_size]) },
trigger_config,
);
Self { transfer }
}
pub fn start(&mut self) {
self.transfer.start(|spi| spi.start_dma());
}
/// Wait for the transfer of the currently active buffer to complete,
/// then call a function on the now inactive buffer and acknowledge the
/// transfer complete flag.
///
/// NOTE(unsafe): Memory safety and access ordering is not guaranteed
/// (see the HAL DMA docs).
pub fn with_buffer<F, R>(&mut self, f: F) -> Result<R, DMAError>
where
F: FnOnce(&mut &'static mut [u16]) -> R,
{
unsafe {
self.transfer.next_dbm_transfer_with(|buf, _current| f(buf))
}
}
}
// This is not actually a Mutex. It only re-uses the semantics and macros of mutex-trait
// to reduce rightward drift when jointly calling `with_buffer(f)` on multiple DAC/ADCs.
impl Mutex for $name {
type Data = &'static mut [u16];
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
self.with_buffer(f).unwrap()
}
}
};
}
dac_output!(Dac0Output, 0, Stream6, SPI4, Channel3, Tim2Ch3);
dac_output!(Dac1Output, 1, Stream7, SPI5, Channel4, Tim2Ch4); | >,
}
impl $name {
/// Construct the DAC output channel. | random_line_split |
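The fim_type here is `random_line_split`, so the hole can cut mid-construct. The sketch below shows how such a row reassembles, assuming the rendered field order `file_name | prefix | suffix | middle | fim_type`; the strings are abbreviated excerpts of this row's visible boundary.

def reassemble(prefix, middle, suffix):
    # A FIM example restores the original text as prefix + middle + suffix.
    return prefix + middle + suffix

row = {
    'prefix': 'hal::dma::DBTransfer,\n',
    'middle': '>,\n}\nimpl $name {\n/// Construct the DAC output channel.\n',
    'suffix': '///\n/// # Args\n',
    'fim_type': 'random_line_split',
}
restored = reassemble(row['prefix'], row['middle'], row['suffix'])
assert restored.splitlines()[3] == 'impl $name {'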
sqlite.py | # -*- coding: utf-8 -*-
import sqlite3
from ..utils.builtins import *
from ..utils import decimal
from ..load.sqltemp import TemporarySqliteTable
from ..utils.misc import _is_nsiterable
from ..__past__.api07_comp import CompareDict
from ..__past__.api07_comp import CompareSet
from .base import BaseSource
sqlite3.register_adapter(decimal.Decimal, float)
class SqliteBase(BaseSource):
"""Base class four SqliteSource and CsvSource (not intended to be
instantiated directly).
"""
def __new__(cls, *args, **kwds):
if cls is SqliteBase:
msg = 'cannot instantiate SqliteBase directly - make a subclass'
raise NotImplementedError(msg)
return super(SqliteBase, cls).__new__(cls)
def __init__(self, connection, table):
"""Initialize self."""
self._connection = connection
self._table = table
def __repr__(self):
"""Return a string representation of the data source."""
cls_name = self.__class__.__name__
conn_name = str(self._connection)
tbl_name = self._table
return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name)
def columns(self):
"""Return list of column names."""
cursor = self._connection.cursor()
cursor.execute('PRAGMA table_info(' + self._table + ')')
return [x[1] for x in cursor.fetchall()]
def __iter__(self):
"""Return iterable of dictionary rows (like csv.DictReader)."""
cursor = self._connection.cursor()
cursor.execute('SELECT * FROM ' + self._table)
column_names = self.columns()
dict_row = lambda x: dict(zip(column_names, x))
return (dict_row(row) for row in cursor.fetchall())
def filter_rows(self, **kwds):
if kwds:
cursor = self._execute_query('*', **kwds) # <- applies filter
column_names = self.columns()
dict_row = lambda row: dict(zip(column_names, row))
return (dict_row(row) for row in cursor)
return self.__iter__()
def distinct(self, columns, **kwds_filter):
"""Return iterable of tuples containing distinct *columns*
values.
"""
if not _is_nsiterable(columns):
columns = (columns,)
self._assert_columns_exist(columns)
select_clause = [self._normalize_column(x) for x in columns]
select_clause = ', '.join(select_clause)
select_clause = 'DISTINCT ' + select_clause
cursor = self._execute_query(select_clause, **kwds_filter)
return CompareSet(cursor)
def sum(self, column, keys=None, **kwds_filter):
"""Returns :class:`CompareDict` containing sums of *column*
values grouped by *keys*.
"""
self._assert_columns_exist(column)
column = self._normalize_column(column)
sql_functions = 'SUM({0})'.format(column)
return self._sql_aggregate(sql_functions, keys, **kwds_filter)
def count(self, column, keys=None, **kwds_filter):
"""Returns :class:`CompareDict` containing count of non-empty
*column* values grouped by *keys*.
"""
self._assert_columns_exist(column)
sql_function = "SUM(CASE COALESCE({0}, '') WHEN '' THEN 0 ELSE 1 END)"
sql_function = sql_function.format(self._normalize_column(column))
return self._sql_aggregate(sql_function, keys, **kwds_filter)
def _sql_aggregate(self, sql_function, keys=None, **kwds_filter):
"""Aggregates values using SQL function select--e.g.,
'COUNT(*)', 'SUM(col1)', etc.
"""
# TODO: _sql_aggregate has grown messy after a handful of
# iterations; look to refactor it in the future to improve
# maintainability.
if not _is_nsiterable(sql_function):
sql_function = (sql_function,)
if keys is None:
|
if not _is_nsiterable(keys):
keys = (keys,)
group_clause = [self._normalize_column(x) for x in keys]
group_clause = ', '.join(group_clause)
select_clause = '{0}, {1}'.format(group_clause, ', '.join(sql_function))
trailing_clause = 'GROUP BY ' + group_clause
cursor = self._execute_query(select_clause, trailing_clause, **kwds_filter)
pos = len(sql_function)
if pos > 1:
# Gets values by slicing (i.e., row[-pos:]).
iterable = ((row[:-pos], row[-pos:]) for row in cursor)
else:
# Gets value by index (i.e., row[-pos]).
iterable = ((row[:-pos], row[-pos]) for row in cursor)
return CompareDict(iterable, keys)
def mapreduce(self, mapper, reducer, columns, keys=None, **kwds_filter):
obj = super(SqliteBase, self) # 2.x compatible calling convention.
return obj.mapreduce(mapper, reducer, columns, keys, **kwds_filter)
# SqliteBase doesn't implement its own mapreduce() optimization.
# A generalized, SQL optimization could do little more than the
# already-optimized filter_rows() method. Since the super-class'
# mapreduce() already uses filter_rows() internally, a separate
# optimization is unnecessary.
def _execute_query(self, select_clause, trailing_clause=None, **kwds_filter):
"""Execute query and return cursor object."""
try:
stmnt, params = self._build_query(self._table, select_clause, **kwds_filter)
if trailing_clause:
stmnt += '\n' + trailing_clause
cursor = self._connection.cursor()
cursor.execute('PRAGMA synchronous=OFF')
#print(stmnt, params)
cursor.execute(stmnt, params)
except Exception as e:
exc_cls = e.__class__
msg = '%s\n query: %s\n params: %r' % (e, stmnt, params)
raise exc_cls(msg)
return cursor
@classmethod
def _build_query(cls, table, select_clause, **kwds_filter):
"""Return 'SELECT' query."""
query = 'SELECT ' + select_clause + ' FROM ' + table
where_clause, params = cls._build_where_clause(**kwds_filter)
if where_clause:
query = query + ' WHERE ' + where_clause
return query, params
@staticmethod
def _build_where_clause(**kwds_filter):
"""Return 'WHERE' clause that implements *kwds_filter*
constraints.
"""
clause = []
params = []
items = kwds_filter.items()
items = sorted(items, key=lambda x: x[0]) # Ordered by key.
for key, val in items:
if _is_nsiterable(val):
clause.append(key + ' IN (%s)' % (', '.join('?' * len(val))))
for x in val:
params.append(x)
else:
clause.append(key + '=?')
params.append(val)
clause = ' AND '.join(clause) if clause else ''
return clause, params
def create_index(self, *columns):
"""Create an index for specified columns---can speed up testing
in some cases.
See :meth:`SqliteSource.create_index` for more details.
"""
self._assert_columns_exist(columns)
# Build index name.
whitelist = lambda col: ''.join(x for x in col if x.isalnum())
idx_name = '_'.join(whitelist(col) for col in columns)
idx_name = 'idx_{0}_{1}'.format(self._table, idx_name)
# Build column names.
col_names = [self._normalize_column(x) for x in columns]
col_names = ', '.join(col_names)
# Prepare statement.
statement = 'CREATE INDEX IF NOT EXISTS {0} ON {1} ({2})'
statement = statement.format(idx_name, self._table, col_names)
# Create index.
cursor = self._connection.cursor()
cursor.execute('PRAGMA synchronous=OFF')
cursor.execute(statement)
@staticmethod
def _normalize_column(column):
"""Normalize value for use as SQLite column name."""
if not isinstance(column, str):
msg = "expected column of type 'str', got {0!r} instead"
raise TypeError(msg.format(column.__class__.__name__))
column = column.strip()
column = column.replace('"', '""') # Escape quotes.
if column == '':
column = '_empty_'
return '"' + column + '"'
class SqliteSource(SqliteBase):
"""Loads *table* data from given SQLite *connection*:
::
conn = sqlite3.connect('mydatabase.sqlite3')
subject = datatest.SqliteSource(conn, 'mytable')
"""
@classmethod
def from_records(cls, data, columns=None):
"""Alternate constructor to load an existing collection of
records into a temporary SQLite database. Loads *data* (an
iterable of lists, tuples, or dicts) into a temporary table
using the named *columns*::
records = [
('a', 'x'),
('b', 'y'),
('c', 'z'),
...
]
subject = datatest.SqliteSource.from_records(records, ['col1', 'col2'])
The *columns* argument can be omitted if *data* is a collection
of dictionary or namedtuple records::
dict_rows = [
{'col1': 'a', 'col2': 'x'},
{'col1': 'b', 'col2': 'y'},
{'col1': 'c', 'col2': 'z'},
...
]
subject = datatest.SqliteSource.from_records(dict_rows)
"""
temptable = TemporarySqliteTable(data, columns)
return cls(temptable.connection, temptable.name)
def create_index(self, *columns):
"""Create an index for specified columns---can speed up testing
in some cases.
Indexes should be added one-by-one to tune a test suite's
over-all performance. Creating several indexes before testing
even begins could lead to worse performance so use them with
discretion.
An example: If you're using "town" to group aggregation tests
(like ``self.assertSubjectSum('population', ['town'])``), then
you might be able to improve performance by adding an index for
the "town" column::
subject.create_index('town')
Using two or more columns creates a multi-column index::
subject.create_index('town', 'zipcode')
Calling the function multiple times will create multiple
indexes::
subject.create_index('town')
subject.create_index('zipcode')
"""
# Calling super() with older convention to support Python 2.7 & 2.6.
super(SqliteSource, self).create_index(*columns)
| sql_function = ', '.join(sql_function)
cursor = self._execute_query(sql_function, **kwds_filter)
result = cursor.fetchone()
if len(result) == 1:
return result[0]
return result # <- EXIT! | conditional_block |
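The recovered conditional above is the scalar fast path of `_sql_aggregate`. To make the query-building side of the same row concrete, here is a self-contained Python re-implementation of `_build_where_clause`, substituting a plain isinstance check for the library's `_is_nsiterable` helper:

def build_where_clause(**kwds_filter):
    # Mirrors SqliteBase._build_where_clause; values may be scalars or sequences.
    clause, params = [], []
    for key, val in sorted(kwds_filter.items(), key=lambda item: item[0]):
        if isinstance(val, (list, tuple, set)):   # stand-in for _is_nsiterable
            clause.append(key + ' IN (%s)' % ', '.join('?' * len(val)))
            params.extend(val)
        else:
            clause.append(key + '=?')
            params.append(val)
    return ' AND '.join(clause), params

assert build_where_clause(town='Springfield', zipcode=[12345, 12346]) == \
    ('town=? AND zipcode IN (?, ?)', ['Springfield', 12345, 12346])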
sqlite.py | # -*- coding: utf-8 -*-
import sqlite3
from ..utils.builtins import *
from ..utils import decimal
from ..load.sqltemp import TemporarySqliteTable
from ..utils.misc import _is_nsiterable
from ..__past__.api07_comp import CompareDict
from ..__past__.api07_comp import CompareSet
from .base import BaseSource
sqlite3.register_adapter(decimal.Decimal, float)
class SqliteBase(BaseSource):
"""Base class four SqliteSource and CsvSource (not intended to be
instantiated directly).
"""
def __new__(cls, *args, **kwds):
if cls is SqliteBase:
msg = 'cannot instantiate SqliteBase directly - make a subclass'
raise NotImplementedError(msg)
return super(SqliteBase, cls).__new__(cls)
def __init__(self, connection, table):
"""Initialize self."""
self._connection = connection
self._table = table
def __repr__(self):
"""Return a string representation of the data source."""
cls_name = self.__class__.__name__
conn_name = str(self._connection)
tbl_name = self._table
return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name)
def columns(self):
"""Return list of column names."""
cursor = self._connection.cursor()
cursor.execute('PRAGMA table_info(' + self._table + ')')
return [x[1] for x in cursor.fetchall()]
def __iter__(self):
"""Return iterable of dictionary rows (like csv.DictReader)."""
cursor = self._connection.cursor()
cursor.execute('SELECT * FROM ' + self._table)
column_names = self.columns()
dict_row = lambda x: dict(zip(column_names, x))
return (dict_row(row) for row in cursor.fetchall())
def filter_rows(self, **kwds):
if kwds:
cursor = self._execute_query('*', **kwds) # <- applies filter
column_names = self.columns()
dict_row = lambda row: dict(zip(column_names, row))
return (dict_row(row) for row in cursor)
return self.__iter__()
def distinct(self, columns, **kwds_filter):
"""Return iterable of tuples containing distinct *columns*
values.
"""
if not _is_nsiterable(columns):
columns = (columns,)
self._assert_columns_exist(columns)
select_clause = [self._normalize_column(x) for x in columns]
select_clause = ', '.join(select_clause)
select_clause = 'DISTINCT ' + select_clause
cursor = self._execute_query(select_clause, **kwds_filter)
return CompareSet(cursor)
def sum(self, column, keys=None, **kwds_filter):
"""Returns :class:`CompareDict` containing sums of *column*
values grouped by *keys*.
"""
self._assert_columns_exist(column)
column = self._normalize_column(column)
sql_functions = 'SUM({0})'.format(column)
return self._sql_aggregate(sql_functions, keys, **kwds_filter)
def count(self, column, keys=None, **kwds_filter):
"""Returns :class:`CompareDict` containing count of non-empty
*column* values grouped by *keys*.
"""
self._assert_columns_exist(column)
sql_function = "SUM(CASE COALESCE({0}, '') WHEN '' THEN 0 ELSE 1 END)"
sql_function = sql_function.format(self._normalize_column(column))
return self._sql_aggregate(sql_function, keys, **kwds_filter)
def _sql_aggregate(self, sql_function, keys=None, **kwds_filter):
"""Aggregates values using SQL function select--e.g.,
'COUNT(*)', 'SUM(col1)', etc.
"""
# TODO: _sql_aggregate has grown messy after a handful of
# iterations; look to refactor it in the future to improve
# maintainability.
if not _is_nsiterable(sql_function):
sql_function = (sql_function,)
if keys is None:
sql_function = ', '.join(sql_function)
cursor = self._execute_query(sql_function, **kwds_filter)
result = cursor.fetchone()
if len(result) == 1:
return result[0]
return result # <- EXIT!
if not _is_nsiterable(keys):
keys = (keys,)
group_clause = [self._normalize_column(x) for x in keys]
group_clause = ', '.join(group_clause)
select_clause = '{0}, {1}'.format(group_clause, ', '.join(sql_function))
trailing_clause = 'GROUP BY ' + group_clause
cursor = self._execute_query(select_clause, trailing_clause, **kwds_filter)
pos = len(sql_function)
if pos > 1:
# Gets values by slicing (i.e., row[-pos:]).
iterable = ((row[:-pos], row[-pos:]) for row in cursor)
else:
# Gets value by index (i.e., row[-pos]).
iterable = ((row[:-pos], row[-pos]) for row in cursor)
return CompareDict(iterable, keys)
def mapreduce(self, mapper, reducer, columns, keys=None, **kwds_filter):
obj = super(SqliteBase, self) # 2.x compatible calling convention.
return obj.mapreduce(mapper, reducer, columns, keys, **kwds_filter)
# SqliteBase doesn't implement its own mapreduce() optimization.
# A generalized, SQL optimization could do little more than the
# already-optimized filter_rows() method. Since the super-class'
# mapreduce() already uses filter_rows() internally, a separate
# optimization is unnecessary.
def _execute_query(self, select_clause, trailing_clause=None, **kwds_filter):
"""Execute query and return cursor object."""
try:
stmnt, params = self._build_query(self._table, select_clause, **kwds_filter)
if trailing_clause:
stmnt += '\n' + trailing_clause
cursor = self._connection.cursor()
cursor.execute('PRAGMA synchronous=OFF')
#print(stmnt, params)
cursor.execute(stmnt, params)
except Exception as e:
exc_cls = e.__class__
msg = '%s\n query: %s\n params: %r' % (e, stmnt, params)
raise exc_cls(msg)
return cursor
@classmethod
def _build_query(cls, table, select_clause, **kwds_filter):
"""Return 'SELECT' query."""
query = 'SELECT ' + select_clause + ' FROM ' + table
where_clause, params = cls._build_where_clause(**kwds_filter)
if where_clause:
query = query + ' WHERE ' + where_clause
return query, params
@staticmethod
def _build_where_clause(**kwds_filter):
"""Return 'WHERE' clause that implements *kwds_filter*
constraints.
"""
clause = []
params = []
items = kwds_filter.items()
items = sorted(items, key=lambda x: x[0]) # Ordered by key.
for key, val in items:
if _is_nsiterable(val):
clause.append(key + ' IN (%s)' % (', '.join('?' * len(val))))
for x in val:
params.append(x)
else:
clause.append(key + '=?')
params.append(val)
clause = ' AND '.join(clause) if clause else ''
return clause, params
def create_index(self, *columns):
"""Create an index for specified columns---can speed up testing
in some cases.
See :meth:`SqliteSource.create_index` for more details.
"""
self._assert_columns_exist(columns)
# Build index name.
whitelist = lambda col: ''.join(x for x in col if x.isalnum())
idx_name = '_'.join(whitelist(col) for col in columns)
idx_name = 'idx_{0}_{1}'.format(self._table, idx_name)
# Build column names.
col_names = [self._normalize_column(x) for x in columns]
col_names = ', '.join(col_names)
# Prepare statement.
statement = 'CREATE INDEX IF NOT EXISTS {0} ON {1} ({2})'
statement = statement.format(idx_name, self._table, col_names)
# Create index.
cursor = self._connection.cursor()
cursor.execute('PRAGMA synchronous=OFF')
cursor.execute(statement)
@staticmethod
def _normalize_column(column):
"""Normalize value for use as SQLite column name."""
if not isinstance(column, str):
msg = "expected column of type 'str', got {0!r} instead"
raise TypeError(msg.format(column.__class__.__name__))
column = column.strip()
column = column.replace('"', '""') # Escape quotes.
if column == '': | """Loads *table* data from given SQLite *connection*:
::
conn = sqlite3.connect('mydatabase.sqlite3')
subject = datatest.SqliteSource(conn, 'mytable')
"""
@classmethod
def from_records(cls, data, columns=None):
"""Alternate constructor to load an existing collection of
records into a temporary SQLite database. Loads *data* (an
iterable of lists, tuples, or dicts) into a temporary table
using the named *columns*::
records = [
('a', 'x'),
('b', 'y'),
('c', 'z'),
...
]
subject = datatest.SqliteSource.from_records(records, ['col1', 'col2'])
The *columns* argument can be omitted if *data* is a collection
of dictionary or namedtuple records::
dict_rows = [
{'col1': 'a', 'col2': 'x'},
{'col1': 'b', 'col2': 'y'},
{'col1': 'c', 'col2': 'z'},
...
]
subject = datatest.SqliteSource.from_records(dict_rows)
"""
temptable = TemporarySqliteTable(data, columns)
return cls(temptable.connection, temptable.name)
def create_index(self, *columns):
"""Create an index for specified columns---can speed up testing
in some cases.
Indexes should be added one-by-one to tune a test suite's
over-all performance. Creating several indexes before testing
even begins could lead to worse performance so use them with
discretion.
An example: If you're using "town" to group aggregation tests
(like ``self.assertSubjectSum('population', ['town'])``), then
you might be able to improve performance by adding an index for
the "town" column::
subject.create_index('town')
Using two or more columns creates a multi-column index::
subject.create_index('town', 'zipcode')
Calling the function multiple times will create multiple
indexes::
subject.create_index('town')
subject.create_index('zipcode')
"""
# Calling super() with older convention to support Python 2.7 & 2.6.
super(SqliteSource, self).create_index(*columns) | column = '_empty_'
return '"' + column + '"'
class SqliteSource(SqliteBase): | random_line_split |
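This row's split lands inside `_normalize_column`. A standalone copy of that function follows, with a few inputs showing the SQLite quoting rules it implements (the assertions are ours, not from the source):

def normalize_column(column):
    # Same logic as SqliteBase._normalize_column in the row above.
    if not isinstance(column, str):
        raise TypeError("expected column of type 'str', got {0!r} instead"
                        .format(column.__class__.__name__))
    column = column.strip()
    column = column.replace('"', '""')   # Escape embedded quotes for SQLite.
    if column == '':
        column = '_empty_'
    return '"' + column + '"'

assert normalize_column(' town ') == '"town"'
assert normalize_column('say "hi"') == '"say ""hi"""'
assert normalize_column('') == '"_empty_"'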
sqlite.py | # -*- coding: utf-8 -*-
import sqlite3
from ..utils.builtins import *
from ..utils import decimal
from ..load.sqltemp import TemporarySqliteTable
from ..utils.misc import _is_nsiterable
from ..__past__.api07_comp import CompareDict
from ..__past__.api07_comp import CompareSet
from .base import BaseSource
sqlite3.register_adapter(decimal.Decimal, float)
class SqliteBase(BaseSource):
"""Base class four SqliteSource and CsvSource (not intended to be
instantiated directly).
"""
def __new__(cls, *args, **kwds):
if cls is SqliteBase:
msg = 'cannot instantiate SqliteBase directly - make a subclass'
raise NotImplementedError(msg)
return super(SqliteBase, cls).__new__(cls)
def __init__(self, connection, table):
"""Initialize self."""
self._connection = connection
self._table = table
def __repr__(self):
"""Return a string representation of the data source."""
cls_name = self.__class__.__name__
conn_name = str(self._connection)
tbl_name = self._table
return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name)
def columns(self):
"""Return list of column names."""
cursor = self._connection.cursor()
cursor.execute('PRAGMA table_info(' + self._table + ')')
return [x[1] for x in cursor.fetchall()]
def | (self):
"""Return iterable of dictionary rows (like csv.DictReader)."""
cursor = self._connection.cursor()
cursor.execute('SELECT * FROM ' + self._table)
column_names = self.columns()
dict_row = lambda x: dict(zip(column_names, x))
return (dict_row(row) for row in cursor.fetchall())
def filter_rows(self, **kwds):
if kwds:
cursor = self._execute_query('*', **kwds) # <- applies filter
column_names = self.columns()
dict_row = lambda row: dict(zip(column_names, row))
return (dict_row(row) for row in cursor)
return self.__iter__()
def distinct(self, columns, **kwds_filter):
"""Return iterable of tuples containing distinct *columns*
values.
"""
if not _is_nsiterable(columns):
columns = (columns,)
self._assert_columns_exist(columns)
select_clause = [self._normalize_column(x) for x in columns]
select_clause = ', '.join(select_clause)
select_clause = 'DISTINCT ' + select_clause
cursor = self._execute_query(select_clause, **kwds_filter)
return CompareSet(cursor)
def sum(self, column, keys=None, **kwds_filter):
"""Returns :class:`CompareDict` containing sums of *column*
values grouped by *keys*.
"""
self._assert_columns_exist(column)
column = self._normalize_column(column)
sql_functions = 'SUM({0})'.format(column)
return self._sql_aggregate(sql_functions, keys, **kwds_filter)
def count(self, column, keys=None, **kwds_filter):
"""Returns :class:`CompareDict` containing count of non-empty
*column* values grouped by *keys*.
"""
self._assert_columns_exist(column)
sql_function = "SUM(CASE COALESCE({0}, '') WHEN '' THEN 0 ELSE 1 END)"
sql_function = sql_function.format(self._normalize_column(column))
return self._sql_aggregate(sql_function, keys, **kwds_filter)
def _sql_aggregate(self, sql_function, keys=None, **kwds_filter):
"""Aggregates values using SQL function select--e.g.,
'COUNT(*)', 'SUM(col1)', etc.
"""
# TODO: _sql_aggregate has grown messy after a handful of
# iterations; look to refactor it in the future to improve
# maintainability.
if not _is_nsiterable(sql_function):
sql_function = (sql_function,)
if keys is None:
sql_function = ', '.join(sql_function)
cursor = self._execute_query(sql_function, **kwds_filter)
result = cursor.fetchone()
if len(result) == 1:
return result[0]
return result # <- EXIT!
if not _is_nsiterable(keys):
keys = (keys,)
group_clause = [self._normalize_column(x) for x in keys]
group_clause = ', '.join(group_clause)
select_clause = '{0}, {1}'.format(group_clause, ', '.join(sql_function))
trailing_clause = 'GROUP BY ' + group_clause
cursor = self._execute_query(select_clause, trailing_clause, **kwds_filter)
pos = len(sql_function)
if pos > 1:
# Gets values by slicing (i.e., row[-pos:]).
iterable = ((row[:-pos], row[-pos:]) for row in cursor)
else:
# Gets value by index (i.e., row[-pos]).
iterable = ((row[:-pos], row[-pos]) for row in cursor)
return CompareDict(iterable, keys)
def mapreduce(self, mapper, reducer, columns, keys=None, **kwds_filter):
obj = super(SqliteBase, self) # 2.x compatible calling convention.
return obj.mapreduce(mapper, reducer, columns, keys, **kwds_filter)
# SqliteBase doesn't implement its own mapreduce() optimization.
# A generalized, SQL optimization could do little more than the
# already-optmized filter_rows() method. Since the super-class'
# mapreduce() already uses filter_rows() internally, a separate
# optimization is unnecessary.
def _execute_query(self, select_clause, trailing_clause=None, **kwds_filter):
"""Execute query and return cursor object."""
try:
stmnt, params = self._build_query(self._table, select_clause, **kwds_filter)
if trailing_clause:
stmnt += '\n' + trailing_clause
cursor = self._connection.cursor()
cursor.execute('PRAGMA synchronous=OFF')
#print(stmnt, params)
cursor.execute(stmnt, params)
except Exception as e:
exc_cls = e.__class__
msg = '%s\n query: %s\n params: %r' % (e, stmnt, params)
raise exc_cls(msg)
return cursor
@classmethod
def _build_query(cls, table, select_clause, **kwds_filter):
"""Return 'SELECT' query."""
query = 'SELECT ' + select_clause + ' FROM ' + table
where_clause, params = cls._build_where_clause(**kwds_filter)
if where_clause:
query = query + ' WHERE ' + where_clause
return query, params
@staticmethod
def _build_where_clause(**kwds_filter):
"""Return 'WHERE' clause that implements *kwds_filter*
constraints.
"""
clause = []
params = []
items = kwds_filter.items()
items = sorted(items, key=lambda x: x[0]) # Ordered by key.
for key, val in items:
if _is_nsiterable(val):
clause.append(key + ' IN (%s)' % (', '.join('?' * len(val))))
for x in val:
params.append(x)
else:
clause.append(key + '=?')
params.append(val)
clause = ' AND '.join(clause) if clause else ''
return clause, params
def create_index(self, *columns):
"""Create an index for specified columns---can speed up testing
in some cases.
See :meth:`SqliteSource.create_index` for more details.
"""
self._assert_columns_exist(columns)
# Build index name.
whitelist = lambda col: ''.join(x for x in col if x.isalnum())
idx_name = '_'.join(whitelist(col) for col in columns)
idx_name = 'idx_{0}_{1}'.format(self._table, idx_name)
# Build column names.
col_names = [self._normalize_column(x) for x in columns]
col_names = ', '.join(col_names)
# Prepare statement.
statement = 'CREATE INDEX IF NOT EXISTS {0} ON {1} ({2})'
statement = statement.format(idx_name, self._table, col_names)
# Create index.
cursor = self._connection.cursor()
cursor.execute('PRAGMA synchronous=OFF')
cursor.execute(statement)
@staticmethod
def _normalize_column(column):
"""Normalize value for use as SQLite column name."""
if not isinstance(column, str):
msg = "expected column of type 'str', got {0!r} instead"
raise TypeError(msg.format(column.__class__.__name__))
column = column.strip()
column = column.replace('"', '""') # Escape quotes.
if column == '':
column = '_empty_'
return '"' + column + '"'
class SqliteSource(SqliteBase):
"""Loads *table* data from given SQLite *connection*:
::
conn = sqlite3.connect('mydatabase.sqlite3')
subject = datatest.SqliteSource(conn, 'mytable')
"""
@classmethod
def from_records(cls, data, columns=None):
"""Alternate constructor to load an existing collection of
records into a temporary SQLite database. Loads *data* (an
iterable of lists, tuples, or dicts) into a temporary table
using the named *columns*::
records = [
('a', 'x'),
('b', 'y'),
('c', 'z'),
...
]
subject = datatest.SqliteSource.from_records(records, ['col1', 'col2'])
The *columns* argument can be omitted if *data* is a collection
of dictionary or namedtuple records::
dict_rows = [
{'col1': 'a', 'col2': 'x'},
{'col1': 'b', 'col2': 'y'},
{'col1': 'c', 'col2': 'z'},
...
]
subject = datatest.SqliteSource.from_records(dict_rows)
"""
temptable = TemporarySqliteTable(data, columns)
return cls(temptable.connection, temptable.name)
def create_index(self, *columns):
"""Create an index for specified columns---can speed up testing
in some cases.
Indexes should be added one-by-one to tune a test suite's
overall performance. Creating several indexes before testing
even begins could lead to worse performance, so use them with
discretion.
An example: If you're using "town" to group aggregation tests
(like ``self.assertSubjectSum('population', ['town'])``), then
you might be able to improve performance by adding an index for
the "town" column::
subject.create_index('town')
Using two or more columns creates a multi-column index::
subject.create_index('town', 'zipcode')
Calling the function multiple times will create multiple
indexes::
subject.create_index('town')
subject.create_index('zipcode')
"""
# Calling super() with older convention to support Python 2.7 & 2.6.
super(SqliteSource, self).create_index(*columns)
| __iter__ | identifier_name |
sqlite.py | # -*- coding: utf-8 -*-
import sqlite3
from ..utils.builtins import *
from ..utils import decimal
from ..load.sqltemp import TemporarySqliteTable
from ..utils.misc import _is_nsiterable
from ..__past__.api07_comp import CompareDict
from ..__past__.api07_comp import CompareSet
from .base import BaseSource
sqlite3.register_adapter(decimal.Decimal, float)
class SqliteBase(BaseSource):
|
class SqliteSource(SqliteBase):
"""Loads *table* data from given SQLite *connection*:
::
conn = sqlite3.connect('mydatabase.sqlite3')
subject = datatest.SqliteSource(conn, 'mytable')
"""
@classmethod
def from_records(cls, data, columns=None):
"""Alternate constructor to load an existing collection of
records into a temporary SQLite database. Loads *data* (an
iterable of lists, tuples, or dicts) into a temporary table
using the named *columns*::
records = [
('a', 'x'),
('b', 'y'),
('c', 'z'),
...
]
subject = datatest.SqliteSource.from_records(records, ['col1', 'col2'])
The *columns* argument can be omitted if *data* is a collection
of dictionary or namedtuple records::
dict_rows = [
{'col1': 'a', 'col2': 'x'},
{'col1': 'b', 'col2': 'y'},
{'col1': 'c', 'col2': 'z'},
...
]
subject = datatest.SqliteSource.from_records(dict_rows)
"""
temptable = TemporarySqliteTable(data, columns)
return cls(temptable.connection, temptable.name)
def create_index(self, *columns):
"""Create an index for specified columns---can speed up testing
in some cases.
Indexes should be added one-by-one to tune a test suite's
overall performance. Creating several indexes before testing
even begins could lead to worse performance, so use them with
discretion.
An example: If you're using "town" to group aggregation tests
(like ``self.assertSubjectSum('population', ['town'])``), then
you might be able to improve performance by adding an index for
the "town" column::
subject.create_index('town')
Using two or more columns creates a multi-column index::
subject.create_index('town', 'zipcode')
Calling the function multiple times will create multiple
indexes::
subject.create_index('town')
subject.create_index('zipcode')
"""
# Calling super() with older convention to support Python 2.7 & 2.6.
super(SqliteSource, self).create_index(*columns)
| """Base class four SqliteSource and CsvSource (not intended to be
instantiated directly).
"""
def __new__(cls, *args, **kwds):
if cls is SqliteBase:
msg = 'cannot instantiate SqliteBase directly - make a subclass'
raise NotImplementedError(msg)
return super(SqliteBase, cls).__new__(cls)
def __init__(self, connection, table):
"""Initialize self."""
self._connection = connection
self._table = table
def __repr__(self):
"""Return a string representation of the data source."""
cls_name = self.__class__.__name__
conn_name = str(self._connection)
tbl_name = self._table
return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name)
def columns(self):
"""Return list of column names."""
cursor = self._connection.cursor()
cursor.execute('PRAGMA table_info(' + self._table + ')')
return [x[1] for x in cursor.fetchall()]
def __iter__(self):
"""Return iterable of dictionary rows (like csv.DictReader)."""
cursor = self._connection.cursor()
cursor.execute('SELECT * FROM ' + self._table)
column_names = self.columns()
dict_row = lambda x: dict(zip(column_names, x))
return (dict_row(row) for row in cursor.fetchall())
def filter_rows(self, **kwds):
if kwds:
cursor = self._connection.cursor()
cursor = self._execute_query('*', **kwds) # <- applies filter
column_names = self.columns()
dict_row = lambda row: dict(zip(column_names, row))
return (dict_row(row) for row in cursor)
return self.__iter__()
def distinct(self, columns, **kwds_filter):
"""Return iterable of tuples containing distinct *columns*
values.
"""
if not _is_nsiterable(columns):
columns = (columns,)
self._assert_columns_exist(columns)
select_clause = [self._normalize_column(x) for x in columns]
select_clause = ', '.join(select_clause)
select_clause = 'DISTINCT ' + select_clause
cursor = self._execute_query(select_clause, **kwds_filter)
return CompareSet(cursor)
def sum(self, column, keys=None, **kwds_filter):
"""Returns :class:`CompareDict` containing sums of *column*
values grouped by *keys*.
"""
self._assert_columns_exist(column)
column = self._normalize_column(column)
sql_functions = 'SUM({0})'.format(column)
return self._sql_aggregate(sql_functions, keys, **kwds_filter)
def count(self, column, keys=None, **kwds_filter):
"""Returns :class:`CompareDict` containing count of non-empty
*column* values grouped by *keys*.
"""
self._assert_columns_exist(column)
sql_function = "SUM(CASE COALESCE({0}, '') WHEN '' THEN 0 ELSE 1 END)"
sql_function = sql_function.format(self._normalize_column(column))
return self._sql_aggregate(sql_function, keys, **kwds_filter)
def _sql_aggregate(self, sql_function, keys=None, **kwds_filter):
"""Aggregates values using SQL function select--e.g.,
'COUNT(*)', 'SUM(col1)', etc.
"""
# TODO: _sql_aggregate has grown messy after a handful of
# iterations; look to refactor it in the future to improve
# maintainability.
if not _is_nsiterable(sql_function):
sql_function = (sql_function,)
if keys is None:
sql_function = ', '.join(sql_function)
cursor = self._execute_query(sql_function, **kwds_filter)
result = cursor.fetchone()
if len(result) == 1:
return result[0]
return result # <- EXIT!
if not _is_nsiterable(keys):
keys = (keys,)
group_clause = [self._normalize_column(x) for x in keys]
group_clause = ', '.join(group_clause)
select_clause = '{0}, {1}'.format(group_clause, ', '.join(sql_function))
trailing_clause = 'GROUP BY ' + group_clause
cursor = self._execute_query(select_clause, trailing_clause, **kwds_filter)
pos = len(sql_function)
if pos > 1:
# Gets values by slicing (i.e., row[-pos:]).
iterable = ((row[:-pos], row[-pos:]) for row in cursor)
else:
# Gets value by index (i.e., row[-pos]).
iterable = ((row[:-pos], row[-pos]) for row in cursor)
return CompareDict(iterable, keys)
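    # Hypothetical usage sketch (table and column names invented): a
    # subclass instance can aggregate per group via the helpers above, e.g.
    #
    #     subject = SqliteSource(conn, 'mytable')
    #     totals = subject.sum('population', keys='town')
    #     # -> CompareDict mapping each town to SUM(population)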
def mapreduce(self, mapper, reducer, columns, keys=None, **kwds_filter):
obj = super(SqliteBase, self) # 2.x compatible calling convention.
return obj.mapreduce(mapper, reducer, columns, keys, **kwds_filter)
# SqliteBase doesn't implement its own mapreduce() optimization.
# A generalized SQL optimization could do little more than the
# already-optimized filter_rows() method. Since the super-class's
# mapreduce() already uses filter_rows() internally, a separate
# optimization is unnecessary.
def _execute_query(self, select_clause, trailing_clause=None, **kwds_filter):
"""Execute query and return cursor object."""
try:
stmnt, params = self._build_query(self._table, select_clause, **kwds_filter)
if trailing_clause:
stmnt += '\n' + trailing_clause
cursor = self._connection.cursor()
cursor.execute('PRAGMA synchronous=OFF')
#print(stmnt, params)
cursor.execute(stmnt, params)
except Exception as e:
exc_cls = e.__class__
msg = '%s\n query: %s\n params: %r' % (e, stmnt, params)
raise exc_cls(msg)
return cursor
@classmethod
def _build_query(cls, table, select_clause, **kwds_filter):
"""Return 'SELECT' query."""
query = 'SELECT ' + select_clause + ' FROM ' + table
where_clause, params = cls._build_where_clause(**kwds_filter)
if where_clause:
query = query + ' WHERE ' + where_clause
return query, params
@staticmethod
def _build_where_clause(**kwds_filter):
"""Return 'WHERE' clause that implements *kwds_filter*
constraints.
"""
clause = []
params = []
items = kwds_filter.items()
items = sorted(items, key=lambda x: x[0]) # Ordered by key.
for key, val in items:
if _is_nsiterable(val):
clause.append(key + ' IN (%s)' % (', '.join('?' * len(val))))
for x in val:
params.append(x)
else:
clause.append(key + '=?')
params.append(val)
clause = ' AND '.join(clause) if clause else ''
return clause, params
def create_index(self, *columns):
"""Create an index for specified columns---can speed up testing
in some cases.
See :meth:`SqliteSource.create_index` for more details.
"""
self._assert_columns_exist(columns)
# Build index name.
whitelist = lambda col: ''.join(x for x in col if x.isalnum())
idx_name = '_'.join(whitelist(col) for col in columns)
idx_name = 'idx_{0}_{1}'.format(self._table, idx_name)
# Build column names.
col_names = [self._normalize_column(x) for x in columns]
col_names = ', '.join(col_names)
# Prepare statement.
statement = 'CREATE INDEX IF NOT EXISTS {0} ON {1} ({2})'
statement = statement.format(idx_name, self._table, col_names)
# Create index.
cursor = self._connection.cursor()
cursor.execute('PRAGMA synchronous=OFF')
cursor.execute(statement)
@staticmethod
def _normalize_column(column):
"""Normalize value for use as SQLite column name."""
if not isinstance(column, str):
msg = "expected column of type 'str', got {0!r} instead"
raise TypeError(msg.format(column.__class__.__name__))
column = column.strip()
column = column.replace('"', '""') # Escape quotes.
if column == '':
column = '_empty_'
return '"' + column + '"' | identifier_body |
storage.rs | use regex::Regex;
use std::collections::HashMap;
use std::path::Path;
use std::sync::Mutex;
use log::{error, info};
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::params;
use rusqlite_migration::{Migrations, M};
use super::errors::Error;
pub type DatabaseConnection = r2d2::PooledConnection<SqliteConnectionManager>;
pub type DatabaseConnectionPool = r2d2::Pool<SqliteConnectionManager>;
#[derive(PartialEq, Eq, Hash)]
pub struct RoomId {
id: String,
}
lazy_static::lazy_static! {
// Letters, digits, "-" and "_" only; must be between 1 and 64 characters
static ref REGULAR_CHARACTERS_ONLY: Regex = Regex::new(r"^[\w-]{1,64}$").unwrap();
}
impl RoomId {
pub fn new(room_id: &str) -> Option<RoomId> {
if REGULAR_CHARACTERS_ONLY.is_match(room_id) {
return Some(RoomId { id: room_id.to_string() });
} else {
return None;
}
}
pub fn get_id(&self) -> &str {
&self.id
}
}
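// Illustrative sketch (room names invented): the regex above admits only
// word characters and hyphens, so path-like input is rejected before it
// can ever reach the filesystem layer below.
//
//     assert!(RoomId::new("lobby-1").is_some());
//     assert!(RoomId::new("../etc/passwd").is_none());
//     assert!(RoomId::new("").is_none());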
// Main
lazy_static::lazy_static! {
pub static ref MAIN_POOL: DatabaseConnectionPool = {
let file_name = "database.db";
let db_manager = r2d2_sqlite::SqliteConnectionManager::file(file_name);
return r2d2::Pool::new(db_manager).unwrap();
};
}
pub fn create_main_database_if_needed() {
let pool = &MAIN_POOL;
let conn = pool.get().unwrap();
create_main_tables_if_needed(&conn);
}
fn create_main_tables_if_needed(conn: &DatabaseConnection) |
// Rooms
pub const PENDING_TOKEN_EXPIRATION: i64 = 10 * 60;
pub const TOKEN_EXPIRATION: i64 = 7 * 24 * 60 * 60;
pub const FILE_EXPIRATION: i64 = 15 * 24 * 60 * 60;
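// For reference, the values above are expressed in seconds: pending tokens
// live 10 minutes, tokens 7 days, and files 15 days.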
lazy_static::lazy_static! {
static ref POOLS: Mutex<HashMap<String, DatabaseConnectionPool>> = Mutex::new(HashMap::new());
}
pub fn pool_by_room_id(room_id: &RoomId) -> DatabaseConnectionPool {
let mut pools = POOLS.lock().unwrap();
if let Some(pool) = pools.get(room_id.get_id()) {
return pool.clone();
} else {
let raw_path = format!("rooms/{}.db", room_id.get_id());
let path = Path::new(&raw_path);
let db_manager = r2d2_sqlite::SqliteConnectionManager::file(path);
let pool = r2d2::Pool::new(db_manager).unwrap();
pools.insert(room_id.get_id().to_string(), pool);
return pools[room_id.get_id()].clone();
}
}
pub fn create_database_if_needed(room_id: &RoomId) {
let pool = pool_by_room_id(room_id);
let conn = pool.get().unwrap();
create_room_tables_if_needed(&conn);
}
pub fn create_room_tables_if_needed(conn: &DatabaseConnection) {
// Messages
// The `id` field is needed to make `rowid` stable, which is important because otherwise
// the `id`s in this table won't correspond to those in the deleted messages table
let messages_table_cmd = "CREATE TABLE IF NOT EXISTS messages (
id INTEGER PRIMARY KEY,
public_key TEXT,
timestamp INTEGER,
data TEXT,
signature TEXT,
is_deleted INTEGER
)";
conn.execute(&messages_table_cmd, params![]).expect("Couldn't create messages table.");
// Deleted messages
let deleted_messages_table_cmd = "CREATE TABLE IF NOT EXISTS deleted_messages (
id INTEGER PRIMARY KEY,
deleted_message_id INTEGER
)";
conn.execute(&deleted_messages_table_cmd, params![])
.expect("Couldn't create deleted messages table.");
// Moderators
let moderators_table_cmd = "CREATE TABLE IF NOT EXISTS moderators (
public_key TEXT
)";
conn.execute(&moderators_table_cmd, params![]).expect("Couldn't create moderators table.");
// Block list
let block_list_table_cmd = "CREATE TABLE IF NOT EXISTS block_list (
public_key TEXT
)";
conn.execute(&block_list_table_cmd, params![]).expect("Couldn't create block list table.");
// Pending tokens
// Note that a given public key can have multiple pending tokens
let pending_tokens_table_cmd = "CREATE TABLE IF NOT EXISTS pending_tokens (
public_key TEXT,
timestamp INTEGER,
token BLOB
)";
conn.execute(&pending_tokens_table_cmd, params![])
.expect("Couldn't create pending tokens table.");
// Tokens
// The token is stored as hex here (rather than as bytes) because it's more convenient for lookup
let tokens_table_cmd = "CREATE TABLE IF NOT EXISTS tokens (
public_key TEXT,
timestamp INTEGER,
token TEXT PRIMARY KEY
)";
conn.execute(&tokens_table_cmd, params![]).expect("Couldn't create tokens table.");
// Files
let files_table_cmd = "CREATE TABLE IF NOT EXISTS files (
id TEXT PRIMARY KEY,
timestamp INTEGER
)";
conn.execute(&files_table_cmd, params![]).expect("Couldn't create files table.");
// User activity table
let user_activity_table_cmd = "CREATE TABLE IF NOT EXISTS user_activity (
public_key TEXT PRIMARY KEY,
last_active INTEGER NOT NULL
)";
conn.execute(&user_activity_table_cmd, params![])
.expect("Couldn't create user activity table.");
}
// Pruning
pub async fn prune_tokens_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_tokens().await;
});
}
}
pub async fn prune_pending_tokens_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_pending_tokens().await;
});
}
}
pub async fn prune_files_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::days(1).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_files(FILE_EXPIRATION).await;
});
}
}
async fn prune_tokens() {
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
for room in rooms {
let pool = pool_by_room_id(&room);
// It's not catastrophic if we fail to prune the database for a given room
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => return error!("Couldn't prune tokens due to error: {}.", e),
};
let stmt = "DELETE FROM tokens WHERE timestamp < (?1)";
let now = chrono::Utc::now().timestamp();
let expiration = now - TOKEN_EXPIRATION;
match conn.execute(&stmt, params![expiration]) {
Ok(_) => (),
Err(e) => return error!("Couldn't prune tokens due to error: {}.", e),
};
}
info!("Pruned tokens.");
}
async fn prune_pending_tokens() {
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
for room in rooms {
let pool = pool_by_room_id(&room);
// It's not catastrophic if we fail to prune the database for a given room
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e),
};
let stmt = "DELETE FROM pending_tokens WHERE timestamp < (?1)";
let now = chrono::Utc::now().timestamp();
let expiration = now - PENDING_TOKEN_EXPIRATION;
match conn.execute(&stmt, params![expiration]) {
Ok(_) => (),
Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e),
};
}
info!("Pruned pending tokens.");
}
fn get_expired_file_ids(
pool: &DatabaseConnectionPool, file_expiration: i64,
) -> Result<Vec<String>, ()> {
let now = chrono::Utc::now().timestamp();
let expiration = now - file_expiration;
// Get a database connection and open a transaction
let conn = pool.get().map_err(|e| {
error!("Couldn't get database connection to prune files due to error: {}.", e);
})?;
// Get the IDs of the files to delete
let raw_query = "SELECT id FROM files WHERE timestamp < (?1)";
let mut query = conn.prepare(&raw_query).map_err(|e| {
error!("Couldn't prepare query to prune files due to error: {}.", e);
})?;
let rows = query.query_map(params![expiration], |row| row.get(0)).map_err(|e| {
error!("Couldn't prune files due to error: {} (expiration = {}).", e, expiration);
})?;
Ok(rows.filter_map(|result| result.ok()).collect())
}
pub async fn prune_files_for_room(
pool: &DatabaseConnectionPool, room: &RoomId, file_expiration: i64,
) {
let ids = get_expired_file_ids(&pool, file_expiration);
match ids {
Ok(ids) if !ids.is_empty() => {
// Delete the files
let futs = ids.iter().map(|id| async move {
(
tokio::fs::remove_file(format!("files/{}_files/{}", room.get_id(), id)).await,
id.to_owned(),
)
});
let results = futures::future::join_all(futs).await;
for (res, id) in results {
if let Err(err) = res {
error!(
"Couldn't delete file: {} from room: {} due to error: {}.",
id,
room.get_id(),
err
);
}
}
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => {
return error!(
"Couldn't get database connection to prune files due to error: {}.",
e
)
}
};
// Measure the time it takes to delete all files sequentially
// (this might become a problem since we're not using an async interface)
let now = std::time::Instant::now();
// Remove the file records from the database
// FIXME: It'd be great to do this in a single statement, but apparently this is not supported very well
for id in ids {
let stmt = "DELETE FROM files WHERE id = (?1)";
match conn.execute(&stmt, params![id]) {
Ok(_) => (),
Err(e) => {
return error!("Couldn't prune file with ID: {} due to error: {}.", id, e)
}
};
}
// Log the result
info!("Pruned files for room: {}. Took: {:?}", room.get_id(), now.elapsed());
}
Ok(_) => {
// empty
}
Err(_) => {
// It's not catastrophic if we fail to prune the database for a given room
}
}
}
pub async fn prune_files(file_expiration: i64) {
// The expiration setting is passed in for testing purposes
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
let futs = rooms.into_iter().map(|room| async move {
let pool = pool_by_room_id(&room);
prune_files_for_room(&pool, &room, file_expiration).await;
});
futures::future::join_all(futs).await;
}
// Migration
pub fn perform_migration() {
let rooms = match get_all_room_ids() {
Ok(ids) => ids,
Err(_e) => {
return error!("Couldn't get all room IDs.");
}
};
let create_tokens_table_cmd = "CREATE TABLE IF NOT EXISTS tokens (
public_key TEXT,
timestamp INTEGER,
token TEXT PRIMARY KEY
)";
let migrations =
Migrations::new(vec![M::up("DROP TABLE tokens"), M::up(&create_tokens_table_cmd)]);
for room in rooms {
create_database_if_needed(&room);
let pool = pool_by_room_id(&room);
let mut conn = pool.get().unwrap();
migrations.to_latest(&mut conn).unwrap();
}
}
// Utilities
fn get_all_room_ids() -> Result<Vec<RoomId>, Error> {
// Get a database connection
let conn = MAIN_POOL.get().map_err(|_| Error::DatabaseFailedInternally)?;
// Query the database
let raw_query = "SELECT id FROM main";
let mut query = conn.prepare(&raw_query).map_err(|_| Error::DatabaseFailedInternally)?;
let rows = match query.query_map(params![], |row| row.get(0)) {
Ok(rows) => rows,
Err(e) => {
error!("Couldn't query database due to error: {}.", e);
return Err(Error::DatabaseFailedInternally);
}
};
let room_ids: Vec<_> = rows
.filter_map(|result: Result<String, _>| result.ok())
.map(|opt| RoomId::new(&opt))
.flatten()
.collect();
// Return
return Ok(room_ids);
}
| {
let main_table_cmd = "CREATE TABLE IF NOT EXISTS main (
id TEXT PRIMARY KEY,
name TEXT,
image_id TEXT
)";
conn.execute(&main_table_cmd, params![]).expect("Couldn't create main table.");
} | identifier_body |
storage.rs | use regex::Regex;
use std::collections::HashMap;
use std::path::Path;
use std::sync::Mutex;
use log::{error, info};
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::params;
use rusqlite_migration::{Migrations, M};
use super::errors::Error;
pub type DatabaseConnection = r2d2::PooledConnection<SqliteConnectionManager>;
pub type DatabaseConnectionPool = r2d2::Pool<SqliteConnectionManager>;
#[derive(PartialEq, Eq, Hash)]
pub struct RoomId {
id: String,
}
lazy_static::lazy_static! {
// Letters, digits, "-" and "_" only; must be between 1 and 64 characters
static ref REGULAR_CHARACTERS_ONLY: Regex = Regex::new(r"^[\w-]{1,64}$").unwrap();
}
impl RoomId {
pub fn new(room_id: &str) -> Option<RoomId> {
if REGULAR_CHARACTERS_ONLY.is_match(room_id) {
return Some(RoomId { id: room_id.to_string() });
} else {
return None;
}
}
pub fn get_id(&self) -> &str {
&self.id
}
}
// Main
lazy_static::lazy_static! {
pub static ref MAIN_POOL: DatabaseConnectionPool = {
let file_name = "database.db";
let db_manager = r2d2_sqlite::SqliteConnectionManager::file(file_name);
return r2d2::Pool::new(db_manager).unwrap();
};
}
pub fn create_main_database_if_needed() {
let pool = &MAIN_POOL;
let conn = pool.get().unwrap();
create_main_tables_if_needed(&conn);
}
fn create_main_tables_if_needed(conn: &DatabaseConnection) {
let main_table_cmd = "CREATE TABLE IF NOT EXISTS main (
id TEXT PRIMARY KEY,
name TEXT,
image_id TEXT
)";
conn.execute(&main_table_cmd, params![]).expect("Couldn't create main table.");
}
// Rooms
pub const PENDING_TOKEN_EXPIRATION: i64 = 10 * 60;
pub const TOKEN_EXPIRATION: i64 = 7 * 24 * 60 * 60;
pub const FILE_EXPIRATION: i64 = 15 * 24 * 60 * 60;
lazy_static::lazy_static! {
static ref POOLS: Mutex<HashMap<String, DatabaseConnectionPool>> = Mutex::new(HashMap::new());
}
pub fn pool_by_room_id(room_id: &RoomId) -> DatabaseConnectionPool {
let mut pools = POOLS.lock().unwrap();
if let Some(pool) = pools.get(room_id.get_id()) {
return pool.clone();
} else {
let raw_path = format!("rooms/{}.db", room_id.get_id());
let path = Path::new(&raw_path);
let db_manager = r2d2_sqlite::SqliteConnectionManager::file(path);
let pool = r2d2::Pool::new(db_manager).unwrap();
pools.insert(room_id.get_id().to_string(), pool);
return pools[room_id.get_id()].clone();
}
}
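// Hypothetical usage sketch (room name invented): pools are created lazily
// and cached in POOLS, so repeated lookups for the same room are cheap.
//
//     if let Some(room) = RoomId::new("my-room") {
//         let pool = pool_by_room_id(&room);
//         let conn = pool.get().unwrap();
//         // ... queries now run against rooms/my-room.db ...
//     }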
pub fn create_database_if_needed(room_id: &RoomId) {
let pool = pool_by_room_id(room_id);
let conn = pool.get().unwrap();
create_room_tables_if_needed(&conn);
}
pub fn create_room_tables_if_needed(conn: &DatabaseConnection) {
// Messages
// The `id` field is needed to make `rowid` stable, which is important because otherwise
// the `id`s in this table won't correspond to those in the deleted messages table
let messages_table_cmd = "CREATE TABLE IF NOT EXISTS messages (
id INTEGER PRIMARY KEY,
public_key TEXT,
timestamp INTEGER,
data TEXT,
signature TEXT,
is_deleted INTEGER
)";
conn.execute(&messages_table_cmd, params![]).expect("Couldn't create messages table.");
// Deleted messages
let deleted_messages_table_cmd = "CREATE TABLE IF NOT EXISTS deleted_messages (
id INTEGER PRIMARY KEY,
deleted_message_id INTEGER
)";
conn.execute(&deleted_messages_table_cmd, params![])
.expect("Couldn't create deleted messages table.");
// Moderators
let moderators_table_cmd = "CREATE TABLE IF NOT EXISTS moderators (
public_key TEXT
)";
conn.execute(&moderators_table_cmd, params![]).expect("Couldn't create moderators table.");
// Block list
let block_list_table_cmd = "CREATE TABLE IF NOT EXISTS block_list (
public_key TEXT
)";
conn.execute(&block_list_table_cmd, params![]).expect("Couldn't create block list table.");
// Pending tokens
// Note that a given public key can have multiple pending tokens
let pending_tokens_table_cmd = "CREATE TABLE IF NOT EXISTS pending_tokens (
public_key TEXT,
timestamp INTEGER,
token BLOB
)";
conn.execute(&pending_tokens_table_cmd, params![])
.expect("Couldn't create pending tokens table.");
// Tokens
// The token is stored as hex here (rather than as bytes) because it's more convenient for lookup
let tokens_table_cmd = "CREATE TABLE IF NOT EXISTS tokens (
public_key TEXT,
timestamp INTEGER,
token TEXT PRIMARY KEY
)";
conn.execute(&tokens_table_cmd, params![]).expect("Couldn't create tokens table.");
// Files
let files_table_cmd = "CREATE TABLE IF NOT EXISTS files (
id TEXT PRIMARY KEY,
timestamp INTEGER
)";
conn.execute(&files_table_cmd, params![]).expect("Couldn't create files table.");
// User activity table
let user_activity_table_cmd = "CREATE TABLE IF NOT EXISTS user_activity (
public_key TEXT PRIMARY KEY,
last_active INTEGER NOT NULL
)";
conn.execute(&user_activity_table_cmd, params![])
.expect("Couldn't create user activity table.");
}
// Pruning
pub async fn prune_tokens_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_tokens().await;
});
}
}
pub async fn prune_pending_tokens_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_pending_tokens().await;
});
}
}
pub async fn prune_files_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::days(1).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_files(FILE_EXPIRATION).await;
});
}
}
async fn prune_tokens() {
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
for room in rooms {
let pool = pool_by_room_id(&room);
// It's not catastrophic if we fail to prune the database for a given room
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => return error!("Couldn't prune tokens due to error: {}.", e),
};
let stmt = "DELETE FROM tokens WHERE timestamp < (?1)";
let now = chrono::Utc::now().timestamp();
let expiration = now - TOKEN_EXPIRATION;
match conn.execute(&stmt, params![expiration]) {
Ok(_) => (),
Err(e) => return error!("Couldn't prune tokens due to error: {}.", e),
};
}
info!("Pruned tokens.");
}
async fn prune_pending_tokens() {
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
for room in rooms {
let pool = pool_by_room_id(&room);
// It's not catastrophic if we fail to prune the database for a given room
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e), | Ok(_) => (),
Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e),
};
}
info!("Pruned pending tokens.");
}
fn get_expired_file_ids(
pool: &DatabaseConnectionPool, file_expiration: i64,
) -> Result<Vec<String>, ()> {
let now = chrono::Utc::now().timestamp();
let expiration = now - file_expiration;
// Get a database connection and open a transaction
let conn = pool.get().map_err(|e| {
error!("Couldn't get database connection to prune files due to error: {}.", e);
})?;
// Get the IDs of the files to delete
let raw_query = "SELECT id FROM files WHERE timestamp < (?1)";
let mut query = conn.prepare(&raw_query).map_err(|e| {
error!("Couldn't prepare query to prune files due to error: {}.", e);
})?;
let rows = query.query_map(params![expiration], |row| row.get(0)).map_err(|e| {
error!("Couldn't prune files due to error: {} (expiration = {}).", e, expiration);
})?;
Ok(rows.filter_map(|result| result.ok()).collect())
}
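// Descriptive note: the ()-typed error appears deliberate here -- failures
// are already logged inside the map_err closures, and callers treat Err(())
// as "skip pruning for this room" rather than propagating details.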
pub async fn prune_files_for_room(
pool: &DatabaseConnectionPool, room: &RoomId, file_expiration: i64,
) {
let ids = get_expired_file_ids(&pool, file_expiration);
match ids {
Ok(ids) if !ids.is_empty() => {
// Delete the files
let futs = ids.iter().map(|id| async move {
(
tokio::fs::remove_file(format!("files/{}_files/{}", room.get_id(), id)).await,
id.to_owned(),
)
});
let results = futures::future::join_all(futs).await;
for (res, id) in results {
if let Err(err) = res {
error!(
"Couldn't delete file: {} from room: {} due to error: {}.",
id,
room.get_id(),
err
);
}
}
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => {
return error!(
"Couldn't get database connection to prune files due to error: {}.",
e
)
}
};
// Measure the time it takes to delete all files sequentially
// (this might become a problem since we're not using an async interface)
let now = std::time::Instant::now();
// Remove the file records from the database
// FIXME: It'd be great to do this in a single statement, but apparently this is not supported very well
for id in ids {
let stmt = "DELETE FROM files WHERE id = (?1)";
match conn.execute(&stmt, params![id]) {
Ok(_) => (),
Err(e) => {
return error!("Couldn't prune file with ID: {} due to error: {}.", id, e)
}
};
}
// Log the result
info!("Pruned files for room: {}. Took: {:?}", room.get_id(), now.elapsed());
}
Ok(_) => {
// empty
}
Err(_) => {
// It's not catastrophic if we fail to prune the database for a given room
}
}
}
pub async fn prune_files(file_expiration: i64) {
// The expiration setting is passed in for testing purposes
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
let futs = rooms.into_iter().map(|room| async move {
let pool = pool_by_room_id(&room);
prune_files_for_room(&pool, &room, file_expiration).await;
});
futures::future::join_all(futs).await;
}
// Migration
pub fn perform_migration() {
let rooms = match get_all_room_ids() {
Ok(ids) => ids,
Err(_e) => {
return error!("Couldn't get all room IDs.");
}
};
let create_tokens_table_cmd = "CREATE TABLE IF NOT EXISTS tokens (
public_key TEXT,
timestamp INTEGER,
token TEXT PRIMARY KEY
)";
let migrations =
Migrations::new(vec![M::up("DROP TABLE tokens"), M::up(&create_tokens_table_cmd)]);
for room in rooms {
create_database_if_needed(&room);
let pool = pool_by_room_id(&room);
let mut conn = pool.get().unwrap();
migrations.to_latest(&mut conn).unwrap();
}
}
// Utilities
fn get_all_room_ids() -> Result<Vec<RoomId>, Error> {
// Get a database connection
let conn = MAIN_POOL.get().map_err(|_| Error::DatabaseFailedInternally)?;
// Query the database
let raw_query = "SELECT id FROM main";
let mut query = conn.prepare(&raw_query).map_err(|_| Error::DatabaseFailedInternally)?;
let rows = match query.query_map(params![], |row| row.get(0)) {
Ok(rows) => rows,
Err(e) => {
error!("Couldn't query database due to error: {}.", e);
return Err(Error::DatabaseFailedInternally);
}
};
let room_ids: Vec<_> = rows
.filter_map(|result: Result<String, _>| result.ok())
.map(|opt| RoomId::new(&opt))
.flatten()
.collect();
// Return
return Ok(room_ids);
} | };
let stmt = "DELETE FROM pending_tokens WHERE timestamp < (?1)";
let now = chrono::Utc::now().timestamp();
let expiration = now - PENDING_TOKEN_EXPIRATION;
match conn.execute(&stmt, params![expiration]) { | random_line_split |
storage.rs | use regex::Regex;
use std::collections::HashMap;
use std::path::Path;
use std::sync::Mutex;
use log::{error, info};
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::params;
use rusqlite_migration::{Migrations, M};
use super::errors::Error;
pub type DatabaseConnection = r2d2::PooledConnection<SqliteConnectionManager>;
pub type DatabaseConnectionPool = r2d2::Pool<SqliteConnectionManager>;
#[derive(PartialEq, Eq, Hash)]
pub struct RoomId {
id: String,
}
lazy_static::lazy_static! {
// Alphanumeric, Decimals "-" & "_" only and must be between 1 - 64 characters
static ref REGULAR_CHARACTERS_ONLY: Regex = Regex::new(r"^[\w-]{1,64}$").unwrap();
}
impl RoomId {
pub fn new(room_id: &str) -> Option<RoomId> {
if REGULAR_CHARACTERS_ONLY.is_match(room_id) {
return Some(RoomId { id: room_id.to_string() });
} else {
return None;
}
}
pub fn get_id(&self) -> &str {
&self.id
}
}
// Main
lazy_static::lazy_static! {
pub static ref MAIN_POOL: DatabaseConnectionPool = {
let file_name = "database.db";
let db_manager = r2d2_sqlite::SqliteConnectionManager::file(file_name);
return r2d2::Pool::new(db_manager).unwrap();
};
}
pub fn create_main_database_if_needed() {
let pool = &MAIN_POOL;
let conn = pool.get().unwrap();
create_main_tables_if_needed(&conn);
}
fn | (conn: &DatabaseConnection) {
let main_table_cmd = "CREATE TABLE IF NOT EXISTS main (
id TEXT PRIMARY KEY,
name TEXT,
image_id TEXT
)";
conn.execute(&main_table_cmd, params![]).expect("Couldn't create main table.");
}
// Rooms
pub const PENDING_TOKEN_EXPIRATION: i64 = 10 * 60;
pub const TOKEN_EXPIRATION: i64 = 7 * 24 * 60 * 60;
pub const FILE_EXPIRATION: i64 = 15 * 24 * 60 * 60;
lazy_static::lazy_static! {
static ref POOLS: Mutex<HashMap<String, DatabaseConnectionPool>> = Mutex::new(HashMap::new());
}
pub fn pool_by_room_id(room_id: &RoomId) -> DatabaseConnectionPool {
let mut pools = POOLS.lock().unwrap();
if let Some(pool) = pools.get(room_id.get_id()) {
return pool.clone();
} else {
let raw_path = format!("rooms/{}.db", room_id.get_id());
let path = Path::new(&raw_path);
let db_manager = r2d2_sqlite::SqliteConnectionManager::file(path);
let pool = r2d2::Pool::new(db_manager).unwrap();
pools.insert(room_id.get_id().to_string(), pool);
return pools[room_id.get_id()].clone();
}
}
pub fn create_database_if_needed(room_id: &RoomId) {
let pool = pool_by_room_id(room_id);
let conn = pool.get().unwrap();
create_room_tables_if_needed(&conn);
}
pub fn create_room_tables_if_needed(conn: &DatabaseConnection) {
// Messages
// The `id` field is needed to make `rowid` stable, which is important because otherwise
// the `id`s in this table won't correspond to those in the deleted messages table
let messages_table_cmd = "CREATE TABLE IF NOT EXISTS messages (
id INTEGER PRIMARY KEY,
public_key TEXT,
timestamp INTEGER,
data TEXT,
signature TEXT,
is_deleted INTEGER
)";
conn.execute(&messages_table_cmd, params![]).expect("Couldn't create messages table.");
// Deleted messages
let deleted_messages_table_cmd = "CREATE TABLE IF NOT EXISTS deleted_messages (
id INTEGER PRIMARY KEY,
deleted_message_id INTEGER
)";
conn.execute(&deleted_messages_table_cmd, params![])
.expect("Couldn't create deleted messages table.");
// Moderators
let moderators_table_cmd = "CREATE TABLE IF NOT EXISTS moderators (
public_key TEXT
)";
conn.execute(&moderators_table_cmd, params![]).expect("Couldn't create moderators table.");
// Block list
let block_list_table_cmd = "CREATE TABLE IF NOT EXISTS block_list (
public_key TEXT
)";
conn.execute(&block_list_table_cmd, params![]).expect("Couldn't create block list table.");
// Pending tokens
// Note that a given public key can have multiple pending tokens
let pending_tokens_table_cmd = "CREATE TABLE IF NOT EXISTS pending_tokens (
public_key TEXT,
timestamp INTEGER,
token BLOB
)";
conn.execute(&pending_tokens_table_cmd, params![])
.expect("Couldn't create pending tokens table.");
// Tokens
// The token is stored as hex here (rather than as bytes) because it's more convenient for lookup
let tokens_table_cmd = "CREATE TABLE IF NOT EXISTS tokens (
public_key TEXT,
timestamp INTEGER,
token TEXT PRIMARY KEY
)";
conn.execute(&tokens_table_cmd, params![]).expect("Couldn't create tokens table.");
// Files
let files_table_cmd = "CREATE TABLE IF NOT EXISTS files (
id TEXT PRIMARY KEY,
timestamp INTEGER
)";
conn.execute(&files_table_cmd, params![]).expect("Couldn't create files table.");
// User activity table
let user_activity_table_cmd = "CREATE TABLE IF NOT EXISTS user_activity (
public_key TEXT PRIMARY KEY,
last_active INTEGER NOT NULL
)";
conn.execute(&user_activity_table_cmd, params![])
.expect("Couldn't create user activity table.");
}
// Pruning
pub async fn prune_tokens_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_tokens().await;
});
}
}
pub async fn prune_pending_tokens_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_pending_tokens().await;
});
}
}
pub async fn prune_files_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::days(1).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_files(FILE_EXPIRATION).await;
});
}
}
async fn prune_tokens() {
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
for room in rooms {
let pool = pool_by_room_id(&room);
// It's not catastrophic if we fail to prune the database for a given room
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => return error!("Couldn't prune tokens due to error: {}.", e),
};
let stmt = "DELETE FROM tokens WHERE timestamp < (?1)";
let now = chrono::Utc::now().timestamp();
let expiration = now - TOKEN_EXPIRATION;
match conn.execute(&stmt, params![expiration]) {
Ok(_) => (),
Err(e) => return error!("Couldn't prune tokens due to error: {}.", e),
};
}
info!("Pruned tokens.");
}
async fn prune_pending_tokens() {
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
for room in rooms {
let pool = pool_by_room_id(&room);
// It's not catastrophic if we fail to prune the database for a given room
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e),
};
let stmt = "DELETE FROM pending_tokens WHERE timestamp < (?1)";
let now = chrono::Utc::now().timestamp();
let expiration = now - PENDING_TOKEN_EXPIRATION;
match conn.execute(&stmt, params![expiration]) {
Ok(_) => (),
Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e),
};
}
info!("Pruned pending tokens.");
}
fn get_expired_file_ids(
pool: &DatabaseConnectionPool, file_expiration: i64,
) -> Result<Vec<String>, ()> {
let now = chrono::Utc::now().timestamp();
let expiration = now - file_expiration;
// Get a database connection and open a transaction
let conn = pool.get().map_err(|e| {
error!("Couldn't get database connection to prune files due to error: {}.", e);
})?;
// Get the IDs of the files to delete
let raw_query = "SELECT id FROM files WHERE timestamp < (?1)";
let mut query = conn.prepare(&raw_query).map_err(|e| {
error!("Couldn't prepare query to prune files due to error: {}.", e);
})?;
let rows = query.query_map(params![expiration], |row| row.get(0)).map_err(|e| {
error!("Couldn't prune files due to error: {} (expiration = {}).", e, expiration);
})?;
Ok(rows.filter_map(|result| result.ok()).collect())
}
pub async fn prune_files_for_room(
pool: &DatabaseConnectionPool, room: &RoomId, file_expiration: i64,
) {
let ids = get_expired_file_ids(&pool, file_expiration);
match ids {
Ok(ids) if !ids.is_empty() => {
// Delete the files
let futs = ids.iter().map(|id| async move {
(
tokio::fs::remove_file(format!("files/{}_files/{}", room.get_id(), id)).await,
id.to_owned(),
)
});
let results = futures::future::join_all(futs).await;
for (res, id) in results {
if let Err(err) = res {
error!(
"Couldn't delete file: {} from room: {} due to error: {}.",
id,
room.get_id(),
err
);
}
}
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => {
return error!(
"Couldn't get database connection to prune files due to error: {}.",
e
)
}
};
// Measure the time it takes to delete all files sequentially
// (this might become a problem since we're not using an async interface)
let now = std::time::Instant::now();
// Remove the file records from the database
// FIXME: It'd be great to do this in a single statement, but apparently this is not supported very well
for id in ids {
let stmt = "DELETE FROM files WHERE id = (?1)";
match conn.execute(&stmt, params![id]) {
Ok(_) => (),
Err(e) => {
return error!("Couldn't prune file with ID: {} due to error: {}.", id, e)
}
};
}
// Log the result
info!("Pruned files for room: {}. Took: {:?}", room.get_id(), now.elapsed());
}
Ok(_) => {
// empty
}
Err(_) => {
// It's not catastrophic if we fail to prune the database for a given room
}
}
}
pub async fn prune_files(file_expiration: i64) {
// The expiration setting is passed in for testing purposes
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
let futs = rooms.into_iter().map(|room| async move {
let pool = pool_by_room_id(&room);
prune_files_for_room(&pool, &room, file_expiration).await;
});
futures::future::join_all(futs).await;
}
// Migration
pub fn perform_migration() {
let rooms = match get_all_room_ids() {
Ok(ids) => ids,
Err(_e) => {
return error!("Couldn't get all room IDs.");
}
};
let create_tokens_table_cmd = "CREATE TABLE IF NOT EXISTS tokens (
public_key TEXT,
timestamp INTEGER,
token TEXT PRIMARY KEY
)";
let migrations =
Migrations::new(vec![M::up("DROP TABLE tokens"), M::up(&create_tokens_table_cmd)]);
for room in rooms {
create_database_if_needed(&room);
let pool = pool_by_room_id(&room);
let mut conn = pool.get().unwrap();
migrations.to_latest(&mut conn).unwrap();
}
}
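// Descriptive note: rusqlite_migration tracks the applied schema version in
// each database, so rerunning this is a no-op once applied; the initial
// "DROP TABLE tokens" step does discard existing tokens the first time,
// presumably forcing clients to obtain new tokens afterwards.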
// Utilities
fn get_all_room_ids() -> Result<Vec<RoomId>, Error> {
// Get a database connection
let conn = MAIN_POOL.get().map_err(|_| Error::DatabaseFailedInternally)?;
// Query the database
let raw_query = "SELECT id FROM main";
let mut query = conn.prepare(&raw_query).map_err(|_| Error::DatabaseFailedInternally)?;
let rows = match query.query_map(params![], |row| row.get(0)) {
Ok(rows) => rows,
Err(e) => {
error!("Couldn't query database due to error: {}.", e);
return Err(Error::DatabaseFailedInternally);
}
};
let room_ids: Vec<_> = rows
.filter_map(|result: Result<String, _>| result.ok())
.map(|opt| RoomId::new(&opt))
.flatten()
.collect();
// Return
return Ok(room_ids);
}
| create_main_tables_if_needed | identifier_name |
grid.go | package grid
import (
"encoding/json"
"fmt"
"net/http"
"sort"
"strings"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/adapters"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/errortypes"
"github.com/prebid/prebid-server/openrtb_ext"
"github.com/prebid/prebid-server/util/maputil"
)
type GridAdapter struct {
endpoint string
}
type GridBid struct {
*openrtb2.Bid
AdmNative json.RawMessage `json:"adm_native,omitempty"`
ContentType openrtb_ext.BidType `json:"content_type"`
}
type GridSeatBid struct {
*openrtb2.SeatBid
Bid []GridBid `json:"bid"`
}
type GridResponse struct {
*openrtb2.BidResponse
SeatBid []GridSeatBid `json:"seatbid,omitempty"`
}
type GridBidExt struct {
Bidder ExtBidder `json:"bidder"`
}
type ExtBidder struct {
Grid ExtBidderGrid `json:"grid"`
}
type ExtBidderGrid struct {
DemandSource string `json:"demandSource"`
}
type ExtImpDataAdServer struct {
Name string `json:"name"`
AdSlot string `json:"adslot"`
}
type ExtImpData struct {
PbAdslot string `json:"pbadslot,omitempty"`
AdServer *ExtImpDataAdServer `json:"adserver,omitempty"`
}
type ExtImp struct {
Prebid *openrtb_ext.ExtImpPrebid `json:"prebid,omitempty"`
Bidder json.RawMessage `json:"bidder"`
Data *ExtImpData `json:"data,omitempty"`
Gpid string `json:"gpid,omitempty"`
Skadn json.RawMessage `json:"skadn,omitempty"`
Context json.RawMessage `json:"context,omitempty"`
}
type KeywordSegment struct {
Name string `json:"name"`
Value string `json:"value"`
}
type KeywordsPublisherItem struct {
Name string `json:"name"`
Segments []KeywordSegment `json:"segments"`
}
type KeywordsPublisher map[string][]KeywordsPublisherItem
type Keywords map[string]KeywordsPublisher
// buildConsolidatedKeywordsReqExt builds a new request.ext JSON incorporating request.site.keywords, request.user.keywords,
// request.imp[0].ext.keywords, and request.ext.keywords. Invalid keywords in request.imp[0].ext.keywords are not incorporated.
// Invalid keywords in request.ext.keywords.site and request.ext.keywords.user are dropped.
func buildConsolidatedKeywordsReqExt(openRTBUser, openRTBSite string, firstImpExt, requestExt json.RawMessage) (json.RawMessage, error) {
// unmarshal ext to object map
requestExtMap := parseExtToMap(requestExt)
firstImpExtMap := parseExtToMap(firstImpExt)
// extract `keywords` field
requestExtKeywordsMap := extractKeywordsMap(requestExtMap)
firstImpExtKeywordsMap := extractBidderKeywordsMap(firstImpExtMap)
// parse + merge keywords
keywords := parseKeywordsFromMap(requestExtKeywordsMap) // request.ext.keywords
mergeKeywords(keywords, parseKeywordsFromMap(firstImpExtKeywordsMap)) // request.imp[0].ext.bidder.keywords
mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBUser, "user")) // request.user.keywords
mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBSite, "site")) // request.site.keywords
// overlay site + user keywords
if site, exists := keywords["site"]; exists && len(site) > 0 {
requestExtKeywordsMap["site"] = site
} else {
delete(requestExtKeywordsMap, "site")
}
if user, exists := keywords["user"]; exists && len(user) > 0 {
requestExtKeywordsMap["user"] = user
} else {
delete(requestExtKeywordsMap, "user")
}
// reconcile keywords with request.ext
if len(requestExtKeywordsMap) > 0 {
requestExtMap["keywords"] = requestExtKeywordsMap
} else {
delete(requestExtMap, "keywords")
}
// marshal final result
if len(requestExtMap) > 0 {
return json.Marshal(requestExtMap)
}
return nil, nil
}
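// Illustrative sketch (values invented): with request.user.keywords set to
// "sports,news" and no other keyword sources, the consolidated ext would
// carry a user section shaped like:
//
//	{"keywords":{"user":{"ortb2":[{"name":"keywords","segments":[
//	    {"name":"keywords","value":"sports"},
//	    {"name":"keywords","value":"news"}]}]}}}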
func parseExtToMap(ext json.RawMessage) map[string]interface{} {
var root map[string]interface{}
if err := json.Unmarshal(ext, &root); err != nil {
return make(map[string]interface{})
}
return root
}
func extractKeywordsMap(ext map[string]interface{}) map[string]interface{} {
if keywords, exists := maputil.ReadEmbeddedMap(ext, "keywords"); exists {
return keywords
}
return make(map[string]interface{})
}
func extractBidderKeywordsMap(ext map[string]interface{}) map[string]interface{} {
if bidder, exists := maputil.ReadEmbeddedMap(ext, "bidder"); exists {
return extractKeywordsMap(bidder)
}
return make(map[string]interface{})
}
func parseKeywordsFromMap(extKeywords map[string]interface{}) Keywords {
keywords := make(Keywords)
for k, v := range extKeywords {
// keywords may only be provided in the site and user sections
if k != "site" && k != "user" {
continue
}
// the site or user sections must be an object
if section, ok := v.(map[string]interface{}); ok {
keywords[k] = parseKeywordsFromSection(section)
}
}
return keywords
}
func parseKeywordsFromSection(section map[string]interface{}) KeywordsPublisher {
keywordsPublishers := make(KeywordsPublisher) | for publisherKey, publisherValue := range section {
// publisher value must be a slice
publisherValueSlice, ok := publisherValue.([]interface{})
if !ok {
continue
}
for _, publisherValueItem := range publisherValueSlice {
// item must be an object
publisherItem, ok := publisherValueItem.(map[string]interface{})
if !ok {
continue
}
// publisher item must have a name
publisherName, ok := maputil.ReadEmbeddedString(publisherItem, "name")
if !ok {
continue
}
var segments []KeywordSegment
// extract valid segments
if segmentsSlice, exists := maputil.ReadEmbeddedSlice(publisherItem, "segments"); exists {
for _, segment := range segmentsSlice {
if segmentMap, ok := segment.(map[string]interface{}); ok {
name, hasName := maputil.ReadEmbeddedString(segmentMap, "name")
value, hasValue := maputil.ReadEmbeddedString(segmentMap, "value")
if hasName && hasValue {
segments = append(segments, KeywordSegment{Name: name, Value: value})
}
}
}
}
// ensure consistent ordering for publisher item map
publisherItemKeys := make([]string, 0, len(publisherItem))
for v := range publisherItem {
publisherItemKeys = append(publisherItemKeys, v)
}
sort.Strings(publisherItemKeys)
// compose compatible alternate segment format
for _, potentialSegmentName := range publisherItemKeys {
potentialSegmentValues := publisherItem[potentialSegmentName]
// values must be an array
if valuesSlice, ok := potentialSegmentValues.([]interface{}); ok {
for _, value := range valuesSlice {
if valueAsString, ok := value.(string); ok {
segments = append(segments, KeywordSegment{Name: potentialSegmentName, Value: valueAsString})
}
}
}
}
if len(segments) > 0 {
keywordsPublishers[publisherKey] = append(keywordsPublishers[publisherKey], KeywordsPublisherItem{Name: publisherName, Segments: segments})
}
}
}
return keywordsPublishers
}
func parseKeywordsFromOpenRTB(keywords, section string) Keywords {
keywordsSplit := strings.Split(keywords, ",")
segments := make([]KeywordSegment, 0, len(keywordsSplit))
for _, v := range keywordsSplit {
if v != "" {
segments = append(segments, KeywordSegment{Name: "keywords", Value: v})
}
}
if len(segments) > 0 {
return map[string]KeywordsPublisher{section: map[string][]KeywordsPublisherItem{"ortb2": {{Name: "keywords", Segments: segments}}}}
}
return make(Keywords)
}
func mergeKeywords(a, b Keywords) {
for key, values := range b {
if _, sectionExists := a[key]; !sectionExists {
a[key] = KeywordsPublisher{}
}
for publisherKey, publisherValues := range values {
a[key][publisherKey] = append(publisherValues, a[key][publisherKey]...)
}
}
}
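// Descriptive note: mergeKeywords prepends b's publisher items, so sources
// merged later in buildConsolidatedKeywordsReqExt (e.g. request.site.keywords)
// end up ahead of items already present under the same publisher key.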
func setImpExtKeywords(request *openrtb2.BidRequest) error {
userKeywords := ""
if request.User != nil {
userKeywords = request.User.Keywords
}
siteKeywords := ""
if request.Site != nil {
siteKeywords = request.Site.Keywords
}
var err error
request.Ext, err = buildConsolidatedKeywordsReqExt(userKeywords, siteKeywords, request.Imp[0].Ext, request.Ext)
return err
}
func processImp(imp *openrtb2.Imp) error {
// get the grid extension
var ext adapters.ExtImpBidder
var gridExt openrtb_ext.ExtImpGrid
if err := json.Unmarshal(imp.Ext, &ext); err != nil {
return err
}
if err := json.Unmarshal(ext.Bidder, &gridExt); err != nil {
return err
}
if gridExt.Uid == 0 {
err := &errortypes.BadInput{
Message: "uid is empty",
}
return err
}
// no error
return nil
}
func setImpExtData(imp openrtb2.Imp) openrtb2.Imp {
var ext ExtImp
if err := json.Unmarshal(imp.Ext, &ext); err != nil {
return imp
}
if ext.Data != nil && ext.Data.AdServer != nil && ext.Data.AdServer.AdSlot != "" {
ext.Gpid = ext.Data.AdServer.AdSlot
extJSON, err := json.Marshal(ext)
if err == nil {
imp.Ext = extJSON
}
}
return imp
}
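// Illustrative example (slot path invented): an imp whose ext carries
// data.adserver.adslot "/123/home" leaves setImpExtData with ext.gpid set
// to "/123/home"; imps without that field pass through unchanged.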
func fixNative(req json.RawMessage) (json.RawMessage, error) {
var gridReq map[string]interface{}
var parsedRequest map[string]interface{}
if err := json.Unmarshal(req, &gridReq); err != nil {
return req, nil
}
if imps, exists := maputil.ReadEmbeddedSlice(gridReq, "imp"); exists {
for _, imp := range imps {
if gridImp, ok := imp.(map[string]interface{}); ok {
native, hasNative := maputil.ReadEmbeddedMap(gridImp, "native")
if hasNative {
request, hasRequest := maputil.ReadEmbeddedString(native, "request")
if hasRequest {
delete(native, "request")
if err := json.Unmarshal([]byte(request), &parsedRequest); err == nil {
native["request_native"] = parsedRequest
} else {
native["request_native"] = request
}
}
}
}
}
}
return json.Marshal(gridReq)
}
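// Illustrative before/after (payload invented): fixNative rewrites the
// stringified native request into an embedded object, e.g.
//
//	in:  {"imp":[{"native":{"request":"{\"ver\":\"1.2\"}"}}]}
//	out: {"imp":[{"native":{"request_native":{"ver":"1.2"}}}]}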
// MakeRequests makes the HTTP requests which should be made to fetch bids.
func (a *GridAdapter) MakeRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) {
var errors = make([]error, 0)
// this will contain all the valid impressions
var validImps []openrtb2.Imp
// pre-process the imps
for _, imp := range request.Imp {
if err := processImp(&imp); err == nil {
validImps = append(validImps, setImpExtData(imp))
} else {
errors = append(errors, err)
}
}
if len(validImps) == 0 {
err := &errortypes.BadInput{
Message: "No valid impressions for grid",
}
errors = append(errors, err)
return nil, errors
}
if err := setImpExtKeywords(request); err != nil {
errors = append(errors, err)
return nil, errors
}
request.Imp = validImps
reqJSON, err := json.Marshal(request)
if err != nil {
errors = append(errors, err)
return nil, errors
}
fixedReqJSON, err := fixNative(reqJSON)
if err != nil {
errors = append(errors, err)
return nil, errors
}
headers := http.Header{}
headers.Add("Content-Type", "application/json;charset=utf-8")
return []*adapters.RequestData{{
Method: "POST",
Uri: a.endpoint,
Body: fixedReqJSON,
Headers: headers,
}}, errors
}
// MakeBids unpacks the server's response into Bids.
func (a *GridAdapter) MakeBids(internalRequest *openrtb2.BidRequest, externalRequest *adapters.RequestData, response *adapters.ResponseData) (*adapters.BidderResponse, []error) {
if response.StatusCode == http.StatusNoContent {
return nil, nil
}
if response.StatusCode == http.StatusBadRequest {
return nil, []error{&errortypes.BadInput{
Message: fmt.Sprintf("Unexpected status code: %d. Run with request.debug = 1 for more info", response.StatusCode),
}}
}
if response.StatusCode != http.StatusOK {
return nil, []error{&errortypes.BadServerResponse{
Message: fmt.Sprintf("Unexpected status code: %d. Run with request.debug = 1 for more info", response.StatusCode),
}}
}
var bidResp GridResponse
if err := json.Unmarshal(response.Body, &bidResp); err != nil {
return nil, []error{err}
}
bidResponse := adapters.NewBidderResponseWithBidsCapacity(1)
for _, sb := range bidResp.SeatBid {
for i := range sb.Bid {
bidMeta, err := getBidMeta(sb.Bid[i].Ext)
bidType, err := getMediaTypeForImp(sb.Bid[i].ImpID, internalRequest.Imp, sb.Bid[i])
if sb.Bid[i].AdmNative != nil && sb.Bid[i].AdM == "" {
if bytes, err := json.Marshal(sb.Bid[i].AdmNative); err == nil {
sb.Bid[i].AdM = string(bytes)
}
}
if err != nil {
return nil, []error{err}
}
openrtb2Bid := sb.Bid[i].Bid
bidResponse.Bids = append(bidResponse.Bids, &adapters.TypedBid{
Bid: openrtb2Bid,
BidType: bidType,
BidMeta: bidMeta,
})
}
}
return bidResponse, nil
}
// Builder builds a new instance of the Grid adapter for the given bidder with the given config.
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {
bidder := &GridAdapter{
endpoint: config.Endpoint,
}
return bidder, nil
}
func getBidMeta(ext json.RawMessage) (*openrtb_ext.ExtBidPrebidMeta, error) {
var bidExt GridBidExt
if err := json.Unmarshal(ext, &bidExt); err != nil {
return nil, err
}
var bidMeta *openrtb_ext.ExtBidPrebidMeta
if bidExt.Bidder.Grid.DemandSource != "" {
bidMeta = &openrtb_ext.ExtBidPrebidMeta{
NetworkName: bidExt.Bidder.Grid.DemandSource,
}
}
return bidMeta, nil
}
func getMediaTypeForImp(impID string, imps []openrtb2.Imp, bidWithType GridBid) (openrtb_ext.BidType, error) {
if bidWithType.ContentType != "" {
return bidWithType.ContentType, nil
} else {
for _, imp := range imps {
if imp.ID == impID {
if imp.Banner != nil {
return openrtb_ext.BidTypeBanner, nil
}
if imp.Video != nil {
return openrtb_ext.BidTypeVideo, nil
}
if imp.Native != nil {
return openrtb_ext.BidTypeNative, nil
}
return "", &errortypes.BadServerResponse{
Message: fmt.Sprintf("Unknown impression type for ID: \"%s\"", impID),
}
}
}
}
// This shouldn't happen. Let's handle it just in case by returning an error.
return "", &errortypes.BadServerResponse{
Message: fmt.Sprintf("Failed to find impression for ID: \"%s\"", impID),
}
} | random_line_split |
|
grid.go | package grid
import (
"encoding/json"
"fmt"
"net/http"
"sort"
"strings"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/adapters"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/errortypes"
"github.com/prebid/prebid-server/openrtb_ext"
"github.com/prebid/prebid-server/util/maputil"
)
type GridAdapter struct {
endpoint string
}
type GridBid struct {
*openrtb2.Bid
AdmNative json.RawMessage `json:"adm_native,omitempty"`
ContentType openrtb_ext.BidType `json:"content_type"`
}
type GridSeatBid struct {
*openrtb2.SeatBid
Bid []GridBid `json:"bid"`
}
type GridResponse struct {
*openrtb2.BidResponse
SeatBid []GridSeatBid `json:"seatbid,omitempty"`
}
type GridBidExt struct {
Bidder ExtBidder `json:"bidder"`
}
type ExtBidder struct {
Grid ExtBidderGrid `json:"grid"`
}
type ExtBidderGrid struct {
DemandSource string `json:"demandSource"`
}
type ExtImpDataAdServer struct {
Name string `json:"name"`
AdSlot string `json:"adslot"`
}
type ExtImpData struct {
PbAdslot string `json:"pbadslot,omitempty"`
AdServer *ExtImpDataAdServer `json:"adserver,omitempty"`
}
type ExtImp struct {
Prebid *openrtb_ext.ExtImpPrebid `json:"prebid,omitempty"`
Bidder json.RawMessage `json:"bidder"`
Data *ExtImpData `json:"data,omitempty"`
Gpid string `json:"gpid,omitempty"`
Skadn json.RawMessage `json:"skadn,omitempty"`
Context json.RawMessage `json:"context,omitempty"`
}
type KeywordSegment struct {
Name string `json:"name"`
Value string `json:"value"`
}
type KeywordsPublisherItem struct {
Name string `json:"name"`
Segments []KeywordSegment `json:"segments"`
}
type KeywordsPublisher map[string][]KeywordsPublisherItem
type Keywords map[string]KeywordsPublisher
// buildConsolidatedKeywordsReqExt builds a new request.ext JSON incorporating request.site.keywords,
// request.user.keywords, request.imp[0].ext.keywords, and request.ext.keywords.
// Invalid keywords in request.imp[0].ext.keywords are not incorporated; invalid keywords in
// request.ext.keywords.site and request.ext.keywords.user are dropped.
func buildConsolidatedKeywordsReqExt(openRTBUser, openRTBSite string, firstImpExt, requestExt json.RawMessage) (json.RawMessage, error) {
// unmarshal ext to object map
requestExtMap := parseExtToMap(requestExt)
firstImpExtMap := parseExtToMap(firstImpExt)
// extract `keywords` field
requestExtKeywordsMap := extractKeywordsMap(requestExtMap)
firstImpExtKeywordsMap := extractBidderKeywordsMap(firstImpExtMap)
// parse + merge keywords
keywords := parseKeywordsFromMap(requestExtKeywordsMap) // request.ext.keywords
mergeKeywords(keywords, parseKeywordsFromMap(firstImpExtKeywordsMap)) // request.imp[0].ext.bidder.keywords
mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBUser, "user")) // request.user.keywords
mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBSite, "site")) // request.site.keywords
// overlay site + user keywords
if site, exists := keywords["site"]; exists && len(site) > 0 {
requestExtKeywordsMap["site"] = site
} else {
delete(requestExtKeywordsMap, "site")
}
if user, exists := keywords["user"]; exists && len(user) > 0 {
requestExtKeywordsMap["user"] = user
} else {
delete(requestExtKeywordsMap, "user")
}
// reconcile keywords with request.ext
if len(requestExtKeywordsMap) > 0 {
requestExtMap["keywords"] = requestExtKeywordsMap
} else {
delete(requestExtMap, "keywords")
}
// marshal final result
if len(requestExtMap) > 0 {
return json.Marshal(requestExtMap)
}
return nil, nil
}
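// Editor-added sketch, not upstream code; the inputs below are assumptions for
// illustration. It shows the consolidation order: request.ext.keywords is the
// base, then imp[0].ext.bidder.keywords, request.user.keywords, and
// request.site.keywords are merged on top before being written back to request.ext.
func exampleConsolidateKeywords() {
impExt := json.RawMessage(`{"bidder":{"keywords":{"user":{"somePublisher":[{"name":"topic","segments":[{"name":"segname","value":"segval"}]}]}}}}`)
ext, err := buildConsolidatedKeywordsReqExt("male,age30", "sports", impExt, json.RawMessage(`{}`))
// ext now carries keywords.user entries from both impExt and "male,age30",
// plus keywords.site entries derived from "sports".
_, _ = ext, err
}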
func parseExtToMap(ext json.RawMessage) map[string]interface{} {
var root map[string]interface{}
if err := json.Unmarshal(ext, &root); err != nil {
return make(map[string]interface{})
}
return root
}
func extractKeywordsMap(ext map[string]interface{}) map[string]interface{} {
if keywords, exists := maputil.ReadEmbeddedMap(ext, "keywords"); exists {
return keywords
}
return make(map[string]interface{})
}
func extractBidderKeywordsMap(ext map[string]interface{}) map[string]interface{} {
if bidder, exists := maputil.ReadEmbeddedMap(ext, "bidder"); exists {
return extractKeywordsMap(bidder)
}
return make(map[string]interface{})
}
func | (extKeywords map[string]interface{}) Keywords {
keywords := make(Keywords)
for k, v := range extKeywords {
// keywords may only be provided in the site and user sections
if k != "site" && k != "user" {
continue
}
// the site or user sections must be an object
if section, ok := v.(map[string]interface{}); ok {
keywords[k] = parseKeywordsFromSection(section)
}
}
return keywords
}
func parseKeywordsFromSection(section map[string]interface{}) KeywordsPublisher {
keywordsPublishers := make(KeywordsPublisher)
for publisherKey, publisherValue := range section {
// publisher value must be a slice
publisherValueSlice, ok := publisherValue.([]interface{})
if !ok {
continue
}
for _, publisherValueItem := range publisherValueSlice {
// item must be an object
publisherItem, ok := publisherValueItem.(map[string]interface{})
if !ok {
continue
}
// publisher item must have a name
publisherName, ok := maputil.ReadEmbeddedString(publisherItem, "name")
if !ok {
continue
}
var segments []KeywordSegment
// extract valid segments
if segmentsSlice, exists := maputil.ReadEmbeddedSlice(publisherItem, "segments"); exists {
for _, segment := range segmentsSlice {
if segmentMap, ok := segment.(map[string]interface{}); ok {
name, hasName := maputil.ReadEmbeddedString(segmentMap, "name")
value, hasValue := maputil.ReadEmbeddedString(segmentMap, "value")
if hasName && hasValue {
segments = append(segments, KeywordSegment{Name: name, Value: value})
}
}
}
}
// ensure consistent ordering for publisher item map
publisherItemKeys := make([]string, 0, len(publisherItem))
for v := range publisherItem {
publisherItemKeys = append(publisherItemKeys, v)
}
sort.Strings(publisherItemKeys)
// compose compatible alternate segment format
for _, potentialSegmentName := range publisherItemKeys {
potentialSegmentValues := publisherItem[potentialSegmentName]
// values must be an array
if valuesSlice, ok := potentialSegmentValues.([]interface{}); ok {
for _, value := range valuesSlice {
if valueAsString, ok := value.(string); ok {
segments = append(segments, KeywordSegment{Name: potentialSegmentName, Value: valueAsString})
}
}
}
}
if len(segments) > 0 {
keywordsPublishers[publisherKey] = append(keywordsPublishers[publisherKey], KeywordsPublisherItem{Name: publisherName, Segments: segments})
}
}
}
return keywordsPublishers
}
func parseKeywordsFromOpenRTB(keywords, section string) Keywords {
keywordsSplit := strings.Split(keywords, ",")
segments := make([]KeywordSegment, 0, len(keywordsSplit))
for _, v := range keywordsSplit {
if v != "" {
segments = append(segments, KeywordSegment{Name: "keywords", Value: v})
}
}
if len(segments) > 0 {
return map[string]KeywordsPublisher{section: map[string][]KeywordsPublisherItem{"ortb2": {{Name: "keywords", Segments: segments}}}}
}
return make(Keywords)
}
func mergeKeywords(a, b Keywords) {
for key, values := range b {
if _, sectionExists := a[key]; !sectionExists {
a[key] = KeywordsPublisher{}
}
for publisherKey, publisherValues := range values {
a[key][publisherKey] = append(publisherValues, a[key][publisherKey]...)
}
}
}
func setImpExtKeywords(request *openrtb2.BidRequest) error {
userKeywords := ""
if request.User != nil {
userKeywords = request.User.Keywords
}
siteKeywords := ""
if request.Site != nil {
siteKeywords = request.Site.Keywords
}
var err error
request.Ext, err = buildConsolidatedKeywordsReqExt(userKeywords, siteKeywords, request.Imp[0].Ext, request.Ext)
return err
}
func processImp(imp *openrtb2.Imp) error {
// get the grid extension
var ext adapters.ExtImpBidder
var gridExt openrtb_ext.ExtImpGrid
if err := json.Unmarshal(imp.Ext, &ext); err != nil {
return err
}
if err := json.Unmarshal(ext.Bidder, &gridExt); err != nil {
return err
}
if gridExt.Uid == 0 {
err := &errortypes.BadInput{
Message: "uid is empty",
}
return err
}
// no error
return nil
}
func setImpExtData(imp openrtb2.Imp) openrtb2.Imp {
var ext ExtImp
if err := json.Unmarshal(imp.Ext, &ext); err != nil {
return imp
}
if ext.Data != nil && ext.Data.AdServer != nil && ext.Data.AdServer.AdSlot != "" {
ext.Gpid = ext.Data.AdServer.AdSlot
extJSON, err := json.Marshal(ext)
if err == nil {
imp.Ext = extJSON
}
}
return imp
}
func fixNative(req json.RawMessage) (json.RawMessage, error) {
var gridReq map[string]interface{}
var parsedRequest map[string]interface{}
if err := json.Unmarshal(req, &gridReq); err != nil {
return req, nil
}
if imps, exists := maputil.ReadEmbeddedSlice(gridReq, "imp"); exists {
for _, imp := range imps {
if gridImp, ok := imp.(map[string]interface{}); ok {
native, hasNative := maputil.ReadEmbeddedMap(gridImp, "native")
if hasNative {
request, hasRequest := maputil.ReadEmbeddedString(native, "request")
if hasRequest {
delete(native, "request")
if err := json.Unmarshal([]byte(request), &parsedRequest); err == nil {
native["request_native"] = parsedRequest
} else {
native["request_native"] = request
}
}
}
}
}
}
return json.Marshal(gridReq)
}
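// Editor-added sketch, not upstream code; the input is an assumed minimal
// request. fixNative moves each stringified imp[].native.request into a parsed
// request_native object so the endpoint receives structured native JSON.
func exampleFixNative() {
in := json.RawMessage(`{"imp":[{"native":{"request":"{\"ver\":\"1.2\"}"}}]}`)
out, err := fixNative(in)
// out == {"imp":[{"native":{"request_native":{"ver":"1.2"}}}]}
_, _ = out, err
}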
// MakeRequests makes the HTTP requests which should be made to fetch bids.
func (a *GridAdapter) MakeRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) {
var errors = make([]error, 0)
// this will contain all the valid impressions
var validImps []openrtb2.Imp
// pre-process the imps
for _, imp := range request.Imp {
if err := processImp(&imp); err == nil {
validImps = append(validImps, setImpExtData(imp))
} else {
errors = append(errors, err)
}
}
if len(validImps) == 0 {
err := &errortypes.BadInput{
Message: "No valid impressions for grid",
}
errors = append(errors, err)
return nil, errors
}
if err := setImpExtKeywords(request); err != nil {
errors = append(errors, err)
return nil, errors
}
request.Imp = validImps
reqJSON, err := json.Marshal(request)
if err != nil {
errors = append(errors, err)
return nil, errors
}
fixedReqJSON, err := fixNative(reqJSON)
if err != nil {
errors = append(errors, err)
return nil, errors
}
headers := http.Header{}
headers.Add("Content-Type", "application/json;charset=utf-8")
return []*adapters.RequestData{{
Method: "POST",
Uri: a.endpoint,
Body: fixedReqJSON,
Headers: headers,
}}, errors
}
// MakeBids unpacks the server's response into Bids.
func (a *GridAdapter) MakeBids(internalRequest *openrtb2.BidRequest, externalRequest *adapters.RequestData, response *adapters.ResponseData) (*adapters.BidderResponse, []error) {
if response.StatusCode == http.StatusNoContent {
return nil, nil
}
if response.StatusCode == http.StatusBadRequest {
return nil, []error{&errortypes.BadInput{
Message: fmt.Sprintf("Unexpected status code: %d. Run with request.debug = 1 for more info", response.StatusCode),
}}
}
if response.StatusCode != http.StatusOK {
return nil, []error{&errortypes.BadServerResponse{
Message: fmt.Sprintf("Unexpected status code: %d. Run with request.debug = 1 for more info", response.StatusCode),
}}
}
var bidResp GridResponse
if err := json.Unmarshal(response.Body, &bidResp); err != nil {
return nil, []error{err}
}
bidResponse := adapters.NewBidderResponseWithBidsCapacity(1)
for _, sb := range bidResp.SeatBid {
for i := range sb.Bid {
bidMeta, err := getBidMeta(sb.Bid[i].Ext)
// check getBidMeta's error here; otherwise it is silently overwritten below
if err != nil {
return nil, []error{err}
}
bidType, err := getMediaTypeForImp(sb.Bid[i].ImpID, internalRequest.Imp, sb.Bid[i])
if sb.Bid[i].AdmNative != nil && sb.Bid[i].AdM == "" {
if bytes, err := json.Marshal(sb.Bid[i].AdmNative); err == nil {
sb.Bid[i].AdM = string(bytes)
}
}
if err != nil {
return nil, []error{err}
}
openrtb2Bid := sb.Bid[i].Bid
bidResponse.Bids = append(bidResponse.Bids, &adapters.TypedBid{
Bid: openrtb2Bid,
BidType: bidType,
BidMeta: bidMeta,
})
}
}
return bidResponse, nil
}
// Builder builds a new instance of the Grid adapter for the given bidder with the given config.
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {
bidder := &GridAdapter{
endpoint: config.Endpoint,
}
return bidder, nil
}
func getBidMeta(ext json.RawMessage) (*openrtb_ext.ExtBidPrebidMeta, error) {
var bidExt GridBidExt
if err := json.Unmarshal(ext, &bidExt); err != nil {
return nil, err
}
var bidMeta *openrtb_ext.ExtBidPrebidMeta
if bidExt.Bidder.Grid.DemandSource != "" {
bidMeta = &openrtb_ext.ExtBidPrebidMeta{
NetworkName: bidExt.Bidder.Grid.DemandSource,
}
}
return bidMeta, nil
}
func getMediaTypeForImp(impID string, imps []openrtb2.Imp, bidWithType GridBid) (openrtb_ext.BidType, error) {
if bidWithType.ContentType != "" {
return bidWithType.ContentType, nil
}
for _, imp := range imps {
if imp.ID == impID {
if imp.Banner != nil {
return openrtb_ext.BidTypeBanner, nil
}
if imp.Video != nil {
return openrtb_ext.BidTypeVideo, nil
}
if imp.Native != nil {
return openrtb_ext.BidTypeNative, nil
}
return "", &errortypes.BadServerResponse{
Message: fmt.Sprintf("Unknown impression type for ID: \"%s\"", impID),
}
}
}
// This shouldn't happen. Let's handle it just in case by returning an error.
return "", &errortypes.BadServerResponse{
Message: fmt.Sprintf("Failed to find impression for ID: \"%s\"", impID),
}
}
| parseKeywordsFromMap | identifier_name |
grid.go | package grid
import (
"encoding/json"
"fmt"
"net/http"
"sort"
"strings"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/adapters"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/errortypes"
"github.com/prebid/prebid-server/openrtb_ext"
"github.com/prebid/prebid-server/util/maputil"
)
type GridAdapter struct {
endpoint string
}
type GridBid struct {
*openrtb2.Bid
AdmNative json.RawMessage `json:"adm_native,omitempty"`
ContentType openrtb_ext.BidType `json:"content_type"`
}
type GridSeatBid struct {
*openrtb2.SeatBid
Bid []GridBid `json:"bid"`
}
type GridResponse struct {
*openrtb2.BidResponse
SeatBid []GridSeatBid `json:"seatbid,omitempty"`
}
type GridBidExt struct {
Bidder ExtBidder `json:"bidder"`
}
type ExtBidder struct {
Grid ExtBidderGrid `json:"grid"`
}
type ExtBidderGrid struct {
DemandSource string `json:"demandSource"`
}
type ExtImpDataAdServer struct {
Name string `json:"name"`
AdSlot string `json:"adslot"`
}
type ExtImpData struct {
PbAdslot string `json:"pbadslot,omitempty"`
AdServer *ExtImpDataAdServer `json:"adserver,omitempty"`
}
type ExtImp struct {
Prebid *openrtb_ext.ExtImpPrebid `json:"prebid,omitempty"`
Bidder json.RawMessage `json:"bidder"`
Data *ExtImpData `json:"data,omitempty"`
Gpid string `json:"gpid,omitempty"`
Skadn json.RawMessage `json:"skadn,omitempty"`
Context json.RawMessage `json:"context,omitempty"`
}
type KeywordSegment struct {
Name string `json:"name"`
Value string `json:"value"`
}
type KeywordsPublisherItem struct {
Name string `json:"name"`
Segments []KeywordSegment `json:"segments"`
}
type KeywordsPublisher map[string][]KeywordsPublisherItem
type Keywords map[string]KeywordsPublisher
// buildConsolidatedKeywordsReqExt builds a new request.ext JSON incorporating request.site.keywords,
// request.user.keywords, request.imp[0].ext.keywords, and request.ext.keywords.
// Invalid keywords in request.imp[0].ext.keywords are not incorporated; invalid keywords in
// request.ext.keywords.site and request.ext.keywords.user are dropped.
func buildConsolidatedKeywordsReqExt(openRTBUser, openRTBSite string, firstImpExt, requestExt json.RawMessage) (json.RawMessage, error) {
// unmarshal ext to object map
requestExtMap := parseExtToMap(requestExt)
firstImpExtMap := parseExtToMap(firstImpExt)
// extract `keywords` field
requestExtKeywordsMap := extractKeywordsMap(requestExtMap)
firstImpExtKeywordsMap := extractBidderKeywordsMap(firstImpExtMap)
// parse + merge keywords
keywords := parseKeywordsFromMap(requestExtKeywordsMap) // request.ext.keywords
mergeKeywords(keywords, parseKeywordsFromMap(firstImpExtKeywordsMap)) // request.imp[0].ext.bidder.keywords
mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBUser, "user")) // request.user.keywords
mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBSite, "site")) // request.site.keywords
// overlay site + user keywords
if site, exists := keywords["site"]; exists && len(site) > 0 {
requestExtKeywordsMap["site"] = site
} else {
delete(requestExtKeywordsMap, "site")
}
if user, exists := keywords["user"]; exists && len(user) > 0 {
requestExtKeywordsMap["user"] = user
} else {
delete(requestExtKeywordsMap, "user")
}
// reconcile keywords with request.ext
if len(requestExtKeywordsMap) > 0 {
requestExtMap["keywords"] = requestExtKeywordsMap
} else {
delete(requestExtMap, "keywords")
}
// marshal final result
if len(requestExtMap) > 0 {
return json.Marshal(requestExtMap)
}
return nil, nil
}
func parseExtToMap(ext json.RawMessage) map[string]interface{} {
var root map[string]interface{}
if err := json.Unmarshal(ext, &root); err != nil {
return make(map[string]interface{})
}
return root
}
func extractKeywordsMap(ext map[string]interface{}) map[string]interface{} {
if keywords, exists := maputil.ReadEmbeddedMap(ext, "keywords"); exists {
return keywords
}
return make(map[string]interface{})
}
func extractBidderKeywordsMap(ext map[string]interface{}) map[string]interface{} {
if bidder, exists := maputil.ReadEmbeddedMap(ext, "bidder"); exists {
return extractKeywordsMap(bidder)
}
return make(map[string]interface{})
}
func parseKeywordsFromMap(extKeywords map[string]interface{}) Keywords {
keywords := make(Keywords)
for k, v := range extKeywords {
// keywords may only be provided in the site and user sections
if k != "site" && k != "user" {
continue
}
// the site or user sections must be an object
if section, ok := v.(map[string]interface{}); ok {
keywords[k] = parseKeywordsFromSection(section)
}
}
return keywords
}
func parseKeywordsFromSection(section map[string]interface{}) KeywordsPublisher {
keywordsPublishers := make(KeywordsPublisher)
for publisherKey, publisherValue := range section {
// publisher value must be a slice
publisherValueSlice, ok := publisherValue.([]interface{})
if !ok {
continue
}
for _, publisherValueItem := range publisherValueSlice {
// item must be an object
publisherItem, ok := publisherValueItem.(map[string]interface{})
if !ok {
continue
}
// publisher item must have a name
publisherName, ok := maputil.ReadEmbeddedString(publisherItem, "name")
if !ok {
continue
}
var segments []KeywordSegment
// extract valid segments
if segmentsSlice, exists := maputil.ReadEmbeddedSlice(publisherItem, "segments"); exists {
for _, segment := range segmentsSlice {
if segmentMap, ok := segment.(map[string]interface{}); ok {
name, hasName := maputil.ReadEmbeddedString(segmentMap, "name")
value, hasValue := maputil.ReadEmbeddedString(segmentMap, "value")
if hasName && hasValue {
segments = append(segments, KeywordSegment{Name: name, Value: value})
}
}
}
}
// ensure consistent ordering for publisher item map
publisherItemKeys := make([]string, 0, len(publisherItem))
for v := range publisherItem |
sort.Strings(publisherItemKeys)
// compose compatible alternate segment format
for _, potentialSegmentName := range publisherItemKeys {
potentialSegmentValues := publisherItem[potentialSegmentName]
// values must be an array
if valuesSlice, ok := potentialSegmentValues.([]interface{}); ok {
for _, value := range valuesSlice {
if valueAsString, ok := value.(string); ok {
segments = append(segments, KeywordSegment{Name: potentialSegmentName, Value: valueAsString})
}
}
}
}
if len(segments) > 0 {
keywordsPublishers[publisherKey] = append(keywordsPublishers[publisherKey], KeywordsPublisherItem{Name: publisherName, Segments: segments})
}
}
}
return keywordsPublishers
}
func parseKeywordsFromOpenRTB(keywords, section string) Keywords {
keywordsSplit := strings.Split(keywords, ",")
segments := make([]KeywordSegment, 0, len(keywordsSplit))
for _, v := range keywordsSplit {
if v != "" {
segments = append(segments, KeywordSegment{Name: "keywords", Value: v})
}
}
if len(segments) > 0 {
return map[string]KeywordsPublisher{section: map[string][]KeywordsPublisherItem{"ortb2": {{Name: "keywords", Segments: segments}}}}
}
return make(Keywords)
}
func mergeKeywords(a, b Keywords) {
for key, values := range b {
if _, sectionExists := a[key]; !sectionExists {
a[key] = KeywordsPublisher{}
}
for publisherKey, publisherValues := range values {
a[key][publisherKey] = append(publisherValues, a[key][publisherKey]...)
}
}
}
func setImpExtKeywords(request *openrtb2.BidRequest) error {
userKeywords := ""
if request.User != nil {
userKeywords = request.User.Keywords
}
siteKeywords := ""
if request.Site != nil {
siteKeywords = request.Site.Keywords
}
var err error
request.Ext, err = buildConsolidatedKeywordsReqExt(userKeywords, siteKeywords, request.Imp[0].Ext, request.Ext)
return err
}
func processImp(imp *openrtb2.Imp) error {
// get the grid extension
var ext adapters.ExtImpBidder
var gridExt openrtb_ext.ExtImpGrid
if err := json.Unmarshal(imp.Ext, &ext); err != nil {
return err
}
if err := json.Unmarshal(ext.Bidder, &gridExt); err != nil {
return err
}
if gridExt.Uid == 0 {
err := &errortypes.BadInput{
Message: "uid is empty",
}
return err
}
// no error
return nil
}
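// Editor-added sketch, not upstream code; the imp JSON is an assumption for
// illustration. processImp validates that ext.bidder.uid is present and
// non-zero, returning a BadInput error otherwise.
func exampleProcessImp() {
bad := openrtb2.Imp{Ext: json.RawMessage(`{"bidder":{"uid":0}}`)}
err := processImp(&bad) // BadInput: "uid is empty"
ok := openrtb2.Imp{Ext: json.RawMessage(`{"bidder":{"uid":1}}`)}
_ = processImp(&ok) // nil
_ = err
}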
func setImpExtData(imp openrtb2.Imp) openrtb2.Imp {
var ext ExtImp
if err := json.Unmarshal(imp.Ext, &ext); err != nil {
return imp
}
if ext.Data != nil && ext.Data.AdServer != nil && ext.Data.AdServer.AdSlot != "" {
ext.Gpid = ext.Data.AdServer.AdSlot
extJSON, err := json.Marshal(ext)
if err == nil {
imp.Ext = extJSON
}
}
return imp
}
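// Editor-added sketch, not upstream code; the ext JSON is an assumption for
// illustration. When imp.ext.data.adserver.adslot is set, setImpExtData copies
// it into imp.ext.gpid so the request carries a canonical global placement id.
func exampleSetImpExtData() {
imp := openrtb2.Imp{Ext: json.RawMessage(`{"bidder":{"uid":1},"data":{"adserver":{"name":"gam","adslot":"/1234/slot"}}}`)}
imp = setImpExtData(imp)
// imp.Ext now also contains "gpid":"/1234/slot"
_ = imp
}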
func fixNative(req json.RawMessage) (json.RawMessage, error) {
var gridReq map[string]interface{}
var parsedRequest map[string]interface{}
if err := json.Unmarshal(req, &gridReq); err != nil {
return req, nil
}
if imps, exists := maputil.ReadEmbeddedSlice(gridReq, "imp"); exists {
for _, imp := range imps {
if gridImp, ok := imp.(map[string]interface{}); ok {
native, hasNative := maputil.ReadEmbeddedMap(gridImp, "native")
if hasNative {
request, hasRequest := maputil.ReadEmbeddedString(native, "request")
if hasRequest {
delete(native, "request")
if err := json.Unmarshal([]byte(request), &parsedRequest); err == nil {
native["request_native"] = parsedRequest
} else {
native["request_native"] = request
}
}
}
}
}
}
return json.Marshal(gridReq)
}
// MakeRequests makes the HTTP requests which should be made to fetch bids.
func (a *GridAdapter) MakeRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) {
var errors = make([]error, 0)
// this will contain all the valid impressions
var validImps []openrtb2.Imp
// pre-process the imps
for _, imp := range request.Imp {
if err := processImp(&imp); err == nil {
validImps = append(validImps, setImpExtData(imp))
} else {
errors = append(errors, err)
}
}
if len(validImps) == 0 {
err := &errortypes.BadInput{
Message: "No valid impressions for grid",
}
errors = append(errors, err)
return nil, errors
}
if err := setImpExtKeywords(request); err != nil {
errors = append(errors, err)
return nil, errors
}
request.Imp = validImps
reqJSON, err := json.Marshal(request)
if err != nil {
errors = append(errors, err)
return nil, errors
}
fixedReqJSON, err := fixNative(reqJSON)
if err != nil {
errors = append(errors, err)
return nil, errors
}
headers := http.Header{}
headers.Add("Content-Type", "application/json;charset=utf-8")
return []*adapters.RequestData{{
Method: "POST",
Uri: a.endpoint,
Body: fixedReqJSON,
Headers: headers,
}}, errors
}
// MakeBids unpacks the server's response into Bids.
func (a *GridAdapter) MakeBids(internalRequest *openrtb2.BidRequest, externalRequest *adapters.RequestData, response *adapters.ResponseData) (*adapters.BidderResponse, []error) {
if response.StatusCode == http.StatusNoContent {
return nil, nil
}
if response.StatusCode == http.StatusBadRequest {
return nil, []error{&errortypes.BadInput{
Message: fmt.Sprintf("Unexpected status code: %d. Run with request.debug = 1 for more info", response.StatusCode),
}}
}
if response.StatusCode != http.StatusOK {
return nil, []error{&errortypes.BadServerResponse{
Message: fmt.Sprintf("Unexpected status code: %d. Run with request.debug = 1 for more info", response.StatusCode),
}}
}
var bidResp GridResponse
if err := json.Unmarshal(response.Body, &bidResp); err != nil {
return nil, []error{err}
}
bidResponse := adapters.NewBidderResponseWithBidsCapacity(1)
for _, sb := range bidResp.SeatBid {
for i := range sb.Bid {
bidMeta, err := getBidMeta(sb.Bid[i].Ext)
// check getBidMeta's error here; otherwise it is silently overwritten below
if err != nil {
return nil, []error{err}
}
bidType, err := getMediaTypeForImp(sb.Bid[i].ImpID, internalRequest.Imp, sb.Bid[i])
if sb.Bid[i].AdmNative != nil && sb.Bid[i].AdM == "" {
if bytes, err := json.Marshal(sb.Bid[i].AdmNative); err == nil {
sb.Bid[i].AdM = string(bytes)
}
}
if err != nil {
return nil, []error{err}
}
openrtb2Bid := sb.Bid[i].Bid
bidResponse.Bids = append(bidResponse.Bids, &adapters.TypedBid{
Bid: openrtb2Bid,
BidType: bidType,
BidMeta: bidMeta,
})
}
}
return bidResponse, nil
}
// Builder builds a new instance of the Grid adapter for the given bidder with the given config.
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {
bidder := &GridAdapter{
endpoint: config.Endpoint,
}
return bidder, nil
}
func getBidMeta(ext json.RawMessage) (*openrtb_ext.ExtBidPrebidMeta, error) {
var bidExt GridBidExt
if err := json.Unmarshal(ext, &bidExt); err != nil {
return nil, err
}
var bidMeta *openrtb_ext.ExtBidPrebidMeta
if bidExt.Bidder.Grid.DemandSource != "" {
bidMeta = &openrtb_ext.ExtBidPrebidMeta{
NetworkName: bidExt.Bidder.Grid.DemandSource,
}
}
return bidMeta, nil
}
func getMediaTypeForImp(impID string, imps []openrtb2.Imp, bidWithType GridBid) (openrtb_ext.BidType, error) {
if bidWithType.ContentType != "" {
return bidWithType.ContentType, nil
}
for _, imp := range imps {
if imp.ID == impID {
if imp.Banner != nil {
return openrtb_ext.BidTypeBanner, nil
}
if imp.Video != nil {
return openrtb_ext.BidTypeVideo, nil
}
if imp.Native != nil {
return openrtb_ext.BidTypeNative, nil
}
return "", &errortypes.BadServerResponse{
Message: fmt.Sprintf("Unknown impression type for ID: \"%s\"", impID),
}
}
}
// This shouldn't happen. Let's handle it just in case by returning an error.
return "", &errortypes.BadServerResponse{
Message: fmt.Sprintf("Failed to find impression for ID: \"%s\"", impID),
}
}
| {
publisherItemKeys = append(publisherItemKeys, v)
} | conditional_block |
grid.go | package grid
import (
"encoding/json"
"fmt"
"net/http"
"sort"
"strings"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/adapters"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/errortypes"
"github.com/prebid/prebid-server/openrtb_ext"
"github.com/prebid/prebid-server/util/maputil"
)
type GridAdapter struct {
endpoint string
}
type GridBid struct {
*openrtb2.Bid
AdmNative json.RawMessage `json:"adm_native,omitempty"`
ContentType openrtb_ext.BidType `json:"content_type"`
}
type GridSeatBid struct {
*openrtb2.SeatBid
Bid []GridBid `json:"bid"`
}
type GridResponse struct {
*openrtb2.BidResponse
SeatBid []GridSeatBid `json:"seatbid,omitempty"`
}
type GridBidExt struct {
Bidder ExtBidder `json:"bidder"`
}
type ExtBidder struct {
Grid ExtBidderGrid `json:"grid"`
}
type ExtBidderGrid struct {
DemandSource string `json:"demandSource"`
}
type ExtImpDataAdServer struct {
Name string `json:"name"`
AdSlot string `json:"adslot"`
}
type ExtImpData struct {
PbAdslot string `json:"pbadslot,omitempty"`
AdServer *ExtImpDataAdServer `json:"adserver,omitempty"`
}
type ExtImp struct {
Prebid *openrtb_ext.ExtImpPrebid `json:"prebid,omitempty"`
Bidder json.RawMessage `json:"bidder"`
Data *ExtImpData `json:"data,omitempty"`
Gpid string `json:"gpid,omitempty"`
Skadn json.RawMessage `json:"skadn,omitempty"`
Context json.RawMessage `json:"context,omitempty"`
}
type KeywordSegment struct {
Name string `json:"name"`
Value string `json:"value"`
}
type KeywordsPublisherItem struct {
Name string `json:"name"`
Segments []KeywordSegment `json:"segments"`
}
type KeywordsPublisher map[string][]KeywordsPublisherItem
type Keywords map[string]KeywordsPublisher
// buildConsolidatedKeywordsReqExt builds a new request.ext JSON incorporating request.site.keywords,
// request.user.keywords, request.imp[0].ext.keywords, and request.ext.keywords.
// Invalid keywords in request.imp[0].ext.keywords are not incorporated; invalid keywords in
// request.ext.keywords.site and request.ext.keywords.user are dropped.
func buildConsolidatedKeywordsReqExt(openRTBUser, openRTBSite string, firstImpExt, requestExt json.RawMessage) (json.RawMessage, error) {
// unmarshal ext to object map
requestExtMap := parseExtToMap(requestExt)
firstImpExtMap := parseExtToMap(firstImpExt)
// extract `keywords` field
requestExtKeywordsMap := extractKeywordsMap(requestExtMap)
firstImpExtKeywordsMap := extractBidderKeywordsMap(firstImpExtMap)
// parse + merge keywords
keywords := parseKeywordsFromMap(requestExtKeywordsMap) // request.ext.keywords
mergeKeywords(keywords, parseKeywordsFromMap(firstImpExtKeywordsMap)) // request.imp[0].ext.bidder.keywords
mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBUser, "user")) // request.user.keywords
mergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBSite, "site")) // request.site.keywords
// overlay site + user keywords
if site, exists := keywords["site"]; exists && len(site) > 0 {
requestExtKeywordsMap["site"] = site
} else {
delete(requestExtKeywordsMap, "site")
}
if user, exists := keywords["user"]; exists && len(user) > 0 {
requestExtKeywordsMap["user"] = user
} else {
delete(requestExtKeywordsMap, "user")
}
// reconcile keywords with request.ext
if len(requestExtKeywordsMap) > 0 {
requestExtMap["keywords"] = requestExtKeywordsMap
} else {
delete(requestExtMap, "keywords")
}
// marshal final result
if len(requestExtMap) > 0 {
return json.Marshal(requestExtMap)
}
return nil, nil
}
func parseExtToMap(ext json.RawMessage) map[string]interface{} {
var root map[string]interface{}
if err := json.Unmarshal(ext, &root); err != nil {
return make(map[string]interface{})
}
return root
}
func extractKeywordsMap(ext map[string]interface{}) map[string]interface{} {
if keywords, exists := maputil.ReadEmbeddedMap(ext, "keywords"); exists {
return keywords
}
return make(map[string]interface{})
}
func extractBidderKeywordsMap(ext map[string]interface{}) map[string]interface{} {
if bidder, exists := maputil.ReadEmbeddedMap(ext, "bidder"); exists {
return extractKeywordsMap(bidder)
}
return make(map[string]interface{})
}
func parseKeywordsFromMap(extKeywords map[string]interface{}) Keywords {
keywords := make(Keywords)
for k, v := range extKeywords {
// keywords may only be provided in the site and user sections
if k != "site" && k != "user" {
continue
}
// the site or user sections must be an object
if section, ok := v.(map[string]interface{}); ok {
keywords[k] = parseKeywordsFromSection(section)
}
}
return keywords
}
func parseKeywordsFromSection(section map[string]interface{}) KeywordsPublisher |
func parseKeywordsFromOpenRTB(keywords, section string) Keywords {
keywordsSplit := strings.Split(keywords, ",")
segments := make([]KeywordSegment, 0, len(keywordsSplit))
for _, v := range keywordsSplit {
if v != "" {
segments = append(segments, KeywordSegment{Name: "keywords", Value: v})
}
}
if len(segments) > 0 {
return map[string]KeywordsPublisher{section: map[string][]KeywordsPublisherItem{"ortb2": {{Name: "keywords", Segments: segments}}}}
}
return make(Keywords)
}
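// Editor-added sketch, not upstream code: comma-separated OpenRTB keywords are
// normalized into KeywordSegment values named "keywords" under a synthetic
// "ortb2" publisher for the requested section; empty tokens are skipped.
func exampleParseKeywordsFromOpenRTB() {
kw := parseKeywordsFromOpenRTB("sports,news,", "user")
// kw["user"]["ortb2"][0].Segments ==
// []KeywordSegment{{Name: "keywords", Value: "sports"}, {Name: "keywords", Value: "news"}}
_ = kw
}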
func mergeKeywords(a, b Keywords) {
for key, values := range b {
if _, sectionExists := a[key]; !sectionExists {
a[key] = KeywordsPublisher{}
}
for publisherKey, publisherValues := range values {
a[key][publisherKey] = append(publisherValues, a[key][publisherKey]...)
}
}
}
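// Editor-added sketch, not upstream code: mergeKeywords mutates its first
// argument, and for a shared section/publisher key the second argument's items
// are prepended ahead of the existing ones.
func exampleMergeKeywords() {
a := Keywords{"user": {"pub": {{Name: "fromA"}}}}
b := Keywords{"user": {"pub": {{Name: "fromB"}}}}
mergeKeywords(a, b)
// a["user"]["pub"] == []KeywordsPublisherItem{{Name: "fromB"}, {Name: "fromA"}}
}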
func setImpExtKeywords(request *openrtb2.BidRequest) error {
userKeywords := ""
if request.User != nil {
userKeywords = request.User.Keywords
}
siteKeywords := ""
if request.Site != nil {
siteKeywords = request.Site.Keywords
}
var err error
request.Ext, err = buildConsolidatedKeywordsReqExt(userKeywords, siteKeywords, request.Imp[0].Ext, request.Ext)
return err
}
func processImp(imp *openrtb2.Imp) error {
// get the grid extension
var ext adapters.ExtImpBidder
var gridExt openrtb_ext.ExtImpGrid
if err := json.Unmarshal(imp.Ext, &ext); err != nil {
return err
}
if err := json.Unmarshal(ext.Bidder, &gridExt); err != nil {
return err
}
if gridExt.Uid == 0 {
err := &errortypes.BadInput{
Message: "uid is empty",
}
return err
}
// no error
return nil
}
func setImpExtData(imp openrtb2.Imp) openrtb2.Imp {
var ext ExtImp
if err := json.Unmarshal(imp.Ext, &ext); err != nil {
return imp
}
if ext.Data != nil && ext.Data.AdServer != nil && ext.Data.AdServer.AdSlot != "" {
ext.Gpid = ext.Data.AdServer.AdSlot
extJSON, err := json.Marshal(ext)
if err == nil {
imp.Ext = extJSON
}
}
return imp
}
func fixNative(req json.RawMessage) (json.RawMessage, error) {
var gridReq map[string]interface{}
var parsedRequest map[string]interface{}
if err := json.Unmarshal(req, &gridReq); err != nil {
return req, nil
}
if imps, exists := maputil.ReadEmbeddedSlice(gridReq, "imp"); exists {
for _, imp := range imps {
if gridImp, ok := imp.(map[string]interface{}); ok {
native, hasNative := maputil.ReadEmbeddedMap(gridImp, "native")
if hasNative {
request, hasRequest := maputil.ReadEmbeddedString(native, "request")
if hasRequest {
delete(native, "request")
if err := json.Unmarshal([]byte(request), &parsedRequest); err == nil {
native["request_native"] = parsedRequest
} else {
native["request_native"] = request
}
}
}
}
}
}
return json.Marshal(gridReq)
}
// MakeRequests makes the HTTP requests which should be made to fetch bids.
func (a *GridAdapter) MakeRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) {
var errors = make([]error, 0)
// this will contain all the valid impressions
var validImps []openrtb2.Imp
// pre-process the imps
for _, imp := range request.Imp {
if err := processImp(&imp); err == nil {
validImps = append(validImps, setImpExtData(imp))
} else {
errors = append(errors, err)
}
}
if len(validImps) == 0 {
err := &errortypes.BadInput{
Message: "No valid impressions for grid",
}
errors = append(errors, err)
return nil, errors
}
if err := setImpExtKeywords(request); err != nil {
errors = append(errors, err)
return nil, errors
}
request.Imp = validImps
reqJSON, err := json.Marshal(request)
if err != nil {
errors = append(errors, err)
return nil, errors
}
fixedReqJSON, err := fixNative(reqJSON)
if err != nil {
errors = append(errors, err)
return nil, errors
}
headers := http.Header{}
headers.Add("Content-Type", "application/json;charset=utf-8")
return []*adapters.RequestData{{
Method: "POST",
Uri: a.endpoint,
Body: fixedReqJSON,
Headers: headers,
}}, errors
}
// MakeBids unpacks the server's response into Bids.
func (a *GridAdapter) MakeBids(internalRequest *openrtb2.BidRequest, externalRequest *adapters.RequestData, response *adapters.ResponseData) (*adapters.BidderResponse, []error) {
if response.StatusCode == http.StatusNoContent {
return nil, nil
}
if response.StatusCode == http.StatusBadRequest {
return nil, []error{&errortypes.BadInput{
Message: fmt.Sprintf("Unexpected status code: %d. Run with request.debug = 1 for more info", response.StatusCode),
}}
}
if response.StatusCode != http.StatusOK {
return nil, []error{&errortypes.BadServerResponse{
Message: fmt.Sprintf("Unexpected status code: %d. Run with request.debug = 1 for more info", response.StatusCode),
}}
}
var bidResp GridResponse
if err := json.Unmarshal(response.Body, &bidResp); err != nil {
return nil, []error{err}
}
bidResponse := adapters.NewBidderResponseWithBidsCapacity(1)
for _, sb := range bidResp.SeatBid {
for i := range sb.Bid {
bidMeta, err := getBidMeta(sb.Bid[i].Ext)
// check getBidMeta's error here; otherwise it is silently overwritten below
if err != nil {
return nil, []error{err}
}
bidType, err := getMediaTypeForImp(sb.Bid[i].ImpID, internalRequest.Imp, sb.Bid[i])
if sb.Bid[i].AdmNative != nil && sb.Bid[i].AdM == "" {
if bytes, err := json.Marshal(sb.Bid[i].AdmNative); err == nil {
sb.Bid[i].AdM = string(bytes)
}
}
if err != nil {
return nil, []error{err}
}
openrtb2Bid := sb.Bid[i].Bid
bidResponse.Bids = append(bidResponse.Bids, &adapters.TypedBid{
Bid: openrtb2Bid,
BidType: bidType,
BidMeta: bidMeta,
})
}
}
return bidResponse, nil
}
// Builder builds a new instance of the Grid adapter for the given bidder with the given config.
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {
bidder := &GridAdapter{
endpoint: config.Endpoint,
}
return bidder, nil
}
func getBidMeta(ext json.RawMessage) (*openrtb_ext.ExtBidPrebidMeta, error) {
var bidExt GridBidExt
if err := json.Unmarshal(ext, &bidExt); err != nil {
return nil, err
}
var bidMeta *openrtb_ext.ExtBidPrebidMeta
if bidExt.Bidder.Grid.DemandSource != "" {
bidMeta = &openrtb_ext.ExtBidPrebidMeta{
NetworkName: bidExt.Bidder.Grid.DemandSource,
}
}
return bidMeta, nil
}
func getMediaTypeForImp(impID string, imps []openrtb2.Imp, bidWithType GridBid) (openrtb_ext.BidType, error) {
if bidWithType.ContentType != "" {
return bidWithType.ContentType, nil
}
for _, imp := range imps {
if imp.ID == impID {
if imp.Banner != nil {
return openrtb_ext.BidTypeBanner, nil
}
if imp.Video != nil {
return openrtb_ext.BidTypeVideo, nil
}
if imp.Native != nil {
return openrtb_ext.BidTypeNative, nil
}
return "", &errortypes.BadServerResponse{
Message: fmt.Sprintf("Unknown impression type for ID: \"%s\"", impID),
}
}
}
// This shouldn't happen. Let's handle it just in case by returning an error.
return "", &errortypes.BadServerResponse{
Message: fmt.Sprintf("Failed to find impression for ID: \"%s\"", impID),
}
}
| {
keywordsPublishers := make(KeywordsPublisher)
for publisherKey, publisherValue := range section {
// publisher value must be a slice
publisherValueSlice, ok := publisherValue.([]interface{})
if !ok {
continue
}
for _, publisherValueItem := range publisherValueSlice {
// item must be an object
publisherItem, ok := publisherValueItem.(map[string]interface{})
if !ok {
continue
}
// publisher item must have a name
publisherName, ok := maputil.ReadEmbeddedString(publisherItem, "name")
if !ok {
continue
}
var segments []KeywordSegment
// extract valid segments
if segmentsSlice, exists := maputil.ReadEmbeddedSlice(publisherItem, "segments"); exists {
for _, segment := range segmentsSlice {
if segmentMap, ok := segment.(map[string]interface{}); ok {
name, hasName := maputil.ReadEmbeddedString(segmentMap, "name")
value, hasValue := maputil.ReadEmbeddedString(segmentMap, "value")
if hasName && hasValue {
segments = append(segments, KeywordSegment{Name: name, Value: value})
}
}
}
}
// ensure consistent ordering for publisher item map
publisherItemKeys := make([]string, 0, len(publisherItem))
for v := range publisherItem {
publisherItemKeys = append(publisherItemKeys, v)
}
sort.Strings(publisherItemKeys)
// compose compatible alternate segment format
for _, potentialSegmentName := range publisherItemKeys {
potentialSegmentValues := publisherItem[potentialSegmentName]
// values must be an array
if valuesSlice, ok := potentialSegmentValues.([]interface{}); ok {
for _, value := range valuesSlice {
if valueAsString, ok := value.(string); ok {
segments = append(segments, KeywordSegment{Name: potentialSegmentName, Value: valueAsString})
}
}
}
}
if len(segments) > 0 {
keywordsPublishers[publisherKey] = append(keywordsPublishers[publisherKey], KeywordsPublisherItem{Name: publisherName, Segments: segments})
}
}
}
return keywordsPublishers
} | identifier_body |
decode.rs | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
use crate::unescape::unescape;
use std::borrow::Cow;
use std::convert::TryFrom;
use thiserror::Error;
use xmlparser::{ElementEnd, Token, Tokenizer};
pub type Depth = usize;
// in general, these errors are just for reporting what happened, there isn't
// much value in lots of different match variants
#[derive(Debug, Error)]
pub enum XmlError {
#[error("XML Parse Error")]
InvalidXml(#[from] xmlparser::Error),
#[error("Invalid XML Escape: {esc}")]
InvalidEscape { esc: String },
#[error("Error parsing XML: {0}")]
Custom(Cow<'static, str>),
#[error("Encountered another error parsing XML: {0}")]
Unhandled(#[from] Box<dyn std::error::Error + Send + Sync + 'static>),
}
impl XmlError {
pub fn custom(msg: impl Into<Cow<'static, str>>) -> Self {
XmlError::Custom(msg.into())
}
}
#[derive(PartialEq, Debug)]
pub struct | <'a> {
pub prefix: &'a str,
pub local: &'a str,
}
impl Name<'_> {
/// Check if a given name matches a tag name composed of `prefix:local` or just `local`
pub fn matches(&self, tag_name: &str) -> bool {
let split = tag_name.find(':');
match split {
None => tag_name == self.local,
Some(idx) => {
let (prefix, local) = tag_name.split_at(idx);
let local = &local[1..];
self.local == local && self.prefix == prefix
}
}
}
}
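// Editor-added sketch, not upstream code: a bare local name matches any
// prefix, while a qualified `prefix:local` pattern must match both parts.
#[cfg(test)]
mod name_matches_sketch {
use super::Name;
#[test]
fn qualified_and_bare() {
let n = Name { prefix: "xsi", local: "type" };
assert!(n.matches("xsi:type"));
assert!(n.matches("type"));
assert!(!n.matches("wsdl:type"));
}
}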
#[derive(Debug, PartialEq)]
pub struct Attr<'a> {
name: Name<'a>,
// attribute values can be escaped (e.g. with double quotes), so we need a Cow
value: Cow<'a, str>,
}
#[derive(Debug, PartialEq)]
pub struct StartEl<'a> {
name: Name<'a>,
attributes: Vec<Attr<'a>>,
closed: bool,
depth: Depth,
}
/// Xml Start Element
///
/// ```xml
/// <a:b c="d">
/// ^^^ ^^^^^
/// name attributes
/// ```
impl<'a> StartEl<'a> {
pub fn depth(&self) -> Depth {
self.depth
}
fn new(local: &'a str, prefix: &'a str, depth: Depth) -> Self {
Self {
name: Name { prefix, local },
attributes: vec![],
closed: false,
depth,
}
}
/// Retrieve an attribute with a given key
///
/// key `prefix:local` combined as a str, joined by a `:`
pub fn attr<'b>(&'b self, key: &'b str) -> Option<&'b str> {
self.attributes
.iter()
.find(|attr| attr.name.matches(key))
.map(|attr| attr.value.as_ref())
}
/// Returns whether this `StartEl` matches a given name
/// in `prefix:local` form.
pub fn matches(&self, pat: &str) -> bool {
self.name.matches(pat)
}
/// Local component of this element's name
///
/// ```xml
/// <foo:bar>
/// ^^^
/// ```
pub fn local(&self) -> &str {
self.name.local
}
/// Prefix component of this elements name (or empty string)
/// ```xml
/// <foo:bar>
/// ^^^
/// ```
pub fn prefix(&self) -> &str {
self.name.prefix
}
/// Returns true if `el` at `depth` is a match for this `start_el`
fn end_el(&self, el: ElementEnd, depth: Depth) -> bool {
if depth != self.depth {
return false;
}
match el {
ElementEnd::Open => false,
ElementEnd::Close(prefix, local) => {
prefix.as_str() == self.name.prefix && local.as_str() == self.name.local
}
ElementEnd::Empty => false,
}
}
}
/// Xml Document abstraction
///
/// This document wraps a lazy tokenizer with depth tracking.
/// Constructing a document is essentially free.
pub struct Document<'a> {
tokenizer: Tokenizer<'a>,
depth: Depth,
}
impl<'a> TryFrom<&'a [u8]> for Document<'a> {
type Error = XmlError;
fn try_from(value: &'a [u8]) -> Result<Self, Self::Error> {
Ok(Document::new(
std::str::from_utf8(value).map_err(|err| XmlError::Unhandled(Box::new(err)))?,
))
}
}
impl<'inp> Document<'inp> {
pub fn new(doc: &'inp str) -> Self {
Document {
tokenizer: Tokenizer::from(doc),
depth: 0,
}
}
/// "Depth first" iterator
///
/// Unlike [`next_tag()`](ScopedDecoder::next_tag), this method returns the next
/// start element regardless of depth. This is useful to give a pointer into the middle
/// of a document to start reading.
///
/// ```xml
/// <Response> <-- first call returns this:
/// <A> <-- next call
/// <Nested /> <-- next call returns this
/// <MoreNested>hello</MoreNested> <-- then this:
/// </A>
/// <B/> <-- second call to next_tag returns this
/// </Response>
/// ```
pub fn next_start_element<'a>(&'a mut self) -> Option<StartEl<'inp>> {
next_start_element(self)
}
/// A scoped reader for the entire document
pub fn root_element<'a>(&'a mut self) -> Result<ScopedDecoder<'inp, 'a>, XmlError> {
let start_el = self
.next_start_element()
.ok_or_else(|| XmlError::custom("no root element"))?;
Ok(ScopedDecoder {
doc: self,
start_el,
terminated: false,
})
}
/// A scoped reader for a specific tag
///
/// This method is necessary for when you need to return a ScopedDecoder from a function
/// since normally the stacked-ownership that `next_tag()` uses would prevent returning a reference
/// to a field owned by the current function
pub fn scoped_to<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> {
ScopedDecoder {
doc: self,
start_el,
terminated: false,
}
}
}
/// Depth tracking iterator
///
/// ```xml
/// <a> <- startel depth 0
/// <b> <- startel depth 1
/// <c> <- startel depth 2
/// </c> <- endel depth 2
/// </b> <- endel depth 1
/// </a> <- endel depth 0
/// ```
impl<'inp> Iterator for Document<'inp> {
type Item = Result<(Token<'inp>, Depth), XmlError>;
fn next<'a>(&'a mut self) -> Option<Result<(Token<'inp>, Depth), XmlError>> {
let tok = self.tokenizer.next()?;
let tok = match tok {
Err(e) => return Some(Err(e.into())),
Ok(tok) => tok,
};
// depth bookkeeping
match tok {
Token::ElementEnd {
end: ElementEnd::Close(_, _),
..
} => {
self.depth -= 1;
}
Token::ElementEnd {
end: ElementEnd::Empty,
..
} => self.depth -= 1,
t @ Token::ElementStart { .. } => {
self.depth += 1;
// We want the startel and endel to have the same depth, but after the opener,
// the parser will be at depth 1. Return the previous depth:
return Some(Ok((t, self.depth - 1)));
}
_ => {}
}
Some(Ok((tok, self.depth)))
}
}
/// XmlTag Abstraction
///
/// ScopedDecoder represents a tag-scoped view into an XML document. Methods
/// on `ScopedDecoder` return `None` when the current tag has been exhausted.
pub struct ScopedDecoder<'inp, 'a> {
doc: &'a mut Document<'inp>,
start_el: StartEl<'inp>,
terminated: bool,
}
/// When a scoped decoder is dropped, its entire scope is consumed so that the
/// next read begins at the next tag at the same depth.
impl Drop for ScopedDecoder<'_, '_> {
fn drop(&mut self) {
for _ in self {}
}
}
impl<'inp> ScopedDecoder<'inp, '_> {
/// The start element for this scope
pub fn start_el<'a>(&'a self) -> &'a StartEl<'inp> {
&self.start_el
}
/// Returns the next top-level tag in this scope.
/// The returned reader will fully read the tag during its lifetime. If it is dropped without
/// the data being read, the reader will be advanced until the matching close tag. If you read
/// an element with `next_tag()` and you want to ignore it, simply drop the resulting `ScopeDecoder`.
///
/// ```xml
/// <Response> <-- scoped reader on this tag
/// <A> <-- first call to next_tag returns this
/// <Nested /> <-- to get inner data, call `next_tag` on the returned decoder for `A`
/// <MoreNested>hello</MoreNested>
/// </A>
/// <B/> <-- second call to next_tag returns this
/// </Response>
/// ```
pub fn next_tag<'a>(&'a mut self) -> Option<ScopedDecoder<'inp, 'a>> {
let next_tag = next_start_element(self)?;
Some(self.nested_decoder(next_tag))
}
fn nested_decoder<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> {
ScopedDecoder {
doc: &mut self.doc,
start_el,
terminated: false,
}
}
}
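// Editor-added sketch, not upstream code: a typical top-down traversal. Each
// `next_tag()` yields a child scope; dropping a scope without reading it
// consumes the rest of that subtree so the parent resumes at the next sibling.
#[cfg(test)]
mod traversal_sketch {
use super::Document;
#[test]
fn walk_children() {
let mut doc = Document::new("<R><A><Inner/></A><B/></R>");
let mut root = doc.root_element().expect("valid doc");
let a = root.next_tag().expect("A");
assert!(a.start_el().matches("A"));
drop(a); // skips <Inner/> entirely
assert!(root.next_tag().expect("B").start_el().matches("B"));
}
}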
impl<'inp, 'a> Iterator for ScopedDecoder<'inp, 'a> {
type Item = Result<(Token<'inp>, Depth), XmlError>;
fn next(&mut self) -> Option<Self::Item> {
if self.start_el.closed {
self.terminated = true;
}
if self.terminated {
return None;
}
let (tok, depth) = match self.doc.next() {
Some(Ok((tok, depth))) => (tok, depth),
other => return other,
};
match tok {
Token::ElementEnd { end, .. } if self.start_el.end_el(end, depth) => {
self.terminated = true;
return None;
}
_ => {}
}
Some(Ok((tok, depth)))
}
}
/// Load the next start element out of a depth-tagged token iterator
fn next_start_element<'a, 'inp>(
tokens: &'a mut impl Iterator<Item = Result<(Token<'inp>, Depth), XmlError>>,
) -> Option<StartEl<'inp>> {
let mut out = StartEl::new("", "", 0);
loop {
match tokens.next()? {
Ok((Token::ElementStart { local, prefix, .. }, depth)) => {
out.name.local = local.as_str();
out.name.prefix = prefix.as_str();
out.depth = depth;
}
Ok((
Token::Attribute {
prefix,
local,
value,
..
},
_,
)) => out.attributes.push(Attr {
name: Name {
local: local.as_str(),
prefix: prefix.as_str(),
},
value: unescape(value.as_str()).ok()?,
}),
Ok((
Token::ElementEnd {
end: ElementEnd::Open,
..
},
_,
)) => break,
Ok((
Token::ElementEnd {
end: ElementEnd::Empty,
..
},
_,
)) => {
out.closed = true;
break;
}
_ => {}
}
}
Some(out)
}
/// Returns the data element at the current position
///
/// If the current position is not a data element (and is instead a <startelement>) an error
/// will be returned
pub fn try_data<'a, 'inp>(
tokens: &'a mut impl Iterator<Item = Result<(Token<'inp>, Depth), XmlError>>,
) -> Result<Cow<'inp, str>, XmlError> {
loop {
match tokens.next().map(|opt| opt.map(|opt| opt.0)) {
None => return Ok(Cow::Borrowed("")),
Some(Ok(Token::Text { text })) => return unescape(text.as_str()),
Some(Ok(e @ Token::ElementStart { .. })) => {
return Err(XmlError::custom(format!(
"Looking for a data element, found: {:?}",
e
)))
}
Some(Err(e)) => return Err(e),
_ => {}
}
}
}
#[cfg(test)]
mod test {
use crate::decode::{try_data, Attr, Depth, Document, Name, StartEl};
// test helper to create a closed startel
fn closed<'a>(local: &'a str, prefix: &'a str, depth: Depth) -> StartEl<'a> {
let mut s = StartEl::new(local, prefix, depth);
s.closed = true;
s
}
#[test]
fn scoped_tokens() {
let xml = r#"<Response><A></A></Response>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().expect("valid document");
assert_eq!(root.start_el().local(), "Response");
assert_eq!(root.next_tag().expect("tag exists").start_el().local(), "A");
assert!(root.next_tag().is_none());
}
#[test]
fn handle_depth_properly() {
let xml = r#"<Response><Response></Response><A/></Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().expect("valid document");
assert_eq!(
scoped.next_tag().unwrap().start_el(),
&StartEl::new("Response", "", 1)
);
let closed_a = closed("A", "", 1);
assert_eq!(scoped.next_tag().unwrap().start_el(), &closed_a);
assert!(scoped.next_tag().is_none())
}
#[test]
fn self_closing() {
let xml = r#"<Response/>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().expect("valid doc");
assert_eq!(scoped.start_el.closed, true);
assert!(scoped.next_tag().is_none())
}
#[test]
fn terminate_scope() {
let xml = r#"<Response><Struct><A></A><Also/></Struct><More/></Response>"#;
let mut doc = Document::new(xml);
let mut response_iter = doc.root_element().expect("valid doc");
let mut struct_iter = response_iter.next_tag().unwrap();
assert_eq!(
struct_iter.next_tag().as_ref().map(|t| t.start_el()),
Some(&StartEl::new("A", "", 2))
);
// When the inner iter is dropped, it will read to the end of its scope
// to prevent accidental behavior where we didn't read a full node
drop(struct_iter);
assert_eq!(
response_iter.next_tag().unwrap().start_el(),
&closed("More", "", 1)
);
}
#[test]
fn read_data_invalid() {
let xml = r#"<Response><A></A></Response>"#;
let mut doc = Document::new(xml);
let mut resp = doc.root_element().unwrap();
try_data(&mut resp).expect_err("no data");
}
#[test]
fn read_data() {
let xml = r#"<Response>hello</Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().unwrap();
assert_eq!(try_data(&mut scoped).unwrap(), "hello");
}
/// Whitespace within an element is preserved
#[test]
fn read_data_whitespace() {
let xml = r#"<Response> hello </Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().unwrap();
assert_eq!(try_data(&mut scoped).unwrap(), " hello ");
}
#[test]
fn ignore_insignificant_whitespace() {
let xml = r#"<Response> <A> </A> </Response>"#;
let mut doc = Document::new(xml);
let mut resp = doc.root_element().unwrap();
let mut a = resp.next_tag().expect("should be a");
let data = try_data(&mut a).expect("valid");
assert_eq!(data, " ");
}
#[test]
fn read_attributes() {
let xml = r#"<Response xsi:type="CanonicalUser">hello</Response>"#;
let mut tokenizer = Document::new(xml);
let root = tokenizer.root_element().unwrap();
assert_eq!(
root.start_el().attributes,
vec![Attr {
name: Name {
prefix: "xsi".into(),
local: "type".into()
},
value: "CanonicalUser".into()
}]
)
}
#[test]
fn escape_data() {
let xml = r#"<Response key=""hey">">></Response>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
assert_eq!(try_data(&mut root).unwrap(), ">");
assert_eq!(root.start_el().attr("key"), Some("\"hey\">"));
}
#[test]
fn nested_self_closer() {
let xml = r#"<XmlListsInputOutput>
<stringList/>
<stringSet></stringSet>
</XmlListsInputOutput>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
let mut string_list = root.next_tag().unwrap();
assert_eq!(string_list.start_el(), &closed("stringList", "", 1));
assert!(string_list.next_tag().is_none());
drop(string_list);
assert_eq!(
root.next_tag().unwrap().start_el(),
&StartEl::new("stringSet", "", 1)
);
}
#[test]
fn confusing_nested_same_name_tag() {
// an inner b which could be confused as closing the outer b if depth
// is not properly tracked:
let root_tags = &["a", "b", "c", "d"];
let xml = r#"<XmlListsInputOutput>
<a/>
<b>
<c/>
<b></b>
<here/>
</b>
<c></c>
<d>more</d>
</XmlListsInputOutput>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
let mut cmp = vec![];
while let Some(tag) = root.next_tag() {
cmp.push(tag.start_el().local().to_owned());
}
assert_eq!(root_tags, cmp.as_slice());
}
}
| Name | identifier_name |
decode.rs | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
use crate::unescape::unescape;
use std::borrow::Cow;
use std::convert::TryFrom;
use thiserror::Error;
use xmlparser::{ElementEnd, Token, Tokenizer};
pub type Depth = usize;
// in general, these errors are just for reporting what happened, there isn't
// much value in lots of different match variants
#[derive(Debug, Error)]
pub enum XmlError {
#[error("XML Parse Error")]
InvalidXml(#[from] xmlparser::Error),
#[error("Invalid XML Escape: {esc}")]
InvalidEscape { esc: String },
#[error("Error parsing XML: {0}")]
Custom(Cow<'static, str>),
#[error("Encountered another error parsing XML: {0}")]
Unhandled(#[from] Box<dyn std::error::Error + Send + Sync + 'static>),
}
impl XmlError {
pub fn custom(msg: impl Into<Cow<'static, str>>) -> Self {
XmlError::Custom(msg.into())
}
}
#[derive(PartialEq, Debug)]
pub struct Name<'a> {
pub prefix: &'a str,
pub local: &'a str,
}
impl Name<'_> {
/// Check if a given name matches a tag name composed of `prefix:local` or just `local`
pub fn matches(&self, tag_name: &str) -> bool {
let split = tag_name.find(':');
match split {
None => tag_name == self.local,
Some(idx) => {
let (prefix, local) = tag_name.split_at(idx);
let local = &local[1..];
self.local == local && self.prefix == prefix
}
}
}
}
#[derive(Debug, PartialEq)]
pub struct Attr<'a> {
name: Name<'a>,
// attribute values can be escaped (e.g. with double quotes), so we need a Cow
value: Cow<'a, str>,
}
#[derive(Debug, PartialEq)]
pub struct StartEl<'a> {
name: Name<'a>,
attributes: Vec<Attr<'a>>,
closed: bool,
depth: Depth,
}
/// Xml Start Element
///
/// ```xml
/// <a:b c="d">
/// ^^^ ^^^^^
/// name attributes
/// ```
impl<'a> StartEl<'a> {
pub fn depth(&self) -> Depth {
self.depth
}
fn new(local: &'a str, prefix: &'a str, depth: Depth) -> Self {
Self {
name: Name { prefix, local },
attributes: vec![],
closed: false,
depth,
}
}
/// Retrieve an attribute with a given key
///
/// key `prefix:local` combined as a str, joined by a `:`
pub fn attr<'b>(&'b self, key: &'b str) -> Option<&'b str> {
self.attributes
.iter()
.find(|attr| attr.name.matches(key))
.map(|attr| attr.value.as_ref())
}
/// Returns whether this `StartEl` matches a given name
/// in `prefix:local` form.
pub fn matches(&self, pat: &str) -> bool {
self.name.matches(pat)
}
/// Local component of this element's name
///
/// ```xml
/// <foo:bar>
/// ^^^
/// ```
pub fn local(&self) -> &str |
/// Prefix component of this elements name (or empty string)
/// ```xml
/// <foo:bar>
/// ^^^
/// ```
pub fn prefix(&self) -> &str {
self.name.prefix
}
/// Returns true if `el` at `depth` is a match for this `start_el`
fn end_el(&self, el: ElementEnd, depth: Depth) -> bool {
if depth != self.depth {
return false;
}
match el {
ElementEnd::Open => false,
ElementEnd::Close(prefix, local) => {
prefix.as_str() == self.name.prefix && local.as_str() == self.name.local
}
ElementEnd::Empty => false,
}
}
}
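// Editor-added sketch, not upstream code: attribute lookup uses the same
// `prefix:local` matching convention as element names.
#[cfg(test)]
mod attr_sketch {
use super::Document;
#[test]
fn read_attr() {
let mut doc = Document::new(r#"<R xsi:type="CanonicalUser"/>"#);
let root = doc.root_element().expect("valid doc");
assert_eq!(root.start_el().attr("xsi:type"), Some("CanonicalUser"));
assert_eq!(root.start_el().attr("missing"), None);
}
}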
/// Xml Document abstraction
///
/// This document wraps a lazy tokenizer with depth tracking.
/// Constructing a document is essentially free.
pub struct Document<'a> {
tokenizer: Tokenizer<'a>,
depth: Depth,
}
impl<'a> TryFrom<&'a [u8]> for Document<'a> {
type Error = XmlError;
fn try_from(value: &'a [u8]) -> Result<Self, Self::Error> {
Ok(Document::new(
std::str::from_utf8(value).map_err(|err| XmlError::Unhandled(Box::new(err)))?,
))
}
}
impl<'inp> Document<'inp> {
pub fn new(doc: &'inp str) -> Self {
Document {
tokenizer: Tokenizer::from(doc),
depth: 0,
}
}
/// "Depth first" iterator
///
/// Unlike [`next_tag()`](ScopedDecoder::next_tag), this method returns the next
/// start element regardless of depth. This is useful to give a pointer into the middle
/// of a document to start reading.
///
/// ```xml
/// <Response> <-- first call returns this:
/// <A> <-- next call
/// <Nested /> <-- next call returns this
/// <MoreNested>hello</MoreNested> <-- then this:
/// </A>
/// <B/> <-- second call to next_tag returns this
/// </Response>
/// ```
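///
/// A minimal walk over every start element (the input is illustrative):
///
/// ```ignore
/// let mut doc = Document::new("<Response><A><Nested/></A><B/></Response>");
/// while let Some(el) = doc.next_start_element() {
///     println!("{} at depth {}", el.local(), el.depth());
/// }
/// ```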
pub fn next_start_element<'a>(&'a mut self) -> Option<StartEl<'inp>> {
next_start_element(self)
}
/// A scoped reader for the entire document
pub fn root_element<'a>(&'a mut self) -> Result<ScopedDecoder<'inp, 'a>, XmlError> {
let start_el = self
.next_start_element()
.ok_or_else(|| XmlError::custom("no root element"))?;
Ok(ScopedDecoder {
doc: self,
start_el,
terminated: false,
})
}
/// A scoped reader for a specific tag
///
/// This method is necessary for when you need to return a ScopedDecoder from a function
/// since normally the stacked-ownership that `next_tag()` uses would prevent returning a reference
/// to a field owned by the current function
pub fn scoped_to<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> {
ScopedDecoder {
doc: self,
start_el,
terminated: false,
}
}
}
/// Depth tracking iterator
///
/// ```xml
/// <a> <- startel depth 0
/// <b> <- startel depth 1
/// <c> <- startel depth 2
/// </c> <- endel depth 2
/// </b> <- endel depth 1
/// </a> <- endel depth 0
/// ```
impl<'inp> Iterator for Document<'inp> {
type Item = Result<(Token<'inp>, Depth), XmlError>;
fn next<'a>(&'a mut self) -> Option<Result<(Token<'inp>, Depth), XmlError>> {
let tok = self.tokenizer.next()?;
let tok = match tok {
Err(e) => return Some(Err(e.into())),
Ok(tok) => tok,
};
// depth bookkeeping
match tok {
Token::ElementEnd {
end: ElementEnd::Close(_, _),
..
} => {
self.depth -= 1;
}
Token::ElementEnd {
end: ElementEnd::Empty,
..
} => self.depth -= 1,
t @ Token::ElementStart { .. } => {
self.depth += 1;
// We want the startel and endel to have the same depth, but after the opener,
// the parser will be at depth 1. Return the previous depth:
return Some(Ok((t, self.depth - 1)));
}
_ => {}
}
Some(Ok((tok, self.depth)))
}
}
/// XmlTag Abstraction
///
/// ScopedDecoder represents a tag-scoped view into an XML document. Methods
/// on `ScopedDecoder` return `None` when the current tag has been exhausted.
pub struct ScopedDecoder<'inp, 'a> {
doc: &'a mut Document<'inp>,
start_el: StartEl<'inp>,
terminated: bool,
}
/// When a scoped decoder is dropped, its entire scope is consumed so that the
/// next read begins at the next tag at the same depth.
impl Drop for ScopedDecoder<'_, '_> {
fn drop(&mut self) {
for _ in self {}
}
}
impl<'inp> ScopedDecoder<'inp, '_> {
/// The start element for this scope
pub fn start_el<'a>(&'a self) -> &'a StartEl<'inp> {
&self.start_el
}
/// Returns the next top-level tag in this scope.
///
/// The returned reader will fully read the tag during its lifetime. If it is dropped without
/// the data being read, the reader will be advanced until the matching close tag. If you read
/// an element with `next_tag()` and you want to ignore it, simply drop the resulting `ScopedDecoder`.
///
/// ```xml
/// <Response> <-- scoped reader on this tag
/// <A> <-- first call to next_tag returns this
/// <Nested /> <-- to get inner data, call `next_tag` on the returned decoder for `A`
/// <MoreNested>hello</MoreNested>
/// </A>
/// <B/> <-- second call to next_tag returns this
/// </Response>
/// ```
pub fn next_tag<'a>(&'a mut self) -> Option<ScopedDecoder<'inp, 'a>> {
let next_tag = next_start_element(self)?;
Some(self.nested_decoder(next_tag))
}
fn nested_decoder<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> {
ScopedDecoder {
doc: &mut self.doc,
start_el,
terminated: false,
}
}
}
impl<'inp, 'a> Iterator for ScopedDecoder<'inp, 'a> {
type Item = Result<(Token<'inp>, Depth), XmlError>;
fn next(&mut self) -> Option<Self::Item> {
if self.start_el.closed {
self.terminated = true;
}
if self.terminated {
return None;
}
let (tok, depth) = match self.doc.next() {
Some(Ok((tok, depth))) => (tok, depth),
other => return other,
};
match tok {
Token::ElementEnd { end, .. } if self.start_el.end_el(end, depth) => {
self.terminated = true;
return None;
}
_ => {}
}
Some(Ok((tok, depth)))
}
}
/// Load the next start element out of a depth-tagged token iterator
fn next_start_element<'a, 'inp>(
tokens: &'a mut impl Iterator<Item = Result<(Token<'inp>, Depth), XmlError>>,
) -> Option<StartEl<'inp>> {
let mut out = StartEl::new("", "", 0);
loop {
match tokens.next()? {
Ok((Token::ElementStart { local, prefix, .. }, depth)) => {
out.name.local = local.as_str();
out.name.prefix = prefix.as_str();
out.depth = depth;
}
Ok((
Token::Attribute {
prefix,
local,
value,
..
},
_,
)) => out.attributes.push(Attr {
name: Name {
local: local.as_str(),
prefix: prefix.as_str(),
},
value: unescape(value.as_str()).ok()?,
}),
Ok((
Token::ElementEnd {
end: ElementEnd::Open,
..
},
_,
)) => break,
Ok((
Token::ElementEnd {
end: ElementEnd::Empty,
..
},
_,
)) => {
out.closed = true;
break;
}
_ => {}
}
}
Some(out)
}
/// Returns the data element at the current position
///
/// If the current position is not a data element (and is instead a start element), an error
/// will be returned
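///
/// Sketch (document contents are illustrative, mirroring the tests below):
///
/// ```ignore
/// let mut doc = Document::new("<Response>hello</Response>");
/// let mut root = doc.root_element()?;
/// assert_eq!(try_data(&mut root)?, "hello");
/// ```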
pub fn try_data<'a, 'inp>(
tokens: &'a mut impl Iterator<Item = Result<(Token<'inp>, Depth), XmlError>>,
) -> Result<Cow<'inp, str>, XmlError> {
loop {
match tokens.next().map(|opt| opt.map(|opt| opt.0)) {
None => return Ok(Cow::Borrowed("")),
Some(Ok(Token::Text { text })) => return unescape(text.as_str()),
Some(Ok(e @ Token::ElementStart { .. })) => {
return Err(XmlError::custom(format!(
"Looking for a data element, found: {:?}",
e
)))
}
Some(Err(e)) => return Err(e),
_ => {}
}
}
}
#[cfg(test)]
mod test {
use crate::decode::{try_data, Attr, Depth, Document, Name, StartEl};
// test helper to create a closed startel
fn closed<'a>(local: &'a str, prefix: &'a str, depth: Depth) -> StartEl<'a> {
let mut s = StartEl::new(local, prefix, depth);
s.closed = true;
s
}
#[test]
fn scoped_tokens() {
let xml = r#"<Response><A></A></Response>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().expect("valid document");
assert_eq!(root.start_el().local(), "Response");
assert_eq!(root.next_tag().expect("tag exists").start_el().local(), "A");
assert!(root.next_tag().is_none());
}
#[test]
fn handle_depth_properly() {
let xml = r#"<Response><Response></Response><A/></Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().expect("valid document");
assert_eq!(
scoped.next_tag().unwrap().start_el(),
&StartEl::new("Response", "", 1)
);
let closed_a = closed("A", "", 1);
assert_eq!(scoped.next_tag().unwrap().start_el(), &closed_a);
assert!(scoped.next_tag().is_none())
}
#[test]
fn self_closing() {
let xml = r#"<Response/>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().expect("valid doc");
assert!(scoped.start_el.closed);
assert!(scoped.next_tag().is_none())
}
#[test]
fn terminate_scope() {
let xml = r#"<Response><Struct><A></A><Also/></Struct><More/></Response>"#;
let mut doc = Document::new(xml);
let mut response_iter = doc.root_element().expect("valid doc");
let mut struct_iter = response_iter.next_tag().unwrap();
assert_eq!(
struct_iter.next_tag().as_ref().map(|t| t.start_el()),
Some(&StartEl::new("A", "", 2))
);
// When the inner iter is dropped, it will read to the end of its scope
// prevent accidental behavior where we didn't read a full node
drop(struct_iter);
assert_eq!(
response_iter.next_tag().unwrap().start_el(),
&closed("More", "", 1)
);
}
#[test]
fn read_data_invalid() {
let xml = r#"<Response><A></A></Response>"#;
let mut doc = Document::new(xml);
let mut resp = doc.root_element().unwrap();
try_data(&mut resp).expect_err("no data");
}
#[test]
fn read_data() {
let xml = r#"<Response>hello</Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().unwrap();
assert_eq!(try_data(&mut scoped).unwrap(), "hello");
}
/// Whitespace within an element is preserved
#[test]
fn read_data_whitespace() {
let xml = r#"<Response> hello </Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().unwrap();
assert_eq!(try_data(&mut scoped).unwrap(), " hello ");
}
#[test]
fn ignore_insignificant_whitespace() {
let xml = r#"<Response> <A> </A> </Response>"#;
let mut doc = Document::new(xml);
let mut resp = doc.root_element().unwrap();
let mut a = resp.next_tag().expect("should be a");
let data = try_data(&mut a).expect("valid");
assert_eq!(data, " ");
}
#[test]
fn read_attributes() {
let xml = r#"<Response xsi:type="CanonicalUser">hello</Response>"#;
let mut tokenizer = Document::new(xml);
let root = tokenizer.root_element().unwrap();
assert_eq!(
root.start_el().attributes,
vec![Attr {
name: Name {
prefix: "xsi".into(),
local: "type".into()
},
value: "CanonicalUser".into()
}]
)
}
#[test]
fn escape_data() {
let xml = r#"<Response key=""hey">">></Response>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
assert_eq!(try_data(&mut root).unwrap(), ">");
assert_eq!(root.start_el().attr("key"), Some("\"hey\">"));
}
#[test]
fn nested_self_closer() {
let xml = r#"<XmlListsInputOutput>
<stringList/>
<stringSet></stringSet>
</XmlListsInputOutput>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
let mut string_list = root.next_tag().unwrap();
assert_eq!(string_list.start_el(), &closed("stringList", "", 1));
assert!(string_list.next_tag().is_none());
drop(string_list);
assert_eq!(
root.next_tag().unwrap().start_el(),
&StartEl::new("stringSet", "", 1)
);
}
#[test]
fn confusing_nested_same_name_tag() {
// an inner b which could be confused as closing the outer b if depth
// is not properly tracked:
let root_tags = &["a", "b", "c", "d"];
let xml = r#"<XmlListsInputOutput>
<a/>
<b>
<c/>
<b></b>
<here/>
</b>
<c></c>
<d>more</d>
</XmlListsInputOutput>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
let mut cmp = vec![];
while let Some(tag) = root.next_tag() {
cmp.push(tag.start_el().local().to_owned());
}
assert_eq!(root_tags, cmp.as_slice());
}
}
| {
self.name.local
} | identifier_body |
decode.rs | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
use crate::unescape::unescape;
use std::borrow::Cow;
use std::convert::TryFrom;
use thiserror::Error;
use xmlparser::{ElementEnd, Token, Tokenizer};
pub type Depth = usize;
// in general, these errors are just for reporting what happened, there isn't
// much value in lots of different match variants
#[derive(Debug, Error)]
pub enum XmlError {
#[error("XML Parse Error")]
InvalidXml(#[from] xmlparser::Error),
#[error("Invalid XML Escape: {esc}")]
InvalidEscape { esc: String },
#[error("Error parsing XML: {0}")]
Custom(Cow<'static, str>),
#[error("Encountered another error parsing XML: {0}")]
Unhandled(#[from] Box<dyn std::error::Error + Send + Sync + 'static>),
}
impl XmlError {
pub fn custom(msg: impl Into<Cow<'static, str>>) -> Self {
XmlError::Custom(msg.into())
}
}
#[derive(PartialEq, Debug)]
pub struct Name<'a> {
pub prefix: &'a str,
pub local: &'a str,
}
impl Name<'_> {
/// Check if a given name matches a tag name composed of `prefix:local` or just `local`
pub fn matches(&self, tag_name: &str) -> bool {
let split = tag_name.find(':');
match split {
None => tag_name == self.local,
Some(idx) => {
let (prefix, local) = tag_name.split_at(idx);
let local = &local[1..];
self.local == local && self.prefix == prefix
}
}
}
}
#[derive(Debug, PartialEq)]
pub struct Attr<'a> {
name: Name<'a>,
// attribute values can be escaped (e.g. with double quotes), so we need a Cow
value: Cow<'a, str>,
}
/// Xml Start Element
///
/// ```xml
/// <a:b c="d">
/// ^^^ ^^^^^
/// name attributes
/// ```
#[derive(Debug, PartialEq)]
pub struct StartEl<'a> {
name: Name<'a>,
attributes: Vec<Attr<'a>>,
closed: bool,
depth: Depth,
}
impl<'a> StartEl<'a> {
pub fn depth(&self) -> Depth {
self.depth
}
fn new(local: &'a str, prefix: &'a str, depth: Depth) -> Self {
Self {
name: Name { prefix, local },
attributes: vec![],
closed: false,
depth,
}
}
/// Retrieve an attribute with a given key
///
/// key `prefix:local` combined as a str, joined by a `:`
pub fn attr<'b>(&'b self, key: &'b str) -> Option<&'b str> {
self.attributes
.iter()
.find(|attr| attr.name.matches(key))
.map(|attr| attr.value.as_ref())
}
/// Returns whether this `StartEl` matches a given name
/// in `prefix:local` form.
pub fn matches(&self, pat: &str) -> bool {
self.name.matches(pat)
}
/// Local component of this element's name
///
/// ```xml
/// <foo:bar>
/// ^^^
/// ```
pub fn local(&self) -> &str {
self.name.local
}
/// Prefix component of this element's name (or empty string)
///
/// ```xml
/// <foo:bar>
/// ^^^
/// ```
pub fn prefix(&self) -> &str {
self.name.prefix
}
/// Returns true if `el` at `depth` is a match for this `start_el`
fn end_el(&self, el: ElementEnd, depth: Depth) -> bool {
if depth != self.depth {
return false;
}
match el {
ElementEnd::Open => false,
ElementEnd::Close(prefix, local) => {
prefix.as_str() == self.name.prefix && local.as_str() == self.name.local
}
ElementEnd::Empty => false,
}
}
}
/// Xml Document abstraction
///
/// This document wraps a lazy tokenizer with depth tracking.
/// Constructing a document is essentially free.
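///
/// Construction sketch (the input is illustrative):
///
/// ```ignore
/// let mut doc = Document::new("<Response>hello</Response>");
/// let mut root = doc.root_element()?; // a ScopedDecoder over <Response>
/// ```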
pub struct Document<'a> {
tokenizer: Tokenizer<'a>,
depth: Depth,
}
impl<'a> TryFrom<&'a [u8]> for Document<'a> {
type Error = XmlError;
fn try_from(value: &'a [u8]) -> Result<Self, Self::Error> {
Ok(Document::new(
std::str::from_utf8(value).map_err(|err| XmlError::Unhandled(Box::new(err)))?,
))
}
}
impl<'inp> Document<'inp> {
pub fn new(doc: &'inp str) -> Self {
Document {
tokenizer: Tokenizer::from(doc),
depth: 0,
}
}
/// "Depth first" iterator
///
/// Unlike [`next_tag()`](ScopedDecoder::next_tag), this method returns the next
/// start element regardless of depth. This is useful to give a pointer into the middle
/// of a document to start reading.
///
/// ```xml
/// <Response> <-- first call returns this:
/// <A> <-- next call | /// <Nested /> <-- next call returns this
/// <MoreNested>hello</MoreNested> <-- then this:
/// </A>
/// <B/> <-- fifth call returns this
/// </Response>
/// ```
pub fn next_start_element<'a>(&'a mut self) -> Option<StartEl<'inp>> {
next_start_element(self)
}
/// A scoped reader for the entire document
pub fn root_element<'a>(&'a mut self) -> Result<ScopedDecoder<'inp, 'a>, XmlError> {
let start_el = self
.next_start_element()
.ok_or_else(|| XmlError::custom("no root element"))?;
Ok(ScopedDecoder {
doc: self,
start_el,
terminated: false,
})
}
/// A scoped reader for a specific tag
///
/// This method is necessary for when you need to return a ScopedDecoder from a function
/// since normally the stacked-ownership that `next_tag()` uses would prevent returning a reference
/// to a field owned by the current function
pub fn scoped_to<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> {
ScopedDecoder {
doc: self,
start_el,
terminated: false,
}
}
}
/// Depth tracking iterator
///
/// ```xml
/// <a> <- startel depth 0
/// <b> <- startel depth 1
/// <c> <- startel depth 2
/// </c> <- endel depth 2
/// </b> <- endel depth 1
/// </a> <- endel depth 0
/// ```
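///
/// Iteration sketch (the input is illustrative):
///
/// ```ignore
/// for item in Document::new("<a><b/></a>") {
///     if let Ok((_token, depth)) = item {
///         // the start token of <b/> is reported at depth 1
///     }
/// }
/// ```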
impl<'inp> Iterator for Document<'inp> {
type Item = Result<(Token<'inp>, Depth), XmlError>;
fn next<'a>(&'a mut self) -> Option<Result<(Token<'inp>, Depth), XmlError>> {
let tok = self.tokenizer.next()?;
let tok = match tok {
Err(e) => return Some(Err(e.into())),
Ok(tok) => tok,
};
// depth bookkeeping
match tok {
Token::ElementEnd {
end: ElementEnd::Close(_, _),
..
} => {
self.depth -= 1;
}
Token::ElementEnd {
end: ElementEnd::Empty,
..
} => self.depth -= 1,
t @ Token::ElementStart { .. } => {
self.depth += 1;
// We want the startel and endel to have the same depth, but after the opener,
// the parser will be at depth 1. Return the previous depth:
return Some(Ok((t, self.depth - 1)));
}
_ => {}
}
Some(Ok((tok, self.depth)))
}
}
/// XmlTag Abstraction
///
/// ScopedDecoder represents a tag-scoped view into an XML document. Methods
/// on `ScopedDecoder` return `None` when the current tag has been exhausted.
pub struct ScopedDecoder<'inp, 'a> {
doc: &'a mut Document<'inp>,
start_el: StartEl<'inp>,
terminated: bool,
}
/// When a scoped decoder is dropped, its entire scope is consumed so that the
/// next read begins at the next tag at the same depth.
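///
/// Skip-by-drop sketch (the tag name is illustrative):
///
/// ```ignore
/// if let Some(unwanted) = scoped.next_tag() {
///     drop(unwanted); // the whole subtree under <unwanted> is consumed here
/// }
/// let next = scoped.next_tag(); // resumes after the dropped subtree
/// ```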
impl Drop for ScopedDecoder<'_, '_> {
fn drop(&mut self) {
for _ in self {}
}
}
impl<'inp> ScopedDecoder<'inp, '_> {
/// The start element for this scope
pub fn start_el<'a>(&'a self) -> &'a StartEl<'inp> {
&self.start_el
}
/// Returns the next top-level tag in this scope.
///
/// The returned reader will fully read the tag during its lifetime. If it is dropped without
/// the data being read, the reader will be advanced until the matching close tag. If you read
/// an element with `next_tag()` and you want to ignore it, simply drop the resulting `ScopedDecoder`.
///
/// ```xml
/// <Response> <-- scoped reader on this tag
/// <A> <-- first call to next_tag returns this
/// <Nested /> <-- to get inner data, call `next_tag` on the returned decoder for `A`
/// <MoreNested>hello</MoreNested>
/// </A>
/// <B/> <-- second call to next_tag returns this
/// </Response>
/// ```
pub fn next_tag<'a>(&'a mut self) -> Option<ScopedDecoder<'inp, 'a>> {
let next_tag = next_start_element(self)?;
Some(self.nested_decoder(next_tag))
}
fn nested_decoder<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> {
ScopedDecoder {
doc: &mut self.doc,
start_el,
terminated: false,
}
}
}
impl<'inp, 'a> Iterator for ScopedDecoder<'inp, 'a> {
type Item = Result<(Token<'inp>, Depth), XmlError>;
fn next(&mut self) -> Option<Self::Item> {
if self.start_el.closed {
self.terminated = true;
}
if self.terminated {
return None;
}
let (tok, depth) = match self.doc.next() {
Some(Ok((tok, depth))) => (tok, depth),
other => return other,
};
match tok {
Token::ElementEnd { end, .. } if self.start_el.end_el(end, depth) => {
self.terminated = true;
return None;
}
_ => {}
}
Some(Ok((tok, depth)))
}
}
/// Load the next start element out of a depth-tagged token iterator
fn next_start_element<'a, 'inp>(
tokens: &'a mut impl Iterator<Item = Result<(Token<'inp>, Depth), XmlError>>,
) -> Option<StartEl<'inp>> {
let mut out = StartEl::new("", "", 0);
loop {
match tokens.next()? {
Ok((Token::ElementStart { local, prefix, .. }, depth)) => {
out.name.local = local.as_str();
out.name.prefix = prefix.as_str();
out.depth = depth;
}
Ok((
Token::Attribute {
prefix,
local,
value,
..
},
_,
)) => out.attributes.push(Attr {
name: Name {
local: local.as_str(),
prefix: prefix.as_str(),
},
value: unescape(value.as_str()).ok()?,
}),
Ok((
Token::ElementEnd {
end: ElementEnd::Open,
..
},
_,
)) => break,
Ok((
Token::ElementEnd {
end: ElementEnd::Empty,
..
},
_,
)) => {
out.closed = true;
break;
}
_ => {}
}
}
Some(out)
}
/// Returns the data element at the current position
///
/// If the current position is not a data element (and is instead a start element), an error
/// will be returned
pub fn try_data<'a, 'inp>(
tokens: &'a mut impl Iterator<Item = Result<(Token<'inp>, Depth), XmlError>>,
) -> Result<Cow<'inp, str>, XmlError> {
loop {
match tokens.next().map(|opt| opt.map(|opt| opt.0)) {
None => return Ok(Cow::Borrowed("")),
Some(Ok(Token::Text { text })) => return unescape(text.as_str()),
Some(Ok(e @ Token::ElementStart { .. })) => {
return Err(XmlError::custom(format!(
"Looking for a data element, found: {:?}",
e
)))
}
Some(Err(e)) => return Err(e),
_ => {}
}
}
}
#[cfg(test)]
mod test {
use crate::decode::{try_data, Attr, Depth, Document, Name, StartEl};
// test helper to create a closed startel
fn closed<'a>(local: &'a str, prefix: &'a str, depth: Depth) -> StartEl<'a> {
let mut s = StartEl::new(local, prefix, depth);
s.closed = true;
s
}
#[test]
fn scoped_tokens() {
let xml = r#"<Response><A></A></Response>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().expect("valid document");
assert_eq!(root.start_el().local(), "Response");
assert_eq!(root.next_tag().expect("tag exists").start_el().local(), "A");
assert!(root.next_tag().is_none());
}
#[test]
fn handle_depth_properly() {
let xml = r#"<Response><Response></Response><A/></Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().expect("valid document");
assert_eq!(
scoped.next_tag().unwrap().start_el(),
&StartEl::new("Response", "", 1)
);
let closed_a = closed("A", "", 1);
assert_eq!(scoped.next_tag().unwrap().start_el(), &closed_a);
assert!(scoped.next_tag().is_none())
}
#[test]
fn self_closing() {
let xml = r#"<Response/>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().expect("valid doc");
assert!(scoped.start_el.closed);
assert!(scoped.next_tag().is_none())
}
#[test]
fn terminate_scope() {
let xml = r#"<Response><Struct><A></A><Also/></Struct><More/></Response>"#;
let mut doc = Document::new(xml);
let mut response_iter = doc.root_element().expect("valid doc");
let mut struct_iter = response_iter.next_tag().unwrap();
assert_eq!(
struct_iter.next_tag().as_ref().map(|t| t.start_el()),
Some(&StartEl::new("A", "", 2))
);
// When the inner iter is dropped, it will read to the end of its scope
// prevent accidental behavior where we didn't read a full node
drop(struct_iter);
assert_eq!(
response_iter.next_tag().unwrap().start_el(),
&closed("More", "", 1)
);
}
#[test]
fn read_data_invalid() {
let xml = r#"<Response><A></A></Response>"#;
let mut doc = Document::new(xml);
let mut resp = doc.root_element().unwrap();
try_data(&mut resp).expect_err("no data");
}
#[test]
fn read_data() {
let xml = r#"<Response>hello</Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().unwrap();
assert_eq!(try_data(&mut scoped).unwrap(), "hello");
}
/// Whitespace within an element is preserved
#[test]
fn read_data_whitespace() {
let xml = r#"<Response> hello </Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().unwrap();
assert_eq!(try_data(&mut scoped).unwrap(), " hello ");
}
#[test]
fn ignore_insignificant_whitespace() {
let xml = r#"<Response> <A> </A> </Response>"#;
let mut doc = Document::new(xml);
let mut resp = doc.root_element().unwrap();
let mut a = resp.next_tag().expect("should be a");
let data = try_data(&mut a).expect("valid");
assert_eq!(data, " ");
}
#[test]
fn read_attributes() {
let xml = r#"<Response xsi:type="CanonicalUser">hello</Response>"#;
let mut tokenizer = Document::new(xml);
let root = tokenizer.root_element().unwrap();
assert_eq!(
root.start_el().attributes,
vec![Attr {
name: Name {
prefix: "xsi".into(),
local: "type".into()
},
value: "CanonicalUser".into()
}]
)
}
#[test]
fn escape_data() {
let xml = r#"<Response key=""hey">">></Response>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
assert_eq!(try_data(&mut root).unwrap(), ">");
assert_eq!(root.start_el().attr("key"), Some("\"hey\">"));
}
#[test]
fn nested_self_closer() {
let xml = r#"<XmlListsInputOutput>
<stringList/>
<stringSet></stringSet>
</XmlListsInputOutput>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
let mut string_list = root.next_tag().unwrap();
assert_eq!(string_list.start_el(), &closed("stringList", "", 1));
assert!(string_list.next_tag().is_none());
drop(string_list);
assert_eq!(
root.next_tag().unwrap().start_el(),
&StartEl::new("stringSet", "", 1)
);
}
#[test]
fn confusing_nested_same_name_tag() {
// an inner b which could be confused as closing the outer b if depth
// is not properly tracked:
let root_tags = &["a", "b", "c", "d"];
let xml = r#"<XmlListsInputOutput>
<a/>
<b>
<c/>
<b></b>
<here/>
</b>
<c></c>
<d>more</d>
</XmlListsInputOutput>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
let mut cmp = vec![];
while let Some(tag) = root.next_tag() {
cmp.push(tag.start_el().local().to_owned());
}
assert_eq!(root_tags, cmp.as_slice());
}
} | random_line_split |
|
controller.go | package controller
import (
"errors"
"log"
"net/http"
"reflect"
"strings"
"github.com/zaolab/sunnified/mvc"
"github.com/zaolab/sunnified/mvc/view"
"github.com/zaolab/sunnified/web"
)
var ErrControllerNotFound = errors.New("controller not found")
var ErrUnprepared = errors.New("controller has not been prepared")
var ErrUnexecuted = errors.New("controller has not been executed")
var ErrParseStruct = errors.New("Sunnified Parser error")
const StructValueFeedTag = "sunnified.feed"
const StructValueResTag = "sunnified.res"
type StructValueFeeder interface {
FeedStructValue(*web.Context, *FieldMeta, reflect.Value) (reflect.Value, error)
}
type ControlHandler interface {
GetControlManager(*web.Context) *ControlManager
}
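// A minimal sketch of a middleware resource satisfying StructValueFeeder; the
// type name is hypothetical, and a real feeder would inspect the field's tag
// before deciding whether to rewrite or filter the value:
//
//	type passthroughFeeder struct{}
//
//	func (passthroughFeeder) FeedStructValue(ctx *web.Context, f *FieldMeta, v reflect.Value) (reflect.Value, error) {
//		return v, nil
//	}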
func NewControlManager(context *web.Context, cm *Meta, action string) *ControlManager {
rtype := cm.RType()
if rtype.Kind() == reflect.Ptr {
rtype = rtype.Elem()
}
return &ControlManager{
control: reflect.New(rtype),
context: context,
controlmeta: cm,
action: action,
}
}
type ControlManager struct {
control reflect.Value
context *web.Context
controlmeta *Meta
action string
prepared bool
executed bool
state int
vw mvc.View
}
func (c *ControlManager) SetControllerMeta(cm *Meta) (ok bool) {
if !c.prepared {
rtype := cm.RType()
if rtype.Kind() == reflect.Ptr {
rtype = rtype.Elem()
}
c.controlmeta = cm
c.control = reflect.New(rtype)
ok = true
}
return
}
func (c *ControlManager) SetAction(action string) (ok bool) {
if !c.prepared {
c.action = action
ok = true
}
return
}
func (c *ControlManager) SetState(state int) {
c.state = state
}
func (c *ControlManager) State() int {
return c.state
}
func (c *ControlManager) View() mvc.View {
return c.vw
}
func (c *ControlManager) IsPrepared() bool {
return c.prepared
}
func (c *ControlManager) IsExecuted() bool {
return c.executed
}
func (c *ControlManager) MvcMeta() mvc.Meta {
if c.controlmeta != nil {
return mvc.Meta{c.controlmeta.Module(), c.controlmeta.Name(), c.action, c.context.Ext}
}
return mvc.Meta{}
}
func (c *ControlManager) ModuleName() string {
if c.controlmeta != nil {
return c.controlmeta.Module()
}
return ""
}
func (c *ControlManager) ControllerName() string {
if c.controlmeta != nil {
return c.controlmeta.Name()
}
return ""
}
func (c *ControlManager) ActionName() string {
return c.action
}
func (c *ControlManager) Controller() reflect.Value {
return c.control
}
func (c *ControlManager) ActionMeta() *ActionMeta {
return c.controlmeta.ActionFromRequest(c.MvcMeta()[mvc.MVCAction], c.context)
}
func (c *ControlManager) AvailableMethods() ReqMethod {
return c.controlmeta.ActionAvailableMethods(c.action)
}
func (c *ControlManager) AvailableMethodsList() []string {
return c.controlmeta.ActionAvailableMethodsList(c.action)
}
func (c *ControlManager) ControllerMeta() *Meta {
return c.controlmeta
}
func (c *ControlManager) Context() *web.Context {
return c.context
}
func (c *ControlManager) PrepareAndExecute() (state int, vw mvc.View) {
if c.Prepare() == nil {
return c.Execute()
}
return c.state, nil
}
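// Typical lifecycle of a ControlManager, sketched (ctx, meta and the action
// name are assumed to be in scope):
//
//	cm := NewControlManager(ctx, meta, "Index")
//	defer cm.Cleanup()
//	if state, vw := cm.PrepareAndExecute(); state == http.StatusOK && vw != nil {
//		_ = cm.PublishView()
//	}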
func (c *ControlManager) Prepare() error {
if !c.prepared && (c.state == 0 || (c.state >= 200 && c.state < 300)) {
if c.controlmeta == nil {
c.state = 404
return ErrControllerNotFound
}
switch c.controlmeta.T() {
case ContypeConstructor:
results := c.control.Call(getArgSlice(c.controlmeta.Args(),
getVMap(c.context),
c.context.PData))
c.control = results[0]
if c.control.Kind() == reflect.Interface {
c.control = c.control.Elem()
}
// after Elem from Interface, it might be a pointer to a struct too
if c.control.Kind() == reflect.Ptr {
c.control = c.control.Elem()
}
if c.controlmeta.Status() {
state := int(results[1].Int())
if state <= 0 {
state = http.StatusOK
}
c.state = state
}
case ContypeStruct, ContypeScontroller:
fields := c.controlmeta.Fields()
tmpcontrol := reflect.Indirect(c.control)
for _, field := range fields {
value := getDataValue(&field.DataMeta,
getVMap(c.context),
c.context.PData)
// allows middleware resources to make changes to the value based on its tag;
// this is useful for CSRF protection, where values that fail CSRF verification are filtered out
if res := field.Tag().Get(StructValueFeedTag); res != "" {
var reses []string
if strings.Contains(res, ",") {
reses = strings.Split(res, ",")
} else {
reses = []string{res}
}
for _, r := range reses {
rinterface := c.context.Resource(strings.TrimSpace(r))
if rinterface != nil {
if parser, ok := rinterface.(StructValueFeeder); ok {
var err error
value, err = parser.FeedStructValue(c.context, field, value)
if err != nil {
c.state = 500
log.Println(err)
return ErrParseStruct
}
}
} else {
log.Println("Resource to parse struct var not found: ", r)
c.state = 500
return ErrParseStruct
}
}
} else if res := field.Tag().Get(StructValueResTag); res != "" {
rinterface := c.context.Resource(strings.TrimSpace(res))
if rinterface != nil {
value = reflect.ValueOf(rinterface)
}
}
if value.IsValid() {
tmpcontrol.FieldByName(field.Name()).Set(value)
}
}
if c.state != 500 && c.controlmeta.T() == ContypeScontroller {
ctrler := c.control.Interface().(mvc.Controller)
ctrler.Construct_(c.context)
}
}
if c.context.HasErrorCode() {
c.state = c.context.ErrorCode()
}
c.prepared = true
}
return nil
}
func (c *ControlManager) Execute() (state int, vw mvc.View) {
if c.prepared {
if c.state == 0 {
c.state = 200
}
var results []reflect.Value
var rstyle = c.controlmeta.ResultStyle
if c.state >= http.StatusOK && c.state < http.StatusMultipleChoices {
switch c.controlmeta.T() {
case ContypeFunc:
results = c.control.Call(getArgSlice(c.controlmeta.Args(),
getVMap(c.context),
c.context.PData))
default:
actmeta := c.ActionMeta()
if actmeta != nil {
meth := c.control.MethodByName(actmeta.Name())
results = meth.Call(getArgSlice(actmeta.Args(),
getVMap(c.context),
c.context.PData))
rstyle = actmeta.ResultStyle
} else {
c.state = 404
state = c.state
return
}
}
}
if rstyle.Status() {
c.state = int(results[1].Int())
}
if rstyle.View() || rstyle.Vmap() || rstyle.MapSI() {
// for a consistent error page, an error status should be returned instead, allowing the sunny server itself
// to render the error page
state = c.state
if state == 200 || state == 0 {
if rstyle.View() {
if !results[0].IsNil() && results[0].IsValid() {
c.vw = (results[0].Interface()).(mvc.View)
}
} else {
var vmap mvc.VM
if results[0].IsNil() || !results[0].IsValid() {
vmap = mvc.VM{}
} else if rstyle.Vmap() {
vmap = results[0].Interface().(mvc.VM)
} else {
vmap = mvc.VM(results[0].Interface().(map[string]interface{}))
}
c.vw = view.NewResultView(vmap)
}
vw = c.vw
if vw == nil {
state = -1
}
}
} else {
// if state returned is -1, it means the controller has handled the response
state = -1
}
c.executed = true
} else {
state = c.state
}
return
}
func (c *ControlManager) PublishView() (err error) {
if !c.prepared {
err = ErrUnprepared
} else if !c.executed {
err = ErrUnexecuted
} else if c.vw != nil {
if c.context.Request.Method == "HEAD" {
c.context.Response.Header().Set("Content-Type", c.vw.ContentType(c.context))
} else {
err = c.vw.Publish(c.context)
}
}
return
}
func (c *ControlManager) Cleanup() {
if c.prepared && c.controlmeta.T() == ContypeScontroller {
ctrler := c.control.Interface().(mvc.Controller)
ctrler.Destruct_()
}
}
func getVMap(context *web.Context) map[string]reflect.Value {
return map[string]reflect.Value{
"context": reflect.ValueOf(context),
"w": reflect.ValueOf(context.Response),
"r": reflect.ValueOf(context.Request),
"upath": reflect.ValueOf(context.UPath),
"pdata": reflect.ValueOf(context.PData),
"upath_slice": reflect.ValueOf([]string(context.UPath)),
"pdata_map": reflect.ValueOf(map[string]string(context.PData)),
}
}
func getArgSlice(args []*ArgMeta, vmap map[string]reflect.Value, d web.PData) (values []reflect.Value) {
values = make([]reflect.Value, len(args))
for i, arg := range args {
values[i] = getDataValue(&arg.DataMeta, vmap, d)
}
return
}
func getDataValue(arg *DataMeta, vmap map[string]reflect.Value, d web.PData) (value reflect.Value) {
switch arg.T() {
case DatatypeWebContext:
value = vmap["context"]
case DatatypeRequest:
value = vmap["r"]
case DatatypeResponseWriter:
value = vmap["w"]
case DatatypeUpath:
value = vmap["upath"]
case DatatypeUpathSlice:
value = vmap["upath_slice"]
case DatatypePdata:
value = vmap["pdata"]
case DatatypePdataMap:
value = vmap["pdata_map"]
case DatatypeString:
val, _ := d.String(arg.LName())
value = reflect.ValueOf(val)
case DatatypeInt:
val, _ := d.Int(arg.LName())
value = reflect.ValueOf(val)
case DatatypeInt64:
val, _ := d.Int64(arg.LName())
value = reflect.ValueOf(val)
case DatatypeFloat:
val, _ := d.Float32(arg.LName())
value = reflect.ValueOf(val) | case DatatypeEmail:
val, _ := d.Email(arg.LName())
value = reflect.ValueOf(val)
case DatatypeURL:
val, _ := d.Url(arg.LName())
value = reflect.ValueOf(val)
case DatatypeDate:
val, _ := d.Date(arg.LName())
value = reflect.ValueOf(val)
case DatatypeTime:
val, _ := d.Time(arg.LName())
value = reflect.ValueOf(val)
case DatatypeDateTime:
val, _ := d.DateTime(arg.LName())
value = reflect.ValueOf(val)
case DatatypeStruct, DatatypeEmbedded:
fields := arg.Fields()
model := reflect.New(arg.RType())
modelval := model.Elem()
for _, field := range fields {
modelval.FieldByName(field.Name()).Set(getDataValue(&field.DataMeta, vmap, d))
}
if arg.RType().Kind() == reflect.Ptr {
value = model
} else {
value = modelval
}
}
return
} | case DatatypeFloat64:
val, _ := d.Float64(arg.LName())
value = reflect.ValueOf(val) | random_line_split |
controller.go | package controller
import (
"errors"
"log"
"net/http"
"reflect"
"strings"
"github.com/zaolab/sunnified/mvc"
"github.com/zaolab/sunnified/mvc/view"
"github.com/zaolab/sunnified/web"
)
var ErrControllerNotFound = errors.New("controller not found")
var ErrUnprepared = errors.New("controller has not been prepared")
var ErrUnexecuted = errors.New("controller has not been executed")
var ErrParseStruct = errors.New("Sunnified Parser error")
const StructValueFeedTag = "sunnified.feed"
const StructValueResTag = "sunnified.res"
type StructValueFeeder interface {
FeedStructValue(*web.Context, *FieldMeta, reflect.Value) (reflect.Value, error)
}
type ControlHandler interface {
GetControlManager(*web.Context) *ControlManager
}
func NewControlManager(context *web.Context, cm *Meta, action string) *ControlManager {
rtype := cm.RType()
if rtype.Kind() == reflect.Ptr {
rtype = rtype.Elem()
}
return &ControlManager{
control: reflect.New(rtype),
context: context,
controlmeta: cm,
action: action,
}
}
type ControlManager struct {
control reflect.Value
context *web.Context
controlmeta *Meta
action string
prepared bool
executed bool
state int
vw mvc.View
}
func (c *ControlManager) SetControllerMeta(cm *Meta) (ok bool) {
if !c.prepared {
rtype := cm.RType()
if rtype.Kind() == reflect.Ptr {
rtype = rtype.Elem()
}
c.controlmeta = cm
c.control = reflect.New(rtype)
ok = true
}
return
}
func (c *ControlManager) SetAction(action string) (ok bool) {
if !c.prepared {
c.action = action
ok = true
}
return
}
func (c *ControlManager) SetState(state int) {
c.state = state
}
func (c *ControlManager) State() int {
return c.state
}
func (c *ControlManager) View() mvc.View {
return c.vw
}
func (c *ControlManager) IsPrepared() bool {
return c.prepared
}
func (c *ControlManager) IsExecuted() bool {
return c.executed
}
func (c *ControlManager) MvcMeta() mvc.Meta {
if c.controlmeta != nil {
return mvc.Meta{c.controlmeta.Module(), c.controlmeta.Name(), c.action, c.context.Ext}
}
return mvc.Meta{}
}
func (c *ControlManager) ModuleName() string {
if c.controlmeta != nil {
return c.controlmeta.Module()
}
return ""
}
func (c *ControlManager) | () string {
if c.controlmeta != nil {
return c.controlmeta.Name()
}
return ""
}
func (c *ControlManager) ActionName() string {
return c.action
}
func (c *ControlManager) Controller() reflect.Value {
return c.control
}
func (c *ControlManager) ActionMeta() *ActionMeta {
return c.controlmeta.ActionFromRequest(c.MvcMeta()[mvc.MVCAction], c.context)
}
func (c *ControlManager) AvailableMethods() ReqMethod {
return c.controlmeta.ActionAvailableMethods(c.action)
}
func (c *ControlManager) AvailableMethodsList() []string {
return c.controlmeta.ActionAvailableMethodsList(c.action)
}
func (c *ControlManager) ControllerMeta() *Meta {
return c.controlmeta
}
func (c *ControlManager) Context() *web.Context {
return c.context
}
func (c *ControlManager) PrepareAndExecute() (state int, vw mvc.View) {
if c.Prepare() == nil {
return c.Execute()
}
return c.state, nil
}
func (c *ControlManager) Prepare() error {
if !c.prepared && (c.state == 0 || (c.state >= 200 && c.state < 300)) {
if c.controlmeta == nil {
c.state = 404
return ErrControllerNotFound
}
switch c.controlmeta.T() {
case ContypeConstructor:
results := c.control.Call(getArgSlice(c.controlmeta.Args(),
getVMap(c.context),
c.context.PData))
c.control = results[0]
if c.control.Kind() == reflect.Interface {
c.control = c.control.Elem()
}
// after Elem from Interface, it might be a pointer to a struct too
if c.control.Kind() == reflect.Ptr {
c.control = c.control.Elem()
}
if c.controlmeta.Status() {
state := int(results[1].Int())
if state <= 0 {
state = http.StatusOK
}
c.state = state
}
case ContypeStruct, ContypeScontroller:
fields := c.controlmeta.Fields()
tmpcontrol := reflect.Indirect(c.control)
for _, field := range fields {
value := getDataValue(&field.DataMeta,
getVMap(c.context),
c.context.PData)
// allows middleware resources to make changes to the value based on its tag;
// this is useful for CSRF protection, where values that fail CSRF verification are filtered out
if res := field.Tag().Get(StructValueFeedTag); res != "" {
var reses []string
if strings.Contains(res, ",") {
reses = strings.Split(res, ",")
} else {
reses = []string{res}
}
for _, r := range reses {
rinterface := c.context.Resource(strings.TrimSpace(r))
if rinterface != nil {
if parser, ok := rinterface.(StructValueFeeder); ok {
var err error
value, err = parser.FeedStructValue(c.context, field, value)
if err != nil {
c.state = 500
log.Println(err)
return ErrParseStruct
}
}
} else {
log.Println("Resource to parse struct var not found: ", r)
c.state = 500
return ErrParseStruct
}
}
} else if res := field.Tag().Get(StructValueResTag); res != "" {
rinterface := c.context.Resource(strings.TrimSpace(res))
if rinterface != nil {
value = reflect.ValueOf(rinterface)
}
}
if value.IsValid() {
tmpcontrol.FieldByName(field.Name()).Set(value)
}
}
if c.state != 500 && c.controlmeta.T() == ContypeScontroller {
ctrler := c.control.Interface().(mvc.Controller)
ctrler.Construct_(c.context)
}
}
if c.context.HasErrorCode() {
c.state = c.context.ErrorCode()
}
c.prepared = true
}
return nil
}
func (c *ControlManager) Execute() (state int, vw mvc.View) {
if c.prepared {
if c.state == 0 {
c.state = 200
}
var results []reflect.Value
var rstyle = c.controlmeta.ResultStyle
if c.state >= http.StatusOK && c.state < http.StatusMultipleChoices {
switch c.controlmeta.T() {
case ContypeFunc:
results = c.control.Call(getArgSlice(c.controlmeta.Args(),
getVMap(c.context),
c.context.PData))
default:
actmeta := c.ActionMeta()
if actmeta != nil {
meth := c.control.MethodByName(actmeta.Name())
results = meth.Call(getArgSlice(actmeta.Args(),
getVMap(c.context),
c.context.PData))
rstyle = actmeta.ResultStyle
} else {
c.state = 404
state = c.state
return
}
}
}
if rstyle.Status() {
c.state = int(results[1].Int())
}
if rstyle.View() || rstyle.Vmap() || rstyle.MapSI() {
// for a consistent error page, an error status should be returned instead, allowing the sunny server itself
// to render the error page
state = c.state
if state == 200 || state == 0 {
if rstyle.View() {
if !results[0].IsNil() && results[0].IsValid() {
c.vw = (results[0].Interface()).(mvc.View)
}
} else {
var vmap mvc.VM
if results[0].IsNil() || !results[0].IsValid() {
vmap = mvc.VM{}
} else if rstyle.Vmap() {
vmap = results[0].Interface().(mvc.VM)
} else {
vmap = mvc.VM(results[0].Interface().(map[string]interface{}))
}
c.vw = view.NewResultView(vmap)
}
vw = c.vw
if vw == nil {
state = -1
}
}
} else {
// if state returned is -1, it means the controller has handled the response
state = -1
}
c.executed = true
} else {
state = c.state
}
return
}
func (c *ControlManager) PublishView() (err error) {
if !c.prepared {
err = ErrUnprepared
} else if !c.executed {
err = ErrUnexecuted
} else if c.vw != nil {
if c.context.Request.Method == "HEAD" {
c.context.Response.Header().Set("Content-Type", c.vw.ContentType(c.context))
} else {
err = c.vw.Publish(c.context)
}
}
return
}
func (c *ControlManager) Cleanup() {
if c.prepared && c.controlmeta.T() == ContypeScontroller {
ctrler := c.control.Interface().(mvc.Controller)
ctrler.Destruct_()
}
}
func getVMap(context *web.Context) map[string]reflect.Value {
return map[string]reflect.Value{
"context": reflect.ValueOf(context),
"w": reflect.ValueOf(context.Response),
"r": reflect.ValueOf(context.Request),
"upath": reflect.ValueOf(context.UPath),
"pdata": reflect.ValueOf(context.PData),
"upath_slice": reflect.ValueOf([]string(context.UPath)),
"pdata_map": reflect.ValueOf(map[string]string(context.PData)),
}
}
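// Binding sketch: an action argument typed *web.Context, http.ResponseWriter,
// *http.Request, web.UPath or web.PData resolves to the matching entry of the
// map above (the controller and action below are hypothetical):
//
//	func (c *MyController) Index(ctx *web.Context, upath web.UPath) mvc.VM {
//		return mvc.VM{"path": []string(upath)}
//	}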
func getArgSlice(args []*ArgMeta, vmap map[string]reflect.Value, d web.PData) (values []reflect.Value) {
values = make([]reflect.Value, len(args))
for i, arg := range args {
values[i] = getDataValue(&arg.DataMeta, vmap, d)
}
return
}
func getDataValue(arg *DataMeta, vmap map[string]reflect.Value, d web.PData) (value reflect.Value) {
switch arg.T() {
case DatatypeWebContext:
value = vmap["context"]
case DatatypeRequest:
value = vmap["r"]
case DatatypeResponseWriter:
value = vmap["w"]
case DatatypeUpath:
value = vmap["upath"]
case DatatypeUpathSlice:
value = vmap["upath_slice"]
case DatatypePdata:
value = vmap["pdata"]
case DatatypePdataMap:
value = vmap["pdata_map"]
case DatatypeString:
val, _ := d.String(arg.LName())
value = reflect.ValueOf(val)
case DatatypeInt:
val, _ := d.Int(arg.LName())
value = reflect.ValueOf(val)
case DatatypeInt64:
val, _ := d.Int64(arg.LName())
value = reflect.ValueOf(val)
case DatatypeFloat:
val, _ := d.Float32(arg.LName())
value = reflect.ValueOf(val)
case DatatypeFloat64:
val, _ := d.Float64(arg.LName())
value = reflect.ValueOf(val)
case DatatypeEmail:
val, _ := d.Email(arg.LName())
value = reflect.ValueOf(val)
case DatatypeURL:
val, _ := d.Url(arg.LName())
value = reflect.ValueOf(val)
case DatatypeDate:
val, _ := d.Date(arg.LName())
value = reflect.ValueOf(val)
case DatatypeTime:
val, _ := d.Time(arg.LName())
value = reflect.ValueOf(val)
case DatatypeDateTime:
val, _ := d.DateTime(arg.LName())
value = reflect.ValueOf(val)
case DatatypeStruct, DatatypeEmbedded:
fields := arg.Fields()
model := reflect.New(arg.RType())
modelval := model.Elem()
for _, field := range fields {
modelval.FieldByName(field.Name()).Set(getDataValue(&field.DataMeta, vmap, d))
}
if arg.RType().Kind() == reflect.Ptr {
value = model
} else {
value = modelval
}
}
return
}
| ControllerName | identifier_name |
controller.go | package controller
import (
"errors"
"log"
"net/http"
"reflect"
"strings"
"github.com/zaolab/sunnified/mvc"
"github.com/zaolab/sunnified/mvc/view"
"github.com/zaolab/sunnified/web"
)
var ErrControllerNotFound = errors.New("controller not found")
var ErrUnprepared = errors.New("controller has not been prepared")
var ErrUnexecuted = errors.New("controller has not been executed")
var ErrParseStruct = errors.New("Sunnified Parser error")
const StructValueFeedTag = "sunnified.feed"
const StructValueResTag = "sunnified.res"
type StructValueFeeder interface {
FeedStructValue(*web.Context, *FieldMeta, reflect.Value) (reflect.Value, error)
}
type ControlHandler interface {
GetControlManager(*web.Context) *ControlManager
}
func NewControlManager(context *web.Context, cm *Meta, action string) *ControlManager {
rtype := cm.RType()
if rtype.Kind() == reflect.Ptr {
rtype = rtype.Elem()
}
return &ControlManager{
control: reflect.New(rtype),
context: context,
controlmeta: cm,
action: action,
}
}
type ControlManager struct {
control reflect.Value
context *web.Context
controlmeta *Meta
action string
prepared bool
executed bool
state int
vw mvc.View
}
func (c *ControlManager) SetControllerMeta(cm *Meta) (ok bool) {
if !c.prepared {
rtype := cm.RType()
if rtype.Kind() == reflect.Ptr {
rtype = rtype.Elem()
}
c.controlmeta = cm
c.control = reflect.New(rtype)
ok = true
}
return
}
func (c *ControlManager) SetAction(action string) (ok bool) {
if !c.prepared {
c.action = action
ok = true
}
return
}
func (c *ControlManager) SetState(state int) {
c.state = state
}
func (c *ControlManager) State() int {
return c.state
}
func (c *ControlManager) View() mvc.View {
return c.vw
}
func (c *ControlManager) IsPrepared() bool {
return c.prepared
}
func (c *ControlManager) IsExecuted() bool {
return c.executed
}
func (c *ControlManager) MvcMeta() mvc.Meta {
if c.controlmeta != nil {
return mvc.Meta{c.controlmeta.Module(), c.controlmeta.Name(), c.action, c.context.Ext}
}
return mvc.Meta{}
}
func (c *ControlManager) ModuleName() string {
if c.controlmeta != nil {
return c.controlmeta.Module()
}
return ""
}
func (c *ControlManager) ControllerName() string {
if c.controlmeta != nil {
return c.controlmeta.Name()
}
return ""
}
func (c *ControlManager) ActionName() string {
return c.action
}
func (c *ControlManager) Controller() reflect.Value {
return c.control
}
func (c *ControlManager) ActionMeta() *ActionMeta {
return c.controlmeta.ActionFromRequest(c.MvcMeta()[mvc.MVCAction], c.context)
}
func (c *ControlManager) AvailableMethods() ReqMethod {
return c.controlmeta.ActionAvailableMethods(c.action)
}
func (c *ControlManager) AvailableMethodsList() []string {
return c.controlmeta.ActionAvailableMethodsList(c.action)
}
func (c *ControlManager) ControllerMeta() *Meta {
return c.controlmeta
}
func (c *ControlManager) Context() *web.Context {
return c.context
}
func (c *ControlManager) PrepareAndExecute() (state int, vw mvc.View) {
if c.Prepare() == nil {
return c.Execute()
}
return c.state, nil
}
func (c *ControlManager) Prepare() error {
if !c.prepared && (c.state == 0 || (c.state >= 200 && c.state < 300)) {
if c.controlmeta == nil {
c.state = 404
return ErrControllerNotFound
}
switch c.controlmeta.T() {
case ContypeConstructor:
results := c.control.Call(getArgSlice(c.controlmeta.Args(),
getVMap(c.context),
c.context.PData))
c.control = results[0]
if c.control.Kind() == reflect.Interface {
c.control = c.control.Elem()
}
// after Elem from Interface, it might be a pointer to a struct too
if c.control.Kind() == reflect.Ptr {
c.control = c.control.Elem()
}
if c.controlmeta.Status() {
state := int(results[1].Int())
if state <= 0 {
state = http.StatusOK
}
c.state = state
}
case ContypeStruct, ContypeScontroller:
fields := c.controlmeta.Fields()
tmpcontrol := reflect.Indirect(c.control)
for _, field := range fields {
value := getDataValue(&field.DataMeta,
getVMap(c.context),
c.context.PData)
// allows middleware resources to make changes to the value based on its tag;
// this is useful for CSRF protection, where values that fail CSRF verification are filtered out
if res := field.Tag().Get(StructValueFeedTag); res != "" {
var reses []string
if strings.Contains(res, ",") | else {
reses = []string{res}
}
for _, r := range reses {
rinterface := c.context.Resource(strings.TrimSpace(r))
if rinterface != nil {
if parser, ok := rinterface.(StructValueFeeder); ok {
var err error
value, err = parser.FeedStructValue(c.context, field, value)
if err != nil {
c.state = 500
log.Println(err)
return ErrParseStruct
}
}
} else {
log.Println("Resource to parse struct var not found: ", r)
c.state = 500
return ErrParseStruct
}
}
} else if res := field.Tag().Get(StructValueResTag); res != "" {
rinterface := c.context.Resource(strings.TrimSpace(res))
if rinterface != nil {
value = reflect.ValueOf(rinterface)
}
}
if value.IsValid() {
tmpcontrol.FieldByName(field.Name()).Set(value)
}
}
if c.state != 500 && c.controlmeta.T() == ContypeScontroller {
ctrler := c.control.Interface().(mvc.Controller)
ctrler.Construct_(c.context)
}
}
if c.context.HasErrorCode() {
c.state = c.context.ErrorCode()
}
c.prepared = true
}
return nil
}
func (c *ControlManager) Execute() (state int, vw mvc.View) {
if c.prepared {
if c.state == 0 {
c.state = 200
}
var results []reflect.Value
var rstyle = c.controlmeta.ResultStyle
if c.state >= http.StatusOK && c.state < http.StatusMultipleChoices {
switch c.controlmeta.T() {
case ContypeFunc:
results = c.control.Call(getArgSlice(c.controlmeta.Args(),
getVMap(c.context),
c.context.PData))
default:
actmeta := c.ActionMeta()
if actmeta != nil {
meth := c.control.MethodByName(actmeta.Name())
results = meth.Call(getArgSlice(actmeta.Args(),
getVMap(c.context),
c.context.PData))
rstyle = actmeta.ResultStyle
} else {
c.state = 404
state = c.state
return
}
}
}
if rstyle.Status() {
c.state = int(results[1].Int())
}
if rstyle.View() || rstyle.Vmap() || rstyle.MapSI() {
// for a consistent error page, an error status should be returned instead, allowing the sunny server itself
// to render the error page
state = c.state
if state == 200 || state == 0 {
if rstyle.View() {
if !results[0].IsNil() && results[0].IsValid() {
c.vw = (results[0].Interface()).(mvc.View)
}
} else {
var vmap mvc.VM
if results[0].IsNil() || !results[0].IsValid() {
vmap = mvc.VM{}
} else if rstyle.Vmap() {
vmap = results[0].Interface().(mvc.VM)
} else {
vmap = mvc.VM(results[0].Interface().(map[string]interface{}))
}
c.vw = view.NewResultView(vmap)
}
vw = c.vw
if vw == nil {
state = -1
}
}
} else {
// if state returned is -1, it means the controller has handled the response
state = -1
}
c.executed = true
} else {
state = c.state
}
return
}
func (c *ControlManager) PublishView() (err error) {
if !c.prepared {
err = ErrUnprepared
} else if !c.executed {
err = ErrUnexecuted
} else if c.vw != nil {
if c.context.Request.Method == "HEAD" {
c.context.Response.Header().Set("Content-Type", c.vw.ContentType(c.context))
} else {
err = c.vw.Publish(c.context)
}
}
return
}
func (c *ControlManager) Cleanup() {
if c.prepared && c.controlmeta.T() == ContypeScontroller {
ctrler := c.control.Interface().(mvc.Controller)
ctrler.Destruct_()
}
}
func getVMap(context *web.Context) map[string]reflect.Value {
return map[string]reflect.Value{
"context": reflect.ValueOf(context),
"w": reflect.ValueOf(context.Response),
"r": reflect.ValueOf(context.Request),
"upath": reflect.ValueOf(context.UPath),
"pdata": reflect.ValueOf(context.PData),
"upath_slice": reflect.ValueOf([]string(context.UPath)),
"pdata_map": reflect.ValueOf(map[string]string(context.PData)),
}
}
func getArgSlice(args []*ArgMeta, vmap map[string]reflect.Value, d web.PData) (values []reflect.Value) {
values = make([]reflect.Value, len(args))
for i, arg := range args {
values[i] = getDataValue(&arg.DataMeta, vmap, d)
}
return
}
func getDataValue(arg *DataMeta, vmap map[string]reflect.Value, d web.PData) (value reflect.Value) {
switch arg.T() {
case DatatypeWebContext:
value = vmap["context"]
case DatatypeRequest:
value = vmap["r"]
case DatatypeResponseWriter:
value = vmap["w"]
case DatatypeUpath:
value = vmap["upath"]
case DatatypeUpathSlice:
value = vmap["upath_slice"]
case DatatypePdata:
value = vmap["pdata"]
case DatatypePdataMap:
value = vmap["pdata_map"]
case DatatypeString:
val, _ := d.String(arg.LName())
value = reflect.ValueOf(val)
case DatatypeInt:
val, _ := d.Int(arg.LName())
value = reflect.ValueOf(val)
case DatatypeInt64:
val, _ := d.Int64(arg.LName())
value = reflect.ValueOf(val)
case DatatypeFloat:
val, _ := d.Float32(arg.LName())
value = reflect.ValueOf(val)
case DatatypeFloat64:
val, _ := d.Float64(arg.LName())
value = reflect.ValueOf(val)
case DatatypeEmail:
val, _ := d.Email(arg.LName())
value = reflect.ValueOf(val)
case DatatypeURL:
val, _ := d.Url(arg.LName())
value = reflect.ValueOf(val)
case DatatypeDate:
val, _ := d.Date(arg.LName())
value = reflect.ValueOf(val)
case DatatypeTime:
val, _ := d.Time(arg.LName())
value = reflect.ValueOf(val)
case DatatypeDateTime:
val, _ := d.DateTime(arg.LName())
value = reflect.ValueOf(val)
case DatatypeStruct, DatatypeEmbedded:
fields := arg.Fields()
model := reflect.New(arg.RType())
modelval := model.Elem()
for _, field := range fields {
modelval.FieldByName(field.Name()).Set(getDataValue(&field.DataMeta, vmap, d))
}
if arg.RType().Kind() == reflect.Ptr {
value = model
} else {
value = modelval
}
}
return
}
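// Struct-binding sketch: a DatatypeStruct argument is populated field by field
// from request data, so a form model like the hypothetical one below would be
// filled from d.String(...) and d.Int(...), with lookup keys taken from LName():
//
//	type SignupForm struct {
//		Email string
//		Age   int
//	}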
| {
reses = strings.Split(res, ",")
} | conditional_block |
controller.go | package controller
import (
"errors"
"log"
"net/http"
"reflect"
"strings"
"github.com/zaolab/sunnified/mvc"
"github.com/zaolab/sunnified/mvc/view"
"github.com/zaolab/sunnified/web"
)
var ErrControllerNotFound = errors.New("controller not found")
var ErrUnprepared = errors.New("controller has not been prepared")
var ErrUnexecuted = errors.New("controller has not been executed")
var ErrParseStruct = errors.New("Sunnified Parser error")
const StructValueFeedTag = "sunnified.feed"
const StructValueResTag = "sunnified.res"
type StructValueFeeder interface {
FeedStructValue(*web.Context, *FieldMeta, reflect.Value) (reflect.Value, error)
}
type ControlHandler interface {
GetControlManager(*web.Context) *ControlManager
}
func NewControlManager(context *web.Context, cm *Meta, action string) *ControlManager {
rtype := cm.RType()
if rtype.Kind() == reflect.Ptr {
rtype = rtype.Elem()
}
return &ControlManager{
control: reflect.New(rtype),
context: context,
controlmeta: cm,
action: action,
}
}
type ControlManager struct {
control reflect.Value
context *web.Context
controlmeta *Meta
action string
prepared bool
executed bool
state int
vw mvc.View
}
func (c *ControlManager) SetControllerMeta(cm *Meta) (ok bool) {
if !c.prepared {
rtype := cm.RType()
if rtype.Kind() == reflect.Ptr {
rtype = rtype.Elem()
}
c.controlmeta = cm
c.control = reflect.New(rtype)
ok = true
}
return
}
func (c *ControlManager) SetAction(action string) (ok bool) {
if !c.prepared {
c.action = action
ok = true
}
return
}
func (c *ControlManager) SetState(state int) {
c.state = state
}
func (c *ControlManager) State() int {
return c.state
}
func (c *ControlManager) View() mvc.View {
return c.vw
}
func (c *ControlManager) IsPrepared() bool {
return c.prepared
}
func (c *ControlManager) IsExecuted() bool {
return c.executed
}
func (c *ControlManager) MvcMeta() mvc.Meta {
if c.controlmeta != nil {
return mvc.Meta{c.controlmeta.Module(), c.controlmeta.Name(), c.action, c.context.Ext}
}
return mvc.Meta{}
}
func (c *ControlManager) ModuleName() string {
if c.controlmeta != nil {
return c.controlmeta.Module()
}
return ""
}
func (c *ControlManager) ControllerName() string {
if c.controlmeta != nil {
return c.controlmeta.Name()
}
return ""
}
func (c *ControlManager) ActionName() string {
return c.action
}
func (c *ControlManager) Controller() reflect.Value {
return c.control
}
func (c *ControlManager) ActionMeta() *ActionMeta {
return c.controlmeta.ActionFromRequest(c.MvcMeta()[mvc.MVCAction], c.context)
}
func (c *ControlManager) AvailableMethods() ReqMethod {
return c.controlmeta.ActionAvailableMethods(c.action)
}
func (c *ControlManager) AvailableMethodsList() []string {
return c.controlmeta.ActionAvailableMethodsList(c.action)
}
func (c *ControlManager) ControllerMeta() *Meta {
return c.controlmeta
}
func (c *ControlManager) Context() *web.Context {
return c.context
}
func (c *ControlManager) PrepareAndExecute() (state int, vw mvc.View) {
if c.Prepare() == nil {
return c.Execute()
}
return c.state, nil
}
func (c *ControlManager) Prepare() error {
if !c.prepared && (c.state == 0 || (c.state >= 200 && c.state < 300)) {
if c.controlmeta == nil {
c.state = 404
return ErrControllerNotFound
}
switch c.controlmeta.T() {
case ContypeConstructor:
results := c.control.Call(getArgSlice(c.controlmeta.Args(),
getVMap(c.context),
c.context.PData))
c.control = results[0]
if c.control.Kind() == reflect.Interface {
c.control = c.control.Elem()
}
// after Elem from Interface, it might be a pointer to a struct too
if c.control.Kind() == reflect.Ptr {
c.control = c.control.Elem()
}
if c.controlmeta.Status() {
state := int(results[1].Int())
if state <= 0 {
state = http.StatusOK
}
c.state = state
}
case ContypeStruct, ContypeScontroller:
fields := c.controlmeta.Fields()
tmpcontrol := reflect.Indirect(c.control)
for _, field := range fields {
value := getDataValue(&field.DataMeta,
getVMap(c.context),
c.context.PData)
// allows middleware resources to make changes to the value based on its tag;
// this is useful for CSRF protection, where values that fail CSRF verification are filtered out
if res := field.Tag().Get(StructValueFeedTag); res != "" {
var reses []string
if strings.Contains(res, ",") {
reses = strings.Split(res, ",")
} else {
reses = []string{res}
}
for _, r := range reses {
rinterface := c.context.Resource(strings.TrimSpace(r))
if rinterface != nil {
if parser, ok := rinterface.(StructValueFeeder); ok {
var err error
value, err = parser.FeedStructValue(c.context, field, value)
if err != nil {
c.state = 500
log.Println(err)
return ErrParseStruct
}
}
} else {
log.Println("Resource to parse struct var not found: ", r)
c.state = 500
return ErrParseStruct
}
}
} else if res := field.Tag().Get(StructValueResTag); res != "" {
rinterface := c.context.Resource(strings.TrimSpace(res))
if rinterface != nil {
value = reflect.ValueOf(rinterface)
}
}
if value.IsValid() {
tmpcontrol.FieldByName(field.Name()).Set(value)
}
}
if c.state != 500 && c.controlmeta.T() == ContypeScontroller {
ctrler := c.control.Interface().(mvc.Controller)
ctrler.Construct_(c.context)
}
}
if c.context.HasErrorCode() {
c.state = c.context.ErrorCode()
}
c.prepared = true
}
return nil
}
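// Illustrative note (added): fields of a struct controller may carry the
// StructValueFeedTag tag key naming a comma-separated list of resources; each
// named resource must implement StructValueFeeder and may rewrite the injected
// value (e.g. a CSRF filter). StructValueResTag instead injects the named
// context resource itself as the field value.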
func (c *ControlManager) Execute() (state int, vw mvc.View) {
if c.prepared {
if c.state == 0 {
c.state = 200
}
var results []reflect.Value
var rstyle = c.controlmeta.ResultStyle
if c.state >= http.StatusOK && c.state < http.StatusMultipleChoices {
switch c.controlmeta.T() {
case ContypeFunc:
results = c.control.Call(getArgSlice(c.controlmeta.Args(),
getVMap(c.context),
c.context.PData))
default:
actmeta := c.ActionMeta()
if actmeta != nil {
meth := c.control.MethodByName(actmeta.Name())
results = meth.Call(getArgSlice(actmeta.Args(),
getVMap(c.context),
c.context.PData))
rstyle = actmeta.ResultStyle
} else {
c.state = 404
state = c.state
return
}
}
}
if rstyle.Status() {
c.state = int(results[1].Int())
}
if rstyle.View() || rstyle.Vmap() || rstyle.MapSI() {
			// for a consistent error page, an error status should be returned instead, allowing the
			// sunny server itself to render the error page
state = c.state
if state == 200 || state == 0 {
if rstyle.View() {
if !results[0].IsNil() && results[0].IsValid() {
c.vw = (results[0].Interface()).(mvc.View)
}
} else {
var vmap mvc.VM
if results[0].IsNil() || !results[0].IsValid() {
vmap = mvc.VM{}
} else if rstyle.Vmap() {
vmap = results[0].Interface().(mvc.VM)
} else {
vmap = mvc.VM(results[0].Interface().(map[string]interface{}))
}
c.vw = view.NewResultView(vmap)
}
vw = c.vw
if vw == nil {
state = -1
}
}
} else {
// if state returned is -1, it means the controller has handled the response
state = -1
}
c.executed = true
} else {
state = c.state
}
return
}
func (c *ControlManager) PublishView() (err error) {
if !c.prepared {
err = ErrUnprepared
} else if !c.executed {
err = ErrUnexecuted
} else if c.vw != nil {
if c.context.Request.Method == "HEAD" {
c.context.Response.Header().Set("Content-Type", c.vw.ContentType(c.context))
} else {
err = c.vw.Publish(c.context)
}
}
return
}
func (c *ControlManager) Cleanup() |
func getVMap(context *web.Context) map[string]reflect.Value {
return map[string]reflect.Value{
"context": reflect.ValueOf(context),
"w": reflect.ValueOf(context.Response),
"r": reflect.ValueOf(context.Request),
"upath": reflect.ValueOf(context.UPath),
"pdata": reflect.ValueOf(context.PData),
"upath_slice": reflect.ValueOf([]string(context.UPath)),
"pdata_map": reflect.ValueOf(map[string]string(context.PData)),
}
}
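// Illustrative note (added): the vmap keys above pair with the DatatypeXxx
// cases in getDataValue below, so an action argument declared as, say, the
// web context, the response writer, or the pdata map is resolved by its
// declared type when the action is called.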
func getArgSlice(args []*ArgMeta, vmap map[string]reflect.Value, d web.PData) (values []reflect.Value) {
values = make([]reflect.Value, len(args))
for i, arg := range args {
values[i] = getDataValue(&arg.DataMeta, vmap, d)
}
return
}
func getDataValue(arg *DataMeta, vmap map[string]reflect.Value, d web.PData) (value reflect.Value) {
switch arg.T() {
case DatatypeWebContext:
value = vmap["context"]
case DatatypeRequest:
value = vmap["r"]
case DatatypeResponseWriter:
value = vmap["w"]
case DatatypeUpath:
value = vmap["upath"]
case DatatypeUpathSlice:
value = vmap["upath_slice"]
case DatatypePdata:
value = vmap["pdata"]
case DatatypePdataMap:
value = vmap["pdata_map"]
case DatatypeString:
val, _ := d.String(arg.LName())
value = reflect.ValueOf(val)
case DatatypeInt:
val, _ := d.Int(arg.LName())
value = reflect.ValueOf(val)
case DatatypeInt64:
val, _ := d.Int64(arg.LName())
value = reflect.ValueOf(val)
case DatatypeFloat:
val, _ := d.Float32(arg.LName())
value = reflect.ValueOf(val)
case DatatypeFloat64:
val, _ := d.Float64(arg.LName())
value = reflect.ValueOf(val)
case DatatypeEmail:
val, _ := d.Email(arg.LName())
value = reflect.ValueOf(val)
case DatatypeURL:
val, _ := d.Url(arg.LName())
value = reflect.ValueOf(val)
case DatatypeDate:
val, _ := d.Date(arg.LName())
value = reflect.ValueOf(val)
case DatatypeTime:
val, _ := d.Time(arg.LName())
value = reflect.ValueOf(val)
case DatatypeDateTime:
val, _ := d.DateTime(arg.LName())
value = reflect.ValueOf(val)
case DatatypeStruct, DatatypeEmbedded:
fields := arg.Fields()
model := reflect.New(arg.RType())
modelval := model.Elem()
for _, field := range fields {
modelval.FieldByName(field.Name()).Set(getDataValue(&field.DataMeta, vmap, d))
}
if arg.RType().Kind() == reflect.Ptr {
value = model
} else {
value = modelval
}
}
return
}
| {
if c.prepared && c.controlmeta.T() == ContypeScontroller {
ctrler := c.control.Interface().(mvc.Controller)
ctrler.Destruct_()
}
} | identifier_body |
inifile.py | import configparser
import difflib
import logging
import os
from pathlib import Path
import pytoml as toml
from .validate import validate_config
from .vendorized.readme.rst import render
import io
log = logging.getLogger(__name__)
class ConfigError(ValueError):
pass
metadata_list_fields = {
'classifiers',
'requires',
'dev-requires'
}
metadata_allowed_fields = {
'module',
'author',
'author-email',
'maintainer',
'maintainer-email',
'home-page',
'license',
'keywords',
'requires-python',
'dist-name',
'entry-points-file',
'description-file',
'requires-extra',
} | metadata_list_fields
metadata_required_fields = {
'module',
'author',
'author-email',
}
def read_pkg_ini(path: Path):
"""Read and check the `pyproject.toml` or `flit.ini` file with data about the package.
"""
if path.suffix == '.toml':
with path.open() as f:
d = toml.load(f)
res = prep_toml_config(d, path)
else:
# Treat all other extensions as the older flit.ini format
cp = _read_pkg_ini(path)
res = _validate_config(cp, path)
if validate_config(res):
if os.environ.get('FLIT_ALLOW_INVALID'):
log.warning("Allowing invalid data (FLIT_ALLOW_INVALID set). Uploads may still fail.")
else:
raise ConfigError("Invalid config values (see log)")
return res
class EntryPointsConflict(ConfigError):
def __str__(self):
return ('Please specify console_scripts entry points, or [scripts] in '
'flit config, not both.')
def prep_toml_config(d, path):
"""Validate config loaded from pyproject.toml and prepare common metadata
Returns a dictionary with keys: module, metadata, scripts, entrypoints,
raw_config.
"""
if ('tool' not in d) or ('flit' not in d['tool']) \
or (not isinstance(d['tool']['flit'], dict)):
raise ConfigError("TOML file missing [tool.flit] table.")
d = d['tool']['flit']
unknown_sections = set(d) - {'metadata', 'scripts', 'entrypoints'}
unknown_sections = [s for s in unknown_sections if not s.lower().startswith('x-')]
if unknown_sections:
raise ConfigError('Unknown sections: ' + ', '.join(unknown_sections))
if 'metadata' not in d:
raise ConfigError('[tool.flit.metadata] section is required')
md_dict, module, reqs_by_extra = _prep_metadata(d['metadata'], path)
if 'scripts' in d:
scripts_dict = dict(d['scripts'])
else:
scripts_dict = {}
if 'entrypoints' in d:
entrypoints = flatten_entrypoints(d['entrypoints'])
else:
entrypoints = {}
_add_scripts_to_entrypoints(entrypoints, scripts_dict)
return {
'module': module,
'metadata': md_dict,
'reqs_by_extra': reqs_by_extra,
'scripts': scripts_dict,
'entrypoints': entrypoints,
'raw_config': d,
}
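# Illustrative sketch (added; not part of the original module): the minimal
# pyproject layout prep_toml_config accepts. All values are hypothetical.
def _example_prep_toml_config():
    doc = {'tool': {'flit': {'metadata': {
        'module': 'mypkg',
        'author': 'A. Author',
        'author-email': 'author@example.com',
    }}}}
    cfg = prep_toml_config(doc, Path('pyproject.toml'))
    assert cfg['module'] == 'mypkg'
    assert cfg['metadata']['author_email'] == 'author@example.com'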
def flatten_entrypoints(ep):
"""Flatten nested entrypoints dicts.
Entry points group names can include dots. But dots in TOML make nested
dictionaries:
[entrypoints.a.b] # {'entrypoints': {'a': {'b': {}}}}
The proper way to avoid this is:
[entrypoints."a.b"] # {'entrypoints': {'a.b': {}}}
But since there isn't a need for arbitrarily nested mappings in entrypoints,
flit allows you to use the former. This flattens the nested dictionaries
from loading pyproject.toml.
"""
def _flatten(d, prefix):
d1 = {}
for k, v in d.items():
if isinstance(v, dict):
yield from _flatten(v, prefix+'.'+k)
else:
d1[k] = v
if d1:
yield prefix, d1
res = {}
for k, v in ep.items():
res.update(_flatten(v, k))
return res
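# Illustrative sketch (added): how the flattening behaves on the nested shape
# produced by an unquoted [entrypoints.a.b] table.
def _example_flatten_entrypoints():
    nested = {'a': {'b': {'cmd': 'pkg.mod:func'}}}
    assert flatten_entrypoints(nested) == {'a.b': {'cmd': 'pkg.mod:func'}}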
def _add_scripts_to_entrypoints(entrypoints, scripts_dict):
if scripts_dict:
if 'console_scripts' in entrypoints:
raise EntryPointsConflict
else:
entrypoints['console_scripts'] = scripts_dict
def _read_pkg_ini(path):
"""Reads old-style flit.ini
"""
cp = configparser.ConfigParser()
with path.open(encoding='utf-8') as f:
cp.read_file(f)
return cp
readme_ext_to_content_type = {
'.rst': 'text/x-rst',
'.md': 'text/markdown',
'.txt': 'text/plain',
}
def _prep_metadata(md_sect, path):
"""Process & verify the metadata from a config file
- Pull out the module name we're packaging.
- Read description-file and check that it's valid rst
- Convert dashes in key names to underscores
(e.g. home-page in config -> home_page in metadata)
"""
if not set(md_sect).issuperset(metadata_required_fields):
missing = metadata_required_fields - set(md_sect)
raise ConfigError("Required fields missing: " + '\n'.join(missing))
module = md_sect.get('module')
if not module.isidentifier():
raise ConfigError("Module name %r is not a valid identifier" % module)
md_dict = {}
# Description file
if 'description-file' in md_sect: | raise ConfigError(
"Description file {} does not exist".format(description_file)
)
ext = description_file.suffix
try:
mimetype = readme_ext_to_content_type[ext]
except KeyError:
log.warning("Unknown extension %r for description file.", ext)
log.warning(" Recognised extensions: %s",
" ".join(readme_ext_to_content_type))
mimetype = None
if mimetype == 'text/x-rst':
# rst check
stream = io.StringIO()
res = render(raw_desc, stream)
if not res:
log.warning("The file description seems not to be valid rst for PyPI;"
" it will be interpreted as plain text")
log.warning(stream.getvalue())
md_dict['description'] = raw_desc
md_dict['description_content_type'] = mimetype
if 'urls' in md_sect:
project_urls = md_dict['project_urls'] = []
for label, url in sorted(md_sect.pop('urls').items()):
project_urls.append("{}, {}".format(label, url))
for key, value in md_sect.items():
if key in {'description-file', 'module'}:
continue
if key not in metadata_allowed_fields:
closest = difflib.get_close_matches(key, metadata_allowed_fields,
n=1, cutoff=0.7)
msg = "Unrecognised metadata key: {!r}".format(key)
if closest:
msg += " (did you mean {!r}?)".format(closest[0])
raise ConfigError(msg)
k2 = key.replace('-', '_')
md_dict[k2] = value
if key in metadata_list_fields:
if not isinstance(value, list):
raise ConfigError('Expected a list for {} field, found {!r}'
.format(key, value))
if not all(isinstance(a, str) for a in value):
raise ConfigError('Expected a list of strings for {} field'
.format(key))
elif key == 'requires-extra':
if not isinstance(value, dict):
raise ConfigError('Expected a dict for requires-extra field, found {!r}'
.format(value))
if not all(isinstance(e, list) for e in value.values()):
raise ConfigError('Expected a dict of lists for requires-extra field')
for e, reqs in value.items():
if not all(isinstance(a, str) for a in reqs):
raise ConfigError('Expected a string list for requires-extra. (extra {})'
.format(e))
else:
if not isinstance(value, str):
raise ConfigError('Expected a string for {} field, found {!r}'
.format(key, value))
# What we call requires in the ini file is technically requires_dist in
# the metadata.
if 'requires' in md_dict:
md_dict['requires_dist'] = md_dict.pop('requires')
# And what we call dist-name is name in the metadata
if 'dist_name' in md_dict:
md_dict['name'] = md_dict.pop('dist_name')
# Move dev-requires into requires-extra
reqs_noextra = md_dict.pop('requires_dist', [])
reqs_by_extra = md_dict.pop('requires_extra', {})
dev_requires = md_dict.pop('dev_requires', None)
if dev_requires is not None:
if 'dev' in reqs_by_extra:
raise ConfigError(
'dev-requires occurs together with its replacement requires-extra.dev.')
else:
log.warning(
'“dev-requires = ...” is obsolete. Use “requires-extra = {"dev" = ...}” instead.')
reqs_by_extra['dev'] = dev_requires
# Add requires-extra requirements into requires_dist
md_dict['requires_dist'] = \
reqs_noextra + list(_expand_requires_extra(reqs_by_extra))
md_dict['provides_extra'] = sorted(reqs_by_extra.keys())
# For internal use, record the main requirements as a '.none' extra.
reqs_by_extra['.none'] = reqs_noextra
return md_dict, module, reqs_by_extra
def _expand_requires_extra(re):
for extra, reqs in sorted(re.items()):
for req in reqs:
if ';' in req:
name, envmark = req.split(';', 1)
yield '{}; extra == "{}" and ({})'.format(name, extra, envmark)
else:
yield '{}; extra == "{}"'.format(req, extra)
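# Illustrative sketch (added): the PEP 508 environment markers generated for
# an extra, including the case where a requirement carries its own marker.
def _example_expand_requires_extra():
    extras = {'test': ['pytest', 'mock;python_version<"3.3"']}
    assert list(_expand_requires_extra(extras)) == [
        'pytest; extra == "test"',
        'mock; extra == "test" and (python_version<"3.3")',
    ]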
def _validate_config(cp, path):
"""Validate and process config loaded from a flit.ini file.
Returns a dict with keys: module, metadata, scripts, entrypoints, raw_config
"""
unknown_sections = set(cp.sections()) - {'metadata', 'scripts'}
unknown_sections = [s for s in unknown_sections if not s.lower().startswith('x-')]
if unknown_sections:
raise ConfigError('Unknown sections: ' + ', '.join(unknown_sections))
if not cp.has_section('metadata'):
raise ConfigError('[metadata] section is required')
md_sect = {}
for k, v in cp['metadata'].items():
if k in metadata_list_fields:
md_sect[k] = [l for l in v.splitlines() if l.strip()]
else:
md_sect[k] = v
if 'entry-points-file' in md_sect:
entry_points_file = path.parent / md_sect.pop('entry-points-file')
if not entry_points_file.is_file():
raise FileNotFoundError(entry_points_file)
else:
entry_points_file = path.parent / 'entry_points.txt'
if not entry_points_file.is_file():
entry_points_file = None
if entry_points_file:
ep_cp = configparser.ConfigParser()
with entry_points_file.open() as f:
ep_cp.read_file(f)
# Convert to regular dict
entrypoints = {k: dict(v) for k,v in ep_cp.items()}
else:
entrypoints = {}
md_dict, module, reqs_by_extra = _prep_metadata(md_sect, path)
# Scripts ---------------
if cp.has_section('scripts'):
scripts_dict = dict(cp['scripts'])
else:
scripts_dict = {}
_add_scripts_to_entrypoints(entrypoints, scripts_dict)
return {
'module': module,
'metadata': md_dict,
'reqs_by_extra': reqs_by_extra,
'scripts': scripts_dict,
'entrypoints': entrypoints,
'raw_config': cp,
} | description_file = path.parent / md_sect.get('description-file')
try:
with description_file.open(encoding='utf-8') as f:
raw_desc = f.read()
except FileNotFoundError: | random_line_split |
inifile.py | import configparser
import difflib
import logging
import os
from pathlib import Path
import pytoml as toml
from .validate import validate_config
from .vendorized.readme.rst import render
import io
log = logging.getLogger(__name__)
class ConfigError(ValueError):
pass
metadata_list_fields = {
'classifiers',
'requires',
'dev-requires'
}
metadata_allowed_fields = {
'module',
'author',
'author-email',
'maintainer',
'maintainer-email',
'home-page',
'license',
'keywords',
'requires-python',
'dist-name',
'entry-points-file',
'description-file',
'requires-extra',
} | metadata_list_fields
metadata_required_fields = {
'module',
'author',
'author-email',
}
def read_pkg_ini(path: Path):
"""Read and check the `pyproject.toml` or `flit.ini` file with data about the package.
"""
if path.suffix == '.toml':
with path.open() as f:
d = toml.load(f)
res = prep_toml_config(d, path)
else:
# Treat all other extensions as the older flit.ini format
cp = _read_pkg_ini(path)
res = _validate_config(cp, path)
if validate_config(res):
if os.environ.get('FLIT_ALLOW_INVALID'):
log.warning("Allowing invalid data (FLIT_ALLOW_INVALID set). Uploads may still fail.")
else:
raise ConfigError("Invalid config values (see log)")
return res
class EntryPointsConflict(ConfigError):
def __str__(self):
return ('Please specify console_scripts entry points, or [scripts] in '
'flit config, not both.')
def prep_toml_config(d, path):
"""Validate config loaded from pyproject.toml and prepare common metadata
Returns a dictionary with keys: module, metadata, scripts, entrypoints,
raw_config.
"""
if ('tool' not in d) or ('flit' not in d['tool']) \
or (not isinstance(d['tool']['flit'], dict)):
raise ConfigError("TOML file missing [tool.flit] table.")
d = d['tool']['flit']
unknown_sections = set(d) - {'metadata', 'scripts', 'entrypoints'}
unknown_sections = [s for s in unknown_sections if not s.lower().startswith('x-')]
if unknown_sections:
raise ConfigError('Unknown sections: ' + ', '.join(unknown_sections))
if 'metadata' not in d:
raise ConfigError('[tool.flit.metadata] section is required')
md_dict, module, reqs_by_extra = _prep_metadata(d['metadata'], path)
if 'scripts' in d:
scripts_dict = dict(d['scripts'])
else:
scripts_dict = {}
if 'entrypoints' in d:
entrypoints = flatten_entrypoints(d['entrypoints'])
else:
entrypoints = {}
_add_scripts_to_entrypoints(entrypoints, scripts_dict)
return {
'module': module,
'metadata': md_dict,
'reqs_by_extra': reqs_by_extra,
'scripts': scripts_dict,
'entrypoints': entrypoints,
'raw_config': d,
}
def flatten_entrypoints(ep):
"""Flatten nested entrypoints dicts.
Entry points group names can include dots. But dots in TOML make nested
dictionaries:
[entrypoints.a.b] # {'entrypoints': {'a': {'b': {}}}}
The proper way to avoid this is:
[entrypoints."a.b"] # {'entrypoints': {'a.b': {}}}
But since there isn't a need for arbitrarily nested mappings in entrypoints,
flit allows you to use the former. This flattens the nested dictionaries
from loading pyproject.toml.
"""
def _flatten(d, prefix):
d1 = {}
for k, v in d.items():
if isinstance(v, dict):
yield from _flatten(v, prefix+'.'+k)
else:
d1[k] = v
if d1:
yield prefix, d1
res = {}
for k, v in ep.items():
res.update(_flatten(v, k))
return res
def _add_scripts_to_entrypoints(entrypoints, scripts_dict):
if scripts_dict:
if 'console_scripts' in entrypoints:
|
else:
entrypoints['console_scripts'] = scripts_dict
def _read_pkg_ini(path):
"""Reads old-style flit.ini
"""
cp = configparser.ConfigParser()
with path.open(encoding='utf-8') as f:
cp.read_file(f)
return cp
readme_ext_to_content_type = {
'.rst': 'text/x-rst',
'.md': 'text/markdown',
'.txt': 'text/plain',
}
def _prep_metadata(md_sect, path):
"""Process & verify the metadata from a config file
- Pull out the module name we're packaging.
- Read description-file and check that it's valid rst
- Convert dashes in key names to underscores
(e.g. home-page in config -> home_page in metadata)
"""
if not set(md_sect).issuperset(metadata_required_fields):
missing = metadata_required_fields - set(md_sect)
raise ConfigError("Required fields missing: " + '\n'.join(missing))
module = md_sect.get('module')
if not module.isidentifier():
raise ConfigError("Module name %r is not a valid identifier" % module)
md_dict = {}
# Description file
if 'description-file' in md_sect:
description_file = path.parent / md_sect.get('description-file')
try:
with description_file.open(encoding='utf-8') as f:
raw_desc = f.read()
except FileNotFoundError:
raise ConfigError(
"Description file {} does not exist".format(description_file)
)
ext = description_file.suffix
try:
mimetype = readme_ext_to_content_type[ext]
except KeyError:
log.warning("Unknown extension %r for description file.", ext)
log.warning(" Recognised extensions: %s",
" ".join(readme_ext_to_content_type))
mimetype = None
if mimetype == 'text/x-rst':
# rst check
stream = io.StringIO()
res = render(raw_desc, stream)
if not res:
log.warning("The file description seems not to be valid rst for PyPI;"
" it will be interpreted as plain text")
log.warning(stream.getvalue())
md_dict['description'] = raw_desc
md_dict['description_content_type'] = mimetype
if 'urls' in md_sect:
project_urls = md_dict['project_urls'] = []
for label, url in sorted(md_sect.pop('urls').items()):
project_urls.append("{}, {}".format(label, url))
for key, value in md_sect.items():
if key in {'description-file', 'module'}:
continue
if key not in metadata_allowed_fields:
closest = difflib.get_close_matches(key, metadata_allowed_fields,
n=1, cutoff=0.7)
msg = "Unrecognised metadata key: {!r}".format(key)
if closest:
msg += " (did you mean {!r}?)".format(closest[0])
raise ConfigError(msg)
k2 = key.replace('-', '_')
md_dict[k2] = value
if key in metadata_list_fields:
if not isinstance(value, list):
raise ConfigError('Expected a list for {} field, found {!r}'
.format(key, value))
if not all(isinstance(a, str) for a in value):
raise ConfigError('Expected a list of strings for {} field'
.format(key))
elif key == 'requires-extra':
if not isinstance(value, dict):
raise ConfigError('Expected a dict for requires-extra field, found {!r}'
.format(value))
if not all(isinstance(e, list) for e in value.values()):
raise ConfigError('Expected a dict of lists for requires-extra field')
for e, reqs in value.items():
if not all(isinstance(a, str) for a in reqs):
raise ConfigError('Expected a string list for requires-extra. (extra {})'
.format(e))
else:
if not isinstance(value, str):
raise ConfigError('Expected a string for {} field, found {!r}'
.format(key, value))
# What we call requires in the ini file is technically requires_dist in
# the metadata.
if 'requires' in md_dict:
md_dict['requires_dist'] = md_dict.pop('requires')
# And what we call dist-name is name in the metadata
if 'dist_name' in md_dict:
md_dict['name'] = md_dict.pop('dist_name')
# Move dev-requires into requires-extra
reqs_noextra = md_dict.pop('requires_dist', [])
reqs_by_extra = md_dict.pop('requires_extra', {})
dev_requires = md_dict.pop('dev_requires', None)
if dev_requires is not None:
if 'dev' in reqs_by_extra:
raise ConfigError(
'dev-requires occurs together with its replacement requires-extra.dev.')
else:
log.warning(
'“dev-requires = ...” is obsolete. Use “requires-extra = {"dev" = ...}” instead.')
reqs_by_extra['dev'] = dev_requires
# Add requires-extra requirements into requires_dist
md_dict['requires_dist'] = \
reqs_noextra + list(_expand_requires_extra(reqs_by_extra))
md_dict['provides_extra'] = sorted(reqs_by_extra.keys())
# For internal use, record the main requirements as a '.none' extra.
reqs_by_extra['.none'] = reqs_noextra
return md_dict, module, reqs_by_extra
def _expand_requires_extra(re):
for extra, reqs in sorted(re.items()):
for req in reqs:
if ';' in req:
name, envmark = req.split(';', 1)
yield '{}; extra == "{}" and ({})'.format(name, extra, envmark)
else:
yield '{}; extra == "{}"'.format(req, extra)
def _validate_config(cp, path):
"""Validate and process config loaded from a flit.ini file.
Returns a dict with keys: module, metadata, scripts, entrypoints, raw_config
"""
unknown_sections = set(cp.sections()) - {'metadata', 'scripts'}
unknown_sections = [s for s in unknown_sections if not s.lower().startswith('x-')]
if unknown_sections:
raise ConfigError('Unknown sections: ' + ', '.join(unknown_sections))
if not cp.has_section('metadata'):
raise ConfigError('[metadata] section is required')
md_sect = {}
for k, v in cp['metadata'].items():
if k in metadata_list_fields:
md_sect[k] = [l for l in v.splitlines() if l.strip()]
else:
md_sect[k] = v
if 'entry-points-file' in md_sect:
entry_points_file = path.parent / md_sect.pop('entry-points-file')
if not entry_points_file.is_file():
raise FileNotFoundError(entry_points_file)
else:
entry_points_file = path.parent / 'entry_points.txt'
if not entry_points_file.is_file():
entry_points_file = None
if entry_points_file:
ep_cp = configparser.ConfigParser()
with entry_points_file.open() as f:
ep_cp.read_file(f)
# Convert to regular dict
entrypoints = {k: dict(v) for k,v in ep_cp.items()}
else:
entrypoints = {}
md_dict, module, reqs_by_extra = _prep_metadata(md_sect, path)
# Scripts ---------------
if cp.has_section('scripts'):
scripts_dict = dict(cp['scripts'])
else:
scripts_dict = {}
_add_scripts_to_entrypoints(entrypoints, scripts_dict)
return {
'module': module,
'metadata': md_dict,
'reqs_by_extra': reqs_by_extra,
'scripts': scripts_dict,
'entrypoints': entrypoints,
'raw_config': cp,
}
| raise EntryPointsConflict | conditional_block |
inifile.py | import configparser
import difflib
import logging
import os
from pathlib import Path
import pytoml as toml
from .validate import validate_config
from .vendorized.readme.rst import render
import io
log = logging.getLogger(__name__)
class ConfigError(ValueError):
pass
metadata_list_fields = {
'classifiers',
'requires',
'dev-requires'
}
metadata_allowed_fields = {
'module',
'author',
'author-email',
'maintainer',
'maintainer-email',
'home-page',
'license',
'keywords',
'requires-python',
'dist-name',
'entry-points-file',
'description-file',
'requires-extra',
} | metadata_list_fields
metadata_required_fields = {
'module',
'author',
'author-email',
}
def read_pkg_ini(path: Path):
"""Read and check the `pyproject.toml` or `flit.ini` file with data about the package.
"""
if path.suffix == '.toml':
with path.open() as f:
d = toml.load(f)
res = prep_toml_config(d, path)
else:
# Treat all other extensions as the older flit.ini format
cp = _read_pkg_ini(path)
res = _validate_config(cp, path)
if validate_config(res):
if os.environ.get('FLIT_ALLOW_INVALID'):
log.warning("Allowing invalid data (FLIT_ALLOW_INVALID set). Uploads may still fail.")
else:
raise ConfigError("Invalid config values (see log)")
return res
class EntryPointsConflict(ConfigError):
|
def prep_toml_config(d, path):
"""Validate config loaded from pyproject.toml and prepare common metadata
Returns a dictionary with keys: module, metadata, scripts, entrypoints,
raw_config.
"""
if ('tool' not in d) or ('flit' not in d['tool']) \
or (not isinstance(d['tool']['flit'], dict)):
raise ConfigError("TOML file missing [tool.flit] table.")
d = d['tool']['flit']
unknown_sections = set(d) - {'metadata', 'scripts', 'entrypoints'}
unknown_sections = [s for s in unknown_sections if not s.lower().startswith('x-')]
if unknown_sections:
raise ConfigError('Unknown sections: ' + ', '.join(unknown_sections))
if 'metadata' not in d:
raise ConfigError('[tool.flit.metadata] section is required')
md_dict, module, reqs_by_extra = _prep_metadata(d['metadata'], path)
if 'scripts' in d:
scripts_dict = dict(d['scripts'])
else:
scripts_dict = {}
if 'entrypoints' in d:
entrypoints = flatten_entrypoints(d['entrypoints'])
else:
entrypoints = {}
_add_scripts_to_entrypoints(entrypoints, scripts_dict)
return {
'module': module,
'metadata': md_dict,
'reqs_by_extra': reqs_by_extra,
'scripts': scripts_dict,
'entrypoints': entrypoints,
'raw_config': d,
}
def flatten_entrypoints(ep):
"""Flatten nested entrypoints dicts.
Entry points group names can include dots. But dots in TOML make nested
dictionaries:
[entrypoints.a.b] # {'entrypoints': {'a': {'b': {}}}}
The proper way to avoid this is:
[entrypoints."a.b"] # {'entrypoints': {'a.b': {}}}
But since there isn't a need for arbitrarily nested mappings in entrypoints,
flit allows you to use the former. This flattens the nested dictionaries
from loading pyproject.toml.
"""
def _flatten(d, prefix):
d1 = {}
for k, v in d.items():
if isinstance(v, dict):
yield from _flatten(v, prefix+'.'+k)
else:
d1[k] = v
if d1:
yield prefix, d1
res = {}
for k, v in ep.items():
res.update(_flatten(v, k))
return res
def _add_scripts_to_entrypoints(entrypoints, scripts_dict):
if scripts_dict:
if 'console_scripts' in entrypoints:
raise EntryPointsConflict
else:
entrypoints['console_scripts'] = scripts_dict
def _read_pkg_ini(path):
"""Reads old-style flit.ini
"""
cp = configparser.ConfigParser()
with path.open(encoding='utf-8') as f:
cp.read_file(f)
return cp
readme_ext_to_content_type = {
'.rst': 'text/x-rst',
'.md': 'text/markdown',
'.txt': 'text/plain',
}
def _prep_metadata(md_sect, path):
"""Process & verify the metadata from a config file
- Pull out the module name we're packaging.
- Read description-file and check that it's valid rst
- Convert dashes in key names to underscores
(e.g. home-page in config -> home_page in metadata)
"""
if not set(md_sect).issuperset(metadata_required_fields):
missing = metadata_required_fields - set(md_sect)
raise ConfigError("Required fields missing: " + '\n'.join(missing))
module = md_sect.get('module')
if not module.isidentifier():
raise ConfigError("Module name %r is not a valid identifier" % module)
md_dict = {}
# Description file
if 'description-file' in md_sect:
description_file = path.parent / md_sect.get('description-file')
try:
with description_file.open(encoding='utf-8') as f:
raw_desc = f.read()
except FileNotFoundError:
raise ConfigError(
"Description file {} does not exist".format(description_file)
)
ext = description_file.suffix
try:
mimetype = readme_ext_to_content_type[ext]
except KeyError:
log.warning("Unknown extension %r for description file.", ext)
log.warning(" Recognised extensions: %s",
" ".join(readme_ext_to_content_type))
mimetype = None
if mimetype == 'text/x-rst':
# rst check
stream = io.StringIO()
res = render(raw_desc, stream)
if not res:
log.warning("The file description seems not to be valid rst for PyPI;"
" it will be interpreted as plain text")
log.warning(stream.getvalue())
md_dict['description'] = raw_desc
md_dict['description_content_type'] = mimetype
if 'urls' in md_sect:
project_urls = md_dict['project_urls'] = []
for label, url in sorted(md_sect.pop('urls').items()):
project_urls.append("{}, {}".format(label, url))
for key, value in md_sect.items():
if key in {'description-file', 'module'}:
continue
if key not in metadata_allowed_fields:
closest = difflib.get_close_matches(key, metadata_allowed_fields,
n=1, cutoff=0.7)
msg = "Unrecognised metadata key: {!r}".format(key)
if closest:
msg += " (did you mean {!r}?)".format(closest[0])
raise ConfigError(msg)
k2 = key.replace('-', '_')
md_dict[k2] = value
if key in metadata_list_fields:
if not isinstance(value, list):
raise ConfigError('Expected a list for {} field, found {!r}'
.format(key, value))
if not all(isinstance(a, str) for a in value):
raise ConfigError('Expected a list of strings for {} field'
.format(key))
elif key == 'requires-extra':
if not isinstance(value, dict):
raise ConfigError('Expected a dict for requires-extra field, found {!r}'
.format(value))
if not all(isinstance(e, list) for e in value.values()):
raise ConfigError('Expected a dict of lists for requires-extra field')
for e, reqs in value.items():
if not all(isinstance(a, str) for a in reqs):
raise ConfigError('Expected a string list for requires-extra. (extra {})'
.format(e))
else:
if not isinstance(value, str):
raise ConfigError('Expected a string for {} field, found {!r}'
.format(key, value))
# What we call requires in the ini file is technically requires_dist in
# the metadata.
if 'requires' in md_dict:
md_dict['requires_dist'] = md_dict.pop('requires')
# And what we call dist-name is name in the metadata
if 'dist_name' in md_dict:
md_dict['name'] = md_dict.pop('dist_name')
# Move dev-requires into requires-extra
reqs_noextra = md_dict.pop('requires_dist', [])
reqs_by_extra = md_dict.pop('requires_extra', {})
dev_requires = md_dict.pop('dev_requires', None)
if dev_requires is not None:
if 'dev' in reqs_by_extra:
raise ConfigError(
'dev-requires occurs together with its replacement requires-extra.dev.')
else:
log.warning(
'“dev-requires = ...” is obsolete. Use “requires-extra = {"dev" = ...}” instead.')
reqs_by_extra['dev'] = dev_requires
# Add requires-extra requirements into requires_dist
md_dict['requires_dist'] = \
reqs_noextra + list(_expand_requires_extra(reqs_by_extra))
md_dict['provides_extra'] = sorted(reqs_by_extra.keys())
# For internal use, record the main requirements as a '.none' extra.
reqs_by_extra['.none'] = reqs_noextra
return md_dict, module, reqs_by_extra
def _expand_requires_extra(re):
for extra, reqs in sorted(re.items()):
for req in reqs:
if ';' in req:
name, envmark = req.split(';', 1)
yield '{}; extra == "{}" and ({})'.format(name, extra, envmark)
else:
yield '{}; extra == "{}"'.format(req, extra)
def _validate_config(cp, path):
"""Validate and process config loaded from a flit.ini file.
Returns a dict with keys: module, metadata, scripts, entrypoints, raw_config
"""
unknown_sections = set(cp.sections()) - {'metadata', 'scripts'}
unknown_sections = [s for s in unknown_sections if not s.lower().startswith('x-')]
if unknown_sections:
raise ConfigError('Unknown sections: ' + ', '.join(unknown_sections))
if not cp.has_section('metadata'):
raise ConfigError('[metadata] section is required')
md_sect = {}
for k, v in cp['metadata'].items():
if k in metadata_list_fields:
md_sect[k] = [l for l in v.splitlines() if l.strip()]
else:
md_sect[k] = v
if 'entry-points-file' in md_sect:
entry_points_file = path.parent / md_sect.pop('entry-points-file')
if not entry_points_file.is_file():
raise FileNotFoundError(entry_points_file)
else:
entry_points_file = path.parent / 'entry_points.txt'
if not entry_points_file.is_file():
entry_points_file = None
if entry_points_file:
ep_cp = configparser.ConfigParser()
with entry_points_file.open() as f:
ep_cp.read_file(f)
# Convert to regular dict
entrypoints = {k: dict(v) for k,v in ep_cp.items()}
else:
entrypoints = {}
md_dict, module, reqs_by_extra = _prep_metadata(md_sect, path)
# Scripts ---------------
if cp.has_section('scripts'):
scripts_dict = dict(cp['scripts'])
else:
scripts_dict = {}
_add_scripts_to_entrypoints(entrypoints, scripts_dict)
return {
'module': module,
'metadata': md_dict,
'reqs_by_extra': reqs_by_extra,
'scripts': scripts_dict,
'entrypoints': entrypoints,
'raw_config': cp,
}
| def __str__(self):
return ('Please specify console_scripts entry points, or [scripts] in '
'flit config, not both.') | identifier_body |
inifile.py | import configparser
import difflib
import logging
import os
from pathlib import Path
import pytoml as toml
from .validate import validate_config
from .vendorized.readme.rst import render
import io
log = logging.getLogger(__name__)
class ConfigError(ValueError):
pass
metadata_list_fields = {
'classifiers',
'requires',
'dev-requires'
}
metadata_allowed_fields = {
'module',
'author',
'author-email',
'maintainer',
'maintainer-email',
'home-page',
'license',
'keywords',
'requires-python',
'dist-name',
'entry-points-file',
'description-file',
'requires-extra',
} | metadata_list_fields
metadata_required_fields = {
'module',
'author',
'author-email',
}
def read_pkg_ini(path: Path):
"""Read and check the `pyproject.toml` or `flit.ini` file with data about the package.
"""
if path.suffix == '.toml':
with path.open() as f:
d = toml.load(f)
res = prep_toml_config(d, path)
else:
# Treat all other extensions as the older flit.ini format
cp = _read_pkg_ini(path)
res = _validate_config(cp, path)
if validate_config(res):
if os.environ.get('FLIT_ALLOW_INVALID'):
log.warning("Allowing invalid data (FLIT_ALLOW_INVALID set). Uploads may still fail.")
else:
raise ConfigError("Invalid config values (see log)")
return res
class EntryPointsConflict(ConfigError):
def __str__(self):
return ('Please specify console_scripts entry points, or [scripts] in '
'flit config, not both.')
def prep_toml_config(d, path):
"""Validate config loaded from pyproject.toml and prepare common metadata
Returns a dictionary with keys: module, metadata, scripts, entrypoints,
raw_config.
"""
if ('tool' not in d) or ('flit' not in d['tool']) \
or (not isinstance(d['tool']['flit'], dict)):
raise ConfigError("TOML file missing [tool.flit] table.")
d = d['tool']['flit']
unknown_sections = set(d) - {'metadata', 'scripts', 'entrypoints'}
unknown_sections = [s for s in unknown_sections if not s.lower().startswith('x-')]
if unknown_sections:
raise ConfigError('Unknown sections: ' + ', '.join(unknown_sections))
if 'metadata' not in d:
raise ConfigError('[tool.flit.metadata] section is required')
md_dict, module, reqs_by_extra = _prep_metadata(d['metadata'], path)
if 'scripts' in d:
scripts_dict = dict(d['scripts'])
else:
scripts_dict = {}
if 'entrypoints' in d:
entrypoints = flatten_entrypoints(d['entrypoints'])
else:
entrypoints = {}
_add_scripts_to_entrypoints(entrypoints, scripts_dict)
return {
'module': module,
'metadata': md_dict,
'reqs_by_extra': reqs_by_extra,
'scripts': scripts_dict,
'entrypoints': entrypoints,
'raw_config': d,
}
def flatten_entrypoints(ep):
"""Flatten nested entrypoints dicts.
Entry points group names can include dots. But dots in TOML make nested
dictionaries:
[entrypoints.a.b] # {'entrypoints': {'a': {'b': {}}}}
The proper way to avoid this is:
[entrypoints."a.b"] # {'entrypoints': {'a.b': {}}}
But since there isn't a need for arbitrarily nested mappings in entrypoints,
flit allows you to use the former. This flattens the nested dictionaries
from loading pyproject.toml.
"""
def _flatten(d, prefix):
d1 = {}
for k, v in d.items():
if isinstance(v, dict):
yield from _flatten(v, prefix+'.'+k)
else:
d1[k] = v
if d1:
yield prefix, d1
res = {}
for k, v in ep.items():
res.update(_flatten(v, k))
return res
def | (entrypoints, scripts_dict):
if scripts_dict:
if 'console_scripts' in entrypoints:
raise EntryPointsConflict
else:
entrypoints['console_scripts'] = scripts_dict
def _read_pkg_ini(path):
"""Reads old-style flit.ini
"""
cp = configparser.ConfigParser()
with path.open(encoding='utf-8') as f:
cp.read_file(f)
return cp
readme_ext_to_content_type = {
'.rst': 'text/x-rst',
'.md': 'text/markdown',
'.txt': 'text/plain',
}
def _prep_metadata(md_sect, path):
"""Process & verify the metadata from a config file
- Pull out the module name we're packaging.
- Read description-file and check that it's valid rst
- Convert dashes in key names to underscores
(e.g. home-page in config -> home_page in metadata)
"""
if not set(md_sect).issuperset(metadata_required_fields):
missing = metadata_required_fields - set(md_sect)
raise ConfigError("Required fields missing: " + '\n'.join(missing))
module = md_sect.get('module')
if not module.isidentifier():
raise ConfigError("Module name %r is not a valid identifier" % module)
md_dict = {}
# Description file
if 'description-file' in md_sect:
description_file = path.parent / md_sect.get('description-file')
try:
with description_file.open(encoding='utf-8') as f:
raw_desc = f.read()
except FileNotFoundError:
raise ConfigError(
"Description file {} does not exist".format(description_file)
)
ext = description_file.suffix
try:
mimetype = readme_ext_to_content_type[ext]
except KeyError:
log.warning("Unknown extension %r for description file.", ext)
log.warning(" Recognised extensions: %s",
" ".join(readme_ext_to_content_type))
mimetype = None
if mimetype == 'text/x-rst':
# rst check
stream = io.StringIO()
res = render(raw_desc, stream)
if not res:
log.warning("The file description seems not to be valid rst for PyPI;"
" it will be interpreted as plain text")
log.warning(stream.getvalue())
md_dict['description'] = raw_desc
md_dict['description_content_type'] = mimetype
if 'urls' in md_sect:
project_urls = md_dict['project_urls'] = []
for label, url in sorted(md_sect.pop('urls').items()):
project_urls.append("{}, {}".format(label, url))
for key, value in md_sect.items():
if key in {'description-file', 'module'}:
continue
if key not in metadata_allowed_fields:
closest = difflib.get_close_matches(key, metadata_allowed_fields,
n=1, cutoff=0.7)
msg = "Unrecognised metadata key: {!r}".format(key)
if closest:
msg += " (did you mean {!r}?)".format(closest[0])
raise ConfigError(msg)
k2 = key.replace('-', '_')
md_dict[k2] = value
if key in metadata_list_fields:
if not isinstance(value, list):
raise ConfigError('Expected a list for {} field, found {!r}'
.format(key, value))
if not all(isinstance(a, str) for a in value):
raise ConfigError('Expected a list of strings for {} field'
.format(key))
elif key == 'requires-extra':
if not isinstance(value, dict):
raise ConfigError('Expected a dict for requires-extra field, found {!r}'
.format(value))
if not all(isinstance(e, list) for e in value.values()):
raise ConfigError('Expected a dict of lists for requires-extra field')
for e, reqs in value.items():
if not all(isinstance(a, str) for a in reqs):
raise ConfigError('Expected a string list for requires-extra. (extra {})'
.format(e))
else:
if not isinstance(value, str):
raise ConfigError('Expected a string for {} field, found {!r}'
.format(key, value))
# What we call requires in the ini file is technically requires_dist in
# the metadata.
if 'requires' in md_dict:
md_dict['requires_dist'] = md_dict.pop('requires')
# And what we call dist-name is name in the metadata
if 'dist_name' in md_dict:
md_dict['name'] = md_dict.pop('dist_name')
# Move dev-requires into requires-extra
reqs_noextra = md_dict.pop('requires_dist', [])
reqs_by_extra = md_dict.pop('requires_extra', {})
dev_requires = md_dict.pop('dev_requires', None)
if dev_requires is not None:
if 'dev' in reqs_by_extra:
raise ConfigError(
'dev-requires occurs together with its replacement requires-extra.dev.')
else:
log.warning(
'“dev-requires = ...” is obsolete. Use “requires-extra = {"dev" = ...}” instead.')
reqs_by_extra['dev'] = dev_requires
# Add requires-extra requirements into requires_dist
md_dict['requires_dist'] = \
reqs_noextra + list(_expand_requires_extra(reqs_by_extra))
md_dict['provides_extra'] = sorted(reqs_by_extra.keys())
# For internal use, record the main requirements as a '.none' extra.
reqs_by_extra['.none'] = reqs_noextra
return md_dict, module, reqs_by_extra
def _expand_requires_extra(re):
for extra, reqs in sorted(re.items()):
for req in reqs:
if ';' in req:
name, envmark = req.split(';', 1)
yield '{}; extra == "{}" and ({})'.format(name, extra, envmark)
else:
yield '{}; extra == "{}"'.format(req, extra)
def _validate_config(cp, path):
"""Validate and process config loaded from a flit.ini file.
Returns a dict with keys: module, metadata, scripts, entrypoints, raw_config
"""
unknown_sections = set(cp.sections()) - {'metadata', 'scripts'}
unknown_sections = [s for s in unknown_sections if not s.lower().startswith('x-')]
if unknown_sections:
raise ConfigError('Unknown sections: ' + ', '.join(unknown_sections))
if not cp.has_section('metadata'):
raise ConfigError('[metadata] section is required')
md_sect = {}
for k, v in cp['metadata'].items():
if k in metadata_list_fields:
md_sect[k] = [l for l in v.splitlines() if l.strip()]
else:
md_sect[k] = v
if 'entry-points-file' in md_sect:
entry_points_file = path.parent / md_sect.pop('entry-points-file')
if not entry_points_file.is_file():
raise FileNotFoundError(entry_points_file)
else:
entry_points_file = path.parent / 'entry_points.txt'
if not entry_points_file.is_file():
entry_points_file = None
if entry_points_file:
ep_cp = configparser.ConfigParser()
with entry_points_file.open() as f:
ep_cp.read_file(f)
# Convert to regular dict
entrypoints = {k: dict(v) for k,v in ep_cp.items()}
else:
entrypoints = {}
md_dict, module, reqs_by_extra = _prep_metadata(md_sect, path)
# Scripts ---------------
if cp.has_section('scripts'):
scripts_dict = dict(cp['scripts'])
else:
scripts_dict = {}
_add_scripts_to_entrypoints(entrypoints, scripts_dict)
return {
'module': module,
'metadata': md_dict,
'reqs_by_extra': reqs_by_extra,
'scripts': scripts_dict,
'entrypoints': entrypoints,
'raw_config': cp,
}
| _add_scripts_to_entrypoints | identifier_name |
dynmap.go | package dynmap
import (
"strings"
"log"
"encoding/json"
"errors"
"fmt"
"net/url"
"time"
"reflect"
)
//Don't make this a map type, since we want the option of
//extending this and adding members.
type DynMap struct {
Map map[string]interface{}
}
type DynMaper interface {
ToDynMap() *DynMap
}
// Creates a new dynmap
func New() *DynMap {
return &DynMap{make(map[string]interface{})}
}
// Deprecated. use New() instead
func NewDynMap() *DynMap {
return &DynMap{make(map[string]interface{})}
}
// Recursively converts this to a regular go map.
// (will convert any sub maps)
func (this *DynMap) ToMap() map[string]interface{} {
mp := make(map[string]interface{}) | for k, v := range(this.Map) {
submp, ok := ToDynMap(this.Map[k])
if ok {
v = submp.ToMap()
}
mp[k] = v
}
return mp
}
// recursively clones this DynMap. all sub maps will be cloned as well
func (this *DynMap) Clone() *DynMap {
mp := New()
for k, v := range(this.Map) {
submp, ok := ToDynMap(this.Map[k])
if ok {
v = submp.Clone()
}
mp.Put(k, v)
}
return mp
}
// Returns self. Here so that we satisfy the DynMaper interface
func (this *DynMap) ToDynMap() *DynMap {
return this
}
//encodes this map into a url encoded string.
//maps are encoded in the rails style (key[key1][key2]=value)
// TODO: we should sort the keynames so ordering is consistent and then this
// can be used as a cache key
func (this *DynMap) MarshalURL() (string, error) {
vals := &url.Values{}
for key, value := range this.Map {
err := this.urlEncode(vals, key, value)
if err != nil {
return "", err
}
}
	str := vals.Encode()
	log.Println(str)
	return str, nil
}
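// Illustrative sketch (added; not part of the original file): nested maps
// come out rails-style, e.g. {"user":{"name":"bob"}} encodes as
// user[name]=bob, with the brackets percent-encoded by url.Values.
func exampleMarshalURL() {
	dm := New()
	dm.PutWithDot("user.name", "bob")
	s, _ := dm.MarshalURL()
	log.Println(s) // user%5Bname%5D=bob
}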
// Unmarshals a url encoded string.
// will also parse rails style maps in the form key[key1][key2]=val
func (this *DynMap) UnmarshalURL(urlstring string) error {
//TODO: split on ?
values, err := url.ParseQuery(urlstring)
if err != nil {
return err
}
return this.UnmarshalURLValues(values)
}
// Unmarshals url.Values into the map.
// Will correctly handle rails style maps in the form key[key1][key2]=val
func (this *DynMap) UnmarshalURLValues(values url.Values) error {
for k := range values {
var v = values[k]
key := strings.Replace(k, "[", ".", -1)
key = strings.Replace(key, "]", "", -1)
if len(v) == 1 {
this.PutWithDot(key, v[0])
} else {
this.PutWithDot(key, v)
}
}
return nil
}
//adds the requested value to the Values
func (this *DynMap) urlEncode(vals *url.Values, key string, value interface{}) error {
if DynMapConvertable(value) {
mp, ok := ToDynMap(value)
if !ok {
return fmt.Errorf("Unable to convert %s", mp)
}
for k, v := range mp.Map {
//encode in rails style key[key2]=value
this.urlEncode(vals, fmt.Sprintf("%s[%s]", key, k), v)
}
return nil
}
r := reflect.ValueOf(value)
//now test if it is an array
	if r.Kind() == reflect.Array || r.Kind() == reflect.Slice {
		for i := 0; i < r.Len(); i++ {
			this.urlEncode(vals, key, r.Index(i).Interface())
		}
		// each element was encoded individually; return so we don't also
		// add the whole slice as a single stringified value below
		return nil
	}
vals.Add(key, ToString(value))
return nil
}
func (this *DynMap) MarshalJSON() ([]byte, error) {
bytes, err := json.Marshal(this.Map)
return bytes, err
}
func (this *DynMap) UnmarshalJSON(bytes []byte) error {
return json.Unmarshal(bytes, &this.Map)
}
// Gets the value at the specified key as an int64. returns -1,false if value not available or is not convertable
func (this *DynMap) GetInt64(key string) (int64, bool) {
tmp, ok := this.Get(key)
if !ok {
return -1, ok
}
val, err := ToInt64(tmp)
if err == nil {
return val, true
}
return -1, false
}
func (this *DynMap) MustInt64(key string, def int64) int64 {
v, ok := this.GetInt64(key)
if ok {
return v
}
return def
}
func (this *DynMap) MustInt(key string, def int) int {
v, ok := this.GetInt(key)
if ok {
return v
}
return def
}
func (this *DynMap) GetInt(key string) (int, bool) {
v, ok := this.GetInt64(key)
if !ok {
return -1, ok
}
return int(v), true
}
//
// Gets a string representation of the value at key
//
func (this *DynMap) GetString(key string) (string, bool) {
tmp, ok := this.Get(key)
if !ok {
return ToString(tmp), ok
}
return ToString(tmp), true
}
// gets a string. if string is not available in the map, then the default
//is returned
func (this *DynMap) MustString(key string, def string) string {
tmp, ok := this.GetString(key)
if !ok {
return def
}
return tmp
}
func (this *DynMap) GetTime(key string) (time.Time, bool) {
tmp, ok := this.Get(key)
if !ok {
return time.Now(), false
}
t, err := ToTime(tmp)
if err != nil {
return time.Now(), false
}
return t, true
}
func (this *DynMap) MustTime(key string, def time.Time) time.Time {
tmp, ok := this.GetTime(key)
if !ok {
return def
}
return tmp
}
func (this *DynMap) GetBool(key string) (bool, bool) {
tmp, ok := this.Get(key)
if !ok {
return false, ok
}
b, err := ToBool(tmp)
if err != nil {
return false, false
}
return b, true
}
func (this *DynMap) MustBool(key string, def bool) bool {
tmp, ok := this.GetBool(key)
if !ok {
return def
}
return tmp
}
//Gets a dynmap at the requested key.
// This will update the value in the map if the
// value was not already a dynmap.
func (this *DynMap) GetDynMap(key string) (*DynMap, bool) {
tmp, ok := this.Get(key)
if !ok {
return nil, ok
}
mp, ok := ToDynMap(tmp)
return mp, ok
}
func (this *DynMap) MustDynMap(key string, def *DynMap) *DynMap {
tmp, ok := this.GetDynMap(key)
if !ok {
return def
}
return tmp
}
// gets a slice of dynmaps
func (this *DynMap) GetDynMapSlice(key string) ([]*DynMap, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case []*DynMap:
return v, true
case []interface{}:
retlist := make([]*DynMap, 0)
for _, tmp := range v {
in, ok := ToDynMap(tmp)
if !ok {
return nil, false
}
retlist = append(retlist, in)
}
return retlist, true
}
return nil, false
}
//Returns a slice of ints
func (this *DynMap) GetIntSlice(key string) ([]int, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case []int:
return v, true
case []interface{}:
retlist := make([]int, 0)
for _, tmp := range v {
in, err := ToInt(tmp)
if err != nil {
return nil, false
}
retlist = append(retlist, in)
}
return retlist, true
}
return nil, false
}
//gets a slice of ints. if the value is a string it will
//split by the requested delimiter
func (this *DynMap) GetIntSliceSplit(key, delim string) ([]int, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case string:
retlist := make([]int, 0)
for _, tmp := range strings.Split(v, delim) {
in, err := ToInt(tmp)
if err != nil {
return nil, false
}
retlist = append(retlist, in)
}
return retlist, true
}
ret, ok := this.GetIntSlice(key)
return ret, ok
}
//Returns a slice of strings
func (this *DynMap) GetStringSlice(key string) ([]string, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case []string:
return v, true
case []interface{}:
retlist := make([]string, 0)
for _, tmp := range v {
in := ToString(tmp)
retlist = append(retlist, in)
}
return retlist, true
}
return nil, false
}
//gets a slice of strings. if the value is a string it will
//split by the requested delimiter
func (this *DynMap) GetStringSliceSplit(key, delim string) ([]string, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case string:
return strings.Split(v, delim), true
}
ret, ok := this.GetStringSlice(key)
return ret, ok
}
// Adds the item to a slice
func (this *DynMap) AddToSlice(key string, mp interface{}) error {
this.PutIfAbsent(key, make([]interface{}, 0))
lst, _ := this.Get(key)
switch v := lst.(type) {
case []interface{}:
v = append(v, mp)
this.Put(key, v)
}
return nil
}
// puts all the values from the passed in map into this dynmap
// the passed in map must be convertable to a DynMap via ToDynMap.
// returns false if the passed value is not convertable to dynmap
func (this *DynMap) PutAll(mp interface{}) bool {
dynmap, ok := ToDynMap(mp)
if !ok {
return false
}
for k, v := range dynmap.Map {
this.Put(k, v)
}
return true
}
//
// Puts the value into the map if and only if no value exists at the
// specified key.
// This does not honor the dot operator on insert.
func (this *DynMap) PutIfAbsent(key string, value interface{}) (interface{}, bool) {
v, ok := this.Get(key)
if ok {
return v, false
}
this.Put(key, value)
return value, true
}
//
// Same as PutIfAbsent but honors the dot operator
//
func (this *DynMap) PutIfAbsentWithDot(key string, value interface{}) (interface{}, bool) {
v, ok := this.Get(key)
if ok {
return v, false
}
this.PutWithDot(key, value)
return value, true
}
//
// Put's a value into the map
//
func (this *DynMap) Put(key string, value interface{}) {
this.Map[key] = value
}
//
// puts the value into the map, honoring the dot operator.
// so PutWithDot("map1.map2.value", 100)
// would result in:
// {
// map1 : { map2 : { value: 100 }}
//
// }
func (this *DynMap) PutWithDot(key string, value interface{}) error {
splitStr := strings.Split(key, ".")
if len(splitStr) == 1 {
this.Put(key, value)
return nil
}
mapKeys := splitStr[:(len(splitStr) - 1)]
var mp = this.Map
for _, k := range mapKeys {
tmp, o := mp[k]
if !o {
//create a new map and insert
newmap := make(map[string]interface{})
mp[k] = newmap
mp = newmap
} else {
mp, o = ToMap(tmp)
if !o {
//error
return errors.New("Error, value at key was not a map")
}
}
}
mp[splitStr[len(splitStr)-1]] = value
return nil
}
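// Illustrative sketch (added): the dot operator builds nested maps on write
// and walks them on read.
func exampleDotOperator() {
	dm := New()
	_ = dm.PutWithDot("map1.map2.value", 100) // {"map1":{"map2":{"value":100}}}
	if v, ok := dm.GetInt("map1.map2.value"); ok {
		log.Println(v) // 100
	}
}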
func (this *DynMap) Exists(key string) bool {
_, ok := this.Get(key)
return ok
}
//Remove a mapping
//TODO: this will need to honor the dot operator!
func (this *DynMap) Remove(key string) (interface{}, bool) {
val, ok := this.Map[key]
if ok {
delete(this.Map, key)
return val, true
}
//TODO: dot op..
return val, false
}
//
// Gets the value. Will honor the dot operator if needed.
// key = 'map.map2'
// will first attempt to match the literal key 'map.map2'
// if no value is present it will look for a sub map at key 'map'
//
func (this *DynMap) Get(key string) (interface{}, bool) {
val, ok := this.Map[key]
if ok {
return val, true
}
//look for dot operator.
splitStr := strings.Split(key, ".")
if len(splitStr) == 1 {
return val, false
}
var mp = this.Map
for index, k := range splitStr {
tmp, o := mp[k]
if !o {
return val, ok
}
if index == (len(splitStr) - 1) {
return tmp, o
} else {
mp, o = ToMap(tmp)
if !o {
return val, ok
}
}
}
return val, ok
} | random_line_split |
|
dynmap.go | package dynmap
import (
"strings"
"log"
"encoding/json"
"errors"
"fmt"
"net/url"
"time"
"reflect"
)
//Don't make this a map type, since we want the option of
//extending this and adding members.
type DynMap struct {
Map map[string]interface{}
}
type DynMaper interface {
ToDynMap() *DynMap
}
// Creates a new dynmap
func New() *DynMap {
return &DynMap{make(map[string]interface{})}
}
// Deprecated. use New() instead
func NewDynMap() *DynMap {
return &DynMap{make(map[string]interface{})}
}
// Recursively converts this to a regular go map.
// (will convert any sub maps)
func (this *DynMap) ToMap() map[string]interface{} {
mp := make(map[string]interface{})
for k, v := range(this.Map) {
submp, ok := ToDynMap(this.Map[k])
if ok {
v = submp.ToMap()
}
mp[k] = v
}
return mp
}
// recursively clones this DynMap. all sub maps will be cloned as well
func (this *DynMap) Clone() *DynMap {
mp := New()
for k, v := range(this.Map) {
submp, ok := ToDynMap(this.Map[k])
if ok {
v = submp.Clone()
}
mp.Put(k, v)
}
return mp
}
// Returns self. Here so that we satisfy the DynMaper interface
func (this *DynMap) ToDynMap() *DynMap {
return this
}
//encodes this map into a url encoded string.
//maps are encoded in the rails style (key[key1][key2]=value)
// TODO: we should sort the keynames so ordering is consistent and then this
// can be used as a cache key
func (this *DynMap) MarshalURL() (string, error) {
vals := &url.Values{}
for key, value := range this.Map {
err := this.urlEncode(vals, key, value)
if err != nil {
return "", err
}
}
	str := vals.Encode()
	log.Println(str)
	return str, nil
}
// Unmarshals a url encoded string.
// will also parse rails style maps in the form key[key1][key2]=val
func (this *DynMap) UnmarshalURL(urlstring string) error {
//TODO: split on ?
values, err := url.ParseQuery(urlstring)
if err != nil {
return err
}
return this.UnmarshalURLValues(values)
}
// Unmarshals url.Values into the map.
// Will correctly handle rails style maps in the form key[key1][key2]=val
func (this *DynMap) UnmarshalURLValues(values url.Values) error {
for k := range values {
var v = values[k]
key := strings.Replace(k, "[", ".", -1)
key = strings.Replace(key, "]", "", -1)
if len(v) == 1 {
this.PutWithDot(key, v[0])
} else {
this.PutWithDot(key, v)
}
}
return nil
}
//adds the requested value to the Values
func (this *DynMap) urlEncode(vals *url.Values, key string, value interface{}) error {
if DynMapConvertable(value) {
mp, ok := ToDynMap(value)
if !ok |
for k, v := range mp.Map {
//encode in rails style key[key2]=value
this.urlEncode(vals, fmt.Sprintf("%s[%s]", key, k), v)
}
return nil
}
r := reflect.ValueOf(value)
//now test if it is an array
	if r.Kind() == reflect.Array || r.Kind() == reflect.Slice {
		for i := 0; i < r.Len(); i++ {
			this.urlEncode(vals, key, r.Index(i).Interface())
		}
		// each element was encoded individually; return so we don't also
		// add the whole slice as a single stringified value below
		return nil
	}
vals.Add(key, ToString(value))
return nil
}
func (this *DynMap) MarshalJSON() ([]byte, error) {
bytes, err := json.Marshal(this.Map)
return bytes, err
}
func (this *DynMap) UnmarshalJSON(bytes []byte) error {
return json.Unmarshal(bytes, &this.Map)
}
// Gets the value at the specified key as an int64. returns -1,false if value not available or is not convertable
func (this *DynMap) GetInt64(key string) (int64, bool) {
tmp, ok := this.Get(key)
if !ok {
return -1, ok
}
val, err := ToInt64(tmp)
if err == nil {
return val, true
}
return -1, false
}
func (this *DynMap) MustInt64(key string, def int64) int64 {
v, ok := this.GetInt64(key)
if ok {
return v
}
return def
}
func (this *DynMap) MustInt(key string, def int) int {
v, ok := this.GetInt(key)
if ok {
return v
}
return def
}
func (this *DynMap) GetInt(key string) (int, bool) {
v, ok := this.GetInt64(key)
if !ok {
return -1, ok
}
return int(v), true
}
//
// Gets a string representation of the value at key
//
func (this *DynMap) GetString(key string) (string, bool) {
tmp, ok := this.Get(key)
if !ok {
return ToString(tmp), ok
}
return ToString(tmp), true
}
// gets a string. if string is not available in the map, then the default
//is returned
func (this *DynMap) MustString(key string, def string) string {
tmp, ok := this.GetString(key)
if !ok {
return def
}
return tmp
}
func (this *DynMap) GetTime(key string) (time.Time, bool) {
tmp, ok := this.Get(key)
if !ok {
return time.Now(), false
}
t, err := ToTime(tmp)
if err != nil {
return time.Now(), false
}
return t, true
}
func (this *DynMap) MustTime(key string, def time.Time) time.Time {
tmp, ok := this.GetTime(key)
if !ok {
return def
}
return tmp
}
func (this *DynMap) GetBool(key string) (bool, bool) {
tmp, ok := this.Get(key)
if !ok {
return false, ok
}
b, err := ToBool(tmp)
if err != nil {
return false, false
}
return b, true
}
func (this *DynMap) MustBool(key string, def bool) bool {
tmp, ok := this.GetBool(key)
if !ok {
return def
}
return tmp
}
//Gets a DynMap at the requested key.
// Returns false if no value exists at that key or the value
// is not convertible to a DynMap.
func (this *DynMap) GetDynMap(key string) (*DynMap, bool) {
tmp, ok := this.Get(key)
if !ok {
return nil, ok
}
mp, ok := ToDynMap(tmp)
return mp, ok
}
func (this *DynMap) MustDynMap(key string, def *DynMap) *DynMap {
tmp, ok := this.GetDynMap(key)
if !ok {
return def
}
return tmp
}
// gets a slice of dynmaps
func (this *DynMap) GetDynMapSlice(key string) ([]*DynMap, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case []*DynMap:
return v, true
case []interface{}:
retlist := make([]*DynMap, 0)
for _, tmp := range v {
in, ok := ToDynMap(tmp)
if !ok {
return nil, false
}
retlist = append(retlist, in)
}
return retlist, true
}
return nil, false
}
//Returns a slice of ints
func (this *DynMap) GetIntSlice(key string) ([]int, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case []int:
return v, true
case []interface{}:
retlist := make([]int, 0)
for _, tmp := range v {
in, err := ToInt(tmp)
if err != nil {
return nil, false
}
retlist = append(retlist, in)
}
return retlist, true
}
return nil, false
}
//gets a slice of ints. if the value is a string it will
//split by the requested delimiter
func (this *DynMap) GetIntSliceSplit(key, delim string) ([]int, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case string:
retlist := make([]int, 0)
for _, tmp := range strings.Split(v, delim) {
in, err := ToInt(tmp)
if err != nil {
return nil, false
}
retlist = append(retlist, in)
}
return retlist, true
}
ret, ok := this.GetIntSlice(key)
return ret, ok
}
//Returns a slice of strings
func (this *DynMap) GetStringSlice(key string) ([]string, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case []string:
return v, true
case []interface{}:
retlist := make([]string, 0)
for _, tmp := range v {
in := ToString(tmp)
retlist = append(retlist, in)
}
return retlist, true
}
return nil, false
}
//gets a slice of strings. if the value is a string it will
//split by the requested delimiter
func (this *DynMap) GetStringSliceSplit(key, delim string) ([]string, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case string:
return strings.Split(v, delim), true
}
ret, ok := this.GetStringSlice(key)
return ret, ok
}
// Adds the item to the slice at key, creating the slice if absent.
// The slice is written back via Put because append may reallocate.
func (this *DynMap) AddToSlice(key string, mp interface{}) error {
this.PutIfAbsent(key, make([]interface{}, 0))
lst, _ := this.Get(key)
switch v := lst.(type) {
case []interface{}:
v = append(v, mp)
this.Put(key, v)
}
return nil
}
// puts all the values from the passed in map into this dynmap
// the passed in map must be convertible to a DynMap via ToDynMap.
// returns false if the passed value is not convertible to a dynmap
func (this *DynMap) PutAll(mp interface{}) bool {
dynmap, ok := ToDynMap(mp)
if !ok {
return false
}
for k, v := range dynmap.Map {
this.Put(k, v)
}
return true
}
//
// Puts the value into the map if and only if no value exists at the
// specified key.
// This does not honor the dot operator on insert.
func (this *DynMap) PutIfAbsent(key string, value interface{}) (interface{}, bool) {
v, ok := this.Get(key)
if ok {
return v, false
}
this.Put(key, value)
return value, true
}
//
// Same as PutIfAbsent but honors the dot operator
//
func (this *DynMap) PutIfAbsentWithDot(key string, value interface{}) (interface{}, bool) {
v, ok := this.Get(key)
if ok {
return v, false
}
this.PutWithDot(key, value)
return value, true
}
//
// Puts a value into the map
//
func (this *DynMap) Put(key string, value interface{}) {
this.Map[key] = value
}
//
// puts the value into the map, honoring the dot operator.
// so PutWithDot("map1.map2.value", 100)
// would result in:
// {
// map1 : { map2 : { value: 100 }}
//
// }
func (this *DynMap) PutWithDot(key string, value interface{}) error {
splitStr := strings.Split(key, ".")
if len(splitStr) == 1 {
this.Put(key, value)
return nil
}
mapKeys := splitStr[:(len(splitStr) - 1)]
var mp = this.Map
for _, k := range mapKeys {
tmp, o := mp[k]
if !o {
//create a new map and insert
newmap := make(map[string]interface{})
mp[k] = newmap
mp = newmap
} else {
mp, o = ToMap(tmp)
if !o {
//error
return errors.New("value at key was not a map")
}
}
}
mp[splitStr[len(splitStr)-1]] = value
return nil
}
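// Editor-added sketch: PutWithDot builds the nested maps that Get later
// traverses; the key path here is made up for illustration.
func exampleDotOperator() {
mp := New()
_ = mp.PutWithDot("map1.map2.value", 100)
v, ok := mp.Get("map1.map2.value") // 100, true
_, _ = v, ok
}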
func (this *DynMap) Exists(key string) bool {
_, ok := this.Get(key)
return ok
}
//Remove a mapping
//TODO: this will need to honor the dot operator!
func (this *DynMap) Remove(key string) (interface{}, bool) {
val, ok := this.Map[key]
if ok {
delete(this.Map, key)
return val, true
}
//TODO: dot op..
return val, false
}
//
// Gets the value, honoring the dot operator if needed.
// key = 'map.map2'
// will first attempt to match the literal key 'map.map2'
// if no value is present it will look for a sub map at key 'map'
//
func (this *DynMap) Get(key string) (interface{}, bool) {
val, ok := this.Map[key]
if ok {
return val, true
}
//look for dot operator.
splitStr := strings.Split(key, ".")
if len(splitStr) == 1 {
return val, false
}
var mp = this.Map
for index, k := range splitStr {
tmp, o := mp[k]
if !o {
return val, ok
}
if index == (len(splitStr) - 1) {
return tmp, o
} else {
mp, o = ToMap(tmp)
if !o {
return val, ok
}
}
}
return val, ok
}
// dynmap.go
package dynmap
import (
"encoding/json"
"errors"
"fmt"
"net/url"
"reflect"
"strings"
"time"
)
//Don't make this a map type, since we want the option of
//extending this and adding members.
type DynMap struct {
Map map[string]interface{}
}
type DynMaper interface {
ToDynMap() *DynMap
}
// Creates a new dynmap
func New() *DynMap {
return &DynMap{make(map[string]interface{})}
}
// Deprecated. use New() instead
func NewDynMap() *DynMap {
return &DynMap{make(map[string]interface{})}
}
// Recursively converts this to a regular go map.
// (will convert any sub maps)
func (this *DynMap) ToMap() map[string]interface{} {
mp := make(map[string]interface{})
for k, v := range this.Map {
submp, ok := ToDynMap(this.Map[k])
if ok {
v = submp.ToMap()
}
mp[k] = v
}
return mp
}
// recursively clones this DynMap. all sub maps will be cloned as well
func (this *DynMap) Clone() *DynMap {
mp := New()
for k, v := range this.Map {
submp, ok := ToDynMap(this.Map[k])
if ok {
v = submp.Clone()
}
mp.Put(k, v)
}
return mp
}
// Returns self. Here so that we satisfy the DynMaper interface
func (this *DynMap) ToDynMap() *DynMap {
return this
}
//encodes this map into a url encoded string.
//maps are encoded in the rails style (key[key1][key2]=value)
// TODO: we should sort the key names so ordering is consistent and then this
// can be used as a cache key
func (this *DynMap) MarshalURL() (string, error) {
vals := &url.Values{}
for key, value := range this.Map {
err := this.urlEncode(vals, key, value)
if err != nil {
return "", err
}
}
return vals.Encode(), nil
}
// Unmarshals a url encoded string.
// will also parse rails style maps in the form key[key1][key2]=val
func (this *DynMap) UnmarshalURL(urlstring string) error {
//TODO: split on ?
values, err := url.ParseQuery(urlstring)
if err != nil {
return err
}
return this.UnmarshalURLValues(values)
}
// Unmarshals url.Values into the map.
// Will correctly handle rails style maps in the form key[key1][key2]=val
func (this *DynMap) UnmarshalURLValues(values url.Values) error {
for k := range values {
var v = values[k]
key := strings.Replace(k, "[", ".", -1)
key = strings.Replace(key, "]", "", -1)
if len(v) == 1 {
this.PutWithDot(key, v[0])
} else {
this.PutWithDot(key, v)
}
}
return nil
}
//adds the requested value to the Values
func (this *DynMap) urlEncode(vals *url.Values, key string, value interface{}) error {
if DynMapConvertable(value) {
mp, ok := ToDynMap(value)
if !ok {
return fmt.Errorf("unable to convert %v to a DynMap", value)
}
for k, v := range mp.Map {
//encode in rails style key[key2]=value
err := this.urlEncode(vals, fmt.Sprintf("%s[%s]", key, k), v)
if err != nil {
return err
}
}
return nil
}
r := reflect.ValueOf(value)
//now test if it is an array or slice; encode each element under the same key
if r.Kind() == reflect.Array || r.Kind() == reflect.Slice {
for i := 0; i < r.Len(); i++ {
err := this.urlEncode(vals, key, r.Index(i).Interface())
if err != nil {
return err
}
}
return nil
}
vals.Add(key, ToString(value))
return nil
}
func (this *DynMap) MarshalJSON() ([]byte, error) {
bytes, err := json.Marshal(this.Map)
return bytes, err
}
func (this *DynMap) UnmarshalJSON(bytes []byte) error {
return json.Unmarshal(bytes, &this.Map)
}
// Gets the value at the specified key as an int64. returns -1,false if value not available or is not convertible
func (this *DynMap) GetInt64(key string) (int64, bool) {
tmp, ok := this.Get(key)
if !ok {
return -1, ok
}
val, err := ToInt64(tmp)
if err == nil {
return val, true
}
return -1, false
}
func (this *DynMap) MustInt64(key string, def int64) int64 {
v, ok := this.GetInt64(key)
if ok {
return v
}
return def
}
func (this *DynMap) MustInt(key string, def int) int {
v, ok := this.GetInt(key)
if ok {
return v
}
return def
}
func (this *DynMap) GetInt(key string) (int, bool) {
v, ok := this.GetInt64(key)
if !ok {
return -1, ok
}
return int(v), true
}
//
// Gets a string representation of the value at key
//
func (this *DynMap) GetString(key string) (string, bool) {
tmp, ok := this.Get(key)
if !ok {
return ToString(tmp), ok
}
return ToString(tmp), true
}
// gets a string. if the string is not available in the map, then the default
// is returned
func (this *DynMap) MustString(key string, def string) string {
tmp, ok := this.GetString(key)
if !ok {
return def
}
return tmp
}
func (this *DynMap) GetTime(key string) (time.Time, bool) {
tmp, ok := this.Get(key)
if !ok {
return time.Now(), false
}
t, err := ToTime(tmp)
if err != nil {
return time.Now(), false
}
return t, true
}
func (this *DynMap) MustTime(key string, def time.Time) time.Time {
tmp, ok := this.GetTime(key)
if !ok {
return def
}
return tmp
}
func (this *DynMap) GetBool(key string) (bool, bool) {
tmp, ok := this.Get(key)
if !ok {
return false, ok
}
b, err := ToBool(tmp)
if err != nil {
return false, false
}
return b, true
}
func (this *DynMap) MustBool(key string, def bool) bool {
tmp, ok := this.GetBool(key)
if !ok {
return def
}
return tmp
}
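// Editor-added sketch: the Get*/Must* pairs share one pattern — Get* reports
// success with a second return value, Must* falls back to a caller-supplied
// default. The key names below are hypothetical.
func exampleTypedGetters(mp *DynMap) {
age, ok := mp.GetInt("age") // false if missing or not convertible
_, _ = age, ok
name := mp.MustString("name", "anonymous")
_ = name
}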
//Gets a DynMap at the requested key.
// Returns false if no value exists at that key or the value
// is not convertible to a DynMap.
func (this *DynMap) GetDynMap(key string) (*DynMap, bool) {
tmp, ok := this.Get(key)
if !ok {
return nil, ok
}
mp, ok := ToDynMap(tmp)
return mp, ok
}
func (this *DynMap) MustDynMap(key string, def *DynMap) *DynMap {
tmp, ok := this.GetDynMap(key)
if !ok {
return def
}
return tmp
}
// gets a slice of dynmaps
func (this *DynMap) GetDynMapSlice(key string) ([]*DynMap, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case []*DynMap:
return v, true
case []interface{}:
retlist := make([]*DynMap, 0)
for _, tmp := range v {
in, ok := ToDynMap(tmp)
if !ok {
return nil, false
}
retlist = append(retlist, in)
}
return retlist, true
}
return nil, false
}
//Returns a slice of ints
func (this *DynMap) GetIntSlice(key string) ([]int, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case []int:
return v, true
case []interface{}:
retlist := make([]int, 0)
for _, tmp := range v {
in, err := ToInt(tmp)
if err != nil {
return nil, false
}
retlist = append(retlist, in)
}
return retlist, true
}
return nil, false
}
//gets a slice of ints. if the value is a string it will
//split by the requested delimiter
func (this *DynMap) GetIntSliceSplit(key, delim string) ([]int, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case string:
retlist := make([]int, 0)
for _, tmp := range strings.Split(v, delim) {
in, err := ToInt(tmp)
if err != nil {
return nil, false
}
retlist = append(retlist, in)
}
return retlist, true
}
ret, ok := this.GetIntSlice(key)
return ret, ok
}
//Returns a slice of strings
func (this *DynMap) GetStringSlice(key string) ([]string, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case []string:
return v, true
case []interface{}:
retlist := make([]string, 0)
for _, tmp := range v {
in := ToString(tmp)
retlist = append(retlist, in)
}
return retlist, true
}
return nil, false
}
//gets a slice of strings. if the value is a string it will
//split by the requested delimiter
func (this *DynMap) GetStringSliceSplit(key, delim string) ([]string, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case string:
return strings.Split(v, delim), true
}
ret, ok := this.GetStringSlice(key)
return ret, ok
}
// Adds the item to the slice at key, creating the slice if absent.
// The slice is written back via Put because append may reallocate.
func (this *DynMap) AddToSlice(key string, mp interface{}) error {
this.PutIfAbsent(key, make([]interface{}, 0))
lst, _ := this.Get(key)
switch v := lst.(type) {
case []interface{}:
v = append(v, mp)
this.Put(key, v)
}
return nil
}
// puts all the values from the passed in map into this dynmap
// the passed in map must be convertible to a DynMap via ToDynMap.
// returns false if the passed value is not convertible to a dynmap
func (this *DynMap) PutAll(mp interface{}) bool {
dynmap, ok := ToDynMap(mp)
if !ok {
return false
}
for k, v := range dynmap.Map {
this.Put(k, v)
}
return true
}
//
// Puts the value into the map if and only if no value exists at the
// specified key.
// This does not honor the dot operator on insert.
func (this *DynMap) PutIfAbsent(key string, value interface{}) (interface{}, bool) {
v, ok := this.Get(key)
if ok {
return v, false
}
this.Put(key, value)
return value, true
}
//
// Same as PutIfAbsent but honors the dot operator
//
func (this *DynMap) PutIfAbsentWithDot(key string, value interface{}) (interface{}, bool) {
v, ok := this.Get(key)
if ok {
return v, false
}
this.PutWithDot(key, value)
return value, true
}
//
// Puts a value into the map
//
func (this *DynMap) Put(key string, value interface{}) {
this.Map[key] = value
}
//
// puts the value into the map, honoring the dot operator.
// so PutWithDot("map1.map2.value", 100)
// would result in:
// {
// map1 : { map2 : { value: 100 }}
//
// }
func (this *DynMap) PutWithDot(key string, value interface{}) error {
splitStr := strings.Split(key, ".")
if len(splitStr) == 1 {
this.Put(key, value)
return nil
}
mapKeys := splitStr[:(len(splitStr) - 1)]
var mp = this.Map
for _, k := range mapKeys {
tmp, o := mp[k]
if !o {
//create a new map and insert
newmap := make(map[string]interface{})
mp[k] = newmap
mp = newmap
} else {
mp, o = ToMap(tmp)
if !o {
//error
return errors.New("value at key was not a map")
}
}
}
mp[splitStr[len(splitStr)-1]] = value
return nil
}
func (this *DynMap) Exists(key string) bool {
_, ok := this.Get(key)
return ok
}
//Remove a mapping
//TODO: this will need to honor the dot operator!
func (this *DynMap) Remove(key string) (interface{}, bool) {
val, ok := this.Map[key]
if ok {
delete(this.Map, key)
return val, true
}
//TODO: dot op..
return val, false
}
//
// Gets the value, honoring the dot operator if needed.
// key = 'map.map2'
// will first attempt to match the literal key 'map.map2'
// if no value is present it will look for a sub map at key 'map'
//
func (this *DynMap) Get(key string) (interface{}, bool) {
val, ok := this.Map[key]
if ok {
return val, true
}
//look for dot operator.
splitStr := strings.Split(key, ".")
if len(splitStr) == 1 {
return val, false
}
var mp = this.Map
for index, k := range splitStr {
tmp, o := mp[k]
if !o {
return val, ok
}
if index == (len(splitStr) - 1) {
return tmp, o
} else {
mp, o = ToMap(tmp)
if !o {
return val, ok
}
}
}
return val, ok
}
// dynmap.go
package dynmap
import (
"encoding/json"
"errors"
"fmt"
"net/url"
"reflect"
"strings"
"time"
)
//Don't make this a map type, since we want the option of
//extending this and adding members.
type DynMap struct {
Map map[string]interface{}
}
type DynMaper interface {
ToDynMap() *DynMap
}
// Creates a new dynmap
func New() *DynMap {
return &DynMap{make(map[string]interface{})}
}
// Deprecated. use New() instead
func NewDynMap() *DynMap {
return &DynMap{make(map[string]interface{})}
}
// Recursively converts this to a regular go map.
// (will convert any sub maps)
func (this *DynMap) ToMap() map[string]interface{} {
mp := make(map[string]interface{})
for k, v := range this.Map {
submp, ok := ToDynMap(this.Map[k])
if ok {
v = submp.ToMap()
}
mp[k] = v
}
return mp
}
// recursively clones this DynMap. all sub maps will be cloned as well
func (this *DynMap) Clone() *DynMap {
mp := New()
for k, v := range this.Map {
submp, ok := ToDynMap(this.Map[k])
if ok {
v = submp.Clone()
}
mp.Put(k, v)
}
return mp
}
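// Editor-added sketch: ToMap flattens everything to plain nested go maps,
// while Clone produces an independent *DynMap with deep-copied sub maps.
func exampleCloneVsToMap(src *DynMap) {
plain := src.ToMap() // map[string]interface{} all the way down
dup := src.Clone()   // mutating dup leaves src untouched
_, _ = plain, dup
}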
// Returns self. Here so that we satisfy the DynMaper interface
func (this *DynMap) ToDynMap() *DynMap {
return this
}
//encodes this map into a url encoded string.
//maps are encoded in the rails style (key[key1][key2]=value)
// TODO: we should sort the key names so ordering is consistent and then this
// can be used as a cache key
func (this *DynMap) MarshalURL() (string, error) {
vals := &url.Values{}
for key, value := range this.Map {
err := this.urlEncode(vals, key, value)
if err != nil {
return "", err
}
}
return vals.Encode(), nil
}
// Unmarshals a url encoded string.
// will also parse rails style maps in the form key[key1][key2]=val
func (this *DynMap) UnmarshalURL(urlstring string) error {
//TODO: split on ?
values, err := url.ParseQuery(urlstring)
if err != nil {
return err
}
return this.UnmarshalURLValues(values)
}
// Unmarshals url.Values into the map.
// Will correctly handle rails style maps in the form key[key1][key2]=val
func (this *DynMap) UnmarshalURLValues(values url.Values) error {
for k := range values {
var v = values[k]
key := strings.Replace(k, "[", ".", -1)
key = strings.Replace(key, "]", "", -1)
if len(v) == 1 {
this.PutWithDot(key, v[0])
} else {
this.PutWithDot(key, v)
}
}
return nil
}
//adds the requested value to the Values
func (this *DynMap) urlEncode(vals *url.Values, key string, value interface{}) error {
if DynMapConvertable(value) {
mp, ok := ToDynMap(value)
if !ok {
return fmt.Errorf("unable to convert %v to a DynMap", value)
}
for k, v := range mp.Map {
//encode in rails style key[key2]=value
err := this.urlEncode(vals, fmt.Sprintf("%s[%s]", key, k), v)
if err != nil {
return err
}
}
return nil
}
r := reflect.ValueOf(value)
//now test if it is an array or slice; encode each element under the same key
if r.Kind() == reflect.Array || r.Kind() == reflect.Slice {
for i := 0; i < r.Len(); i++ {
err := this.urlEncode(vals, key, r.Index(i).Interface())
if err != nil {
return err
}
}
return nil
}
vals.Add(key, ToString(value))
return nil
}
func (this *DynMap) MarshalJSON() ([]byte, error) {
bytes, err := json.Marshal(this.Map)
return bytes, err
}
func (this *DynMap) UnmarshalJSON(bytes []byte) error {
return json.Unmarshal(bytes, &this.Map)
}
// Gets the value at the specified key as an int64. returns -1,false if value not available or is not convertible
func (this *DynMap) GetInt64(key string) (int64, bool) {
tmp, ok := this.Get(key)
if !ok {
return -1, ok
}
val, err := ToInt64(tmp)
if err == nil {
return val, true
}
return -1, false
}
func (this *DynMap) MustInt64(key string, def int64) int64 {
v, ok := this.GetInt64(key)
if ok {
return v
}
return def
}
func (this *DynMap) MustInt(key string, def int) int {
v, ok := this.GetInt(key)
if ok {
return v
}
return def
}
func (this *DynMap) GetInt(key string) (int, bool) {
v, ok := this.GetInt64(key)
if !ok {
return -1, ok
}
return int(v), true
}
//
// Gets a string representation of the value at key
//
func (this *DynMap) GetString(key string) (string, bool) {
tmp, ok := this.Get(key)
if !ok {
return ToString(tmp), ok
}
return ToString(tmp), true
}
// gets a string. if the string is not available in the map, then the default
// is returned
func (this *DynMap) MustString(key string, def string) string {
tmp, ok := this.GetString(key)
if !ok {
return def
}
return tmp
}
func (this *DynMap) GetTime(key string) (time.Time, bool) {
tmp, ok := this.Get(key)
if !ok {
return time.Now(), false
}
t, err := ToTime(tmp)
if err != nil {
return time.Now(), false
}
return t, true
}
func (this *DynMap) MustTime(key string, def time.Time) time.Time {
tmp, ok := this.GetTime(key)
if !ok {
return def
}
return tmp
}
func (this *DynMap) GetBool(key string) (bool, bool) {
tmp, ok := this.Get(key)
if !ok {
return false, ok
}
b, err := ToBool(tmp)
if err != nil {
return false, false
}
return b, true
}
func (this *DynMap) MustBool(key string, def bool) bool {
tmp, ok := this.GetBool(key)
if !ok {
return def
}
return tmp
}
//Gets a DynMap at the requested key.
// Returns false if no value exists at that key or the value
// is not convertible to a DynMap.
func (this *DynMap) GetDynMap(key string) (*DynMap, bool) {
tmp, ok := this.Get(key)
if !ok {
return nil, ok
}
mp, ok := ToDynMap(tmp)
return mp, ok
}
func (this *DynMap) MustDynMap(key string, def *DynMap) *DynMap {
tmp, ok := this.GetDynMap(key)
if !ok {
return def
}
return tmp
}
// gets a slice of dynmaps
func (this *DynMap) GetDynMapSlice(key string) ([]*DynMap, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case []*DynMap:
return v, true
case []interface{}:
retlist := make([]*DynMap, 0)
for _, tmp := range v {
in, ok := ToDynMap(tmp)
if !ok {
return nil, false
}
retlist = append(retlist, in)
}
return retlist, true
}
return nil, false
}
//Returns a slice of ints
func (this *DynMap) GetIntSlice(key string) ([]int, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case []int:
return v, true
case []interface{}:
retlist := make([]int, 0)
for _, tmp := range v {
in, err := ToInt(tmp)
if err != nil {
return nil, false
}
retlist = append(retlist, in)
}
return retlist, true
}
return nil, false
}
//gets a slice of ints. if the value is a string it will
//split by the requested delimiter
func (this *DynMap) GetIntSliceSplit(key, delim string) ([]int, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case string:
retlist := make([]int, 0)
for _, tmp := range strings.Split(v, delim) {
in, err := ToInt(tmp)
if err != nil {
return nil, false
}
retlist = append(retlist, in)
}
return retlist, true
}
ret, ok := this.GetIntSlice(key)
return ret, ok
}
//Returns a slice of strings
func (this *DynMap) GetStringSlice(key string) ([]string, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case []string:
return v, true
case []interface{}:
retlist := make([]string, 0)
for _, tmp := range v {
in := ToString(tmp)
retlist = append(retlist, in)
}
return retlist, true
}
return nil, false
}
//gets a slice of strings. if the value is a string it will
//split by the requested delimiter
func (this *DynMap) GetStringSliceSplit(key, delim string) ([]string, bool) {
lst, ok := this.Get(key)
if !ok {
return nil, false
}
switch v := lst.(type) {
case string:
return strings.Split(v, delim), true
}
ret, ok := this.GetStringSlice(key)
return ret, ok
}
// Adds the item to the slice at key, creating the slice if absent.
// The slice is written back via Put because append may reallocate.
func (this *DynMap) AddToSlice(key string, mp interface{}) error {
this.PutIfAbsent(key, make([]interface{}, 0))
lst, _ := this.Get(key)
switch v := lst.(type) {
case []interface{}:
v = append(v, mp)
this.Put(key, v)
}
return nil
}
// puts all the values from the passed in map into this dynmap
// the passed in map must be convertible to a DynMap via ToDynMap.
// returns false if the passed value is not convertible to a dynmap
func (this *DynMap) PutAll(mp interface{}) bool {
dynmap, ok := ToDynMap(mp)
if !ok {
return false
}
for k, v := range dynmap.Map {
this.Put(k, v)
}
return true
}
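// Editor-added sketch: PutAll merges any value that ToDynMap understands,
// for example a plain map[string]interface{} (an assumption about ToDynMap's
// accepted types); it returns false for anything else.
func examplePutAll(dst *DynMap) {
ok := dst.PutAll(map[string]interface{}{"a": 1, "b": 2})
_ = ok
}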
//
// Puts the value into the map if and only if no value exists at the
// specified key.
// This does not honor the dot operator on insert.
func (this *DynMap) PutIfAbsent(key string, value interface{}) (interface{}, bool) {
v, ok := this.Get(key)
if ok {
return v, false
}
this.Put(key, value)
return value, true
}
//
// Same as PutIfAbsent but honors the dot operator
//
func (this *DynMap) PutIfAbsentWithDot(key string, value interface{}) (interface{}, bool) {
v, ok := this.Get(key)
if ok {
return v, false
}
this.PutWithDot(key, value)
return value, true
}
//
// Puts a value into the map
//
func (this *DynMap) Put(key string, value interface{}) {
this.Map[key] = value
}
//
// puts the value into the map, honoring the dot operator.
// so PutWithDot("map1.map2.value", 100)
// would result in:
// {
// map1 : { map2 : { value: 100 }}
//
// }
func (this *DynMap) PutWithDot(key string, value interface{}) error {
splitStr := strings.Split(key, ".")
if len(splitStr) == 1 {
this.Put(key, value)
return nil
}
mapKeys := splitStr[:(len(splitStr) - 1)]
var mp = this.Map
for _, k := range mapKeys {
tmp, o := mp[k]
if !o {
//create a new map and insert
newmap := make(map[string]interface{})
mp[k] = newmap
mp = newmap
} else {
mp, o = ToMap(tmp)
if !o {
//error
return errors.New("value at key was not a map")
}
}
}
mp[splitStr[len(splitStr)-1]] = value
return nil
}
func (this *DynMap) Exists(key string) bool {
_, ok := this.Get(key)
return ok
}
//Remove a mapping
//TODO: this will need to honor the dot operator!
func (this *DynMap) Remove(key string) (interface{}, bool) {
val, ok := this.Map[key]
if ok {
delete(this.Map, key)
return val, true
}
//TODO: dot op..
return val, false
}
//
// Gets the value, honoring the dot operator if needed.
// key = 'map.map2'
// will first attempt to match the literal key 'map.map2'
// if no value is present it will look for a sub map at key 'map'
//
func (this *DynMap) Get(key string) (interface{}, bool) {
val, ok := this.Map[key]
if ok {
return val, true
}
//look for dot operator.
splitStr := strings.Split(key, ".")
if len(splitStr) == 1 {
return val, false
}
var mp = this.Map
for index, k := range splitStr {
tmp, o := mp[k]
if !o {
return val, ok
}
if index == (len(splitStr) - 1) {
return tmp, o
} else {
mp, o = ToMap(tmp)
if !o {
return val, ok
}
}
}
return val, ok
}
// lex.go
// Copyright 2017 Joel Scoble
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy
// of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package linewrap
import (
"fmt"
"strings"
"unicode/utf8"
)
const (
cr = '\r'
nl = '\n'
tab = '\t'
zeroWidthNoBreakSpace = "\uFEFF"
)
// Pos is a byte position in the original input text.
type Pos int
type token struct {
typ tokenType
pos Pos
len int // length in chars (not bytes)
value string
}
func (t token) String() string {
switch {
case t.typ == tokenEOF:
return "EOF"
case t.typ == tokenError:
return t.value
}
return t.value
}
func (t token) Error() string {
return fmt.Sprintf("lex error at %d: %s", int(t.pos), t.value)
}
type tokenType int
const (
tokenNone tokenType = iota
tokenError
tokenEOF
tokenText // anything that isn't one of the following
tokenZeroWidthNoBreakSpace // U+FEFF used for unwrappable
tokenNL // \n
tokenCR // \r
// unicode tokens we care about, mostly because of breaking rules. The whitespace
// and dash tokens listed may be different than what Go uses in the relevant Go
// unicode tables.
// whitespace tokens from https://www.cs.tut.fi/~jkorpela/chars/spaces.html
//
// exceptions to the table:
// no-break space U+00A0 is not considered whitespace for line break purposes
// narrow no-break space U+202F is not considered whitespace for line break purposes
// zero width no-break space U+FEFF is not considered whitespace for line break purposes
tokenTab // \t
tokenSpace // U+0020
tokenOghamSpaceMark // U+1680
tokenMongolianVowelSeparator // U+180E
tokenEnQuad // U+2000
tokenEmQuad // U+2001
tokenEnSpace // U+2002
tokenEmSpace // U+2003
tokenThreePerEmSpace // U+2004
tokenFourPerEmSpace // U+2005
tokenSixPerEmSpace // U+2006
tokenFigureSpace // U+2007
tokenPunctuationSpace // U+2008
tokenThinSpace // U+2009
tokenHairSpace // U+200A
tokenZeroWidthSpace // U+200B
tokenMediumMathematicalSpace // U+205F
tokenIdeographicSpace // U+3000
// dash tokens from https://www.cs.tut.fi/~jkorpela/dashes.html
// hyphens and dashes in lines breaking rules sections
//
// exceptions to the table:
// tilde U+007E does not cause a line break because of possibility of ~/dir, ~=, etc.
// hyphen minus U+002D this is not supposed to break on a numeric context but no differentiation is done
// minus sign U+2212 does not cause a line break
// wavy dash U+301C does not cause a line break
// wavy dash U+3939 does not cause a line break
// two em dash U+2E3A is not in table but is here.
// three em dash U+2E3B is not in table but is here.
// small em dash U+FE58 is not in table but is here.
// small hyphen-minus U+FE63 is not in table but is here.
// full width hyphen-minus U+FF0D is not in table but is here.
// mongolian todo hyphen U+1806 does not cause a line break because it is a break before char
// presentation form for vertical em dash U+FE31 is not in table but is here.
// presentation form for vertical en dash U+FE32 is not in table but is here.
tokenHyphenMinus // U+002D
tokenSoftHyphen // U+00AD
tokenArmenianHyphen // U+058A
tokenHyphen // U+2010
tokenFigureDash // U+2012
tokenEnDash // U+2013
tokenEmDash // U+2014 can be before or after but only after is supported here
tokenHorizontalBar // U+2015
tokenSwungDash // U+2053
tokenSuperscriptMinus // U+207B
tokenSubScriptMinus // U+208B
tokenTwoEmDash // U+2E3A
tokenThreeEmDash // U+2E3B
tokenPresentationFormForVerticalEmDash // U+FE31
tokenPresentationFormForVerticalEnDash // U+FE32
tokenSmallEmDash // U+FE58
tokenSmallHyphenMinus // U+FE63
tokenFullWidthHyphenMinus // U+FF0D
)
var key = map[string]tokenType{
"\r": tokenCR,
"\n": tokenNL,
"\t": tokenTab,
"\uFEFF": tokenZeroWidthNoBreakSpace,
"\u0020": tokenSpace,
"\u1680": tokenOghamSpaceMark,
"\u180E": tokenMongolianVowelSeparator,
"\u2000": tokenEnQuad,
"\u2001": tokenEmQuad,
"\u2002": tokenEnSpace,
"\u2003": tokenEmSpace,
"\u2004": tokenThreePerEmSpace,
"\u2005": tokenFourPerEmSpace,
"\u2006": tokenSixPerEmSpace,
"\u2007": tokenFigureSpace,
"\u2008": tokenPunctuationSpace,
"\u2009": tokenThinSpace,
"\u200A": tokenHairSpace,
"\u200B": tokenZeroWidthSpace,
"\u205F": tokenMediumMathematicalSpace,
"\u3000": tokenIdeographicSpace,
"\u002D": tokenHyphenMinus,
"\u00AD": tokenSoftHyphen,
"\u058A": tokenArmenianHyphen,
"\u2010": tokenHyphen,
"\u2012": tokenFigureDash,
"\u2013": tokenEnDash,
"\u2014": tokenEmDash,
"\u2015": tokenHorizontalBar,
"\u2053": tokenSwungDash,
"\u207B": tokenSuperscriptMinus,
"\u208B": tokenSubScriptMinus,
"\u2E3A": tokenTwoEmDash,
"\u2E3B": tokenThreeEmDash,
"\uFE31": tokenPresentationFormForVerticalEmDash,
"\uFE32": tokenPresentationFormForVerticalEnDash,
"\uFE58": tokenSmallEmDash,
"\uFE63": tokenSmallHyphenMinus,
"\uFF0D": tokenFullWidthHyphenMinus,
}
var vals = map[tokenType]string{
tokenNone: "none",
tokenError: "error",
tokenEOF: "eof",
tokenText: "text",
tokenZeroWidthNoBreakSpace: "zero width no break space",
tokenNL: "nl",
tokenCR: "cr",
tokenTab: "tab",
tokenSpace: "space",
tokenOghamSpaceMark: "ogham space mark",
tokenMongolianVowelSeparator: "mongolian vowel separator",
tokenEnQuad: "en quad",
tokenEmQuad: "em quad",
tokenEnSpace: "en space",
tokenEmSpace: "em space",
tokenThreePerEmSpace: "three per em space",
tokenFourPerEmSpace: "four per em space",
tokenSixPerEmSpace: "six per em space",
tokenFigureSpace: "figure space",
tokenPunctuationSpace: "punctuation space",
tokenThinSpace: "thin space",
tokenHairSpace: "hair space",
tokenZeroWidthSpace: "zero width space",
tokenMediumMathematicalSpace: "medium mathematical space",
tokenIdeographicSpace: "ideographic space",
tokenHyphenMinus: "hyphen minus",
tokenSoftHyphen: "soft hyphen",
tokenArmenianHyphen: "armenian hyphen",
tokenHyphen: "hyphen",
tokenFigureDash: "figure dash",
tokenEnDash: "en dash",
tokenEmDash: "em dash",
tokenHorizontalBar: "horizontal bar",
tokenSwungDash: "swung dash",
tokenSuperscriptMinus: "superscript minus",
tokenSubScriptMinus: "subscript minus",
tokenTwoEmDash: "two em dash",
tokenThreeEmDash: "three em dash",
tokenPresentationFormForVerticalEmDash: "presentation form for vertical em dash",
tokenPresentationFormForVerticalEnDash: "presentation form for vertical en dash",
tokenSmallEmDash: "small em dash",
tokenSmallHyphenMinus: "small hyphen minus",
tokenFullWidthHyphenMinus: "full width hyphen minus",
}
const eof = -1
const (
classText tokenClass = iota
classCR
classNL
classTab
classSpace
classHyphen
)
type tokenClass int
type stateFn func(*lexer) stateFn
type lexer struct {
input []byte // the string being scanned
state stateFn // the next lexing function to enter
pos Pos // current position of this item
start Pos // start position of this item
width Pos // width of last rune read from input
lastPos Pos // position of most recent item returned by nextItem
runeCnt int // the number of runes in the current token sequence
tokens chan token // channel of scanned tokens
}
func lex(input []byte) *lexer {
l := &lexer{
input: input,
state: lexText,
tokens: make(chan token, 2),
}
go l.run()
return l
}
// next returns the next rune in the input.
func (l *lexer) next() rune {
l.runeCnt++
if int(l.pos) >= len(l.input) {
l.width = 0
return eof
}
r, w := utf8.DecodeRune(l.input[l.pos:])
l.width = Pos(w)
l.pos += l.width
return r
}
// peek returns but does not consume the next rune in the input
func (l *lexer) peek() rune {
r := l.next()
l.backup()
return r
}
// backup steps back one rune. Can be called only once per call of next.
func (l *lexer) backup() {
l.pos -= l.width
l.runeCnt--
}
// emit passes an item back to the client.
func (l *lexer) emit(t tokenType) {
l.tokens <- token{t, l.start, l.runeCnt, string(l.input[l.start:l.pos])}
l.start = l.pos
l.runeCnt = 0
}
// ignore skips over the pending input before this point.
func (l *lexer) ignore() {
l.start = l.pos
l.runeCnt = 0
}
// accept consumes the next rune if it's from the valid set.
func (l *lexer) accept(valid string) bool {
if strings.ContainsRune(valid, l.next()) {
return true
}
l.backup()
return false
}
// acceptRun consumes a run of runes from the valid set.
func (l *lexer) acceptRun(valid string) {
for strings.ContainsRune(valid, l.next()) {
}
l.backup()
}
// error returns an error token and terminates the scan by passing back a nil
// pointer that will be the next state, terminating l.run.
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
l.tokens <- token{tokenError, l.start, 0, fmt.Sprintf(format, args...)}
return nil
}
// nextToken returns the next token from the input.
func (l *lexer) nextToken() token {
token := <-l.tokens
l.lastPos = token.pos
return token
}
// drain the channel so the lex goroutine will exit; called by the caller.
func (l *lexer) drain() {
for range l.tokens {
}
}
// run lexes the input by executing state functions until the state is nil.
func (l *lexer) run() {
for state := l.state; state != nil; {
state = state(l)
}
close(l.tokens) // No more tokens will be delivered
}
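// Editor-added sketch of the intended consumption pattern: pull tokens until
// EOF or an error token, then drain so the lexing goroutine can exit.
func collectTokens(input []byte) []token {
l := lex(input)
defer l.drain()
var out []token
for {
t := l.nextToken()
if t.typ == tokenEOF || t.typ == tokenError {
return out
}
out = append(out, t)
}
}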
// lexText scans non whitespace/hyphen chars.
func lexText(l *lexer) stateFn {
for {
is, class := l.atBreakPoint() // a breakpoint is any char after which a new line can be started
if is {
if l.pos > l.start {
l.emit(tokenText)
}
switch class {
case classCR:
return lexCR
case classNL:
return lexNL
case classSpace:
return lexSpace
case classTab:
return lexTab
case classHyphen:
return lexHyphen
}
}
if l.next() == eof {
l.runeCnt-- // eof doesn't count.
break
}
}
// Correctly reached EOF.
if l.pos > l.start {
l.emit(tokenText)
}
l.emit(tokenEOF) // Useful to make EOF a token
return nil // Stop the run loop.
}
// a breakpoint is any character after which a wrap may occur. If it is a
// breakpoint char, the class of char is returned.
func (l *lexer) atBreakPoint() (breakpoint bool, class tokenClass) {
r, _ := utf8.DecodeRune(l.input[l.pos:])
t, ok := key[string(r)]
if !ok || t <= tokenZeroWidthNoBreakSpace {
return false, classText
}
switch t {
case tokenCR:
return true, classCR
case tokenNL:
return true, classNL
case tokenTab:
return true, classTab
}
if isSpace(t) {
return true, classSpace
}
if isHyphen(t) {
return true, classHyphen
}
// it really shouldn't get to here, but if it does, treat it like classText
return false, classText
}
// lexCR handles a carriage return, `\r`; these are skipped. The prior token
// should already have been emitted and the next token should be a CR, which
// are skipped. The next token is checked to ensure that it really is a CR.
func lexCR(l *lexer) stateFn {
r := l.next()
t := key[string(r)] // don't need to check ok, as the zero value won't match
if t == tokenCR {
l.ignore()
}
return lexText
}
// lexNL handles a new line, `\n`; the prior token should already have been
// emitted and the next token should be a NL. The next token is checked to
// ensure that it really is a NL
func lexNL(l *lexer) stateFn {
r := l.next()
t := key[string(r)] // don't need to check ok, as the zero value won't match
if t == tokenNL {
l.emit(tokenNL)
}
return lexText
}
// lexTab handles a tab, '\t'; the prior token should already have been emitted
// and the next token should be a tab. The next token is checked to ensure that
// it really is a tab.
func lexTab(l *lexer) stateFn {
r := l.next()
t := key[string(r)] // don't need to check ok, as the zero value won't match
if t == tokenTab {
l.emit(tokenTab)
}
return lexText
}
// This scans until end of the space sequence is encountered. If no spaces were
// found, nothing will be emitted. The prior token should already have been
// emitted before this function gets called.
func lexSpace(l *lexer) stateFn {
var i int
// scan until the spaces are consumed
for {
r := l.next()
// ok doesn't need to be checked as the zero value won't be classified as a space.
tkn := key[string(r)]
if !isSpace(tkn) {
break
}
i++
}
if i == 0 { // if no spaces were processed; nothing to emit.
return lexText
}
// otherwise backup to ensure only space tokens are emitted.
l.backup()
l.emit(tokenSpace)
return lexText
}
// Scan until end of the hyphen sequence is encountered. If no hyphens were
// found, nothing will be emitted. The prior token should already have been
// emitted before this function gets called.
func lexHyphen(l *lexer) stateFn {
var i int
// scan until the hyphens are consumed
for {
r := l.next()
// ok doesn't need to be checked as the zero value won't be classified as a hyphen.
tkn := key[string(r)]
if !isHyphen(tkn) {
break
}
i++
}
if i == 0 { // if no hyphens. nothing to emit.
return lexText
}
l.backup()
l.emit(tokenHyphen)
return lexText
}
func isSpace(t tokenType) bool {
if t >= tokenTab && t <= tokenIdeographicSpace {
return true
}
return false
}
func isHyphen(t tokenType) bool {
if t >= tokenHyphenMinus && t <= tokenFullWidthHyphenMinus {
return true
}
return false
}
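// Editor-added sketch: classifying a single rune through the key table, the
// same lookup atBreakPoint performs; vals supplies a human-readable name.
func classifyRune(r rune) string {
t, ok := key[string(r)]
if !ok {
return "text"
}
switch {
case isSpace(t):
return "space (" + vals[t] + ")"
case isHyphen(t):
return "hyphen (" + vals[t] + ")"
}
return vals[t]
}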
// lex.go
// Copyright 2017 Joel Scoble
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy
// of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package linewrap
import (
"fmt"
"strings"
"unicode/utf8"
)
const (
cr = '\r'
nl = '\n'
tab = '\t'
zeroWidthNoBreakSpace = "\uFEFF"
)
// Pos is a byte position in the original input text.
type Pos int
type token struct {
typ tokenType
pos Pos
len int // length in chars (not bytes)
value string
}
func (t token) String() string {
switch {
case t.typ == tokenEOF:
return "EOF"
case t.typ == tokenError:
return t.value
}
return t.value
}
func (t token) Error() string {
return fmt.Sprintf("lex error at %d: %s", int(t.pos), t.value)
}
type tokenType int
const (
tokenNone tokenType = iota
tokenError
tokenEOF
tokenText // anything that isn't one of the following
tokenZeroWidthNoBreakSpace // U+FEFF used for unwrappable
tokenNL // \n
tokenCR // \r
// unicode tokens we care about, mostly because of breaking rules. The whitespace
// and dash tokens listed may be different than what Go uses in the relevant Go
// unicode tables.
// whitespace tokens from https://www.cs.tut.fi/~jkorpela/chars/spaces.html
//
// exceptions to the table:
// no-break space U+00A0 is not considered whitespace for line break purposes
// narrow no-break space U+202F is not considered whitespace for line break purposes
// zero width no-break space U+FEFF is not considered whitespace for line break purposes
tokenTab // \t
tokenSpace // U+0020
tokenOghamSpaceMark // U+1680
tokenMongolianVowelSeparator // U+180E
tokenEnQuad // U+2000
tokenEmQuad // U+2001
tokenEnSpace // U+2002
tokenEmSpace // U+2003
tokenThreePerEmSpace // U+2004
tokenFourPerEmSpace // U+2005
tokenSixPerEmSpace // U+2006
tokenFigureSpace // U+2007
tokenPunctuationSpace // U+2008
tokenThinSpace // U+2009
tokenHairSpace // U+200A
tokenZeroWidthSpace // U+200B
tokenMediumMathematicalSpace // U+205F
tokenIdeographicSpace // U+3000
// dash tokens from https://www.cs.tut.fi/~jkorpela/dashes.html
// hyphens and dashes in lines breaking rules sections
//
// exceptions to the table:
// tilde U+007E does not cause a line break because of possibility of ~/dir, ~=, etc.
// hyphen minus U+002D this is not supposed to break on a numeric context but no differentiation is done
// minus sign U+2212 does not cause a line break
// wavy dash U+301C does not cause a line break
// wavy dash U+3939 does not cause a line break
// two em dash U+2E3A is not in table but is here.
// three em dash U+2E3B is not in table but is here.
// small em dash U+FE58 is not in table but is here.
// small hyphen-minus U+FE63 is not in table but is here.
// full width hyphen-minus U+FF0D is not in table but is here.
// mongolian todo hyphen U+1806 does not cause a line break because it is a break before char
// presentation form for vertical em dash U+FE31 is not in table but is here.
// presentation form for vertical en dash U+FE32 is not in table but is here.
tokenHyphenMinus // U+002D
tokenSoftHyphen // U+00AD
tokenArmenianHyphen // U+058A
tokenHyphen // U+2010
tokenFigureDash // U+2012
tokenEnDash // U+2013
tokenEmDash // U+2014 can be before or after but only after is supported here
tokenHorizontalBar // U+2015
tokenSwungDash // U+2053
tokenSuperscriptMinus // U+207B
tokenSubScriptMinus // U+208B
tokenTwoEmDash // U+2E3A
tokenThreeEmDash // U+2E3B
tokenPresentationFormForVerticalEmDash // U+FE31
tokenPresentationFormForVerticalEnDash // U+FE32
tokenSmallEmDash // U+FE58
tokenSmallHyphenMinus // U+FE63
tokenFullWidthHyphenMinus // U+FF0D
)
var key = map[string]tokenType{
"\r": tokenCR,
"\n": tokenNL,
"\t": tokenTab,
"\uFEFF": tokenZeroWidthNoBreakSpace,
"\u0020": tokenSpace,
"\u1680": tokenOghamSpaceMark,
"\u180E": tokenMongolianVowelSeparator,
"\u2000": tokenEnQuad,
"\u2001": tokenEmQuad,
"\u2002": tokenEnSpace,
"\u2003": tokenEmSpace,
"\u2004": tokenThreePerEmSpace,
"\u2005": tokenFourPerEmSpace,
"\u2006": tokenSixPerEmSpace,
"\u2007": tokenFigureSpace,
"\u2008": tokenPunctuationSpace,
"\u2009": tokenThinSpace,
"\u200A": tokenHairSpace,
"\u200B": tokenZeroWidthSpace,
"\u205F": tokenMediumMathematicalSpace,
"\u3000": tokenIdeographicSpace,
"\u002D": tokenHyphenMinus,
"\u00AD": tokenSoftHyphen,
"\u058A": tokenArmenianHyphen,
"\u2010": tokenHyphen,
"\u2012": tokenFigureDash,
"\u2013": tokenEnDash,
"\u2014": tokenEmDash,
"\u2015": tokenHorizontalBar,
"\u2053": tokenSwungDash,
"\u207B": tokenSuperscriptMinus,
"\u208B": tokenSubScriptMinus,
"\u2E3A": tokenTwoEmDash,
"\u2E3B": tokenThreeEmDash,
"\uFE31": tokenPresentationFormForVerticalEmDash,
"\uFE32": tokenPresentationFormForVerticalEnDash,
"\uFE58": tokenSmallEmDash,
"\uFE63": tokenSmallHyphenMinus,
"\uFF0D": tokenFullWidthHyphenMinus,
}
var vals = map[tokenType]string{
tokenNone: "none",
tokenError: "error",
tokenEOF: "eof",
tokenText: "text",
tokenZeroWidthNoBreakSpace: "zero width no break space",
tokenNL: "nl",
tokenCR: "cr",
tokenTab: "tab",
tokenSpace: "space",
tokenOghamSpaceMark: "ogham space mark",
tokenMongolianVowelSeparator: "mongolian vowel separator",
tokenEnQuad: "en quad",
tokenEmQuad: "em quad",
tokenEnSpace: "en space",
tokenEmSpace: "em space",
tokenThreePerEmSpace: "three per em space",
tokenFourPerEmSpace: "four per em space",
tokenSixPerEmSpace: "six per em space",
tokenFigureSpace: "figure space",
tokenPunctuationSpace: "punctuation space",
tokenThinSpace: "thin space",
tokenHairSpace: "hair space",
tokenZeroWidthSpace: "zero width space",
tokenMediumMathematicalSpace: "medium mathematical space",
tokenIdeographicSpace: "ideographic space",
tokenHyphenMinus: "hyphen minus",
tokenSoftHyphen: "soft hyphen",
tokenArmenianHyphen: "armenian hyphen",
tokenHyphen: "hyphen",
tokenFigureDash: "figure dash",
tokenEnDash: "en dash",
tokenEmDash: "em dash",
tokenHorizontalBar: "horizontal bar",
tokenSwungDash: "swung dash",
tokenSuperscriptMinus: "superscript minus",
tokenSubScriptMinus: "subscript minus",
tokenTwoEmDash: "two em dash",
tokenThreeEmDash: "three em dash",
tokenPresentationFormForVerticalEmDash: "presentation form for vertical em dash",
tokenPresentationFormForVerticalEnDash: "presentation form for vertical en dash",
tokenSmallEmDash: "small em dash",
tokenSmallHyphenMinus: "small hyphen minus",
tokenFullWidthHyphenMinus: "full width hyphen minus",
}
const eof = -1
const (
classText tokenClass = iota
classCR
classNL
classTab
classSpace
classHyphen
)
type tokenClass int
type stateFn func(*lexer) stateFn
type lexer struct {
input []byte // the string being scanned
state stateFn // the next lexing function to enter
pos Pos // current position of this item
start Pos // start position of this item
width Pos // width of last rune read from input
lastPos Pos // position of most recent item returned by nextItem
runeCnt int // the number of runes in the current token sequence
tokens chan token // channel of scanned tokens
}
func lex(input []byte) *lexer {
l := &lexer{
input: input,
state: lexText,
tokens: make(chan token, 2),
}
go l.run()
return l
}
// next returns the next rune in the input.
func (l *lexer) next() rune {
l.runeCnt++
if int(l.pos) >= len(l.input) {
l.width = 0
return eof
}
r, w := utf8.DecodeRune(l.input[l.pos:])
l.width = Pos(w)
l.pos += l.width
return r
}
// peek returns but does not consume the next rune in the input
func (l *lexer) peek() rune {
r := l.next()
l.backup()
return r
}
// backup steps back one rune. Can be called only once per call of next.
func (l *lexer) backup() {
l.pos -= l.width
l.runeCnt--
}
// emit passes an item back to the client.
func (l *lexer) emit(t tokenType) {
l.tokens <- token{t, l.start, l.runeCnt, string(l.input[l.start:l.pos])}
l.start = l.pos
l.runeCnt = 0
}
// ignore skips over the pending input before this point.
func (l *lexer) ignore() {
l.start = l.pos
l.runeCnt = 0
}
// accept consumes the next rune if it's from the valid set.
func (l *lexer) accept(valid string) bool {
if strings.ContainsRune(valid, l.next()) {
return true
}
l.backup()
return false
}
// acceptRun consumes a run of runes from the valid set.
func (l *lexer) acceptRun(valid string) {
for strings.ContainsRune(valid, l.next()) {
}
l.backup()
}
// error returns an error token and terminates the scan by passing back a nil
// pointer that will be the next state, terminating l.run.
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
l.tokens <- token{tokenError, l.start, 0, fmt.Sprintf(format, args...)}
return nil
}
// nextToken returns the next token from the input.
func (l *lexer) nextToken() token {
token := <-l.tokens
l.lastPos = token.pos
return token
}
// drain the channel so the lex goroutine will exit; called by the caller.
func (l *lexer) drain() {
for range l.tokens {
}
}
// run lexes the input by executing state functions until the state is nil.
func (l *lexer) run() {
for state := l.state; state != nil; {
state = state(l)
}
close(l.tokens) // No more tokens will be delivered
}
// lexText scans non whitespace/hyphen chars.
func lexText(l *lexer) stateFn {
for {
is, class := l.atBreakPoint() // a breakpoint is any char after which a new line can be started
if is {
if l.pos > l.start {
l.emit(tokenText)
}
switch class {
case classCR:
return lexCR
case classNL:
return lexNL
case classSpace:
return lexSpace
case classTab:
return lexTab
case classHyphen:
return lexHyphen
}
}
if l.next() == eof {
l.runeCnt-- // eof doesn't count.
break
}
}
// Correctly reached EOF.
if l.pos > l.start {
l.emit(tokenText)
}
l.emit(tokenEOF) // Useful to make EOF a token
return nil // Stop the run loop.
}
// a breakpoint is any character after which a wrap may occur. If it is a
// breakpoint char, the class of char is returned.
func (l *lexer) atBreakPoint() (breakpoint bool, class tokenClass) {
r, _ := utf8.DecodeRune(l.input[l.pos:])
t, ok := key[string(r)]
if !ok || t <= tokenZeroWidthNoBreakSpace {
return false, classText
}
switch t {
case tokenCR:
return true, classCR
case tokenNL:
return true, classNL
case tokenTab:
return true, classTab
}
if isSpace(t) {
return true, classSpace
}
if isHyphen(t) {
return true, classHyphen
}
// it really shouldn't get to here, but if it does, treat it like classText
return false, classText
}
// lexCR handles a carriage return, `\r`; these are skipped. The prior token
// should already have been emitted and the next token should be a CR, which
// are skipped. The next token is checked to ensure that it really is a CR.
func lexCR(l *lexer) stateFn {
r := l.next()
t := key[string(r)] // don't need to check ok, as the zero value won't match
if t == tokenCR {
l.ignore()
}
return lexText
}
// lexNL handles a new line, `\n`; the prior token should already have been
// emitted and the next token should be a NL. The next token is checked to
// ensure that it really is a NL
func lexNL(l *lexer) stateFn {
r := l.next()
t := key[string(r)] // don't need to check ok, as the zero value won't match
if t == tokenNL {
l.emit(tokenNL)
}
return lexText
}
// lexTab handles a tab, '\t'; the prior token should already have been emitted
// and the next token should be a tab. The next token is checked to ensure that
// it really is a tab.
func lexTab(l *lexer) stateFn {
r := l.next()
t := key[string(r)] // don't need to check ok, as the zero value won't match
if t == tokenTab {
l.emit(tokenTab)
}
return lexText
}
// This scans until end of the space sequence is encountered. If no spaces were
// found, nothing will be emitted. The prior token should already have been
// emitted before this function gets called.
func lexSpace(l *lexer) stateFn {
var i int
// scan until the spaces are consumed
for {
r := l.next()
// ok doesn't need to be checked as the zero value won't be classified as a space.
tkn := key[string(r)]
if !isSpace(tkn) {
break
}
i++
}
if i == 0 { // if no spaces were processed; nothing to emit.
return lexText
}
// otherwise backup to ensure only space tokens are emitted.
l.backup()
l.emit(tokenSpace)
return lexText
}
// Scan until end of the hyphen sequence is encountered. If no hyphens were
// found, nothing will be emitted. The prior token should already have been
// emitted before this function gets called.
func lexHyphen(l *lexer) stateFn {
var i int
// scan until the hyphens are consumed
for {
r := l.next()
// ok doesn't need to be checked as the zero value won't be classified as a hyphen.
tkn := key[string(r)]
if !isHyphen(tkn) {
break
}
i++
}
if i == 0 { // if no hyphens. nothing to emit.
return lexText
}
l.backup()
l.emit(tokenHyphen)
return lexText
}
func isSpace(t tokenType) bool {
if t >= tokenTab && t <= tokenIdeographicSpace {
return true
}
return false
}
func isHyphen(t tokenType) bool {
if t >= tokenHyphenMinus && t <= tokenFullWidthHyphenMinus {
return true
}
return false
}
// lex.go
// Copyright 2017 Joel Scoble
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy
// of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package linewrap
import (
"fmt"
"strings"
"unicode/utf8"
)
const (
cr = '\r'
nl = '\n'
tab = '\t'
zeroWidthNoBreakSpace = "\uFEFF"
)
// Pos is a byte position in the original input text.
type Pos int
type token struct {
typ tokenType
pos Pos
len int // length in chars (not bytes)
value string
}
func (t token) String() string {
switch {
case t.typ == tokenEOF:
return "EOF"
case t.typ == tokenError:
return t.value
}
return t.value
}
func (t token) Error() string {
return fmt.Sprintf("lex error at %d: %s", int(t.pos), t.value)
}
type tokenType int
const (
tokenNone tokenType = iota
tokenError
tokenEOF
tokenText // anything that isn't one of the following
tokenZeroWidthNoBreakSpace // U+FEFF used for unwrappable
tokenNL // \n
tokenCR // \r
// unicode tokens we care about, mostly because of breaking rules. The whitespace
// and dash tokens listed may be different than what Go uses in the relevant Go
// unicode tables.
// whitespace tokens from https://www.cs.tut.fi/~jkorpela/chars/spaces.html
//
// exceptions to the table:
// no-break space U+00A0 is not considered whitespace for line break purposes
// narrow no-break space U+202F is not considered whitespace for line break purposes
// zero width no-break space U+FEFF is not considered whitespace for line break purposes
tokenTab // \t
tokenSpace // U+0020
tokenOghamSpaceMark // U+1680
tokenMongolianVowelSeparator // U+180E
tokenEnQuad // U+2000
tokenEmQuad // U+2001
tokenEnSpace // U+2002
tokenEmSpace // U+2003
tokenThreePerEmSpace // U+2004
tokenFourPerEmSpace // U+2005
tokenSixPerEmSpace // U+2006
tokenFigureSpace // U+2007
tokenPunctuationSpace // U+2008
tokenThinSpace // U+2009
tokenHairSpace // U+200A
tokenZeroWidthSpace // U+200B
tokenMediumMathematicalSpace // U+205F
tokenIdeographicSpace // U+3000
// dash tokens from https://www.cs.tut.fi/~jkorpela/dashes.html
// hyphens and dashes in lines breaking rules sections
//
// exceptions to the table:
// tilde U+007E does not cause a line break because of possibility of ~/dir, ~=, etc.
// hyphen minus U+002D this is not supposed to break on a numeric context but no differentiation is done
// minus sign U+2212 does not cause a line break
// wavy dash U+301C does not cause a line break
// wavy dash U+3939 does not cause a line break
// two em dash U+2E3A is not in table but is here.
// three em dash U+2E3B is not in table but is here.
// small em dash U+FE58 is not in table but is here.
// small hyphen-minus U+FE63 is not in table but is here.
// full width hyphen-minus U+FF0D is not in table but is here.
// mongolian todo hyphen U+1806 does not cause a line break because it is a break before char
// presentation form for vertical em dash U+FE31 is not in table but is here.
// presentation form for vertical en dash U+FE32 is not in table but is here.
tokenHyphenMinus // U+002D
tokenSoftHyphen // U+00AD
tokenArmenianHyphen // U+058A
tokenHyphen // U+2010
tokenFigureDash // U+2012
tokenEnDash // U+2013
tokenEmDash // U+2014 can be before or after but only after is supported here
tokenHorizontalBar // U+2015
tokenSwungDash // U+2053
tokenSuperscriptMinus // U+207B
tokenSubScriptMinus // U+208B
tokenTwoEmDash // U+2E3A
tokenThreeEmDash // U+2E3B
tokenPresentationFormForVerticalEmDash // U+FE31
tokenPresentationFormForVerticalEnDash // U+FE32
tokenSmallEmDash // U+FE58
tokenSmallHyphenMinus // U+FE63
tokenFullWidthHyphenMinus // U+FF0D
)
var key = map[string]tokenType{
"\r": tokenCR,
"\n": tokenNL,
"\t": tokenTab,
"\uFEFF": tokenZeroWidthNoBreakSpace,
"\u0020": tokenSpace,
"\u1680": tokenOghamSpaceMark,
"\u180E": tokenMongolianVowelSeparator,
"\u2000": tokenEnQuad,
"\u2001": tokenEmQuad,
"\u2002": tokenEnSpace,
"\u2003": tokenEmSpace,
"\u2004": tokenThreePerEmSpace,
"\u2005": tokenFourPerEmSpace,
"\u2006": tokenSixPerEmSpace,
"\u2007": tokenFigureSpace,
"\u2008": tokenPunctuationSpace,
"\u2009": tokenThinSpace,
"\u200A": tokenHairSpace,
"\u200B": tokenZeroWidthSpace,
"\u205F": tokenMediumMathematicalSpace,
"\u3000": tokenIdeographicSpace,
"\u002D": tokenHyphenMinus,
"\u00AD": tokenSoftHyphen,
"\u058A": tokenArmenianHyphen,
"\u2010": tokenHyphen,
"\u2012": tokenFigureDash,
"\u2013": tokenEnDash,
"\u2014": tokenEmDash,
"\u2015": tokenHorizontalBar,
"\u2053": tokenSwungDash,
"\u207B": tokenSuperscriptMinus,
"\u208B": tokenSubScriptMinus,
"\u2E3A": tokenTwoEmDash,
"\u2E3B": tokenThreeEmDash,
"\uFE31": tokenPresentationFormForVerticalEmDash,
"\uFE32": tokenPresentationFormForVerticalEnDash,
"\uFE58": tokenSmallEmDash,
"\uFE63": tokenSmallHyphenMinus,
"\uFF0D": tokenFullWidthHyphenMinus,
}
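// vals maps token types to human-readable names.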
var vals = map[tokenType]string{
tokenNone: "none",
tokenError: "error",
tokenEOF: "eof",
tokenText: "text",
tokenZeroWidthNoBreakSpace: "zero width no break space",
tokenNL: "nl",
tokenCR: "cr",
tokenTab: "tab",
tokenSpace: "space",
tokenOghamSpaceMark: "ogham space mark",
tokenMongolianVowelSeparator: "mongolian vowel separator",
tokenEnQuad: "en quad",
tokenEmQuad: "em quad",
tokenEnSpace: "en space",
tokenEmSpace: "em space",
tokenThreePerEmSpace: "three per em space",
tokenFourPerEmSpace: "four per em space",
tokenSixPerEmSpace: "six per em space",
tokenFigureSpace: "figure space",
tokenPunctuationSpace: "punctuation space",
tokenThinSpace: "thin space",
tokenHairSpace: "hair space",
tokenZeroWidthSpace: "zero width space",
tokenMediumMathematicalSpace: "medium mathematical space",
tokenIdeographicSpace: "ideographic space",
tokenHyphenMinus: "hyphen minus",
tokenSoftHyphen: "soft hyphen",
tokenArmenianHyphen: "armenian hyphen",
tokenHyphen: "hyphen",
tokenFigureDash: "figure dash",
tokenEnDash: "en dash",
tokenEmDash: "em dash",
tokenHorizontalBar: "horizontal bar",
tokenSwungDash: "swung dash",
tokenSuperscriptMinus: "superscript minus",
tokenSubScriptMinus: "subscript minus",
tokenTwoEmDash: "two em dash",
tokenThreeEmDash: "three em dash",
tokenPresentationFormForVerticalEmDash: "presentation form for vertical em dash",
tokenPresentationFormForVerticalEnDash: "presentation form for vertical en dash",
tokenSmallEmDash: "small em dash",
tokenSmallHyphenMinus: "small hyphen minus",
tokenFullWidthHyphenMinus: "full width hyphen minus",
}
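// eof is returned by next when the input is exhausted.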
const eof = -1
const (
classText tokenClass = iota
classCR
classNL
classTab
classSpace
classHyphen
)
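// tokenClass groups token types into the coarse categories lexText dispatches on.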
type tokenClass int
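// stateFn represents the state of the lexer as a function that returns the next state.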
type stateFn func(*lexer) stateFn
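// lexer holds the scanning state; tokens are delivered to the consumer over the tokens channel.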
type lexer struct {
input []byte // the string being scanned
state stateFn // the next lexing function to enter
pos Pos // current position of this item
start Pos // start position of this item
width Pos // width of last rune read from input
lastPos Pos // position of most recent item returned by nextItem
runeCnt int // the number of runes in the current token sequence
tokens chan token // channel of scanned tokens
}
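// lex creates a lexer for input and starts its scanning goroutine.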
func lex(input []byte) *lexer {
l := &lexer{
input: input,
state: lexText,
tokens: make(chan token, 2),
}
go l.run()
return l
}
// next returns the next rune in the input.
func (l *lexer) next() rune {
l.runeCnt++
if int(l.pos) >= len(l.input) {
l.width = 0
return eof
}
r, w := utf8.DecodeRune(l.input[l.pos:])
l.width = Pos(w)
l.pos += l.width
return r
}
// peek returns but does not consume the next rune in the input
func (l *lexer) peek() rune {
r := l.next()
l.backup()
return r
}
// backup steps back one rune. Can be called only once per call of next.
func (l *lexer) backup() {
l.pos -= l.width
l.runeCnt--
}
// emit passes an item back to the client.
func (l *lexer) emit(t tokenType) {
l.tokens <- token{t, l.start, l.runeCnt, string(l.input[l.start:l.pos])}
l.start = l.pos
l.runeCnt = 0
}
// ignore skips over the pending input before this point.
func (l *lexer) ignore() {
l.start = l.pos
l.runeCnt = 0
}
// accept consumes the next rune if it's from the valid set.
func (l *lexer) accept(valid string) bool {
if strings.ContainsRune(valid, l.next()) {
return true
}
l.backup()
return false
}
// acceptRun consumes a run of runes from the valid set.
func (l *lexer) acceptRun(valid string) {
for strings.ContainsRune(valid, l.next()) {
}
l.backup()
}
// error returns an error token and terminates the scan by passing back a nil
// pointer that will be the next state, terminating l.run.
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
l.tokens <- token{tokenError, l.start, 0, fmt.Sprintf(format, args...)}
return nil
}
// nextToken returns the next token from the input.
func (l *lexer) nextToken() token {
token := <-l.tokens
l.lastPos = token.pos
return token
}
// drain the channel so the lex go routine will exit: called by caller.
func (l *lexer) drain() {
for range l.tokens {
}
}
// run lexes the input by executing state functions until the state is nil.
func (l *lexer) run() {
for state := lexText; state != nil; {
state = state(l)
}
close(l.tokens) // No more tokens will be delivered
}
// lexText scans non whitespace/hyphen chars.
func lexText(l *lexer) stateFn {
for {
is, class := l.atBreakPoint() // a breakpoint is any char after which a line break can occur
if is {
if l.pos > l.start {
l.emit(tokenText)
}
switch class {
case classCR:
return lexCR
case classNL:
return lexNL
case classSpace:
return lexSpace
case classTab:
return lexTab
case classHyphen:
return lexHyphen
}
}
if l.next() == eof {
l.runeCnt-- // eof doesn't count.
break
}
}
// Correctly reached EOF.
if l.pos > l.start {
l.emit(tokenText)
}
l.emit(tokenEOF) // Useful to make EOF a token
return nil // Stop the run loop.
}
// a breakpoint is any character after which a wrap may occur. If it is a
// breakpoint char, the type of char is returned.
func (l *lexer) atBreakPoint() (breakpoint bool, class tokenClass) {
r, _ := utf8.DecodeRune(l.input[l.pos:])
t, ok := key[string(r)]
if !ok || t <= tokenZeroWidthNoBreakSpace {
return false, classText
}
switch t {
case tokenCR:
return true, classCR
case tokenNL:
return true, classNL
case tokenTab:
return true, classTab
}
if isSpace(t) {
return true, classSpace
}
if isHyphen(t) {
return true, classHyphen
}
// it really shouldn't get to here, but if it does, treat it like classText
return false, classText
}
// lexCR handles a carriage return, `\r`; CRs are skipped. The prior token
// should already have been emitted. The next rune is checked to ensure that
// it really is a CR before it is discarded.
func lexCR(l *lexer) stateFn {
r := l.next()
t := key[string(r)] // don't need to check ok, as the zero value won't match
if t == tokenCR {
l.ignore()
}
return lexText
}
// lexNL handles a new line, `\n`; the prior token should already have been
// emitted and the next token should be a NL. The next token is checked to
// ensure that it really is a NL
func lexNL(l *lexer) stateFn {
r := l.next()
t := key[string(r)] // don't need to check ok, as the zero value won't match
if t == tokenNL {
l.emit(tokenNL)
}
return lexText
}
// lexTab handles a tab, '\t'; the prior token should already have been emitted
// and the next token should be a tab. The next token is checked to ensure that
// it really is a tab.
func lexTab(l *lexer) stateFn {
r := l.next()
t := key[string(r)] // don't need to check ok, as the zero value won't match
if t == tokenTab {
l.emit(tokenTab)
}
return lexText
}
// lexSpace scans until the end of the space sequence is encountered. If no spaces were
// found, nothing will be emitted. The prior token should already have been
// emitted before this function gets called.
func lexSpace(l *lexer) stateFn {
var i int
// scan until the spaces are consumed
for {
r := l.next()
// ok doesn't need to be checked as the zero value won't be classified as a space.
tkn := key[string(r)]
if !isSpace(tkn) {
break
}
i++
}
if i == 0 { // if no spaces were processed; nothing to emit.
return lexText
}
// otherwise backup to ensure only space tokens are emitted.
l.backup()
l.emit(tokenSpace)
return lexText
}
// lexHyphen scans until the end of the hyphen sequence is encountered. If no hyphens were
// found, nothing will be emitted. The prior token should already have been
// emitted before this function gets called.
func lexHyphen(l *lexer) stateFn {
var i int
// scan until the hyphens are consumed
for {
r := l.next()
// ok doesn't need to be checked as the zero value won't be classified as a hyphen.
tkn := key[string(r)]
if !isHyphen(tkn) {
break
}
i++
}
if i == 0 { // if no hyphens were processed; nothing to emit.
return lexText
}
l.backup()
l.emit(tokenHyphen)
return lexText
}
func isSpace(t tokenType) bool {
return t >= tokenTab && t <= tokenIdeographicSpace
}
func isHyphen(t tokenType) bool {
return t >= tokenHyphenMinus && t <= tokenFullWidthHyphenMinus
}
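// A minimal consumption sketch (illustrative only; it assumes a caller in the
// same package, since nothing here is exported):
//
//	l := lex([]byte("wrap this text"))
//	for {
//		tok := l.nextToken()
//		if tok.typ == tokenEOF || tok.typ == tokenError {
//			break
//		}
//		fmt.Println(vals[tok.typ], tok.value)
//	}
//	l.drain() // let the lexing goroutine exit if tokens remain buffered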
| {
return false, classText
} | conditional_block |
se-spam-helper.user.js
// ==UserScript==
// @name Stack Exchange spam helper
// @description filter for the stack exchange real time question viewer,
// @description aiding in identification and removal of network-wide obvious spam
// @include http://stackexchange.com/questions?tab=realtime
// @version 3.1.6
// ==/UserScript==
/* global unsafeWindow, GM_xmlhttpRequest, GM_openInTab, GM_setClipboard */
/* jshint loopfunc:true, jquery:true */
(function(window){
var $ = window.$;
var Notification = window.Notification;
var StackExchange = window.StackExchange;
var is = {
mostlyUppercase : function(str){
return (str.match(/[A-Z]/g)||[]).length > (str.match(/[a-z]/g)||[]).length;
}
};
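// QUEUE_TIMEOUT: how long a per-site question queue may buffer before its
// answers are fetched from the API; WEBSOCKET_TIMEOUT: how long the socket
// may stay silent before it is torn down and re-established.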
var QUEUE_TIMEOUT = 12 * 60 * 60 * 1000;
var WEBSOCKET_TIMEOUT = 6 * 60 * 1000;
var ws, wsRefreshTimeout;
(function wsRefresh(){
//refresh the official stream
StackExchange.realtime.init("ws://qa.sockets.stackexchange.com");
//schedule a reconnect in case the new socket stays silent
wsRefreshTimeout = setTimeout(wsRefresh, 30000);
//establish our own socket
ws = new WebSocket("ws://qa.sockets.stackexchange.com");
ws.onmessage = function(){
clearTimeout(wsRefreshTimeout);
wsRefreshTimeout = setTimeout(wsRefresh, WEBSOCKET_TIMEOUT);
onMessage.apply(this, arguments);
};
ws.onerror = function(){
console.log.apply(console, ["console.error"].concat(arguments));
$("#mainArea").load(location.href + " #mainArea", scrapePage);
};
ws.onopen = function(){
ws.send("155-questions-active");
for(var site in siteWebsocketIDs){
if(siteWebsocketIDs[site]){
ws.send(siteWebsocketIDs[site] + "-questions-active");
}
}
};
})();
var css = document.createElement("style");
document.head.appendChild(css);
var daily_css = document.createElement("style");
document.head.appendChild(daily_css);
var hours = 1000 * 60 * 60;
(function resetDailyCss(){
daily_css.textContent = "";
ooflagSites = {};
atGMT(0, resetDailyCss);
})();
var menu;
var notification_granted;
var imgPool = new ElementPool();
var notifiedOf = {}, notifiedOfToday = {};
var ooflagSites = {};
var questionQueue = {};
var siteWebsocketIDs = {};
var sitesByWebsocketID = {};
var onQuestionQueueTimeout = flushQuestionQueue;
var checkAnswer = checkPost, checkQuestion = checkPost;
menu_init();
notification_init();
window.addEventListener("unload", onbeforeunload);
scrapePage();
function atGMT(time, func){
var day = 24 * hours;
// normalize into [0, day) so a time already past today schedules for tomorrow
var timeLeft = ((time - Date.now()) % day + day) % day;
setTimeout(func, timeLeft);
}
function onMessage(e){
var response = JSON.parse(e.data);
var data = response.data && JSON.parse(response.data);
if(response.action === "hb"){
ws.send("hb");
} else if(response.action === "155-questions-active"){
onQuestionActive(parseRealtimeSocket(data));
} else if(response.action.match(/\d+-questions-active/)){
scrapePerSiteQuestion(data.body, sitesByWebsocketID[data.siteid]);
} else {
console.log("unknown response type: %s in %o", response.action, response);
}
}
function scrapePage(){
$(".realtime-question:visible").each(function(){
var qLink = this.querySelector("a.realtime-question-url");
onQuestionActive({
body: undefined,
link: qLink.href,
site: hostNameToSiteName(qLink.hostname),
tags: $(".post-tag", this).map(function(){return this.textContent;}),
title: $("h2", this).text().trim(),
question_id: qLink.href.match(/\/questions\/(\d+)\//)[1],
});
});
hiderInstall();
}
function scrapePerSiteQuestion(html, site){
var question = new DOMParser().parseFromString(html, "text/html")
.getElementsByClassName("question-summary")[0];
var qLink = "http://" + siteNameToHostName(site)
+ question.querySelector("a.question-hyperlink").getAttribute("href");
onQuestionActive({
body: $(".excerpt", question).html().trim(),
link: qLink,
site: site,
tags: $(".post-tag", question).map(function(){return this.textContent;}),
title: $("h3 a", question).text().trim(),
question_id: question.id.split("-").pop(),
});
}
function checkSiteHasSocket(site){
if(siteWebsocketIDs[site] === undefined){
siteWebsocketIDs[site] = false; // prevent double fetching
GM_xmlhttpRequest({
method: "GET",
url: "http://" + siteNameToHostName(site),
ontimeout: checkSiteHasSocket.bind(null, site),
onerror: function(response) {
console.log(response);
checkSiteHasSocket(site); // retry
},
onload: function(response){
var scripts = (new DOMParser())
.parseFromString(response.response, "text/html")
.head.querySelectorAll("script:not([src])");
[].forEach.call(scripts, function(script){
var match = /StackExchange\.realtime\.subscribeToActiveQuestions\(["']?(\d+)/.exec(script.innerHTML);
if(match){
siteWebsocketIDs[site] = match[1];
sitesByWebsocketID[match[1]] = site;
}
});
if(siteWebsocketIDs[site]){
console.log("the ID for %s is %o", site, siteWebsocketIDs[site]);
ws.send(siteWebsocketIDs[site] + "-questions-active");
} else {
console.log("could not find the ID for %s", site);
}
}
});
}
}
function parseRealtimeSocket(wsData){
return{
body: wsData.bodySummary,
link: wsData.url,
site: wsData.apiSiteParameter,
tags: wsData.tags,
title: htmlUnescape(wsData.titleEncodedFancy),
question_id: wsData.id,
};
}
function onQuestionActive(qData){
checkQuestion(qData);
hiderInstall();
checkSiteHasSocket(qData.site);
questionQueuePush(qData);
}
function questionQueuePush(qData){
var site = qData.site;
var id = qData.question_id;
var queue = questionQueue[site] = questionQueue[site] || {site:site, questions:{}, length:0};
if(!queue.questions[id]) queue.length++;
queue.questions[id] = qData;
if(queue.length >= 100){
flushQuestionQueue(queue);
}else{
if(!queue.timeout){
queue.timeout = setTimeout(onQuestionQueueTimeout.bind(null, queue), QUEUE_TIMEOUT);
}
}
}
function flushQuestionQueue(queue){
var ids = Object.keys(queue.questions);
queue.length = 0;
queue.questions = {};
clearTimeout(queue.timeout);
queue.timeout = null;
console.log("requesting answers for " + ids.length + " questions on " + queue.site);
seApiCall("questions", ids.join(";"), {
filter: "!*7Pmg7yi0JKTUuaBigtbGINmVtEq",
site: queue.site})
.then(function(response){
response.items.forEach(function(question){
question.site = queue.site;
question.title = htmlUnescape(question.title);
checkQuestion(question);
if(question.answers) question.answers.forEach(function(answer){
checkAnswer(question, answer);
});
});
});
}
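// checkPost applies crude spam heuristics to a question, or to one of its
// answers when given: profanity, mostly-uppercase text, inline e-mail
// addresses, and (for questions) spammy title keywords. Hits are highlighted
// via CSS and raise a desktop notification.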
function checkPost(question, answer){
var title = question.title;
var host = question.site;
var site = hostNameToSiteName(host);
var site_class = "realtime-" + siteToClass(site);
var classname = site_class + "-" + question.question_id;
var q_body = $("<div/>", {html: question.body});
var a_body; if(answer) a_body = $("<div/>", {html: answer.body});
var text = answer ? a_body.text() : title + "\n" + q_body.text();
var id = answer ? answer.answer_id : question.question_id;
var link = answer ? answer.link : question.link;
if(!notifiedOf[site]) notifiedOf[site] = {};
if(!notifiedOf[site][id]){
if(/\b(ass(hole)?|bitch|crap|damn|dumb(ass)?|fag|fuck|idiot|motherfucker|nigga|shit(hole)?|stupid|whore)e?s?\b/i.test(text) ||
is.mostlyUppercase(text) ||
/\w+@(\w+\.)+\w{2,}/.test(text.replace(/\s/,'')) ||
!answer && (
site == "meta" || site == "drupal" ||
/(?:[^a-hj-np-z ] *){9,}/i.test(title) ||
is.mostlyUppercase(title) ||
/\b(vs?|l[ae]|live|watch|free|cheap|online|best|nike|buy|replica|here is|porn|packers|movers|slim|concord|black magic|vashikaran|baba(ji)?|\d+s|kgl|fifa|escort|swtor)\b/i.test(title)
)
){
css.textContent += "." + classname + " {background-color: #FCC}\n";
notify(site, title,
answer ? "A - " + a_body.text() :
question.body ? "Q - " + q_body.text() :
undefined,
link);
}
notifiedOf[site][id] = true;
if(!notifiedOfToday[site]) notifiedOfToday[site] = {};
notifiedOfToday[site][id] = true;
}
}
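// hiderInstall adds an "out of spam flags" icon to each realtime question;
// clicking it hides that site's entries until the daily CSS reset.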
function hiderInstall(){
var children = document.getElementById("mainArea").children;
for(var i = 0; i < children.length; i++){
if(children[i].getElementsByClassName("spam-helper-site-hider").length) break;
var match = children[i].className.match(/(realtime-[-a-z]+)-\d+/);
if(!match) break;
var siteClass = match[1];
var hider = imgPool.get(function(){
var hider = document.createElement("img");
hider.src = "https://raw.github.com/honnza/se-spam-helper/master/no-flag.png";
hider.title = "I'm out of spam flags for today here";
hider.className = "spam-helper-site-hider";
hider.style.cursor = "pointer";
return hider;
});
hider.onclick = function(siteClass){
daily_css.textContent += "." + siteClass + " {display: none}\n";
ooflagSites[siteClass] = true;
}.bind(null, siteClass);
children[i].getElementsByClassName("hot-question-site-icon")[0].appendChild(hider);
children[i].classList.add(siteClass);
}
}
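// notify raises a clickable desktop notification that opens the post in a new
// tab and copies its URL, unless notifications are disabled or the site has
// been marked out-of-flags.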
function notify(site, title, body, url){
if(notification_granted && !ooflagSites[site]){
var notification = new Notification(title, {
icon: classToImageUrl(siteToClass(site)),
body: body || ''
});
notification.onclick = function(){
GM_openInTab(url);
GM_setClipboard(url);
};
}
}
function menu_init(){
menu = document.createElement("div");
menu.id = "spam-helper-menu";
var a = document.createElement("a");
a.href = "#";
a.id = "spam-helper-menu-a";
a.textContent = "spam helper";
a.onclick = function(){
if(menu.parentElement){
document.body.removeChild(menu);
}else{
document.body.appendChild(menu);
menu.style.top = a.offsetTop + 2 * a.offsetHeight + "px";
menu.style.left = a.offsetLeft + "px";
}
};
var wrapper = document.getElementsByClassName('topbar-wrapper')[0];
var links = document.getElementsByClassName('topbar-links')[0];
wrapper.insertBefore(menu, links);
css.textContent +=
"#spam-helper-menu {display: inline-block; padding-top:7px}" +
"#spam-helper-menu > span {display: block; width: 150px; color: white}" +
"#spam-helper-menu > span > input { vertical-align: -2px; }";
}
function notification_init(){
notification_granted = JSON.parse(localStorage.getItem("spam-helper-notification_granted")) || false;
var cb = document.createElement("input");
cb.type = "checkbox";
cb.checked = notification_granted;
cb.id = "spamhelpernotificationcb";
cb.onchange = function(){
if(cb.checked){
Notification.requestPermission(function(permission){
notification_granted = (permission === "granted");
localStorage.setItem("spam-helper-notification_granted", notification_granted);
});
}else{
notification_granted = false;
localStorage.setItem("spam-helper-notification_granted", false);
}
};
var label = document.createElement("label");
label.textContent = "enable notifications";
label.htmlFor = "spamhelpernotificationcb";
var span = document.createElement("span");
span.appendChild(cb);
span.appendChild(label);
menu.appendChild(span);
}
// ElementPool hands out reusable DOM elements, recycling ones that have been
// detached from the document.
function ElementPool(){
var queue = [];
return {
constructor: ElementPool,
get: function(func){
var r;
for(var i = 0; i < queue.length; i++){
if(!document.contains(queue[i])){
r = queue.splice(i,1)[0];
break;
}
}
r = r || func();
queue.push(r);
return r;
}
};
}
var apiQueue = new Mutex();
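// seApiCall(...path, options) pages through a Stack Exchange API route under
// the apiQueue mutex, honoring backoff and quota, and resolves with
// {items, partial}. Illustrative call (the site value is an assumption):
//   seApiCall("questions", "1;2;3", {site: "stackoverflow"})
//     .then(function(r){ console.log(r.items.length); });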
function seApiCall(/* path..., options */){
var path = [].slice.call(arguments);
var options = path.pop();
var partialOk = options.partialOk;
delete options.partialOk;
var responseDeferred = $.Deferred();
var results = [];
(function getPage(page){
apiQueue.enqueue(function(){
var apiQueueDeferred = $.Deferred();
options.pagesize = 100;
options.page = page;
console.log("fired request");
GM_xmlhttpRequest({
method: "GET",
url: "http://api.stackexchange.com/2.2/" + path.join('/') + "?" + $.param(options),
ontimeout: getPage.bind(null, page),
onerror: function(response) {
console.log(response);
getPage(page); // retry
},
onload: function(response) {
response = JSON.parse(response.responseText);
if(response.error_message) throw response.error_message;
console.log("got response, remaining quota: " + response.quota_remaining);
[].push.apply(results, response.items);
if(response.has_more && !partialOk){
console.log("need more pages");
getPage(page + 1);
}else{
console.log("collected " + results.length + " results");
responseDeferred.resolve({items: results, partial: !!response.has_more});
}
if(!response.quota_remaining){
alert ("I'm out of API quota!");
atGMT(10*hours, function(){apiQueueDeferred.resolve();});
}else if(response.backoff){
console.log("got backoff! " + response.backoff);
setTimeout(function(){apiQueueDeferred.resolve();}, response.backoff * 1000);
}else{
apiQueueDeferred.resolve();
}
}
});
return apiQueueDeferred.promise();
});
})(1);
return responseDeferred.promise();
}
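// Mutex serializes asynchronous jobs: each function passed to enqueue runs
// only after the promise returned by the previous one has settled.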
function Mutex(){
var mutex = {
lock: $.Deferred().resolve(),
enqueue: function(func){
//change to `then` when SE upgrades to jQuery 1.8+
mutex.lock = mutex.lock.pipe(func, func);
}
};
return mutex;
}
function siteToClass(site){
var exceptions = {
"mathoverflow.net":"mathoverflow"
};
return exceptions[site] || site.replace(/\./, '-');
}
function classToImageUrl(site){
var exceptions = {
"answers-onstartups":"onstartups",
"meta": "stackexchangemeta",
"pt-stackoverflow":"br",
};
site = exceptions[site] || site;
site = site.replace(/^meta\-(.*)/, "$1meta"); //TODO: is this outdated?
return "//cdn.sstatic.net/" + site + "/img/icon-48.png";
}
function hostNameToSiteName(host){
var match;
if((match = host.match(/(\w+)\.stackexchange\.com/))) return match[1];
if((match = host.match(/(\w+)\.com/))) return match[1];
return host;
}
function siteNameToHostName(site){
var SLDSites = ["askubuntu", "stackapps", "superuser", "serverfault", "stackoverflow", "pt.stackoverflow"];
if(SLDSites.indexOf(site) !== -1) return site + ".com";
else if(site.indexOf(".") !== -1) return site;
else return site + ".stackexchange.com";
}
function htmlUnescape(html){
return $("<div>").html(html).text();
}
})(unsafeWindow || window);
se-spam-helper.user.js | // ==UserScript==
// @name Stack Exchange spam helper
// @description filter for the stack exchange real time question viewer,
// @description aiding in identification and removal of network-wide obvious spam
// @include http://stackexchange.com/questions?tab=realtime
// @version 3.1.6
// ==/UserScript==
/* global unsafeWindow, GM_xmlhttpRequest, GM_openInTab, GM_setClipboard */
/* jshint loopfunc:true, jquery:true */
(function(window){
var $ = window.$;
var Notification = window.Notification;
var StackExchange = window.StackExchange;
debugger;
var is = {
mostlyUppercase : function(str){
return (str.match(/[A-Z]/g)||[]).length > (str.match(/[a-z]/g)||[]).length;
}
};
var QUEUE_TIMEOUT = 12 * 60 * 60 * 1000;
var WEBSOCKET_TIMEOUT = 6 * 60 * 1000;
var ws, wsRefreshTimeout;
(function wsRefresh(){
//refresh the official stream
StackExchange.realtime.init("ws://qa.sockets.stackexchange.com");
//establish our own socket
wsRefreshTimeout = setTimeout(wsRefresh, 30000);
ws = new WebSocket("ws://qa.sockets.stackexchange.com");
ws.onmessage = function(){
clearTimeout(wsRefreshTimeout);
wsRefreshTimeout = setTimeout(wsRefresh, WEBSOCKET_TIMEOUT);
onMessage.apply(this, arguments);
};
ws.onerror = function(){
console.log.apply(console, ["console.error"].concat(arguments));
$("#mainArea").load(location.href + " #mainArea", scrapePage);
};
ws.onopen = function(){
ws.send("155-questions-active");
for(var site in siteWebsocketIDs){
if(siteWebsocketIDs[site]){
ws.send(siteWebsocketIDs[site] + "-questions-active");
}
}
};
})();
var css = document.createElement("style");
document.head.appendChild(css);
var daily_css = document.createElement("style");
document.head.appendChild(daily_css);
var hours = 1000 * 60 * 60;
(function resetDailyCss(){
daily_css.textContent = "";
ooflagSites = {};
atGMT(0, resetDailyCss);
})();
var menu;
var notification_granted;
var imgPool = new ElementPool();
var notifiedOf = {}, notifiedOfToday = {};
var ooflagSites = {};
var questionQueue = {};
var siteWebsocketIDs = {};
var sitesByWebsocketID = {};
var onQuestionQueueTimeout = flushQuestionQueue;
var checkAnswer = checkPost, checkQuestion = checkPost;
menu_init();
notification_init();
window.addEventListener("unload", onbeforeunload);
scrapePage();
function atGMT(time, func){
var timeLeft = (time - Date.now()) % (24 * hours);
setTimeout(func, timeLeft);
}
function onMessage(e){
var response = JSON.parse(e.data);
var data = response.data && JSON.parse(response.data);
if(response.action === "hb"){
ws.send("hb");
} else if(response.action === "155-questions-active"){
onQuestionActive(parseRealtimeSocket(data));
} else if(response.action.match(/\d+-questions-active/)){
scrapePerSiteQuestion(data.body, sitesByWebsocketID[data.siteid]);
} else {
console.log("unknown response type: %s in %o", response.action, response);
}
}
function scrapePage(){
$(".realtime-question:visible").each(function(){
var qLink = this.querySelector("a.realtime-question-url");
onQuestionActive({
body: undefined,
link: qLink.href,
site: hostNameToSiteName(qLink.hostname),
tags: $(".post-tag", this).map(function(){return this.textContent;}),
title: $("h2", this).text().trim(),
question_id: qLink.href.match(/\/questions\/(\d+)\//)[1],
});
});
hiderInstall();
}
function scrapePerSiteQuestion(html, site){
var question = new DOMParser().parseFromString(html, "text/html")
.getElementsByClassName("question-summary")[0];
var qLink = "http://" + siteNameToHostName(site)
+ question.querySelector("a.question-hyperlink").getAttribute("href");
onQuestionActive({
body: $(".excerpt", question).html().trim(),
link: qLink,
site: site,
tags: $(".post-tag", question).map(function(){return this.textContent;}),
title: $("h3 a", question).text().trim(),
question_id: question.id.split("-").pop(),
});
}
function checkSiteHasSocket(site){
if(siteWebsocketIDs[site] === undefined){
siteWebsocketIDs[site] = false; // prevent double fetching
GM_xmlhttpRequest({
method: "GET",
url: "http://" + siteNameToHostName(site),
ontimeout: checkSiteHasSocket.bind(null, site),
onerror: function(response) {
console.log(response);
checkSiteHasSocket(site); // retry
},
onload: function(response){
var scripts = (new DOMParser())
.parseFromString(response.response, "text/html")
.head.querySelectorAll("script:not([src])");
[].forEach.call(scripts, function(script){
var match = /StackExchange\.realtime\.subscribeToActiveQuestions\(["']?(\d+)/.exec(script.innerHTML);
if(match){
siteWebsocketIDs[site] = match[1];
sitesByWebsocketID[match[1]] = site;
}
});
if(siteWebsocketIDs[site]){
console.log("the ID for %s is %o", site, siteWebsocketIDs[site]);
ws.send(siteWebsocketIDs[site] + "-questions-active");
} else {
console.log("could not find the ID for %s", site);
}
}
});
}
}
function parseRealtimeSocket(wsData){
return{
body: wsData.bodySummary,
link: wsData.url,
site: wsData.apiSiteParameter,
tags: wsData.tags,
title: htmlUnescape(wsData.titleEncodedFancy),
question_id: wsData.id,
};
}
function onQuestionActive(qData){
checkQuestion(qData);
hiderInstall();
checkSiteHasSocket(qData.site);
questionQueuePush(qData);
}
function questionQueuePush(qData){
var site = qData.site;
var id = qData.question_id;
var queue = questionQueue[site] = questionQueue[site] || {site:site, questions:{}, length:0};
if(!queue.questions[id]) queue.length++;
queue.questions[id] = qData;
if(queue.length >= 100){
flushQuestionQueue(queue);
}else{
if(!queue.timeout){
queue.timeout = setTimeout(onQuestionQueueTimeout.bind(null, queue), QUEUE_TIMEOUT);
}
}
}
function flushQuestionQueue(queue){
var ids = Object.keys(queue.questions);
queue.length = 0;
queue.questions = {};
clearTimeout(queue.timeout);
queue.timeout = null;
console.log("requesting answers for " + ids.length + " questions on " + queue.site);
seApiCall("questions", ids.join(";"), {
filter: "!*7Pmg7yi0JKTUuaBigtbGINmVtEq",
site: queue.site})
.then(function(response){
response.items.forEach(function(question){
question.site = queue.site;
question.title = htmlUnescape(question.title);
checkQuestion(question);
if(question.answers) question.answers.forEach(function(answer){
checkAnswer(question, answer);
});
});
});
}
function checkPost(question, answer){var title = question.title;
var host = question.site;
var site = hostNameToSiteName(host);
var site_class = "realtime-" + siteToClass(site);
var classname = site_class + "-" + question.question_id;
var q_body = $("<div/>", {html: question.body});
var a_body; if(answer) a_body = $("<div/>", {html: answer.body});
var text = answer ? a_body.text() : title + "\n" + q_body.text();
var id = answer ? answer.answer_id : question.question_id;
var link = answer ? answer.link : question.link;
if(!notifiedOf[site]) notifiedOf[site] = {};
if(!notifiedOf[site][id]){
if(/\b(ass(hole)?|bitch|crap|damn|dumb(ass)?|fag|fuck|idiot|motherfucker|nigga|shit(hole)?|stupid|whore)e?s?\b/i.test(text) ||
is.mostlyUppercase(text) ||
/\w+@(\w+\.)+\w{2,}/.test(text.replace(/\s/,'')) ||
!answer && (
site == "meta" || site == "drupal" ||
/(?:[^a-hj-np-z ] *){9,}/i.test(title) ||
is.mostlyUppercase(title) ||
/\b(vs?|l[ae]|live|watch|free|cheap|online|best|nike|buy|replica|here is|porn|packers|movers|slim|concord|black magic|vashikaran|baba(ji)?|\d+s|kgl|fifa|escort|swtor)\b/i.test(title)
)
){
css.textContent += "." + classname + " {background-color: #FCC}\n";
notify(site, title,
answer ? "A - " + a_body.text() :
question.body ? "Q - " + q_body.text() :
undefined,
link);
}
notifiedOf[site][id] = true;
if(!notifiedOfToday[site]) notifiedOfToday[site] = {};
notifiedOfToday[site][id] = true;
}
}
function hiderInstall(){
var children = document.getElementById("mainArea").children;
for(var i = 0; i < children.length; i++){
if(children[i].getElementsByClassName("spam-helper-site-hider").length) break;
var match = children[i].className.match(/(realtime-[-a-z]+)-\d+/);
if(!match) break;
var siteClass = match[1];
var hider = imgPool.get(function(){
var hider = document.createElement("img");
hider.src = "https://raw.github.com/honnza/se-spam-helper/master/no-flag.png";
hider.title = "I'm out of spam flags for today here";
hider.className = "spam-helper-site-hider";
hider.style.cursor = "pointer";
return hider;
});
hider.onclick = function(siteClass){
daily_css.textContent += "." + siteClass + " {display: none}\n";
ooflagSites[siteClass] = true;
}.bind(null, siteClass);
children[i].getElementsByClassName("hot-question-site-icon")[0].appendChild(hider);
children[i].classList.add(siteClass);
}
}
function notify(site, title, body, url){
if(notification_granted && !ooflagSites[site]){
var notification = new Notification(title, {
icon: classToImageUrl(siteToClass(site)),
body: body || ''
});
notification.onclick = function(){
GM_openInTab(url);
GM_setClipboard(url);
};
}
}
function menu_init(){
menu = document.createElement("div");
menu.id = "spam-helper-menu";
var a = document.createElement("a");
a.href = "#";
a.id = "spam-helper-menu-a";
a.textContent = "spam helper";
a.onclick = function(){
if(menu.parentElement){
document.body.removeChild(menu);
}else{
document.body.appendChild(menu);
menu.style.top = a.offsetTop + 2 * a.offsetHeight + "px";
menu.style.left = a.offsetLeft + "px";
}
};
var wrapper = document.getElementsByClassName('topbar-wrapper')[0];
var links = document.getElementsByClassName('topbar-links')[0];
wrapper.insertBefore(menu, links);
css.textContent +=
"#spam-helper-menu {display: inline-block; padding-top:7px}" +
"#spam-helper-menu > span {display: block; width: 150px; color: white}" +
"#spam-helper-menu > span > input { vertical-align: -2px; }";
}
function notification_init(){
notification_granted = JSON.parse(localStorage.getItem("spam-helper-notification_granted")) || false;
var cb = document.createElement("input");
cb.type = "checkbox";
cb.checked = notification_granted;
cb.id = "spamhelpernotificationcb";
cb.onchange = function(){
if(cb.checked){
Notification.requestPermission(function(permission){
notification_granted = (permission === "granted");
localStorage.setItem("spam-helper-notification_granted", notification_granted);
});
}else{
notification_granted = false;
localStorage.setItem("spam-helper-notification_granted", false);
}
};
var label = document.createElement("label");
label.textContent = "enable notifications";
label.htmlFor = "spamhelpernotificationcb";
var span = document.createElement("span");
span.appendChild(cb);
span.appendChild(label);
menu.appendChild(span);
}
//
function ElementPool(){
var queue = [];
return {
constructor: ElementPool,
get: function(func){
var r;
for(var i = 0; i < queue.length; i++){
if(!document.contains(queue[i])){
r = queue.splice(i,1)[0];
break;
}
}
r = r || func();
queue.push(r);
return r;
}
};
}
var apiQueue = new Mutex();
function seApiCall(/* path..., options */){
var path = [].slice.call(arguments);
var options = path.pop();
var partialOk = options.partialOk;
delete options.partialOk;
var responseDeferred = $.Deferred();
var results = [];
(function getPage(page){
apiQueue.enqueue(function(){
var apiQueueDeferred = $.Deferred();
options.pagesize = 100;
options.page = page;
console.log("fired request");
GM_xmlhttpRequest({
method: "GET", | ontimeout: getPage.bind(null, page),
onerror: function(response) {
console.log(response);
getPage(page); // retry
},
onload: function(response) {
response = JSON.parse(response.responseText);
if(response.error_message) throw response.error_message;
console.log("got response, remaining quota: " + response.quota_remaining);
[].push.apply(results, response.items);
if(response.has_more && !partialOk){
console.log("need more pages");
getPage(page + 1);
}else{
console.log("collected " + results.length + " results");
responseDeferred.resolve({items: results, partial: !!response.has_more});
}
if(!response.quota_remaining){
alert ("I'm out of API quota!");
atGMT(10*hours, function(){apiQueueDeferred.resolve();});
}else if(response.backoff){
console.log("got backoff! " + response.backoff);
setTimeout(function(){apiQueueDeferred.resolve();}, response.backoff * 1000);
}else{
apiQueueDeferred.resolve();
}
}
});
return apiQueueDeferred.promise();
});
})(1);
return responseDeferred.promise();
}
function Mutex(){
var mutex = {
lock: $.Deferred().resolve(),
enqueue: function(func){
//change to `then` when SE upgrades to jQuery 1.8+
mutex.lock = mutex.lock.pipe(func, func);
}
};
return mutex;
}
function siteToClass(site){
var exceptions = {
"mathoverflow.net":"mathoverflow"
};
return exceptions[site] || site.replace(/\./, '-');
}
function classToImageUrl(site){
var exceptions = {
"answers-onstartups":"onstartups",
"meta": "stackexchangemeta",
"pt-stackoverflow":"br",
};
site = exceptions[site] || site;
site = site.replace(/^meta\-(.*)/, "$1meta"); //TODO: is this outdated?
return "//cdn.sstatic.net/" + site + "/img/icon-48.png";
}
function hostNameToSiteName(host){
var match;
if((match = host.match(/(\w+)\.stackexchange\.com/))) return match[1];
if((match = host.match(/(\w+)\.com/))) return match[1];
return host;
}
function siteNameToHostName(site){
var SLDSites = ["askubuntu", "stackapps", "superuser", "serverfault", "stackoverflow", "pt.stackoverflow"];
if(SLDSites.indexOf(site) !== -1) return site + ".com";
else if(site.indexOf(".") !== -1) return site;
else return site + ".stackexchange.com";
}
function htmlUnescape(html){
return $("<div>").html(html).text();
}
})(unsafeWindow || window); | url: "http://api.stackexchange.com/2.2/" + path.join('/') + "?" + $.param(options), | random_line_split |
se-spam-helper.user.js | // ==UserScript==
// @name Stack Exchange spam helper
// @description filter for the stack exchange real time question viewer,
// @description aiding in identification and removal of network-wide obvious spam
// @include http://stackexchange.com/questions?tab=realtime
// @version 3.1.6
// ==/UserScript==
/* global unsafeWindow, GM_xmlhttpRequest, GM_openInTab, GM_setClipboard */
/* jshint loopfunc:true, jquery:true */
(function(window){
var $ = window.$;
var Notification = window.Notification;
var StackExchange = window.StackExchange;
debugger;
var is = {
mostlyUppercase : function(str){
return (str.match(/[A-Z]/g)||[]).length > (str.match(/[a-z]/g)||[]).length;
}
};
var QUEUE_TIMEOUT = 12 * 60 * 60 * 1000;
var WEBSOCKET_TIMEOUT = 6 * 60 * 1000;
var ws, wsRefreshTimeout;
(function wsRefresh(){
//refresh the official stream
StackExchange.realtime.init("ws://qa.sockets.stackexchange.com");
//establish our own socket
wsRefreshTimeout = setTimeout(wsRefresh, 30000);
ws = new WebSocket("ws://qa.sockets.stackexchange.com");
ws.onmessage = function(){
clearTimeout(wsRefreshTimeout);
wsRefreshTimeout = setTimeout(wsRefresh, WEBSOCKET_TIMEOUT);
onMessage.apply(this, arguments);
};
ws.onerror = function(){
console.log.apply(console, ["console.error"].concat(arguments));
$("#mainArea").load(location.href + " #mainArea", scrapePage);
};
ws.onopen = function(){
ws.send("155-questions-active");
for(var site in siteWebsocketIDs){
if(siteWebsocketIDs[site]){
ws.send(siteWebsocketIDs[site] + "-questions-active");
}
}
};
})();
var css = document.createElement("style");
document.head.appendChild(css);
var daily_css = document.createElement("style");
document.head.appendChild(daily_css);
var hours = 1000 * 60 * 60;
(function resetDailyCss(){
daily_css.textContent = "";
ooflagSites = {};
atGMT(0, resetDailyCss);
})();
var menu;
var notification_granted;
var imgPool = new ElementPool();
var notifiedOf = {}, notifiedOfToday = {};
var ooflagSites = {};
var questionQueue = {};
var siteWebsocketIDs = {};
var sitesByWebsocketID = {};
var onQuestionQueueTimeout = flushQuestionQueue;
var checkAnswer = checkPost, checkQuestion = checkPost;
menu_init();
notification_init();
window.addEventListener("unload", onbeforeunload);
scrapePage();
function atGMT(time, func){
var timeLeft = (time - Date.now()) % (24 * hours);
setTimeout(func, timeLeft);
}
function onMessage(e){
var response = JSON.parse(e.data);
var data = response.data && JSON.parse(response.data);
if(response.action === "hb"){
ws.send("hb");
} else if(response.action === "155-questions-active"){
onQuestionActive(parseRealtimeSocket(data));
} else if(response.action.match(/\d+-questions-active/)){
scrapePerSiteQuestion(data.body, sitesByWebsocketID[data.siteid]);
} else {
console.log("unknown response type: %s in %o", response.action, response);
}
}
function scrapePage(){
$(".realtime-question:visible").each(function(){
var qLink = this.querySelector("a.realtime-question-url");
onQuestionActive({
body: undefined,
link: qLink.href,
site: hostNameToSiteName(qLink.hostname),
tags: $(".post-tag", this).map(function(){return this.textContent;}),
title: $("h2", this).text().trim(),
question_id: qLink.href.match(/\/questions\/(\d+)\//)[1],
});
});
hiderInstall();
}
function scrapePerSiteQuestion(html, site){
var question = new DOMParser().parseFromString(html, "text/html")
.getElementsByClassName("question-summary")[0];
var qLink = "http://" + siteNameToHostName(site)
+ question.querySelector("a.question-hyperlink").getAttribute("href");
onQuestionActive({
body: $(".excerpt", question).html().trim(),
link: qLink,
site: site,
tags: $(".post-tag", question).map(function(){return this.textContent;}),
title: $("h3 a", question).text().trim(),
question_id: question.id.split("-").pop(),
});
}
function checkSiteHasSocket(site){
if(siteWebsocketIDs[site] === undefined){
siteWebsocketIDs[site] = false; // prevent double fetching
GM_xmlhttpRequest({
method: "GET",
url: "http://" + siteNameToHostName(site),
ontimeout: checkSiteHasSocket.bind(null, site),
onerror: function(response) {
console.log(response);
checkSiteHasSocket(site); // retry
},
onload: function(response){
var scripts = (new DOMParser())
.parseFromString(response.response, "text/html")
.head.querySelectorAll("script:not([src])");
[].forEach.call(scripts, function(script){
var match = /StackExchange\.realtime\.subscribeToActiveQuestions\(["']?(\d+)/.exec(script.innerHTML);
if(match){
siteWebsocketIDs[site] = match[1];
sitesByWebsocketID[match[1]] = site;
}
});
if(siteWebsocketIDs[site]){
console.log("the ID for %s is %o", site, siteWebsocketIDs[site]);
ws.send(siteWebsocketIDs[site] + "-questions-active");
} else {
console.log("could not find the ID for %s", site);
}
}
});
}
}
function parseRealtimeSocket(wsData){
return{
body: wsData.bodySummary,
link: wsData.url,
site: wsData.apiSiteParameter,
tags: wsData.tags,
title: htmlUnescape(wsData.titleEncodedFancy),
question_id: wsData.id,
};
}
function onQuestionActive(qData){
checkQuestion(qData);
hiderInstall();
checkSiteHasSocket(qData.site);
questionQueuePush(qData);
}
function questionQueuePush(qData){
var site = qData.site;
var id = qData.question_id;
var queue = questionQueue[site] = questionQueue[site] || {site:site, questions:{}, length:0};
if(!queue.questions[id]) queue.length++;
queue.questions[id] = qData;
if(queue.length >= 100){
flushQuestionQueue(queue);
}else{
if(!queue.timeout){
queue.timeout = setTimeout(onQuestionQueueTimeout.bind(null, queue), QUEUE_TIMEOUT);
}
}
}
function flushQuestionQueue(queue){
var ids = Object.keys(queue.questions);
queue.length = 0;
queue.questions = {};
clearTimeout(queue.timeout);
queue.timeout = null;
console.log("requesting answers for " + ids.length + " questions on " + queue.site);
seApiCall("questions", ids.join(";"), {
filter: "!*7Pmg7yi0JKTUuaBigtbGINmVtEq",
site: queue.site})
.then(function(response){
response.items.forEach(function(question){
question.site = queue.site;
question.title = htmlUnescape(question.title);
checkQuestion(question);
if(question.answers) question.answers.forEach(function(answer){
checkAnswer(question, answer);
});
});
});
}
function checkPost(question, answer){var title = question.title;
var host = question.site;
var site = hostNameToSiteName(host);
var site_class = "realtime-" + siteToClass(site);
var classname = site_class + "-" + question.question_id;
var q_body = $("<div/>", {html: question.body});
var a_body; if(answer) a_body = $("<div/>", {html: answer.body});
var text = answer ? a_body.text() : title + "\n" + q_body.text();
var id = answer ? answer.answer_id : question.question_id;
var link = answer ? answer.link : question.link;
if(!notifiedOf[site]) notifiedOf[site] = {};
if(!notifiedOf[site][id]){
if(/\b(ass(hole)?|bitch|crap|damn|dumb(ass)?|fag|fuck|idiot|motherfucker|nigga|shit(hole)?|stupid|whore)e?s?\b/i.test(text) ||
is.mostlyUppercase(text) ||
/\w+@(\w+\.)+\w{2,}/.test(text.replace(/\s/,'')) ||
!answer && (
site == "meta" || site == "drupal" ||
/(?:[^a-hj-np-z ] *){9,}/i.test(title) ||
is.mostlyUppercase(title) ||
/\b(vs?|l[ae]|live|watch|free|cheap|online|best|nike|buy|replica|here is|porn|packers|movers|slim|concord|black magic|vashikaran|baba(ji)?|\d+s|kgl|fifa|escort|swtor)\b/i.test(title)
)
){
css.textContent += "." + classname + " {background-color: #FCC}\n";
notify(site, title,
answer ? "A - " + a_body.text() :
question.body ? "Q - " + q_body.text() :
undefined,
link);
}
notifiedOf[site][id] = true;
if(!notifiedOfToday[site]) notifiedOfToday[site] = {};
notifiedOfToday[site][id] = true;
}
}
function hiderInstall(){
var children = document.getElementById("mainArea").children;
for(var i = 0; i < children.length; i++){
if(children[i].getElementsByClassName("spam-helper-site-hider").length) break;
var match = children[i].className.match(/(realtime-[-a-z]+)-\d+/);
if(!match) break;
var siteClass = match[1];
var hider = imgPool.get(function(){
var hider = document.createElement("img");
hider.src = "https://raw.github.com/honnza/se-spam-helper/master/no-flag.png";
hider.title = "I'm out of spam flags for today here";
hider.className = "spam-helper-site-hider";
hider.style.cursor = "pointer";
return hider;
});
hider.onclick = function(siteClass){
daily_css.textContent += "." + siteClass + " {display: none}\n";
ooflagSites[siteClass] = true;
}.bind(null, siteClass);
children[i].getElementsByClassName("hot-question-site-icon")[0].appendChild(hider);
children[i].classList.add(siteClass);
}
}
function notify(site, title, body, url){
if(notification_granted && !ooflagSites[site]){
var notification = new Notification(title, {
icon: classToImageUrl(siteToClass(site)),
body: body || ''
});
notification.onclick = function(){
GM_openInTab(url);
GM_setClipboard(url);
};
}
}
function menu_init(){
menu = document.createElement("div");
menu.id = "spam-helper-menu";
var a = document.createElement("a");
a.href = "#";
a.id = "spam-helper-menu-a";
a.textContent = "spam helper";
a.onclick = function(){
if(menu.parentElement){
document.body.removeChild(menu);
}else{
document.body.appendChild(menu);
menu.style.top = a.offsetTop + 2 * a.offsetHeight + "px";
menu.style.left = a.offsetLeft + "px";
}
};
var wrapper = document.getElementsByClassName('topbar-wrapper')[0];
var links = document.getElementsByClassName('topbar-links')[0];
wrapper.insertBefore(menu, links);
css.textContent +=
"#spam-helper-menu {display: inline-block; padding-top:7px}" +
"#spam-helper-menu > span {display: block; width: 150px; color: white}" +
"#spam-helper-menu > span > input { vertical-align: -2px; }";
}
function notification_init(){
notification_granted = JSON.parse(localStorage.getItem("spam-helper-notification_granted")) || false;
var cb = document.createElement("input");
cb.type = "checkbox";
cb.checked = notification_granted;
cb.id = "spamhelpernotificationcb";
cb.onchange = function(){
if(cb.checked){
Notification.requestPermission(function(permission){
notification_granted = (permission === "granted");
localStorage.setItem("spam-helper-notification_granted", notification_granted);
});
}else{
notification_granted = false;
localStorage.setItem("spam-helper-notification_granted", false);
}
};
var label = document.createElement("label");
label.textContent = "enable notifications";
label.htmlFor = "spamhelpernotificationcb";
var span = document.createElement("span");
span.appendChild(cb);
span.appendChild(label);
menu.appendChild(span);
}
//
function ElementPool(){
var queue = [];
return {
constructor: ElementPool,
get: function(func){
var r;
for(var i = 0; i < queue.length; i++){
if(!document.contains(queue[i])){
r = queue.splice(i,1)[0];
break;
}
}
r = r || func();
queue.push(r);
return r;
}
};
}
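// Usage sketch, added for illustration (not in the original, never invoked):
// ElementPool recycles DOM nodes. get() scans the pool for a node that is no
// longer attached to the document and reuses it; otherwise it calls the
// supplied factory and remembers the result.
function exampleElementPool(){
var pool = new ElementPool();
var img = pool.get(function(){ return document.createElement("img"); });
document.body.appendChild(img);
// While `img` is attached, the next get() builds a fresh node; once `img`
// is detached again, the pool hands the same element back out.
return pool.get(function(){ return document.createElement("img"); });
}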
var apiQueue = new Mutex();
function seApiCall(/* path..., options */){
var path = [].slice.call(arguments);
var options = path.pop();
var partialOk = options.partialOk;
delete options.partialOk;
var responseDeferred = $.Deferred();
var results = [];
(function getPage(page){
apiQueue.enqueue(function(){
var apiQueueDeferred = $.Deferred();
options.pagesize = 100;
options.page = page;
console.log("fired request");
GM_xmlhttpRequest({
method: "GET",
url: "http://api.stackexchange.com/2.2/" + path.join('/') + "?" + $.param(options),
ontimeout: getPage.bind(null, page),
onerror: function(response) {
console.log(response);
getPage(page); // retry
},
onload: function(response) {
response = JSON.parse(response.responseText);
if(response.error_message) throw response.error_message;
console.log("got response, remaining quota: " + response.quota_remaining);
[].push.apply(results, response.items);
if(response.has_more && !partialOk){
console.log("need more pages");
getPage(page + 1);
}else |
if(!response.quota_remaining){
alert("I'm out of API quota!");
atGMT(10*hours, function(){apiQueueDeferred.resolve();});
}else if(response.backoff){
console.log("got backoff! " + response.backoff);
setTimeout(function(){apiQueueDeferred.resolve();}, response.backoff * 1000);
}else{
apiQueueDeferred.resolve();
}
}
});
return apiQueueDeferred.promise();
});
})(1);
return responseDeferred.promise();
}
function Mutex(){
var mutex = {
lock: $.Deferred().resolve(),
enqueue: function(func){
//change to `then` when SE upgrades to jQuery 1.8+
mutex.lock = mutex.lock.pipe(func, func);
}
};
return mutex;
}
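// Sketch, added for illustration (not in the original, never invoked): the
// Mutex serializes asynchronous work by chaining every task onto one jQuery
// Deferred, so each task starts only after the previous one settles.
function exampleMutex(){
var m = new Mutex();
m.enqueue(function(){ return $.Deferred().resolve("first").promise(); });
m.enqueue(function(){ console.log("runs only after the first task settles"); });
}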
function siteToClass(site){
var exceptions = {
"mathoverflow.net":"mathoverflow"
};
return exceptions[site] || site.replace(/\./g, '-');
}
function classToImageUrl(site){
var exceptions = {
"answers-onstartups":"onstartups",
"meta": "stackexchangemeta",
"pt-stackoverflow":"br",
};
site = exceptions[site] || site;
site = site.replace(/^meta\-(.*)/, "$1meta"); //TODO: is this outdated?
return "//cdn.sstatic.net/" + site + "/img/icon-48.png";
}
function hostNameToSiteName(host){
var match;
if((match = host.match(/(\w+)\.stackexchange\.com/))) return match[1];
if((match = host.match(/(\w+)\.com/))) return match[1];
return host;
}
function siteNameToHostName(site){
var SLDSites = ["askubuntu", "stackapps", "superuser", "serverfault", "stackoverflow", "pt.stackoverflow"];
if(SLDSites.indexOf(site) !== -1) return site + ".com";
else if(site.indexOf(".") !== -1) return site;
else return site + ".stackexchange.com";
}
function htmlUnescape(html){
return $("<div>").html(html).text();
}
})(unsafeWindow || window);
| {
console.log("collected " + results.length + " results");
responseDeferred.resolve({items: results, partial: !!response.has_more});
} | conditional_block |
se-spam-helper.user.js | // ==UserScript==
// @name Stack Exchange spam helper
// @description filter for the stack exchange real time question viewer, aiding in identification and removal of network-wide obvious spam
// @include http://stackexchange.com/questions?tab=realtime
// @version 3.1.6
// ==/UserScript==
/* global unsafeWindow, GM_xmlhttpRequest, GM_openInTab, GM_setClipboard */
/* jshint loopfunc:true, jquery:true */
(function(window){
var $ = window.$;
var Notification = window.Notification;
var StackExchange = window.StackExchange;
debugger;
var is = {
mostlyUppercase : function(str){
return (str.match(/[A-Z]/g)||[]).length > (str.match(/[a-z]/g)||[]).length;
}
};
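// Example, added for illustration (not in the original, never invoked):
// mostlyUppercase compares raw counts of uppercase and lowercase letters,
// so digits and punctuation are ignored.
function exampleMostlyUppercase(){
console.assert(is.mostlyUppercase("FREE IPHONES!!!") === true);
console.assert(is.mostlyUppercase("Free iPhones") === false);
}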
var QUEUE_TIMEOUT = 12 * 60 * 60 * 1000;
var WEBSOCKET_TIMEOUT = 6 * 60 * 1000;
var ws, wsRefreshTimeout;
(function wsRefresh(){
//refresh the official stream
StackExchange.realtime.init("ws://qa.sockets.stackexchange.com");
//establish our own socket
wsRefreshTimeout = setTimeout(wsRefresh, 30000);
ws = new WebSocket("ws://qa.sockets.stackexchange.com");
ws.onmessage = function(){
clearTimeout(wsRefreshTimeout);
wsRefreshTimeout = setTimeout(wsRefresh, WEBSOCKET_TIMEOUT);
onMessage.apply(this, arguments);
};
ws.onerror = function(){
console.log.apply(console, ["console.error"].concat(arguments));
$("#mainArea").load(location.href + " #mainArea", scrapePage);
};
ws.onopen = function(){
ws.send("155-questions-active");
for(var site in siteWebsocketIDs){
if(siteWebsocketIDs[site]){
ws.send(siteWebsocketIDs[site] + "-questions-active");
}
}
};
})();
var css = document.createElement("style");
document.head.appendChild(css);
var daily_css = document.createElement("style");
document.head.appendChild(daily_css);
var hours = 1000 * 60 * 60;
(function resetDailyCss(){
daily_css.textContent = "";
ooflagSites = {};
atGMT(0, resetDailyCss);
})();
var menu;
var notification_granted;
var imgPool = new ElementPool();
var notifiedOf = {}, notifiedOfToday = {};
var ooflagSites = {};
var questionQueue = {};
var siteWebsocketIDs = {};
var sitesByWebsocketID = {};
var onQuestionQueueTimeout = flushQuestionQueue;
var checkAnswer = checkPost, checkQuestion = checkPost;
menu_init();
notification_init();
window.addEventListener("unload", onbeforeunload);
scrapePage();
function atGMT(time, func){
// Normalize into [0, 24h) so a time-of-day already past today fires tomorrow.
var timeLeft = ((time - Date.now()) % (24 * hours) + 24 * hours) % (24 * hours);
setTimeout(func, timeLeft);
}
function onMessage(e){
var response = JSON.parse(e.data);
var data = response.data && JSON.parse(response.data);
if(response.action === "hb"){
ws.send("hb");
} else if(response.action === "155-questions-active"){
onQuestionActive(parseRealtimeSocket(data));
} else if(response.action.match(/\d+-questions-active/)){
scrapePerSiteQuestion(data.body, sitesByWebsocketID[data.siteid]);
} else {
console.log("unknown response type: %s in %o", response.action, response);
}
}
function scrapePage() |
function scrapePerSiteQuestion(html, site){
var question = new DOMParser().parseFromString(html, "text/html")
.getElementsByClassName("question-summary")[0];
var qLink = "http://" + siteNameToHostName(site)
+ question.querySelector("a.question-hyperlink").getAttribute("href");
onQuestionActive({
body: $(".excerpt", question).html().trim(),
link: qLink,
site: site,
tags: $(".post-tag", question).map(function(){return this.textContent;}),
title: $("h3 a", question).text().trim(),
question_id: question.id.split("-").pop(),
});
}
function checkSiteHasSocket(site){
if(siteWebsocketIDs[site] === undefined){
siteWebsocketIDs[site] = false; // prevent double fetching
GM_xmlhttpRequest({
method: "GET",
url: "http://" + siteNameToHostName(site),
ontimeout: checkSiteHasSocket.bind(null, site),
onerror: function(response) {
console.log(response);
checkSiteHasSocket(site); // retry
},
onload: function(response){
var scripts = (new DOMParser())
.parseFromString(response.response, "text/html")
.head.querySelectorAll("script:not([src])");
[].forEach.call(scripts, function(script){
var match = /StackExchange\.realtime\.subscribeToActiveQuestions\(["']?(\d+)/.exec(script.innerHTML);
if(match){
siteWebsocketIDs[site] = match[1];
sitesByWebsocketID[match[1]] = site;
}
});
if(siteWebsocketIDs[site]){
console.log("the ID for %s is %o", site, siteWebsocketIDs[site]);
ws.send(siteWebsocketIDs[site] + "-questions-active");
} else {
console.log("could not find the ID for %s", site);
}
}
});
}
}
function parseRealtimeSocket(wsData){
return{
body: wsData.bodySummary,
link: wsData.url,
site: wsData.apiSiteParameter,
tags: wsData.tags,
title: htmlUnescape(wsData.titleEncodedFancy),
question_id: wsData.id,
};
}
function onQuestionActive(qData){
checkQuestion(qData);
hiderInstall();
checkSiteHasSocket(qData.site);
questionQueuePush(qData);
}
function questionQueuePush(qData){
var site = qData.site;
var id = qData.question_id;
var queue = questionQueue[site] = questionQueue[site] || {site:site, questions:{}, length:0};
if(!queue.questions[id]) queue.length++;
queue.questions[id] = qData;
if(queue.length >= 100){
flushQuestionQueue(queue);
}else{
if(!queue.timeout){
queue.timeout = setTimeout(onQuestionQueueTimeout.bind(null, queue), QUEUE_TIMEOUT);
}
}
}
function flushQuestionQueue(queue){
var ids = Object.keys(queue.questions);
queue.length = 0;
queue.questions = {};
clearTimeout(queue.timeout);
queue.timeout = null;
console.log("requesting answers for " + ids.length + " questions on " + queue.site);
seApiCall("questions", ids.join(";"), {
filter: "!*7Pmg7yi0JKTUuaBigtbGINmVtEq",
site: queue.site})
.then(function(response){
response.items.forEach(function(question){
question.site = queue.site;
question.title = htmlUnescape(question.title);
checkQuestion(question);
if(question.answers) question.answers.forEach(function(answer){
checkAnswer(question, answer);
});
});
});
}
function checkPost(question, answer){
var title = question.title;
var host = question.site;
var site = hostNameToSiteName(host);
var site_class = "realtime-" + siteToClass(site);
var classname = site_class + "-" + question.question_id;
var q_body = $("<div/>", {html: question.body});
var a_body; if(answer) a_body = $("<div/>", {html: answer.body});
var text = answer ? a_body.text() : title + "\n" + q_body.text();
var id = answer ? answer.answer_id : question.question_id;
var link = answer ? answer.link : question.link;
if(!notifiedOf[site]) notifiedOf[site] = {};
if(!notifiedOf[site][id]){
if(/\b(ass(hole)?|bitch|crap|damn|dumb(ass)?|fag|fuck|idiot|motherfucker|nigga|shit(hole)?|stupid|whore)e?s?\b/i.test(text) ||
is.mostlyUppercase(text) ||
/\w+@(\w+\.)+\w{2,}/.test(text.replace(/\s/g,'')) ||
!answer && (
site == "meta" || site == "drupal" ||
/(?:[^a-hj-np-z ] *){9,}/i.test(title) ||
is.mostlyUppercase(title) ||
/\b(vs?|l[ae]|live|watch|free|cheap|online|best|nike|buy|replica|here is|porn|packers|movers|slim|concord|black magic|vashikaran|baba(ji)?|\d+s|kgl|fifa|escort|swtor)\b/i.test(title)
)
){
css.textContent += "." + classname + " {background-color: #FCC}\n";
notify(site, title,
answer ? "A - " + a_body.text() :
question.body ? "Q - " + q_body.text() :
undefined,
link);
}
notifiedOf[site][id] = true;
if(!notifiedOfToday[site]) notifiedOfToday[site] = {};
notifiedOfToday[site][id] = true;
}
}
function hiderInstall(){
var children = document.getElementById("mainArea").children;
for(var i = 0; i < children.length; i++){
if(children[i].getElementsByClassName("spam-helper-site-hider").length) break;
var match = children[i].className.match(/(realtime-[-a-z]+)-\d+/);
if(!match) break;
var siteClass = match[1];
var hider = imgPool.get(function(){
var hider = document.createElement("img");
hider.src = "https://raw.github.com/honnza/se-spam-helper/master/no-flag.png";
hider.title = "I'm out of spam flags for today here";
hider.className = "spam-helper-site-hider";
hider.style.cursor = "pointer";
return hider;
});
hider.onclick = function(siteClass){
daily_css.textContent += "." + siteClass + " {display: none}\n";
ooflagSites[siteClass] = true;
}.bind(null, siteClass);
children[i].getElementsByClassName("hot-question-site-icon")[0].appendChild(hider);
children[i].classList.add(siteClass);
}
}
function notify(site, title, body, url){
if(notification_granted && !ooflagSites[site]){
var notification = new Notification(title, {
icon: classToImageUrl(siteToClass(site)),
body: body || ''
});
notification.onclick = function(){
GM_openInTab(url);
GM_setClipboard(url);
};
}
}
function menu_init(){
menu = document.createElement("div");
menu.id = "spam-helper-menu";
var a = document.createElement("a");
a.href = "#";
a.id = "spam-helper-menu-a";
a.textContent = "spam helper";
a.onclick = function(){
if(menu.parentElement){
document.body.removeChild(menu);
}else{
document.body.appendChild(menu);
menu.style.top = a.offsetTop + 2 * a.offsetHeight + "px";
menu.style.left = a.offsetLeft + "px";
}
};
var wrapper = document.getElementsByClassName('topbar-wrapper')[0];
var links = document.getElementsByClassName('topbar-links')[0];
wrapper.insertBefore(menu, links);
css.textContent +=
"#spam-helper-menu {display: inline-block; padding-top:7px}" +
"#spam-helper-menu > span {display: block; width: 150px; color: white}" +
"#spam-helper-menu > span > input { vertical-align: -2px; }";
}
function notification_init(){
notification_granted = JSON.parse(localStorage.getItem("spam-helper-notification_granted")) || false;
var cb = document.createElement("input");
cb.type = "checkbox";
cb.checked = notification_granted;
cb.id = "spamhelpernotificationcb";
cb.onchange = function(){
if(cb.checked){
Notification.requestPermission(function(permission){
notification_granted = (permission === "granted");
localStorage.setItem("spam-helper-notification_granted", notification_granted);
});
}else{
notification_granted = false;
localStorage.setItem("spam-helper-notification_granted", false);
}
};
var label = document.createElement("label");
label.textContent = "enable notifications";
label.htmlFor = "spamhelpernotificationcb";
var span = document.createElement("span");
span.appendChild(cb);
span.appendChild(label);
menu.appendChild(span);
}
//
function ElementPool(){
var queue = [];
return {
constructor: ElementPool,
get: function(func){
var r;
for(var i = 0; i < queue.length; i++){
if(!document.contains(queue[i])){
r = queue.splice(i,1)[0];
break;
}
}
r = r || func();
queue.push(r);
return r;
}
};
}
var apiQueue = new Mutex();
function seApiCall(/* path..., options */){
var path = [].slice.call(arguments);
var options = path.pop();
var partialOk = options.partialOk;
delete options.partialOk;
var responseDeferred = $.Deferred();
var results = [];
(function getPage(page){
apiQueue.enqueue(function(){
var apiQueueDeferred = $.Deferred();
options.pagesize = 100;
options.page = page;
console.log("fired request");
GM_xmlhttpRequest({
method: "GET",
url: "http://api.stackexchange.com/2.2/" + path.join('/') + "?" + $.param(options),
ontimeout: getPage.bind(null, page),
onerror: function(response) {
console.log(response);
getPage(page); // retry
},
onload: function(response) {
response = JSON.parse(response.responseText);
if(response.error_message) throw response.error_message;
console.log("got response, remaining quota: " + response.quota_remaining);
[].push.apply(results, response.items);
if(response.has_more && !partialOk){
console.log("need more pages");
getPage(page + 1);
}else{
console.log("collected " + results.length + " results");
responseDeferred.resolve({items: results, partial: !!response.has_more});
}
if(!response.quota_remaining){
alert("I'm out of API quota!");
atGMT(10*hours, function(){apiQueueDeferred.resolve();});
}else if(response.backoff){
console.log("got backoff! " + response.backoff);
setTimeout(function(){apiQueueDeferred.resolve();}, response.backoff * 1000);
}else{
apiQueueDeferred.resolve();
}
}
});
return apiQueueDeferred.promise();
});
})(1);
return responseDeferred.promise();
}
function Mutex(){
var mutex = {
lock: $.Deferred().resolve(),
enqueue: function(func){
//change to `then` when SE upgrades to jQuery 1.8+
mutex.lock = mutex.lock.pipe(func, func);
}
};
return mutex;
}
function siteToClass(site){
var exceptions = {
"mathoverflow.net":"mathoverflow"
};
return exceptions[site] || site.replace(/\./g, '-');
}
function classToImageUrl(site){
var exceptions = {
"answers-onstartups":"onstartups",
"meta": "stackexchangemeta",
"pt-stackoverflow":"br",
};
site = exceptions[site] || site;
site = site.replace(/^meta\-(.*)/, "$1meta"); //TODO: is this outdated?
return "//cdn.sstatic.net/" + site + "/img/icon-48.png";
}
function hostNameToSiteName(host){
var match;
if((match = host.match(/(\w+)\.stackexchange\.com/))) return match[1];
if((match = host.match(/(\w+)\.com/))) return match[1];
return host;
}
function siteNameToHostName(site){
var SLDSites = ["askubuntu", "stackapps", "superuser", "serverfault", "stackoverflow", "pt.stackoverflow"];
if(SLDSites.indexOf(site) !== -1) return site + ".com";
else if(site.indexOf(".") !== -1) return site;
else return site + ".stackexchange.com";
}
function htmlUnescape(html){
return $("<div>").html(html).text();
}
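// Example, added for illustration (not in the original, never invoked):
// htmlUnescape round-trips through jQuery's .html() to decode entities.
function exampleHtmlUnescape(){
console.assert(htmlUnescape("R&amp;D &lt;tags&gt;") === "R&D <tags>");
}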
})(unsafeWindow || window);
| {
$(".realtime-question:visible").each(function(){
var qLink = this.querySelector("a.realtime-question-url");
onQuestionActive({
body: undefined,
link: qLink.href,
site: hostNameToSiteName(qLink.hostname),
tags: $(".post-tag", this).map(function(){return this.textContent;}),
title: $("h2", this).text().trim(),
question_id: qLink.href.match(/\/questions\/(\d+)\//)[1],
});
});
hiderInstall();
} | identifier_body |
channel_router.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp;
use std::collections::HashMap;
struct Ranges {
ranges: Vec<std::ops::Range<usize>>,
}
impl Ranges {
fn new() -> Self {
Ranges { ranges: Vec::new() }
}
fn add(&mut self, start: usize, end: usize) {
let (start, end) = (cmp::min(start, end), cmp::max(start, end) + 1);
self.ranges.push(std::ops::Range { start, end });
}
fn contains(&self, start: usize, end: usize) -> bool {
let (start, end) = (cmp::min(start, end), cmp::max(start, end));
(start..=end).any(|v| self.ranges.iter().any(|r| r.contains(&v)))
}
fn contains_range(&self, range: &std::ops::Range<usize>) -> bool {
self.contains(range.start, range.end)
}
fn range_sum(&self) -> usize {
self.ranges.iter().map(|r| r.end - r.start).sum()
}
}
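// Worked example, added for this write-up (not in the original file): add()
// normalizes its endpoints and stores an inclusive span as a half-open
// range, so add(5, 3) covers channel positions 3..=5.
#[cfg(test)]
mod ranges_example {
use super::*;
#[test]
fn inclusive_semantics() {
let mut r = Ranges::new();
r.add(5, 3);
assert!(r.contains(5, 5));
assert!(!r.contains(6, 6));
assert_eq!(r.range_sum(), 3); // positions 3, 4 and 5
}
}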
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum ChannelState {
Free,
// Occupied means no connection. This is the same as a constant false.
Occupied,
// Constant true.
Constant,
Net(usize),
}
pub type ChannelLayout = [ChannelState];
impl ChannelState {
pub fn is_free(&self) -> bool {
self == &ChannelState::Free
}
pub fn contains_net(&self) -> bool {
matches!(self, ChannelState::Net(_))
}
pub fn is_constant_on(&self) -> bool {
matches!(self, ChannelState::Constant)
}
}
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum ChannelOp {
Move,
Copy,
}
#[derive(Debug, Clone)]
pub struct WireConnection {
pub from: usize,
pub to: Vec<usize>,
pub mode: ChannelOp,
}
#[derive(Debug)]
pub struct ChannelSubState {
pub wires: Vec<WireConnection>,
pub occupancy_map: bitmap::Bitmap<Vec<usize>, bitmap::OneBit>,
}
#[derive(Debug)]
struct Task {
net: usize,
from: usize,
to: Vec<usize>,
}
impl Task {
fn channel_range_required(&self) -> std::ops::Range<usize> {
let from = [self.from];
let min = self.to.iter().chain(&from).min().unwrap();
let max = self.to.iter().chain(&from).max().unwrap();
std::ops::Range {
start: *min,
end: max + 1,
}
}
fn channel_width_required(&self) -> usize {
let r = self.channel_range_required();
r.end - r.start
}
fn occupied_target_pins(&self, layout: &ChannelLayout) -> Vec<usize> {
let mut occupied = Vec::new();
for &idx in &self.to {
if layout[idx].contains_net() && layout[idx] != ChannelState::Net(self.net) {
occupied.push(idx);
}
}
occupied
}
// Returns how 'good' a new 'from' position is for this task (when evicting)
// so that we can prefer nice spots.
fn eviction_cost(&self, new_pos: usize) -> usize {
let min = self.to.iter().min().unwrap();
let max = self.to.iter().max().unwrap();
let dist = (self.from as isize - new_pos as isize).abs() as usize;
if new_pos > *max {
2 * (new_pos - *max) + dist
} else if new_pos < *min {
2 * (*min - new_pos) + dist
} else {
dist
}
}
}
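// Worked example, added for this write-up (not in the original file):
// eviction_cost prefers eviction spots inside the span of target pins;
// positions outside pay double the overshoot plus the move distance.
#[cfg(test)]
mod eviction_cost_example {
use super::*;
#[test]
fn prefers_positions_inside_target_span() {
let task = Task { net: 0, from: 2, to: vec![4, 6] };
assert_eq!(task.eviction_cost(5), 3); // inside 4..=6: plain move distance
assert_eq!(task.eviction_cost(1), 7); // below 4: 2*(4-1) + |2-1|
assert_eq!(task.eviction_cost(8), 10); // above 6: 2*(8-6) + |2-8|
}
}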
#[derive(Default)]
struct RouteTasks {
// source idx -> vec<target idx>
tasks: HashMap<usize, Vec<usize>>,
}
impl RouteTasks {
fn add(&mut self, from: usize, to: usize) {
if let Some(k) = self.tasks.get_mut(&from) | else {
self.tasks.insert(from, vec![to]);
}
}
fn into_tasks(mut self, src: &ChannelLayout) -> Vec<Task> {
self.tasks
.drain()
.map(|(k, v)| {
let net = match src[k] {
ChannelState::Net(i) => i,
_ => unreachable!(),
};
Task {
net,
from: k,
to: v,
}
})
.collect::<Vec<_>>()
}
}
pub fn route_channel(start: &ChannelLayout, end: &ChannelLayout) -> Vec<ChannelSubState> {
let mut state = start.to_owned();
// Expand the state to be at least end.len() wide.
while state.len() < end.len() {
state.push(ChannelState::Free);
}
let mut tasks = RouteTasks::default();
for end_idx in 0..end.len() {
if !end[end_idx].contains_net() || end[end_idx] == state[end_idx] {
continue;
}
let state_idx = state
.iter()
.position(|v| v == &end[end_idx])
.unwrap_or_else(|| panic!("Required field '{:?}' not found", end[end_idx]));
tasks.add(state_idx, end_idx);
}
let mut tasks = tasks.into_tasks(&state);
// Order by how much of the channel this task occupies.
tasks.sort_by_key(|k| k.channel_width_required());
let mut steps: Vec<ChannelSubState> = Vec::new();
loop {
// Ranges of the channel that are currently occupied.
let mut ranges = Ranges::new();
// Instructions on how to connect pins in the current part of the channel.
let mut wires = Vec::new();
// To detect if we were unable to do anything due to blocked pins.
let old_task_len = tasks.len();
tasks = tasks
.drain(0..tasks.len())
.filter(|task| {
// Speed things up by only 'enforcing' 50% channel utilization.
if ranges.range_sum() > (cmp::max(state.len(), end.len()) / 2) {
return true;
}
// Do we have the required part of the channel available?
if ranges.contains_range(&task.channel_range_required()) {
return true;
}
let blocking_pins = task.occupied_target_pins(&state);
if blocking_pins.is_empty() {
// Targets are free, directly move (or copy) it there.
let keep = if task.from >= end.len() || state[task.from] != end[task.from] {
state[task.from] = ChannelState::Free;
false
} else {
true
};
wires.push(WireConnection {
from: task.from,
to: task.to.clone(),
mode: if keep {
ChannelOp::Copy
} else {
ChannelOp::Move
},
});
let r = task.channel_range_required();
// -1 here since .add() + channel_range_required() will do +1.
ranges.add(r.start, r.end - 1);
for &to in &task.to {
state[to] = ChannelState::Net(task.net);
}
// We successfully handled this one.
return false;
}
true
})
.collect::<Vec<_>>();
// We were unable to handle any tasks -> we need to evict some channels.
if old_task_len == tasks.len() {
// Find available positions where we can evict to.
let mut free_positions = state
.iter()
.enumerate()
.filter(|(_, v)| !v.contains_net())
.map(|(k, _)| k)
.filter(|&k| k >= end.len() || !end[k].contains_net())
.collect::<Vec<_>>();
if free_positions.is_empty() {
println!("[!] No free positions found, expanding channel");
// Make sure that we have some room, scaling with the number of
// remaining tasks as a rough heuristic.
for _ in 0..(tasks.len() / 10 + 1) {
state.push(ChannelState::Free);
free_positions.push(state.len() - 1);
}
}
for task_idx in 0..tasks.len() {
let blocking_pins = tasks[task_idx].occupied_target_pins(&state);
for to_evict in blocking_pins {
// Find corresponding task.
let task_idx_to_evict = tasks
.iter()
.position(|t| t.from == to_evict)
.unwrap_or_else(|| panic!("Could not find task blocking {}", to_evict));
// Find a good place for this task to evict to.
free_positions.sort_by(|&a, &b| {
// Comparing in the opposite order on purpose here so
// that we can use pop() later.
tasks[task_idx_to_evict]
.eviction_cost(b)
.cmp(&tasks[task_idx_to_evict].eviction_cost(a))
});
let from = tasks[task_idx_to_evict].from;
let new_pos = *free_positions.last().unwrap();
// Check whether the space is actually available.
let req_range = std::ops::Range {
start: cmp::min(from, new_pos),
end: cmp::max(from, new_pos) + 1,
};
if !ranges.contains_range(&req_range) {
free_positions.pop();
ranges.add(from, new_pos);
wires.push(WireConnection {
from,
to: vec![new_pos],
mode: ChannelOp::Move,
});
tasks[task_idx_to_evict].from = new_pos;
state[new_pos] = ChannelState::Net(tasks[task_idx_to_evict].net);
state[to_evict] = ChannelState::Free;
}
}
}
}
let mut bitmap =
bitmap::Bitmap::from_storage(state.len(), (), vec![0; (state.len() + 63) / 64])
.unwrap();
for idx in state
.iter()
.enumerate()
.filter(|(_, v)| v.contains_net())
.map(|(k, _)| k)
{
bitmap.set(idx, 1);
}
steps.push(ChannelSubState {
wires,
occupancy_map: bitmap,
});
if tasks.is_empty() {
return steps;
}
}
}
| {
k.push(to);
} | conditional_block |
channel_router.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp;
use std::collections::HashMap;
struct Ranges {
ranges: Vec<std::ops::Range<usize>>,
}
impl Ranges {
fn new() -> Self {
Ranges { ranges: Vec::new() }
}
fn add(&mut self, start: usize, end: usize) {
let (start, end) = (cmp::min(start, end), cmp::max(start, end) + 1);
self.ranges.push(std::ops::Range { start, end });
}
fn contains(&self, start: usize, end: usize) -> bool {
let (start, end) = (cmp::min(start, end), cmp::max(start, end));
(start..=end).any(|v| self.ranges.iter().any(|r| r.contains(&v)))
}
fn contains_range(&self, range: &std::ops::Range<usize>) -> bool {
self.contains(range.start, range.end)
}
fn range_sum(&self) -> usize {
self.ranges.iter().map(|r| r.end - r.start).sum()
}
}
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum ChannelState {
Free,
// Occupied means no connection. This is the same as a constant false.
Occupied,
// Constant true.
Constant,
Net(usize),
}
pub type ChannelLayout = [ChannelState];
impl ChannelState {
pub fn | (&self) -> bool {
self == &ChannelState::Free
}
pub fn contains_net(&self) -> bool {
matches!(self, ChannelState::Net(_))
}
pub fn is_constant_on(&self) -> bool {
matches!(self, ChannelState::Constant)
}
}
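// Sketch, added for this write-up (not in the original file): the four
// channel states and what their predicates report.
#[cfg(test)]
mod channel_state_example {
use super::*;
#[test]
fn predicates() {
assert!(ChannelState::Free.is_free());
assert!(ChannelState::Net(7).contains_net());
assert!(ChannelState::Constant.is_constant_on());
// Occupied is allocated but carries no net: every predicate is false.
let s = ChannelState::Occupied;
assert!(!s.is_free() && !s.contains_net() && !s.is_constant_on());
}
}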
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum ChannelOp {
Move,
Copy,
}
#[derive(Debug, Clone)]
pub struct WireConnection {
pub from: usize,
pub to: Vec<usize>,
pub mode: ChannelOp,
}
#[derive(Debug)]
pub struct ChannelSubState {
pub wires: Vec<WireConnection>,
pub occupancy_map: bitmap::Bitmap<Vec<usize>, bitmap::OneBit>,
}
#[derive(Debug)]
struct Task {
net: usize,
from: usize,
to: Vec<usize>,
}
impl Task {
fn channel_range_required(&self) -> std::ops::Range<usize> {
let from = [self.from];
let min = self.to.iter().chain(&from).min().unwrap();
let max = self.to.iter().chain(&from).max().unwrap();
std::ops::Range {
start: *min,
end: max + 1,
}
}
fn channel_width_required(&self) -> usize {
let r = self.channel_range_required();
r.end - r.start
}
fn occupied_target_pins(&self, layout: &ChannelLayout) -> Vec<usize> {
let mut occupied = Vec::new();
for &idx in &self.to {
if layout[idx].contains_net() && layout[idx] != ChannelState::Net(self.net) {
occupied.push(idx);
}
}
occupied
}
// Returns how 'good' a new 'from' position is for this task (when evicting)
// so that we can prefer nice spots.
fn eviction_cost(&self, new_pos: usize) -> usize {
let min = self.to.iter().min().unwrap();
let max = self.to.iter().max().unwrap();
let dist = (self.from as isize - new_pos as isize).abs() as usize;
if new_pos > *max {
2 * (new_pos - *max) + dist
} else if new_pos < *min {
2 * (*min - new_pos) + dist
} else {
dist
}
}
}
#[derive(Default)]
struct RouteTasks {
// source idx -> vec<target idx>
tasks: HashMap<usize, Vec<usize>>,
}
impl RouteTasks {
fn add(&mut self, from: usize, to: usize) {
if let Some(k) = self.tasks.get_mut(&from) {
k.push(to);
} else {
self.tasks.insert(from, vec![to]);
}
}
fn into_tasks(mut self, src: &ChannelLayout) -> Vec<Task> {
self.tasks
.drain()
.map(|(k, v)| {
let net = match src[k] {
ChannelState::Net(i) => i,
_ => unreachable!(),
};
Task {
net,
from: k,
to: v,
}
})
.collect::<Vec<_>>()
}
}
pub fn route_channel(start: &ChannelLayout, end: &ChannelLayout) -> Vec<ChannelSubState> {
let mut state = start.to_owned();
// Expand the state to be at least end.len() wide.
while state.len() < end.len() {
state.push(ChannelState::Free);
}
let mut tasks = RouteTasks::default();
for end_idx in 0..end.len() {
if !end[end_idx].contains_net() || end[end_idx] == state[end_idx] {
continue;
}
let state_idx = state
.iter()
.position(|v| v == &end[end_idx])
.unwrap_or_else(|| panic!("Required field '{:?}' not found", end[end_idx]));
tasks.add(state_idx, end_idx);
}
let mut tasks = tasks.into_tasks(&state);
// Order by how much of the channel this task occupies.
tasks.sort_by_key(|k| k.channel_width_required());
let mut steps: Vec<ChannelSubState> = Vec::new();
loop {
// Ranges of the channel that are currently occupied.
let mut ranges = Ranges::new();
// Instructions on how to connect pins in the current part of the channel.
let mut wires = Vec::new();
// To detect if we were unable to do anything due to blocked pins.
let old_task_len = tasks.len();
tasks = tasks
.drain(0..tasks.len())
.filter(|task| {
// Speed things up by only 'enforcing' 50% channel utilization.
if ranges.range_sum() > (cmp::max(state.len(), end.len()) / 2) {
return true;
}
// Do we have the required part of the channel available?
if ranges.contains_range(&task.channel_range_required()) {
return true;
}
let blocking_pins = task.occupied_target_pins(&state);
if blocking_pins.is_empty() {
// Targets are free, directly move (or copy) it there.
let keep = if task.from >= end.len() || state[task.from] != end[task.from] {
state[task.from] = ChannelState::Free;
false
} else {
true
};
wires.push(WireConnection {
from: task.from,
to: task.to.clone(),
mode: if keep {
ChannelOp::Copy
} else {
ChannelOp::Move
},
});
let r = task.channel_range_required();
// -1 here since .add() + channel_range_required() will do +1.
ranges.add(r.start, r.end - 1);
for &to in &task.to {
state[to] = ChannelState::Net(task.net);
}
// We successfully handled this one.
return false;
}
true
})
.collect::<Vec<_>>();
// We were unable to handle any tasks -> we need to evict some channels.
if old_task_len == tasks.len() {
// Find available positions where we can evict to.
let mut free_positions = state
.iter()
.enumerate()
.filter(|(_, v)| !v.contains_net())
.map(|(k, _)| k)
.filter(|&k| k >= end.len() || !end[k].contains_net())
.collect::<Vec<_>>();
if free_positions.is_empty() {
println!("[!] No free positions found, expanding channel");
// Make sure that we have some room, scaling with the number of
// remaining tasks as a rough heuristic.
for _ in 0..(tasks.len() / 10 + 1) {
state.push(ChannelState::Free);
free_positions.push(state.len() - 1);
}
}
for task_idx in 0..tasks.len() {
let blocking_pins = tasks[task_idx].occupied_target_pins(&state);
for to_evict in blocking_pins {
// Find corresponding task.
let task_idx_to_evict = tasks
.iter()
.position(|t| t.from == to_evict)
.unwrap_or_else(|| panic!("Could not find task blocking {}", to_evict));
// Find a good place for this task to evict to.
free_positions.sort_by(|&a, &b| {
// Comparing in the opposite order on purpose here so
// that we can use pop() later.
tasks[task_idx_to_evict]
.eviction_cost(b)
.cmp(&tasks[task_idx_to_evict].eviction_cost(a))
});
let from = tasks[task_idx_to_evict].from;
let new_pos = *free_positions.last().unwrap();
// Check whether the space is actually available.
let req_range = std::ops::Range {
start: cmp::min(from, new_pos),
end: cmp::max(from, new_pos) + 1,
};
if !ranges.contains_range(&req_range) {
free_positions.pop();
ranges.add(from, new_pos);
wires.push(WireConnection {
from,
to: vec![new_pos],
mode: ChannelOp::Move,
});
tasks[task_idx_to_evict].from = new_pos;
state[new_pos] = ChannelState::Net(tasks[task_idx_to_evict].net);
state[to_evict] = ChannelState::Free;
}
}
}
}
let mut bitmap =
bitmap::Bitmap::from_storage(state.len(), (), vec![0; (state.len() + 63) / 64])
.unwrap();
for idx in state
.iter()
.enumerate()
.filter(|(_, v)| v.contains_net())
.map(|(k, _)| k)
{
bitmap.set(idx, 1);
}
steps.push(ChannelSubState {
wires,
occupancy_map: bitmap,
});
if tasks.is_empty() {
return steps;
}
}
}
| is_free | identifier_name |
channel_router.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp;
use std::collections::HashMap;
struct Ranges {
ranges: Vec<std::ops::Range<usize>>,
}
impl Ranges {
fn new() -> Self {
Ranges { ranges: Vec::new() }
}
fn add(&mut self, start: usize, end: usize) {
let (start, end) = (cmp::min(start, end), cmp::max(start, end) + 1);
self.ranges.push(std::ops::Range { start, end });
}
fn contains(&self, start: usize, end: usize) -> bool {
let (start, end) = (cmp::min(start, end), cmp::max(start, end));
(start..=end).any(|v| self.ranges.iter().any(|r| r.contains(&v)))
}
fn contains_range(&self, range: &std::ops::Range<usize>) -> bool {
self.contains(range.start, range.end)
}
fn range_sum(&self) -> usize {
self.ranges.iter().map(|r| r.end - r.start).sum()
}
}
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum ChannelState {
Free,
// Occupied means no connection. This is the same as a constant false.
Occupied,
// Constant true.
Constant,
Net(usize),
}
pub type ChannelLayout = [ChannelState];
impl ChannelState {
pub fn is_free(&self) -> bool {
self == &ChannelState::Free
}
pub fn contains_net(&self) -> bool {
matches!(self, ChannelState::Net(_))
}
pub fn is_constant_on(&self) -> bool {
matches!(self, ChannelState::Constant)
}
}
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum ChannelOp {
Move,
Copy,
}
#[derive(Debug, Clone)]
pub struct WireConnection {
pub from: usize,
pub to: Vec<usize>,
pub mode: ChannelOp,
}
#[derive(Debug)]
pub struct ChannelSubState {
pub wires: Vec<WireConnection>,
pub occupancy_map: bitmap::Bitmap<Vec<usize>, bitmap::OneBit>,
}
#[derive(Debug)]
struct Task {
net: usize,
from: usize,
to: Vec<usize>,
}
impl Task {
fn channel_range_required(&self) -> std::ops::Range<usize> {
let from = [self.from];
let min = self.to.iter().chain(&from).min().unwrap();
let max = self.to.iter().chain(&from).max().unwrap();
std::ops::Range {
start: *min,
end: max + 1,
}
}
fn channel_width_required(&self) -> usize {
let r = self.channel_range_required();
r.end - r.start
}
fn occupied_target_pins(&self, layout: &ChannelLayout) -> Vec<usize> {
let mut occupied = Vec::new();
for &idx in &self.to {
if layout[idx].contains_net() && layout[idx] != ChannelState::Net(self.net) {
occupied.push(idx);
}
}
occupied
}
// Returns how 'good' a new 'from' position is for this task (when evicting)
// so that we can prefer nice spots.
fn eviction_cost(&self, new_pos: usize) -> usize {
let min = self.to.iter().min().unwrap();
let max = self.to.iter().max().unwrap();
let dist = (self.from as isize - new_pos as isize).abs() as usize;
if new_pos > *max {
2 * (new_pos - *max) + dist
} else if new_pos < *min {
2 * (*min - new_pos) + dist
} else {
dist
}
}
}
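// Sketch, added for this write-up (not in the original file):
// occupied_target_pins reports only targets holding a *different* net;
// pins already carrying this task's net are not blockers.
#[cfg(test)]
mod occupied_pins_example {
use super::*;
#[test]
fn ignores_own_net() {
let layout = [ChannelState::Free, ChannelState::Net(1), ChannelState::Net(2)];
let task = Task { net: 1, from: 0, to: vec![1, 2] };
assert_eq!(task.occupied_target_pins(&layout), vec![2]);
}
}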
#[derive(Default)]
struct RouteTasks {
// source idx -> vec<target idx>
tasks: HashMap<usize, Vec<usize>>,
}
impl RouteTasks {
fn add(&mut self, from: usize, to: usize) { | } else {
self.tasks.insert(from, vec![to]);
}
}
fn into_tasks(mut self, src: &ChannelLayout) -> Vec<Task> {
self.tasks
.drain()
.map(|(k, v)| {
let net = match src[k] {
ChannelState::Net(i) => i,
_ => unreachable!(),
};
Task {
net,
from: k,
to: v,
}
})
.collect::<Vec<_>>()
}
}
pub fn route_channel(start: &ChannelLayout, end: &ChannelLayout) -> Vec<ChannelSubState> {
let mut state = start.to_owned();
// Expand the state to be at least end.len() wide.
while state.len() < end.len() {
state.push(ChannelState::Free);
}
let mut tasks = RouteTasks::default();
for end_idx in 0..end.len() {
if !end[end_idx].contains_net() || end[end_idx] == state[end_idx] {
continue;
}
let state_idx = state
.iter()
.position(|v| v == &end[end_idx])
.unwrap_or_else(|| panic!("Required field '{:?}' not found", end[end_idx]));
tasks.add(state_idx, end_idx);
}
let mut tasks = tasks.into_tasks(&state);
// Order by how much of the channel this task occupies.
tasks.sort_by_key(|k| k.channel_width_required());
let mut steps: Vec<ChannelSubState> = Vec::new();
loop {
// Ranges of the channel that are currently occupied.
let mut ranges = Ranges::new();
// Instructions on how to connect pins in the current part of the channel.
let mut wires = Vec::new();
// To detect if we were unable to do anything due to blocked pins.
let old_task_len = tasks.len();
tasks = tasks
.drain(0..tasks.len())
.filter(|task| {
// Speed things up by only 'enforcing' 50% channel utilization.
if ranges.range_sum() > (cmp::max(state.len(), end.len()) / 2) {
return true;
}
// Do we have the required part of the channel available?
if ranges.contains_range(&task.channel_range_required()) {
return true;
}
let blocking_pins = task.occupied_target_pins(&state);
if blocking_pins.is_empty() {
// Targets are free, directly move (or copy) it there.
let keep = if task.from >= end.len() || state[task.from] != end[task.from] {
state[task.from] = ChannelState::Free;
false
} else {
true
};
wires.push(WireConnection {
from: task.from,
to: task.to.clone(),
mode: if keep {
ChannelOp::Copy
} else {
ChannelOp::Move
},
});
let r = task.channel_range_required();
// -1 here since .add() + channel_range_required() will do +1.
ranges.add(r.start, r.end - 1);
for &to in &task.to {
state[to] = ChannelState::Net(task.net);
}
// We successfully handled this one.
return false;
}
true
})
.collect::<Vec<_>>();
// We were unable to handle any tasks -> we need to evict some channels.
if old_task_len == tasks.len() {
// Find available positions where we can evict to.
let mut free_positions = state
.iter()
.enumerate()
.filter(|(_, v)| !v.contains_net())
.map(|(k, _)| k)
.filter(|&k| k >= end.len() || !end[k].contains_net())
.collect::<Vec<_>>();
if free_positions.is_empty() {
println!("[!] No free positions found, expanding channel");
// Make sure that we have some room, scaling with the number of
// remaining tasks as a rough heuristic.
for _ in 0..(tasks.len() / 10 + 1) {
state.push(ChannelState::Free);
free_positions.push(state.len() - 1);
}
}
for task_idx in 0..tasks.len() {
let blocking_pins = tasks[task_idx].occupied_target_pins(&state);
for to_evict in blocking_pins {
// Find corresponding task.
let task_idx_to_evict = tasks
.iter()
.position(|t| t.from == to_evict)
.unwrap_or_else(|| panic!("Could not find task blocking {}", to_evict));
// Find a good place for this task to evict to.
free_positions.sort_by(|&a, &b| {
// Comparing in the opposite order on purpose here so
// that we can use pop() later.
tasks[task_idx_to_evict]
.eviction_cost(b)
.cmp(&tasks[task_idx_to_evict].eviction_cost(a))
});
let from = tasks[task_idx_to_evict].from;
let new_pos = *free_positions.last().unwrap();
// Check whether the space is actually available.
let req_range = std::ops::Range {
start: cmp::min(from, new_pos),
end: cmp::max(from, new_pos) + 1,
};
if !ranges.contains_range(&req_range) {
free_positions.pop();
ranges.add(from, new_pos);
wires.push(WireConnection {
from,
to: vec![new_pos],
mode: ChannelOp::Move,
});
tasks[task_idx_to_evict].from = new_pos;
state[new_pos] = ChannelState::Net(tasks[task_idx_to_evict].net);
state[to_evict] = ChannelState::Free;
}
}
}
}
let mut bitmap =
bitmap::Bitmap::from_storage(state.len(), (), vec![0; (state.len() + 63) / 64])
.unwrap();
for idx in state
.iter()
.enumerate()
.filter(|(_, v)| v.contains_net())
.map(|(k, _)| k)
{
bitmap.set(idx, 1);
}
steps.push(ChannelSubState {
wires,
occupancy_map: bitmap,
});
if tasks.is_empty() {
return steps;
}
}
} | if let Some(k) = self.tasks.get_mut(&from) {
k.push(to); | random_line_split |
channel_router.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp;
use std::collections::HashMap;
struct Ranges {
ranges: Vec<std::ops::Range<usize>>,
}
impl Ranges {
fn new() -> Self {
Ranges { ranges: Vec::new() }
}
fn add(&mut self, start: usize, end: usize) {
let (start, end) = (cmp::min(start, end), cmp::max(start, end) + 1);
self.ranges.push(std::ops::Range { start, end });
}
fn contains(&self, start: usize, end: usize) -> bool {
let (start, end) = (cmp::min(start, end), cmp::max(start, end));
(start..=end).any(|v| self.ranges.iter().any(|r| r.contains(&v)))
}
fn contains_range(&self, range: &std::ops::Range<usize>) -> bool {
self.contains(range.start, range.end)
}
fn range_sum(&self) -> usize |
}
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum ChannelState {
Free,
// Occupied means no connection. This is the same as a constant false.
Occupied,
// Constant true.
Constant,
Net(usize),
}
pub type ChannelLayout = [ChannelState];
impl ChannelState {
pub fn is_free(&self) -> bool {
self == &ChannelState::Free
}
pub fn contains_net(&self) -> bool {
matches!(self, ChannelState::Net(_))
}
pub fn is_constant_on(&self) -> bool {
matches!(self, ChannelState::Constant)
}
}
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum ChannelOp {
Move,
Copy,
}
#[derive(Debug, Clone)]
pub struct WireConnection {
pub from: usize,
pub to: Vec<usize>,
pub mode: ChannelOp,
}
#[derive(Debug)]
pub struct ChannelSubState {
pub wires: Vec<WireConnection>,
pub occupancy_map: bitmap::Bitmap<Vec<usize>, bitmap::OneBit>,
}
#[derive(Debug)]
struct Task {
net: usize,
from: usize,
to: Vec<usize>,
}
impl Task {
fn channel_range_required(&self) -> std::ops::Range<usize> {
let from = [self.from];
let min = self.to.iter().chain(&from).min().unwrap();
let max = self.to.iter().chain(&from).max().unwrap();
std::ops::Range {
start: *min,
end: max + 1,
}
}
fn channel_width_required(&self) -> usize {
let r = self.channel_range_required();
r.end - r.start
}
fn occupied_target_pins(&self, layout: &ChannelLayout) -> Vec<usize> {
let mut occupied = Vec::new();
for &idx in &self.to {
if layout[idx].contains_net() && layout[idx] != ChannelState::Net(self.net) {
occupied.push(idx);
}
}
occupied
}
// Returns how 'good' a new 'from' position is for this task (when evicting)
// so that we can prefer nice spots.
fn eviction_cost(&self, new_pos: usize) -> usize {
let min = self.to.iter().min().unwrap();
let max = self.to.iter().max().unwrap();
let dist = (self.from as isize - new_pos as isize).abs() as usize;
if new_pos > *max {
2 * (new_pos - *max) + dist
} else if new_pos < *min {
2 * (*min - new_pos) + dist
} else {
dist
}
}
}
#[derive(Default)]
struct RouteTasks {
// source idx -> vec<target idx>
tasks: HashMap<usize, Vec<usize>>,
}
impl RouteTasks {
fn add(&mut self, from: usize, to: usize) {
if let Some(k) = self.tasks.get_mut(&from) {
k.push(to);
} else {
self.tasks.insert(from, vec![to]);
}
}
fn into_tasks(mut self, src: &ChannelLayout) -> Vec<Task> {
self.tasks
.drain()
.map(|(k, v)| {
let net = match src[k] {
ChannelState::Net(i) => i,
_ => unreachable!(),
};
Task {
net,
from: k,
to: v,
}
})
.collect::<Vec<_>>()
}
}
pub fn route_channel(start: &ChannelLayout, end: &ChannelLayout) -> Vec<ChannelSubState> {
let mut state = start.to_owned();
// Expand the state to be at least end.len() wide.
while state.len() < end.len() {
state.push(ChannelState::Free);
}
let mut tasks = RouteTasks::default();
for end_idx in 0..end.len() {
if !end[end_idx].contains_net() || end[end_idx] == state[end_idx] {
continue;
}
let state_idx = state
.iter()
.position(|v| v == &end[end_idx])
.unwrap_or_else(|| panic!("Required field '{:?}' not found", end[end_idx]));
tasks.add(state_idx, end_idx);
}
let mut tasks = tasks.into_tasks(&state);
// Order by how much of the channel this task occupies.
tasks.sort_by_key(|k| k.channel_width_required());
let mut steps: Vec<ChannelSubState> = Vec::new();
loop {
// Ranges of the channel that is currently occupied.
let mut ranges = Ranges::new();
// Instruction on how to connect pins in the current part of the channel.
let mut wires = Vec::new();
// To detect if we were unable to do anything due to blocked pins.
let old_task_len = tasks.len();
tasks = tasks
.drain(0..tasks.len())
.filter(|task| {
// Speed things up by only 'enforcing' 50% channel utilization.
if ranges.range_sum() > (cmp::max(state.len(), end.len()) / 2) {
return true;
}
// Do we have the required part of the channel available?
if ranges.contains_range(&task.channel_range_required()) {
return true;
}
let blocking_pins = task.occupied_target_pins(&state);
if blocking_pins.is_empty() {
// Targets are free, directly move (or copy) it there.
let keep = if task.from >= end.len() || state[task.from] != end[task.from] {
state[task.from] = ChannelState::Free;
false
} else {
true
};
wires.push(WireConnection {
from: task.from,
to: task.to.clone(),
mode: if keep {
ChannelOp::Copy
} else {
ChannelOp::Move
},
});
let r = task.channel_range_required();
// -1 here since .add() + channel_range_required() will do +1.
ranges.add(r.start, r.end - 1);
for &to in &task.to {
state[to] = ChannelState::Net(task.net);
}
// We successfully handled this one.
return false;
}
true
})
.collect::<Vec<_>>();
// We were unable to handle any tasks -> we need to evict some channels.
if old_task_len == tasks.len() {
// Find available positions where we can evict to.
let mut free_positions = state
.iter()
.enumerate()
.filter(|(_, v)| !v.contains_net())
.map(|(k, _)| k)
.filter(|&k| k >= end.len() || !end[k].contains_net())
.collect::<Vec<_>>();
if free_positions.is_empty() {
println!("[!] No free positions found, expanding channel");
// Make sure that we have some room, scaling with the number of
// remaining tasks as a rough heuristic.
for _ in 0..(tasks.len() / 10 + 1) {
state.push(ChannelState::Free);
free_positions.push(state.len() - 1);
}
}
for task_idx in 0..tasks.len() {
let blocking_pins = tasks[task_idx].occupied_target_pins(&state);
for to_evict in blocking_pins {
// Find corresponding task.
let task_idx_to_evict = tasks
.iter()
.position(|t| t.from == to_evict)
.unwrap_or_else(|| panic!("Could not find task blocking {}", to_evict));
// Find a good place for this task to evict to.
free_positions.sort_by(|&a, &b| {
// Comparing in the opposite order on purpose here so
// that we can use pop() later.
tasks[task_idx_to_evict]
.eviction_cost(b)
.cmp(&tasks[task_idx_to_evict].eviction_cost(a))
});
let from = tasks[task_idx_to_evict].from;
let new_pos = *free_positions.last().unwrap();
// Check whether the space is actually available.
let req_range = std::ops::Range {
start: cmp::min(from, new_pos),
end: cmp::max(from, new_pos) + 1,
};
if !ranges.contains_range(&req_range) {
free_positions.pop();
ranges.add(from, new_pos);
wires.push(WireConnection {
from,
to: vec![new_pos],
mode: ChannelOp::Move,
});
tasks[task_idx_to_evict].from = new_pos;
state[new_pos] = ChannelState::Net(tasks[task_idx_to_evict].net);
state[to_evict] = ChannelState::Free;
}
}
}
}
let mut bitmap =
bitmap::Bitmap::from_storage(state.len(), (), vec![0; (state.len() + 63) / 64])
.unwrap();
for idx in state
.iter()
.enumerate()
.filter(|(_, v)| v.contains_net())
.map(|(k, _)| k)
{
bitmap.set(idx, 1);
}
steps.push(ChannelSubState {
wires,
occupancy_map: bitmap,
});
if tasks.is_empty() {
return steps;
}
}
}
| {
self.ranges.iter().map(|r| r.end - r.start).sum()
} | identifier_body |
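// Usage sketch, added for this write-up (not part of the original file):
// route_channel turns a starting layout plus a target layout into per-step
// wiring instructions. A single free target lets a net move in one step.
#[cfg(test)]
mod route_channel_example {
use super::*;
#[test]
fn moves_net_to_free_target() {
let start = [ChannelState::Net(1), ChannelState::Free];
let end = [ChannelState::Free, ChannelState::Net(1)];
let steps = route_channel(&start, &end);
assert_eq!(steps.len(), 1);
assert_eq!(steps[0].wires.len(), 1);
assert_eq!(steps[0].wires[0].from, 0);
assert_eq!(steps[0].wires[0].to, vec![1]);
assert!(matches!(steps[0].wires[0].mode, ChannelOp::Move));
}
}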
chain.rs | use std::collections::HashSet;
use std::io::{self, Write};
use crate::disk::bam::BamRef;
use crate::disk::block::{BlockDeviceRef, Location, BLOCK_SIZE};
use crate::disk::directory::DirectoryEntry;
use crate::disk::error::DiskError;
/// A "zero" chain link is a link that indicates that this is a tail block, and
/// it has zero data bytes used. (Which means it has a total of two bytes
/// used, counting the link itself.)
pub static CHAIN_LINK_ZERO: ChainLink = ChainLink::Tail(2);
#[derive(Debug)]
pub enum ChainLink {
Next(Location),
Tail(usize), // used bytes
}
impl ChainLink {
#[inline]
pub fn new(block: &[u8]) -> io::Result<ChainLink> {
if block[0] == 0x00 {
// This is the last sector of the chain, so the next byte indicates how much of
// this sector is actually used.
if block[1] < 1 {
// It's not valid for a chain sector to not include the first two bytes
// as allocated.
return Err(DiskError::InvalidChainLink.into());
}
Ok(ChainLink::Tail(block[1] as usize + 1)) // 2..=256
} else {
Ok(ChainLink::Next(Location::new(block[0], block[1])))
}
}
#[inline]
pub fn to_bytes(&self, bytes: &mut [u8]) {
assert!(bytes.len() >= 2);
match &self {
ChainLink::Next(location) => location.write_bytes(bytes),
ChainLink::Tail(size) => {
assert!(*size >= 2 && *size <= 256);
bytes[0] = 0x00;
bytes[1] = (*size - 1) as u8;
}
}
}
}
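// Worked example, added for this write-up (not in the original file): a tail
// link stores "used bytes - 1" in its second byte, so [0x00, 0x01] encodes a
// tail block whose only used bytes are the two link bytes themselves.
#[cfg(test)]
mod chain_link_example {
use super::*;
#[test]
fn tail_round_trip() {
let mut bytes = [0u8; 2];
ChainLink::Tail(2).to_bytes(&mut bytes);
assert_eq!(bytes, [0x00, 0x01]);
match ChainLink::new(&bytes).unwrap() {
ChainLink::Tail(used) => assert_eq!(used, 2),
_ => panic!("expected a tail link"),
}
}
}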
/// A ChainSector is the result of a chain iteration, and provides the block contents and the
/// location from which it was read.
pub struct ChainSector {
/// The 256-byte block contents, which includes the two-byte NTS (next track and sector) link.
pub data: Vec<u8>,
pub location: Location,
}
| visited_sectors: HashSet<Location>,
block: [u8; BLOCK_SIZE],
}
impl ChainIterator {
/// Create a new chain iterator starting at the specified location.
pub fn new(blocks: BlockDeviceRef, starting_sector: Location) -> ChainIterator {
ChainIterator {
blocks,
next_sector: Some(starting_sector),
visited_sectors: HashSet::new(),
block: [0u8; BLOCK_SIZE],
}
}
/// Read the entire chain and return a list of locations.
pub fn locations(self) -> io::Result<Vec<Location>> {
self.map(|r| r.map(|cs| cs.location)).collect()
}
}
impl Iterator for ChainIterator {
type Item = io::Result<ChainSector>;
fn next(&mut self) -> Option<io::Result<ChainSector>> {
let location = match self.next_sector.take() {
Some(next) => next,
None => return None,
};
// Loop detection.
if !self.visited_sectors.insert(location) {
return Some(Err(DiskError::ChainLoop.into()));
}
// Read the next sector.
{
let blocks = self.blocks.borrow();
let block = match blocks.sector(location) {
Ok(b) => b,
Err(e) => return Some(Err(e)),
};
self.block.copy_from_slice(block);
}
// Trim the block if needed.
let size = match ChainLink::new(&self.block[..]) {
Ok(ChainLink::Next(location)) => {
self.next_sector = Some(location);
BLOCK_SIZE // The entire sector is used.
}
Ok(ChainLink::Tail(size)) => size,
Err(e) => return Some(Err(e)),
};
let block = &self.block[..size];
Some(Ok(ChainSector {
data: block.to_vec(),
location,
}))
}
}
/// ChainReader objects implement the Read trait are used to read a byte stream
/// represented as a series of chained sectors on the disk image. Simple files
/// (e.g. CBM PRG and SEQ files) store data in a single chain where the
/// beginning track and sector is provided in the directory entry. More exotic
/// file types (GEOS, REL, etc.) use more complex structures, possibly with
/// multiple ChainReader objects (e.g. a GEOS VLIR file may provide a
/// ChainReader for each record).
pub struct ChainReader {
chain: ChainIterator,
block: Option<Vec<u8>>,
eof: bool,
}
impl ChainReader {
pub fn new(blocks: BlockDeviceRef, start: Location) -> ChainReader {
let chain = ChainIterator::new(blocks, start);
ChainReader {
chain,
block: None,
eof: false,
}
}
}
impl io::Read for ChainReader {
fn read(&mut self, mut buf: &mut [u8]) -> io::Result<usize> {
let mut total_nbytes = 0;
while !buf.is_empty() && !self.eof {
match self.block.take() {
Some(mut block) => {
// Copy as much of this block as possible into the caller-provided buffer.
let nbytes = block.len().min(buf.len());
buf[0..nbytes].copy_from_slice(&block[0..nbytes]);
total_nbytes += nbytes;
// Reduce the block slice to the unread portion (which may be zero bytes).
if block.len() != nbytes {
// Keep the unread tail of the block for the next read() call.
let mut tail = block.split_off(nbytes);
::std::mem::swap(&mut block, &mut tail);
self.block = Some(block);
}
// Reduce the provided buffer slice to the unwritten portion.
let buf_ref = &mut buf;
let value: &mut [u8] = std::mem::take(buf_ref);
*buf_ref = &mut value[nbytes..];
}
None => {
// Read the next block.
match self.chain.next() {
Some(Ok(mut block)) => {
// discard the next-track/sector bytes
self.block = Some(block.data.split_off(2));
// Loop back to the Some(_) case to process the block.
}
Some(Err(e)) => {
self.eof = true;
return Err(e);
}
None => self.eof = true,
}
}
}
}
Ok(total_nbytes)
}
}
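// Usage sketch, added for this write-up (hypothetical helper, not part of
// the original API): because ChainReader implements std::io::Read, a whole
// chained file can be pulled into memory with read_to_end.
#[allow(dead_code)]
fn example_read_chain(blocks: BlockDeviceRef, start: Location) -> io::Result<Vec<u8>> {
use std::io::Read;
let mut reader = ChainReader::new(blocks, start);
let mut data = Vec::new();
reader.read_to_end(&mut data)?;
Ok(data)
}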
/// A writer for writing data to a chain. The chain is extended as needed according to the
/// allocation algorithm for the disk format.
pub struct ChainWriter {
blocks: BlockDeviceRef,
bam: BamRef,
entry: DirectoryEntry,
location: Location,
block: Vec<u8>,
dirty: bool,
}
impl ChainWriter {
pub fn new(
blocks: BlockDeviceRef,
bam: BamRef,
entry: DirectoryEntry,
start: Location,
) -> io::Result<ChainWriter> {
// Advance to the last block in the chain.
let tail_block;
let mut tail_location;
{
let blocks = blocks.borrow();
let mut block = blocks.sector(start)?;
tail_location = start;
while let ChainLink::Next(location) = ChainLink::new(block)? {
block = blocks.sector(location)?;
tail_location = location;
}
tail_block = block.to_vec();
}
Ok(ChainWriter {
blocks,
bam,
entry,
location: tail_location,
block: tail_block,
dirty: true,
})
}
fn increment_entry_blocks(&mut self) -> io::Result<()> {
let mut blocks = self.blocks.borrow_mut();
blocks.positioned_read(&mut self.entry)?;
self.entry.file_size += 1;
blocks.positioned_write(&self.entry)?;
Ok(())
}
fn allocate_next_block(&mut self) -> io::Result<usize> {
// NOTE: The ordering of these steps is important for consistency. We don't
// want a block to be allocated in BAM, then not used because an error
// was thrown later.
// Write the current block without the updated link.
self.write_current_block()?;
// Find a new block.
let next_location = self.bam.borrow_mut().next_free_block(None)?;
// Initialize a fresh block in memory with a link indicating a tail block with
// zero bytes used. (Really, two bytes used for the link, but zero data
// bytes used.)
for i in 2..BLOCK_SIZE {
self.block[i] = 0;
}
ChainLink::Tail(2).to_bytes(&mut self.block[..]);
// Write the fresh block to the new location
self.blocks
.borrow_mut()
.sector_mut(next_location)?
.copy_from_slice(&self.block);
// Allocate the next block.
self.bam.borrow_mut().allocate(next_location)?;
// Increment the directory entry's file size (measured in blocks)
self.increment_entry_blocks()?;
// If allocation succeeds, only then do we link the current block to the next
// block.
let mut blocks = self.blocks.borrow_mut();
let block = match blocks.sector_mut(self.location) {
Ok(block) => block,
Err(e) => {
// Roll back the allocation.
self.bam.borrow_mut().free(next_location)?;
return Err(e);
}
};
next_location.write_bytes(block);
// Update state
self.location = next_location;
// Return the available bytes in the newly loaded block, which is always two
// less than the block size.
Ok(BLOCK_SIZE - 2)
}
fn write_current_block(&mut self) -> io::Result<()> {
// Write the current block
let mut blocks = self.blocks.borrow_mut();
blocks
.sector_mut(self.location)?
.copy_from_slice(&self.block);
Ok(())
}
}
impl Drop for ChainWriter {
fn drop(&mut self) {
let _result = self.flush();
}
}
// NOTE: allocating and updating entry block size should be atomic.
impl io::Write for ChainWriter {
fn write(&mut self, mut buf: &[u8]) -> io::Result<usize> {
self.dirty = true;
let mut total_nbytes = 0;
while !buf.is_empty() {
let (offset, remaining) = match ChainLink::new(&self.block)? {
ChainLink::Next(_) => unreachable!(), // The stored buffer is always a tail block.
ChainLink::Tail(nbytes) if nbytes == BLOCK_SIZE => {
// Allocate a new block
let remaining = self.allocate_next_block()?;
(BLOCK_SIZE - remaining, remaining)
}
ChainLink::Tail(nbytes) => (nbytes, BLOCK_SIZE - nbytes),
};
// Copy as much of the caller-provided buffer as possible into the block.
let nbytes = remaining.min(buf.len());
let _ = &self.block[offset..offset + nbytes].copy_from_slice(&buf[0..nbytes]);
total_nbytes += nbytes;
// Update the block link's indication of used bytes.
ChainLink::Tail(offset + nbytes).to_bytes(&mut self.block);
// Reduce the provided buffer slice to the unwritten portion.
buf = &buf[nbytes..];
}
Ok(total_nbytes)
}
fn flush(&mut self) -> io::Result<()> {
if self.dirty {
// Write the current block
self.write_current_block()?;
// Flush the BAM
self.bam.borrow_mut().flush()?;
// Flush the underlying medium.
let mut blocks = self.blocks.borrow_mut();
blocks.flush()?;
self.dirty = false;
}
Ok(())
}
}
pub fn remove_chain(blocks: BlockDeviceRef, bam: BamRef, start: Location) -> io::Result<()> {
// Read the whole chain first to be sure we can visit every block with no
// errors.
let locations = ChainIterator::new(blocks, start).locations()?;
// Deallocate
let mut bam = bam.borrow_mut();
for location in locations {
bam.free(location)?;
}
bam.flush()?;
Ok(())
} | /// Returns a ChainSector which includes the NTS (next track and sector) link.
pub struct ChainIterator {
blocks: BlockDeviceRef,
next_sector: Option<Location>, | random_line_split |
chain.rs | use std::collections::HashSet;
use std::io::{self, Write};
use crate::disk::bam::BamRef;
use crate::disk::block::{BlockDeviceRef, Location, BLOCK_SIZE};
use crate::disk::directory::DirectoryEntry;
use crate::disk::error::DiskError;
/// A "zero" chain link is a link that indicates that this is a tail block, and
/// it has zero data bytes used. (Which means it has a total of two bytes
/// used, counting the link itself.)
pub static CHAIN_LINK_ZERO: ChainLink = ChainLink::Tail(2);
#[derive(Debug)]
pub enum ChainLink {
Next(Location),
Tail(usize), // used bytes
}
impl ChainLink {
#[inline]
pub fn new(block: &[u8]) -> io::Result<ChainLink> {
if block[0] == 0x00 {
// This is the last sector of the chain, so the next byte indicates how much of
// this sector is actually used.
if block[1] < 1 {
            // A tail link must count at least the two link bytes as used, so a
            // used-bytes marker of zero is invalid.
return Err(DiskError::InvalidChainLink.into());
}
Ok(ChainLink::Tail(block[1] as usize + 1)) // 2..=256
} else {
Ok(ChainLink::Next(Location::new(block[0], block[1])))
}
}
#[inline]
pub fn to_bytes(&self, bytes: &mut [u8]) {
assert!(bytes.len() >= 2);
match &self {
ChainLink::Next(location) => location.write_bytes(bytes),
ChainLink::Tail(size) => {
assert!(*size >= 2 && *size <= 256);
bytes[0] = 0x00;
bytes[1] = (*size - 1) as u8;
}
}
}
}
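
// Hedged round-trip sketch (added, not from the original file): exercises
// `ChainLink::to_bytes` and `ChainLink::new`; the track/sector values are
// arbitrary examples.
#[cfg(test)]
mod chain_link_roundtrip_sketch {
    use super::*;

    #[test]
    fn roundtrip() -> io::Result<()> {
        let mut bytes = [0u8; 2];
        // A tail block with 10 data bytes used (12 counting the link itself).
        ChainLink::Tail(12).to_bytes(&mut bytes);
        assert_eq!(bytes, [0x00, 11]);
        assert!(matches!(ChainLink::new(&bytes)?, ChainLink::Tail(12)));
        // A link pointing at track 17, sector 0.
        ChainLink::Next(Location::new(17, 0)).to_bytes(&mut bytes);
        assert!(matches!(ChainLink::new(&bytes)?, ChainLink::Next(_)));
        Ok(())
    }
}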
/// A ChainSector is the result of a chain iteration, and provides the block contents and the
/// location from which it was read.
pub struct ChainSector {
/// The 256-byte block contents, which includes the two-byte NTS (next track and sector) link.
pub data: Vec<u8>,
pub location: Location,
}
/// Returns a ChainSector which includes the NTS (next track and sector) link.
pub struct ChainIterator {
blocks: BlockDeviceRef,
next_sector: Option<Location>,
visited_sectors: HashSet<Location>,
block: [u8; BLOCK_SIZE],
}
impl ChainIterator {
/// Create a new chain iterator starting at the specified location.
pub fn new(blocks: BlockDeviceRef, starting_sector: Location) -> ChainIterator {
ChainIterator {
blocks,
next_sector: Some(starting_sector),
visited_sectors: HashSet::new(),
block: [0u8; BLOCK_SIZE],
}
}
/// Read the entire chain and return a list of locations.
pub fn locations(self) -> io::Result<Vec<Location>> {
self.map(|r| r.map(|cs| cs.location)).collect()
}
}
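
// Hedged usage sketch (added, not from the original file): count the blocks of
// a file by walking its chain; `blocks` and `start` are assumed to come from
// the surrounding disk-image code.
#[allow(dead_code)]
fn block_count_sketch(blocks: BlockDeviceRef, start: Location) -> io::Result<usize> {
    Ok(ChainIterator::new(blocks, start).locations()?.len())
}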
impl Iterator for ChainIterator {
type Item = io::Result<ChainSector>;
fn next(&mut self) -> Option<io::Result<ChainSector>> {
let location = match self.next_sector.take() {
Some(next) => next,
None => return None,
};
// Loop detection.
if !self.visited_sectors.insert(location) {
return Some(Err(DiskError::ChainLoop.into()));
}
// Read the next sector.
{
let blocks = self.blocks.borrow();
let block = match blocks.sector(location) {
Ok(b) => b,
Err(e) => return Some(Err(e)),
};
self.block.copy_from_slice(block);
}
// Trim the block if needed.
let size = match ChainLink::new(&self.block[..]) {
Ok(ChainLink::Next(location)) => {
self.next_sector = Some(location);
BLOCK_SIZE // The entire sector is used.
}
Ok(ChainLink::Tail(size)) => size,
Err(e) => return Some(Err(e)),
};
let block = &self.block[..size];
Some(Ok(ChainSector {
data: block.to_vec(),
location,
}))
}
}
/// ChainReader objects implement the Read trait and are used to read a byte stream
/// represented as a series of chained sectors on the disk image. Simple files
/// (e.g. CBM PRG and SEQ files) store data in a single chain where the
/// beginning track and sector is provided in the directory entry. More exotic
/// file types (GEOS, REL, etc.) use more complex structures, possibly with
/// multiple ChainReader objects (e.g. a GEOS VLIR file may provide a
/// ChainReader for each record).
pub struct ChainReader {
chain: ChainIterator,
block: Option<Vec<u8>>,
eof: bool,
}
impl ChainReader {
pub fn new(blocks: BlockDeviceRef, start: Location) -> ChainReader {
let chain = ChainIterator::new(blocks, start);
ChainReader {
chain,
block: None,
eof: false,
}
}
}
impl io::Read for ChainReader {
fn read(&mut self, mut buf: &mut [u8]) -> io::Result<usize> {
let mut total_nbytes = 0;
while !buf.is_empty() && !self.eof {
match self.block.take() {
Some(mut block) => {
// Copy as much of this block as possible into the caller-provided buffer.
let nbytes = block.len().min(buf.len());
let _ = &buf[0..nbytes].copy_from_slice(&block[0..nbytes]);
total_nbytes += nbytes;
// Reduce the block slice to the unread portion (which may be zero bytes).
                    if block.len() != nbytes {
                        // Split off the unread tail and swap it back into `block`.
                        let mut tail = block.split_off(nbytes);
                        ::std::mem::swap(&mut block, &mut tail);
                        // Return the unread portion
                        self.block = Some(block);
                    }
// Reduce the provided buffer slice to the unwritten portion.
let buf_ref = &mut buf;
let value: &mut [u8] = std::mem::take(buf_ref);
*buf_ref = &mut value[nbytes..];
}
None => {
// Read the next block.
match self.chain.next() {
Some(Ok(mut block)) => {
// discard the next-track/sector bytes
self.block = Some(block.data.split_off(2));
// Loop back to the Some(_) case to process the block.
}
Some(Err(e)) => {
self.eof = true;
return Err(e);
}
None => self.eof = true,
}
}
}
}
Ok(total_nbytes)
}
}
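
// Hedged usage sketch (added, not from the original file): drain a whole chain
// into memory via the Read impl; real callers would take `start` from a
// directory entry.
#[allow(dead_code)]
fn read_chain_sketch(blocks: BlockDeviceRef, start: Location) -> io::Result<Vec<u8>> {
    use std::io::Read;
    let mut reader = ChainReader::new(blocks, start);
    let mut contents = Vec::new();
    reader.read_to_end(&mut contents)?;
    Ok(contents)
}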
/// A writer for writing data to a chain. The chain is extended as needed according to the
/// allocation algorithm for the disk format.
pub struct ChainWriter {
blocks: BlockDeviceRef,
bam: BamRef,
entry: DirectoryEntry,
location: Location,
block: Vec<u8>,
dirty: bool,
}
impl ChainWriter {
pub fn new(
blocks: BlockDeviceRef,
bam: BamRef,
entry: DirectoryEntry,
start: Location,
) -> io::Result<ChainWriter> {
// Advance to the last block in the chain.
let tail_block;
let mut tail_location;
{
let blocks = blocks.borrow();
let mut block = blocks.sector(start)?;
tail_location = start;
while let ChainLink::Next(location) = ChainLink::new(block)? {
block = blocks.sector(location)?;
tail_location = location;
}
tail_block = block.to_vec();
}
Ok(ChainWriter {
blocks,
bam,
entry,
location: tail_location,
block: tail_block,
dirty: true,
})
}
fn increment_entry_blocks(&mut self) -> io::Result<()> {
let mut blocks = self.blocks.borrow_mut();
blocks.positioned_read(&mut self.entry)?;
self.entry.file_size += 1;
blocks.positioned_write(&self.entry)?;
Ok(())
}
fn allocate_next_block(&mut self) -> io::Result<usize> {
// NOTE: The ordering of these steps is important for consistency. We don't
// want a block to be allocated in BAM, then not used because an error
// was thrown later.
// Write the current block without the updated link.
self.write_current_block()?;
// Find a new block.
let next_location = self.bam.borrow_mut().next_free_block(None)?;
// Initialize a fresh block in memory with a link indicating a tail block with
// zero bytes used. (Really, two bytes used for the link, but zero data
// bytes used.)
for i in 2..BLOCK_SIZE {
self.block[i] = 0;
}
ChainLink::Tail(2).to_bytes(&mut self.block[..]);
// Write the fresh block to the new location
self.blocks
.borrow_mut()
.sector_mut(next_location)?
.copy_from_slice(&self.block);
// Allocate the next block.
self.bam.borrow_mut().allocate(next_location)?;
// Increment the directory entry's file size (measured in blocks)
self.increment_entry_blocks()?;
// If allocation succeeds, only then do we link the current block to the next
// block.
let mut blocks = self.blocks.borrow_mut();
let block = match blocks.sector_mut(self.location) {
Ok(block) => block,
Err(e) => {
// Roll back the allocation.
self.bam.borrow_mut().free(next_location)?;
return Err(e);
}
};
next_location.write_bytes(block);
// Update state
self.location = next_location;
// Return the available bytes in the newly loaded block, which is always two
// less than the block size.
Ok(BLOCK_SIZE - 2)
}
fn | (&mut self) -> io::Result<()> {
// Write the current block
let mut blocks = self.blocks.borrow_mut();
blocks
.sector_mut(self.location)?
.copy_from_slice(&self.block);
Ok(())
}
}
impl Drop for ChainWriter {
fn drop(&mut self) {
let _result = self.flush();
}
}
// NOTE: allocating and updating entry block size should be atomic.
impl io::Write for ChainWriter {
fn write(&mut self, mut buf: &[u8]) -> io::Result<usize> {
self.dirty = true;
let mut total_nbytes = 0;
while !buf.is_empty() {
let (offset, remaining) = match ChainLink::new(&self.block)? {
ChainLink::Next(_) => unreachable!(), // The stored buffer is always a tail block.
ChainLink::Tail(nbytes) if nbytes == BLOCK_SIZE => {
// Allocate a new block
let remaining = self.allocate_next_block()?;
(BLOCK_SIZE - remaining, remaining)
}
ChainLink::Tail(nbytes) => (nbytes, BLOCK_SIZE - nbytes),
};
// Copy as much of the caller-provided buffer as possible into the block.
let nbytes = remaining.min(buf.len());
let _ = &self.block[offset..offset + nbytes].copy_from_slice(&buf[0..nbytes]);
total_nbytes += nbytes;
// Update the block link's indication of used bytes.
ChainLink::Tail(offset + nbytes).to_bytes(&mut self.block);
// Reduce the provided buffer slice to the unwritten portion.
buf = &buf[nbytes..];
}
Ok(total_nbytes)
}
fn flush(&mut self) -> io::Result<()> {
if self.dirty {
// Write the current block
self.write_current_block()?;
// Flush the BAM
self.bam.borrow_mut().flush()?;
// Flush the underlying medium.
let mut blocks = self.blocks.borrow_mut();
blocks.flush()?;
self.dirty = false;
}
Ok(())
}
}
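
// Hedged usage sketch (added, not from the original file): append bytes to an
// existing chain and flush; `bam` and `entry` are assumed to come from the
// surrounding directory code.
#[allow(dead_code)]
fn append_sketch(
    blocks: BlockDeviceRef,
    bam: BamRef,
    entry: DirectoryEntry,
    start: Location,
) -> io::Result<()> {
    let mut writer = ChainWriter::new(blocks, bam, entry, start)?;
    writer.write_all(b"HELLO, DISK")?;
    writer.flush()
}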
pub fn remove_chain(blocks: BlockDeviceRef, bam: BamRef, start: Location) -> io::Result<()> {
// Read the whole chain first to be sure we can visit every block with no
// errors.
let locations = ChainIterator::new(blocks, start).locations()?;
// Deallocate
let mut bam = bam.borrow_mut();
for location in locations {
bam.free(location)?;
}
bam.flush()?;
Ok(())
}
| write_current_block | identifier_name |
TTA.py | #!/usr/bin/env python
# _*_coding:utf-8 _*_
# @Time :2021/6/19 15:58
# @Author :Jiawei Lian
# @FileName: defect_detector
# @Software: PyCharm
from copy import deepcopy
import cv2
import ensemble_boxes
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
from PIL import Image
from torch.hub import load_state_dict_from_url
# from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from faster_rcnn import FastRCNNPredictor
from torchvision.transforms import functional as F, transforms
from torchvision.transforms import transforms as T
import faster_rcnn
class BaseWheatTTA:
""" author: @shonenkov """
image_size = 512
def augment(self, image):
raise NotImplementedError
def batch_augment(self, images):
raise NotImplementedError
def deaugment_boxes(self, boxes, image):
raise NotImplementedError
def get_object_detector(num_classes):
    # load a Faster R-CNN object detection model pre-trained on COCO
model = faster_rcnn.fasterrcnn_resnet50_fpn(pretrained=False)
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
class TTAHorizontalFlip(BaseWheatTTA):
""" author: @shonenkov """
def augment(self, image):
return image.flip(1)
def batch_augment(self, images):
return images.flip(2)
def deaugment_boxes(self, boxes, image):
width = image.width
boxes[:, [2, 0]] = width - boxes[:, [0, 2]]
return boxes
class TTAVerticalFlip(BaseWheatTTA):
""" author: @shonenkov """
def augment(self, image):
        # NOTE: appears to be an intentional no-op for single images; the
        # script applies the actual vertical flip beforehand via F.vflip.
        return image
def batch_augment(self, images):
return images.flip(3)
def deaugment_boxes(self, boxes, image):
height = image.height
boxes[:, [3, 1]] = height - boxes[:, [1, 3]]
return boxes
class TTACompose(BaseWheatTTA):
""" author: @shonenkov """
def __init__(self, transforms):
self.transforms = transforms
def augment(self, image):
for transform in self.transforms:
image = transform.augment(image)
return image
def batch_augment(self, images):
for transform in self.transforms:
images = transform.batch_augment(images)
return images
def prepare_boxes(self, boxes):
        # Normalize each box to (x_min, y_min, x_max, y_max)
        result_boxes = boxes.clone()
        boxes[:, 0], _ = result_boxes[:, [0, 2]].min(1)
        boxes[:, 2], _ = result_boxes[:, [0, 2]].max(1)
        boxes[:, 1], _ = result_boxes[:, [1, 3]].min(1)
        boxes[:, 3], _ = result_boxes[:, [1, 3]].max(1)
return boxes
def deaugment_boxes(self, boxes, image):
for transform in self.transforms[::-1]:
boxes = transform.deaugment_boxes(boxes, image)
return self.prepare_boxes(boxes)
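
# Hedged sketch (added, not from the original script): deaugment_boxes should
# undo augment; the 100x100 image and box values are illustrative only.
# tta = TTACompose([TTAHorizontalFlip()])
# dummy = Image.new("RGB", (100, 100))
# demo = torch.tensor([[10., 20., 40., 50.]])
# print(tta.deaugment_boxes(demo.clone(), dummy))  # tensor([[60., 20., 90., 50.]])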
def tensor_to_PIL(tensor):
image = tensor.cpu().clone()
image = image.squeeze(0)
image = transforms.ToPILImage()(image)
return image
def del_tensor_ele(arr, index):
arr1 = arr[0:index]
arr2 = arr[index + 1:]
return torch.cat((arr1, arr2), dim=0)
def del_under_threshold(result, threshold=0.):
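    # NOTE (assumption): detection outputs are sorted by score in descending
    # order, so every sub-threshold entry sits at the tail and popping
    # len(idxes) items from the end removes exactly those detections.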
idxes = []
for idx in range(len(result[0]['scores'])):
if result[0]['scores'][idx] < threshold:
idxes.append(idx)
for i in idxes:
result[0]['scores'] = del_tensor_ele(result[0]['scores'], len(result[0]['scores']) - 1)
result[0]['labels'] = del_tensor_ele(result[0]['labels'], len(result[0]['labels']) - 1)
result[0]['boxes'] = del_tensor_ele(result[0]['boxes'], len(result[0]['boxes']) - 1)
return result
def del_fusion_under_threshold(boxes, labels, scores, threshold=0.):
idxes = []
for idx in range(len(scores)):
if scores[idx] < threshold:
idxes.append(idx)
for i in idxes:
scores = del_tensor_ele(scores, len(scores) - 1)
labels = del_tensor_ele(labels, len(labels) - 1)
boxes = del_tensor_ele(boxes, len(boxes) - 1)
return boxes, labels, scores
def py_cpu_nms(boxes, scores, thresh=0.55):
"""Pure Python NMS baseline."""
    # Unpack x1, y1, x2, y2 and the confidence scores
    boxes = boxes.detach().numpy()
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    # Area of every detection box
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Sort by confidence score in descending order
    # order = scores.argsort()[::-1]
    all_scores, order = scores.sort(descending=True)
    keep = []  # indices of the boxes that survive suppression
    # print(order)
    while order.numel() > 0:
        i = order[0]
        keep.append(i.numpy())  # keep the highest-scoring remaining box
        # Intersection rectangle: top-left and bottom-right corners
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        # Intersection area (zero when the boxes do not overlap)
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        # IoU = intersection / (area1 + area2 - intersection)
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # Keep only the boxes whose IoU with the current box is below the threshold
        inds = np.where(ovr <= thresh)[0]
        # ovr is one element shorter than order, so shift all indices by one
        order = order[inds + 1]
return keep
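
# Hedged usage sketch (added, not from the original script): two heavily
# overlapping boxes plus one distant box; with thresh=0.55 hard NMS keeps the
# higher-scoring overlap and the distant box (indices 0 and 2).
# demo_boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.], [50., 50., 60., 60.]])
# demo_scores = torch.tensor([0.9, 0.8, 0.7])
# print(py_cpu_nms(demo_boxes, demo_scores, thresh=0.55))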
def soft_nms(bboxes, scores, Nt=0.3, sigma2=0.5, score_thresh=0.001, method=2):
    # Append each box's original index [0, 1, 2, ...] to bboxes, giving shape
    # [n, 5]: the first four columns are coordinates, the last is the index.
    # res_bboxes = deepcopy(bboxes)
    N = bboxes.shape[0]  # total number of boxes
    indexes = np.array([np.arange(N)])  # indices: 0, 1, 2, ..., n-1
    bboxes = bboxes.detach().numpy()
    bboxes = np.concatenate((bboxes, indexes.T), axis=1)  # after concatenate, writes to bboxes no longer affect the caller's tensor
    # Area of every box
x1 = bboxes[:, 0]
y1 = bboxes[:, 1]
x2 = bboxes[:, 2]
y2 = bboxes[:, 3]
scores = scores
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
scores, order = scores.sort(descending=True) | pos = i + 1
if i != N - 1:
maxscore = np.max(scores[pos:], axis=0)
maxpos = np.argmax(scores[pos:], axis=0)
else:
maxscore = scores[-1]
maxpos = 0
        # If the score at position i is lower than the best later score, swap so that i holds the maximum
if scores[i] < maxscore:
bboxes[[i, maxpos + i + 1]] = bboxes[[maxpos + i + 1, i]]
scores[[i, maxpos + i + 1]] = scores[[maxpos + i + 1, i]]
areas[[i, maxpos + i + 1]] = areas[[maxpos + i + 1, i]]
# IoU calculate
xx1 = np.maximum(bboxes[i, 0], bboxes[pos:, 0])
yy1 = np.maximum(bboxes[i, 1], bboxes[pos:, 1])
xx2 = np.minimum(bboxes[i, 2], bboxes[pos:, 2])
yy2 = np.minimum(bboxes[i, 3], bboxes[pos:, 3])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
iou = intersection / (areas[i] + areas[pos:] - intersection)
# Three methods: 1.linear 2.gaussian 3.original NMS
if method == 1: # linear
weight = np.ones(iou.shape)
weight[iou > Nt] = weight[iou > Nt] - iou[iou > Nt]
elif method == 2: # gaussian
weight = np.exp(-(iou * iou) / sigma2)
else: # original NMS
weight = np.ones(iou.shape)
weight[iou > Nt] = 0
scores[pos:] = weight * scores[pos:]
# select the boxes and keep the corresponding indexes
inds = bboxes[:, 4][scores > score_thresh]
keep = inds.astype(int)
return keep
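
# Hedged note (added): soft-NMS only down-weights overlapping boxes, so the
# effective filtering is set by score_thresh; method=1 decays linearly,
# method=2 uses a Gaussian, any other value falls back to hard NMS.
# keep = soft_nms(demo_boxes.clone(), demo_scores.clone(), Nt=0.3, method=2)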
# image_path = './data/Images/2020-01-11_21_43_14_145.jpg'
# image_path = './data/Images/2020-03-07_08_34_30_467.jpg'
# image_path = './data/Images/2020-01-11_21_41_15_002.jpg'
image_path = './data/Images/2020-01-11_21_36_02_642.jpg'
# image_path = './data/Images/2020-03-10_16_18_20_688.jpg'
# image_path = './data/Images/2021-05-29-18-44-02.jpg'
# image_path = './data/Images/2021-05-16-18-51-54.jpg'
# image_path = './data/Images/2021-05-16-14-58-28.jpg'
# model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/0.5959/model_23_5959_5288.pth'
# model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/model_0.pth'
model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/0.5932/model_8_5932.pth'
results = []
predictions = []
# you can try your own combinations:
transform1 = TTACompose([
TTAHorizontalFlip(),
# TTAVerticalFlip()
])
transform2 = TTACompose([
# TTAHorizontalFlip(),
TTAVerticalFlip()
])
fig, ax = plt.subplots(3, 2, figsize=(16, 10))
image1 = Image.open(image_path).convert("RGB")
image1_vf = F.vflip(image1)
image_tensor = torch.from_numpy(np.array(image1))
image_tensor_vf = torch.from_numpy(np.array(image1_vf))
# image_tensor = image_tensor.permute(0, 1, 2)
image_numpy_vf = image_tensor_vf.cpu().numpy().copy()
image_numpy = image_tensor.cpu().numpy().copy()
image_numpy1 = image_tensor.cpu().numpy().copy()
image_numpy2 = image_tensor.cpu().numpy().copy()
image_numpy3 = image_tensor.cpu().numpy().copy()
# ax[0, 0].imshow(image)
# ax[0, 0].set_title('original')
tta_image1 = transform1.augment(image_tensor)
tta_image2 = transform2.augment(image_tensor_vf)
tta_image1_numpy = tta_image1.numpy().copy()
tta_image2_numpy = image_tensor_vf.numpy().copy()
tta_image1 = Image.fromarray(tta_image1_numpy)
tta_image2 = Image.fromarray(tta_image2_numpy)
########################################################################
# tta_image1 prediction
preprocessed_image = torch.unsqueeze(F.to_tensor(tta_image1), dim=0)
model = get_object_detector(11)
model.load_state_dict(
torch.load(model_path, map_location='cpu')[
'model'])
model.eval()
result = model(preprocessed_image)
result = del_under_threshold(result)
print('tta_image prediction:', result)
boxes3 = result[0]['boxes']
scores3 = result[0]['scores']
labels3 = result[0]['labels']
for box in boxes3:
cv2.rectangle(tta_image1_numpy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[0, 0].imshow(tta_image1_numpy)
ax[0, 0].set_title('Augment1')
###################################################################
# deaugmentation prediction
boxes3 = transform1.deaugment_boxes(boxes3, image1)
results.append({
'boxes': boxes3,
'scores': scores3,
'labels': labels3,
})
for box in boxes3:
cv2.rectangle(image_numpy1, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[0, 1].imshow(image_numpy1)
ax[0, 1].set_title('Deaugment1')
#########################################################
########################################################################
# tta_image2 prediction
preprocessed_image = torch.unsqueeze(F.to_tensor(tta_image2), dim=0)
model = get_object_detector(11)
model.load_state_dict(
torch.load(model_path, map_location='cpu')[
'model'])
model.eval()
result = model(preprocessed_image)
result = del_under_threshold(result)
print('tta_image prediction:', result)
boxes4 = result[0]['boxes']
scores4 = result[0]['scores']
labels4 = result[0]['labels']
for box in boxes4:
cv2.rectangle(tta_image2_numpy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[1, 0].imshow(tta_image2_numpy)
ax[1, 0].set_title('Augment2')
###################################################################
# deaugmentation prediction
boxes4 = transform2.deaugment_boxes(boxes4, image1_vf)
results.append({
'boxes': boxes4,
'scores': scores4,
'labels': labels4,
})
for box in boxes4:
cv2.rectangle(image_numpy3, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[1, 1].imshow(image_numpy3)
ax[1, 1].set_title('Deaugment2')
#########################################################
# original_image prediction
preprocessed_image = torch.unsqueeze(F.to_tensor(image1), dim=0)
model = get_object_detector(11)
model.load_state_dict(
torch.load(model_path, map_location='cpu')[
'model'])
model.eval()
result_original_image = model(preprocessed_image)
result_original_image = del_under_threshold(result_original_image)
print('original image prediction:', result_original_image)
boxes2 = result_original_image[0]['boxes']
scores2 = result_original_image[0]['scores']
labels2 = result_original_image[0]['labels']
results.append({
'boxes': boxes2,
'scores': scores2,
'labels': labels2,
})
for box in boxes2:
cv2.rectangle(image_numpy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[2, 0].imshow(image_numpy)
ax[2, 0].set_title('Original')
#######################################################
# # weighted boxes fusion
# predictions.append(results)
# boxes1, scores1, labels1 = run_wbf(predictions)
temp_all_boxes = torch.cat((boxes3, boxes2, boxes4), 0)
all_labels = torch.cat((labels3, labels2, labels4), 0)
all_scores = torch.cat((scores3, scores2, scores4), 0)
_, indices = all_scores.sort(descending=True)
all_labels = all_labels.gather(dim=0, index=indices)
all_scores = all_scores.gather(dim=0, index=indices)
all_boxes = torch.empty(len(indices), 4)
for i in range(len(indices)):
all_boxes[i] = temp_all_boxes[indices[i]]
all_boxes, all_labels, all_scores = del_fusion_under_threshold(all_boxes, all_labels, all_scores)
keep = py_cpu_nms(all_boxes, all_scores)
# keep = soft_nms(all_boxes, all_scores)
# scores1 = torch.from_numpy(scores1)
# boxes1 = torch.from_numpy(boxes1)
# labels1 = torch.from_numpy(labels1)
# temp_all_boxes = torch.cat((boxes2, boxes1), 0)
# all_labels = torch.cat((labels2, labels1), 0)
# all_scores = torch.cat((scores2, scores1), 0)
# print(boxes1, scores1, labels1)
#
# for box in boxes1:
# cv2.rectangle(image_numpy2, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
#
# ax[1, 1].imshow(image_numpy2)
# ax[1, 1].set_title('predictions fusion')
# all_scores, indices = all_scores.sort(descending=True)
# all_labels = all_labels.gather(dim=0, index=indices)
# all_boxes = torch.empty(len(indices), 4)
# Clone so that writing all_*1[i] below cannot clobber entries of the source
# tensors that are still to be read (slicing alone would share storage).
all_scores1 = all_scores[:len(keep)].clone()
all_labels1 = all_labels[:len(keep)].clone()
all_boxes1 = all_boxes[:len(keep)].clone()
for i in range(len(keep)):
all_scores1[i] = all_scores[keep[i]]
all_labels1[i] = all_labels[keep[i]]
all_boxes1[i] = all_boxes[keep[i]]
labels = ["",
"connection_edge_defect",
"right_angle_edge_defect",
"cavity_defect",
"burr_defect",
"huahen",
"mosun",
"yanse",
'basi',
'jianju',
'chuizhidu', ]
i = 0
for box in all_boxes1:
cv2.rectangle(image_numpy2, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
# add label
# if box[1] > 10:
# cv2.putText(image_numpy2, labels[all_labels1[i]], (int(box[0]), int(box[1] - 6)),
# cv2.FONT_HERSHEY_COMPLEX_SMALL, 5,
# (255, 255, 0))
# else:
# cv2.putText(image_numpy2, labels[all_labels1[i]], (int(box[0]), int(box[1] + 15)),
# cv2.FONT_HERSHEY_COMPLEX_SMALL, 5,
# (255, 255, 0))
# i += 1
ax[2, 1].imshow(image_numpy2)
ax[2, 1].set_title('Fusion')
# Image._show(Image.fromarray(image_numpy2))
# Image.fromarray(image_numpy2).save('prediction.jpg')
print('fusion prediction:')
print(all_labels1)
print(all_scores1)
print(all_boxes1) | scores = scores.detach().numpy()
for i in range(N):
# Find the maximum score after position i and its index | random_line_split
TTA.py | #!/usr/bin/env python
# _*_coding:utf-8 _*_
# @Time :2021/6/19 15:58
# @Author :Jiawei Lian
# @FileName: defect_detector
# @Software: PyCharm
from copy import deepcopy
import cv2
import ensemble_boxes
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
from PIL import Image
from torch.hub import load_state_dict_from_url
# from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from faster_rcnn import FastRCNNPredictor
from torchvision.transforms import functional as F, transforms
from torchvision.transforms import transforms as T
import faster_rcnn
class BaseWheatTTA:
""" author: @shonenkov """
image_size = 512
def augment(self, image):
raise NotImplementedError
def batch_augment(self, images):
raise NotImplementedError
def deaugment_boxes(self, boxes, image):
raise NotImplementedError
def get_object_detector(num_classes):
    # load a Faster R-CNN object detection model pre-trained on COCO
model = faster_rcnn.fasterrcnn_resnet50_fpn(pretrained=False)
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
class TTAHorizontalFlip(BaseWheatTTA):
""" author: @shonenkov """
def augment(self, image):
return image.flip(1)
def batch_augment(self, images):
return images.flip(2)
def deaugment_boxes(self, boxes, image):
width = image.width
boxes[:, [2, 0]] = width - boxes[:, [0, 2]]
return boxes
class TTAVerticalFlip(BaseWheatTTA):
""" author: @shonenkov """
def augment(self, image):
        # NOTE: appears to be an intentional no-op for single images; the
        # script applies the actual vertical flip beforehand via F.vflip.
        return image
def batch_augment(self, images):
return images.flip(3)
def deaugment_boxes(self, boxes, image):
height = image.height
boxes[:, [3, 1]] = height - boxes[:, [1, 3]]
return boxes
class TTACompose(BaseWheatTTA):
""" author: @shonenkov """
def __init__(self, transforms):
self.transforms = transforms
def augment(self, image):
for transform in self.transforms:
image = transform.augment(image)
return image
def batch_augment(self, images):
for transform in self.transforms:
images = transform.batch_augment(images)
return images
def prepare_boxes(self, boxes):
        # Normalize each box to (x_min, y_min, x_max, y_max)
        result_boxes = boxes.clone()
        boxes[:, 0], _ = result_boxes[:, [0, 2]].min(1)
        boxes[:, 2], _ = result_boxes[:, [0, 2]].max(1)
        boxes[:, 1], _ = result_boxes[:, [1, 3]].min(1)
        boxes[:, 3], _ = result_boxes[:, [1, 3]].max(1)
return boxes
def deaugment_boxes(self, boxes, image):
for transform in self.transforms[::-1]:
boxes = transform.deaugment_boxes(boxes, image)
return self.prepare_boxes(boxes)
def tensor_to_PIL(tensor):
image = tensor.cpu().clone()
image = image.squeeze(0)
image = transforms.ToPILImage()(image)
return image
def del_tensor_ele(arr, index):
arr1 = arr[0:index]
arr2 = arr[index + 1:]
return torch.cat((arr1, arr2), dim=0)
def del_under_threshold(result, threshold=0.):
idxes = []
for idx in range(len(result[0]['scores'])):
if result[0]['scores'][idx] < threshold:
|
for i in idxes:
result[0]['scores'] = del_tensor_ele(result[0]['scores'], len(result[0]['scores']) - 1)
result[0]['labels'] = del_tensor_ele(result[0]['labels'], len(result[0]['labels']) - 1)
result[0]['boxes'] = del_tensor_ele(result[0]['boxes'], len(result[0]['boxes']) - 1)
return result
def del_fusion_under_threshold(boxes, labels, scores, threshold=0.):
idxes = []
for idx in range(len(scores)):
if scores[idx] < threshold:
idxes.append(idx)
for i in idxes:
scores = del_tensor_ele(scores, len(scores) - 1)
labels = del_tensor_ele(labels, len(labels) - 1)
boxes = del_tensor_ele(boxes, len(boxes) - 1)
return boxes, labels, scores
def py_cpu_nms(boxes, scores, thresh=0.55):
"""Pure Python NMS baseline."""
    # Unpack x1, y1, x2, y2 and the confidence scores
    boxes = boxes.detach().numpy()
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    # Area of every detection box
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Sort by confidence score in descending order
    # order = scores.argsort()[::-1]
    all_scores, order = scores.sort(descending=True)
    keep = []  # indices of the boxes that survive suppression
    # print(order)
    while order.numel() > 0:
        i = order[0]
        keep.append(i.numpy())  # keep the highest-scoring remaining box
        # Intersection rectangle: top-left and bottom-right corners
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        # Intersection area (zero when the boxes do not overlap)
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        # IoU = intersection / (area1 + area2 - intersection)
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # Keep only the boxes whose IoU with the current box is below the threshold
        inds = np.where(ovr <= thresh)[0]
        # ovr is one element shorter than order, so shift all indices by one
        order = order[inds + 1]
return keep
def soft_nms(bboxes, scores, Nt=0.3, sigma2=0.5, score_thresh=0.001, method=2):
    # Append each box's original index [0, 1, 2, ...] to bboxes, giving shape
    # [n, 5]: the first four columns are coordinates, the last is the index.
    # res_bboxes = deepcopy(bboxes)
    N = bboxes.shape[0]  # total number of boxes
    indexes = np.array([np.arange(N)])  # indices: 0, 1, 2, ..., n-1
    bboxes = bboxes.detach().numpy()
    bboxes = np.concatenate((bboxes, indexes.T), axis=1)  # after concatenate, writes to bboxes no longer affect the caller's tensor
    # Area of every box
x1 = bboxes[:, 0]
y1 = bboxes[:, 1]
x2 = bboxes[:, 2]
y2 = bboxes[:, 3]
scores = scores
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Sort scores in descending order and reorder boxes/areas identically so
    # that scores[i] keeps corresponding to bboxes[i].
    scores, order = scores.sort(descending=True)
    scores = scores.detach().numpy()
    order = order.numpy()
    bboxes = bboxes[order]
    areas = areas[order]
for i in range(N):
        # Find the maximum score after position i and its index
pos = i + 1
if i != N - 1:
maxscore = np.max(scores[pos:], axis=0)
maxpos = np.argmax(scores[pos:], axis=0)
else:
maxscore = scores[-1]
maxpos = 0
        # If the score at position i is lower than the best later score, swap so that i holds the maximum
if scores[i] < maxscore:
bboxes[[i, maxpos + i + 1]] = bboxes[[maxpos + i + 1, i]]
scores[[i, maxpos + i + 1]] = scores[[maxpos + i + 1, i]]
areas[[i, maxpos + i + 1]] = areas[[maxpos + i + 1, i]]
# IoU calculate
xx1 = np.maximum(bboxes[i, 0], bboxes[pos:, 0])
yy1 = np.maximum(bboxes[i, 1], bboxes[pos:, 1])
xx2 = np.minimum(bboxes[i, 2], bboxes[pos:, 2])
yy2 = np.minimum(bboxes[i, 3], bboxes[pos:, 3])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
iou = intersection / (areas[i] + areas[pos:] - intersection)
# Three methods: 1.linear 2.gaussian 3.original NMS
if method == 1: # linear
weight = np.ones(iou.shape)
weight[iou > Nt] = weight[iou > Nt] - iou[iou > Nt]
elif method == 2: # gaussian
weight = np.exp(-(iou * iou) / sigma2)
else: # original NMS
weight = np.ones(iou.shape)
weight[iou > Nt] = 0
scores[pos:] = weight * scores[pos:]
# select the boxes and keep the corresponding indexes
inds = bboxes[:, 4][scores > score_thresh]
keep = inds.astype(int)
return keep
# image_path = './data/Images/2020-01-11_21_43_14_145.jpg'
# image_path = './data/Images/2020-03-07_08_34_30_467.jpg'
# image_path = './data/Images/2020-01-11_21_41_15_002.jpg'
image_path = './data/Images/2020-01-11_21_36_02_642.jpg'
# image_path = './data/Images/2020-03-10_16_18_20_688.jpg'
# image_path = './data/Images/2021-05-29-18-44-02.jpg'
# image_path = './data/Images/2021-05-16-18-51-54.jpg'
# image_path = './data/Images/2021-05-16-14-58-28.jpg'
# model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/0.5959/model_23_5959_5288.pth'
# model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/model_0.pth'
model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/0.5932/model_8_5932.pth'
results = []
predictions = []
# you can try your own combinations:
transform1 = TTACompose([
TTAHorizontalFlip(),
# TTAVerticalFlip()
])
transform2 = TTACompose([
# TTAHorizontalFlip(),
TTAVerticalFlip()
])
fig, ax = plt.subplots(3, 2, figsize=(16, 10))
image1 = Image.open(image_path).convert("RGB")
image1_vf = F.vflip(image1)
image_tensor = torch.from_numpy(np.array(image1))
image_tensor_vf = torch.from_numpy(np.array(image1_vf))
# image_tensor = image_tensor.permute(0, 1, 2)
image_numpy_vf = image_tensor_vf.cpu().numpy().copy()
image_numpy = image_tensor.cpu().numpy().copy()
image_numpy1 = image_tensor.cpu().numpy().copy()
image_numpy2 = image_tensor.cpu().numpy().copy()
image_numpy3 = image_tensor.cpu().numpy().copy()
# ax[0, 0].imshow(image)
# ax[0, 0].set_title('original')
tta_image1 = transform1.augment(image_tensor)
tta_image2 = transform2.augment(image_tensor_vf)
tta_image1_numpy = tta_image1.numpy().copy()
tta_image2_numpy = image_tensor_vf.numpy().copy()
tta_image1 = Image.fromarray(tta_image1_numpy)
tta_image2 = Image.fromarray(tta_image2_numpy)
########################################################################
# tta_image1 prediction
preprocessed_image = torch.unsqueeze(F.to_tensor(tta_image1), dim=0)
model = get_object_detector(11)
model.load_state_dict(
torch.load(model_path, map_location='cpu')[
'model'])
model.eval()
result = model(preprocessed_image)
result = del_under_threshold(result)
print('tta_image prediction:', result)
boxes3 = result[0]['boxes']
scores3 = result[0]['scores']
labels3 = result[0]['labels']
for box in boxes3:
cv2.rectangle(tta_image1_numpy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[0, 0].imshow(tta_image1_numpy)
ax[0, 0].set_title('Augment1')
###################################################################
# deaugmentation prediction
boxes3 = transform1.deaugment_boxes(boxes3, image1)
results.append({
'boxes': boxes3,
'scores': scores3,
'labels': labels3,
})
for box in boxes3:
cv2.rectangle(image_numpy1, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[0, 1].imshow(image_numpy1)
ax[0, 1].set_title('Deaugment1')
#########################################################
########################################################################
# tta_image2 prediction
preprocessed_image = torch.unsqueeze(F.to_tensor(tta_image2), dim=0)
model = get_object_detector(11)
model.load_state_dict(
torch.load(model_path, map_location='cpu')[
'model'])
model.eval()
result = model(preprocessed_image)
result = del_under_threshold(result)
print('tta_image prediction:', result)
boxes4 = result[0]['boxes']
scores4 = result[0]['scores']
labels4 = result[0]['labels']
for box in boxes4:
cv2.rectangle(tta_image2_numpy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[1, 0].imshow(tta_image2_numpy)
ax[1, 0].set_title('Augment2')
###################################################################
# deaugmentation prediction
boxes4 = transform2.deaugment_boxes(boxes4, image1_vf)
results.append({
'boxes': boxes4,
'scores': scores4,
'labels': labels4,
})
for box in boxes4:
cv2.rectangle(image_numpy3, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[1, 1].imshow(image_numpy3)
ax[1, 1].set_title('Deaugment2')
#########################################################
# original_image prediction
preprocessed_image = torch.unsqueeze(F.to_tensor(image1), dim=0)
model = get_object_detector(11)
model.load_state_dict(
torch.load(model_path, map_location='cpu')[
'model'])
model.eval()
result_original_image = model(preprocessed_image)
result_original_image = del_under_threshold(result_original_image)
print('original image prediction:', result_original_image)
boxes2 = result_original_image[0]['boxes']
scores2 = result_original_image[0]['scores']
labels2 = result_original_image[0]['labels']
results.append({
'boxes': boxes2,
'scores': scores2,
'labels': labels2,
})
for box in boxes2:
cv2.rectangle(image_numpy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[2, 0].imshow(image_numpy)
ax[2, 0].set_title('Original')
#######################################################
# # weighted boxes fusion
# predictions.append(results)
# boxes1, scores1, labels1 = run_wbf(predictions)
temp_all_boxes = torch.cat((boxes3, boxes2, boxes4), 0)
all_labels = torch.cat((labels3, labels2, labels4), 0)
all_scores = torch.cat((scores3, scores2, scores4), 0)
_, indices = all_scores.sort(descending=True)
all_labels = all_labels.gather(dim=0, index=indices)
all_scores = all_scores.gather(dim=0, index=indices)
all_boxes = torch.empty(len(indices), 4)
for i in range(len(indices)):
all_boxes[i] = temp_all_boxes[indices[i]]
all_boxes, all_labels, all_scores = del_fusion_under_threshold(all_boxes, all_labels, all_scores)
keep = py_cpu_nms(all_boxes, all_scores)
# keep = soft_nms(all_boxes, all_scores)
# scores1 = torch.from_numpy(scores1)
# boxes1 = torch.from_numpy(boxes1)
# labels1 = torch.from_numpy(labels1)
# temp_all_boxes = torch.cat((boxes2, boxes1), 0)
# all_labels = torch.cat((labels2, labels1), 0)
# all_scores = torch.cat((scores2, scores1), 0)
# print(boxes1, scores1, labels1)
#
# for box in boxes1:
# cv2.rectangle(image_numpy2, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
#
# ax[1, 1].imshow(image_numpy2)
# ax[1, 1].set_title('predictions fusion')
# all_scores, indices = all_scores.sort(descending=True)
# all_labels = all_labels.gather(dim=0, index=indices)
# all_boxes = torch.empty(len(indices), 4)
# Clone so that writing all_*1[i] below cannot clobber entries of the source
# tensors that are still to be read (slicing alone would share storage).
all_scores1 = all_scores[:len(keep)].clone()
all_labels1 = all_labels[:len(keep)].clone()
all_boxes1 = all_boxes[:len(keep)].clone()
for i in range(len(keep)):
all_scores1[i] = all_scores[keep[i]]
all_labels1[i] = all_labels[keep[i]]
all_boxes1[i] = all_boxes[keep[i]]
labels = ["",
"connection_edge_defect",
"right_angle_edge_defect",
"cavity_defect",
"burr_defect",
"huahen",
"mosun",
"yanse",
'basi',
'jianju',
'chuizhidu', ]
i = 0
for box in all_boxes1:
cv2.rectangle(image_numpy2, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
# add label
# if box[1] > 10:
# cv2.putText(image_numpy2, labels[all_labels1[i]], (int(box[0]), int(box[1] - 6)),
# cv2.FONT_HERSHEY_COMPLEX_SMALL, 5,
# (255, 255, 0))
# else:
# cv2.putText(image_numpy2, labels[all_labels1[i]], (int(box[0]), int(box[1] + 15)),
# cv2.FONT_HERSHEY_COMPLEX_SMALL, 5,
# (255, 255, 0))
# i += 1
ax[2, 1].imshow(image_numpy2)
ax[2, 1].set_title('Fusion')
# Image._show(Image.fromarray(image_numpy2))
# Image.fromarray(image_numpy2).save('prediction.jpg')
print('fusion prediction:')
print(all_labels1)
print(all_scores1)
print(all_boxes1)
| idxes.append(idx) | conditional_block |
TTA.py | #!/usr/bin/env python
# _*_coding:utf-8 _*_
# @Time :2021/6/19 15:58
# @Author :Jiawei Lian
# @FileName: defect_detector
# @Software: PyCharm
from copy import deepcopy
import cv2
import ensemble_boxes
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
from PIL import Image
from torch.hub import load_state_dict_from_url
# from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from faster_rcnn import FastRCNNPredictor
from torchvision.transforms import functional as F, transforms
from torchvision.transforms import transforms as T
import faster_rcnn
class BaseWheatTTA:
""" author: @shonenkov """
image_size = 512
def augment(self, image):
raise NotImplementedError
def batch_augment(self, images):
raise NotImplementedError
def deaugment_boxes(self, boxes, image):
raise NotImplementedError
def get_object_detector(num_classes):
    # load a Faster R-CNN object detection model pre-trained on COCO
model = faster_rcnn.fasterrcnn_resnet50_fpn(pretrained=False)
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
class TTAHorizontalFlip(BaseWheatTTA):
""" author: @shonenkov """
def augment(self, image):
return image.flip(1)
def batch_augment(self, images):
return images.flip(2)
def deaugment_boxes(self, boxes, image):
|
class TTAVerticalFlip(BaseWheatTTA):
""" author: @shonenkov """
def augment(self, image):
        # NOTE: appears to be an intentional no-op for single images; the
        # script applies the actual vertical flip beforehand via F.vflip.
        return image
def batch_augment(self, images):
return images.flip(3)
def deaugment_boxes(self, boxes, image):
height = image.height
boxes[:, [3, 1]] = height - boxes[:, [1, 3]]
return boxes
class TTACompose(BaseWheatTTA):
""" author: @shonenkov """
def __init__(self, transforms):
self.transforms = transforms
def augment(self, image):
for transform in self.transforms:
image = transform.augment(image)
return image
def batch_augment(self, images):
for transform in self.transforms:
images = transform.batch_augment(images)
return images
def prepare_boxes(self, boxes):
        # Normalize each box to (x_min, y_min, x_max, y_max)
        result_boxes = boxes.clone()
        boxes[:, 0], _ = result_boxes[:, [0, 2]].min(1)
        boxes[:, 2], _ = result_boxes[:, [0, 2]].max(1)
        boxes[:, 1], _ = result_boxes[:, [1, 3]].min(1)
        boxes[:, 3], _ = result_boxes[:, [1, 3]].max(1)
return boxes
def deaugment_boxes(self, boxes, image):
for transform in self.transforms[::-1]:
boxes = transform.deaugment_boxes(boxes, image)
return self.prepare_boxes(boxes)
def tensor_to_PIL(tensor):
image = tensor.cpu().clone()
image = image.squeeze(0)
image = transforms.ToPILImage()(image)
return image
def del_tensor_ele(arr, index):
arr1 = arr[0:index]
arr2 = arr[index + 1:]
return torch.cat((arr1, arr2), dim=0)
def del_under_threshold(result, threshold=0.):
idxes = []
for idx in range(len(result[0]['scores'])):
if result[0]['scores'][idx] < threshold:
idxes.append(idx)
for i in idxes:
result[0]['scores'] = del_tensor_ele(result[0]['scores'], len(result[0]['scores']) - 1)
result[0]['labels'] = del_tensor_ele(result[0]['labels'], len(result[0]['labels']) - 1)
result[0]['boxes'] = del_tensor_ele(result[0]['boxes'], len(result[0]['boxes']) - 1)
return result
def del_fusion_under_threshold(boxes, labels, scores, threshold=0.):
idxes = []
for idx in range(len(scores)):
if scores[idx] < threshold:
idxes.append(idx)
for i in idxes:
scores = del_tensor_ele(scores, len(scores) - 1)
labels = del_tensor_ele(labels, len(labels) - 1)
boxes = del_tensor_ele(boxes, len(boxes) - 1)
return boxes, labels, scores
def py_cpu_nms(boxes, scores, thresh=0.55):
"""Pure Python NMS baseline."""
    # Unpack x1, y1, x2, y2 and the confidence scores
    boxes = boxes.detach().numpy()
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    # Area of every detection box
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Sort by confidence score in descending order
    # order = scores.argsort()[::-1]
    all_scores, order = scores.sort(descending=True)
    keep = []  # indices of the boxes that survive suppression
    # print(order)
    while order.numel() > 0:
        i = order[0]
        keep.append(i.numpy())  # keep the highest-scoring remaining box
        # Intersection rectangle: top-left and bottom-right corners
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        # Intersection area (zero when the boxes do not overlap)
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        # IoU = intersection / (area1 + area2 - intersection)
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # Keep only the boxes whose IoU with the current box is below the threshold
        inds = np.where(ovr <= thresh)[0]
        # ovr is one element shorter than order, so shift all indices by one
        order = order[inds + 1]
return keep
def soft_nms(bboxes, scores, Nt=0.3, sigma2=0.5, score_thresh=0.001, method=2):
    # Append each box's original index [0, 1, 2, ...] to bboxes, giving shape
    # [n, 5]: the first four columns are coordinates, the last is the index.
    # res_bboxes = deepcopy(bboxes)
    N = bboxes.shape[0]  # total number of boxes
    indexes = np.array([np.arange(N)])  # indices: 0, 1, 2, ..., n-1
    bboxes = bboxes.detach().numpy()
    bboxes = np.concatenate((bboxes, indexes.T), axis=1)  # after concatenate, writes to bboxes no longer affect the caller's tensor
    # Area of every box
x1 = bboxes[:, 0]
y1 = bboxes[:, 1]
x2 = bboxes[:, 2]
y2 = bboxes[:, 3]
scores = scores
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Sort scores in descending order and reorder boxes/areas identically so
    # that scores[i] keeps corresponding to bboxes[i].
    scores, order = scores.sort(descending=True)
    scores = scores.detach().numpy()
    order = order.numpy()
    bboxes = bboxes[order]
    areas = areas[order]
for i in range(N):
        # Find the maximum score after position i and its index
pos = i + 1
if i != N - 1:
maxscore = np.max(scores[pos:], axis=0)
maxpos = np.argmax(scores[pos:], axis=0)
else:
maxscore = scores[-1]
maxpos = 0
        # If the score at position i is lower than the best later score, swap so that i holds the maximum
if scores[i] < maxscore:
bboxes[[i, maxpos + i + 1]] = bboxes[[maxpos + i + 1, i]]
scores[[i, maxpos + i + 1]] = scores[[maxpos + i + 1, i]]
areas[[i, maxpos + i + 1]] = areas[[maxpos + i + 1, i]]
# IoU calculate
xx1 = np.maximum(bboxes[i, 0], bboxes[pos:, 0])
yy1 = np.maximum(bboxes[i, 1], bboxes[pos:, 1])
xx2 = np.minimum(bboxes[i, 2], bboxes[pos:, 2])
yy2 = np.minimum(bboxes[i, 3], bboxes[pos:, 3])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
iou = intersection / (areas[i] + areas[pos:] - intersection)
# Three methods: 1.linear 2.gaussian 3.original NMS
if method == 1: # linear
weight = np.ones(iou.shape)
weight[iou > Nt] = weight[iou > Nt] - iou[iou > Nt]
elif method == 2: # gaussian
weight = np.exp(-(iou * iou) / sigma2)
else: # original NMS
weight = np.ones(iou.shape)
weight[iou > Nt] = 0
scores[pos:] = weight * scores[pos:]
# select the boxes and keep the corresponding indexes
inds = bboxes[:, 4][scores > score_thresh]
keep = inds.astype(int)
return keep
# image_path = './data/Images/2020-01-11_21_43_14_145.jpg'
# image_path = './data/Images/2020-03-07_08_34_30_467.jpg'
# image_path = './data/Images/2020-01-11_21_41_15_002.jpg'
image_path = './data/Images/2020-01-11_21_36_02_642.jpg'
# image_path = './data/Images/2020-03-10_16_18_20_688.jpg'
# image_path = './data/Images/2021-05-29-18-44-02.jpg'
# image_path = './data/Images/2021-05-16-18-51-54.jpg'
# image_path = './data/Images/2021-05-16-14-58-28.jpg'
# model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/0.5959/model_23_5959_5288.pth'
# model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/model_0.pth'
model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/0.5932/model_8_5932.pth'
results = []
predictions = []
# you can try your own combinations:
transform1 = TTACompose([
TTAHorizontalFlip(),
# TTAVerticalFlip()
])
transform2 = TTACompose([
# TTAHorizontalFlip(),
TTAVerticalFlip()
])
fig, ax = plt.subplots(3, 2, figsize=(16, 10))
image1 = Image.open(image_path).convert("RGB")
image1_vf = F.vflip(image1)
image_tensor = torch.from_numpy(np.array(image1))
image_tensor_vf = torch.from_numpy(np.array(image1_vf))
# image_tensor = image_tensor.permute(0, 1, 2)
image_numpy_vf = image_tensor_vf.cpu().numpy().copy()
image_numpy = image_tensor.cpu().numpy().copy()
image_numpy1 = image_tensor.cpu().numpy().copy()
image_numpy2 = image_tensor.cpu().numpy().copy()
image_numpy3 = image_tensor.cpu().numpy().copy()
# ax[0, 0].imshow(image)
# ax[0, 0].set_title('original')
tta_image1 = transform1.augment(image_tensor)
tta_image2 = transform2.augment(image_tensor_vf)
tta_image1_numpy = tta_image1.numpy().copy()
tta_image2_numpy = image_tensor_vf.numpy().copy()
tta_image1 = Image.fromarray(tta_image1_numpy)
tta_image2 = Image.fromarray(tta_image2_numpy)
########################################################################
# tta_image1 prediction
preprocessed_image = torch.unsqueeze(F.to_tensor(tta_image1), dim=0)
model = get_object_detector(11)
model.load_state_dict(
torch.load(model_path, map_location='cpu')[
'model'])
model.eval()
result = model(preprocessed_image)
result = del_under_threshold(result)
print('tta_image prediction:', result)
boxes3 = result[0]['boxes']
scores3 = result[0]['scores']
labels3 = result[0]['labels']
for box in boxes3:
cv2.rectangle(tta_image1_numpy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[0, 0].imshow(tta_image1_numpy)
ax[0, 0].set_title('Augment1')
###################################################################
# deaugmentation prediction
boxes3 = transform1.deaugment_boxes(boxes3, image1)
results.append({
'boxes': boxes3,
'scores': scores3,
'labels': labels3,
})
for box in boxes3:
cv2.rectangle(image_numpy1, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[0, 1].imshow(image_numpy1)
ax[0, 1].set_title('Deaugment1')
#########################################################
########################################################################
# tta_image2 prediction
preprocessed_image = torch.unsqueeze(F.to_tensor(tta_image2), dim=0)
model = get_object_detector(11)
model.load_state_dict(
torch.load(model_path, map_location='cpu')[
'model'])
model.eval()
result = model(preprocessed_image)
result = del_under_threshold(result)
print('tta_image prediction:', result)
boxes4 = result[0]['boxes']
scores4 = result[0]['scores']
labels4 = result[0]['labels']
for box in boxes4:
cv2.rectangle(tta_image2_numpy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[1, 0].imshow(tta_image2_numpy)
ax[1, 0].set_title('Augment2')
###################################################################
# deaugmentation prediction
boxes4 = transform2.deaugment_boxes(boxes4, image1_vf)
results.append({
'boxes': boxes4,
'scores': scores4,
'labels': labels4,
})
for box in boxes4:
cv2.rectangle(image_numpy3, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[1, 1].imshow(image_numpy3)
ax[1, 1].set_title('Deaugment2')
#########################################################
# original_image prediction
preprocessed_image = torch.unsqueeze(F.to_tensor(image1), dim=0)
model = get_object_detector(11)
model.load_state_dict(
torch.load(model_path, map_location='cpu')[
'model'])
model.eval()
result_original_image = model(preprocessed_image)
result_original_image = del_under_threshold(result_original_image)
print('original image prediction:', result_original_image)
boxes2 = result_original_image[0]['boxes']
scores2 = result_original_image[0]['scores']
labels2 = result_original_image[0]['labels']
results.append({
'boxes': boxes2,
'scores': scores2,
'labels': labels2,
})
for box in boxes2:
cv2.rectangle(image_numpy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[2, 0].imshow(image_numpy)
ax[2, 0].set_title('Original')
#######################################################
# # weighted boxes fusion
# predictions.append(results)
# boxes1, scores1, labels1 = run_wbf(predictions)
temp_all_boxes = torch.cat((boxes3, boxes2, boxes4), 0)
all_labels = torch.cat((labels3, labels2, labels4), 0)
all_scores = torch.cat((scores3, scores2, scores4), 0)
_, indices = all_scores.sort(descending=True)
all_labels = all_labels.gather(dim=0, index=indices)
all_scores = all_scores.gather(dim=0, index=indices)
all_boxes = torch.empty(len(indices), 4)
for i in range(len(indices)):
all_boxes[i] = temp_all_boxes[indices[i]]
all_boxes, all_labels, all_scores = del_fusion_under_threshold(all_boxes, all_labels, all_scores)
keep = py_cpu_nms(all_boxes, all_scores)
# keep = soft_nms(all_boxes, all_scores)
# scores1 = torch.from_numpy(scores1)
# boxes1 = torch.from_numpy(boxes1)
# labels1 = torch.from_numpy(labels1)
# temp_all_boxes = torch.cat((boxes2, boxes1), 0)
# all_labels = torch.cat((labels2, labels1), 0)
# all_scores = torch.cat((scores2, scores1), 0)
# print(boxes1, scores1, labels1)
#
# for box in boxes1:
# cv2.rectangle(image_numpy2, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
#
# ax[1, 1].imshow(image_numpy2)
# ax[1, 1].set_title('predictions fusion')
# all_scores, indices = all_scores.sort(descending=True)
# all_labels = all_labels.gather(dim=0, index=indices)
# all_boxes = torch.empty(len(indices), 4)
# Clone so that writing all_*1[i] below cannot clobber entries of the source
# tensors that are still to be read (slicing alone would share storage).
all_scores1 = all_scores[:len(keep)].clone()
all_labels1 = all_labels[:len(keep)].clone()
all_boxes1 = all_boxes[:len(keep)].clone()
for i in range(len(keep)):
all_scores1[i] = all_scores[keep[i]]
all_labels1[i] = all_labels[keep[i]]
all_boxes1[i] = all_boxes[keep[i]]
labels = ["",
"connection_edge_defect",
"right_angle_edge_defect",
"cavity_defect",
"burr_defect",
"huahen",
"mosun",
"yanse",
'basi',
'jianju',
'chuizhidu', ]
i = 0
for box in all_boxes1:
cv2.rectangle(image_numpy2, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
# add label
# if box[1] > 10:
# cv2.putText(image_numpy2, labels[all_labels1[i]], (int(box[0]), int(box[1] - 6)),
# cv2.FONT_HERSHEY_COMPLEX_SMALL, 5,
# (255, 255, 0))
# else:
# cv2.putText(image_numpy2, labels[all_labels1[i]], (int(box[0]), int(box[1] + 15)),
# cv2.FONT_HERSHEY_COMPLEX_SMALL, 5,
# (255, 255, 0))
# i += 1
ax[2, 1].imshow(image_numpy2)
ax[2, 1].set_title('Fusion')
# Image._show(Image.fromarray(image_numpy2))
# Image.fromarray(image_numpy2).save('prediction.jpg')
print('fusion prediction:')
print(all_labels1)
print(all_scores1)
print(all_boxes1)
| width = image.width
boxes[:, [2, 0]] = width - boxes[:, [0, 2]]
return boxes | identifier_body |
TTA.py | #!/usr/bin/env python
# _*_coding:utf-8 _*_
# @Time :2021/6/19 15:58
# @Author :Jiawei Lian
# @FileName: defect_detector
# @Software: PyCharm
from copy import deepcopy
import cv2
import ensemble_boxes
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
from PIL import Image
from torch.hub import load_state_dict_from_url
# from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from faster_rcnn import FastRCNNPredictor
from torchvision.transforms import functional as F, transforms
from torchvision.transforms import transforms as T
import faster_rcnn
class | :
""" author: @shonenkov """
image_size = 512
def augment(self, image):
raise NotImplementedError
def batch_augment(self, images):
raise NotImplementedError
def deaugment_boxes(self, boxes, image):
raise NotImplementedError
def get_object_detector(num_classes):
    # load a Faster R-CNN object detection model pre-trained on COCO
model = faster_rcnn.fasterrcnn_resnet50_fpn(pretrained=False)
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
class TTAHorizontalFlip(BaseWheatTTA):
""" author: @shonenkov """
def augment(self, image):
return image.flip(1)
def batch_augment(self, images):
return images.flip(2)
def deaugment_boxes(self, boxes, image):
width = image.width
boxes[:, [2, 0]] = width - boxes[:, [0, 2]]
return boxes
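# For the horizontal flip above: a point x maps to width - x, so the de-augmented
# box becomes [width - x2, width - x1]; writing columns [2, 0] from [0, 2] swaps
# the pair so that x1 <= x2 still holds. The vertical flip below does the same
# for the y coordinates with the image height.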
class TTAVerticalFlip(BaseWheatTTA):
""" author: @shonenkov """
def augment(self, image):
return image
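# Note: augment() is the identity here because the script below feeds this
# transform an image tensor that was already vertically flipped with F.vflip;
# batch_augment() performs the actual flip for batched input.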
def batch_augment(self, images):
return images.flip(3)
def deaugment_boxes(self, boxes, image):
height = image.height
boxes[:, [3, 1]] = height - boxes[:, [1, 3]]
return boxes
class TTACompose(BaseWheatTTA):
""" author: @shonenkov """
def __init__(self, transforms):
self.transforms = transforms
def augment(self, image):
for transform in self.transforms:
image = transform.augment(image)
return image
def batch_augment(self, images):
for transform in self.transforms:
images = transform.batch_augment(images)
return images
def prepare_boxes(self, boxes):
# re-sort the corner coordinates that flipping may have swapped, so x1 <= x2 and y1 <= y2
result_boxes = boxes.clone()
result_boxes[:, 0], _ = boxes[:, [0, 2]].min(1)
result_boxes[:, 1], _ = boxes[:, [1, 3]].min(1)
result_boxes[:, 2], _ = boxes[:, [0, 2]].max(1)
result_boxes[:, 3], _ = boxes[:, [1, 3]].max(1)
return result_boxes
def deaugment_boxes(self, boxes, image):
for transform in self.transforms[::-1]:
boxes = transform.deaugment_boxes(boxes, image)
return self.prepare_boxes(boxes)
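# Transforms are undone in reverse order of application, and prepare_boxes()
# cleans up any corner coordinates the flips swapped.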
def tensor_to_PIL(tensor):
image = tensor.cpu().clone()
image = image.squeeze(0)
image = transforms.ToPILImage()(image)
return image
def del_tensor_ele(arr, index):
arr1 = arr[0:index]
arr2 = arr[index + 1:]
return torch.cat((arr1, arr2), dim=0)
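# e.g. del_tensor_ele(torch.tensor([3, 5, 7]), 1) -> tensor([3, 7])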
def del_under_threshold(result, threshold=0.):
idxes = []
for idx in range(len(result[0]['scores'])):
if result[0]['scores'][idx] < threshold:
idxes.append(idx)
for i in idxes:
result[0]['scores'] = del_tensor_ele(result[0]['scores'], len(result[0]['scores']) - 1)
result[0]['labels'] = del_tensor_ele(result[0]['labels'], len(result[0]['labels']) - 1)
result[0]['boxes'] = del_tensor_ele(result[0]['boxes'], len(result[0]['boxes']) - 1)
return result
def del_fusion_under_threshold(boxes, labels, scores, threshold=0.):
idxes = []
for idx in range(len(scores)):
if scores[idx] < threshold:
idxes.append(idx)
for i in idxes:
scores = del_tensor_ele(scores, len(scores) - 1)
labels = del_tensor_ele(labels, len(labels) - 1)
boxes = del_tensor_ele(boxes, len(boxes) - 1)
return boxes, labels, scores
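# Both del_* helpers above assume the scores arrive sorted in descending order
# (torchvision detectors return them that way): they count the below-threshold
# entries and then trim that many elements from the tail, which is equivalent to
# removing the low-score entries only under that assumption.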
def py_cpu_nms(boxes, scores, thresh=0.55):
"""Pure Python NMS baseline."""
# unpack the x1, y1, x2, y2 coordinates and the scores
boxes = boxes.detach().numpy()
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
scores = scores
# area of every detection box
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
# sort by confidence score in descending order
# order = scores.argsort()[::-1]
all_scores, order = scores.sort(descending=True)
keep = []  # indices of the boxes that survive NMS
# print(order)
while int(len(order.detach().numpy())) > 0:
i = order[0]
keep.append(i.numpy())  # keep the highest-scoring box among the remaining ones
# intersection rectangle: top-left and bottom-right corners
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
# intersection area; zero when the boxes do not overlap
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
# IoU = overlap / (area1 + area2 - overlap)
ovr = inter / (areas[i] + areas[order[1:]] - inter)
# keep only the boxes whose IoU with the kept box is below the threshold
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]  # ovr is one element shorter than order, so shift the indices by one
return keep
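# keep holds indices into the original (unsorted) boxes/scores arrays, ordered
# from highest to lowest score, e.g.:
# keep = py_cpu_nms(boxes, scores, thresh=0.55)
# best_box = boxes[keep[0]]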
def soft_nms(bboxes, scores, Nt=0.3, sigma2=0.5, score_thresh=0.001, method=2):
# append each box's index [0, 1, 2, ...] as a 5th column; the final shape is [n, 5]: four coordinates plus the index
# res_bboxes = deepcopy(bboxes)
N = bboxes.shape[0]  # total number of boxes
indexes = np.array([np.arange(N)])  # indices: 0, 1, 2, ..., n-1
bboxes = bboxes.detach().numpy()
bboxes = np.concatenate((bboxes, indexes.T), axis=1)  # after concatenate, changes to bboxes no longer affect the caller's tensor
# compute the area of every box
x1 = bboxes[:, 0]
y1 = bboxes[:, 1]
x2 = bboxes[:, 2]
y2 = bboxes[:, 3]
scores = scores
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
scores, order = scores.sort(descending=True)
scores = scores.detach().numpy()
for i in range(N):
# find the maximum score after position i and its index
pos = i + 1
if i != N - 1:
maxscore = np.max(scores[pos:], axis=0)
maxpos = np.argmax(scores[pos:], axis=0)
else:
maxscore = scores[-1]
maxpos = 0
# if the score at i is lower than the maximum behind it, swap them so that position i holds the highest score
if scores[i] < maxscore:
bboxes[[i, maxpos + i + 1]] = bboxes[[maxpos + i + 1, i]]
scores[[i, maxpos + i + 1]] = scores[[maxpos + i + 1, i]]
areas[[i, maxpos + i + 1]] = areas[[maxpos + i + 1, i]]
# IoU calculate
xx1 = np.maximum(bboxes[i, 0], bboxes[pos:, 0])
yy1 = np.maximum(bboxes[i, 1], bboxes[pos:, 1])
xx2 = np.minimum(bboxes[i, 2], bboxes[pos:, 2])
yy2 = np.minimum(bboxes[i, 3], bboxes[pos:, 3])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
iou = intersection / (areas[i] + areas[pos:] - intersection)
# Three methods: 1.linear 2.gaussian 3.original NMS
if method == 1: # linear
weight = np.ones(iou.shape)
weight[iou > Nt] = weight[iou > Nt] - iou[iou > Nt]
elif method == 2: # gaussian
weight = np.exp(-(iou * iou) / sigma2)
else: # original NMS
weight = np.ones(iou.shape)
weight[iou > Nt] = 0
scores[pos:] = weight * scores[pos:]
# select the boxes and keep the corresponding indexes
inds = bboxes[:, 4][scores > score_thresh]
keep = inds.astype(int)
return keep
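# keep holds the original indices (read back from the appended 5th column) of
# boxes whose decayed score is still above score_thresh; method selects the
# decay: 1 = linear, 2 = Gaussian, anything else = hard NMS.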
# image_path = './data/Images/2020-01-11_21_43_14_145.jpg'
# image_path = './data/Images/2020-03-07_08_34_30_467.jpg'
# image_path = './data/Images/2020-01-11_21_41_15_002.jpg'
image_path = './data/Images/2020-01-11_21_36_02_642.jpg'
# image_path = './data/Images/2020-03-10_16_18_20_688.jpg'
# image_path = './data/Images/2021-05-29-18-44-02.jpg'
# image_path = './data/Images/2021-05-16-18-51-54.jpg'
# image_path = './data/Images/2021-05-16-14-58-28.jpg'
# model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/0.5959/model_23_5959_5288.pth'
# model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/model_0.pth'
model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/0.5932/model_8_5932.pth'
results = []
predictions = []
# you can try own combinations:
transform1 = TTACompose([
TTAHorizontalFlip(),
# TTAVerticalFlip()
])
transform2 = TTACompose([
# TTAHorizontalFlip(),
TTAVerticalFlip()
])
fig, ax = plt.subplots(3, 2, figsize=(16, 10))
image1 = Image.open(image_path).convert("RGB")
image1_vf = F.vflip(image1)
image_tensor = torch.from_numpy(np.array(image1))
image_tensor_vf = torch.from_numpy(np.array(image1_vf))
# image_tensor = image_tensor.permute(0, 1, 2)
image_numpy_vf = image_tensor_vf.cpu().numpy().copy()
image_numpy = image_tensor.cpu().numpy().copy()
image_numpy1 = image_tensor.cpu().numpy().copy()
image_numpy2 = image_tensor.cpu().numpy().copy()
image_numpy3 = image_tensor.cpu().numpy().copy()
# ax[0, 0].imshow(image)
# ax[0, 0].set_title('original')
tta_image1 = transform1.augment(image_tensor)
tta_image2 = transform2.augment(image_tensor_vf)
tta_image1_numpy = tta_image1.numpy().copy()
tta_image2_numpy = image_tensor_vf.numpy().copy()
tta_image1 = Image.fromarray(tta_image1_numpy)
tta_image2 = Image.fromarray(tta_image2_numpy)
########################################################################
# tta_image1 prediction
preprocessed_image = torch.unsqueeze(F.to_tensor(tta_image1), dim=0)
model = get_object_detector(11)
model.load_state_dict(
torch.load(model_path, map_location='cpu')[
'model'])
model.eval()
result = model(preprocessed_image)
result = del_under_threshold(result)
print('tta_image prediction:', result)
boxes3 = result[0]['boxes']
scores3 = result[0]['scores']
labels3 = result[0]['labels']
for box in boxes3:
cv2.rectangle(tta_image1_numpy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[0, 0].imshow(tta_image1_numpy)
ax[0, 0].set_title('Augment1')
###################################################################
# deaugmentation prediction
boxes3 = transform1.deaugment_boxes(boxes3, image1)
results.append({
'boxes': boxes3,
'scores': scores3,
'labels': labels3,
})
for box in boxes3:
cv2.rectangle(image_numpy1, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[0, 1].imshow(image_numpy1)
ax[0, 1].set_title('Deaugment1')
#########################################################
########################################################################
# tta_image2 prediction
preprocessed_image = torch.unsqueeze(F.to_tensor(tta_image2), dim=0)
model = get_object_detector(11)
model.load_state_dict(
torch.load(model_path, map_location='cpu')[
'model'])
model.eval()
result = model(preprocessed_image)
result = del_under_threshold(result)
print('tta_image prediction:', result)
boxes4 = result[0]['boxes']
scores4 = result[0]['scores']
labels4 = result[0]['labels']
for box in boxes4:
cv2.rectangle(tta_image2_numpy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[1, 0].imshow(tta_image2_numpy)
ax[1, 0].set_title('Augment2')
###################################################################
# deaugmentation prediction
boxes4 = transform2.deaugment_boxes(boxes4, image1_vf)
results.append({
'boxes': boxes4,
'scores': scores4,
'labels': labels4,
})
for box in boxes4:
cv2.rectangle(image_numpy3, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[1, 1].imshow(image_numpy3)
ax[1, 1].set_title('Deaugment2')
#########################################################
# original_image prediction
preprocessed_image = torch.unsqueeze(F.to_tensor(image1), dim=0)
model = get_object_detector(11)
model.load_state_dict(
torch.load(model_path, map_location='cpu')[
'model'])
model.eval()
result_original_image = model(preprocessed_image)
result_original_image = del_under_threshold(result_original_image)
print('original image prediction:', result_original_image)
boxes2 = result_original_image[0]['boxes']
scores2 = result_original_image[0]['scores']
labels2 = result_original_image[0]['labels']
results.append({
'boxes': boxes2,
'scores': scores2,
'labels': labels2,
})
for box in boxes2:
cv2.rectangle(image_numpy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[2, 0].imshow(image_numpy)
ax[2, 0].set_title('Original')
#######################################################
# # weighted boxes fusion (alternative path, left disabled)
# predictions.append(results)
# boxes1, scores1, labels1 = run_wbf(predictions)
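# Fusion strategy actually used below: concatenate the de-augmented predictions
# from both TTA views with the original-image predictions, sort everything by
# score, and let NMS keep the best box per cluster; weighted-boxes-fusion
# (commented out above) would average clustered boxes instead.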
temp_all_boxes = torch.cat((boxes3, boxes2, boxes4), 0)
all_labels = torch.cat((labels3, labels2, labels4), 0)
all_scores = torch.cat((scores3, scores2, scores4), 0)
_, indices = all_scores.sort(descending=True)
all_labels = all_labels.gather(dim=0, index=indices)
all_scores = all_scores.gather(dim=0, index=indices)
all_boxes = torch.empty(len(indices), 4)
for i in range(len(indices)):
all_boxes[i] = temp_all_boxes[indices[i]]
all_boxes, all_labels, all_scores = del_fusion_under_threshold(all_boxes, all_labels, all_scores)
keep = py_cpu_nms(all_boxes, all_scores)
# keep = soft_nms(all_boxes, all_scores)
# scores1 = torch.from_numpy(scores1)
# boxes1 = torch.from_numpy(boxes1)
# labels1 = torch.from_numpy(labels1)
# temp_all_boxes = torch.cat((boxes2, boxes1), 0)
# all_labels = torch.cat((labels2, labels1), 0)
# all_scores = torch.cat((scores2, scores1), 0)
# print(boxes1, scores1, labels1)
#
# for box in boxes1:
# cv2.rectangle(image_numpy2, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
#
# ax[1, 1].imshow(image_numpy2)
# ax[1, 1].set_title('predictions fusion')
# all_scores, indices = all_scores.sort(descending=True)
# all_labels = all_labels.gather(dim=0, index=indices)
# all_boxes = torch.empty(len(indices), 4)
all_scores1 = all_scores[:len(keep)]
all_labels1 = all_labels[:len(keep)]
all_boxes1 = all_boxes[:len(keep)]
for i in range(len(keep)):
all_scores1[i] = all_scores[keep[i]]
all_labels1[i] = all_labels[keep[i]]
all_boxes1[i] = all_boxes[keep[i]]
labels = ["",
"connection_edge_defect",
"right_angle_edge_defect",
"cavity_defect",
"burr_defect",
"huahen",
"mosun",
"yanse",
'basi',
'jianju',
'chuizhidu', ]
i = 0
for box in all_boxes1:
cv2.rectangle(image_numpy2, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
# add label
# if box[1] > 10:
# cv2.putText(image_numpy2, labels[all_labels1[i]], (int(box[0]), int(box[1] - 6)),
# cv2.FONT_HERSHEY_COMPLEX_SMALL, 5,
# (255, 255, 0))
# else:
# cv2.putText(image_numpy2, labels[all_labels1[i]], (int(box[0]), int(box[1] + 15)),
# cv2.FONT_HERSHEY_COMPLEX_SMALL, 5,
# (255, 255, 0))
# i += 1
ax[2, 1].imshow(image_numpy2)
ax[2, 1].set_title('Fusion')
# Image._show(Image.fromarray(image_numpy2))
# Image.fromarray(image_numpy2).save('prediction.jpg')
print('fusion prediction:')
print(all_labels1)
print(all_scores1)
print(all_boxes1)
app_multiple_databus.go
package dao
import (
"context"
"encoding/json"
"fmt"
"regexp"
"strings"
"time"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
// AppMultipleDatabus .
type AppMultipleDatabus struct {
d *Dao
appid string
attrs *model.Attrs
db *xsql.DB
dtb *databus.Databus
offsets model.LoopOffsets
mapData []model.MapData
tableName []string
indexNameSuffix []string
commits map[int32]*databus.Message
}
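// AppMultipleDatabus drives one search app that is sharded over several tables:
// db/dtb are the backing MySQL pool and databus consumer, offsets tracks the
// per-table incremental position, mapData buffers rows waiting to be bulk
// indexed, and commits holds the latest databus message per partition so it
// can be acked in Commit.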
// IndexNameSuffix .
func (amd *AppMultipleDatabus) IndexNameSuffix(format string, startDate string) (res []string, err error) {
var (
sTime time.Time
eTime = time.Now()
)
sTime, err = time.Parse(format, startDate)
if err != nil {
log.Error("d.LogAuditIndexName(%v)", startDate)
return
}
resDict := map[string]bool{}
if strings.Contains(format, "02") {
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(0, 0, -1)
if sTime.After(eTime) {
break
}
}
} else if strings.Contains(format, "week") {
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(0, 0, -7)
if sTime.After(eTime) {
break
}
}
} else if strings.Contains(format, "01") {
// AddDate(0, -1, 0) gives wrong results on the 31st (e.g. Jan 31), so pin the date to the first day of the month first
year, month, _ := eTime.Date()
hour, min, sec := eTime.Clock()
eTime = time.Date(year, month, 1, hour, min, sec, 0, eTime.Location())
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(0, -1, 0)
if sTime.After(eTime) {
break
}
}
} else if strings.Contains(format, "2006") {
// AddDate(-1, 0, 0) gives wrong results on Feb 29, so pin the date to Jan 1 first
year, _, _ := eTime.Date()
hour, min, sec := eTime.Clock()
eTime = time.Date(year, 1, 1, hour, min, sec, 0, eTime.Location())
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(-1, 0, 0)
if sTime.After(eTime) {
break
}
}
}
for k := range resDict {
res = append(res, k)
}
return
}
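// For example (hypothetical config): with format "20060102" and start date
// "20200101", IndexNameSuffix walks backwards one day at a time from today and
// returns one "20060102"-formatted suffix per day; the resDict set keeps the
// result free of duplicates.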
func (amd *AppMultipleDatabus) getIndexName(format string, time time.Time) (index string) {
var (
week = map[int]string{
0: "0108",
1: "0916",
2: "1724",
3: "2531",
}
)
return strings.Replace(time.Format(format), "week", week[time.Day()/9], -1)
}
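// The "week" placeholder is replaced by a bucket derived from time.Day()/9:
// 0 -> "0108", 1 -> "0916", 2 -> "1724", 3 -> "2531". Note the integer division
// does not quite match the bucket names: day 17 still lands in "0916" and days
// 25-26 in "1724".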
// NewAppMultipleDatabus .
func NewAppMultipleDatabus(d *Dao, appid string) (amd *AppMultipleDatabus) {
var err error
amd = &AppMultipleDatabus{
d: d,
appid: appid,
attrs: d.AttrPool[appid],
offsets: make(map[int]*model.LoopOffset),
tableName: []string{},
indexNameSuffix: []string{},
commits: make(map[int32]*databus.Message),
}
amd.db = d.DBPool[amd.attrs.DBName]
amd.dtb = d.DatabusPool[amd.attrs.Databus.Databus]
if amd.attrs.Table.TableSplit == "int" || amd.attrs.Table.TableSplit == "single" {
for i := amd.attrs.Table.TableFrom; i <= amd.attrs.Table.TableTo; i++ {
tableName := fmt.Sprintf("%s%0"+amd.attrs.Table.TableZero+"d", amd.attrs.Table.TablePrefix, i)
amd.tableName = append(amd.tableName, tableName)
amd.offsets[i] = &model.LoopOffset{}
}
} else {
var tableNameSuffix []string
tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",")
if tableNameSuffix, err = amd.IndexNameSuffix(tableFormat[0], tableFormat[1]); err != nil {
log.Error("amd.IndexNameSuffix(%v)", err)
return
}
for _, v := range tableNameSuffix {
amd.tableName = append(amd.tableName, amd.attrs.Table.TablePrefix+v)
}
for i := range amd.tableName {
amd.offsets[i] = &model.LoopOffset{}
}
}
return
}
// Business return business.
func (amd *AppMultipleDatabus) Business() string {
return amd.attrs.Business
}
// InitIndex .
func (amd *AppMultipleDatabus) InitIndex(c context.Context) {
var (
err error
indexAliasName string
indexEntityName string
)
indexFormat := strings.Split(amd.attrs.Index.IndexFormat, ",")
aliases, aliasErr := amd.d.GetAliases(amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix)
if indexFormat[0] == "int" || indexFormat[0] == "single" {
for i := amd.attrs.Index.IndexFrom; i <= amd.attrs.Index.IndexTo; i++ {
// == "0" 有问题,不通用
if amd.attrs.Index.IndexZero == "0" {
indexAliasName = amd.attrs.Index.IndexAliasPrefix
indexEntityName = amd.attrs.Index.IndexEntityPrefix
} else {
indexAliasName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexAliasPrefix, i)
indexEntityName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexEntityPrefix, i)
}
if aliasErr != nil {
amd.d.InitIndex(c, nil, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping)
} else {
amd.d.InitIndex(c, aliases, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping)
}
}
} else {
if amd.indexNameSuffix, err = amd.IndexNameSuffix(indexFormat[0], indexFormat[1]); err != nil {
log.Error("amd.IndexNameSuffix(%v)", err)
return
}
for _, v := range amd.indexNameSuffix {
if aliasErr != nil {
amd.d.InitIndex(c, nil, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping)
} else {
amd.d.InitIndex(c, aliases, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping)
}
}
}
}
// InitOffset insert init value to offset.
func (amd *AppMultipleDatabus) InitOffset(c context.Context) {
amd.d.InitOffset(c, amd.offsets[0], amd.attrs, amd.tableName)
}
// Offset .
func (amd *AppMultipleDatabus) Offset(c context.Context) {
for i, v := range amd.tableName {
offset, err := amd.d.Offset(c, amd.attrs.AppID, v)
if err != nil {
log.Error("amd.d.offset error(%v)", err)
time.Sleep(time.Second * 3)
}
amd.offsets[i].SetReview(offset.ReviewID, offset.ReviewTime)
amd.offsets[i].SetOffset(offset.OffsetID(), offset.OffsetTime())
}
}
// SetRecover set recover
func (amd *AppMultipleDatabus) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
amd.offsets.SetRecoverOffsets(i, recoverID, recoverTime)
}
// IncrMessages .
func (amd *AppMultipleDatabus) IncrMessages(c context.Context) (length int, err error) {
ticker := time.NewTicker(time.Duration(time.Millisecond * time.Duration(amd.attrs.Databus.Ticker)))
defer ticker.Stop()
for {
select {
case msg, ok := <-amd.dtb.Messages():
if !ok {
log.Error("databus: %s binlog consumer exit!!!", amd.attrs.Databus)
break
}
m := &model.Message{}
amd.commits[msg.Partition] = msg
if err = json.Unmarshal(msg.Value, m); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
if amd.attrs.Business == "creative_reply" {
r, _ := regexp.Compile("reply_\\d+")
if !r.MatchString(m.Table) {
continue
}
}
if (amd.attrs.Table.TableSplit == "string" && m.Table == amd.attrs.Table.TablePrefix) ||
(amd.attrs.Table.TableSplit != "string" && strings.HasPrefix(m.Table, amd.attrs.Table.TablePrefix)) {
if m.Action == "insert" || m.Action == "update" {
var parseMap map[string]interface{}
parseMap, err = amd.d.JSON2map(m.New)
if err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
// esports fav type filter
if amd.attrs.AppID == "esports_fav" {
if t, ok := parseMap["type"]; ok && t.(int64) != 10 {
continue
}
}
// playlist fav type and attr filter
if amd.attrs.AppID == "fav_playlist" {
if t, ok := parseMap["type"]; ok && t.(int64) != 2 {
continue
}
if t, ok := parseMap["attr"]; ok {
if t.(int64)>>0&1 == 0 || (m.Action == "insert" && t.(int64)>>1&1 == 1) {
continue
}
}
}
var newParseMap map[string]interface{}
newParseMap, err = amd.newParseMap(c, m.Table, parseMap)
if err != nil {
if amd.attrs.AppID == "creative_reply" {
continue
}
log.Error("amd.newParseMap error(%v)", err)
continue
}
amd.mapData = append(amd.mapData, newParseMap)
}
}
if len(amd.mapData) < amd.attrs.Databus.AggCount {
continue
}
case <-ticker.C:
}
break
}
if len(amd.mapData) > 0 {
amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "dtb", []string{})
}
length = len(amd.mapData)
//amd.d.extraData(c, amd, "dtb")
return
}
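// IncrMessages blocks until either Databus.AggCount rows have been buffered or
// the ticker fires, whichever comes first; the consumed messages stay in
// amd.commits (latest per partition) so Commit can ack them after the batch
// has been indexed.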
// AllMessages .
func (amd *AppMultipleDatabus) AllMessages(c context.Context) (length int, err error) {
amd.mapData = []model.MapData{}
for i, v := range amd.tableName {
var (
rows *xsql.Rows
sql string
)
tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",")
if amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm" {
sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i, i)
} else if tableFormat[0] == "int" || tableFormat[0] == "single" { // compatibility: only the numeric suffix is passed into the SQL, not the table name
sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i)
log.Info(sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size)
} else {
sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, v)
}
if rows, err = amd.db.Query(c, sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size); err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
tempList := []model.MapData{}
for rows.Next() {
item, row := InitMapData(amd.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("AppMultipleDatabus.AllMessages rows.Scan() error(%v)", err)
continue
}
var newParseMap map[string]interface{}
newParseMap, err = amd.newParseMap(c, v, item)
if err != nil {
log.Error("amd.newParseMap error(%v)", err)
continue
}
tempList = append(tempList, newParseMap)
amd.mapData = append(amd.mapData, newParseMap)
}
rows.Close()
tmpLength := len(tempList)
if tmpLength > 0 {
amd.offsets[i].SetTempOffset(tempList[tmpLength-1].PrimaryID(), tempList[tmpLength-1].StrMTime())
}
}
if len(amd.mapData) > 0 {
amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "db", []string{})
}
length = len(amd.mapData)
//amd.d.extraData(c, amd, "db")
return
}
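// AllMessages pages through every shard table by primary id: each query resumes
// from the stored offset, and SetTempOffset records the last row's id/mtime so
// the next pass continues where this one stopped.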
// BulkIndex .
func (amd *AppMultipleDatabus) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
partData := amd.mapData[start:end]
if amd.d.c.Business.Index {
err = amd.d.BulkDBData(c, amd.attrs, writeEntityIndex, partData...)
} else {
err = amd.d.BulkDatabusData(c, amd.attrs, writeEntityIndex, partData...)
}
return
}
// Commit .
func (amd *AppMultipleDatabus) Commit(c context.Context) (err error) {
if amd.d.c.Business.Index {
if amd.attrs.Table.TableSplit == "int" || amd.attrs.Table.TableSplit == "single" { // compatibility: only the numeric suffix is passed, not the table name
for i := amd.attrs.Table.TableFrom; i <= amd.attrs.Table.TableTo; i++ {
tableName := fmt.Sprintf("%s%0"+amd.attrs.Table.TableZero+"d", amd.attrs.Table.TablePrefix, i)
if err = amd.d.CommitOffset(c, amd.offsets[i], amd.attrs.AppID, tableName); err != nil {
log.Error("AppMultipleDatabus.Commit error(%v)", err)
continue
}
}
} else {
for i, v := range amd.indexNameSuffix {
if err = amd.d.CommitOffset(c, amd.offsets[i], amd.attrs.AppID, v); err != nil {
log.Error("Commit error(%v)", err)
continue
}
}
}
} else {
for k, c := range amd.commits {
if err = c.Commit(); err != nil {
log.Error("AppMultipleDatabus.Commit error(%v)", err)
continue
}
delete(amd.commits, k)
}
}
amd.mapData = []model.MapData{}
return
}
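// Commit has two modes: when rebuilding from the DB it persists the per-table
// offsets, otherwise it acks the buffered databus messages; either way the
// mapData batch buffer is reset.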
// Sleep .
func (amd *AppMultipleDatabus) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(amd.attrs.Other.Sleep))
}
// Size .
func (amd *AppMultipleDatabus) Size(c context.Context) (size int) {
return amd.attrs.Other.Size
}
// indexField .
// func (amd *AppMultipleDatabus) indexField(c context.Context, tableName string) (fieldName string, fieldValue int) {
// suffix, _ := strconv.Atoi(strings.Split(tableName, "_")[2])
// s := strings.Split(amd.attrs.DataSQL.DataIndexSuffix, ";")
// v := strings.Split(s[1], ":")
// fieldName = v[0]
// indexNum, _ := strconv.Atoi(v[2])
// fieldValue = suffix + indexNum
// return
// }
// newParseMap .
func (amd *AppMultipleDatabus) newParseMap(c context.Context, table string, parseMap map[string]interface{}) (res map[string]interface{}, err error) {
res = parseMap
// TODO: writes to the entity index are failing
if (amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm") && !amd.d.c.Business.Index {
indexSuffix := strings.Split(table, "_")[2]
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + indexSuffix
if _, ok := res["msg"]; ok {
// dm_content_
res["index_field"] = true // 删除ctime
res["index_id"] = fmt.Sprintf("%v", res["dmid"])
} else {
// dm_index_
res["index_id"] = fmt.Sprintf("%v", res["id"])
}
} else if amd.attrs.AppID == "dmreport" {
if ztime, ok := res["ctime"].(*interface{}); ok { // 数据库
if ctime, cok := (*ztime).(time.Time); cok {
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + ctime.Format("2006")
}
} else if ztime, ok := res["ctime"].(string); ok { // databus
var ctime time.Time
if ctime, err = time.Parse("2006-01-02 15:04:05", ztime); err == nil {
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + ctime.Format("2006")
}
}
} else if amd.attrs.AppID == "creative_reply" && !amd.d.c.Business.Index {
if replyType, ok := res["type"].(int64); ok {
if replyType != 1 && replyType != 12 && replyType != 14 {
err = fmt.Errorf("redundant data")
}
} else {
err = fmt.Errorf("malformed data")
}
} else if amd.attrs.Index.IndexSplit == "single" {
res["index_name"] = amd.attrs.Index.IndexAliasPrefix
} else {
indexSuffix := string([]rune(table)[strings.Count(amd.attrs.Table.TablePrefix, "")-1:])
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + indexSuffix
}
//dtb index_id
if amd.attrs.AppID == "favorite" && !amd.d.c.Business.Index {
if fid, ok := res["fid"].(int64); ok {
if oid, ok := res["oid"].(int64); ok {
res["index_id"] = fmt.Sprintf("%d_%d", fid, oid)
return
}
}
res["index_id"] = "err"
res["indexName"] = ""
}
return
}
app_multiple_databus.go | package dao
import (
"context"
"encoding/json"
"fmt"
"regexp"
"strings"
"time"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
// AppMultipleDatabus .
type AppMultipleDatabus struct {
d *Dao
appid string
attrs *model.Attrs
db *xsql.DB
dtb *databus.Databus
offsets model.LoopOffsets
mapData []model.MapData
tableName []string
indexNameSuffix []string
commits map[int32]*databus.Message
}
// IndexNameSuffix .
func (amd *AppMultipleDatabus) IndexNameSuffix(format string, startDate string) (res []string, err error) {
var (
sTime time.Time
eTime = time.Now()
)
sTime, err = time.Parse(format, startDate)
if err != nil {
log.Error("d.LogAuditIndexName(%v)", startDate)
return
}
resDict := map[string]bool{}
if strings.Contains(format, "02") {
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(0, 0, -1)
if sTime.After(eTime) {
break
}
}
} else if strings.Contains(format, "week") {
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(0, 0, -7)
if sTime.After(eTime) {
break
}
}
} else if strings.Contains(format, "01") {
// 1月31日时AddDate(0, -1, 0)会出现错误
year, month, _ := eTime.Date()
hour, min, sec := eTime.Clock()
eTime = time.Date(year, month, 1, hour, min, sec, 0, eTime.Location())
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(0, -1, 0)
if sTime.After(eTime) {
break
}
}
} else if strings.Contains(format, "2006") {
// 2月29日时AddDate(-1, 0, 0)会出现错误
year, _, _ := eTime.Date()
hour, min, sec := eTime.Clock()
eTime = time.Date(year, 1, 1, hour, min, sec, 0, eTime.Location())
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(-1, 0, 0)
if sTime.After(eTime) {
break
}
}
}
for k := range resDict {
res = append(res, k)
}
return
}
func (amd *AppMultipleDatabus) getIndexName(format string, time time.Time) (index string) {
var (
week = map[int]string{
0: "0108",
1: "0916",
2: "1724",
3: "2531",
}
)
return strings.Replace(time.Format(format), "week", week[time.Day()/9], -1)
}
// NewAppMultipleDatabus .
func NewAppMultipleDatabus(d *Dao, appid string) (amd *AppMultipleDatabus) {
var err error
amd = &AppMultipleDatabus{
d: d,
appid: appid,
attrs: d.AttrPool[appid],
offsets: make(map[int]*model.LoopOffset),
tableName: []string{},
indexNameSuffix: []string{},
commits: make(map[int32]*databus.Message),
}
amd.db = d.DBPool[amd.attrs.DBName]
amd.dtb = d.DatabusPool[amd.attrs.Databus.Databus]
if amd.attrs.Table.TableSplit == "int" || amd.attrs.Table.TableSplit == "single" {
for i := amd.attrs.Table.TableFrom; i <= amd.attrs.Table.TableTo; i++ {
tableName := fmt.Sprintf("%s%0"+amd.attrs.Table.TableZero+"d", amd.attrs.Table.TablePrefix, i)
amd.tableName = append(amd.tableName, tableName)
amd.offsets[i] = &model.LoopOffset{}
}
} else {
var tableNameSuffix []string
tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",")
if tableNameSuffix, err = amd.IndexNameSuffix(tableFormat[0], tableFormat[1]); err != nil {
log.Error("amd.IndexNameSuffix(%v)", err)
return
}
for _, v := range tableNameSuffix {
amd.tableName = append(amd.tableName, amd.attrs.Table.TablePrefix+v)
}
for i := range amd.tableName {
amd.offsets[i] = &model.LoopOffset{}
}
}
return
}
// Business return business.
func (amd *AppMultipleDatabus) Business() string {
return amd.attrs.Business
}
// InitIndex .
func (amd *AppMultipleDatabus) InitIndex(c context.Context) {
var (
err error
indexAliasName string
indexEntityName string
)
indexFormat := strings.Split(amd.attrs.Index.IndexFormat, ",")
aliases, aliasErr := amd.d.GetAliases(amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix)
if indexFormat[0] == "int" || indexFormat[0] == "single" {
for i := amd.attrs.Index.IndexFrom; i <= amd.attrs.Index.IndexTo; i++ {
// == "0" 有问题,不通用
if amd.attrs.Index.IndexZero == "0" {
indexAliasName = amd.attrs.Index.IndexAliasPrefix
indexEntityName = amd.attrs.Index.IndexEntityPrefix
} else {
indexAliasName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexAliasPrefix, i)
indexEntityName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexEntityPrefix, i)
}
if aliasErr != nil {
amd.d.InitIndex(c, nil, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping)
} else {
amd.d.InitIndex(c, aliases, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping)
}
}
} else {
if amd.indexNameSuffix, err = amd.IndexNameSuffix(indexFormat[0], indexFormat[1]); err != nil {
log.Error("amd.IndexNameSuffix(%v)", err)
return
}
for _, v := range amd.indexNameSuffix {
if aliasErr != nil {
amd.d.InitIndex(c, nil, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping)
} else {
amd.d.InitIndex(c, aliases, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping)
}
}
}
}
// InitOffset insert init value to offset.
func (amd *AppMultipleDatabus) InitOffset(c context.Context) {
amd.d.InitOffset(c, amd.offsets[0], amd.attrs, amd.tableName)
}
// Offset .
func (amd *AppMultipleDatabus) Offset(c context.Context) {
for i, v := range amd.tableName {
offset, err := amd.d.Offset(c, amd.attrs.AppID, v)
if err != nil {
log.Error("amd.d.offset error(%v)", err)
time.Sleep(time.Second * 3)
}
amd.offsets[i].SetReview(offset.ReviewID, offset.ReviewTime)
amd.offsets[i].SetOffset(offset.OffsetID(), offset.OffsetTime())
}
}
// SetRecover set recover
func (amd *AppMultipleDatabus) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
amd.offsets.SetRecoverOffsets(i, recoverID, recoverTime)
}
// IncrMessages .
func (amd *AppMultipleDatabus) IncrMessages(c context.Context) (length int, err error) {
ticker := time.NewTicker(time.Duration(time.Millisecond * time.Duration(amd.attrs.Databus.Ticker)))
defer ticker.Stop()
for {
select {
case msg, ok := <-amd.dtb.Messages():
if !ok {
log.Error("databus: %s binlog consumer exit!!!", amd.attrs.Databus)
break
}
m := &model.Message{}
amd.commits[msg.Partition] = msg
if err = json.Unmarshal(msg.Value, m); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
if amd.attrs.Business == "creative_reply" {
r, _ := regexp.Compile("reply_\\d+")
if !r.MatchString(m.Table) {
continue
}
}
if (amd.attrs.Table.TableSplit == "string" && m.Table == amd.attrs.Table.TablePrefix) ||
(amd.attrs.Table.TableSplit != "string" && strings.HasPrefix(m.Table, amd.attrs.Table.TablePrefix)) {
if m.Action == "insert" || m.Action == "update" {
var parseMap map[string]interface{}
parseMap, err = amd.d.JSON2map(m.New)
if err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
// esports fav type filter
if amd.attrs.AppID == "esports_fav" {
if t, ok := parseMap["type"]; ok && t.(int64) != 10 {
continue
}
}
// playlist fav type and attr filter
if amd.attrs.AppID == "fav_playlist" {
if t, ok := parseMap["type"]; ok && t.(int64) != 2 {
continue
}
if t, ok := parseMap["attr"]; ok {
if t.(int64)>>0&1 == 0 || (m.Action == "insert" && t.(int64)>>1&1 == 1) {
continue
}
}
}
var newParseMap map[string]interface{}
newParseMap, err = amd.newParseMap(c, m.Table, parseMap)
if err != nil {
if amd.attrs.AppID == "creative_reply" {
continue
}
log.Error("amd.newParseMap error(%v)", err)
continue
}
amd.mapData = append(amd.mapData, newParseMap)
}
}
if len(amd.mapData) < amd.attrs.Databus.AggCount {
continue
}
case <-ticker.C:
}
break
}
if len(amd.mapData) > 0 {
amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "dtb", []string{})
}
length = len(amd.mapData)
//amd.d.extraData(c, amd, "dtb")
return
}
// AllMessages .
func (amd *AppMultipleDatabus) AllMessages(c context.Context) (length int, err error) {
amd.mapData = []model.MapData{}
for i, v := range amd.tableName {
var (
rows *xsql.Rows
sql string
)
tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",")
if amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm" {
sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i, i)
} else if tableFormat[0] == "int" || tableFormat[0] == "single" { // 兼容只传后缀,不传表名
sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i)
log.Info(sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size)
} else {
sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, v)
}
if rows, err = amd.db.Query(c, sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size); err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
tempList := []model.MapData{}
for rows.Next() {
item, row := InitMapData(amd.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("AppMultipleDatabus.AllMessages rows.Scan() error(%v)", err)
continue
}
var newParseMap map[string]interface{}
newParseMap, err = amd.newParseMap(c, v, item)
if err != nil {
log.Error("amd.newParseMap error(%v)", err)
continue
}
tempList = append(tempList, newParseMap)
amd.mapData = append(amd.mapData, newParseMap)
}
rows.Close()
tmpLength := len(tempList)
if tmpLength > 0 {
amd.offsets[i].SetTempOffset(tempList[tmpLength-1].PrimaryID(), tempList[tmpLength-1].StrMTime())
}
}
if len(amd.mapData) > 0 {
amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "db", []string{})
}
length = len(amd.mapData)
//amd.d.extraData(c, amd, "db")
return
}
// BulkIndex .
func (amd *AppMultipleDatabus) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
partData := amd.mapData[start:end]
if amd.d.c.Business.Index {
err = amd.d.BulkDBData(c, amd.attrs, writeEntityIndex, partData...)
} else {
err = amd.d.BulkDatabusData(c, amd.attrs, writeEntityIndex, partData...)
}
return
}
// Commit .
func (amd *AppMultipleDatabus) Commit(c context.Context) (err error) {
if amd.d.c.Business.Index { | amd.attrs.Table.TableSplit == "int" || amd.attrs.Table.TableSplit == "single" { // 兼容只传后缀,不传表名
for i := amd.attrs.Table.TableFrom; i <= amd.attrs.Table.TableTo; i++ {
tableName := fmt.Sprintf("%s%0"+amd.attrs.Table.TableZero+"d", amd.attrs.Table.TablePrefix, i)
if err = amd.d.CommitOffset(c, amd.offsets[i], amd.attrs.AppID, tableName); err != nil {
log.Error("AppMultipleDatabus.Commit error(%v)", err)
continue
}
}
} else {
for i, v := range amd.indexNameSuffix {
if err = amd.d.CommitOffset(c, amd.offsets[i], amd.attrs.AppID, v); err != nil {
log.Error("Commit error(%v)", err)
continue
}
}
}
} else {
for k, c := range amd.commits {
if err = c.Commit(); err != nil {
log.Error("AppMultipleDatabus.Commit error(%v)", err)
continue
}
delete(amd.commits, k)
}
}
amd.mapData = []model.MapData{}
return
}
// Sleep .
func (amd *AppMultipleDatabus) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(amd.attrs.Other.Sleep))
}
// Size .
func (amd *AppMultipleDatabus) Size(c context.Context) (size int) {
return amd.attrs.Other.Size
}
// indexField .
// func (amd *AppMultipleDatabus) indexField(c context.Context, tableName string) (fieldName string, fieldValue int) {
// suffix, _ := strconv.Atoi(strings.Split(tableName, "_")[2])
// s := strings.Split(amd.attrs.DataSQL.DataIndexSuffix, ";")
// v := strings.Split(s[1], ":")
// fieldName = v[0]
// indexNum, _ := strconv.Atoi(v[2])
// fieldValue = suffix + indexNum
// return
// }
// newParseMap .
func (amd *AppMultipleDatabus) newParseMap(c context.Context, table string, parseMap map[string]interface{}) (res map[string]interface{}, err error) {
res = parseMap
//TODO 实体索引写不进去
if (amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm") && !amd.d.c.Business.Index {
indexSuffix := strings.Split(table, "_")[2]
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + indexSuffix
if _, ok := res["msg"]; ok {
// dm_content_
res["index_field"] = true // 删除ctime
res["index_id"] = fmt.Sprintf("%v", res["dmid"])
} else {
// dm_index_
res["index_id"] = fmt.Sprintf("%v", res["id"])
}
} else if amd.attrs.AppID == "dmreport" {
if ztime, ok := res["ctime"].(*interface{}); ok { // 数据库
if ctime, cok := (*ztime).(time.Time); cok {
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + ctime.Format("2006")
}
} else if ztime, ok := res["ctime"].(string); ok { // databus
var ctime time.Time
if ctime, err = time.Parse("2006-01-02 15:04:05", ztime); err == nil {
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + ctime.Format("2006")
}
}
} else if amd.attrs.AppID == "creative_reply" && !amd.d.c.Business.Index {
if replyType, ok := res["type"].(int64); ok {
if replyType == 1 || replyType == 12 || replyType == 14 {
} else {
err = fmt.Errorf("多余数据")
}
} else {
err = fmt.Errorf("错误数据")
}
} else if amd.attrs.Index.IndexSplit == "single" {
res["index_name"] = amd.attrs.Index.IndexAliasPrefix
} else {
indexSuffix := string([]rune(table)[strings.Count(amd.attrs.Table.TablePrefix, "")-1:])
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + indexSuffix
}
//dtb index_id
if amd.attrs.AppID == "favorite" && !amd.d.c.Business.Index {
if fid, ok := res["fid"].(int64); ok {
if oid, ok := res["oid"].(int64); ok {
res["index_id"] = fmt.Sprintf("%d_%d", fid, oid)
return
}
}
res["index_id"] = "err"
res["indexName"] = ""
}
return
}
|
if | identifier_name |
app_multiple_databus.go | package dao
import (
"context"
"encoding/json"
"fmt"
"regexp"
"strings"
"time"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
// AppMultipleDatabus .
type AppMultipleDatabus struct {
d *Dao
appid string
attrs *model.Attrs
db *xsql.DB
dtb *databus.Databus
offsets model.LoopOffsets
mapData []model.MapData
tableName []string
indexNameSuffix []string
commits map[int32]*databus.Message
}
// IndexNameSuffix .
func (amd *AppMultipleDatabus) IndexNameSuffix(format string, startDate string) (res []string, err error) {
var (
sTime time.Time
eTime = time.Now()
)
sTime, err = time.Parse(format, startDate)
if err != nil {
log.Error("d.LogAuditIndexName(%v)", startDate)
return
}
resDict := map[string]bool{}
if strings.Contains(format, "02") {
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(0, 0, -1)
if sTime.After(eTime) {
break
}
}
} else if strings.Contains(format, "week") {
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(0, 0, -7)
if sTime.After(eTime) {
break
}
}
} else if strings.Contains(format, "01") {
// 1月31日时AddDate(0, -1, 0)会出现错误
year, month, _ := eTime.Date()
hour, min, sec := eTime.Clock()
eTime = time.Date(year, month, 1, hour, min, sec, 0, eTime.Location())
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(0, -1, 0)
if sTime.After(eTime) {
break
}
}
} else if strings.Contains(format, "2006") {
// 2月29日时AddDate(-1, 0, 0)会出现错误
year, _, _ := eTime.Date()
hour, min, sec := eTime.Clock()
eTime = time.Date(year, 1, 1, hour, min, sec, 0, eTime.Location())
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(-1, 0, 0)
if sTime.After(eTime) {
break
}
}
}
for k := range resDict {
res = append(res, k)
}
return
}
func (amd *AppMultipleDatabus) getIndexName(format string, time time.Time) (index string) {
var (
week = map[int]string{
0: "0108",
1: "0916",
2: "1724",
3: "2531",
}
)
return strings.Replace(time.Format(format), "week", week[time.Day()/9], -1)
}
// NewAppMultipleDatabus .
func NewAppMultipleDatabus(d *Dao, appid string) (amd *AppMultipleDatabus) {
var err error
amd = &AppMultipleDatabus{
d: d,
appid: appid,
attrs: d.AttrPool[appid],
offsets: make(map[int]*model.LoopOffset),
tableName: []string{},
indexNameSuffix: []string{},
commits: make(map[int32]*databus.Message),
}
amd.db = d.DBPool[amd.attrs.DBName]
amd.dtb = d.DatabusPool[amd.attrs.Databus.Databus]
if amd.attrs.Table.TableSplit == "int" || amd.attrs.Table.TableSplit == "single" {
for i := amd.attrs.Table.TableFrom; i <= amd.attrs.Table.TableTo; i++ {
tableName := fmt.Sprintf("%s%0"+amd.attrs.Table.TableZero+"d", amd.attrs.Table.TablePrefix, i)
amd.tableName = append(amd.tableName, tableName)
amd.offsets[i] = &model.LoopOffset{}
}
} else {
var tableNameSuffix []string
tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",")
if tableNameSuffix, err = amd.IndexNameSuffix(tableFormat[0], tableFormat[1]); err != nil {
log.Error("amd.IndexNameSuffix(%v)", err)
return
}
for _, v := range tableNameSuffix {
amd.tableName = append(amd.tableName, amd.attrs.Table.TablePrefix+v)
}
for i := range amd.tableName {
amd.offsets[i] = &model.LoopOffset{}
}
}
return
}
// Business return business.
func (amd *AppMultipleDatabus) Business() string {
return amd.attrs.Business
}
// InitIndex .
func (amd *AppMultipleDatabus) InitIndex(c context.Context) {
var (
err error
indexAliasName string
indexEntityName string
)
indexFormat := strings.Split(amd.attrs.Index.IndexFormat, ",")
aliases, aliasErr := amd.d.GetAliases(amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix)
if indexFormat[0] == "int" || indexFormat[0] == "single" {
for i := amd.attrs.Index.IndexFrom; i <= amd.attrs.Index.IndexTo; i++ {
// == "0" 有问题,不通用
if amd.attrs.Index.IndexZero == "0" {
indexAliasName = amd.attrs.Index.IndexAliasPrefix
indexEntityName = amd.attrs.Index.IndexEntityPrefix
} else {
indexAliasName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexAliasPrefix, i)
indexEntityName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexEntityPrefix, i)
}
if aliasErr != nil {
amd.d.InitIndex(c, nil, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping)
} else {
amd.d.InitIndex(c, aliases, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping)
}
}
} else {
if amd.indexNameSuffix, err = amd.IndexNameSuffix(indexFormat[0], indexFormat[1]); err != nil {
log.Error("amd.IndexNameSuffix(%v)", err)
return
}
for _, v := range amd.indexNameSuffix {
if aliasErr != nil {
amd.d.InitIndex(c, nil, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping)
} else {
amd.d.InitIndex(c, aliases, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping)
}
}
}
}
// InitOffset insert init value to offset.
func (amd *AppMultipleDatabus) InitOffset(c context.Context) {
amd.d.InitOffset(c, amd.offsets[0], amd.attrs, amd.tableName)
}
// Offset .
func (amd *AppMultipleDatabus) Offset(c context.Context) {
for i, v := range amd.tableName {
offset, err := amd.d.Offset(c, amd.attrs.AppID, v)
if err != nil {
log.Error("amd.d.offset error(%v)", err)
time.Sleep(time.Second * 3)
}
amd.offsets[i].SetReview(offset.ReviewID, offset.ReviewTime)
amd.offsets[i].SetOffset(offset.OffsetID(), offset.OffsetTime())
}
}
// SetRecover set recover
func (amd *AppMultipleDatabus) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
amd.offsets.SetRecoverOffsets(i, recoverID, recoverTime)
}
// IncrMessages .
func (amd *AppMultipleDatabus) IncrMessages(c context.Context) (length int, err error) {
ticker := time.NewTicker(time.Duration(time.Millisecond * time.Duration(amd.attrs.Databus.Ticker)))
defer ticker.Stop()
for {
select {
case msg, ok := <-amd.dtb.Messages():
if !ok {
log.Error("databus: %s binlog consumer exit!!!", amd.attrs.Databus)
break
}
m := &model.Message{}
amd.commits[msg.Partition] = msg
if err = json.Unmarshal(msg.Value, m); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
if amd.attrs.Business == "creative_reply" {
r, _ := regexp.Compile("reply_\\d+")
if !r.MatchString(m.Table) {
continue
}
}
if (amd.attrs.Table.TableSplit == "string" && m.Table == amd.attrs.Table.TablePrefix) ||
(amd.attrs.Table.TableSplit != "string" && strings.HasPrefix(m.Table, amd.attrs.Table.TablePrefix)) {
if m.Action == "insert" || m.Action == "update" {
var parseMap map[string]interface{}
parseMap, err = amd.d.JSON2map(m.New)
if err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
// esports fav type filter
if amd.attrs.AppID == "esports_fav" {
if t, ok := parseMap["type"]; ok && t.(int64) != 10 {
continue
}
}
// playlist fav type and attr filter
if amd.attrs.AppID == "fav_playlist" {
if t, ok := parseMap["type"]; ok && t.(int64) != 2 {
continue
}
if t, ok := parseMap["attr"]; ok {
if t.(int64)>>0&1 == 0 || (m.Action == "insert" && t.(int64)>>1&1 == 1) {
continue
}
}
}
var newParseMap map[string]interface{}
newParseMap, err = amd.newParseMap(c, m.Table, parseMap)
if err != nil {
if amd.attrs.AppID == "creative_reply" {
continue
}
log.Error("amd.newParseMap error(%v)", err)
continue
}
amd.mapData = append(amd.mapData, newParseMap)
}
}
if len(amd.mapData) < amd.attrs.Databus.AggCount {
continue
}
case <-ticker.C:
}
break
}
if len(amd.mapData) > 0 {
amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "dtb", []string{})
}
length = len(amd.mapData)
//amd.d.extraData(c, amd, "dtb")
return
}
// AllMessages .
func (amd *AppMultipleDatabus) AllMessages(c context.Context) (length int, err error) {
amd.mapData = []model.MapData{}
for i, v := range amd.tableName {
var (
rows *xsql.Rows
sql string
)
tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",")
if amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm" {
sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i, i)
} else if tableFormat[0] == "int" || tableFormat[0] == "single" { // 兼容只传后缀,不传表名
sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i)
log.Info(sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size)
} else {
sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, v)
}
if rows, err = amd.db.Query(c, sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size); err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
tempList := []model.MapData{}
for rows.Next() {
item, row := InitMapData(amd.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("AppMultipleDatabus.AllMessages rows.Scan() error(%v)", err)
continue
}
var newParseMap map[string]interface{}
newParseMap, err = amd.newParseMap(c, v, item)
if err != nil {
log.Error("amd.newParseMap error(%v)", err)
continue
}
tempList = append(tempList, newParseMap)
amd.mapData = append(amd.mapData, newParseMap)
}
rows.Close()
tmpLength := len(tempList)
if tmpLength > 0 {
amd.offsets[i].SetTempOffset(tempList[tmpLength-1].PrimaryID(), tempList[tmpLength-1].StrMTime())
}
}
if len(amd.mapData) > 0 {
amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "db", []string{})
}
length = len(amd.mapData)
//amd.d.extraData(c, amd, "db")
return
}
// BulkIndex .
func (amd *AppMultipleDatabus) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
partData := amd.mapData[start:end]
if amd.d.c.Business.Index {
| t) (err error) {
if amd.d.c.Business.Index {
if amd.attrs.Table.TableSplit == "int" || amd.attrs.Table.TableSplit == "single" { // 兼容只传后缀,不传表名
for i := amd.attrs.Table.TableFrom; i <= amd.attrs.Table.TableTo; i++ {
tableName := fmt.Sprintf("%s%0"+amd.attrs.Table.TableZero+"d", amd.attrs.Table.TablePrefix, i)
if err = amd.d.CommitOffset(c, amd.offsets[i], amd.attrs.AppID, tableName); err != nil {
log.Error("AppMultipleDatabus.Commit error(%v)", err)
continue
}
}
} else {
for i, v := range amd.indexNameSuffix {
if err = amd.d.CommitOffset(c, amd.offsets[i], amd.attrs.AppID, v); err != nil {
log.Error("Commit error(%v)", err)
continue
}
}
}
} else {
for k, c := range amd.commits {
if err = c.Commit(); err != nil {
log.Error("AppMultipleDatabus.Commit error(%v)", err)
continue
}
delete(amd.commits, k)
}
}
amd.mapData = []model.MapData{}
return
}
// Sleep .
func (amd *AppMultipleDatabus) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(amd.attrs.Other.Sleep))
}
// Size .
func (amd *AppMultipleDatabus) Size(c context.Context) (size int) {
return amd.attrs.Other.Size
}
// indexField .
// func (amd *AppMultipleDatabus) indexField(c context.Context, tableName string) (fieldName string, fieldValue int) {
// suffix, _ := strconv.Atoi(strings.Split(tableName, "_")[2])
// s := strings.Split(amd.attrs.DataSQL.DataIndexSuffix, ";")
// v := strings.Split(s[1], ":")
// fieldName = v[0]
// indexNum, _ := strconv.Atoi(v[2])
// fieldValue = suffix + indexNum
// return
// }
// newParseMap .
func (amd *AppMultipleDatabus) newParseMap(c context.Context, table string, parseMap map[string]interface{}) (res map[string]interface{}, err error) {
res = parseMap
//TODO 实体索引写不进去
if (amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm") && !amd.d.c.Business.Index {
indexSuffix := strings.Split(table, "_")[2]
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + indexSuffix
if _, ok := res["msg"]; ok {
// dm_content_
res["index_field"] = true // 删除ctime
res["index_id"] = fmt.Sprintf("%v", res["dmid"])
} else {
// dm_index_
res["index_id"] = fmt.Sprintf("%v", res["id"])
}
} else if amd.attrs.AppID == "dmreport" {
if ztime, ok := res["ctime"].(*interface{}); ok { // 数据库
if ctime, cok := (*ztime).(time.Time); cok {
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + ctime.Format("2006")
}
} else if ztime, ok := res["ctime"].(string); ok { // databus
var ctime time.Time
if ctime, err = time.Parse("2006-01-02 15:04:05", ztime); err == nil {
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + ctime.Format("2006")
}
}
} else if amd.attrs.AppID == "creative_reply" && !amd.d.c.Business.Index {
if replyType, ok := res["type"].(int64); ok {
if replyType == 1 || replyType == 12 || replyType == 14 {
} else {
err = fmt.Errorf("多余数据")
}
} else {
err = fmt.Errorf("错误数据")
}
} else if amd.attrs.Index.IndexSplit == "single" {
res["index_name"] = amd.attrs.Index.IndexAliasPrefix
} else {
indexSuffix := string([]rune(table)[strings.Count(amd.attrs.Table.TablePrefix, "")-1:])
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + indexSuffix
}
//dtb index_id
if amd.attrs.AppID == "favorite" && !amd.d.c.Business.Index {
if fid, ok := res["fid"].(int64); ok {
if oid, ok := res["oid"].(int64); ok {
res["index_id"] = fmt.Sprintf("%d_%d", fid, oid)
return
}
}
res["index_id"] = "err"
res["indexName"] = ""
}
return
}
| err = amd.d.BulkDBData(c, amd.attrs, writeEntityIndex, partData...)
} else {
err = amd.d.BulkDatabusData(c, amd.attrs, writeEntityIndex, partData...)
}
return
}
// Commit .
func (amd *AppMultipleDatabus) Commit(c context.Contex | identifier_body |
app_multiple_databus.go | package dao
import (
"context"
"encoding/json"
"fmt"
"regexp"
"strings"
"time"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
// AppMultipleDatabus .
type AppMultipleDatabus struct {
d *Dao
appid string
attrs *model.Attrs
db *xsql.DB
dtb *databus.Databus
offsets model.LoopOffsets
mapData []model.MapData
tableName []string
indexNameSuffix []string
commits map[int32]*databus.Message
}
// IndexNameSuffix .
func (amd *AppMultipleDatabus) IndexNameSuffix(format string, startDate string) (res []string, err error) {
var (
sTime time.Time
eTime = time.Now()
)
sTime, err = time.Parse(format, startDate)
if err != nil {
log.Error("d.LogAuditIndexName(%v)", startDate)
return
}
resDict := map[string]bool{}
if strings.Contains(format, "02") {
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(0, 0, -1)
if sTime.After(eTime) {
break
}
}
} else if strings.Contains(format, "week") {
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(0, 0, -7)
if sTime.After(eTime) {
break
}
}
} else if strings.Contains(format, "01") {
// 1月31日时AddDate(0, -1, 0)会出现错误
year, month, _ := eTime.Date()
hour, min, sec := eTime.Clock()
eTime = time.Date(year, month, 1, hour, min, sec, 0, eTime.Location())
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(0, -1, 0)
if sTime.After(eTime) {
break
}
}
} else if strings.Contains(format, "2006") {
// 2月29日时AddDate(-1, 0, 0)会出现错误
year, _, _ := eTime.Date()
hour, min, sec := eTime.Clock()
eTime = time.Date(year, 1, 1, hour, min, sec, 0, eTime.Location())
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(-1, 0, 0)
if sTime.After(eTime) {
break
}
}
}
for k := range resDict {
res = append(res, k)
}
return
}
func (amd *AppMultipleDatabus) getIndexName(format string, time time.Time) (index string) {
var (
week = map[int]string{
0: "0108",
1: "0916",
2: "1724",
3: "2531",
}
)
return strings.Replace(time.Format(format), "week", week[time.Day()/9], -1)
}
// NewAppMultipleDatabus .
func NewAppMultipleDatabus(d *Dao, appid string) (amd *AppMultipleDatabus) {
var err error
amd = &AppMultipleDatabus{
d: d,
appid: appid,
attrs: d.AttrPool[appid],
offsets: make(map[int]*model.LoopOffset),
tableName: []string{},
indexNameSuffix: []string{},
commits: make(map[int32]*databus.Message),
}
amd.db = d.DBPool[amd.attrs.DBName]
amd.dtb = d.DatabusPool[amd.attrs.Databus.Databus]
if amd.attrs.Table.TableSplit == "int" || amd.attrs.Table.TableSplit == "single" {
for i := amd.attrs.Table.TableFrom; i <= amd.attrs.Table.TableTo; i++ {
tableName := fmt.Sprintf("%s%0"+amd.attrs.Table.TableZero+"d", amd.attrs.Table.TablePrefix, i)
amd.tableName = append(amd.tableName, tableName)
amd.offsets[i] = &model.LoopOffset{}
}
} else {
var tableNameSuffix []string
tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",")
if tableNameSuffix, err = amd.IndexNameSuffix(tableFormat[0], tableFormat[1]); err != nil {
log.Error("amd.IndexNameSuffix(%v)", err)
return
}
for _, v := range tableNameSuffix {
amd.tableName = append(amd.tableName, amd.attrs.Table.TablePrefix+v)
}
for i := range amd.tableName {
amd.offsets[i] = &model.LoopOffset{}
}
}
return
}
// Business return business.
func (amd *AppMultipleDatabus) Business() string {
return amd.attrs.Business
}
// InitIndex .
func (amd *AppMultipleDatabus) InitIndex(c context.Context) {
var (
err error
indexAliasName string
indexEntityName string
)
indexFormat := strings.Split(amd.attrs.Index.IndexFormat, ",")
aliases, aliasErr := amd.d.GetAliases(amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix)
if indexFormat[0] == "int" || indexFormat[0] == "single" {
for i := amd.attrs.Index.IndexFrom; i <= amd.attrs.Index.IndexTo; i++ {
// == "0" 有问题,不通用
if amd.attrs.Index.IndexZero == "0" {
indexAliasName = amd.attrs.Index.IndexAliasPrefix
indexEntityName = amd.attrs.Index.IndexEntityPrefix
} else {
indexAliasName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexAliasPrefix, i)
indexEntityName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexEntityPrefix, i)
}
if aliasErr != nil {
amd.d.InitIndex(c, nil, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping)
} else {
amd.d.InitIndex(c, aliases, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping)
}
}
} else {
if amd.indexNameSuffix, err = amd.IndexNameSuffix(indexFormat[0], indexFormat[1]); err != nil {
log.Error("amd.IndexNameSuffix(%v)", err)
return
}
for _, v := range amd.indexNameSuffix {
if aliasErr != nil {
amd.d.InitIndex(c, nil, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping)
} else {
amd.d.InitIndex(c, aliases, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping)
}
}
}
}
// InitOffset insert init value to offset.
func (amd *AppMultipleDatabus) InitOffset(c context.Context) {
amd.d.InitOffset(c, amd.offsets[0], amd.attrs, amd.tableName)
}
// Offset .
func (amd *AppMultipleDatabus) Offset(c context.Context) {
for i, v := range amd.tableName {
offset, err := amd.d.Offset(c, amd.attrs.AppID, v)
if err != nil {
log.Error("amd.d.offset error(%v)", err)
time.Sleep(time.Second * 3)
continue
}
amd.offsets[i].SetReview(offset.ReviewID, offset.ReviewTime)
amd.offsets[i].SetOffset(offset.OffsetID(), offset.OffsetTime())
}
}
// SetRecover set recover
func (amd *AppMultipleDatabus) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
amd.offsets.SetRecoverOffsets(i, recoverID, recoverTime)
}
// IncrMessages .
func (amd *AppMultipleDatabus) IncrMessages(c context.Context) (length int, err error) {
ticker := time.NewTicker(time.Millisecond * time.Duration(amd.attrs.Databus.Ticker))
defer ticker.Stop()
for {
select {
case msg, ok := <-amd.dtb.Messages():
if !ok {
log.Error("databus: %s binlog consumer exit!!!", amd.attrs.Databus)
break
}
m := &model.Message{}
amd.commits[msg.Partition] = msg
if err = json.Unmarshal(msg.Value, m); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
if amd.attrs.Business == "creative_reply" {
r, _ := regexp.Compile("reply_\\d+")
if !r.MatchString(m.Table) {
continue
}
}
if (amd.attrs.Table.TableSplit == "string" && m.Table == amd.attrs.Table.TablePrefix) ||
(amd.attrs.Table.TableSplit != "string" && strings.HasPrefix(m.Table, amd.attrs.Table.TablePrefix)) {
if m.Action == "insert" || m.Action == "update" {
var parseMap map[string]interface{}
parseMap, err = amd.d.JSON2map(m.New)
if err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
// esports fav type filter
if amd.attrs.AppID == "esports_fav" {
if t, ok := parseMap["type"]; ok && t.(int64) != 10 {
continue
}
}
// playlist fav type and attr filter
if amd.attrs.AppID == "fav_playlist" {
if t, ok := parseMap["type"]; ok && t.(int64) != 2 {
continue
}
if t, ok := parseMap["attr"]; ok {
if t.(int64)>>0&1 == 0 || (m.Action == "insert" && t.(int64)>>1&1 == 1) {
continue
}
}
}
var newParseMap map[string]interface{}
newParseMap, err = amd.newParseMap(c, m.Table, parseMap)
if err != nil {
if amd.attrs.AppID == "creative_reply" {
continue
}
log.Error("amd.newParseMap error(%v)", err)
continue
}
amd.mapData = append(amd.mapData, newParseMap)
}
}
if len(amd.mapData) < amd.attrs.Databus.AggCount {
continue
}
case <-ticker.C:
}
break
}
if len(amd.mapData) > 0 {
amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "dtb", []string{})
}
length = len(amd.mapData)
//amd.d.extraData(c, amd, "dtb")
return
}
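// Sketch of the fav_playlist attr checks in IncrMessages above; the meaning of
// the bits is an assumption here (bit 0 ~ "visible", bit 1 ~ "default folder").
// A row is kept only when bit 0 is set and, for inserts, bit 1 is clear.
func exampleKeepPlaylistRow(attr int64, isInsert bool) bool {
	return attr&1 == 1 && !(isInsert && attr>>1&1 == 1)
}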
// AllMessages .
func (amd *AppMultipleDatabus) AllMessages(c context.Context) (length int, err error) {
amd.mapData = []model.MapData{}
for i, v := range amd.tableName {
var (
rows *xsql.Rows
sql string
)
tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",")
if amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm" {
sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i, i)
} else if tableFormat[0] == "int" || tableFormat[0] == "single" { // 兼容只传后缀,不传表名
sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i)
log.Info("%s offset(%v) size(%d)", sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size)
} else {
sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, v)
}
if rows, err = amd.db.Query(c, sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size); err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
tempList := []model.MapData{}
for rows.Next() {
item, row := InitMapData(amd.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("AppMultipleDatabus.AllMessages rows.Scan() error(%v)", err)
continue
}
var newParseMap map[string]interface{}
newParseMap, err = amd.newParseMap(c, v, item)
if err != nil {
log.Error("amd.newParseMap error(%v)", err)
continue
}
tempList = append(tempList, newParseMap)
amd.mapData = append(amd.mapData, newParseMap)
}
rows.Close()
tmpLength := len(tempList)
if tmpLength > 0 {
amd.offsets[i].SetTempOffset(tempList[tmpLength-1].PrimaryID(), tempList[tmpLength-1].StrMTime())
}
}
if len(amd.mapData) > 0 {
amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "db", []string{})
}
length = len(amd.mapData)
//amd.d.extraData(c, amd, "db")
return
}
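// Sketch of the keyset-pagination shape that SQLByID is assumed to follow:
// each round fetches rows past the last seen primary id, and the last row's
// id/mtime become the next offset. The concrete SQL lives in configuration;
// this string is illustrative only.
func exampleKeysetSQL(fields, table string) string {
	return "SELECT " + fields + " FROM " + table + " WHERE id > ? ORDER BY id LIMIT ?"
}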
// BulkIndex .
func (amd *AppMultipleDatabus) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
partData := amd.mapData[start:end]
if amd.d.c.Business.Index {
err = amd.d.BulkDBData(c, amd.attrs, writeEntityIndex, partData...)
} else {
err = amd.d.BulkDatabusData(c, amd.attrs, writeEntityIndex, partData...)
}
return
}
// Commit .
func (amd *AppMultipleDatabus) Commit(c context.Context) (err error) {
if amd.d.c.Business.Index {
if amd.attrs.Table.TableSplit == "int" || amd.attrs.Table.TableSplit == "single" { // 兼容只传后缀,不传表名
for i := amd.attrs.Table.TableFrom; i <= amd.attrs.Table.TableTo; i++ {
tableName := fmt.Sprintf("%s%0"+amd.attrs.Table.TableZero+"d", amd.attrs.Table.TablePrefix, i)
if err = amd.d.CommitOffset(c, amd.offsets[i], amd.attrs.AppID, tableName); err != nil {
log.Error("AppMultipleDatabus.Commit error(%v)", err)
continue
}
}
} else {
for i, v := range amd.indexNameSuffix {
if err = amd.d.CommitOffset(c, amd.offsets[i], amd.attrs.AppID, v); err != nil {
| log.Error("AppMultipleDatabus.Commit error(%v)", err)
continue
}
delete(amd.commits, k)
}
}
amd.mapData = []model.MapData{}
return
}
// Sleep .
func (amd *AppMultipleDatabus) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(amd.attrs.Other.Sleep))
}
// Size .
func (amd *AppMultipleDatabus) Size(c context.Context) (size int) {
return amd.attrs.Other.Size
}
// indexField .
// func (amd *AppMultipleDatabus) indexField(c context.Context, tableName string) (fieldName string, fieldValue int) {
// suffix, _ := strconv.Atoi(strings.Split(tableName, "_")[2])
// s := strings.Split(amd.attrs.DataSQL.DataIndexSuffix, ";")
// v := strings.Split(s[1], ":")
// fieldName = v[0]
// indexNum, _ := strconv.Atoi(v[2])
// fieldValue = suffix + indexNum
// return
// }
// newParseMap .
func (amd *AppMultipleDatabus) newParseMap(c context.Context, table string, parseMap map[string]interface{}) (res map[string]interface{}, err error) {
res = parseMap
// TODO: writes to the entity index do not go through.
if (amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm") && !amd.d.c.Business.Index {
indexSuffix := strings.Split(table, "_")[2]
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + indexSuffix
if _, ok := res["msg"]; ok {
// dm_content_
res["index_field"] = true // 删除ctime
res["index_id"] = fmt.Sprintf("%v", res["dmid"])
} else {
// dm_index_
res["index_id"] = fmt.Sprintf("%v", res["id"])
}
} else if amd.attrs.AppID == "dmreport" {
if ztime, ok := res["ctime"].(*interface{}); ok { // 数据库
if ctime, cok := (*ztime).(time.Time); cok {
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + ctime.Format("2006")
}
} else if ztime, ok := res["ctime"].(string); ok { // databus
var ctime time.Time
if ctime, err = time.Parse("2006-01-02 15:04:05", ztime); err == nil {
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + ctime.Format("2006")
}
}
} else if amd.attrs.AppID == "creative_reply" && !amd.d.c.Business.Index {
if replyType, ok := res["type"].(int64); ok {
if replyType != 1 && replyType != 12 && replyType != 14 {
err = fmt.Errorf("redundant data")
}
} else {
err = fmt.Errorf("错误数据")
}
} else if amd.attrs.Index.IndexSplit == "single" {
res["index_name"] = amd.attrs.Index.IndexAliasPrefix
} else {
indexSuffix := string([]rune(table)[strings.Count(amd.attrs.Table.TablePrefix, "")-1:])
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + indexSuffix
}
//dtb index_id
if amd.attrs.AppID == "favorite" && !amd.d.c.Business.Index {
if fid, ok := res["fid"].(int64); ok {
if oid, ok := res["oid"].(int64); ok {
res["index_id"] = fmt.Sprintf("%d_%d", fid, oid)
return
}
}
res["index_id"] = "err"
res["indexName"] = ""
}
return
}
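// Sketch of the suffix extraction used in newParseMap's default branch:
// strings.Count(p, "") returns the rune count of p plus one, so Count-1 is the
// rune length of the table prefix and the slice keeps whatever follows it.
func exampleTableSuffix(table, prefix string) string {
	// exampleTableSuffix("reply_09", "reply_") == "09"
	return string([]rune(table)[strings.Count(prefix, "")-1:])
}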
| log.Error("Commit error(%v)", err)
continue
}
}
}
} else {
for k, c := range amd.commits {
if err = c.Commit(); err != nil {
| conditional_block |
zz_generated.composition_transforms.go
/*
Copyright 2020 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Generated from apiextensions/v1/composition_transforms.go by ../hack/duplicate_api_type.sh. DO NOT EDIT.
package v1beta1
import (
"encoding/json"
"regexp"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"github.com/crossplane/crossplane-runtime/pkg/errors"
verrors "github.com/crossplane/crossplane/internal/validation/errors"
)
// TransformType is type of the transform function to be chosen.
type TransformType string
// Accepted TransformTypes.
const (
ErrFmtConvertFormatPairNotSupported = "conversion from %s to %s is not supported with format %s"
TransformTypeMap TransformType = "map"
TransformTypeMatch TransformType = "match"
TransformTypeMath TransformType = "math"
TransformTypeString TransformType = "string"
TransformTypeConvert TransformType = "convert"
)
// Transform is a unit of process whose input is transformed into an output with
// the supplied configuration.
type Transform struct {
// Type of the transform to be run.
// +kubebuilder:validation:Enum=map;match;math;string;convert
Type TransformType `json:"type"`
// Math is used to transform the input via mathematical operations such as
// multiplication.
// +optional
Math *MathTransform `json:"math,omitempty"`
// Map uses the input as a key in the given map and returns the value.
// +optional
Map *MapTransform `json:"map,omitempty"`
// Match is a more complex version of Map that matches a list of patterns.
// +optional
Match *MatchTransform `json:"match,omitempty"`
// String is used to transform the input into a string or a different kind
// of string. Note that the input does not necessarily need to be a string.
// +optional
String *StringTransform `json:"string,omitempty"`
// Convert is used to cast the input into the given output type.
// +optional
Convert *ConvertTransform `json:"convert,omitempty"`
}
// Validate this Transform is valid.
//
//nolint:gocyclo // This is a long but simple/same-y switch.
func (t *Transform) Validate() *field.Error {
switch t.Type {
case TransformTypeMath:
if t.Math == nil {
return field.Required(field.NewPath("math"), "given transform type math requires configuration")
}
return verrors.WrapFieldError(t.Math.Validate(), field.NewPath("math"))
case TransformTypeMap:
if t.Map == nil {
return field.Required(field.NewPath("map"), "given transform type map requires configuration")
}
return verrors.WrapFieldError(t.Map.Validate(), field.NewPath("map"))
case TransformTypeMatch:
if t.Match == nil {
return field.Required(field.NewPath("match"), "given transform type match requires configuration")
}
return verrors.WrapFieldError(t.Match.Validate(), field.NewPath("match"))
case TransformTypeString:
if t.String == nil {
return field.Required(field.NewPath("string"), "given transform type string requires configuration")
}
return verrors.WrapFieldError(t.String.Validate(), field.NewPath("string"))
case TransformTypeConvert:
if t.Convert == nil {
return field.Required(field.NewPath("convert"), "given transform type convert requires configuration")
}
if err := t.Convert.Validate(); err != nil {
return verrors.WrapFieldError(err, field.NewPath("convert"))
}
default:
// Should never happen
return field.Invalid(field.NewPath("type"), t.Type, "unknown transform type")
}
return nil
}
// GetFormat returns the format of the transform.
func (t *ConvertTransform) GetFormat() ConvertTransformFormat {
if t.Format != nil {
return *t.Format
}
return ConvertTransformFormatNone
}
// GetOutputType returns the output type of the transform.
// It returns an error if the transform type is unknown.
// It returns nil if the output type is not known.
func (t *Transform) GetOutputType() (*TransformIOType, error) {
var out TransformIOType
switch t.Type {
case TransformTypeMap, TransformTypeMatch:
return nil, nil
case TransformTypeMath:
out = TransformIOTypeFloat64
case TransformTypeString:
out = TransformIOTypeString
case TransformTypeConvert:
out = t.Convert.ToType
default:
return nil, errors.Errorf("unable to get output type, unknown transform type: %s", t.Type)
}
return &out, nil
}
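// Minimal sketch (illustrative, not generated code) of how Validate and
// GetOutputType compose for a string transform.
func exampleStringTransformOutput() (*TransformIOType, *field.Error) {
	f := "%s-bucket"
	t := Transform{Type: TransformTypeString, String: &StringTransform{Format: &f}}
	if err := t.Validate(); err != nil {
		return nil, err
	}
	out, _ := t.GetOutputType() // cannot fail for a known transform type
	return out, nil             // *out == TransformIOTypeString
}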
// MathTransformType conducts mathematical operations.
type MathTransformType string
// Accepted MathTransformType.
const (
MathTransformTypeMultiply MathTransformType = "Multiply" // Default
MathTransformTypeClampMin MathTransformType = "ClampMin"
MathTransformTypeClampMax MathTransformType = "ClampMax"
)
// MathTransform conducts mathematical operations on the input with the given
// configuration in its properties.
type MathTransform struct {
// Type of the math transform to be run.
// +optional
// +kubebuilder:validation:Enum=Multiply;ClampMin;ClampMax
// +kubebuilder:default=Multiply
Type MathTransformType `json:"type,omitempty"`
// Multiply the value.
// +optional
Multiply *int64 `json:"multiply,omitempty"`
// ClampMin makes sure that the value is not smaller than the given value.
// +optional
ClampMin *int64 `json:"clampMin,omitempty"`
// ClampMax makes sure that the value is not bigger than the given value.
// +optional
ClampMax *int64 `json:"clampMax,omitempty"`
}
// GetType returns the type of the math transform, returning the default if not specified.
func (m *MathTransform) GetType() MathTransformType {
if m.Type == "" {
return MathTransformTypeMultiply
}
return m.Type
}
// Validate checks this MathTransform is valid.
func (m *MathTransform) Validate() *field.Error {
switch m.GetType() {
case MathTransformTypeMultiply:
if m.Multiply == nil {
return field.Required(field.NewPath("multiply"), "must specify a value if a multiply math transform is specified")
}
case MathTransformTypeClampMin:
if m.ClampMin == nil {
return field.Required(field.NewPath("clampMin"), "must specify a value if a clamp min math transform is specified")
}
case MathTransformTypeClampMax:
if m.ClampMax == nil {
return field.Required(field.NewPath("clampMax"), "must specify a value if a clamp max math transform is specified")
}
default:
return field.Invalid(field.NewPath("type"), m.Type, "unknown math transform type")
}
return nil
}
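// Sketch (illustrative, not generated code): a clamp-style MathTransform that
// Validate above would accept; the bound value is an assumption.
func exampleClampTransform() *MathTransform {
	min := int64(1)
	return &MathTransform{Type: MathTransformTypeClampMin, ClampMin: &min}
}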
// MapTransform returns a value for the input from the given map.
type MapTransform struct {
// Pairs is the map that will be used for transform.
// +optional
Pairs map[string]extv1.JSON `json:",inline"`
}
// Validate checks this MapTransform is valid.
func (m *MapTransform) Validate() *field.Error {
if len(m.Pairs) == 0 {
return field.Required(field.NewPath("pairs"), "at least one pair must be specified if a map transform is specified")
}
return nil
}
// NOTE(negz): The Kubernetes JSON decoder doesn't seem to like inlining a map
// into a struct - doing so results in a seemingly successful unmarshal of the
// data, but an empty map. We must keep the ,inline tag nevertheless in order to
// trick the CRD generator into thinking MapTransform is an arbitrary map (i.e.
// generating a validation schema with string additionalProperties), but the
// actual marshalling is handled by the marshal methods below.
// UnmarshalJSON into this MapTransform.
func (m *MapTransform) UnmarshalJSON(b []byte) error {
return json.Unmarshal(b, &m.Pairs)
}
// MarshalJSON from this MapTransform.
func (m *MapTransform) MarshalJSON() ([]byte, error) {
return json.Marshal(m.Pairs)
}
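// Round-trip sketch (illustrative, not generated code) for the custom
// (un)marshalling above: the pairs serialize as a bare JSON object, with no
// wrapping "pairs" key. Keys and values are assumptions.
func exampleMapTransformJSON() ([]byte, error) {
	mt := MapTransform{Pairs: map[string]extv1.JSON{
		"us-west-2": {Raw: []byte(`"UW2"`)},
	}}
	return json.Marshal(&mt) // yields {"us-west-2":"UW2"}
}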
// MatchFallbackTo defines how a match operation will fallback.
type MatchFallbackTo string
// Valid MatchFallbackTo.
const (
MatchFallbackToTypeValue MatchFallbackTo = "Value"
MatchFallbackToTypeInput MatchFallbackTo = "Input"
)
// MatchTransform is a more complex version of a map transform that matches a
// list of patterns.
type MatchTransform struct {
// The patterns that should be tested against the input string.
// Patterns are tested in order. The value of the first match is used as
// result of this transform.
Patterns []MatchTransformPattern `json:"patterns,omitempty"`
// The fallback value that should be returned by the transform if no pattern
// matches.
FallbackValue extv1.JSON `json:"fallbackValue,omitempty"`
// Determines to what value the transform should fallback if no pattern matches.
// +optional
// +kubebuilder:validation:Enum=Value;Input
// +kubebuilder:default=Value
FallbackTo MatchFallbackTo `json:"fallbackTo,omitempty"`
}
// Validate checks this MatchTransform is valid.
func (m *MatchTransform) Validate() *field.Error {
if len(m.Patterns) == 0 {
return field.Required(field.NewPath("patterns"), "at least one pattern must be specified if a match transform is specified")
}
for i, p := range m.Patterns {
if err := p.Validate(); err != nil {
return verrors.WrapFieldError(err, field.NewPath("patterns").Index(i))
}
}
return nil
}
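// Sketch (illustrative, not generated code) of a MatchTransform with a literal
// pattern and a value fallback; all literals here are assumptions.
func exampleMatchTransform() MatchTransform {
	small := "small"
	return MatchTransform{
		Patterns: []MatchTransformPattern{{
			Type:    MatchTransformPatternTypeLiteral,
			Literal: &small,
			Result:  extv1.JSON{Raw: []byte(`"t3.micro"`)},
		}},
		FallbackValue: extv1.JSON{Raw: []byte(`"t3.medium"`)},
		FallbackTo:    MatchFallbackToTypeValue,
	}
}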
// MatchTransformPatternType defines the type of a MatchTransformPattern.
type MatchTransformPatternType string
// Valid MatchTransformPatternTypes.
const (
MatchTransformPatternTypeLiteral MatchTransformPatternType = "literal"
MatchTransformPatternTypeRegexp MatchTransformPatternType = "regexp"
)
// MatchTransformPattern is a transform that returns the value that matches a
// pattern.
type MatchTransformPattern struct {
// Type specifies how the pattern matches the input.
//
// * `literal` - the pattern value has to exactly match (case sensitive) the
// input string. This is the default.
//
// * `regexp` - the pattern is treated as a regular expression against
// which the input string is tested. Crossplane will throw an error if the
// key is not a valid regexp.
//
// +kubebuilder:validation:Enum=literal;regexp
// +kubebuilder:default=literal
Type MatchTransformPatternType `json:"type"`
// Literal exactly matches the input string (case sensitive).
// Is required if `type` is `literal`.
Literal *string `json:"literal,omitempty"`
// Regexp to match against the input string.
// Is required if `type` is `regexp`.
Regexp *string `json:"regexp,omitempty"`
// The value that is used as result of the transform if the pattern matches.
Result extv1.JSON `json:"result"`
}
// Validate checks this MatchTransformPattern is valid.
func (m *MatchTransformPattern) Validate() *field.Error {
switch m.Type {
case MatchTransformPatternTypeLiteral, "":
if m.Literal == nil {
return field.Required(field.NewPath("literal"), "literal pattern type requires a literal")
}
case MatchTransformPatternTypeRegexp:
if m.Regexp == nil {
return field.Required(field.NewPath("regexp"), "regexp pattern type requires a regexp")
}
if _, err := regexp.Compile(*m.Regexp); err != nil {
return field.Invalid(field.NewPath("regexp"), *m.Regexp, "invalid regexp")
}
default:
return field.Invalid(field.NewPath("type"), m.Type, "unknown pattern type")
}
return nil
}
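// Sketch (illustrative, not generated code) of a regexp pattern: Validate
// above compiles the expression, so an invalid one is rejected up front.
func exampleRegexpPattern() MatchTransformPattern {
	re := `^us-(east|west)-[0-9]$`
	return MatchTransformPattern{
		Type:   MatchTransformPatternTypeRegexp,
		Regexp: &re,
		Result: extv1.JSON{Raw: []byte(`"aws"`)},
	}
}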
// StringTransformType transforms a string.
type StringTransformType string
// Accepted StringTransformTypes.
const (
StringTransformTypeFormat StringTransformType = "Format" // Default
StringTransformTypeConvert StringTransformType = "Convert"
StringTransformTypeTrimPrefix StringTransformType = "TrimPrefix"
StringTransformTypeTrimSuffix StringTransformType = "TrimSuffix"
StringTransformTypeRegexp StringTransformType = "Regexp"
)
// StringConversionType converts a string.
type StringConversionType string
// Accepted StringConversionTypes.
const (
StringConversionTypeToUpper StringConversionType = "ToUpper"
StringConversionTypeToLower StringConversionType = "ToLower"
StringConversionTypeToJSON StringConversionType = "ToJson"
StringConversionTypeToBase64 StringConversionType = "ToBase64"
StringConversionTypeFromBase64 StringConversionType = "FromBase64"
StringConversionTypeToSHA1 StringConversionType = "ToSha1"
StringConversionTypeToSHA256 StringConversionType = "ToSha256"
StringConversionTypeToSHA512 StringConversionType = "ToSha512"
StringConversionTypeToAdler32 StringConversionType = "ToAdler32"
)
// A StringTransform returns a string given the supplied input.
type StringTransform struct {
// Type of the string transform to be run.
// +optional
// +kubebuilder:validation:Enum=Format;Convert;TrimPrefix;TrimSuffix;Regexp
// +kubebuilder:default=Format
Type StringTransformType `json:"type,omitempty"`
// Format the input using a Go format string. See
// https://golang.org/pkg/fmt/ for details.
// +optional
Format *string `json:"fmt,omitempty"`
// Optional conversion method to be specified.
// `ToUpper` and `ToLower` change the letter case of the input string.
// `ToBase64` and `FromBase64` perform a base64 conversion based on the input string.
// `ToJson` converts any input value into its raw JSON representation.
// `ToSha1`, `ToSha256`, `ToSha512` and `ToAdler32` generate a hash or
// checksum of the input converted to JSON.
// +optional
// +kubebuilder:validation:Enum=ToUpper;ToLower;ToBase64;FromBase64;ToJson;ToSha1;ToSha256;ToSha512;ToAdler32
Convert *StringConversionType `json:"convert,omitempty"`
// Trim the prefix or suffix from the input
// +optional
Trim *string `json:"trim,omitempty"`
// Extract a match from the input using a regular expression.
// +optional
Regexp *StringTransformRegexp `json:"regexp,omitempty"`
}
// Validate checks this StringTransform is valid.
//
//nolint:gocyclo // just a switch
func (s *StringTransform) Validate() *field.Error {
switch s.Type {
case StringTransformTypeFormat, "":
if s.Format == nil {
return field.Required(field.NewPath("fmt"), "format transform requires a format")
}
case StringTransformTypeConvert:
if s.Convert == nil {
return field.Required(field.NewPath("convert"), "convert transform requires a conversion type")
}
case StringTransformTypeTrimPrefix, StringTransformTypeTrimSuffix:
if s.Trim == nil {
return field.Required(field.NewPath("trim"), "trim transform requires a trim value")
}
case StringTransformTypeRegexp:
if s.Regexp == nil {
return field.Required(field.NewPath("regexp"), "regexp transform requires a regexp")
}
if s.Regexp.Match == "" {
return field.Required(field.NewPath("regexp", "match"), "regexp transform requires a match")
}
if _, err := regexp.Compile(s.Regexp.Match); err != nil {
return field.Invalid(field.NewPath("regexp", "match"), s.Regexp.Match, "invalid regexp")
}
default:
return field.Invalid(field.NewPath("type"), s.Type, "unknown string transform type")
}
return nil
}
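// Sketch (illustrative, not generated code): the default Format string
// transform; the verb consumes the transform input.
func exampleFormatTransform() StringTransform {
	f := "%s-cluster"
	return StringTransform{Type: StringTransformTypeFormat, Format: &f}
}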
// A StringTransformRegexp extracts a match from the input using a regular
// expression.
type StringTransformRegexp struct {
// Match string. May optionally include submatches, aka capture groups.
// See https://pkg.go.dev/regexp/ for details.
Match string `json:"match"`
// Group number to match. 0 (the default) matches the entire expression.
// +optional
Group *int `json:"group,omitempty"`
}
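// Sketch (illustrative, not generated code): extracting capture group 1 from
// an assumed ARN-shaped input via a Regexp string transform.
func exampleRegexpStringTransform() StringTransform {
	g := 1
	return StringTransform{
		Type: StringTransformTypeRegexp,
		Regexp: &StringTransformRegexp{
			Match: `^arn:aws:iam::(\d+):.*$`,
			Group: &g,
		},
	}
}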
// TransformIOType defines the type of a ConvertTransform.
type TransformIOType string
// The list of supported Transform input and output types.
const (
TransformIOTypeString TransformIOType = "string"
TransformIOTypeBool TransformIOType = "bool"
TransformIOTypeInt TransformIOType = "int"
TransformIOTypeInt64 TransformIOType = "int64"
TransformIOTypeFloat64 TransformIOType = "float64"
TransformIOTypeObject TransformIOType = "object"
TransformIOTypeArray TransformIOType = "array"
)
// IsValid checks if the given TransformIOType is valid.
func (c TransformIOType) IsValid() bool {
switch c {
case TransformIOTypeString, TransformIOTypeBool, TransformIOTypeInt, TransformIOTypeInt64, TransformIOTypeFloat64, TransformIOTypeObject, TransformIOTypeArray:
return true
}
return false
}
// ConvertTransformFormat defines the expected format of an input value of a
// conversion transform.
type ConvertTransformFormat string
// Possible ConvertTransformFormat values.
const (
ConvertTransformFormatNone ConvertTransformFormat = "none"
ConvertTransformFormatQuantity ConvertTransformFormat = "quantity"
ConvertTransformFormatJSON ConvertTransformFormat = "json"
)
// IsValid returns true if the format is valid.
func (c ConvertTransformFormat) IsValid() bool {
switch c {
case ConvertTransformFormatNone, ConvertTransformFormatQuantity, ConvertTransformFormatJSON:
return true
}
return false
}
// A ConvertTransform converts the input into a new object whose type is supplied.
type ConvertTransform struct {
// ToType is the type of the output of this transform.
// +kubebuilder:validation:Enum=string;int;int64;bool;float64;object;array
ToType TransformIOType `json:"toType"`
// The expected input format.
//
// * `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).
// Only used during `string -> float64` conversions.
// * `json` - parses the input as a JSON string.
// Only used during `string -> object` or `string -> array` conversions.
//
// If this property is null, the default conversion is applied.
//
// +kubebuilder:validation:Enum=none;quantity;json
// +kubebuilder:validation:Default=none
Format *ConvertTransformFormat `json:"format,omitempty"`
}
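// Sketch (illustrative, not generated code): converting a quantity-formatted
// string such as "100Mi" to float64; the field values are assumptions.
func exampleQuantityConvert() ConvertTransform {
	f := ConvertTransformFormatQuantity
	return ConvertTransform{ToType: TransformIOTypeFloat64, Format: &f}
}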
// Validate returns an error if the ConvertTransform is invalid.
func (t ConvertTransform) Validate() *field.Error {
if !t.GetFormat().IsValid() {
return field.Invalid(field.NewPath("format"), t.Format, "invalid format")
}
if !t.ToType.IsValid() {
return field.Invalid(field.NewPath("toType"), t.ToType, "invalid type")
}
return nil
}
zz_generated.composition_transforms.go | /*
Copyright 2020 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Generated from apiextensions/v1/composition_transforms.go by ../hack/duplicate_api_type.sh. DO NOT EDIT.
package v1beta1
import (
"encoding/json"
"regexp"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"github.com/crossplane/crossplane-runtime/pkg/errors"
verrors "github.com/crossplane/crossplane/internal/validation/errors"
)
// TransformType is type of the transform function to be chosen.
type TransformType string
// Accepted TransformTypes.
const (
ErrFmtConvertFormatPairNotSupported = "conversion from %s to %s is not supported with format %s"
TransformTypeMap TransformType = "map"
TransformTypeMatch TransformType = "match"
TransformTypeMath TransformType = "math"
TransformTypeString TransformType = "string"
TransformTypeConvert TransformType = "convert"
)
// Transform is a unit of process whose input is transformed into an output with
// the supplied configuration.
type Transform struct {
// Type of the transform to be run.
// +kubebuilder:validation:Enum=map;match;math;string;convert
Type TransformType `json:"type"`
// Math is used to transform the input via mathematical operations such as
// multiplication.
// +optional
Math *MathTransform `json:"math,omitempty"`
// Map uses the input as a key in the given map and returns the value.
// +optional
Map *MapTransform `json:"map,omitempty"`
// Match is a more complex version of Map that matches a list of patterns.
// +optional
Match *MatchTransform `json:"match,omitempty"`
// String is used to transform the input into a string or a different kind
// of string. Note that the input does not necessarily need to be a string.
// +optional
String *StringTransform `json:"string,omitempty"`
// Convert is used to cast the input into the given output type.
// +optional
Convert *ConvertTransform `json:"convert,omitempty"`
}
// Validate this Transform is valid.
//
//nolint:gocyclo // This is a long but simple/same-y switch.
func (t *Transform) Validate() *field.Error {
switch t.Type {
case TransformTypeMath:
if t.Math == nil {
return field.Required(field.NewPath("math"), "given transform type math requires configuration")
}
return verrors.WrapFieldError(t.Math.Validate(), field.NewPath("math"))
case TransformTypeMap:
if t.Map == nil {
return field.Required(field.NewPath("map"), "given transform type map requires configuration")
}
return verrors.WrapFieldError(t.Map.Validate(), field.NewPath("map"))
case TransformTypeMatch:
if t.Match == nil {
return field.Required(field.NewPath("match"), "given transform type match requires configuration")
}
return verrors.WrapFieldError(t.Match.Validate(), field.NewPath("match"))
case TransformTypeString:
if t.String == nil {
return field.Required(field.NewPath("string"), "given transform type string requires configuration")
}
return verrors.WrapFieldError(t.String.Validate(), field.NewPath("string"))
case TransformTypeConvert:
if t.Convert == nil {
return field.Required(field.NewPath("convert"), "given transform type convert requires configuration")
}
if err := t.Convert.Validate(); err != nil {
return verrors.WrapFieldError(err, field.NewPath("convert"))
}
default:
// Should never happen
return field.Invalid(field.NewPath("type"), t.Type, "unknown transform type")
}
return nil
}
// GetFormat returns the format of the transform.
func (t *ConvertTransform) GetFormat() ConvertTransformFormat {
if t.Format != nil {
return *t.Format
}
return ConvertTransformFormatNone
}
// GetOutputType returns the output type of the transform.
// It returns an error if the transform type is unknown.
// It returns nil if the output type is not known.
func (t *Transform) GetOutputType() (*TransformIOType, error) {
var out TransformIOType
switch t.Type {
case TransformTypeMap, TransformTypeMatch:
return nil, nil
case TransformTypeMath:
out = TransformIOTypeFloat64
case TransformTypeString:
out = TransformIOTypeString
case TransformTypeConvert:
out = t.Convert.ToType
default:
return nil, errors.Errorf("unable to get output type, unknown transform type: %s", t.Type)
}
return &out, nil
}
// MathTransformType conducts mathematical operations.
type MathTransformType string
// Accepted MathTransformType.
const (
MathTransformTypeMultiply MathTransformType = "Multiply" // Default
MathTransformTypeClampMin MathTransformType = "ClampMin"
MathTransformTypeClampMax MathTransformType = "ClampMax"
)
// MathTransform conducts mathematical operations on the input with the given
// configuration in its properties.
type MathTransform struct {
// Type of the math transform to be run.
// +optional
// +kubebuilder:validation:Enum=Multiply;ClampMin;ClampMax
// +kubebuilder:default=Multiply
Type MathTransformType `json:"type,omitempty"`
// Multiply the value.
// +optional
Multiply *int64 `json:"multiply,omitempty"`
// ClampMin makes sure that the value is not smaller than the given value.
// +optional
ClampMin *int64 `json:"clampMin,omitempty"`
// ClampMax makes sure that the value is not bigger than the given value.
// +optional
ClampMax *int64 `json:"clampMax,omitempty"`
}
// GetType returns the type of the math transform, returning the default if not specified.
func (m *MathTransform) GetType() MathTransformType {
if m.Type == "" {
return MathTransformTypeMultiply
}
return m.Type
}
// Validate checks this MathTransform is valid.
func (m *MathTransform) Validate() *field.Error {
switch m.GetType() {
case MathTransformTypeMultiply:
if m.Multiply == nil {
return field.Required(field.NewPath("multiply"), "must specify a value if a multiply math transform is specified")
}
case MathTransformTypeClampMin:
if m.ClampMin == nil {
return field.Required(field.NewPath("clampMin"), "must specify a value if a clamp min math transform is specified")
}
case MathTransformTypeClampMax:
if m.ClampMax == nil {
return field.Required(field.NewPath("clampMax"), "must specify a value if a clamp max math transform is specified")
}
default:
return field.Invalid(field.NewPath("type"), m.Type, "unknown math transform type")
}
return nil
}
// MapTransform returns a value for the input from the given map.
type MapTransform struct {
// Pairs is the map that will be used for transform.
// +optional
Pairs map[string]extv1.JSON `json:",inline"`
}
// Validate checks this MapTransform is valid.
func (m *MapTransform) Validate() *field.Error {
if len(m.Pairs) == 0 {
return field.Required(field.NewPath("pairs"), "at least one pair must be specified if a map transform is specified")
}
return nil
}
// NOTE(negz): The Kubernetes JSON decoder doesn't seem to like inlining a map
// into a struct - doing so results in a seemingly successful unmarshal of the
// data, but an empty map. We must keep the ,inline tag nevertheless in order to
// trick the CRD generator into thinking MapTransform is an arbitrary map (i.e.
// generating a validation schema with string additionalProperties), but the
// actual marshalling is handled by the marshal methods below.
// UnmarshalJSON into this MapTransform.
func (m *MapTransform) UnmarshalJSON(b []byte) error {
return json.Unmarshal(b, &m.Pairs)
}
// MarshalJSON from this MapTransform.
func (m *MapTransform) MarshalJSON() ([]byte, error) {
return json.Marshal(m.Pairs)
}
// MatchFallbackTo defines how a match operation will fallback.
type MatchFallbackTo string
// Valid MatchFallbackTo.
const (
MatchFallbackToTypeValue MatchFallbackTo = "Value"
MatchFallbackToTypeInput MatchFallbackTo = "Input"
)
// MatchTransform is a more complex version of a map transform that matches a
// list of patterns.
type MatchTransform struct {
// The patterns that should be tested against the input string.
// Patterns are tested in order. The value of the first match is used as
// result of this transform.
Patterns []MatchTransformPattern `json:"patterns,omitempty"`
// The fallback value that should be returned by the transform if now pattern
// matches.
FallbackValue extv1.JSON `json:"fallbackValue,omitempty"`
// Determines to what value the transform should fallback if no pattern matches.
// +optional
// +kubebuilder:validation:Enum=Value;Input
// +kubebuilder:default=Value
FallbackTo MatchFallbackTo `json:"fallbackTo,omitempty"`
}
// Validate checks this MatchTransform is valid.
func (m *MatchTransform) Validate() *field.Error {
if len(m.Patterns) == 0 {
return field.Required(field.NewPath("patterns"), "at least one pattern must be specified if a match transform is specified")
}
for i, p := range m.Patterns {
if err := p.Validate(); err != nil {
return verrors.WrapFieldError(err, field.NewPath("patterns").Index(i))
}
}
return nil
}
// MatchTransformPatternType defines the type of a MatchTransformPattern.
type MatchTransformPatternType string
// Valid MatchTransformPatternTypes.
const (
MatchTransformPatternTypeLiteral MatchTransformPatternType = "literal"
MatchTransformPatternTypeRegexp MatchTransformPatternType = "regexp"
)
// MatchTransformPattern is a transform that returns the value that matches a
// pattern.
type MatchTransformPattern struct {
// Type specifies how the pattern matches the input.
//
// * `literal` - the pattern value has to exactly match (case sensitive) the
// input string. This is the default.
//
// * `regexp` - the pattern treated as a regular expression against
// which the input string is tested. Crossplane will throw an error if the
// key is not a valid regexp.
//
// +kubebuilder:validation:Enum=literal;regexp
// +kubebuilder:default=literal
Type MatchTransformPatternType `json:"type"`
// Literal exactly matches the input string (case sensitive).
// Is required if `type` is `literal`.
Literal *string `json:"literal,omitempty"`
// Regexp to match against the input string.
// Is required if `type` is `regexp`.
Regexp *string `json:"regexp,omitempty"`
// The value that is used as result of the transform if the pattern matches.
Result extv1.JSON `json:"result"`
}
// Validate checks this MatchTransformPattern is valid.
func (m *MatchTransformPattern) Validate() *field.Error {
switch m.Type {
case MatchTransformPatternTypeLiteral, "":
if m.Literal == nil {
return field.Required(field.NewPath("literal"), "literal pattern type requires a literal")
}
case MatchTransformPatternTypeRegexp:
if m.Regexp == nil {
return field.Required(field.NewPath("regexp"), "regexp pattern type requires a regexp")
}
if _, err := regexp.Compile(*m.Regexp); err != nil {
return field.Invalid(field.NewPath("regexp"), *m.Regexp, "invalid regexp")
}
default:
return field.Invalid(field.NewPath("type"), m.Type, "unknown pattern type")
}
return nil
}
// StringTransformType transforms a string.
type StringTransformType string
// Accepted StringTransformTypes.
const (
StringTransformTypeFormat StringTransformType = "Format" // Default
StringTransformTypeConvert StringTransformType = "Convert"
StringTransformTypeTrimPrefix StringTransformType = "TrimPrefix"
StringTransformTypeTrimSuffix StringTransformType = "TrimSuffix"
StringTransformTypeRegexp StringTransformType = "Regexp"
)
// StringConversionType converts a string.
type StringConversionType string
// Accepted StringConversionTypes.
const (
StringConversionTypeToUpper StringConversionType = "ToUpper"
StringConversionTypeToLower StringConversionType = "ToLower"
StringConversionTypeToJSON StringConversionType = "ToJson"
StringConversionTypeToBase64 StringConversionType = "ToBase64"
StringConversionTypeFromBase64 StringConversionType = "FromBase64"
StringConversionTypeToSHA1 StringConversionType = "ToSha1"
StringConversionTypeToSHA256 StringConversionType = "ToSha256"
StringConversionTypeToSHA512 StringConversionType = "ToSha512"
StringConversionTypeToAdler32 StringConversionType = "ToAdler32"
)
// A StringTransform returns a string given the supplied input.
type StringTransform struct {
// Type of the string transform to be run.
// +optional
// +kubebuilder:validation:Enum=Format;Convert;TrimPrefix;TrimSuffix;Regexp
// +kubebuilder:default=Format
Type StringTransformType `json:"type,omitempty"`
// Format the input using a Go format string. See
// https://golang.org/pkg/fmt/ for details.
// +optional
Format *string `json:"fmt,omitempty"`
// Optional conversion method to be specified.
// `ToUpper` and `ToLower` change the letter case of the input string.
// `ToBase64` and `FromBase64` perform a base64 conversion based on the input string.
// `ToJson` converts any input value into its raw JSON representation.
// `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input
// converted to JSON.
// +optional
// +kubebuilder:validation:Enum=ToUpper;ToLower;ToBase64;FromBase64;ToJson;ToSha1;ToSha256;ToSha512
Convert *StringConversionType `json:"convert,omitempty"`
// Trim the prefix or suffix from the input
// +optional
Trim *string `json:"trim,omitempty"`
// Extract a match from the input using a regular expression.
// +optional
Regexp *StringTransformRegexp `json:"regexp,omitempty"`
}
// Validate checks this StringTransform is valid.
//
//nolint:gocyclo // just a switch
func (s *StringTransform) Validate() *field.Error {
switch s.Type {
case StringTransformTypeFormat, "":
if s.Format == nil {
return field.Required(field.NewPath("fmt"), "format transform requires a format")
}
case StringTransformTypeConvert:
if s.Convert == nil {
return field.Required(field.NewPath("convert"), "convert transform requires a conversion type")
}
case StringTransformTypeTrimPrefix, StringTransformTypeTrimSuffix:
if s.Trim == nil {
return field.Required(field.NewPath("trim"), "trim transform requires a trim value")
}
case StringTransformTypeRegexp:
if s.Regexp == nil {
return field.Required(field.NewPath("regexp"), "regexp transform requires a regexp")
}
if s.Regexp.Match == "" {
return field.Required(field.NewPath("regexp", "match"), "regexp transform requires a match")
}
if _, err := regexp.Compile(s.Regexp.Match); err != nil {
return field.Invalid(field.NewPath("regexp", "match"), s.Regexp.Match, "invalid regexp")
}
default:
return field.Invalid(field.NewPath("type"), s.Type, "unknown string transform type")
}
return nil
}
// A StringTransformRegexp extracts a match from the input using a regular
// expression.
type StringTransformRegexp struct {
// Match string. May optionally include submatches, aka capture groups.
// See https://pkg.go.dev/regexp/ for details.
Match string `json:"match"`
// Group number to match. 0 (the default) matches the entire expression.
// +optional
Group *int `json:"group,omitempty"`
}
// TransformIOType defines the type of a ConvertTransform.
type TransformIOType string
// The list of supported Transform input and output types.
const (
TransformIOTypeString TransformIOType = "string"
TransformIOTypeBool TransformIOType = "bool"
TransformIOTypeInt TransformIOType = "int"
TransformIOTypeInt64 TransformIOType = "int64"
TransformIOTypeFloat64 TransformIOType = "float64"
TransformIOTypeObject TransformIOType = "object"
TransformIOTypeArray TransformIOType = "array"
)
// IsValid checks if the given TransformIOType is valid.
func (c TransformIOType) IsValid() bool |
// ConvertTransformFormat defines the expected format of an input value of a
// conversion transform.
type ConvertTransformFormat string
// Possible ConvertTransformFormat values.
const (
ConvertTransformFormatNone ConvertTransformFormat = "none"
ConvertTransformFormatQuantity ConvertTransformFormat = "quantity"
ConvertTransformFormatJSON ConvertTransformFormat = "json"
)
// IsValid returns true if the format is valid.
func (c ConvertTransformFormat) IsValid() bool {
switch c {
case ConvertTransformFormatNone, ConvertTransformFormatQuantity, ConvertTransformFormatJSON:
return true
}
return false
}
// A ConvertTransform converts the input into a new object whose type is supplied.
type ConvertTransform struct {
// ToType is the type of the output of this transform.
// +kubebuilder:validation:Enum=string;int;int64;bool;float64;object;list
ToType TransformIOType `json:"toType"`
// The expected input format.
//
// * `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).
// Only used during `string -> float64` conversions.
// * `json` - parses the input as a JSON string.
// Only used during `string -> object` or `string -> list` conversions.
//
// If this property is null, the default conversion is applied.
//
// +kubebuilder:validation:Enum=none;quantity;json
// +kubebuilder:validation:Default=none
Format *ConvertTransformFormat `json:"format,omitempty"`
}
// Validate returns an error if the ConvertTransform is invalid.
func (t ConvertTransform) Validate() *field.Error {
if !t.GetFormat().IsValid() {
return field.Invalid(field.NewPath("format"), t.Format, "invalid format")
}
if !t.ToType.IsValid() {
return field.Invalid(field.NewPath("toType"), t.ToType, "invalid type")
}
return nil
}
| {
switch c {
case TransformIOTypeString, TransformIOTypeBool, TransformIOTypeInt, TransformIOTypeInt64, TransformIOTypeFloat64, TransformIOTypeObject, TransformIOTypeArray:
return true
}
return false
} | identifier_body |
zz_generated.composition_transforms.go | /*
Copyright 2020 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Generated from apiextensions/v1/composition_transforms.go by ../hack/duplicate_api_type.sh. DO NOT EDIT.
package v1beta1
import (
"encoding/json"
"regexp"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"github.com/crossplane/crossplane-runtime/pkg/errors"
verrors "github.com/crossplane/crossplane/internal/validation/errors"
)
// TransformType is type of the transform function to be chosen.
type TransformType string
// Accepted TransformTypes.
const (
ErrFmtConvertFormatPairNotSupported = "conversion from %s to %s is not supported with format %s"
TransformTypeMap TransformType = "map"
TransformTypeMatch TransformType = "match"
TransformTypeMath TransformType = "math"
TransformTypeString TransformType = "string"
TransformTypeConvert TransformType = "convert"
)
// Transform is a unit of process whose input is transformed into an output with
// the supplied configuration.
type Transform struct {
// Type of the transform to be run.
// +kubebuilder:validation:Enum=map;match;math;string;convert
Type TransformType `json:"type"`
// Math is used to transform the input via mathematical operations such as
// multiplication.
// +optional
Math *MathTransform `json:"math,omitempty"`
// Map uses the input as a key in the given map and returns the value.
// +optional
Map *MapTransform `json:"map,omitempty"`
// Match is a more complex version of Map that matches a list of patterns.
// +optional
Match *MatchTransform `json:"match,omitempty"`
// String is used to transform the input into a string or a different kind
// of string. Note that the input does not necessarily need to be a string.
// +optional
String *StringTransform `json:"string,omitempty"`
// Convert is used to cast the input into the given output type.
// +optional
Convert *ConvertTransform `json:"convert,omitempty"`
}
// Validate this Transform is valid.
//
//nolint:gocyclo // This is a long but simple/same-y switch.
func (t *Transform) Validate() *field.Error {
switch t.Type {
case TransformTypeMath:
if t.Math == nil {
return field.Required(field.NewPath("math"), "given transform type math requires configuration")
}
return verrors.WrapFieldError(t.Math.Validate(), field.NewPath("math"))
case TransformTypeMap:
if t.Map == nil {
return field.Required(field.NewPath("map"), "given transform type map requires configuration")
}
return verrors.WrapFieldError(t.Map.Validate(), field.NewPath("map"))
case TransformTypeMatch:
if t.Match == nil {
return field.Required(field.NewPath("match"), "given transform type match requires configuration")
}
return verrors.WrapFieldError(t.Match.Validate(), field.NewPath("match"))
case TransformTypeString:
if t.String == nil {
return field.Required(field.NewPath("string"), "given transform type string requires configuration")
}
return verrors.WrapFieldError(t.String.Validate(), field.NewPath("string"))
case TransformTypeConvert:
if t.Convert == nil {
return field.Required(field.NewPath("convert"), "given transform type convert requires configuration")
}
if err := t.Convert.Validate(); err != nil {
return verrors.WrapFieldError(err, field.NewPath("convert"))
}
default:
// Should never happen
return field.Invalid(field.NewPath("type"), t.Type, "unknown transform type")
}
return nil
}
// GetFormat returns the format of the transform.
func (t *ConvertTransform) GetFormat() ConvertTransformFormat {
if t.Format != nil {
return *t.Format
}
return ConvertTransformFormatNone
}
// GetOutputType returns the output type of the transform.
// It returns an error if the transform type is unknown.
// It returns nil if the output type is not known.
func (t *Transform) GetOutputType() (*TransformIOType, error) {
var out TransformIOType
switch t.Type {
case TransformTypeMap, TransformTypeMatch:
return nil, nil
case TransformTypeMath:
out = TransformIOTypeFloat64
case TransformTypeString:
out = TransformIOTypeString
case TransformTypeConvert:
out = t.Convert.ToType
default:
return nil, errors.Errorf("unable to get output type, unknown transform type: %s", t.Type)
}
return &out, nil
}
// MathTransformType conducts mathematical operations.
type MathTransformType string
// Accepted MathTransformType.
const (
MathTransformTypeMultiply MathTransformType = "Multiply" // Default
MathTransformTypeClampMin MathTransformType = "ClampMin"
MathTransformTypeClampMax MathTransformType = "ClampMax"
)
// MathTransform conducts mathematical operations on the input with the given
// configuration in its properties.
type MathTransform struct {
// Type of the math transform to be run.
// +optional
// +kubebuilder:validation:Enum=Multiply;ClampMin;ClampMax
// +kubebuilder:default=Multiply
Type MathTransformType `json:"type,omitempty"`
// Multiply the value.
// +optional
Multiply *int64 `json:"multiply,omitempty"`
// ClampMin makes sure that the value is not smaller than the given value.
// +optional
ClampMin *int64 `json:"clampMin,omitempty"`
// ClampMax makes sure that the value is not bigger than the given value.
// +optional
ClampMax *int64 `json:"clampMax,omitempty"`
}
// GetType returns the type of the math transform, returning the default if not specified.
func (m *MathTransform) | () MathTransformType {
if m.Type == "" {
return MathTransformTypeMultiply
}
return m.Type
}
// Validate checks this MathTransform is valid.
func (m *MathTransform) Validate() *field.Error {
switch m.GetType() {
case MathTransformTypeMultiply:
if m.Multiply == nil {
return field.Required(field.NewPath("multiply"), "must specify a value if a multiply math transform is specified")
}
case MathTransformTypeClampMin:
if m.ClampMin == nil {
return field.Required(field.NewPath("clampMin"), "must specify a value if a clamp min math transform is specified")
}
case MathTransformTypeClampMax:
if m.ClampMax == nil {
return field.Required(field.NewPath("clampMax"), "must specify a value if a clamp max math transform is specified")
}
default:
return field.Invalid(field.NewPath("type"), m.Type, "unknown math transform type")
}
return nil
}
// MapTransform returns a value for the input from the given map.
type MapTransform struct {
// Pairs is the map that will be used for transform.
// +optional
Pairs map[string]extv1.JSON `json:",inline"`
}
// Validate checks this MapTransform is valid.
func (m *MapTransform) Validate() *field.Error {
if len(m.Pairs) == 0 {
return field.Required(field.NewPath("pairs"), "at least one pair must be specified if a map transform is specified")
}
return nil
}
// NOTE(negz): The Kubernetes JSON decoder doesn't seem to like inlining a map
// into a struct - doing so results in a seemingly successful unmarshal of the
// data, but an empty map. We must keep the ,inline tag nevertheless in order to
// trick the CRD generator into thinking MapTransform is an arbitrary map (i.e.
// generating a validation schema with string additionalProperties), but the
// actual marshalling is handled by the marshal methods below.
// UnmarshalJSON into this MapTransform.
func (m *MapTransform) UnmarshalJSON(b []byte) error {
return json.Unmarshal(b, &m.Pairs)
}
// MarshalJSON from this MapTransform.
func (m *MapTransform) MarshalJSON() ([]byte, error) {
return json.Marshal(m.Pairs)
}
// MatchFallbackTo defines how a match operation will fallback.
type MatchFallbackTo string
// Valid MatchFallbackTo.
const (
MatchFallbackToTypeValue MatchFallbackTo = "Value"
MatchFallbackToTypeInput MatchFallbackTo = "Input"
)
// MatchTransform is a more complex version of a map transform that matches a
// list of patterns.
type MatchTransform struct {
// The patterns that should be tested against the input string.
// Patterns are tested in order. The value of the first match is used as
// result of this transform.
Patterns []MatchTransformPattern `json:"patterns,omitempty"`
// The fallback value that should be returned by the transform if now pattern
// matches.
FallbackValue extv1.JSON `json:"fallbackValue,omitempty"`
// Determines to what value the transform should fallback if no pattern matches.
// +optional
// +kubebuilder:validation:Enum=Value;Input
// +kubebuilder:default=Value
FallbackTo MatchFallbackTo `json:"fallbackTo,omitempty"`
}
// Validate checks this MatchTransform is valid.
func (m *MatchTransform) Validate() *field.Error {
if len(m.Patterns) == 0 {
return field.Required(field.NewPath("patterns"), "at least one pattern must be specified if a match transform is specified")
}
for i, p := range m.Patterns {
if err := p.Validate(); err != nil {
return verrors.WrapFieldError(err, field.NewPath("patterns").Index(i))
}
}
return nil
}
// MatchTransformPatternType defines the type of a MatchTransformPattern.
type MatchTransformPatternType string
// Valid MatchTransformPatternTypes.
const (
MatchTransformPatternTypeLiteral MatchTransformPatternType = "literal"
MatchTransformPatternTypeRegexp MatchTransformPatternType = "regexp"
)
// MatchTransformPattern is a transform that returns the value that matches a
// pattern.
type MatchTransformPattern struct {
// Type specifies how the pattern matches the input.
//
// * `literal` - the pattern value has to exactly match (case sensitive) the
// input string. This is the default.
//
// * `regexp` - the pattern treated as a regular expression against
// which the input string is tested. Crossplane will throw an error if the
// key is not a valid regexp.
//
// +kubebuilder:validation:Enum=literal;regexp
// +kubebuilder:default=literal
Type MatchTransformPatternType `json:"type"`
// Literal exactly matches the input string (case sensitive).
// Is required if `type` is `literal`.
Literal *string `json:"literal,omitempty"`
// Regexp to match against the input string.
// Is required if `type` is `regexp`.
Regexp *string `json:"regexp,omitempty"`
// The value that is used as result of the transform if the pattern matches.
Result extv1.JSON `json:"result"`
}
// Validate checks this MatchTransformPattern is valid.
func (m *MatchTransformPattern) Validate() *field.Error {
switch m.Type {
case MatchTransformPatternTypeLiteral, "":
if m.Literal == nil {
return field.Required(field.NewPath("literal"), "literal pattern type requires a literal")
}
case MatchTransformPatternTypeRegexp:
if m.Regexp == nil {
return field.Required(field.NewPath("regexp"), "regexp pattern type requires a regexp")
}
if _, err := regexp.Compile(*m.Regexp); err != nil {
return field.Invalid(field.NewPath("regexp"), *m.Regexp, "invalid regexp")
}
default:
return field.Invalid(field.NewPath("type"), m.Type, "unknown pattern type")
}
return nil
}
// StringTransformType transforms a string.
type StringTransformType string
// Accepted StringTransformTypes.
const (
StringTransformTypeFormat StringTransformType = "Format" // Default
StringTransformTypeConvert StringTransformType = "Convert"
StringTransformTypeTrimPrefix StringTransformType = "TrimPrefix"
StringTransformTypeTrimSuffix StringTransformType = "TrimSuffix"
StringTransformTypeRegexp StringTransformType = "Regexp"
)
// StringConversionType converts a string.
type StringConversionType string
// Accepted StringConversionTypes.
const (
StringConversionTypeToUpper StringConversionType = "ToUpper"
StringConversionTypeToLower StringConversionType = "ToLower"
StringConversionTypeToJSON StringConversionType = "ToJson"
StringConversionTypeToBase64 StringConversionType = "ToBase64"
StringConversionTypeFromBase64 StringConversionType = "FromBase64"
StringConversionTypeToSHA1 StringConversionType = "ToSha1"
StringConversionTypeToSHA256 StringConversionType = "ToSha256"
StringConversionTypeToSHA512 StringConversionType = "ToSha512"
StringConversionTypeToAdler32 StringConversionType = "ToAdler32"
)
// A StringTransform returns a string given the supplied input.
type StringTransform struct {
// Type of the string transform to be run.
// +optional
// +kubebuilder:validation:Enum=Format;Convert;TrimPrefix;TrimSuffix;Regexp
// +kubebuilder:default=Format
Type StringTransformType `json:"type,omitempty"`
// Format the input using a Go format string. See
// https://golang.org/pkg/fmt/ for details.
// +optional
Format *string `json:"fmt,omitempty"`
// Optional conversion method to be specified.
// `ToUpper` and `ToLower` change the letter case of the input string.
// `ToBase64` and `FromBase64` perform a base64 conversion based on the input string.
// `ToJson` converts any input value into its raw JSON representation.
// `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input
// converted to JSON.
// +optional
// +kubebuilder:validation:Enum=ToUpper;ToLower;ToBase64;FromBase64;ToJson;ToSha1;ToSha256;ToSha512
Convert *StringConversionType `json:"convert,omitempty"`
// Trim the prefix or suffix from the input
// +optional
Trim *string `json:"trim,omitempty"`
// Extract a match from the input using a regular expression.
// +optional
Regexp *StringTransformRegexp `json:"regexp,omitempty"`
}
// Validate checks this StringTransform is valid.
//
//nolint:gocyclo // just a switch
func (s *StringTransform) Validate() *field.Error {
switch s.Type {
case StringTransformTypeFormat, "":
if s.Format == nil {
return field.Required(field.NewPath("fmt"), "format transform requires a format")
}
case StringTransformTypeConvert:
if s.Convert == nil {
return field.Required(field.NewPath("convert"), "convert transform requires a conversion type")
}
case StringTransformTypeTrimPrefix, StringTransformTypeTrimSuffix:
if s.Trim == nil {
return field.Required(field.NewPath("trim"), "trim transform requires a trim value")
}
case StringTransformTypeRegexp:
if s.Regexp == nil {
return field.Required(field.NewPath("regexp"), "regexp transform requires a regexp")
}
if s.Regexp.Match == "" {
return field.Required(field.NewPath("regexp", "match"), "regexp transform requires a match")
}
if _, err := regexp.Compile(s.Regexp.Match); err != nil {
return field.Invalid(field.NewPath("regexp", "match"), s.Regexp.Match, "invalid regexp")
}
default:
return field.Invalid(field.NewPath("type"), s.Type, "unknown string transform type")
}
return nil
}
// A StringTransformRegexp extracts a match from the input using a regular
// expression.
type StringTransformRegexp struct {
// Match string. May optionally include submatches, aka capture groups.
// See https://pkg.go.dev/regexp/ for details.
Match string `json:"match"`
// Group number to match. 0 (the default) matches the entire expression.
// +optional
Group *int `json:"group,omitempty"`
}
// TransformIOType defines the type of a ConvertTransform.
type TransformIOType string
// The list of supported Transform input and output types.
const (
TransformIOTypeString TransformIOType = "string"
TransformIOTypeBool TransformIOType = "bool"
TransformIOTypeInt TransformIOType = "int"
TransformIOTypeInt64 TransformIOType = "int64"
TransformIOTypeFloat64 TransformIOType = "float64"
TransformIOTypeObject TransformIOType = "object"
TransformIOTypeArray TransformIOType = "array"
)
// IsValid checks if the given TransformIOType is valid.
func (c TransformIOType) IsValid() bool {
switch c {
case TransformIOTypeString, TransformIOTypeBool, TransformIOTypeInt, TransformIOTypeInt64, TransformIOTypeFloat64, TransformIOTypeObject, TransformIOTypeArray:
return true
}
return false
}
// ConvertTransformFormat defines the expected format of an input value of a
// conversion transform.
type ConvertTransformFormat string
// Possible ConvertTransformFormat values.
const (
ConvertTransformFormatNone ConvertTransformFormat = "none"
ConvertTransformFormatQuantity ConvertTransformFormat = "quantity"
ConvertTransformFormatJSON ConvertTransformFormat = "json"
)
// IsValid returns true if the format is valid.
func (c ConvertTransformFormat) IsValid() bool {
switch c {
case ConvertTransformFormatNone, ConvertTransformFormatQuantity, ConvertTransformFormatJSON:
return true
}
return false
}
// A ConvertTransform converts the input into a new object whose type is supplied.
type ConvertTransform struct {
// ToType is the type of the output of this transform.
	// +kubebuilder:validation:Enum=string;int;int64;bool;float64;object;array
ToType TransformIOType `json:"toType"`
// The expected input format.
//
// * `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).
// Only used during `string -> float64` conversions.
// * `json` - parses the input as a JSON string.
	// Only used during `string -> object` or `string -> array` conversions.
//
// If this property is null, the default conversion is applied.
//
// +kubebuilder:validation:Enum=none;quantity;json
// +kubebuilder:validation:Default=none
Format *ConvertTransformFormat `json:"format,omitempty"`
}
// Validate returns an error if the ConvertTransform is invalid.
func (t ConvertTransform) Validate() *field.Error {
if !t.GetFormat().IsValid() {
return field.Invalid(field.NewPath("format"), t.Format, "invalid format")
}
if !t.ToType.IsValid() {
return field.Invalid(field.NewPath("toType"), t.ToType, "invalid type")
}
return nil
}
| GetType | identifier_name |
zz_generated.composition_transforms.go | /*
Copyright 2020 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Generated from apiextensions/v1/composition_transforms.go by ../hack/duplicate_api_type.sh. DO NOT EDIT.
package v1beta1
import (
"encoding/json"
"regexp"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"github.com/crossplane/crossplane-runtime/pkg/errors"
verrors "github.com/crossplane/crossplane/internal/validation/errors"
)
// TransformType is type of the transform function to be chosen.
type TransformType string
// Accepted TransformTypes.
const (
ErrFmtConvertFormatPairNotSupported = "conversion from %s to %s is not supported with format %s"
TransformTypeMap TransformType = "map"
TransformTypeMatch TransformType = "match"
TransformTypeMath TransformType = "math"
TransformTypeString TransformType = "string"
TransformTypeConvert TransformType = "convert"
)
// Transform is a unit of process whose input is transformed into an output with
// the supplied configuration.
type Transform struct {
// Type of the transform to be run.
// +kubebuilder:validation:Enum=map;match;math;string;convert
Type TransformType `json:"type"`
// Math is used to transform the input via mathematical operations such as
// multiplication.
// +optional
Math *MathTransform `json:"math,omitempty"`
// Map uses the input as a key in the given map and returns the value.
// +optional
Map *MapTransform `json:"map,omitempty"`
// Match is a more complex version of Map that matches a list of patterns.
// +optional
Match *MatchTransform `json:"match,omitempty"`
// String is used to transform the input into a string or a different kind
// of string. Note that the input does not necessarily need to be a string.
// +optional
String *StringTransform `json:"string,omitempty"`
// Convert is used to cast the input into the given output type.
// +optional
Convert *ConvertTransform `json:"convert,omitempty"`
}
// Validate this Transform is valid.
//
//nolint:gocyclo // This is a long but simple/same-y switch.
func (t *Transform) Validate() *field.Error {
switch t.Type {
case TransformTypeMath:
if t.Math == nil {
return field.Required(field.NewPath("math"), "given transform type math requires configuration")
}
return verrors.WrapFieldError(t.Math.Validate(), field.NewPath("math"))
case TransformTypeMap:
if t.Map == nil {
return field.Required(field.NewPath("map"), "given transform type map requires configuration")
}
return verrors.WrapFieldError(t.Map.Validate(), field.NewPath("map"))
case TransformTypeMatch:
if t.Match == nil {
return field.Required(field.NewPath("match"), "given transform type match requires configuration")
}
return verrors.WrapFieldError(t.Match.Validate(), field.NewPath("match"))
case TransformTypeString:
if t.String == nil {
return field.Required(field.NewPath("string"), "given transform type string requires configuration")
}
return verrors.WrapFieldError(t.String.Validate(), field.NewPath("string"))
case TransformTypeConvert:
if t.Convert == nil {
return field.Required(field.NewPath("convert"), "given transform type convert requires configuration")
}
if err := t.Convert.Validate(); err != nil {
return verrors.WrapFieldError(err, field.NewPath("convert"))
}
default:
// Should never happen
return field.Invalid(field.NewPath("type"), t.Type, "unknown transform type")
}
return nil
}
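// Illustrative sketch (not from this package): a Composition patch might chain
// transforms like the following. The field names mirror the JSON tags above;
// the values are hypothetical.
//
//	transforms:
//	- type: math
//	  math:
//	    type: Multiply
//	    multiply: 1024
//	- type: convert
//	  convert:
//	    toType: string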
// GetFormat returns the format of the transform.
func (t *ConvertTransform) GetFormat() ConvertTransformFormat {
if t.Format != nil {
return *t.Format
}
return ConvertTransformFormatNone
}
// GetOutputType returns the output type of the transform.
// It returns an error if the transform type is unknown.
// It returns nil if the output type is not known.
func (t *Transform) GetOutputType() (*TransformIOType, error) {
var out TransformIOType
switch t.Type {
case TransformTypeMap, TransformTypeMatch:
return nil, nil
case TransformTypeMath:
out = TransformIOTypeFloat64
case TransformTypeString:
out = TransformIOTypeString
case TransformTypeConvert:
out = t.Convert.ToType
default:
return nil, errors.Errorf("unable to get output type, unknown transform type: %s", t.Type)
}
return &out, nil
}
// MathTransformType conducts mathematical operations.
type MathTransformType string
// Accepted MathTransformType.
const (
MathTransformTypeMultiply MathTransformType = "Multiply" // Default
MathTransformTypeClampMin MathTransformType = "ClampMin"
MathTransformTypeClampMax MathTransformType = "ClampMax"
)
// MathTransform conducts mathematical operations on the input with the given
// configuration in its properties.
type MathTransform struct {
// Type of the math transform to be run.
// +optional
// +kubebuilder:validation:Enum=Multiply;ClampMin;ClampMax
// +kubebuilder:default=Multiply
Type MathTransformType `json:"type,omitempty"`
// Multiply the value.
// +optional
Multiply *int64 `json:"multiply,omitempty"`
// ClampMin makes sure that the value is not smaller than the given value.
// +optional
ClampMin *int64 `json:"clampMin,omitempty"`
// ClampMax makes sure that the value is not bigger than the given value.
// +optional
ClampMax *int64 `json:"clampMax,omitempty"`
}
// GetType returns the type of the math transform, returning the default if not specified.
func (m *MathTransform) GetType() MathTransformType {
if m.Type == "" |
return m.Type
}
// Validate checks this MathTransform is valid.
func (m *MathTransform) Validate() *field.Error {
switch m.GetType() {
case MathTransformTypeMultiply:
if m.Multiply == nil {
return field.Required(field.NewPath("multiply"), "must specify a value if a multiply math transform is specified")
}
case MathTransformTypeClampMin:
if m.ClampMin == nil {
return field.Required(field.NewPath("clampMin"), "must specify a value if a clamp min math transform is specified")
}
case MathTransformTypeClampMax:
if m.ClampMax == nil {
return field.Required(field.NewPath("clampMax"), "must specify a value if a clamp max math transform is specified")
}
default:
return field.Invalid(field.NewPath("type"), m.Type, "unknown math transform type")
}
return nil
}
// MapTransform returns a value for the input from the given map.
type MapTransform struct {
// Pairs is the map that will be used for transform.
// +optional
Pairs map[string]extv1.JSON `json:",inline"`
}
// Validate checks this MapTransform is valid.
func (m *MapTransform) Validate() *field.Error {
if len(m.Pairs) == 0 {
return field.Required(field.NewPath("pairs"), "at least one pair must be specified if a map transform is specified")
}
return nil
}
// NOTE(negz): The Kubernetes JSON decoder doesn't seem to like inlining a map
// into a struct - doing so results in a seemingly successful unmarshal of the
// data, but an empty map. We must keep the ,inline tag nevertheless in order to
// trick the CRD generator into thinking MapTransform is an arbitrary map (i.e.
// generating a validation schema with string additionalProperties), but the
// actual marshalling is handled by the marshal methods below.
// UnmarshalJSON into this MapTransform.
func (m *MapTransform) UnmarshalJSON(b []byte) error {
return json.Unmarshal(b, &m.Pairs)
}
// MarshalJSON from this MapTransform.
func (m *MapTransform) MarshalJSON() ([]byte, error) {
return json.Marshal(m.Pairs)
}
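// Illustrative round-trip, assuming the marshal methods above (values hypothetical):
//
//	var m MapTransform
//	_ = json.Unmarshal([]byte(`{"us-west-1":"Foo","us-east-1":"Bar"}`), &m)
//	// m.Pairs now holds both keys as raw extv1.JSON values.
//	b, _ := m.MarshalJSON() // re-emits the same flat JSON object, not {"pairs": ...}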
// MatchFallbackTo defines how a match operation will fallback.
type MatchFallbackTo string
// Valid MatchFallbackTo.
const (
MatchFallbackToTypeValue MatchFallbackTo = "Value"
MatchFallbackToTypeInput MatchFallbackTo = "Input"
)
// MatchTransform is a more complex version of a map transform that matches a
// list of patterns.
type MatchTransform struct {
// The patterns that should be tested against the input string.
// Patterns are tested in order. The value of the first match is used as
// result of this transform.
Patterns []MatchTransformPattern `json:"patterns,omitempty"`
	// The fallback value that should be returned by the transform if no pattern
	// matches.
FallbackValue extv1.JSON `json:"fallbackValue,omitempty"`
// Determines to what value the transform should fallback if no pattern matches.
// +optional
// +kubebuilder:validation:Enum=Value;Input
// +kubebuilder:default=Value
FallbackTo MatchFallbackTo `json:"fallbackTo,omitempty"`
}
// Validate checks this MatchTransform is valid.
func (m *MatchTransform) Validate() *field.Error {
if len(m.Patterns) == 0 {
return field.Required(field.NewPath("patterns"), "at least one pattern must be specified if a match transform is specified")
}
for i, p := range m.Patterns {
if err := p.Validate(); err != nil {
return verrors.WrapFieldError(err, field.NewPath("patterns").Index(i))
}
}
return nil
}
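// Illustrative sketch (hypothetical values): try a literal pattern first and
// fall back to the input itself when nothing matches.
//
//	match:
//	  patterns:
//	  - type: literal
//	    literal: prod
//	    result: "production"
//	  fallbackTo: Input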
// MatchTransformPatternType defines the type of a MatchTransformPattern.
type MatchTransformPatternType string
// Valid MatchTransformPatternTypes.
const (
MatchTransformPatternTypeLiteral MatchTransformPatternType = "literal"
MatchTransformPatternTypeRegexp MatchTransformPatternType = "regexp"
)
// MatchTransformPattern is a transform that returns the value that matches a
// pattern.
type MatchTransformPattern struct {
// Type specifies how the pattern matches the input.
//
// * `literal` - the pattern value has to exactly match (case sensitive) the
// input string. This is the default.
//
	// * `regexp` - the pattern is treated as a regular expression against
// which the input string is tested. Crossplane will throw an error if the
// key is not a valid regexp.
//
// +kubebuilder:validation:Enum=literal;regexp
// +kubebuilder:default=literal
Type MatchTransformPatternType `json:"type"`
// Literal exactly matches the input string (case sensitive).
// Is required if `type` is `literal`.
Literal *string `json:"literal,omitempty"`
// Regexp to match against the input string.
// Is required if `type` is `regexp`.
Regexp *string `json:"regexp,omitempty"`
// The value that is used as result of the transform if the pattern matches.
Result extv1.JSON `json:"result"`
}
// Validate checks this MatchTransformPattern is valid.
func (m *MatchTransformPattern) Validate() *field.Error {
switch m.Type {
case MatchTransformPatternTypeLiteral, "":
if m.Literal == nil {
return field.Required(field.NewPath("literal"), "literal pattern type requires a literal")
}
case MatchTransformPatternTypeRegexp:
if m.Regexp == nil {
return field.Required(field.NewPath("regexp"), "regexp pattern type requires a regexp")
}
if _, err := regexp.Compile(*m.Regexp); err != nil {
return field.Invalid(field.NewPath("regexp"), *m.Regexp, "invalid regexp")
}
default:
return field.Invalid(field.NewPath("type"), m.Type, "unknown pattern type")
}
return nil
}
// StringTransformType transforms a string.
type StringTransformType string
// Accepted StringTransformTypes.
const (
StringTransformTypeFormat StringTransformType = "Format" // Default
StringTransformTypeConvert StringTransformType = "Convert"
StringTransformTypeTrimPrefix StringTransformType = "TrimPrefix"
StringTransformTypeTrimSuffix StringTransformType = "TrimSuffix"
StringTransformTypeRegexp StringTransformType = "Regexp"
)
// StringConversionType converts a string.
type StringConversionType string
// Accepted StringConversionTypes.
const (
StringConversionTypeToUpper StringConversionType = "ToUpper"
StringConversionTypeToLower StringConversionType = "ToLower"
StringConversionTypeToJSON StringConversionType = "ToJson"
StringConversionTypeToBase64 StringConversionType = "ToBase64"
StringConversionTypeFromBase64 StringConversionType = "FromBase64"
StringConversionTypeToSHA1 StringConversionType = "ToSha1"
StringConversionTypeToSHA256 StringConversionType = "ToSha256"
StringConversionTypeToSHA512 StringConversionType = "ToSha512"
StringConversionTypeToAdler32 StringConversionType = "ToAdler32"
)
// A StringTransform returns a string given the supplied input.
type StringTransform struct {
// Type of the string transform to be run.
// +optional
// +kubebuilder:validation:Enum=Format;Convert;TrimPrefix;TrimSuffix;Regexp
// +kubebuilder:default=Format
Type StringTransformType `json:"type,omitempty"`
// Format the input using a Go format string. See
// https://golang.org/pkg/fmt/ for details.
// +optional
Format *string `json:"fmt,omitempty"`
// Optional conversion method to be specified.
// `ToUpper` and `ToLower` change the letter case of the input string.
// `ToBase64` and `FromBase64` perform a base64 conversion based on the input string.
// `ToJson` converts any input value into its raw JSON representation.
// `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input
// converted to JSON.
// +optional
// +kubebuilder:validation:Enum=ToUpper;ToLower;ToBase64;FromBase64;ToJson;ToSha1;ToSha256;ToSha512
Convert *StringConversionType `json:"convert,omitempty"`
	// Trim the prefix or suffix from the input.
// +optional
Trim *string `json:"trim,omitempty"`
// Extract a match from the input using a regular expression.
// +optional
Regexp *StringTransformRegexp `json:"regexp,omitempty"`
}
// Validate checks this StringTransform is valid.
//
//nolint:gocyclo // just a switch
func (s *StringTransform) Validate() *field.Error {
switch s.Type {
case StringTransformTypeFormat, "":
if s.Format == nil {
return field.Required(field.NewPath("fmt"), "format transform requires a format")
}
case StringTransformTypeConvert:
if s.Convert == nil {
return field.Required(field.NewPath("convert"), "convert transform requires a conversion type")
}
case StringTransformTypeTrimPrefix, StringTransformTypeTrimSuffix:
if s.Trim == nil {
return field.Required(field.NewPath("trim"), "trim transform requires a trim value")
}
case StringTransformTypeRegexp:
if s.Regexp == nil {
return field.Required(field.NewPath("regexp"), "regexp transform requires a regexp")
}
if s.Regexp.Match == "" {
return field.Required(field.NewPath("regexp", "match"), "regexp transform requires a match")
}
if _, err := regexp.Compile(s.Regexp.Match); err != nil {
return field.Invalid(field.NewPath("regexp", "match"), s.Regexp.Match, "invalid regexp")
}
default:
return field.Invalid(field.NewPath("type"), s.Type, "unknown string transform type")
}
return nil
}
// A StringTransformRegexp extracts a match from the input using a regular
// expression.
type StringTransformRegexp struct {
// Match string. May optionally include submatches, aka capture groups.
// See https://pkg.go.dev/regexp/ for details.
Match string `json:"match"`
// Group number to match. 0 (the default) matches the entire expression.
// +optional
Group *int `json:"group,omitempty"`
}
// TransformIOType defines the type of a ConvertTransform.
type TransformIOType string
// The list of supported Transform input and output types.
const (
TransformIOTypeString TransformIOType = "string"
TransformIOTypeBool TransformIOType = "bool"
TransformIOTypeInt TransformIOType = "int"
TransformIOTypeInt64 TransformIOType = "int64"
TransformIOTypeFloat64 TransformIOType = "float64"
TransformIOTypeObject TransformIOType = "object"
TransformIOTypeArray TransformIOType = "array"
)
// IsValid checks if the given TransformIOType is valid.
func (c TransformIOType) IsValid() bool {
switch c {
case TransformIOTypeString, TransformIOTypeBool, TransformIOTypeInt, TransformIOTypeInt64, TransformIOTypeFloat64, TransformIOTypeObject, TransformIOTypeArray:
return true
}
return false
}
// ConvertTransformFormat defines the expected format of an input value of a
// conversion transform.
type ConvertTransformFormat string
// Possible ConvertTransformFormat values.
const (
ConvertTransformFormatNone ConvertTransformFormat = "none"
ConvertTransformFormatQuantity ConvertTransformFormat = "quantity"
ConvertTransformFormatJSON ConvertTransformFormat = "json"
)
// IsValid returns true if the format is valid.
func (c ConvertTransformFormat) IsValid() bool {
switch c {
case ConvertTransformFormatNone, ConvertTransformFormatQuantity, ConvertTransformFormatJSON:
return true
}
return false
}
// A ConvertTransform converts the input into a new object whose type is supplied.
type ConvertTransform struct {
// ToType is the type of the output of this transform.
	// +kubebuilder:validation:Enum=string;int;int64;bool;float64;object;array
ToType TransformIOType `json:"toType"`
// The expected input format.
//
// * `quantity` - parses the input as a K8s [`resource.Quantity`](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).
// Only used during `string -> float64` conversions.
// * `json` - parses the input as a JSON string.
	// Only used during `string -> object` or `string -> array` conversions.
//
// If this property is null, the default conversion is applied.
//
// +kubebuilder:validation:Enum=none;quantity;json
// +kubebuilder:validation:Default=none
Format *ConvertTransformFormat `json:"format,omitempty"`
}
// Validate returns an error if the ConvertTransform is invalid.
func (t ConvertTransform) Validate() *field.Error {
if !t.GetFormat().IsValid() {
return field.Invalid(field.NewPath("format"), t.Format, "invalid format")
}
if !t.ToType.IsValid() {
return field.Invalid(field.NewPath("toType"), t.ToType, "invalid type")
}
return nil
}
| {
return MathTransformTypeMultiply
} | conditional_block |
input.py | #!/usr/bin/env python3
#
# input.py
"""
Input functions (prompt, choice etc.).
"""
#
# Copyright © 2020-2021 Dominic Davis-Foster <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# prompt and confirm based on https://github.com/pallets/click
# Copyright 2014 Pallets
# | Redistribution and use in source and binary forms, with or without modification,
# | are permitted provided that the following conditions are met:
# |
# | * Redistributions of source code must retain the above copyright notice,
# | this list of conditions and the following disclaimer.
# | * Redistributions in binary form must reproduce the above copyright notice,
# | this list of conditions and the following disclaimer in the documentation
# | and/or other materials provided with the distribution.
# | * Neither the name of the copyright holder nor the names of its contributors
# | may be used to endorse or promote products derived from this software without
# | specific prior written permission.
# |
# | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# |
#
# stderr_input based on raw_input from https://foss.heptapod.net/pypy/pypy
# PyPy Copyright holders 2003-2020
# MIT Licenced
#
# stdlib
import sys
from typing import IO, Any, Callable, List, Mapping, Optional, Union, overload
# 3rd party
import click
from click.termui import _build_prompt, hidden_prompt_func
from click.types import Path, convert_type
# this package
from consolekit._types import _ConvertibleType
from consolekit.utils import hidden_cursor, hide_cursor, show_cursor # noqa
__all__ = [
"prompt",
"confirm",
"stderr_input",
"choice",
]
if not bool(getattr(sys, "ps1", sys.flags.interactive)): # pragma: no cover
try:
# stdlib
import readline
readline.set_history_length(0)
readline.set_auto_history(False)
except (ImportError, AttributeError):
# Attribute error on PyPy, ImportError on Windows etc.
pass
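# (Presumably the history suppression above keeps values typed at these
# prompts, including hidden input, out of the user's readline history.)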
def prompt(
text: str,
default: Optional[str] = None,
hide_input: bool = False,
confirmation_prompt: bool = False,
type: Optional[_ConvertibleType] = None, # noqa: A002 # pylint: disable=redefined-builtin
value_proc: Optional[Callable[[Optional[str]], Any]] = None,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
show_choices: bool = True,
):
"""
Prompts a user for input.
If the user aborts the input by sending an interrupt signal,
this function will catch it and raise a :exc:`click.Abort` exception.
:param text: The text to show for the prompt.
:param default: The default value to use if no input happens.
If this is not given it will prompt until it is aborted.
:param hide_input: If :py:obj:`True` then the input value will be hidden.
:param confirmation_prompt: Asks for confirmation for the value.
:param type: The type to check the value against.
:param value_proc: If this parameter is provided it must be a function that
is invoked instead of the type conversion to convert a value.
:param prompt_suffix: A suffix that should be added to the prompt.
:param show_default: Shows or hides the default value in the prompt.
:param err: If :py:obj:`True` the file defaults to ``stderr`` instead of
``stdout``, the same as with :func:`click.echo`.
:param show_choices: Show or hide choices if the passed type is a :class:`click.Choice`.
For example, if the choice is either ``day`` or ``week``,
``show_choices`` is :py:obj:`True` and ``text`` is ``'Group by'`` then the
prompt will be ``'Group by (day, week): '``.
"""
result = None # noqa
def prompt_func(text):
try:
return _prompt(text, err=err, hide_input=hide_input)
except (KeyboardInterrupt, EOFError):
if hide_input:
click.echo(None, err=err)
raise click.Abort()
if value_proc is None:
value_proc = convert_type(type, default)
prompt = _build_prompt(text, prompt_suffix, show_default, default, show_choices, type) # type: ignore
while True:
while True:
value = prompt_func(prompt)
if value:
break
elif default is not None:
if isinstance(value_proc, Path):
# validate Path default value (exists, dir_okay etc.)
value = default
break
return default
try:
result = value_proc(value)
except click.UsageError as e:
click.echo(f"Error: {e.message}", err=err) # noqa: B306
continue
if not confirmation_prompt:
return result
while True:
value2 = prompt_func("Repeat for confirmation: ")
if value2:
break
if value == value2:
return result
click.echo("Error: the two entered values do not match", err=err)
def confirm(
text: str,
default: bool = False,
abort: bool = False,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
):
"""
Prompts for confirmation (yes/no question).
	If the user aborts the input by sending an interrupt signal, this
function will catch it and raise a :exc:`click.Abort` exception.
:param text: The question to ask.
:param default: The default for the prompt.
:param abort: If :py:obj:`True` a negative answer aborts the exception by raising :exc:`click.Abort`.
:param prompt_suffix: A suffix that should be added to the prompt.
:param show_default: Shows or hides the default value in the prompt.
:param err: If :py:obj:`True` the file defaults to ``stderr`` instead of ``stdout``, the same as with echo.
"""
prompt = _build_prompt(text, prompt_suffix, show_default, "Y/n" if default else "y/N")
while True:
try:
value = _prompt(prompt, err=err, hide_input=False).lower().strip()
except (KeyboardInterrupt, EOFError):
raise click.Abort()
if value in ('y', "yes"):
rv = True
elif value in ('n', "no"):
rv = False
elif value == '':
rv = default
else:
click.echo("Error: invalid input", err=err)
continue
break
if abort and not rv:
raise click.Abort()
return rv
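# Example usage (illustrative):
#
#     proceed = confirm("Overwrite existing file?", default=False)
#     # 'y'/'yes' -> True, 'n'/'no' -> False, empty input -> the default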
def s | prompt: str = '', file: IO = sys.stdout) -> str: # pragma: no cover
"""
Read a string from standard input, but prompt to standard error.
The trailing newline is stripped.
If the user hits EOF (Unix: :kbd:`Ctrl-D`, Windows: :kbd:`Ctrl-Z+Return`), raise :exc:`EOFError`.
On Unix, GNU readline is used if enabled.
The ``prompt`` string, if given, is printed to stderr without a trailing newline before reading.
"""
if file is sys.stdout:
return input(prompt)
try:
stdin = sys.stdin
except AttributeError:
raise RuntimeError("stderr_input: lost sys.stdin")
file.write(prompt)
try:
flush = file.flush
except AttributeError:
pass
else:
flush()
try:
file.softspace = 0 # type: ignore
except (AttributeError, TypeError):
pass
line = stdin.readline()
if not line: # inputting an empty line gives line == '\n'
raise EOFError
elif line[-1] == '\n':
return line[:-1]
return line
def _prompt(text, err: bool, hide_input: bool):
if sys.platform != "linux":
# Write the prompt separately so that we get nice
# coloring through colorama on Windows
click.echo(text, nl=False, err=err)
text = ''
if hide_input:
return hidden_prompt_func(text)
elif err:
return stderr_input(text, file=sys.stderr)
else:
return click.termui.visible_prompt_func(text) # type: ignore
@overload
def choice(
options: List[str],
text: str = ...,
default: Optional[str] = ...,
prompt_suffix: str = ...,
show_default: bool = ...,
err: bool = ...,
start_index: int = ...
) -> int: ...
@overload
def choice(
options: Mapping[str, str],
text: str = ...,
default: Optional[str] = ...,
prompt_suffix: str = ...,
show_default: bool = ...,
err: bool = ...,
start_index: int = ...
) -> str: ...
def choice(
options: Union[List[str], Mapping[str, str]],
text: str = '',
default: Optional[str] = None,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
start_index: int = 0
) -> Union[str, int]:
"""
Prompts a user for input.
If the user aborts the input by sending an interrupt signal, this
function will catch it and raise a :exc:`click.Abort` exception.
:param options:
:param text: The text to show for the prompt.
:param default: The index of the default value to use if no input happens.
If this is not given it will prompt until it is aborted.
:param prompt_suffix: A suffix that should be added to the prompt.
:param show_default: Shows or hides the default value in the prompt.
:param err: If :py:obj:`True` the file defaults to ``stderr`` instead of
``stdout``, the same as with echo.
:param start_index: If ``options`` is a list of values, sets the start index.
"""
# TODO: completer for numbers?
type_: click.ParamType
if isinstance(options, Mapping):
# (Y/I/N/O/D/Z) [default=N]
text = f"{text} ({'/'.join(options.keys())})"
type_ = click.STRING
		for choice, description in options.items():
			click.echo(f" {choice} : {description}")
else:
		type_ = click.IntRange(start_index, len(options) - 1 + start_index)  # highest valid option index
		for idx, description in enumerate(options):
			idx += start_index
			click.echo(f" [{idx}] {description}")
if default is not None and show_default:
text += f" [default={default}]"
while True:
selection = prompt(
text=text,
default=default,
type=type_,
prompt_suffix=prompt_suffix,
show_default=False,
err=err,
)
if isinstance(options, Mapping):
selection = selection.strip().upper()
if selection not in options:
click.echo(f"Please enter a valid option.")
else:
return selection
else:
return selection - start_index
| tderr_input( | identifier_name |
input.py | #!/usr/bin/env python3
#
# input.py
"""
Input functions (prompt, choice etc.).
"""
#
# Copyright © 2020-2021 Dominic Davis-Foster <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# prompt and confirm based on https://github.com/pallets/click
# Copyright 2014 Pallets
# | Redistribution and use in source and binary forms, with or without modification,
# | are permitted provided that the following conditions are met:
# |
# | * Redistributions of source code must retain the above copyright notice,
# | this list of conditions and the following disclaimer.
# | * Redistributions in binary form must reproduce the above copyright notice,
# | this list of conditions and the following disclaimer in the documentation
# | and/or other materials provided with the distribution.
# | * Neither the name of the copyright holder nor the names of its contributors
# | may be used to endorse or promote products derived from this software without
# | specific prior written permission.
# |
# | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# |
#
# stderr_input based on raw_input from https://foss.heptapod.net/pypy/pypy
# PyPy Copyright holders 2003-2020
# MIT Licenced
#
# stdlib
import sys
from typing import IO, Any, Callable, List, Mapping, Optional, Union, overload
# 3rd party
import click
from click.termui import _build_prompt, hidden_prompt_func
from click.types import Path, convert_type
# this package
from consolekit._types import _ConvertibleType
from consolekit.utils import hidden_cursor, hide_cursor, show_cursor # noqa
__all__ = [
"prompt",
"confirm",
"stderr_input",
"choice",
]
if not bool(getattr(sys, "ps1", sys.flags.interactive)): # pragma: no cover
try:
# stdlib
import readline
readline.set_history_length(0)
readline.set_auto_history(False)
except (ImportError, AttributeError):
# Attribute error on PyPy, ImportError on Windows etc.
pass
def prompt(
text: str,
default: Optional[str] = None,
hide_input: bool = False,
confirmation_prompt: bool = False,
type: Optional[_ConvertibleType] = None, # noqa: A002 # pylint: disable=redefined-builtin
value_proc: Optional[Callable[[Optional[str]], Any]] = None,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
show_choices: bool = True,
):
"""
Prompts a user for input.
If the user aborts the input by sending an interrupt signal,
this function will catch it and raise a :exc:`click.Abort` exception.
:param text: The text to show for the prompt.
:param default: The default value to use if no input happens.
If this is not given it will prompt until it is aborted.
:param hide_input: If :py:obj:`True` then the input value will be hidden.
:param confirmation_prompt: Asks for confirmation for the value.
:param type: The type to check the value against.
:param value_proc: If this parameter is provided it must be a function that
is invoked instead of the type conversion to convert a value.
:param prompt_suffix: A suffix that should be added to the prompt.
:param show_default: Shows or hides the default value in the prompt.
:param err: If :py:obj:`True` the file defaults to ``stderr`` instead of
``stdout``, the same as with :func:`click.echo`.
:param show_choices: Show or hide choices if the passed type is a :class:`click.Choice`.
For example, if the choice is either ``day`` or ``week``,
``show_choices`` is :py:obj:`True` and ``text`` is ``'Group by'`` then the
prompt will be ``'Group by (day, week): '``.
"""
result = None # noqa
def prompt_func(text):
try:
return _prompt(text, err=err, hide_input=hide_input)
except (KeyboardInterrupt, EOFError):
if hide_input:
click.echo(None, err=err)
raise click.Abort()
if value_proc is None:
value_proc = convert_type(type, default)
prompt = _build_prompt(text, prompt_suffix, show_default, default, show_choices, type) # type: ignore
while True:
while True:
value = prompt_func(prompt)
if value:
break
elif default is not None:
if isinstance(value_proc, Path):
# validate Path default value (exists, dir_okay etc.)
value = default
break
return default
try:
result = value_proc(value)
except click.UsageError as e:
click.echo(f"Error: {e.message}", err=err) # noqa: B306
continue
if not confirmation_prompt:
r |
while True:
value2 = prompt_func("Repeat for confirmation: ")
if value2:
break
if value == value2:
return result
click.echo("Error: the two entered values do not match", err=err)
def confirm(
text: str,
default: bool = False,
abort: bool = False,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
):
"""
Prompts for confirmation (yes/no question).
	If the user aborts the input by sending an interrupt signal, this
function will catch it and raise a :exc:`click.Abort` exception.
:param text: The question to ask.
:param default: The default for the prompt.
:param abort: If :py:obj:`True` a negative answer aborts the exception by raising :exc:`click.Abort`.
:param prompt_suffix: A suffix that should be added to the prompt.
:param show_default: Shows or hides the default value in the prompt.
:param err: If :py:obj:`True` the file defaults to ``stderr`` instead of ``stdout``, the same as with echo.
"""
prompt = _build_prompt(text, prompt_suffix, show_default, "Y/n" if default else "y/N")
while True:
try:
value = _prompt(prompt, err=err, hide_input=False).lower().strip()
except (KeyboardInterrupt, EOFError):
raise click.Abort()
if value in ('y', "yes"):
rv = True
elif value in ('n', "no"):
rv = False
elif value == '':
rv = default
else:
click.echo("Error: invalid input", err=err)
continue
break
if abort and not rv:
raise click.Abort()
return rv
def stderr_input(prompt: str = '', file: IO = sys.stdout) -> str: # pragma: no cover
"""
Read a string from standard input, but prompt to standard error.
The trailing newline is stripped.
If the user hits EOF (Unix: :kbd:`Ctrl-D`, Windows: :kbd:`Ctrl-Z+Return`), raise :exc:`EOFError`.
On Unix, GNU readline is used if enabled.
The ``prompt`` string, if given, is printed to stderr without a trailing newline before reading.
"""
if file is sys.stdout:
return input(prompt)
try:
stdin = sys.stdin
except AttributeError:
raise RuntimeError("stderr_input: lost sys.stdin")
file.write(prompt)
try:
flush = file.flush
except AttributeError:
pass
else:
flush()
try:
file.softspace = 0 # type: ignore
except (AttributeError, TypeError):
pass
line = stdin.readline()
if not line: # inputting an empty line gives line == '\n'
raise EOFError
elif line[-1] == '\n':
return line[:-1]
return line
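# Example (illustrative): prompt on stderr so stdout stays clean for piping:
#
#     name = stderr_input("Name: ", file=sys.stderr)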
def _prompt(text, err: bool, hide_input: bool):
if sys.platform != "linux":
# Write the prompt separately so that we get nice
# coloring through colorama on Windows
click.echo(text, nl=False, err=err)
text = ''
if hide_input:
return hidden_prompt_func(text)
elif err:
return stderr_input(text, file=sys.stderr)
else:
return click.termui.visible_prompt_func(text) # type: ignore
@overload
def choice(
options: List[str],
text: str = ...,
default: Optional[str] = ...,
prompt_suffix: str = ...,
show_default: bool = ...,
err: bool = ...,
start_index: int = ...
) -> int: ...
@overload
def choice(
options: Mapping[str, str],
text: str = ...,
default: Optional[str] = ...,
prompt_suffix: str = ...,
show_default: bool = ...,
err: bool = ...,
start_index: int = ...
) -> str: ...
def choice(
options: Union[List[str], Mapping[str, str]],
text: str = '',
default: Optional[str] = None,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
start_index: int = 0
) -> Union[str, int]:
"""
Prompts a user for input.
If the user aborts the input by sending an interrupt signal, this
function will catch it and raise a :exc:`click.Abort` exception.
:param options:
:param text: The text to show for the prompt.
:param default: The index of the default value to use if no input happens.
If this is not given it will prompt until it is aborted.
:param prompt_suffix: A suffix that should be added to the prompt.
:param show_default: Shows or hides the default value in the prompt.
:param err: If :py:obj:`True` the file defaults to ``stderr`` instead of
``stdout``, the same as with echo.
:param start_index: If ``options`` is a list of values, sets the start index.
"""
# TODO: completer for numbers?
type_: click.ParamType
if isinstance(options, Mapping):
# (Y/I/N/O/D/Z) [default=N]
text = f"{text} ({'/'.join(options.keys())})"
type_ = click.STRING
		for choice, description in options.items():
			click.echo(f" {choice} : {description}")
else:
		type_ = click.IntRange(start_index, len(options) - 1 + start_index)  # highest valid option index
		for idx, description in enumerate(options):
			idx += start_index
			click.echo(f" [{idx}] {description}")
if default is not None and show_default:
text += f" [default={default}]"
while True:
selection = prompt(
text=text,
default=default,
type=type_,
prompt_suffix=prompt_suffix,
show_default=False,
err=err,
)
if isinstance(options, Mapping):
selection = selection.strip().upper()
if selection not in options:
click.echo(f"Please enter a valid option.")
else:
return selection
else:
return selection - start_index
| eturn result
| conditional_block |
input.py | #!/usr/bin/env python3
#
# input.py
"""
Input functions (prompt, choice etc.).
"""
#
# Copyright © 2020-2021 Dominic Davis-Foster <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# prompt and confirm based on https://github.com/pallets/click
# Copyright 2014 Pallets
# | Redistribution and use in source and binary forms, with or without modification,
# | are permitted provided that the following conditions are met:
# |
# | * Redistributions of source code must retain the above copyright notice,
# | this list of conditions and the following disclaimer.
# | * Redistributions in binary form must reproduce the above copyright notice,
# | this list of conditions and the following disclaimer in the documentation
# | and/or other materials provided with the distribution.
# | * Neither the name of the copyright holder nor the names of its contributors
# | may be used to endorse or promote products derived from this software without
# | specific prior written permission.
# |
# | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# |
#
# stderr_input based on raw_input from https://foss.heptapod.net/pypy/pypy
# PyPy Copyright holders 2003-2020
# MIT Licenced
#
# stdlib
import sys
from typing import IO, Any, Callable, List, Mapping, Optional, Union, overload
# 3rd party
import click
from click.termui import _build_prompt, hidden_prompt_func
from click.types import Path, convert_type
# this package
from consolekit._types import _ConvertibleType
from consolekit.utils import hidden_cursor, hide_cursor, show_cursor # noqa
__all__ = [
"prompt",
"confirm",
"stderr_input",
"choice",
]
if not bool(getattr(sys, "ps1", sys.flags.interactive)): # pragma: no cover
try:
# stdlib
import readline
readline.set_history_length(0)
readline.set_auto_history(False)
except (ImportError, AttributeError):
# Attribute error on PyPy, ImportError on Windows etc.
pass
def prompt(
text: str,
default: Optional[str] = None,
hide_input: bool = False,
confirmation_prompt: bool = False,
type: Optional[_ConvertibleType] = None, # noqa: A002 # pylint: disable=redefined-builtin
value_proc: Optional[Callable[[Optional[str]], Any]] = None,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
show_choices: bool = True,
):
"""
Prompts a user for input.
If the user aborts the input by sending an interrupt signal,
this function will catch it and raise a :exc:`click.Abort` exception.
:param text: The text to show for the prompt.
:param default: The default value to use if no input happens.
If this is not given it will prompt until it is aborted.
:param hide_input: If :py:obj:`True` then the input value will be hidden.
:param confirmation_prompt: Asks for confirmation for the value.
:param type: The type to check the value against.
:param value_proc: If this parameter is provided it must be a function that
is invoked instead of the type conversion to convert a value.
:param prompt_suffix: A suffix that should be added to the prompt.
:param show_default: Shows or hides the default value in the prompt.
:param err: If :py:obj:`True` the file defaults to ``stderr`` instead of
``stdout``, the same as with :func:`click.echo`.
:param show_choices: Show or hide choices if the passed type is a :class:`click.Choice`.
For example, if the choice is either ``day`` or ``week``,
``show_choices`` is :py:obj:`True` and ``text`` is ``'Group by'`` then the
prompt will be ``'Group by (day, week): '``.
"""
result = None # noqa
def prompt_func(text):
try:
return _prompt(text, err=err, hide_input=hide_input)
except (KeyboardInterrupt, EOFError):
if hide_input:
click.echo(None, err=err)
raise click.Abort()
if value_proc is None:
value_proc = convert_type(type, default)
prompt = _build_prompt(text, prompt_suffix, show_default, default, show_choices, type) # type: ignore
while True:
while True:
value = prompt_func(prompt)
if value:
break
elif default is not None:
if isinstance(value_proc, Path):
# validate Path default value (exists, dir_okay etc.)
value = default
break
return default
try:
result = value_proc(value)
except click.UsageError as e:
click.echo(f"Error: {e.message}", err=err) # noqa: B306
continue
if not confirmation_prompt:
return result
while True:
value2 = prompt_func("Repeat for confirmation: ")
if value2:
break
if value == value2:
return result
click.echo("Error: the two entered values do not match", err=err)
def confirm(
text: str,
default: bool = False,
abort: bool = False,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
):
"""
Prompts for confirmation (yes/no question).
	If the user aborts the input by sending an interrupt signal, this
function will catch it and raise a :exc:`click.Abort` exception.
:param text: The question to ask.
:param default: The default for the prompt.
:param abort: If :py:obj:`True` a negative answer aborts the exception by raising :exc:`click.Abort`.
:param prompt_suffix: A suffix that should be added to the prompt.
:param show_default: Shows or hides the default value in the prompt.
:param err: If :py:obj:`True` the file defaults to ``stderr`` instead of ``stdout``, the same as with echo.
"""
prompt = _build_prompt(text, prompt_suffix, show_default, "Y/n" if default else "y/N")
while True:
try:
value = _prompt(prompt, err=err, hide_input=False).lower().strip()
except (KeyboardInterrupt, EOFError):
raise click.Abort()
if value in ('y', "yes"):
rv = True
elif value in ('n', "no"):
rv = False
elif value == '':
rv = default
else:
click.echo("Error: invalid input", err=err)
continue
break
if abort and not rv:
raise click.Abort()
return rv
def stderr_input(prompt: str = '', file: IO = sys.stdout) -> str: # pragma: no cover
"""
Read a string from standard input, but prompt to standard error.
The trailing newline is stripped.
If the user hits EOF (Unix: :kbd:`Ctrl-D`, Windows: :kbd:`Ctrl-Z+Return`), raise :exc:`EOFError`.
On Unix, GNU readline is used if enabled.
The ``prompt`` string, if given, is printed to stderr without a trailing newline before reading.
"""
if file is sys.stdout:
return input(prompt)
try:
stdin = sys.stdin
except AttributeError:
raise RuntimeError("stderr_input: lost sys.stdin")
file.write(prompt)
try:
flush = file.flush
except AttributeError:
pass
else:
flush()
try:
file.softspace = 0 # type: ignore
except (AttributeError, TypeError):
pass
line = stdin.readline()
if not line: # inputting an empty line gives line == '\n'
raise EOFError
elif line[-1] == '\n':
return line[:-1]
return line
def _prompt(text, err: bool, hide_input: bool):
i |
@overload
def choice(
options: List[str],
text: str = ...,
default: Optional[str] = ...,
prompt_suffix: str = ...,
show_default: bool = ...,
err: bool = ...,
start_index: int = ...
) -> int: ...
@overload
def choice(
options: Mapping[str, str],
text: str = ...,
default: Optional[str] = ...,
prompt_suffix: str = ...,
show_default: bool = ...,
err: bool = ...,
start_index: int = ...
) -> str: ...
def choice(
options: Union[List[str], Mapping[str, str]],
text: str = '',
default: Optional[str] = None,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
start_index: int = 0
) -> Union[str, int]:
"""
Prompts a user for input.
If the user aborts the input by sending an interrupt signal, this
function will catch it and raise a :exc:`click.Abort` exception.
:param options:
:param text: The text to show for the prompt.
:param default: The index of the default value to use if no input happens.
If this is not given it will prompt until it is aborted.
:param prompt_suffix: A suffix that should be added to the prompt.
:param show_default: Shows or hides the default value in the prompt.
:param err: If :py:obj:`True` the file defaults to ``stderr`` instead of
``stdout``, the same as with echo.
:param start_index: If ``options`` is a list of values, sets the start index.
"""
# TODO: completer for numbers?
type_: click.ParamType
if isinstance(options, Mapping):
# (Y/I/N/O/D/Z) [default=N]
text = f"{text} ({'/'.join(options.keys())})"
type_ = click.STRING
		for choice, description in options.items():
			click.echo(f" {choice} : {description}")
else:
		type_ = click.IntRange(start_index, len(options) - 1 + start_index)  # highest valid option index
		for idx, description in enumerate(options):
			idx += start_index
			click.echo(f" [{idx}] {description}")
if default is not None and show_default:
text += f" [default={default}]"
while True:
selection = prompt(
text=text,
default=default,
type=type_,
prompt_suffix=prompt_suffix,
show_default=False,
err=err,
)
if isinstance(options, Mapping):
selection = selection.strip().upper()
if selection not in options:
click.echo(f"Please enter a valid option.")
else:
return selection
else:
return selection - start_index
| f sys.platform != "linux":
# Write the prompt separately so that we get nice
# coloring through colorama on Windows
click.echo(text, nl=False, err=err)
text = ''
if hide_input:
return hidden_prompt_func(text)
elif err:
return stderr_input(text, file=sys.stderr)
else:
return click.termui.visible_prompt_func(text) # type: ignore
| identifier_body |
input.py | #!/usr/bin/env python3
#
# input.py
"""
Input functions (prompt, choice etc.).
"""
#
# Copyright © 2020-2021 Dominic Davis-Foster <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# prompt and confirm based on https://github.com/pallets/click
# Copyright 2014 Pallets
# | Redistribution and use in source and binary forms, with or without modification,
# | are permitted provided that the following conditions are met:
# |
# | * Redistributions of source code must retain the above copyright notice,
# | this list of conditions and the following disclaimer.
# | * Redistributions in binary form must reproduce the above copyright notice,
# | this list of conditions and the following disclaimer in the documentation
# | and/or other materials provided with the distribution.
# | * Neither the name of the copyright holder nor the names of its contributors
# | may be used to endorse or promote products derived from this software without
# | specific prior written permission.
# |
# | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# |
#
# stderr_input based on raw_input from https://foss.heptapod.net/pypy/pypy
# PyPy Copyright holders 2003-2020
# MIT Licenced
#
# stdlib
import sys
from typing import IO, Any, Callable, List, Mapping, Optional, Union, overload
# 3rd party
import click
from click.termui import _build_prompt, hidden_prompt_func
from click.types import Path, convert_type
# this package
from consolekit._types import _ConvertibleType
from consolekit.utils import hidden_cursor, hide_cursor, show_cursor # noqa
__all__ = [
"prompt",
"confirm",
"stderr_input",
"choice",
]
if not bool(getattr(sys, "ps1", sys.flags.interactive)): # pragma: no cover
try:
# stdlib
import readline
readline.set_history_length(0)
readline.set_auto_history(False)
except (ImportError, AttributeError):
# Attribute error on PyPy, ImportError on Windows etc.
pass
def prompt(
text: str,
default: Optional[str] = None,
hide_input: bool = False,
confirmation_prompt: bool = False,
type: Optional[_ConvertibleType] = None, # noqa: A002 # pylint: disable=redefined-builtin
value_proc: Optional[Callable[[Optional[str]], Any]] = None,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
show_choices: bool = True,
):
"""
Prompts a user for input.
If the user aborts the input by sending an interrupt signal,
this function will catch it and raise a :exc:`click.Abort` exception.
:param text: The text to show for the prompt.
:param default: The default value to use if no input happens.
If this is not given it will prompt until it is aborted.
:param hide_input: If :py:obj:`True` then the input value will be hidden.
:param confirmation_prompt: Asks for confirmation for the value.
:param type: The type to check the value against.
:param value_proc: If this parameter is provided it must be a function that
is invoked instead of the type conversion to convert a value.
:param prompt_suffix: A suffix that should be added to the prompt.
:param show_default: Shows or hides the default value in the prompt.
:param err: If :py:obj:`True` the file defaults to ``stderr`` instead of
``stdout``, the same as with :func:`click.echo`.
:param show_choices: Show or hide choices if the passed type is a :class:`click.Choice`.
For example, if the choice is either ``day`` or ``week``,
``show_choices`` is :py:obj:`True` and ``text`` is ``'Group by'`` then the
prompt will be ``'Group by (day, week): '``.
"""
result = None # noqa
def prompt_func(text):
try:
return _prompt(text, err=err, hide_input=hide_input)
except (KeyboardInterrupt, EOFError):
if hide_input:
click.echo(None, err=err)
raise click.Abort()
if value_proc is None:
value_proc = convert_type(type, default)
prompt = _build_prompt(text, prompt_suffix, show_default, default, show_choices, type) # type: ignore
while True:
while True:
value = prompt_func(prompt)
if value:
break
elif default is not None:
if isinstance(value_proc, Path):
# validate Path default value (exists, dir_okay etc.)
value = default
break
return default
try:
result = value_proc(value)
except click.UsageError as e:
click.echo(f"Error: {e.message}", err=err) # noqa: B306
continue
if not confirmation_prompt:
return result
while True:
value2 = prompt_func("Repeat for confirmation: ")
if value2:
break
if value == value2:
return result
click.echo("Error: the two entered values do not match", err=err)
def confirm(
text: str,
default: bool = False,
abort: bool = False,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
):
"""
Prompts for confirmation (yes/no question).
If the user aborts the input by sending an interrupt signal, this
function will catch it and raise a :exc:`click.Abort` exception.
:param text: The question to ask.
:param default: The default for the prompt.
:param abort: If :py:obj:`True` a negative answer aborts the exception by raising :exc:`click.Abort`.
:param prompt_suffix: A suffix that should be added to the prompt.
:param show_default: Shows or hides the default value in the prompt.
:param err: If :py:obj:`True` the file defaults to ``stderr`` instead of ``stdout``, the same as with echo.
"""
prompt = _build_prompt(text, prompt_suffix, show_default, "Y/n" if default else "y/N")
while True:
try:
value = _prompt(prompt, err=err, hide_input=False).lower().strip()
except (KeyboardInterrupt, EOFError):
raise click.Abort()
if value in ('y', "yes"):
rv = True
elif value in ('n', "no"):
rv = False
elif value == '':
rv = default
else:
click.echo("Error: invalid input", err=err)
continue
break
if abort and not rv:
raise click.Abort()
return rv
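def _example_confirm_usage() -> None:  # pragma: no cover
	"""
	Minimal usage sketch for :func:`confirm` (illustrative only).
	With ``abort=True`` a negative answer raises :exc:`click.Abort`.
	"""
	if confirm("Overwrite existing file?", default=False):
		click.echo("Overwriting")
	confirm("Continue with deployment?", abort=True)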
def stderr_input(prompt: str = '', file: IO = sys.stdout) -> str: # pragma: no cover
"""
Read a string from standard input, but prompt to standard error.
The trailing newline is stripped.
If the user hits EOF (Unix: :kbd:`Ctrl-D`, Windows: :kbd:`Ctrl-Z+Return`), raise :exc:`EOFError`.
On Unix, GNU readline is used if enabled.
The ``prompt`` string, if given, is printed to stderr without a trailing newline before reading.
"""
if file is sys.stdout:
return input(prompt)
try:
stdin = sys.stdin
except AttributeError:
raise RuntimeError("stderr_input: lost sys.stdin")
file.write(prompt)
try:
flush = file.flush
except AttributeError:
pass
else:
flush()
try:
file.softspace = 0 # type: ignore
except (AttributeError, TypeError):
pass
line = stdin.readline()
if not line: # inputting an empty line gives line == '\n'
	raise EOFError
elif line[-1] == '\n':
	return line[:-1]
return line
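# Usage note (illustrative): stderr_input mirrors input() but can write the
# prompt to stderr, keeping stdout clean for piped output, e.g.:
#     answer = stderr_input("name: ", file=sys.stderr)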
def _prompt(text, err: bool, hide_input: bool):
if sys.platform != "linux":
# Write the prompt separately so that we get nice
# coloring through colorama on Windows
click.echo(text, nl=False, err=err)
text = ''
if hide_input:
return hidden_prompt_func(text)
elif err:
return stderr_input(text, file=sys.stderr)
else:
return click.termui.visible_prompt_func(text) # type: ignore
@overload
def choice(
options: List[str],
text: str = ...,
default: Optional[str] = ...,
prompt_suffix: str = ...,
show_default: bool = ...,
err: bool = ...,
start_index: int = ...
) -> int: ...
@overload
def choice(
options: Mapping[str, str],
text: str = ...,
default: Optional[str] = ...,
prompt_suffix: str = ...,
show_default: bool = ...,
err: bool = ...,
start_index: int = ...
) -> str: ...
def choice(
options: Union[List[str], Mapping[str, str]],
text: str = '',
default: Optional[str] = None,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
start_index: int = 0
) -> Union[str, int]:
"""
Prompts a user for input.
If the user aborts the input by sending an interrupt signal, this
function will catch it and raise a :exc:`click.Abort` exception.
:param options: Either a list of options, or a mapping of option key to description.
:param text: The text to show for the prompt.
:param default: The index of the default value to use if no input happens.
If this is not given it will prompt until it is aborted.
:param prompt_suffix: A suffix that should be added to the prompt.
:param show_default: Shows or hides the default value in the prompt.
:param err: If :py:obj:`True` the file defaults to ``stderr`` instead of
``stdout``, the same as with echo.
:param start_index: If ``options`` is a list of values, sets the start index.
"""
# TODO: completer for numbers?
type_: click.ParamType
if isinstance(options, Mapping):
# (Y/I/N/O/D/Z) [default=N]
text = f"{text} ({'/'.join(options.keys())})"
type_ = click.STRING
for option, description in options.items():
click.echo(f" {option} : {description}")
else:
type_ = click.IntRange(start_index, len(options) - 1 + start_index)
for idx, description in enumerate(options, start=start_index):
click.echo(f" [{idx}] {description}")
if default is not None and show_default:
text += f" [default={default}]"
while True:
selection = prompt(
text=text,
default=default,
type=type_,
prompt_suffix=prompt_suffix,
show_default=False,
err=err,
)
if isinstance(options, Mapping):
selection = selection.strip().upper()
if selection not in options:
click.echo(f"Please enter a valid option.")
else:
return selection
else:
return selection - start_index
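def _example_choice_usage() -> None:  # pragma: no cover
	"""
	Minimal usage sketch for :func:`choice` (illustrative only).
	A list returns the selected index; a mapping returns the selected key.
	"""
	idx = choice(["red", "green", "blue"], text="Pick a colour", start_index=1)
	key = choice({'Y': "yes", 'N': "no"}, text="Proceed?", default='N')
	click.echo(f"index={idx!r}, key={key!r}")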
main.go
package main
import (
"bytes"
"crypto/sha256"
"flag"
"fmt"
"github.com/D4-project/d4-pretensor/pretensorhit"
"golang.org/x/net/proxy"
"io"
"log"
"net"
"net/http"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/D4-project/d4-golang-utils/config"
"github.com/gomodule/redigo/redis"
rg "github.com/redislabs/redisgraph-go"
gj "github.com/tidwall/gjson"
)
type (
redisconf struct {
redisHost string
redisPort string
databasename string
}
bindesc struct {
url string
phit *pretensorhit.PHit
}
filedesc struct {
path string
info os.FileInfo
}
)
// Setting up Flags and other Vars
var (
confdir = flag.String("c", "conf.sample", "configuration directory")
folder = flag.String("log_folder", "logs", "folder containing mod security logs")
debug = flag.Bool("d", false, "debug output")
verbose = flag.Bool("v", false, "additional debug output")
delete = flag.Bool("D", false, "Delete previous graph")
d4 = flag.Bool("r", false, "Connect to redis - d4 to get new files")
tmprate, _ = time.ParseDuration("5s")
rate = flag.Duration("rl", tmprate, "Rate limiter: time in human format before retry after EOF")
buf bytes.Buffer
logger = log.New(&buf, "INFO: ", log.Lshortfile)
redisConn redis.Conn
redisConnPretensor redis.Conn
redisGR redis.Conn
redisPretensorPool *redis.Pool
redisd4Pool *redis.Pool
redisd4Queue string
tomonitor [][]byte
mitm [][]byte
curls map[string]string
// Keeps a map of sha/hash to keep track of what we downloaded already
binurls map[string]*pretensorhit.PHit
bashs map[string]*pretensorhit.PHit
wg sync.WaitGroup
walk_folder = true
checkredisd4 = true
binchan chan bindesc
filechan chan filedesc
bashchan chan bindesc
)
func main() {
// Setting up log file
f, err := os.OpenFile("pretensor.log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
log.Fatalf("error opening file: %v", err)
}
// Create infected folder if not exist
if _, err := os.Stat("infected"); os.IsNotExist(err) {
os.Mkdir("infected", 0750)
}
// Create infected_bash folder if not exist
if _, err := os.Stat("infected_bash"); os.IsNotExist(err) {
os.Mkdir("infected_bash", 0750)
}
defer f.Close()
logger.SetOutput(f)
logger.SetFlags(log.LstdFlags | log.Lshortfile)
logger.Println("Init")
// Setting up Graceful killing
sortie := make(chan os.Signal, 1)
signal.Notify(sortie, os.Interrupt, os.Kill)
// Signal goroutine
go func() {
<-sortie
logger.Println("Exiting")
os.Exit(0)
}()
// Usage
flag.Usage = func() {
fmt.Printf("d4 - d4-pretensor\n")
fmt.Printf("Parses Mod Security logs into Redis Graph \n")
fmt.Printf("from a folder first to bootstrap a redis graph, \n")
fmt.Printf("and then from d4 to update it. \n")
fmt.Printf("\n")
fmt.Printf("Usage: d4-pretensor -c config_directory\n")
fmt.Printf("\n")
fmt.Printf("Configuration\n\n")
fmt.Printf("The configuration settings are stored in files in the configuration directory\n")
fmt.Printf("specified with the -c command line switch.\n\n")
fmt.Printf("Files in the configuration directory\n")
fmt.Printf("\n")
fmt.Printf("redis_pretensor - host:port/graphname\n")
fmt.Printf("redis_d4 - host:port/db\n")
fmt.Printf("redis_d4_queue - d4 queue to pop\n")
fmt.Printf("folder - folder containing mod security logs\n")
fmt.Printf("tomonitor - list of requests to monitor (botnet activity)\n")
fmt.Printf("mitm - list of mitm proxy to remove\n")
fmt.Printf("\n")
flag.PrintDefaults()
}
// Parse Flags
flag.Parse()
if flag.NFlag() == 0 || *confdir == "" {
flag.Usage()
sortie <- os.Kill
} else {
*confdir = strings.TrimSuffix(*confdir, "/")
*confdir = strings.TrimSuffix(*confdir, "\\")
}
// Check redis-pretensor configuration
rrg := redisconf{}
// Parse Input Redis Config
tmp := config.ReadConfigFile(*confdir, "redis_pretensor")
ss := strings.Split(string(tmp), "/")
if len(ss) <= 1 {
log.Fatal("Missing Database in redis_pretensor input config: should be host:port/database_name")
}
rrg.databasename = ss[1]
var ret bool
ret, ss[0] = config.IsNet(ss[0])
if ret {
sss := strings.Split(string(ss[0]), ":")
rrg.redisHost = sss[0]
rrg.redisPort = sss[1]
} else {
logger.Fatal("Redis-pretensor config error.")
}
// Create a new redis-pretensor connection pool
redisPretensorPool = newPool(rrg.redisHost+":"+rrg.redisPort, 400)
redisConnPretensor, err = redisPretensorPool.Dial()
if err != nil {
logger.Fatal("Could not connect to redis-pretensor Redis")
}
rd4 := redisconf{}
if *d4 {
// Check redis-d4 configuration
// Parse Input Redis Config
tmp = config.ReadConfigFile(*confdir, "redis_d4")
ss = strings.Split(string(tmp), "/")
if len(ss) <= 1 {
logger.Println("Missing Database in redis_d4 input config: should be host:port/database_name -- Skipping")
checkredisd4 = false
} else {
rd4.databasename = ss[1]
ret, ss[0] = config.IsNet(ss[0])
if ret {
sss := strings.Split(string(ss[0]), ":")
rrg.redisHost = sss[0]
rrg.redisPort = sss[1]
} else {
logger.Fatal("Redis-d4 config error.")
}
// Create a new redis-graph connection pool
redisd4Pool = newPool(rrg.redisHost+":"+rrg.redisPort, 400)
redisConn, err = redisd4Pool.Dial()
if err != nil {
logger.Fatal("Could not connect to d4 Redis")
}
// Read the redis_d4_queue setting
redisd4Queue = string(config.ReadConfigFile(*confdir, "redis_d4_queue"))
}
}
// Checking that the log folder exists
log_folder := string(config.ReadConfigFile(*confdir, "folder"))
_, err = os.ReadDir(log_folder)
if err != nil {
logger.Println(err)
walk_folder = false
}
// Loading Requests to monitor
tomonitor = config.ReadConfigFileLines(*confdir, "tomonitor")
// Loading proxy list to remove from Hosts
mitm = config.ReadConfigFileLines(*confdir, "mitm")
// Init maps
curls = make(map[string]string)
bashs = make(map[string]*pretensorhit.PHit)
binurls = make(map[string]*pretensorhit.PHit)
// Init redis graph
graph := rg.GraphNew("pretensor", redisConnPretensor)
if *delete {
graph.Delete()
}
// Create processing channels
binchan = make(chan bindesc, 2000)
bashchan = make(chan bindesc, 2000)
// Unbuffered channel for the parser
filechan = make(chan filedesc)
wg.Add(3)
// Launch the download routine
go downloadBin(binchan, sortie)
// Write no ELF files to files
go writeBashs(bashchan, sortie)
// Launch the Pretensor routine
// Leaving the existing redis connection to pretensorParse
go pretensorParse(filechan, sortie, &graph)
// Walking folder
err = filepath.Walk(log_folder,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
filechan <- filedesc{path: path, info: info}
if *verbose {
logger.Println(info.Name(), info.Size())
}
return nil
})
if checkredisd4 && *d4 {
redisConnD4, err := redisd4Pool.Dial()
if err != nil {
logger.Fatal("Could not connect to d4 Redis")
}
if _, err := redisConnD4.Do("SELECT", rd4.databasename); err != nil {
redisConnD4.Close()
logger.Println(err)
return
}
// Once the walk is over, we start listening to d4 to get new files
rateLimiter := time.Tick(*rate)
redisNormal:
err = redisRead(redisConnD4, redisd4Queue)
for {
select {
case <-rateLimiter:
// Use the ratelimiter while the connection hangs
logger.Println("Limited read")
goto redisNormal
case <-sortie:
goto gtfo
}
}
}
//// Write curl commands to a file
//for _, v := range curls {
// f, err := os.OpenFile("./infected/curl.sh", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
// if err != nil {
// logger.Println(err)
// }
// if _, err := f.Write([]byte(fmt.Sprintf("%v\n", v))); err != nil {
// f.Close()
// logger.Println(err)
// }
// if err := f.Close(); err != nil {
// logger.Println(err)
// }
//}
// Waiting for the binary handling routines
wg.Wait()
gtfo:
logger.Println("Exiting")
}
func redisRead(redisConnD4 redis.Conn, redisd4Queue string) error {
for {
buf, err := redis.String(redisConnD4.Do("LPOP", redisd4Queue))
// If redis returns empty we signal EOF (the caller should retry, not stop)
if err == redis.ErrNil {
// no new record: wait until the next tick
return io.EOF
// oops
} else if err != nil {
logger.Println(err)
return err
}
fileinfo, err := os.Stat(buf)
if err != nil {
logger.Println(err)
return err
}
filechan <- filedesc{path: buf, info: fileinfo}
}
}
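// Illustrative caller sketch (not part of the original tool): redisRead
// returns io.EOF once the queue is drained, so it is meant to be re-invoked
// behind a ticker. Assuming an established conn `c` and queue name `q`:
//
//	for range time.Tick(5 * time.Second) {
//		if err := redisRead(c, q); err != nil && err != io.EOF {
//			logger.Println(err)
//			return
//		}
//	}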
// Parsing whatever is thrown into filechan
func pretensorParse(filechan chan filedesc, sortie chan os.Signal, graph *rg.Graph) error {
logger.Println("Entering pretensorparse")
defer wg.Done()
for {
select {
case file := <-filechan:
if *debug {
logger.Println(file.path)
}
info := file.info
path := file.path
if !info.IsDir() {
content, err := os.ReadFile(path)
if err != nil {
return err
}
if len(content) == 0 {
if *debug {
logger.Println("Empty File: " + path)
}
break
}
// Load JSON
contents := string(content)
if !gj.Valid(contents) {
if *debug {
logger.Println("Invalid json: " + path)
}
break
}
// For each request to monitor
for _, v := range tomonitor {
request := gj.Get(contents, "request.request_line")
if strings.Contains(request.String(), string(v)) {
// We are in a file of interest
tmp := new(pretensorhit.PHit)
tmp.SetTimestamp(gj.Get(contents, "transaction.time"))
tmp.SetIp(gj.Get(contents, "transaction.remote_address"))
tmp.SetLine(gj.Get(contents, "request.request_line"))
tmp.SetReferer(gj.Get(contents, "request.headers.Referer"))
tmp.SetUseragent(gj.Get(contents, "request.headers.User-Agent"))
tmp.SetStatus(gj.Get(contents, "response.status"))
tmp.SetBody(gj.Get(contents, "response.body"))
tmp.SetContenttype(gj.Get(contents, "response.headers.Content-Type"))
tmp.SetLength(gj.Get(contents, "response.headers.Content-Length"))
tmp.SetHost(removeMitm(gj.Get(contents, "request.headers.Host")))
// Complete the graph
// Create bot if not exist
query := `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"}) RETURN b.ip, b.firstseen, b.lastseen`
result, err := graph.Query(query)
if err != nil {
fmt.Println(err)
fmt.Println(result)
}
if result.Empty() {
if *debug {
logger.Println(tmp.GetBotNode())
}
graph.AddNode(tmp.GetBotNode())
_, err := graph.Flush()
if err != nil {
fmt.Println(err)
}
// Update Firstseen / Lastseen if already seen
} else {
result.Next()
r := result.Record()
fsstr, _ := r.Get("b.firstseen")
lsstr, _ := r.Get("b.lastseen")
fs, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(fsstr))
ls, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(lsstr))
if tmp.GetParsedTimeStamp().Before(fs) {
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
SET b.firstseen="` + tmp.GetTimestamp() + `"`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
if tmp.GetParsedTimeStamp().After(ls) {
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
SET b.lastseen="` + tmp.GetTimestamp() + `"`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
}
// Create CC if not exist
query = `MATCH (c:CC {host:"` + tmp.GetHost() + `"}) RETURN c.host, c.firstseen, c.lastseen`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
if result.Empty() {
graph.AddNode(tmp.GetCCNode())
_, err := graph.Flush()
if err != nil {
fmt.Println(err)
}
// Update Firstseen / Lastseen if already seen
} else {
result.Next()
r := result.Record()
fsstr, _ := r.Get("c.firstseen")
lsstr, _ := r.Get("c.lastseen")
fs, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(fsstr))
ls, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(lsstr))
if tmp.GetParsedTimeStamp().Before(fs) {
query = `MATCH (c:CC {host:"` + tmp.GetHost() + `"})
SET c.firstseen="` + tmp.GetTimestamp() + `"`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
if tmp.GetParsedTimeStamp().After(ls) {
query = `MATCH (c:CC {host:"` + tmp.GetHost() + `"})
SET c.lastseen="` + tmp.GetTimestamp() + `"`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
}
// Use Merge to create the relationship between the bot and the CC
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
MATCH (c:CC {host:"` + tmp.GetHost() + `"})
MERGE (b)-[r:reach {name: "reach"}]->(c)`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
// If the bot downloaded a binary
if tmp.GetContenttype() == "application/octet-stream" && tmp.GetStatus() == "200" {
// Logging all bash scripts and curl commands to download binaries
if strings.Contains(fmt.Sprintf("%v", tmp.GetBody()), "ELF") {
//tmpsha256 := sha256.Sum256([]byte(tmp.Curl()))
//curls[fmt.Sprintf("%x", tmpsha256)] = tmp.Curl()
binchan <- bindesc{url: tmp.GetBinurl(), phit: tmp}
} else {
bashchan <- bindesc{url: tmp.GetBinurl(), phit: tmp}
}
// Create binary if not exist
query := `MATCH (bin` + tmp.GetBinaryMatchQuerySelector() + `) RETURN bin`
// The following is causing a panic -- it looks like a redigo issue
//query := `MATCH (bin:Binary` + tmp.GetBinaryMatchQuerySelector() + `) RETURN bin`
qres, err := graph.Query(query)
if err != nil {
fmt.Println(err)
}
if qres.Empty() {
//fmt.Println("Add binary: "+tmp.GetBinaryMatchQuerySelector())
graph.AddNode(tmp.GetBinaryNode())
_, err := graph.Flush()
if err != nil {
fmt.Println(err)
}
}
// Use Merge to create the relationship bot, binaries and CC
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
MATCH (c:CC {host:"` + tmp.GetHost() + `"})
MATCH (bin:Binary ` + tmp.GetBinaryMatchQuerySelector() + `)
MERGE (b)-[d:download {name: "download"}]->(bin)
MERGE (c)-[h:host {name: "host"}]->(bin)`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
// Bot set a referer command
if tmp.GetCmdRawCommand() != "" {
// First we update what we know about this bot
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
MATCH (c:CC {host:"` + tmp.GetHost() + `"})
SET b.user="` + tmp.GetCmdUser() + `"
SET b.hostname="` + tmp.GetCmdHostname() + `"
SET b.fingerprint="` + tmp.GetCmdFingerprint() + `"
SET b.architecture="` + tmp.GetCmdArchitecture() + `"`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
// Then we create a command node for this command
query = `MATCH (c:Command {rawcontent:"` + tmp.GetCmdRawCommand() + `"}) RETURN c.content`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
if result.Empty() {
graph.AddNode(tmp.GetCommandNode())
_, err := graph.Flush()
if err != nil {
fmt.Println(err)
}
}
// Finally we tie the Bot and the issued Command
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
MATCH (c:CC {host:"` + tmp.GetHost() + `"})
MATCH (co:Command {rawcontent:"` + tmp.GetCmdRawCommand() + `"})
MERGE (b)-[e:execute {name: "execute"}]->(co)
MERGE (c)-[l:launch {name: "launch"}]->(co)`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
if *verbose {
fmt.Println(tmp)
}
// We treated the request
break
}
}
}
case <-sortie:
return nil
}
}
}
// Write Bashs scripts to files
func writeBashs(bc chan bindesc, sortie chan os.Signal) error {
defer wg.Done()
for {
select {
case v := <-bc:
var err error
redisGR, err = redisPretensorPool.Dial()
if err != nil {
logger.Fatal("Could not connect routine to pretensor Redis")
}
graphGR := rg.GraphNew("pretensor", redisGR)
if _, ok := bashs[v.url]; !ok {
if *debug {
logger.Println("Writing " + v.url)
}
// Set Sha 256 hash to the object
if s, err := strconv.Atoi(v.phit.GetLength()); err == nil && s > 0 {
tmpsha256 := sha256.Sum256([]byte(v.phit.GetBody()))
v.phit.SetSha256(fmt.Sprintf("%x", tmpsha256))
// Add binary's hash to the graph
query := `MATCH (b:Bot {ip:"` + v.phit.GetIp() + `"})
MATCH (c:CC {host:"` + v.phit.GetHost() + `"})
MATCH (bin:Binary ` + v.phit.GetBinaryMatchQuerySelector() + `)
MERGE (b)-[d:download {name: "download"}]->(bin)
MERGE (c)-[h:host {name: "host"}]->(bin)
ON MATCH SET bin.sha256 = "` + v.phit.GetSha256() + `"`
result, err := graphGR.Query(query)
if err != nil {
logger.Println(err)
}
if *debug {
logger.Println(query)
logger.Println(result)
}
err = os.WriteFile("./infected_bash/"+v.phit.GetSha256(), []byte(v.phit.GetBody()), 0644)
if err != nil {
logger.Println(err)
}
}
// Update the bash map
bashs[v.phit.GetBinurl()] = v.phit
} else if *debug {
logger.Println("Skipping bash " + v.url)
}
case <-sortie:
return nil
}
}
}
// Gathering Binaries ourselves
func downloadBin(phitchan chan bindesc, sortie chan os.Signal) error {
defer wg.Done()
downloading:
for {
select {
case vi := <-phitchan:
// Check whether we already touched it
if _, ok := binurls[vi.url]; !ok {
//do something here
var err error
redisGR, err = redisPretensorPool.Dial()
if err != nil {
logger.Fatal("Could not connect routine to pretensor Redis")
}
graphGR := rg.GraphNew("pretensor", redisGR)
if *debug {
logger.Println("Fetching " + vi.url)
}
// Set some options for our TCP dialer
dialer := net.Dialer{
Timeout: 15 * time.Second,
}
// create a socks5 dialer
dial, err := proxy.SOCKS5("tcp", "127.0.0.1:9050", nil, &dialer)
if err != nil {
logger.Println("can't connect to the proxy:", err)
os.Exit(1)
}
httpTransport := &http.Transport{}
httpClient := &http.Client{Transport: httpTransport}
// set our socks5 as the dialer
httpTransport.Dial = dial.Dial
// create a request
req, err := http.NewRequest("GET", vi.phit.GetBinurl(), nil)
if err != nil {
logger.Println("can't create request:", err)
break downloading
}
req.Header.Set("User-Agent", "-")
// use the http client to fetch the page
resp, err := httpClient.Do(req)
if err != nil {
logger.Println("can't GET page:", err)
break downloading
}
// update the binurls map
binurls[vi.phit.GetBinurl()] = vi.phit
b, err := io.ReadAll(resp.Body)
// close the body explicitly: a defer inside this loop would accumulate
resp.Body.Close()
if err != nil || len(b) < 1 {
logger.Println("error reading body:", err)
break downloading
}
tmpb := sha256.Sum256(b)
err = os.WriteFile("./infected/"+fmt.Sprintf("%x", tmpb), b, 0644)
if err != nil {
logger.Println(err)
break downloading
}
// Update the Go object with this sha
vi.phit.SetSha256(fmt.Sprintf("%x", tmpb))
//fmt.Printf("Not empty, we Create a new relationship for: %v ", vi.phit.GetSha256())
query := `MATCH (b:Bot {ip:"` + vi.phit.GetIp() + `"})
MATCH (c:CC {host:"` + vi.phit.GetHost() + `"})
MATCH (bin:Binary ` + vi.phit.GetBinaryMatchQuerySelector() + `)
MERGE (b)-[d:download {name: "download"}]->(bin)
MERGE (c)-[h:host {name: "host"}]->(bin)
ON MATCH SET bin.sha256 = "` + vi.phit.GetSha256() + `"`
result, err := graphGR.Query(query)
if err != nil {
fmt.Println(err)
}
if *debug {
logger.Println(query)
logger.Println(result)
}
} else if *debug {
logger.Println("Skipping " + vi.url)
}
case <-sortie:
return nil
}
}
return nil
}
func removeMitm(s gj.Result) gj.Result {
str := s.String()
for _, v := range mitm {
str = strings.Replace(str, string(v), "", -1)
}
s = gj.Result{Str: str, Type: gj.String}
return s
}
func newPool(addr string, maxconn int) *redis.Pool {
return &redis.Pool{
MaxActive: maxconn,
MaxIdle: 3,
IdleTimeout: 240 * time.Second,
// Dial or DialContext must be set. When both are set, DialContext takes precedence over Dial.
Dial: func() (redis.Conn, error) { return redis.Dial("tcp", addr) },
}
}
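// Example (illustrative, not original code): borrowing a connection from the
// pool and returning it when done; the address and PING probe are placeholders.
func exampleNewPoolUsage() error {
	pool := newPool("127.0.0.1:6379", 10)
	conn := pool.Get()
	defer conn.Close()
	_, err := conn.Do("PING")
	return err
}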
main.go | package main
import (
"bytes"
"crypto/sha256"
"flag"
"fmt"
"github.com/D4-project/d4-pretensor/pretensorhit"
"golang.org/x/net/proxy"
"io"
"log"
"net"
"net/http"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/D4-project/d4-golang-utils/config"
"github.com/gomodule/redigo/redis"
rg "github.com/redislabs/redisgraph-go"
gj "github.com/tidwall/gjson"
)
type (
redisconf struct {
redisHost string
redisPort string
databasename string
}
bindesc struct {
url string
phit *pretensorhit.PHit
}
filedesc struct {
path string
info os.FileInfo
}
)
// Setting up Flags and other Vars
var (
confdir = flag.String("c", "conf.sample", "configuration directory")
folder = flag.String("log_folder", "logs", "folder containing mod security logs")
debug = flag.Bool("d", false, "debug output")
verbose = flag.Bool("v", false, "additional debug output")
delete = flag.Bool("D", false, "Delete previous graph")
d4 = flag.Bool("r", false, "Connect to redis - d4 to get new files")
tmprate, _ = time.ParseDuration("5s")
rate = flag.Duration("rl", tmprate, "Rate limiter: time in human format before retry after EOF")
buf bytes.Buffer
logger = log.New(&buf, "INFO: ", log.Lshortfile)
redisConn redis.Conn
redisConnPretensor redis.Conn
redisGR redis.Conn
redisPretensorPool *redis.Pool
redisd4Pool *redis.Pool
redisd4Queue string
tomonitor [][]byte
mitm [][]byte
curls map[string]string
// Keeps a map of sha/hash to keep track of what we downloaded already
binurls map[string]*pretensorhit.PHit
bashs map[string]*pretensorhit.PHit
wg sync.WaitGroup
walk_folder = true
checkredisd4 = true
binchan chan bindesc
filechan chan filedesc
bashchan chan bindesc
)
func main() {
// Setting up log file
f, err := os.OpenFile("pretensor.log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
log.Fatalf("error opening file: %v", err)
}
// Create infected folder if not exist
if _, err := os.Stat("infected"); os.IsNotExist(err) {
os.Mkdir("infected", 0750)
}
// Create infected_bash folder if not exist
if _, err := os.Stat("infected_bash"); os.IsNotExist(err) {
os.Mkdir("infected_bash", 0750)
}
defer f.Close()
logger.SetOutput(f)
logger.SetFlags(log.LstdFlags | log.Lshortfile)
logger.Println("Init")
// Setting up Graceful killing
sortie := make(chan os.Signal, 1)
signal.Notify(sortie, os.Interrupt, os.Kill)
// Signal goroutine
go func() {
<-sortie
logger.Println("Exiting")
os.Exit(0)
}()
// Usage
flag.Usage = func() {
fmt.Printf("d4 - d4-pretensor\n")
fmt.Printf("Parses Mod Security logs into Redis Graph \n")
fmt.Printf("from a folder first to bootstrap a redis graph, \n")
fmt.Printf("and then from d4 to update it. \n")
fmt.Printf("\n")
fmt.Printf("Usage: d4-pretensor -c config_directory\n")
fmt.Printf("\n")
fmt.Printf("Configuration\n\n")
fmt.Printf("The configuration settings are stored in files in the configuration directory\n")
fmt.Printf("specified with the -c command line switch.\n\n")
fmt.Printf("Files in the configuration directory\n")
fmt.Printf("\n")
fmt.Printf("redis_pretensor - host:port/graphname\n")
fmt.Printf("redis_d4 - host:port/db\n")
fmt.Printf("redis_d4_queue - d4 queue to pop\n")
fmt.Printf("folder - folder containing mod security logs\n")
fmt.Printf("tomonitor - list of requests to monitor (botnet activity)\n")
fmt.Printf("mitm - list of mitm proxy to remove\n")
fmt.Printf("\n")
flag.PrintDefaults()
}
// Parse Flags
flag.Parse()
if flag.NFlag() == 0 || *confdir == "" {
flag.Usage()
sortie <- os.Kill
} else {
*confdir = strings.TrimSuffix(*confdir, "/")
*confdir = strings.TrimSuffix(*confdir, "\\")
}
// Check redis-pretensor configuration
rrg := redisconf{}
// Parse Input Redis Config
tmp := config.ReadConfigFile(*confdir, "redis_pretensor")
ss := strings.Split(string(tmp), "/")
if len(ss) <= 1 {
log.Fatal("Missing Database in redis_pretensor input config: should be host:port/database_name")
}
rrg.databasename = ss[1]
var ret bool
ret, ss[0] = config.IsNet(ss[0])
if ret {
sss := strings.Split(string(ss[0]), ":")
rrg.redisHost = sss[0]
rrg.redisPort = sss[1]
} else {
logger.Fatal("Redis-pretensor config error.")
}
// Create a new redis-pretensor connection pool
redisPretensorPool = newPool(rrg.redisHost+":"+rrg.redisPort, 400)
redisConnPretensor, err = redisPretensorPool.Dial()
if err != nil {
logger.Fatal("Could not connect to redis-pretensor Redis")
}
rd4 := redisconf{}
if *d4 {
// Check redis-d4 configuration
// Parse Input Redis Config
tmp = config.ReadConfigFile(*confdir, "redis_d4")
ss = strings.Split(string(tmp), "/")
if len(ss) <= 1 {
logger.Println("Missing Database in redis_d4 input config: should be host:port/database_name -- Skipping")
checkredisd4 = false
} else {
rd4.databasename = ss[1]
ret, ss[0] = config.IsNet(ss[0])
if ret {
sss := strings.Split(string(ss[0]), ":")
rrg.redisHost = sss[0]
rrg.redisPort = sss[1]
} else {
logger.Fatal("Redis-d4 config error.")
}
// Create a new redis-graph connection pool
redisd4Pool = newPool(rrg.redisHost+":"+rrg.redisPort, 400)
redisConn, err = redisd4Pool.Dial()
if err != nil {
logger.Fatal("Could not connect to d4 Redis")
}
// Get that the redis_d4_queue file
redisd4Queue = string(config.ReadConfigFile(*confdir, "redis_d4_queue"))
}
}
// Checking that the log folder exists
log_folder := string(config.ReadConfigFile(*confdir, "folder"))
_, err = os.ReadDir(log_folder)
if err != nil {
logger.Println(err)
walk_folder = false
}
// Loading Requests to monitor
tomonitor = config.ReadConfigFileLines(*confdir, "tomonitor")
// Loading proxy list to remove from Hosts
mitm = config.ReadConfigFileLines(*confdir, "mitm")
// Init maps
curls = make(map[string]string)
bashs = make(map[string]*pretensorhit.PHit)
binurls = make(map[string]*pretensorhit.PHit)
// Init redis graph
graph := rg.GraphNew("pretensor", redisConnPretensor)
if *delete {
graph.Delete()
}
// Create processing channels
binchan = make(chan bindesc, 2000)
bashchan = make(chan bindesc, 2000)
// Unbuffered channel for the parser
filechan = make(chan filedesc)
wg.Add(3)
// Launch the download routine
go downloadBin(binchan, sortie)
// Write no ELF files to files
go writeBashs(bashchan, sortie)
// Launch the Pretensor routine
// Leaving the existing redis connection to pretensorParse
go pretensorParse(filechan, sortie, &graph)
// Walking folder
err = filepath.Walk(log_folder,
func(path string, info os.FileInfo, err error) error {
filechan <- filedesc{path: path, info: info}
if err != nil {
return err
}
if *verbose {
logger.Println(info.Name(), info.Size())
}
return nil
})
if checkredisd4 && *d4 {
redisConnD4, err := redisd4Pool.Dial()
if err != nil {
logger.Fatal("Could not connect to d4 Redis")
}
if _, err := redisConnD4.Do("SELECT", rd4.databasename); err != nil {
redisConnD4.Close()
logger.Println(err)
return
}
// Once the walk is over, we start listening to d4 to get new files
rateLimiter := time.Tick(*rate)
redisNormal:
err = redisRead(redisConnD4, redisd4Queue)
for {
select {
case <-rateLimiter:
// Use the ratelimiter while the connection hangs
logger.Println("Limited read")
goto redisNormal
case <-sortie:
goto gtfo
}
}
}
//// Write curl commands to a file
//for _, v := range curls {
// f, err := os.OpenFile("./infected/curl.sh", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
// if err != nil {
// logger.Println(err)
// }
// if _, err := f.Write([]byte(fmt.Sprintf("%v\n", v))); err != nil {
// f.Close()
// logger.Println(err)
// }
// if err := f.Close(); err != nil {
// logger.Println(err)
// }
//}
// Waiting for the binary handling routines
wg.Wait()
gtfo:
logger.Println("Exiting")
}
func redisRead(redisConnD4 redis.Conn, redisd4Queue string) error {
for {
buf, err := redis.String(redisConnD4.Do("LPOP", redisd4Queue))
// If redis return empty: EOF (user should not stop)
if err == redis.ErrNil {
// no new record we break until the tick
return io.EOF
// oops
} else if err != nil {
logger.Println(err)
return err
}
fileinfo, err := os.Stat(buf)
if err != nil {
logger.Println(err)
return err
}
filechan <- filedesc{path: buf, info: fileinfo}
}
}
// Parsing whatever is thrown into filechan
func pretensorParse(filechan chan filedesc, sortie chan os.Signal, graph *rg.Graph) error {
logger.Println("Entering pretensorparse")
defer wg.Done()
for {
select {
case file := <-filechan:
if *debug {
logger.Println(file.path)
}
info := file.info
path := file.path
if !info.IsDir() {
content, err := os.ReadFile(path)
if err != nil {
return err
}
if len(content) == 0 {
if *debug {
logger.Println("Empty File: " + path)
}
break
}
// Load JSON
contents := string(content)
if !gj.Valid(contents) {
if *debug {
logger.Println("Invalid json: " + path)
}
break
}
// For each request to monitor
for _, v := range tomonitor {
request := gj.Get(contents, "request.request_line")
if strings.Contains(request.String(), string(v)) {
// We are in a file of interest
tmp := new(pretensorhit.PHit)
tmp.SetTimestamp(gj.Get(contents, "transaction.time"))
tmp.SetIp(gj.Get(contents, "transaction.remote_address"))
tmp.SetLine(gj.Get(contents, "request.request_line"))
tmp.SetReferer(gj.Get(contents, "request.headers.Referer"))
tmp.SetUseragent(gj.Get(contents, "request.headers.User-Agent"))
tmp.SetStatus(gj.Get(contents, "response.status"))
tmp.SetBody(gj.Get(contents, "response.body"))
tmp.SetContenttype(gj.Get(contents, "response.headers.Content-Type"))
tmp.SetLength(gj.Get(contents, "response.headers.Content-Length"))
tmp.SetHost(removeMitm(gj.Get(contents, "request.headers.Host")))
// Complete the graph
// Create bot if not exist
query := `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"}) RETURN b.ip, b.firstseen, b.lastseen`
result, err := graph.Query(query)
if err != nil {
fmt.Println(err)
fmt.Println(result)
}
if result.Empty() {
if *debug |
graph.AddNode(tmp.GetBotNode())
_, err := graph.Flush()
if err != nil {
fmt.Println(err)
}
// Update Firstseen / Lastseen if already seen
} else {
result.Next()
r := result.Record()
fsstr, _ := r.Get("b.firstseen")
lsstr, _ := r.Get("b.lastseen")
fs, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(fsstr))
ls, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(lsstr))
if tmp.GetParsedTimeStamp().Before(fs) {
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
SET b.firstseen="` + tmp.GetTimestamp() + `"`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
if tmp.GetParsedTimeStamp().After(ls) {
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
SET b.lastseen="` + tmp.GetTimestamp() + `"`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
}
// Create CC if not exist
query = `MATCH (c:CC {host:"` + tmp.GetHost() + `"}) RETURN c.host, c.firstseen, c.lastseen`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
if result.Empty() {
graph.AddNode(tmp.GetCCNode())
_, err := graph.Flush()
if err != nil {
fmt.Println(err)
}
// Update Firstseen / Lastseen if already seen
} else {
result.Next()
r := result.Record()
fsstr, _ := r.Get("c.firstseen")
lsstr, _ := r.Get("c.lastseen")
fs, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(fsstr))
ls, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(lsstr))
if tmp.GetParsedTimeStamp().Before(fs) {
query = `MATCH (c:CC {host:"` + tmp.GetHost() + `"})
SET c.firstseen="` + tmp.GetTimestamp() + `"`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
if tmp.GetParsedTimeStamp().After(ls) {
query = `MATCH (c:CC {host:"` + tmp.GetHost() + `"})
SET c.lastseen="` + tmp.GetTimestamp() + `"`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
}
// Use Merge to create the relationship between the bot and the CC
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
MATCH (c:CC {host:"` + tmp.GetHost() + `"})
MERGE (b)-[r:reach {name: "reach"}]->(c)`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
// If the bot downloaded a binary
if tmp.GetContenttype() == "application/octet-stream" && tmp.GetStatus() == "200" {
// Logging all bash scripts and curl commands to download binaries
if strings.Contains(fmt.Sprintf("%v", tmp.GetBody()), "ELF") {
//tmpsha256 := sha256.Sum256([]byte(tmp.Curl()))
//curls[fmt.Sprintf("%x", tmpsha256)] = tmp.Curl()
binchan <- bindesc{url: tmp.GetBinurl(), phit: tmp}
} else {
bashchan <- bindesc{url: tmp.GetBinurl(), phit: tmp}
}
// Create binary if not exist
query := `MATCH (bin` + tmp.GetBinaryMatchQuerySelector() + `) RETURN bin`
// The following is causing a panic -- it looks like a redigo issue
//query := `MATCH (bin:Binary` + tmp.GetBinaryMatchQuerySelector() + `) RETURN bin`
qres, err := graph.Query(query)
if err != nil {
fmt.Println(err)
}
if qres.Empty() {
//fmt.Println("Add binary: "+tmp.GetBinaryMatchQuerySelector())
graph.AddNode(tmp.GetBinaryNode())
_, err := graph.Flush()
if err != nil {
fmt.Println(err)
}
}
// Use Merge to create the relationship bot, binaries and CC
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
MATCH (c:CC {host:"` + tmp.GetHost() + `"})
MATCH (bin:Binary ` + tmp.GetBinaryMatchQuerySelector() + `)
MERGE (b)-[d:download {name: "download"}]->(bin)
MERGE (c)-[h:host {name: "host"}]->(bin)`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
// Bot set a referer command
if tmp.GetCmdRawCommand() != "" {
// First we update what we know about this bot
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
MATCH (c:CC {host:"` + tmp.GetHost() + `"})
SET b.user="` + tmp.GetCmdUser() + `"
SET b.hostname="` + tmp.GetCmdHostname() + `"
SET b.fingerprint="` + tmp.GetCmdFingerprint() + `"
SET b.architecture="` + tmp.GetCmdArchitecture() + `"`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
// Then we create a command node for this command
query = `MATCH (c:Command {rawcontent:"` + tmp.GetCmdRawCommand() + `"}) RETURN c.content`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
if result.Empty() {
graph.AddNode(tmp.GetCommandNode())
_, err := graph.Flush()
if err != nil {
fmt.Println(err)
}
}
// Finally we tie the Bot and the issued Command
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
MATCH (c:CC {host:"` + tmp.GetHost() + `"})
MATCH (co:Command {rawcontent:"` + tmp.GetCmdRawCommand() + `"})
MERGE (b)-[e:execute {name: "execute"}]->(co)
MERGE (c)-[l:launch {name: "launch"}]->(co)`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
if *verbose {
fmt.Println(tmp)
}
// We treated the request
break
}
}
}
case <-sortie:
return nil
}
}
}
// Write bash scripts to files
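// Each dequeued script is hashed (SHA-256 of the response body), the hash is
// merged into the graph, and the payload is written to ./infected_bash/<sha256>.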
func writeBashs(bc chan bindesc, sortie chan os.Signal) error {
defer wg.Done()
for {
select {
case v := <-bc:
var err error
redisGR, err = redisPretensorPool.Dial()
if err != nil {
logger.Fatal("Could not connect routine to pretensor Redis")
}
graphGR := rg.GraphNew("pretensor", redisGR)
if _, ok := bashs[v.url]; !ok {
if *debug {
logger.Println("Writing " + v.url)
}
// Set Sha 256 hash to the object
if s, err := strconv.Atoi(v.phit.GetLength()); err == nil && s > 0 {
tmpsha256 := sha256.Sum256([]byte(v.phit.GetBody()))
v.phit.SetSha256(fmt.Sprintf("%x", tmpsha256))
// Add binary's hash to the graph
query := `MATCH (b:Bot {ip:"` + v.phit.GetIp() + `"})
MATCH (c:CC {host:"` + v.phit.GetHost() + `"})
MATCH (bin:Binary ` + v.phit.GetBinaryMatchQuerySelector() + `)
MERGE (b)-[d:download {name: "download"}]->(bin)
MERGE (c)-[h:host {name: "host"}]->(bin)
ON MATCH SET bin.sha256 = "` + v.phit.GetSha256() + `"`
result, err := graphGR.Query(query)
if err != nil {
logger.Println(err)
}
if *debug {
logger.Println(query)
logger.Println(result)
}
err = os.WriteFile("./infected_bash/"+v.phit.GetSha256(), []byte(v.phit.GetBody()), 0644)
if err != nil {
logger.Println(err)
}
}
// Update the binbash map
bashs[v.phit.GetBinurl()] = v.phit
} else if *debug {
logger.Println("Skipping bash " + v.url)
}
case <-sortie:
return nil
}
}
}
// Gathering Binaries ourselves
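// Binaries are fetched through the local Tor SOCKS5 proxy (127.0.0.1:9050),
// stored under ./infected/<sha256>, and the hash is merged into the graph.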
func downloadBin(phitchan chan bindesc, sortie chan os.Signal) error {
defer wg.Done()
downloading:
for {
select {
case vi := <-phitchan:
// Check whether we already touched it
if _, ok := binurls[vi.url]; !ok {
// first time we see this URL: fetch it and record the result
var err error
redisGR, err = redisPretensorPool.Dial()
if err != nil {
logger.Fatal("Could not connect routine to pretensor Redis")
}
graphGR := rg.GraphNew("pretensor", redisGR)
if *debug {
logger.Println("Fetching " + vi.url)
}
// Set some options for our TCP dialer
dialer := net.Dialer{
Timeout: 15 * time.Second,
}
// create a socks5 dialer
dial, err := proxy.SOCKS5("tcp", "127.0.0.1:9050", nil, &dialer)
if err != nil {
logger.Println("can't connect to the proxy:", err)
os.Exit(1)
}
httpTransport := &http.Transport{}
httpClient := &http.Client{Transport: httpTransport}
// set our socks5 as the dialer
httpTransport.Dial = dial.Dial
// create a request
req, err := http.NewRequest("GET", vi.phit.GetBinurl(), nil)
if err != nil {
logger.Println("can't create request:", err)
break downloading
}
req.Header.Set("User-Agent", "-")
// use the http client to fetch the page
resp, err := httpClient.Do(req)
if err != nil {
logger.Println("can't GET page:", err)
break downloading
}
defer resp.Body.Close()
// update the binurls map
binurls[vi.phit.GetBinurl()] = vi.phit
b, err := io.ReadAll(resp.Body)
if err != nil || len(b) < 1 {
logger.Println("error reading body:", err)
break downloading
}
tmpb := sha256.Sum256(b)
err = os.WriteFile("./infected/"+fmt.Sprintf("%x", tmpb), b, 0644)
if err != nil {
logger.Println(err)
break downloading
}
// Update the Go object with this sha
vi.phit.SetSha256(fmt.Sprintf("%x", tmpb))
//fmt.Printf("Not empty, we Create a new relationship for: %v ", vi.phit.GetSha256())
query := `MATCH (b:Bot {ip:"` + vi.phit.GetIp() + `"})
MATCH (c:CC {host:"` + vi.phit.GetHost() + `"})
MATCH (bin:Binary ` + vi.phit.GetBinaryMatchQuerySelector() + `)
MERGE (b)-[d:download {name: "download"}]->(bin)
MERGE (c)-[h:host {name: "host"}]->(bin)
ON MATCH SET bin.sha256 = "` + vi.phit.GetSha256() + `"`
result, err := graphGR.Query(query)
if err != nil {
fmt.Println(err)
}
if *debug {
logger.Println(query)
logger.Println(result)
}
} else if *debug {
logger.Println("Skipping " + vi.url)
}
case <-sortie:
return nil
}
}
return nil
}
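// removeMitm strips the configured MITM-proxy markers from a Host header value
// so that C2 hostnames are stored in a canonical form.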
func removeMitm(s gj.Result) gj.Result {
str := s.String()
for _, v := range mitm {
str = strings.Replace(str, string(v), "", -1)
}
s = gj.Result{Str: str, Type: gj.String}
return s
}
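// newPool builds the redigo connection pool shared by the worker goroutines.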
func newPool(addr string, maxconn int) *redis.Pool {
return &redis.Pool{
MaxActive: maxconn,
MaxIdle: 3,
IdleTimeout: 240 * time.Second,
// Dial or DialContext must be set. When both are set, DialContext takes precedence over Dial.
Dial: func() (redis.Conn, error) { return redis.Dial("tcp", addr) },
}
}
| {
logger.Println(tmp.GetBotNode())
} | conditional_block |
main.go | package main
import (
"bytes"
"crypto/sha256"
"flag"
"fmt"
"github.com/D4-project/d4-pretensor/pretensorhit"
"golang.org/x/net/proxy"
"io"
"log"
"net"
"net/http"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/D4-project/d4-golang-utils/config"
"github.com/gomodule/redigo/redis"
rg "github.com/redislabs/redisgraph-go"
gj "github.com/tidwall/gjson"
)
type (
redisconf struct {
redisHost string
redisPort string
databasename string
}
bindesc struct {
url string
phit *pretensorhit.PHit
}
filedesc struct {
path string
info os.FileInfo
}
)
// Setting up Flags and other Vars
var (
confdir = flag.String("c", "conf.sample", "configuration directory")
folder = flag.String("log_folder", "logs", "folder containing mod security logs")
debug = flag.Bool("d", false, "debug output")
verbose = flag.Bool("v", false, "additional debug output")
delete = flag.Bool("D", false, "Delete previous graph")
d4 = flag.Bool("r", false, "Connect to redis - d4 to get new files")
tmprate, _ = time.ParseDuration("5s")
rate = flag.Duration("rl", tmprate, "Rate limiter: time in human format before retry after EOF")
buf bytes.Buffer
logger = log.New(&buf, "INFO: ", log.Lshortfile)
redisConn redis.Conn
redisConnPretensor redis.Conn
redisGR redis.Conn
redisPretensorPool *redis.Pool
redisd4Pool *redis.Pool
redisd4Queue string
tomonitor [][]byte
mitm [][]byte
curls map[string]string
// Keeps a map of sha/hash to keep track of what we downloaded already
binurls map[string]*pretensorhit.PHit
bashs map[string]*pretensorhit.PHit
wg sync.WaitGroup
walk_folder = true
checkredisd4 = true
binchan chan bindesc
filechan chan filedesc
bashchan chan bindesc
)
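// main wires three workers to channels: pretensorParse consumes file paths,
// downloadBin fetches ELF payloads, and writeBashs persists dropper scripts.
// Paths come from an initial folder walk and, with -r, from a d4 Redis queue.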
func main() {
// Setting up log file
f, err := os.OpenFile("pretensor.log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
log.Fatalf("error opening file: %v", err)
}
// Create infected folder if not exist
if _, err := os.Stat("infected"); os.IsNotExist(err) {
os.Mkdir("infected", 0750)
}
// Create infected_bash folder if not exist
if _, err := os.Stat("infected_bash"); os.IsNotExist(err) {
os.Mkdir("infected_bash", 0750)
}
defer f.Close()
logger.SetOutput(f)
logger.SetFlags(log.LstdFlags | log.Lshortfile)
logger.Println("Init")
// Setting up Graceful killing
sortie := make(chan os.Signal, 1)
signal.Notify(sortie, os.Interrupt, os.Kill)
// Signal goroutine
go func() {
<-sortie
logger.Println("Exiting")
os.Exit(0)
}()
// Usage
flag.Usage = func() {
fmt.Printf("d4 - d4-pretensor\n")
fmt.Printf("Parses Mod Security logs into Redis Graph \n")
fmt.Printf("from a folder first to bootstrap a redis graph, \n")
fmt.Printf("and then from d4 to update it. \n")
fmt.Printf("\n")
fmt.Printf("Usage: d4-pretensor -c config_directory\n")
fmt.Printf("\n")
fmt.Printf("Configuration\n\n")
fmt.Printf("The configuration settings are stored in files in the configuration directory\n")
fmt.Printf("specified with the -c command line switch.\n\n")
fmt.Printf("Files in the configuration directory\n")
fmt.Printf("\n")
fmt.Printf("redis_pretensor - host:port/graphname\n")
fmt.Printf("redis_d4 - host:port/db\n")
fmt.Printf("redis_d4_queue - d4 queue to pop\n")
fmt.Printf("folder - folder containing mod security logs\n")
fmt.Printf("tomonitor - list of requests to monitor (botnet activity)\n")
fmt.Printf("mitm - list of mitm proxy to remove\n")
fmt.Printf("\n")
flag.PrintDefaults()
}
// Parse Flags
flag.Parse()
if flag.NFlag() == 0 || *confdir == "" {
flag.Usage()
sortie <- os.Kill
} else {
*confdir = strings.TrimSuffix(*confdir, "/")
*confdir = strings.TrimSuffix(*confdir, "\\")
}
// Check redis-pretensor configuration
rrg := redisconf{}
// Parse Input Redis Config
tmp := config.ReadConfigFile(*confdir, "redis_pretensor")
ss := strings.Split(string(tmp), "/")
if len(ss) <= 1 {
log.Fatal("Missing Database in redis_pretensor input config: should be host:port/database_name")
}
rrg.databasename = ss[1]
var ret bool
ret, ss[0] = config.IsNet(ss[0])
if ret {
sss := strings.Split(string(ss[0]), ":")
rrg.redisHost = sss[0]
rrg.redisPort = sss[1]
} else {
logger.Fatal("Redis-pretensor config error.")
}
// Create a new redis-pretensor connection pool
redisPretensorPool = newPool(rrg.redisHost+":"+rrg.redisPort, 400)
redisConnPretensor, err = redisPretensorPool.Dial()
if err != nil {
logger.Fatal("Could not connect to redis-pretensor Redis")
}
rd4 := redisconf{}
if *d4 {
// Check redis-d4 configuration
// Parse Input Redis Config
tmp = config.ReadConfigFile(*confdir, "redis_d4")
ss = strings.Split(string(tmp), "/")
if len(ss) <= 1 {
logger.Println("Missing Database in redis_d4 input config: should be host:port/database_name -- Skipping")
checkredisd4 = false
} else {
rd4.databasename = ss[1]
ret, ss[0] = config.IsNet(ss[0])
if ret {
sss := strings.Split(string(ss[0]), ":")
rd4.redisHost = sss[0]
rd4.redisPort = sss[1]
} else {
logger.Fatal("Redis-d4 config error.")
}
// Create a new redis-d4 connection pool
redisd4Pool = newPool(rd4.redisHost+":"+rd4.redisPort, 400)
redisConn, err = redisd4Pool.Dial()
if err != nil {
logger.Fatal("Could not connect to d4 Redis")
}
// Get that the redis_d4_queue file
redisd4Queue = string(config.ReadConfigFile(*confdir, "redis_d4_queue"))
}
}
// Checking that the log folder exists
log_folder := string(config.ReadConfigFile(*confdir, "folder"))
_, err = os.ReadDir(log_folder)
if err != nil {
logger.Println(err)
walk_folder = false
}
// Loading Requests to monitor
tomonitor = config.ReadConfigFileLines(*confdir, "tomonitor")
// Loading proxy list to remove from Hosts
mitm = config.ReadConfigFileLines(*confdir, "mitm")
// Init maps
curls = make(map[string]string)
bashs = make(map[string]*pretensorhit.PHit)
binurls = make(map[string]*pretensorhit.PHit)
// Init redis graph
graph := rg.GraphNew("pretensor", redisConnPretensor)
if *delete {
graph.Delete()
}
// Create processing channels
binchan = make(chan bindesc, 2000)
bashchan = make(chan bindesc, 2000)
// Unbuffered channel for the parser
filechan = make(chan filedesc)
wg.Add(3)
// Launch the download routine
go downloadBin(binchan, sortie)
// Write no ELF files to files
go writeBashs(bashchan, sortie)
// Launch the Pretensor routine
// Leaving the existing redis connection to pretensorParse
go pretensorParse(filechan, sortie, &graph)
// Walking folder
err = filepath.Walk(log_folder,
func(path string, info os.FileInfo, err error) error {
filechan <- filedesc{path: path, info: info}
if err != nil {
return err
}
if *verbose {
logger.Println(info.Name(), info.Size())
}
return nil
})
if checkredisd4 && *d4 {
redisConnD4, err := redisd4Pool.Dial()
if err != nil {
logger.Fatal("Could not connect to d4 Redis")
}
if _, err := redisConnD4.Do("SELECT", rd4.databasename); err != nil {
redisConnD4.Close()
logger.Println(err)
return
}
// Once the walk is over, we start listening to d4 to get new files
rateLimiter := time.Tick(*rate)
redisNormal:
err = redisRead(redisConnD4, redisd4Queue)
for {
select {
case <-rateLimiter:
// Use the ratelimiter while the connection hangs
logger.Println("Limited read")
goto redisNormal
case <-sortie:
goto gtfo
}
}
}
//// Write curl commands to a file
//for _, v := range curls {
// f, err := os.OpenFile("./infected/curl.sh", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
// if err != nil {
// logger.Println(err)
// }
// if _, err := f.Write([]byte(fmt.Sprintf("%v\n", v))); err != nil {
// f.Close()
// logger.Println(err)
// }
// if err := f.Close(); err != nil {
// logger.Println(err)
// }
//}
// Waiting for the binary handling routines
wg.Wait()
gtfo:
logger.Println("Exiting")
}
func redisRead(redisConnD4 redis.Conn, redisd4Queue string) error |
// Parsing whatever is thrown into filechan
func pretensorParse(filechan chan filedesc, sortie chan os.Signal, graph *rg.Graph) error {
logger.Println("Entering pretensorparse")
defer wg.Done()
for {
select {
case file := <-filechan:
if *debug {
logger.Println(file.path)
}
info := file.info
path := file.path
if !info.IsDir() {
content, err := os.ReadFile(path)
if err != nil {
return err
}
if len(content) == 0 {
if *debug {
logger.Println("Empty File: " + path)
}
break
}
// Load JSON
contents := string(content)
if !gj.Valid(contents) {
if *debug {
logger.Println("Invalid json: " + path)
}
break
}
// For each request to monitor
for _, v := range tomonitor {
request := gj.Get(contents, "request.request_line")
if strings.Contains(request.String(), string(v)) {
// We are in a file of interest
tmp := new(pretensorhit.PHit)
tmp.SetTimestamp(gj.Get(contents, "transaction.time"))
tmp.SetIp(gj.Get(contents, "transaction.remote_address"))
tmp.SetLine(gj.Get(contents, "request.request_line"))
tmp.SetReferer(gj.Get(contents, "request.headers.Referer"))
tmp.SetUseragent(gj.Get(contents, "request.headers.User-Agent"))
tmp.SetStatus(gj.Get(contents, "response.status"))
tmp.SetBody(gj.Get(contents, "response.body"))
tmp.SetContenttype(gj.Get(contents, "response.headers.Content-Type"))
tmp.SetLength(gj.Get(contents, "response.headers.Content-Length"))
tmp.SetHost(removeMitm(gj.Get(contents, "request.headers.Host")))
// Complete the graph
// Create bot if not exist
query := `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"}) RETURN b.ip, b.firstseen, b.lastseen`
result, err := graph.Query(query)
if err != nil {
fmt.Println(err)
fmt.Println(result)
}
if result.Empty() {
if *debug {
logger.Println(tmp.GetBotNode())
}
graph.AddNode(tmp.GetBotNode())
_, err := graph.Flush()
if err != nil {
fmt.Println(err)
}
// Update Firstseen / Lastseen if already seen
} else {
result.Next()
r := result.Record()
fsstr, _ := r.Get("b.firstseen")
lsstr, _ := r.Get("b.lastseen")
fs, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(fsstr))
ls, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(lsstr))
if tmp.GetParsedTimeStamp().Before(fs) {
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
SET b.firstseen="` + tmp.GetTimestamp() + `"`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
if tmp.GetParsedTimeStamp().After(ls) {
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
SET b.lastseen="` + tmp.GetTimestamp() + `"`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
}
// Create CC if not exist
query = `MATCH (c:CC {host:"` + tmp.GetHost() + `"}) RETURN c.host, c.firstseen, c.lastseen`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
if result.Empty() {
graph.AddNode(tmp.GetCCNode())
_, err := graph.Flush()
if err != nil {
fmt.Println(err)
}
// Update Firstseen / Lastseen if already seen
} else {
result.Next()
r := result.Record()
fsstr, _ := r.Get("c.firstseen")
lsstr, _ := r.Get("c.lastseen")
fs, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(fsstr))
ls, _ := time.Parse("02/Jan/2006:15:04:05 -0700", fmt.Sprint(lsstr))
if tmp.GetParsedTimeStamp().Before(fs) {
query = `MATCH (c:CC {host:"` + tmp.GetHost() + `"})
SET c.firstseen="` + tmp.GetTimestamp() + `"`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
if tmp.GetParsedTimeStamp().After(ls) {
query = `MATCH (c:CC {host:"` + tmp.GetHost() + `"})
SET c.lastseen="` + tmp.GetTimestamp() + `"`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
}
// Use Merge to create the relationship between the bot and the CC
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
MATCH (c:CC {host:"` + tmp.GetHost() + `"})
MERGE (b)-[r:reach {name: "reach"}]->(c)`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
// If the bot downloaded a binary
if tmp.GetContenttype() == "application/octet-stream" && tmp.GetStatus() == "200" {
// Logging all bash scripts and curl commands to download binaries
if strings.Contains(fmt.Sprintf("%v", tmp.GetBody()), "ELF") {
//tmpsha256 := sha256.Sum256([]byte(tmp.Curl()))
//curls[fmt.Sprintf("%x", tmpsha256)] = tmp.Curl()
binchan <- bindesc{url: tmp.GetBinurl(), phit: tmp}
} else {
bashchan <- bindesc{url: tmp.GetBinurl(), phit: tmp}
}
// Create binary if not exist
query := `MATCH (bin` + tmp.GetBinaryMatchQuerySelector() + `) RETURN bin`
// The following is causing a panic -- it looks like a redigo issue
//query := `MATCH (bin:Binary` + tmp.GetBinaryMatchQuerySelector() + `) RETURN bin`
qres, err := graph.Query(query)
if err != nil {
fmt.Println(err)
}
if qres.Empty() {
//fmt.Println("Add binary: "+tmp.GetBinaryMatchQuerySelector())
graph.AddNode(tmp.GetBinaryNode())
_, err := graph.Flush()
if err != nil {
fmt.Println(err)
}
}
// Use Merge to create the relationship bot, binaries and CC
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
MATCH (c:CC {host:"` + tmp.GetHost() + `"})
MATCH (bin:Binary ` + tmp.GetBinaryMatchQuerySelector() + `)
MERGE (b)-[d:download {name: "download"}]->(bin)
MERGE (c)-[h:host {name: "host"}]->(bin)`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
// Bot set a referer command
if tmp.GetCmdRawCommand() != "" {
// First we update what we know about this bot
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
MATCH (c:CC {host:"` + tmp.GetHost() + `"})
SET b.user="` + tmp.GetCmdUser() + `"
SET b.hostname="` + tmp.GetCmdHostname() + `"
SET b.fingerprint="` + tmp.GetCmdFingerprint() + `"
SET b.architecture="` + tmp.GetCmdArchitecture() + `"`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
// Then we create a command node for this command
query = `MATCH (c:Command {rawcontent:"` + tmp.GetCmdRawCommand() + `"}) RETURN c.content`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
if result.Empty() {
graph.AddNode(tmp.GetCommandNode())
_, err := graph.Flush()
if err != nil {
fmt.Println(err)
}
}
// Finally we tie the Bot and the issued Command
query = `MATCH (b:Bot {ip:"` + tmp.GetIp() + `"})
MATCH (c:CC {host:"` + tmp.GetHost() + `"})
MATCH (co:Command {rawcontent:"` + tmp.GetCmdRawCommand() + `"})
MERGE (b)-[e:execute {name: "execute"}]->(co)
MERGE (c)-[l:launch {name: "launch"}]->(co)`
result, err = graph.Query(query)
if err != nil {
fmt.Println(err)
}
}
if *verbose {
fmt.Println(tmp)
}
// We treated the request
break
}
}
}
case <-sortie:
return nil
}
}
}
// Write bash scripts to files
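// Each dequeued script is hashed (SHA-256 of the response body), the hash is
// merged into the graph, and the payload is written to ./infected_bash/<sha256>.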
func writeBashs(bc chan bindesc, sortie chan os.Signal) error {
defer wg.Done()
for {
select {
case v := <-bc:
var err error
redisGR, err = redisPretensorPool.Dial()
if err != nil {
logger.Fatal("Could not connect routine to pretensor Redis")
}
graphGR := rg.GraphNew("pretensor", redisGR)
if _, ok := bashs[v.url]; !ok {
if *debug {
logger.Println("Writing " + v.url)
}
// Set Sha 256 hash to the object
if s, err := strconv.Atoi(v.phit.GetLength()); err == nil && s > 0 {
tmpsha256 := sha256.Sum256([]byte(v.phit.GetBody()))
v.phit.SetSha256(fmt.Sprintf("%x", tmpsha256))
// Add binary's hash to the graph
query := `MATCH (b:Bot {ip:"` + v.phit.GetIp() + `"})
MATCH (c:CC {host:"` + v.phit.GetHost() + `"})
MATCH (bin:Binary ` + v.phit.GetBinaryMatchQuerySelector() + `)
MERGE (b)-[d:download {name: "download"}]->(bin)
MERGE (c)-[h:host {name: "host"}]->(bin)
ON MATCH SET bin.sha256 = "` + v.phit.GetSha256() + `"`
result, err := graphGR.Query(query)
if err != nil {
logger.Println(err)
}
if *debug {
logger.Println(query)
logger.Println(result)
}
err = os.WriteFile("./infected_bash/"+v.phit.GetSha256(), []byte(v.phit.GetBody()), 0644)
if err != nil {
logger.Println(err)
}
}
// Update the binbash map
bashs[v.phit.GetBinurl()] = v.phit
} else if *debug {
logger.Println("Skipping bash " + v.url)
}
case <-sortie:
return nil
}
}
}
// Gathering Binaries ourselves
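// Binaries are fetched through the local Tor SOCKS5 proxy (127.0.0.1:9050),
// stored under ./infected/<sha256>, and the hash is merged into the graph.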
func downloadBin(phitchan chan bindesc, sortie chan os.Signal) error {
defer wg.Done()
downloading:
for {
select {
case vi := <-phitchan:
// Check whether we already touched it
if _, ok := binurls[vi.url]; !ok {
// first time we see this URL: fetch it and record the result
var err error
redisGR, err = redisPretensorPool.Dial()
if err != nil {
logger.Fatal("Could not connect routine to pretensor Redis")
}
graphGR := rg.GraphNew("pretensor", redisGR)
if *debug {
logger.Println("Fetching " + vi.url)
}
// Set some options for our TCP dialer
dialer := net.Dialer{
Timeout: 15 * time.Second,
}
// create a socks5 dialer
dial, err := proxy.SOCKS5("tcp", "127.0.0.1:9050", nil, &dialer)
if err != nil {
logger.Println("can't connect to the proxy:", err)
os.Exit(1)
}
httpTransport := &http.Transport{}
httpClient := &http.Client{Transport: httpTransport}
// set our socks5 as the dialer
httpTransport.Dial = dial.Dial
// create a request
req, err := http.NewRequest("GET", vi.phit.GetBinurl(), nil)
if err != nil {
logger.Println("can't create request:", err)
break downloading
}
req.Header.Set("User-Agent", "-")
// use the http client to fetch the page
resp, err := httpClient.Do(req)
if err != nil {
logger.Println("can't GET page:", err)
break downloading
}
defer resp.Body.Close()
// update the binurls map
binurls[vi.phit.GetBinurl()] = vi.phit
b, err := io.ReadAll(resp.Body)
if err != nil || len(b) < 1 {
logger.Println("error reading body:", err)
break downloading
}
tmpb := sha256.Sum256(b)
err = os.WriteFile("./infected/"+fmt.Sprintf("%x", tmpb), b, 0644)
if err != nil {
logger.Println(err)
break downloading
}
// Update the Go object with this sha
vi.phit.SetSha256(fmt.Sprintf("%x", tmpb))
//fmt.Printf("Not empty, we Create a new relationship for: %v ", vi.phit.GetSha256())
query := `MATCH (b:Bot {ip:"` + vi.phit.GetIp() + `"})
MATCH (c:CC {host:"` + vi.phit.GetHost() + `"})
MATCH (bin:Binary ` + vi.phit.GetBinaryMatchQuerySelector() + `)
MERGE (b)-[d:download {name: "download"}]->(bin)
MERGE (c)-[h:host {name: "host"}]->(bin)
ON MATCH SET bin.sha256 = "` + vi.phit.GetSha256() + `"`
result, err := graphGR.Query(query)
if err != nil {
fmt.Println(err)
}
if *debug {
logger.Println(query)
logger.Println(result)
}
} else if *debug {
logger.Println("Skipping " + vi.url)
}
case <-sortie:
return nil
}
}
return nil
}
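// removeMitm strips the configured MITM-proxy markers from a Host header value
// so that C2 hostnames are stored in a canonical form.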
func removeMitm(s gj.Result) gj.Result {
str := s.String()
for _, v := range mitm {
str = strings.Replace(str, string(v), "", -1)
}
s = gj.Result{Str: str, Type: gj.String}
return s
}
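// newPool builds the redigo connection pool shared by the worker goroutines.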
func newPool(addr string, maxconn int) *redis.Pool {
return &redis.Pool{
MaxActive: maxconn,
MaxIdle: 3,
IdleTimeout: 240 * time.Second,
// Dial or DialContext must be set. When both are set, DialContext takes precedence over Dial.
Dial: func() (redis.Conn, error) { return redis.Dial("tcp", addr) },
}
}
| {
for {
buf, err := redis.String(redisConnD4.Do("LPOP", redisd4Queue))
// If redis returns nil there is nothing to pop: report EOF so the caller retries on the next tick
if err == redis.ErrNil {
// no new record; we wait until the next tick
return io.EOF
// oops
} else if err != nil {
logger.Println(err)
return err
}
fileinfo, err := os.Stat(buf)
if err != nil {
logger.Println(err)
return err
}
filechan <- filedesc{path: buf, info: fileinfo}
}
} | identifier_body |
trixer.py | # -*- coding: utf-8 -*-
__author__ = 'Adônis Gasiglia'
import argparse, os, pickle, sys, ConfigParser, math
from PIL import Image, ImageDraw, ImageFont
### AUXILIARY FUNCTIONS ###
def getKey0(item):
return item[0]
def getKey1(item):
return item[1]
def calcPixelLuminance(pixel):
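# Rec. 709 luma weights: Y = 0.2126*R + 0.7152*G + 0.0722*B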
return pixel[0]*0.2126 + pixel[1]*0.7152 + pixel[2]*0.0722
def calcBlockLuminance(file,blockx,blocky,lumitable):
im = Image.open(file)
px = im.load()
luminance = 0
for x in xrange(blockx*lumitable.blockWidth,(blockx*lumitable.blockWidth)+lumitable.blockWidth):
for y in xrange(blocky*lumitable.blockHeight,(blocky*lumitable.blockHeight)+lumitable.blockHeight):
luminance += calcPixelLuminance(px[x,y])
luminance = luminance/(lumitable.blockHeight*lumitable.blockWidth)
return luminance
### LUMITABLE CLASS ###
class lumitable:
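# Maps every character in charRange to the mean luminance of its rendered
# glyph; the finished table is pickled to lumitables/<font>.lut for reuse.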
def __init__(self,fontname,fontsize,range,blockheight,blockwidth):
self.fontName = fontname
self.fontSize = fontsize
self.charRange = range
self.charNumber = range[1] - range[0]
self.blockHeight = blockheight
self.blockWidth = blockwidth
self.table = []
def generateFontStrip(self):
if self.fontName is not None:
base = Image.new('RGBA', (self.charNumber*self.blockWidth,self.blockHeight), (255,255,255,255))
txt = Image.new('RGBA', base.size, (255,255,255,0))
fnt = ImageFont.truetype(("lumitables/" + self.fontName + '.ttf'), self.fontSize)
d = ImageDraw.Draw(txt)
for num in range(self.charRange[0],self.charRange[1]):
pos = (num-self.charRange[0])*self.blockWidth
d.text((pos,0), chr(num), font=fnt, fill=(0,0,0,255))
out = Image.alpha_composite(base, txt)
# write to stdout
out.save(("lumitables/" + self.fontName + ".png"), "PNG")
v_print(2,"Fontstrip generated!")
def generateLuminanceTable(self):
for block in xrange(0,self.charNumber):
self.table.append([block+self.charRange[0],calcBlockLuminance("lumitables/" + self.fontName + ".png",block,0,self)])
self.table.sort(key=getKey1)
with open("lumitables/" + self.fontName + ".lut", 'wb') as f:
pickle.dump(self, f)
v_print(2,"Lumitable generated!")
### IMAGETABLE CLASS ###
class imagetable:
def __init__(self,file,lumitable,colorMode):
self.file = file
self.image = Image.open(file)
self.xBlocks = math.floor(self.image.size[0].__float__() / lumitable.blockWidth.__float__()).__int__()
self.yBlocks = math.floor(self.image.size[1].__float__() / lumitable.blockHeight.__float__()).__int__()
self.lumitable = lumitable
self.colorMode = colorMode
self.table = []
if colorMode == "colors": ##### CONSERRRRRTAAAAAAAA
self.colorTable = [[0 for x in range(self.xBlocks*3)] for x in range(self.yBlocks*3)]
ready = 0.0
total = self.xBlocks*self.yBlocks
for x in range(0,self.xBlocks):
for y in range(0,self.yBlocks):
luminance = calcBlockLuminance(self.file,x,y,self.lumitable)
if colorMode == "colors":
self.colorTable[x][y] = self.calcColorAverage(x,y,self.lumitable)
found = False
for i in self.table:
if i[0] == luminance:
i[1].append((x,y))
found = True
break
if not found:
self.table.append([luminance,[(x,y)]])
ready += 1.0
v_print(2,"Generating imagetable: {0:.2f}%".format((ready/total)*100.0))
self.table.sort(key=getKey0)
v_print(2,"Imagetable generated!")
def calcColorAverage(self,blockx,blocky,lumitable):
im = Image.open(self.file)
px = im.load()
red = 0
green = 0
blue = 0
for x in xrange(blockx*lumitable.blockWidth,(blockx*lumitable.blockWidth)+lumitable.blockWidth):
for y in xrange(blocky*lumitable.blockHeight,(blocky*lumitable.blockHeight)+lumitable.blockHeight):
red += px[x,y][0]
green += px[x,y][1]
blue += px[x,y][2]
red = red / (lumitable.blockHeight*lumitable.blockWidth)
green = green / (lumitable.blockHeight*lumitable.blockWidth)
blue = blue / (lumitable.blockHeight*lumitable.blockWidth)
return (red,green,blue)
### Trix Class ###
class trix:
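# Pairs the darkest image blocks with the densest glyphs: both tables are
# consumed in luminance order, blockPerChar image blocks per character.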
def __init__(self,name,lumi,imagetb):
self.name = name
self.lumitable = lumi
self.imagetable = imagetb
self.imagetable.table.reverse()
self.image = Image.new('RGBA', (self.imagetable.xBlocks*self.lumitable.blockWidth,self.imagetable.yBlocks*self.lumitable.blockHeight), (255,255,255,255))
self.blockPerChar = math.ceil(len(self.imagetable.table).__float__() / len(self.lumitable.table).__float__())
self.trixtable = []
def generateTrixtable(self):
trixindex = -1
ready = 0.0
total = len(self.lumitable.table)
for i in self.lumitable.table:
trixindex += 1
self.trixtable.append([i[0],[]])
for n in range(0,self.blockPerChar.__int__()):
if(len(self.imagetable.table)>0):
self.trixtable[trixindex][1].append((self.imagetable.table.pop()[1]))
else:
break
ready += 1.0
v_print(2,"Generating trixtable: {0:.2f}%".format((ready/total)*100.0))
v_print(2,"Trixtable generated!")
def printTrix(self,output):
txt = Image.new('RGBA', self.image.size, (255,255,255,0))
fnt = ImageFont.truetype("lumitables/"+self.lumitable.fontName+".ttf", self.lumitable.fontSize)
d = ImageDraw.Draw(txt)
for currtrix in self.trixtable:
if len(currtrix) > 0:
for i in range(0,self.blockPerChar.__int__()):
if len(currtrix[1]) > i:
for tuple in currtrix[1][i]:
x = tuple[0] * self.lumitable.blockWidth
y = tuple[1] * self.lumitable.blockHeight
if self.imagetable.colorMode == "colors":
red = self.imagetable.colorTable[tuple[0]][tuple[1]][0]
green = self.imagetable.colorTable[tuple[0]][tuple[1]][1]
blue = self.imagetable.colorTable[tuple[0]][tuple[1]][2]
d.text((x,y), chr(currtrix[0]), font=fnt, fill=(red,green,blue,255))
else:
d.text((x,y), chr(currtrix[0]), font=fnt, fill=(0,0,0,255))
out = Image.alpha_composite(self.image, txt)
out.save("output/" + output)
v_print(2,"Trix saved!")
### Default Configs ###
class configs():
def __init__(self):
# Open defaults.cfg if it exists or create a new one if it doesn't.
ConfigPrs = ConfigParser.ConfigParser()
if os.path.isfile("defaults.cfg"):
ConfigPrs.read("defaults.cfg")
else:
cfgfile = open("defaults.cfg",'w')
# add the settings to the structure of the file, and let's write it out...
ConfigPrs.add_section('Defaults')
ConfigPrs.set('Defaults','lumitable','courier.lut')
ConfigPrs.set('Defaults','colorMode', 'colors')
ConfigPrs.set('Defaults','verbosity', "1") # 0 = nothing / 1 = errors / 2 = info
ConfigPrs.write(cfgfile)
cfgfile.close()
# -/
self.input = ""
self.output = ""
self.lumitable = ConfigPrs.get("Defaults","lumitable")
self.colorMode = ConfigPrs.get("Defaults","colorMode")
self.verbosity = int(ConfigPrs.get("Defaults","verbosity"))
### --------------- ###
### MAIN FUNCTION ###
def main():
### Arguments parsing ###
# Configure and parse the command line parameters.
parser = argparse.ArgumentParser(description='Creates a number matrix based on an image file.')
parser.add_argument('-i','--input',help="Input file pathname.",required=True)
parser.add_argument('-o','--output',help="Output file pathname.",required=True)
parser.add_argument('-l','--lumitable',help="Lumitable name.",required=False)
parser.add_argument('-c','--colorMode',help="Color mode (bw/colors).",required=False)
parser.add_argument('-v','--verbosity',help="Controls how much information the program will print.\n0 = none | 1 = errors | 2 = errors and info", required=False)
args = parser.parse_args()
### ----------------- ###
if not os.path.isfile(args.input):
v_print(1,"EXITING: Input file not found!")
sys.exit(-1)
else:
conf.input = args.input
# TODO: solve permission problems on Windows
if os.path.isfile("output/" + args.output):
o | else:
conf.output = args.output
if args.lumitable != None:
if os.path.isfile("lumitables/" + args.lumitable) :
v_print(1,"EXITING: Lumitable " + args.lumitable + " not found on /lumitables folder!")
sys.exit(-1)
else:
conf.lumitable = args.lumitable
if args.colorMode != None:
if args.colorMode != "colors" and args.colorMode != "bw":
v_print(1,"EXITING: Color mode " + args.colorMode + " don't exist!")
sys.exit(-1)
else:
conf.colorMode = args.colorMode
if args.verbosity != None:
if int(args.verbosity) < 0 or int(args.verbosity) > 2:
v_print(1,"EXITING: Verbosity level " + args.verbosity + " don't exist!")
sys.exit(-1)
else:
conf.verbosity = int(args.verbosity)
### The Program Core ###
with open("lumitables/" + conf.lumitable, 'rb') as f:
lumi = pickle.load(f)
imtable = imagetable(conf.input,lumi,conf.colorMode)
tri = trix(conf.input,lumi,imtable)
tri.generateTrixtable()
tri.printTrix(conf.output)
### ---------------- ###
if __name__ == "__main__":
conf = configs()
### Implementing Verbosity ###
if conf.verbosity:
def _v_print(*verb_args):
if verb_args[0] <= conf.verbosity:
if verb_args[0] == 1: print ("ERROR " + verb_args[1])
if verb_args[0] == 2: print ("INFO " + verb_args[1])
else:
_v_print = lambda *a: None # do-nothing function
global v_print
v_print = _v_print
sys.exit(main())
| p = raw_input("Output file already exists. Overwrite existing file? (Y/N)")
if(op == "n" or op == "N"):
v_print(1,"EXITING: Process canceled. Output file already exists.")
sys.exit(-1)
| conditional_block |
trixer.py | # -*- coding: utf-8 -*-
__author__ = 'Adônis Gasiglia'
import argparse, os, pickle, sys, ConfigParser, math
from PIL import Image, ImageDraw, ImageFont
### AUXILIARY FUNCTIONS ###
def getKey0(item):
return item[0]
def getKey1(item):
return item[1]
def calcPixelLuminance(pixel):
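# Rec. 709 luma weights: Y = 0.2126*R + 0.7152*G + 0.0722*B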
return pixel[0]*0.2126 + pixel[1]*0.7152 + pixel[2]*0.0722
def calcBlockLuminance(file,blockx,blocky,lumitable):
im = Image.open(file)
px = im.load()
luminance = 0
for x in xrange(blockx*lumitable.blockWidth,(blockx*lumitable.blockWidth)+lumitable.blockWidth):
for y in xrange(blocky*lumitable.blockHeight,(blocky*lumitable.blockHeight)+lumitable.blockHeight):
luminance += calcPixelLuminance(px[x,y])
luminance = luminance/(lumitable.blockHeight*lumitable.blockWidth)
return luminance
### LUMITABLE CLASS ###
class lumitable:
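# Maps every character in charRange to the mean luminance of its rendered
# glyph; the finished table is pickled to lumitables/<font>.lut for reuse.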
def __init__(self,fontname,fontsize,range,blockheight,blockwidth):
self.fontName = fontname
self.fontSize = fontsize
self.charRange = range
self.charNumber = range[1] - range[0]
self.blockHeight = blockheight
self.blockWidth = blockwidth
self.table = []
def generateFontStrip(self):
if self.fontName is not None:
base = Image.new('RGBA', (self.charNumber*self.blockWidth,self.blockHeight), (255,255,255,255))
txt = Image.new('RGBA', base.size, (255,255,255,0))
fnt = ImageFont.truetype(("lumitables/" + self.fontName + '.ttf'), self.fontSize)
d = ImageDraw.Draw(txt)
for num in range(self.charRange[0],self.charRange[1]):
pos = (num-self.charRange[0])*self.blockWidth
d.text((pos,0), chr(num), font=fnt, fill=(0,0,0,255))
out = Image.alpha_composite(base, txt)
# write to stdout
out.save(("lumitables/" + self.fontName + ".png"), "PNG")
v_print(2,"Fontstrip generated!")
def generateLuminanceTable(self):
for block in xrange(0,self.charNumber):
self.table.append([block+self.charRange[0],calcBlockLuminance("lumitables/" + self.fontName + ".png",block,0,self)])
self.table.sort(key=getKey1)
with open("lumitables/" + self.fontName + ".lut", 'wb') as f:
pickle.dump(self, f)
v_print(2,"Lumitable generated!")
### IMAGETABLE CLASS ###
class imagetable:
def __init__(self,file,lumitable,colorMode):
self.file = file
self.image = Image.open(file)
self.xBlocks = math.floor(self.image.size[0].__float__() / lumitable.blockWidth.__float__()).__int__()
self.yBlocks = math.floor(self.image.size[1].__float__() / lumitable.blockHeight.__float__()).__int__()
self.lumitable = lumitable
self.colorMode = colorMode
self.table = []
if colorMode == "colors": ##### CONSERRRRRTAAAAAAAA
self.colorTable = [[0 for x in range(self.xBlocks*3)] for x in range(self.yBlocks*3)]
ready = 0.0
total = self.xBlocks*self.yBlocks
for x in range(0,self.xBlocks):
for y in range(0,self.yBlocks):
luminance = calcBlockLuminance(self.file,x,y,self.lumitable)
if colorMode == "colors":
self.colorTable[x][y] = self.calcColorAverage(x,y,self.lumitable)
found = False
for i in self.table:
if i[0] == luminance:
i[1].append((x,y))
found = True
break
if not found:
self.table.append([luminance,[(x,y)]])
ready += 1.0
v_print(2,"Generating imagetable: {0:.2f}%".format((ready/total)*100.0))
self.table.sort(key=getKey0)
v_print(2,"Imagetable generated!")
def calcColorAverage(self,blockx,blocky,lumitable):
i |
### Trix Class ###
class trix:
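# Pairs the darkest image blocks with the densest glyphs: both tables are
# consumed in luminance order, blockPerChar image blocks per character.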
def __init__(self,name,lumi,imagetb):
self.name = name
self.lumitable = lumi
self.imagetable = imagetb
self.imagetable.table.reverse()
self.image = Image.new('RGBA', (self.imagetable.xBlocks*self.lumitable.blockWidth,self.imagetable.yBlocks*self.lumitable.blockHeight), (255,255,255,255))
self.blockPerChar = math.ceil(len(self.imagetable.table).__float__() / len(self.lumitable.table).__float__())
self.trixtable = []
def generateTrixtable(self):
trixindex = -1
ready = 0.0
total = len(self.lumitable.table)
for i in self.lumitable.table:
trixindex += 1
self.trixtable.append([i[0],[]])
for n in range(0,self.blockPerChar.__int__()):
if(len(self.imagetable.table)>0):
self.trixtable[trixindex][1].append((self.imagetable.table.pop()[1]))
else:
break
ready += 1.0
v_print(2,"Generating trixtable: {0:.2f}%".format((ready/total)*100.0))
v_print(2,"Trixtable generated!")
def printTrix(self,output):
txt = Image.new('RGBA', self.image.size, (255,255,255,0))
fnt = ImageFont.truetype("lumitables/"+self.lumitable.fontName+".ttf", self.lumitable.fontSize)
d = ImageDraw.Draw(txt)
for currtrix in self.trixtable:
if len(currtrix) > 0:
for i in range(0,self.blockPerChar.__int__()):
if len(currtrix[1]) > i:
for tuple in currtrix[1][i]:
x = tuple[0] * self.lumitable.blockWidth
y = tuple[1] * self.lumitable.blockHeight
if self.imagetable.colorMode == "colors":
red = self.imagetable.colorTable[tuple[0]][tuple[1]][0]
green = self.imagetable.colorTable[tuple[0]][tuple[1]][1]
blue = self.imagetable.colorTable[tuple[0]][tuple[1]][2]
d.text((x,y), chr(currtrix[0]), font=fnt, fill=(red,green,blue,255))
else:
d.text((x,y), chr(currtrix[0]), font=fnt, fill=(0,0,0,255))
out = Image.alpha_composite(self.image, txt)
out.save("output/" + output)
v_print(2,"Trix saved!")
### Default Configs ###
class configs():
def __init__(self):
# Open defaults.cfg if it exists or create a new one if it doesn't.
ConfigPrs = ConfigParser.ConfigParser()
if os.path.isfile("defaults.cfg"):
ConfigPrs.read("defaults.cfg")
else:
cfgfile = open("defaults.cfg",'w')
# add the settings to the structure of the file, and let's write it out...
ConfigPrs.add_section('Defaults')
ConfigPrs.set('Defaults','lumitable','courier.lut')
ConfigPrs.set('Defaults','colorMode', 'colors')
ConfigPrs.set('Defaults','verbosity', "1") # 0 = nothing / 1 = errors / 2 = info
ConfigPrs.write(cfgfile)
cfgfile.close()
# -/
self.input = ""
self.output = ""
self.lumitable = ConfigPrs.get("Defaults","lumitable")
self.colorMode = ConfigPrs.get("Defaults","colorMode")
self.verbosity = int(ConfigPrs.get("Defaults","verbosity"))
### --------------- ###
### MAIN FUNCTION ###
def main():
### Arguments parsing ###
# Configure and parse the command line parameters.
parser = argparse.ArgumentParser(description='Creates a number matrix based on an image file.')
parser.add_argument('-i','--input',help="Input file pathname.",required=True)
parser.add_argument('-o','--output',help="Output file pathname.",required=True)
parser.add_argument('-l','--lumitable',help="Lumitable name.",required=False)
parser.add_argument('-c','--colorMode',help="Color mode (bw/colors).",required=False)
parser.add_argument('-v','--verbosity',help="Controls how much information the program will print.\n0 = none | 1 = errors | 2 = errors and info", required=False)
args = parser.parse_args()
### ----------------- ###
if not os.path.isfile(args.input):
v_print(1,"EXITING: Input file not found!")
sys.exit(-1)
else:
conf.input = args.input
# TODO: solve permission problems on Windows
if os.path.isfile("output/" + args.output):
op = raw_input("Output file already exists. Overwrite existing file? (Y/N)")
if(op == "n" or op == "N"):
v_print(1,"EXITING: Process canceled. Output file already exists.")
sys.exit(-1)
else:
conf.output = args.output
if args.lumitable != None:
if os.path.isfile("lumitables/" + args.lumitable) :
v_print(1,"EXITING: Lumitable " + args.lumitable + " not found on /lumitables folder!")
sys.exit(-1)
else:
conf.lumitable = args.lumitable
if args.colorMode != None:
if args.colorMode != "colors" and args.colorMode != "bw":
v_print(1,"EXITING: Color mode " + args.colorMode + " don't exist!")
sys.exit(-1)
else:
conf.colorMode = args.colorMode
if args.verbosity != None:
if int(args.verbosity) < 0 or int(args.verbosity) > 2:
v_print(1,"EXITING: Verbosity level " + args.verbosity + " don't exist!")
sys.exit(-1)
else:
conf.verbosity = int(args.verbosity)
### The Program Core ###
with open("lumitables/" + conf.lumitable, 'rb') as f:
lumi = pickle.load(f)
imtable = imagetable(conf.input,lumi,conf.colorMode)
tri = trix(conf.input,lumi,imtable)
tri.generateTrixtable()
tri.printTrix(conf.output)
### ---------------- ###
if __name__ == "__main__":
conf = configs()
### Implementing Verbosity ###
if conf.verbosity:
def _v_print(*verb_args):
if verb_args[0] <= conf.verbosity:
if verb_args[0] == 1: print ("ERROR " + verb_args[1])
if verb_args[0] == 2: print ("INFO " + verb_args[1])
else:
_v_print = lambda *a: None # do-nothing function
global v_print
v_print = _v_print
sys.exit(main())
| m = Image.open(self.file)
px = im.load()
red = 0
green = 0
blue = 0
for x in xrange(blockx*lumitable.blockWidth,(blockx*lumitable.blockWidth)+lumitable.blockWidth):
for y in xrange(blocky*lumitable.blockHeight,(blocky*lumitable.blockHeight)+lumitable.blockHeight):
red += px[x,y][0]
green += px[x,y][1]
blue += px[x,y][2]
red = red / (lumitable.blockHeight*lumitable.blockWidth)
green = green / (lumitable.blockHeight*lumitable.blockWidth)
blue = blue / (lumitable.blockHeight*lumitable.blockWidth)
return (red,green,blue)
| identifier_body |
trixer.py | # -*- coding: utf-8 -*-
__author__ = 'Adônis Gasiglia'
import argparse, os, pickle, sys, ConfigParser, math
from PIL import Image, ImageDraw, ImageFont
### AUXILIARY FUNCTIONS ###
def getKey0(item):
return item[0]
def getKey1(item):
return item[1]
def calcPixelLuminance(pixel):
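# Rec. 709 luma weights: Y = 0.2126*R + 0.7152*G + 0.0722*B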
return pixel[0]*0.2126 + pixel[1]*0.7152 + pixel[2]*0.0722
def calcBlockLuminance(file,blockx,blocky,lumitable):
im = Image.open(file)
px = im.load()
luminance = 0
for x in xrange(blockx*lumitable.blockWidth,(blockx*lumitable.blockWidth)+lumitable.blockWidth):
for y in xrange(blocky*lumitable.blockHeight,(blocky*lumitable.blockHeight)+lumitable.blockHeight):
luminance += calcPixelLuminance(px[x,y])
luminance = luminance/(lumitable.blockHeight*lumitable.blockWidth)
return luminance
### LUMITABLE CLASS ###
class lumitable:
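# Maps every character in charRange to the mean luminance of its rendered
# glyph; the finished table is pickled to lumitables/<font>.lut for reuse.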
def __init__(self,fontname,fontsize,range,blockheight,blockwidth):
self.fontName = fontname
self.fontSize = fontsize
self.charRange = range
self.charNumber = range[1] - range[0]
self.blockHeight = blockheight
self.blockWidth = blockwidth
self.table = []
def generateFontStrip(self):
if self.fontName is not None:
base = Image.new('RGBA', (self.charNumber*self.blockWidth,self.blockHeight), (255,255,255,255))
txt = Image.new('RGBA', base.size, (255,255,255,0))
fnt = ImageFont.truetype(("lumitables/" + self.fontName + '.ttf'), self.fontSize)
d = ImageDraw.Draw(txt)
for num in range(self.charRange[0],self.charRange[1]):
pos = (num-self.charRange[0])*self.blockWidth
d.text((pos,0), chr(num), font=fnt, fill=(0,0,0,255))
out = Image.alpha_composite(base, txt)
# write to stdout
out.save(("lumitables/" + self.fontName + ".png"), "PNG")
v_print(2,"Fontstrip generated!")
def generateLuminanceTable(self):
for block in xrange(0,self.charNumber):
self.table.append([block+self.charRange[0],calcBlockLuminance("lumitables/" + self.fontName + ".png",block,0,self)])
self.table.sort(key=getKey1)
with open("lumitables/" + self.fontName + ".lut", 'wb') as f:
pickle.dump(self, f)
v_print(2,"Lumitable generated!")
### IMAGETABLE CLASS ###
class imagetable:
def __init__(self,file,lumitable,colorMode):
self.file = file
self.image = Image.open(file)
self.xBlocks = math.floor(self.image.size[0].__float__() / lumitable.blockWidth.__float__()).__int__()
self.yBlocks = math.floor(self.image.size[1].__float__() / lumitable.blockHeight.__float__()).__int__()
self.lumitable = lumitable
self.colorMode = colorMode
self.table = []
if colorMode == "colors": ##### CONSERRRRRTAAAAAAAA
self.colorTable = [[0 for x in range(self.xBlocks*3)] for x in range(self.yBlocks*3)]
ready = 0.0
total = self.xBlocks*self.yBlocks
for x in range(0,self.xBlocks):
for y in range(0,self.yBlocks):
luminance = calcBlockLuminance(self.file,x,y,self.lumitable)
if colorMode == "colors":
self.colorTable[x][y] = self.calcColorAverage(x,y,self.lumitable)
found = False
for i in self.table:
if i[0] == luminance:
i[1].append((x,y))
found = True
break
if not found:
self.table.append([luminance,[(x,y)]])
ready += 1.0
v_print(2,"Generating imagetable: {0:.2f}%".format((ready/total)*100.0))
self.table.sort(key=getKey0)
v_print(2,"Imagetable generated!")
def calcColorAverage(self,blockx,blocky,lumitable):
im = Image.open(self.file)
px = im.load()
red = 0
green = 0
blue = 0
for x in xrange(blockx*lumitable.blockWidth,(blockx*lumitable.blockWidth)+lumitable.blockWidth):
for y in xrange(blocky*lumitable.blockHeight,(blocky*lumitable.blockHeight)+lumitable.blockHeight):
red += px[x,y][0]
green += px[x,y][1]
blue += px[x,y][2]
red = red / (lumitable.blockHeight*lumitable.blockWidth)
green = green / (lumitable.blockHeight*lumitable.blockWidth)
blue = blue / (lumitable.blockHeight*lumitable.blockWidth)
return (red,green,blue)
### Trix Class ###
class trix:
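# Pairs the darkest image blocks with the densest glyphs: both tables are
# consumed in luminance order, blockPerChar image blocks per character.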
def __init__(self,name,lumi,imagetb):
self.name = name
self.lumitable = lumi
self.imagetable = imagetb
self.imagetable.table.reverse()
self.image = Image.new('RGBA', (self.imagetable.xBlocks*self.lumitable.blockWidth,self.imagetable.yBlocks*self.lumitable.blockHeight), (255,255,255,255))
self.blockPerChar = math.ceil(len(self.imagetable.table).__float__() / len(self.lumitable.table).__float__())
self.trixtable = []
def generateTrixtable(self):
trixindex = -1
ready = 0.0
total = len(self.lumitable.table)
for i in self.lumitable.table:
trixindex += 1
self.trixtable.append([i[0],[]])
for n in range(0,self.blockPerChar.__int__()):
if(len(self.imagetable.table)>0):
self.trixtable[trixindex][1].append((self.imagetable.table.pop()[1]))
else:
break
ready += 1.0
v_print(2,"Generating trixtable: {0:.2f}%".format((ready/total)*100.0))
v_print(2,"Trixtable generated!")
def printTrix(self,output):
txt = Image.new('RGBA', self.image.size, (255,255,255,0))
fnt = ImageFont.truetype("lumitables/"+self.lumitable.fontName+".ttf", self.lumitable.fontSize)
d = ImageDraw.Draw(txt)
for currtrix in self.trixtable:
if len(currtrix) > 0:
for i in range(0,self.blockPerChar.__int__()):
if len(currtrix[1]) > i:
for tuple in currtrix[1][i]: | blue = self.imagetable.colorTable[tuple[0]][tuple[1]][2]
d.text((x,y), chr(currtrix[0]), font=fnt, fill=(red,green,blue,255))
else:
d.text((x,y), chr(currtrix[0]), font=fnt, fill=(0,0,0,255))
out = Image.alpha_composite(self.image, txt)
out.save("output/" + output)
v_print(2,"Trix saved!")
### Default Configs ###
class configs():
def __init__(self):
# Open defaults.cfg if it exists or create a new one if it doesn't.
ConfigPrs = ConfigParser.ConfigParser()
if os.path.isfile("defaults.cfg"):
ConfigPrs.read("defaults.cfg")
else:
cfgfile = open("defaults.cfg",'w')
# add the settings to the structure of the file, and let's write it out...
ConfigPrs.add_section('Defaults')
ConfigPrs.set('Defaults','lumitable','courier.lut')
ConfigPrs.set('Defaults','colorMode', 'colors')
ConfigPrs.set('Defaults','verbosity', "1") # 0 = nothing / 1 = errors / 2 = info
ConfigPrs.write(cfgfile)
cfgfile.close()
# -/
self.input = ""
self.output = ""
self.lumitable = ConfigPrs.get("Defaults","lumitable")
self.colorMode = ConfigPrs.get("Defaults","colorMode")
self.verbosity = int(ConfigPrs.get("Defaults","verbosity"))
### --------------- ###
### MAIN FUNCTION ###
def main():
### Arguments parsing ###
# Configure and parse the command line parameters.
parser = argparse.ArgumentParser(description='Creates a number matrix based on an image file.')
parser.add_argument('-i','--input',help="Input file pathname.",required=True)
parser.add_argument('-o','--output',help="Output file pathname.",required=True)
parser.add_argument('-l','--lumitable',help="Lumitable name.",required=False)
parser.add_argument('-c','--colorMode',help="Color mode (bw/colors).",required=False)
parser.add_argument('-v','--verbosity',help="Controls how much information the program will print.\n0 = none | 1 = errors | 2 = errors and info", required=False)
args = parser.parse_args()
### ----------------- ###
if not os.path.isfile(args.input):
v_print(1,"EXITING: Input file not found!")
sys.exit(-1)
else:
conf.input = args.input
# TODO: solve permission problems on Windows
if os.path.isfile("output/" + args.output):
op = raw_input("Output file already exists. Overwrite existing file? (Y/N)")
if(op == "n" or op == "N"):
v_print(1,"EXITING: Process canceled. Output file already exists.")
sys.exit(-1)
else:
conf.output = args.output
if args.lumitable != None:
if os.path.isfile("lumitables/" + args.lumitable) :
v_print(1,"EXITING: Lumitable " + args.lumitable + " not found on /lumitables folder!")
sys.exit(-1)
else:
conf.lumitable = args.lumitable
if args.colorMode != None:
if args.colorMode != "colors" and args.colorMode != "bw":
v_print(1,"EXITING: Color mode " + args.colorMode + " don't exist!")
sys.exit(-1)
else:
conf.colorMode = args.colorMode
if args.verbosity != None:
if int(args.verbosity) < 0 or int(args.verbosity) > 2:
v_print(1,"EXITING: Verbosity level " + args.verbosity + " don't exist!")
sys.exit(-1)
else:
conf.verbosity = int(args.verbosity)
### The Program Core ###
with open("lumitables/" + conf.lumitable, 'rb') as f:
lumi = pickle.load(f)
imtable = imagetable(conf.input,lumi,conf.colorMode)
tri = trix(conf.input,lumi,imtable)
tri.generateTrixtable()
tri.printTrix(conf.output)
### ---------------- ###
if __name__ == "__main__":
conf = configs()
### Implementing Verbosity ###
if conf.verbosity:
def _v_print(*verb_args):
if verb_args[0] <= conf.verbosity:
if verb_args[0] == 1: print ("ERROR " + verb_args[1])
if verb_args[0] == 2: print ("INFO " + verb_args[1])
else:
_v_print = lambda *a: None # do-nothing function
global v_print
v_print = _v_print
sys.exit(main()) | x = tuple[0] * self.lumitable.blockWidth
y = tuple[1] * self.lumitable.blockHeight
if self.imagetable.colorMode == "colors":
red = self.imagetable.colorTable[tuple[0]][tuple[1]][0]
green = self.imagetable.colorTable[tuple[0]][tuple[1]][1] | random_line_split |
trixer.py | # -*- coding: utf-8 -*-
__author__ = 'Adônis Gasiglia'
import argparse, os, pickle, sys, ConfigParser, math
from PIL import Image, ImageDraw, ImageFont
### AUXILIARY FUNCTIONS ###
def getKey0(item):
return item[0]
def getKey1(item):
return item[1]
def calcPixelLuminance(pixel):
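# Rec. 709 luma weights: Y = 0.2126*R + 0.7152*G + 0.0722*B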
return pixel[0]*0.2126 + pixel[1]*0.7152 + pixel[2]*0.0722
def calcBlockLuminance(file,blockx,blocky,lumitable):
im = Image.open(file)
px = im.load()
luminance = 0
for x in xrange(blockx*lumitable.blockWidth,(blockx*lumitable.blockWidth)+lumitable.blockWidth):
for y in xrange(blocky*lumitable.blockHeight,(blocky*lumitable.blockHeight)+lumitable.blockHeight):
luminance += calcPixelLuminance(px[x,y])
luminance = luminance/(lumitable.blockHeight*lumitable.blockWidth)
return luminance
### LUMITABLE CLASS ###
class lumitable:
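# Maps every character in charRange to the mean luminance of its rendered
# glyph; the finished table is pickled to lumitables/<font>.lut for reuse.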
def __init__(self,fontname,fontsize,range,blockheight,blockwidth):
self.fontName = fontname
self.fontSize = fontsize
self.charRange = range
self.charNumber = range[1] - range[0]
self.blockHeight = blockheight
self.blockWidth = blockwidth
self.table = []
def generateFontStrip(self):
if self.fontName is not None:
base = Image.new('RGBA', (self.charNumber*self.blockWidth,self.blockHeight), (255,255,255,255))
txt = Image.new('RGBA', base.size, (255,255,255,0))
fnt = ImageFont.truetype(("lumitables/" + self.fontName + '.ttf'), self.fontSize)
d = ImageDraw.Draw(txt)
for num in range(self.charRange[0],self.charRange[1]):
pos = (num-self.charRange[0])*self.blockWidth
d.text((pos,0), chr(num), font=fnt, fill=(0,0,0,255))
out = Image.alpha_composite(base, txt)
# write to stdout
out.save(("lumitables/" + self.fontName + ".png"), "PNG")
v_print(2,"Fontstrip generated!")
def generateLuminanceTable(self):
for block in xrange(0,self.charNumber):
self.table.append([block+self.charRange[0],calcBlockLuminance("lumitables/" + self.fontName + ".png",block,0,self)])
self.table.sort(key=getKey1)
with open("lumitables/" + self.fontName + ".lut", 'wb') as f:
pickle.dump(self, f)
v_print(2,"Lumitable generated!")
### IMAGETABLE CLASS ###
class imagetable:
def __init__(self,file,lumitable,colorMode):
self.file = file
self.image = Image.open(file)
self.xBlocks = math.floor(self.image.size[0].__float__() / lumitable.blockWidth.__float__()).__int__()
self.yBlocks = math.floor(self.image.size[1].__float__() / lumitable.blockHeight.__float__()).__int__()
self.lumitable = lumitable
self.colorMode = colorMode
self.table = []
if colorMode == "colors": ##### CONSERRRRRTAAAAAAAA
self.colorTable = [[0 for x in range(self.xBlocks*3)] for x in range(self.yBlocks*3)]
ready = 0.0
total = self.xBlocks*self.yBlocks
for x in range(0,self.xBlocks):
for y in range(0,self.yBlocks):
luminance = calcBlockLuminance(self.file,x,y,self.lumitable)
if colorMode == "colors":
self.colorTable[x][y] = self.calcColorAverage(x,y,self.lumitable)
found = False
for i in self.table:
if i[0] == luminance:
i[1].append((x,y))
found = True
break
if not found:
self.table.append([luminance,[(x,y)]])
ready += 1.0
v_print(2,"Generating imagetable: {0:.2f}%".format((ready/total)*100.0))
self.table.sort(key=getKey0)
v_print(2,"Imagetable generated!")
def calcColorAverage(self,blockx,blocky,lumitable):
im = Image.open(self.file)
px = im.load()
red = 0
green = 0
blue = 0
for x in xrange(blockx*lumitable.blockWidth,(blockx*lumitable.blockWidth)+lumitable.blockWidth):
for y in xrange(blocky*lumitable.blockHeight,(blocky*lumitable.blockHeight)+lumitable.blockHeight):
red += px[x,y][0]
green += px[x,y][1]
blue += px[x,y][2]
red = red / (lumitable.blockHeight*lumitable.blockWidth)
green = green / (lumitable.blockHeight*lumitable.blockWidth)
blue = blue / (lumitable.blockHeight*lumitable.blockWidth)
return (red,green,blue)
### Trix Class ###
class trix:
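# Pairs the darkest image blocks with the densest glyphs: both tables are
# consumed in luminance order, blockPerChar image blocks per character.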
def _ | self,name,lumi,imagetb):
self.name = name
self.lumitable = lumi
self.imagetable = imagetb
self.imagetable.table.reverse()
self.image = Image.new('RGBA', (self.imagetable.xBlocks*self.lumitable.blockWidth,self.imagetable.yBlocks*self.lumitable.blockHeight), (255,255,255,255))
self.blockPerChar = math.ceil(len(self.imagetable.table).__float__() / len(self.lumitable.table).__float__())
self.trixtable = []
def generateTrixtable(self):
trixindex = -1
ready = 0.0
total = len(self.lumitable.table)
for i in self.lumitable.table:
trixindex += 1
self.trixtable.append([i[0],[]])
for n in range(0,self.blockPerChar.__int__()):
if(len(self.imagetable.table)>0):
self.trixtable[trixindex][1].append((self.imagetable.table.pop()[1]))
else:
break
ready += 1.0
v_print(2,"Generating trixtable: {0:.2f}%".format((ready/total)*100.0))
v_print(2,"Trixtable generated!")
def printTrix(self,output):
txt = Image.new('RGBA', self.image.size, (255,255,255,0))
fnt = ImageFont.truetype("lumitables/"+self.lumitable.fontName+".ttf", self.lumitable.fontSize)
d = ImageDraw.Draw(txt)
for currtrix in self.trixtable:
if len(currtrix) > 0:
for i in range(0,self.blockPerChar.__int__()):
if len(currtrix[1]) > i:
for tuple in currtrix[1][i]:
x = tuple[0] * self.lumitable.blockWidth
y = tuple[1] * self.lumitable.blockHeight
if self.imagetable.colorMode == "colors":
red = self.imagetable.colorTable[tuple[0]][tuple[1]][0]
green = self.imagetable.colorTable[tuple[0]][tuple[1]][1]
blue = self.imagetable.colorTable[tuple[0]][tuple[1]][2]
d.text((x,y), chr(currtrix[0]), font=fnt, fill=(red,green,blue,255))
else:
d.text((x,y), chr(currtrix[0]), font=fnt, fill=(0,0,0,255))
out = Image.alpha_composite(self.image, txt)
out.save("output/" + output)
v_print(2,"Trix saved!")
### Default Configs ###
class configs():
def __init__(self):
        # Open defaults.cfg if it exists or create a new one if it doesn't.
ConfigPrs = ConfigParser.ConfigParser()
if os.path.isfile("defaults.cfg"):
ConfigPrs.read("defaults.cfg")
else:
cfgfile = open("defaults.cfg",'w')
# add the settings to the structure of the file, and lets write it out...
ConfigPrs.add_section('Defaults')
ConfigPrs.set('Defaults','lumitable','courier.lut')
ConfigPrs.set('Defaults','colorMode', 'colors')
ConfigPrs.set('Defaults','verbosity', "1") # 0 = nothing / 1 = errors / 2 = info
ConfigPrs.write(cfgfile)
cfgfile.close()
# -/
self.input = ""
self.output = ""
self.lumitable = ConfigPrs.get("Defaults","lumitable")
self.colorMode = ConfigPrs.get("Defaults","colorMode")
self.verbosity = int(ConfigPrs.get("Defaults","verbosity"))
### --------------- ###
### MAIN FUNCTION ###
def main():
### Arguments parsing ###
# Configure and parse the command line parameters.
parser = argparse.ArgumentParser(description='Creates a number matrix based on an image file.')
parser.add_argument('-i','--input',help="Input file pathname.",required=True)
parser.add_argument('-o','--output',help="Output file pathname.",required=True)
parser.add_argument('-l','--lumitable',help="Lumitable name.",required=False)
parser.add_argument('-c','--colorMode',help="Color mode (bw/colors).",required=False)
parser.add_argument('-v','--verbosity',help="Controls how much information the program will print.\n0 = none | 1 = errors | 2 = errors and info", required=False)
args = parser.parse_args()
### ----------------- ###
if not os.path.isfile(args.input):
v_print(1,"EXITING: Input file not found!")
sys.exit(-1)
else:
conf.input = args.input
# TODO: solve permission problems on Windows
if os.path.isfile("output/" + args.output):
op = raw_input("Output file already exists. Overwrite existing file? (Y/N)")
if(op == "n" or op == "N"):
v_print(1,"EXITING: Process canceled. Output file already exists.")
sys.exit(-1)
else:
conf.output = args.output
if args.lumitable != None:
if os.path.isfile("lumitables/" + args.lumitable) :
v_print(1,"EXITING: Lumitable " + args.lumitable + " not found on /lumitables folder!")
sys.exit(-1)
else:
conf.lumitable = args.lumitable
if args.colorMode != None:
if args.colorMode != "colors" and args.colorMode != "bw":
v_print(1,"EXITING: Color mode " + args.colorMode + " don't exist!")
sys.exit(-1)
else:
conf.colorMode = args.colorMode
if args.verbosity != None:
if int(args.verbosity) < 0 or int(args.verbosity) > 2:
v_print(1,"EXITING: Verbosity level " + args.verbosity + " don't exist!")
sys.exit(-1)
else:
            conf.verbosity = int(args.verbosity)  # keep as int so v_print level checks compare numbers
### The Program Core ###
with open("lumitables/" + conf.lumitable, 'rb') as f:
lumi = pickle.load(f)
imtable = imagetable(conf.input,lumi,conf.colorMode)
tri = trix(conf.input,lumi,imtable)
tri.generateTrixtable()
tri.printTrix(conf.output)
### ---------------- ###
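# Example invocation (hypothetical file names; the script name is assumed):
#   python trix.py -i input/cat.png -o cat_trix.png -l courier.lut -c bw -v 2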
if __name__ == "__main__":
conf = configs()
### Implementing Verbosity ###
if conf.verbosity:
def _v_print(*verb_args):
if verb_args[0] <= conf.verbosity:
if verb_args[0] == 1: print ("ERROR " + verb_args[1])
if verb_args[0] == 2: print ("INFO " + verb_args[1])
else:
_v_print = lambda *a: None # do-nothing function
global v_print
v_print = _v_print
sys.exit(main())
main.rs

use std::{
collections::HashSet,
fmt,
fs::{File, OpenOptions},
io::{self, prelude::*, BufRead, Cursor},
net::{IpAddr, Ipv4Addr},
process::Command,
};
use failure::Fail;
use nom::{
branch::alt,
bytes::complete::tag,
character::complete::{alpha1, alphanumeric1, digit1, hex_digit1, one_of, space1},
combinator::{all_consuming, map_res, opt, recognize, rest},
error::{convert_error, ErrorKind, ParseError, VerboseError},
multi::{many0, many_m_n, separated_list},
sequence::{preceded, tuple},
Err, IResult,
};
use structopt::StructOpt;
static HOSTS_FILE: &str = "/etc/hosts";
#[derive(Debug, StructOpt)]
#[structopt(name = "local-domain-alias")]
struct Options {
#[structopt(name = "port")]
port: u16,
#[structopt(name = "alias")]
alias: String,
#[structopt(skip)]
ip: Option<u8>,
}
#[derive(Debug, Fail)]
enum Error {
#[fail(display = "alias is already in use")]
AliasAlreadyInUse,
#[fail(display = "incomplete alias")]
IncompleteAlias,
#[fail(display = "invalid alias format\n{}", _0)]
InvalidAliasFormat(String),
#[fail(display = "could not set up port forwarding: ip tables error {}", _0)]
IptablesCommandFailed(i32),
#[fail(display = "must be run as root")]
MustRunAsRoot,
#[fail(display = "io error: {}", _0)]
IoError(io::Error),
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Error {
match error.kind() {
io::ErrorKind::PermissionDenied => Error::MustRunAsRoot,
_e => Error::IoError(dbg!(error)),
}
}
}
fn octet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u8, E> {
map_res(digit1, |s: &str| s.parse::<u8>())(input)
}
fn dotted_octet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u8, E> {
preceded(tag("."), octet)(input)
}
fn ip_v4_addr<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> {
recognize(tuple((octet, dotted_octet, dotted_octet, dotted_octet)))(input)
}
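// A hedged sketch of how the octet/dotted-quad parsers above compose; the
// expected tuples assume nom's `IResult` convention of Ok((remaining, output)).
#[cfg(test)]
mod ip_v4_sketch {
    use super::*;

    #[test]
    fn parses_dotted_quad_and_leaves_the_rest() {
        let (rest, addr) = ip_v4_addr::<VerboseError<&str>>("10.0.0.1 x").unwrap();
        assert_eq!(addr, "10.0.0.1");
        assert_eq!(rest, " x");
    }
}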
fn hextet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u16, E> {
    // hex_digit1 matches hexadecimal digits, so parse base-16 rather than
    // base-10 (str::parse::<u16> would reject "ff" and friends).
    map_res(hex_digit1, |s: &str| u16::from_str_radix(s, 16))(input)
}
fn sep_hextet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u16, E> {
preceded(tag("::"), hextet)(input)
}
fn ip_v6_addr<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> {
let parser = preceded(opt(hextet), many_m_n(1, 7, sep_hextet));
recognize(parser)(input)
}
fn ip_addr<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, IpAddr, E> {
map_res(alt((ip_v4_addr, ip_v6_addr)), |s: &str| s.parse::<IpAddr>())(input)
}
fn hostname<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> {
recognize(tuple((
alpha1,
many0(alt((alphanumeric1, recognize(one_of("-."))))),
)))(input)
}
fn check_hostname<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, (), E> {
all_consuming(hostname)(input).map(|(input, _)| (input, ()))
}
fn aliases<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, Vec<String>, E> {
let (input, aliases) = separated_list(tag(" "), hostname)(input)?;
Ok((input, aliases.into_iter().map(String::from).collect()))
}
fn comment<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> {
preceded(tag("#"), rest)(input)
}
#[derive(Debug)]
struct HostsLine {
ip: IpAddr,
canonical_hostname: String,
aliases: Vec<String>,
comment: Option<String>,
}
impl HostsLine {
fn new(ip: IpAddr, canonical_hostname: String) -> HostsLine {
let aliases = Vec::new();
let comment = None;
HostsLine {
ip,
canonical_hostname,
aliases,
comment,
}
}
}
impl fmt::Display for HostsLine {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let HostsLine {
ip,
canonical_hostname,
aliases,
comment,
} = self;
        let sep = match ip.to_string().chars().count() {
            0..=8 => "\t\t",
            9..=16 => "\t",
            _ => " ",
        };
write!(
f,
"{ip}{sep}{ch}",
ip = ip,
sep = sep,
ch = canonical_hostname,
)?;
if !aliases.is_empty() {
write!(f, "\t{}", aliases.join(" "))?;
}
if let Some(comment) = comment {
write!(f, "#{}", comment)?;
}
Ok(())
}
}
fn hosts_line<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, HostsLine, E> {
let (input, ip) = ip_addr(input)?;
let (input, _) = space1(input)?;
let (input, canonical_hostname) = hostname(input)?;
let (input, _) = space1(input)?;
let (input, aliases) = opt(aliases)(input)?;
let (input, comment) = opt(comment)(input)?;
let canonical_hostname = String::from(canonical_hostname);
let aliases = aliases.unwrap_or_else(Vec::new);
let comment = comment.map(String::from);
Ok((
input,
HostsLine {
ip,
canonical_hostname,
aliases,
comment,
},
))
}
#[derive(Debug)]
enum Line {
Unstructured(String),
Structured(HostsLine),
}
impl Line {
fn structured(ip: IpAddr, canonical_name: String) -> Line {
Line::Structured(HostsLine::new(ip, canonical_name))
}
fn structured_ref(&self) -> Option<&HostsLine> {
match self {
Line::Structured(line) => Some(line),
Line::Unstructured(_) => None,
}
}
}
impl fmt::Display for Line {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Line::Unstructured(line) => write!(f, "{}", line),
Line::Structured(hosts_line) => write!(f, "{}", hosts_line),
}
}
}
fn parse_line(line: &str) -> Line {
match hosts_line::<(&str, ErrorKind)>(&line) {
Ok((_, hosts_line)) => Line::Structured(hosts_line),
Err(_error) => Line::Unstructured(String::from(line)),
}
}
fn validate_alias(alias: &str) -> Result<(), Error> {
check_hostname::<VerboseError<&str>>(alias)
.map(|_| ())
.map_err(|error| match error {
Err::Incomplete(_) => Error::IncompleteAlias,
Err::Error(e) | Err::Failure(e) => Error::InvalidAliasFormat(convert_error(alias, e)),
})
}
fn iptables_rules_exist(options: &Options) -> Result<bool, Error> {
let rule_match = format!(
"-A OUTPUT -s 127.0.0.1/32 -d {alias}/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination 127.0.0.",
alias = options.alias,
);
let output = Command::new("iptables")
.args(&["-t", "nat", "-S", "OUTPUT"])
.output()?;
let stdout = Cursor::new(output.stdout);
let matched_lines: Vec<_> = stdout
.lines()
.filter_map(|line_ret| {
line_ret.ok().and_then(|line| {
let line: String = dbg!(line);
line.rfind(&rule_match).map(|index| dbg!((index, line)))
})
})
.collect();
let port = options.port.to_string();
    if let Some((idx, line)) = matched_lines.first() {
        // `idx` marks where the rule text starts; the port number is whatever
        // follows the matched prefix, so skip over `rule_match` before comparing.
        if dbg!(&line[*idx + rule_match.len()..]) == port {
            return Ok(true);
        } else {
            return Err(Error::AliasAlreadyInUse);
        }
    }
Ok(false)
}
fn write_iptables_rules(options: &Options) -> Result<(), Error> {
let status = Command::new("iptables")
.args(&[
"-t",
"nat",
"--append",
"OUTPUT",
"--protocol",
"tcp",
"--dport",
"80",
"--source",
"127.0.0.1",
"--destination",
&options.alias,
"--jump",
"DNAT",
"--to-destination",
&format!("127.0.0.{ip}:{port}", ip = "1", port = options.port),
])
.status()?;
if !status.success() {
return Err(Error::IptablesCommandFailed(status.code().unwrap_or(-1)));
}
Ok(())
}
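// For reference, one run with (hypothetical) alias `myapp.test` and port 3000
// issues the equivalent of:
//
//   iptables -t nat --append OUTPUT --protocol tcp --dport 80 \
//     --source 127.0.0.1 --destination myapp.test \
//     --jump DNAT --to-destination 127.0.0.1:3000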
fn next_unused_local_ip(in_use_ips: &HashSet<IpAddr>) -> IpAddr {
for b in 0..128 {
for c in 0..128 {
for d in 1..128 {
let ip = IpAddr::V4(Ipv4Addr::new(127, b, c, d));
if !in_use_ips.contains(&ip) {
return ip;
}
}
}
}
"127.0.0.1".parse().unwrap()
}
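// Scan-order sketch (follows the loops above): candidates run 127.0.0.1,
// 127.0.0.2, ..., 127.0.0.127, then 127.0.1.1 and so on -- a 128x128x127
// window of 127.0.0.0/8 -- and 127.0.0.1 is only returned as a fallback when
// every candidate already appears in /etc/hosts.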
fn run() -> Result<(), Error> {
let options = Options::from_args();
validate_alias(&options.alias)?;
let mut file = File::open(HOSTS_FILE)?;
file.seek(io::SeekFrom::Start(0))?;
let reader = io::BufReader::new(file);
let mut lines: Vec<_> = reader
.lines()
.map(|line_res| line_res.map(|line| parse_line(&line)))
.collect::<Result<Vec<_>, io::Error>>()?;
let mut file = OpenOptions::new().write(true).open(HOSTS_FILE)?;
file.seek(io::SeekFrom::Start(0))?;
let structured_refs: Vec<_> = lines
.iter()
.filter_map(|line| line.structured_ref())
.collect();
if structured_refs
.iter()
.find(|&x| *x.canonical_hostname == options.alias)
.is_none()
{
let in_use_ips: HashSet<IpAddr> = structured_refs.iter().map(|line| line.ip).collect();
let ip = next_unused_local_ip(&in_use_ips);
lines.push(Line::structured(ip, options.alias.clone()));
} else {
eprintln!("Alias already in /etc/hosts not adding a second entry");
}
for line in &lines {
writeln!(file, "{}", line)?;
}
file.sync_all()?;
drop(file);
if !iptables_rules_exist(&options)? {
write_iptables_rules(&options)?;
}
Ok(())
}
fn main() {
match run() {
Ok(()) => {}
Err(err) => {
eprintln!("local-domain-alias: {}", err);
std::process::exit(1);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
macro_rules! assert_parse_err {
{ $fn_name:ident($input:literal), rest == $expected_rest:literal } => {
match $fn_name::<VerboseError<&str>>($input) {
Ok((rest, value)) => {
assert_eq!($expected_rest, rest, "actual unparsed input");
panic!("parse unexpectedly succeeded: {ifn}({iarg}) rest: '{rest}', value: '{value:?}'",
ifn = stringify!($fn_name),
iarg = stringify!($input),
rest = rest,
value = value,
);
}
Err(err) => err,
}
}
}
macro_rules! assert_parse_ok {
{ $fn_name:ident($input:literal) } => {
match $fn_name::<VerboseError<&str>>($input) {
Err(Err::Incomplete(i)) =>
panic!("incomplete input: '{}' {:?}", $input, i),
Err(Err::Error(e)) | Err(Err::Failure(e)) => {
panic!("failed to parse: {ifn}({iarg})\n{converted}",
ifn = stringify!($fn_name),
iarg = stringify!($input),
converted = convert_error($input, e));
},
Ok(ret) => ret,
}
}
}
#[test]
fn parse_hostname() {
assert_parse_err!(hostname("123"), rest == "");
assert_parse_ok!(hostname("a123"));
assert_parse_ok!(hostname("abc"));
assert_parse_ok!(hostname("abc.def"));
assert_parse_ok!(hostname("abc-def"));
assert_parse_ok!(hostname("abc-def.ghi"));
}
#[test]
fn parse_check_hostname() {
assert_parse_err!(check_hostname("123"), rest == "");
assert_parse_ok!(check_hostname("a123"));
assert_parse_ok!(check_hostname("abc"));
assert_parse_ok!(check_hostname("abc-def"));
assert_parse_ok!(hostname("abc.def"));
assert_parse_ok!(hostname("abc-def.ghi"));
}
#[test]
fn parse_aliases() {
assert_parse_ok!(aliases("123"));
assert_parse_ok!(aliases("a123"));
assert_parse_ok!(aliases("abc"));
}
#[test]
fn parse_comment() {
assert_parse_err!(comment("123"), rest == "123");
assert_parse_err!(comment(""), rest == "");
assert_parse_ok!(comment("#"));
assert_parse_ok!(comment("#abc 123 !@# {}()[]"));
assert_parse_ok!(comment("#abc123!@#\nfoobar"));
}
}
dummy.py

##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Unit test dummies.
"""
from Acquisition import Implicit
from Acquisition import aq_base
from Acquisition import aq_inner
from Acquisition import aq_parent
from OFS.event import ObjectWillBeAddedEvent
from OFS.event import ObjectWillBeRemovedEvent
from OFS.interfaces import IObjectManager
from OFS.SimpleItem import Item
from zope.component.factory import Factory
from zope.container.contained import ObjectAddedEvent
from zope.container.contained import ObjectRemovedEvent
from zope.container.contained import notifyContainerModified
from zope.datetime import rfc1123_date
from zope.event import notify
from zope.interface import implementer
from ...ActionProviderBase import ActionProviderBase
from ...interfaces import IContentish
from ...interfaces import ISiteRoot
from ...interfaces import ITypeInformation
from ...PortalContent import PortalContent
from ..base.security import DummyUser
from ..base.security import OmnipotentUser
class DummyObject(Implicit):
"""
A dummy callable object.
Comes with getIconURL and restrictedTraverse
methods.
"""
def __init__(self, id='dummy', **kw):
self._id = id
self.__dict__.update(kw)
def __str__(self):
return self._id
def __call__(self):
return self._id
def restrictedTraverse(self, path):
if not path:
return self
parent = self
path_elements = path.split('/')
path_elements.reverse()
while path_elements:
path_element = path_elements.pop()
parent = getattr(parent, path_element)
return parent
def icon(self):
return f'{self._id} ICON'
def getIconURL(self):
return f'{self._id} ICON'
def getId(self):
return self._id
@implementer(ITypeInformation)
class DummyType(DummyObject):
""" A Dummy Type object """
def __init__(self, id='Dummy Content', title='Dummy Content', actions=()):
""" To fake out some actions, pass in a sequence of tuples where the
first element represents the ID or alias of the action and the
second element is the path to the object to be invoked, such as
a page template.
"""
self.id = self._id = id
self.title = title
self._actions = {}
self._setActions(actions)
def _setActions(self, actions=()):
for action_id, action_path in actions:
self._actions[action_id] = action_path
def Title(self):
return self.title
def allowType(self, contentType):
return True
def allowDiscussion(self):
return False
def queryMethodID(self, alias, default=None, context=None):
return self._actions.get(alias, default)
def isConstructionAllowed(self, container):
return True
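# Usage sketch (hypothetical action ids/paths, following the constructor
# docstring above):
#
#   >>> t = DummyType(actions=(('view', 'dummy_view'),))
#   >>> t.queryMethodID('view')
#   'dummy_view'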
@implementer(IContentish)
class DummyContent(PortalContent, Item):
"""
A Dummy piece of PortalContent
"""
meta_type = 'Dummy'
portal_type = 'Dummy Content'
url = 'foo_url'
after_add_called = before_delete_called = 0
def __init__(self, id='dummy', *args, **kw):
self.id = id
self._args = args
self._kw = {}
self._kw.update(kw)
self.reset()
self.catalog = kw.get('catalog', 0)
self.url = kw.get('url', None)
self.view_id = kw.get('view_id', None)
def manage_afterAdd(self, item, container):
self.after_add_called = 1
def manage_beforeDelete(self, item, container):
self.before_delete_called = 1
def absolute_url(self):
return self.url
def reset(self):
self.after_add_called = self.before_delete_called = 0
# Make sure normal Database export/import stuff doesn't trip us up.
def _getCopy(self, container):
return DummyContent(self.id, catalog=self.catalog)
def _safe_get(self, attr):
if self.catalog:
return getattr(self, attr, '')
else:
return getattr(self, attr)
def Title(self):
return self.title
def listCreators(self):
return self._safe_get('creators')
def Subject(self):
return self._safe_get('subject')
def Description(self):
return self._safe_get('description')
def created(self):
return self._safe_get('created_date')
def modified(self):
return self._safe_get('modified_date')
def Type(self):
return 'Dummy Content Title'
def __call__(self):
if self.view_id is None:
return DummyContent.inheritedAttribute('__call__')(self)
else:
# view_id control for testing
template = getattr(self, self.view_id)
if getattr(aq_base(template), 'isDocTemp', 0):
return template(self, self.REQUEST, self.REQUEST['RESPONSE'])
else:
return template()
DummyFactory = Factory(DummyContent)
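# Usage sketch (hypothetical id): zope.component.factory.Factory forwards its
# arguments straight to DummyContent, so
#
#   >>> ob = DummyFactory('doc1', catalog=1)
#   >>> ob.portal_type
#   'Dummy Content'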
class DummyFactoryDispatcher:
"""
Dummy Product Factory Dispatcher
"""
def __init__(self, folder):
self._folder = folder
def getId(self):
return 'DummyFactoryDispatcher'
def addFoo(self, id, *args, **kw):
if getattr(self._folder, '_prefix', None):
id = f'{self._folder._prefix}_{id}'
foo = DummyContent(id, *args, **kw)
self._folder._setObject(id, foo, suppress_events=True)
if getattr(self._folder, '_prefix', None):
return id
__roles__ = ('FooAdder',)
__allow_access_to_unprotected_subobjects__ = {'addFoo': 1}
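# Behavior sketch (hypothetical ids): with DummyFolder(fake_product=1,
# prefix='pre'), manage_addProduct['FooProduct'].addFoo('x') stores a
# DummyContent under the id 'pre_x' and returns that id; without a prefix the
# object is stored under 'x' and addFoo returns None.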
@implementer(IObjectManager)
class DummyFolder(DummyObject):
"""Dummy Container for testing.
"""
def __init__(self, id='dummy', fake_product=0, prefix=''):
self._prefix = prefix
self._id = id
if fake_product:
self.manage_addProduct = {
'FooProduct': DummyFactoryDispatcher(self)}
def _setOb(self, id, object):
setattr(self, id, object)
def _delOb(self, id):
delattr(self, id)
def _getOb(self, id):
return getattr(self, id)
def _setObject(self, id, object, suppress_events=False):
if not suppress_events:
notify(ObjectWillBeAddedEvent(object, self, id))
self._setOb(id, object)
object = self._getOb(id)
if hasattr(aq_base(object), 'manage_afterAdd'):
object.manage_afterAdd(object, self)
if not suppress_events:
notify(ObjectAddedEvent(object, self, id))
notifyContainerModified(self)
return object
def _delObject(self, id):
object = self._getOb(id)
notify(ObjectWillBeRemovedEvent(object, self, id))
if hasattr(aq_base(object), 'manage_beforeDelete'):
object.manage_beforeDelete(object, self)
self._delOb(id)
notify(ObjectRemovedEvent(object, self, id))
notifyContainerModified(self)
def getPhysicalPath(self):
p = aq_parent(aq_inner(self))
path = (self._id,)
if p is not None:
path = p.getPhysicalPath() + path
return path
def getId(self):
return self._id
def reindexObjectSecurity(self):
pass
def contentIds(self):
return ('user_bar',)
def all_meta_types(self):
return ({'name': 'Dummy', 'permission': 'addFoo'},)
def getTypeInfo(self):
return self.portal_types.getTypeInfo(self) # Can return None.
@implementer(ISiteRoot)
class DummySite(DummyFolder):
""" A dummy portal folder.
"""
_domain = 'http://www.foobar.com'
_path = 'bar'
def absolute_url(self, relative=0):
return '/'.join((self._domain, self._path, self._id))
def getPhysicalPath(self):
return ('', self._path, self._id)
def getPhysicalRoot(self):
return self
def unrestrictedTraverse(self, path, default=None, restricted=0):
if path == ['acl_users']:
return self.acl_users
        else:
            obj = self
            for id in path[3:]:
                obj = getattr(obj, id)
            return obj
def userdefined_roles(self):
return ('Member', 'Reviewer')
def getProperty(self, id, default=None):
return getattr(self, id, default)
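# Traversal sketch (hypothetical ids): paths are resolved attribute-wise below
# the site, skipping the 3-element physical-path prefix ('', _path, _id):
#
#   >>> site = DummySite('site')
#   >>> site.foo = DummyFolder('foo')
#   >>> site.unrestrictedTraverse(('', 'bar', 'site', 'foo')) is site.foo
#   True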
class DummyUserFolder(Implicit):
""" A dummy User Folder with 2 dummy Users.
"""
id = 'acl_users'
def __init__(self):
setattr(self, 'user_foo', DummyUser(id='user_foo'))
setattr(self, 'user_bar', DummyUser(id='user_bar'))
setattr(self, 'all_powerful_Oz', OmnipotentUser())
def getUsers(self):
pass
def getUser(self, name):
return getattr(self, name, None)
def getUserById(self, id, default=None):
return self.getUser(id)
def userFolderDelUsers(self, names):
for user_id in names:
delattr(self, user_id)
class DummyTool(Implicit, ActionProviderBase):
"""
This is a Dummy Tool that behaves as a
a MemberShipTool, a URLTool and an
Action Provider
"""
def __init__(self, anon=1):
self.anon = anon
# IMembershipTool
def getAuthenticatedMember(self):
return DummyUser()
def isAnonymousUser(self):
return self.anon
def checkPermission(self, permissionName, object, subobjectName=None):
return True
# ITypesTool
_type_id = 'Dummy Content'
_type_actions = (('', 'dummy_view'),
('view', 'dummy_view'),
('(Default)', 'dummy_view'))
def getTypeInfo(self, contentType):
return DummyType(self._type_id, title=self._type_id,
actions=self._type_actions)
def listTypeInfo(self, container=None):
return (DummyType(self._type_id, title=self._type_id,
actions=self._type_actions),)
def listContentTypes(self, container=None, by_metatype=0):
return (self._type_id,)
# IURLTool
def __call__(self, relative=0):
return self.getPortalObject().absolute_url()
def getPortalObject(self):
return aq_parent(aq_inner(self))
getPortalPath = __call__
# IWorkflowTool
test_notified = None
def notifyCreated(self, ob):
self.test_notified = ob
def getCatalogVariablesFor(self, obj):
return {}
class DummyCachingManager:
def getHTTPCachingHeaders(self, content, view_name, keywords, time=None):
return (
('foo', 'Foo'), ('bar', 'Bar'),
('test_path', '/'.join(content.getPhysicalPath())),
)
def getModTimeAndETag(self, content, view_method, keywords, time=None):
return (None, None, False)
def getPhysicalPath(self):
return ('baz',)
FAKE_ETAG = None # '--FAKE ETAG--'
class DummyCachingManagerWithPolicy(DummyCachingManager):
    # dummy fixture implementing a single policy:
    # - always set the last-modified date if available
    # - calculate the date using the modified method on content
    def getHTTPCachingHeaders(self, content, view_name, keywords, time=None):
        # if the object has a modified method, add it as last-modified;
        # start from an empty tuple so the name is always bound
        headers = ()
        if hasattr(content, 'modified'):
            headers = (('Last-modified', rfc1123_date(content.modified())),)
        return headers
    def getModTimeAndETag(self, content, view_method, keywords, time=None):
        modified_date = None
        if hasattr(content, 'modified'):
            modified_date = content.modified()
        set_last_modified = (modified_date is not None)
        return (modified_date, FAKE_ETAG, set_last_modified)
notifyContainerModified(self)
def getPhysicalPath(self):
p = aq_parent(aq_inner(self))
path = (self._id,)
if p is not None:
path = p.getPhysicalPath() + path
return path
def getId(self):
return self._id
def reindexObjectSecurity(self):
pass
def contentIds(self):
return ('user_bar',)
def all_meta_types(self):
return ({'name': 'Dummy', 'permission': 'addFoo'},)
def getTypeInfo(self):
return self.portal_types.getTypeInfo(self) # Can return None.
@implementer(ISiteRoot)
class DummySite(DummyFolder):
""" A dummy portal folder.
"""
_domain = 'http://www.foobar.com'
_path = 'bar'
def absolute_url(self, relative=0):
return '/'.join((self._domain, self._path, self._id))
def getPhysicalPath(self):
return ('', self._path, self._id)
def getPhysicalRoot(self):
return self
def unrestrictedTraverse(self, path, default=None, restricted=0):
if path == ['acl_users']:
return self.acl_users
else:
obj = self
for id in path[3:]:
obj = getattr(obj, id)
return obj
def userdefined_roles(self):
return ('Member', 'Reviewer')
def getProperty(self, id, default=None):
return getattr(self, id, default)
class DummyUserFolder(Implicit):
""" A dummy User Folder with 2 dummy Users.
"""
id = 'acl_users'
def __init__(self):
setattr(self, 'user_foo', DummyUser(id='user_foo'))
setattr(self, 'user_bar', DummyUser(id='user_bar'))
setattr(self, 'all_powerful_Oz', OmnipotentUser())
def getUsers(self):
pass
def getUser(self, name):
return getattr(self, name, None)
def getUserById(self, id, default=None):
return self.getUser(id)
def userFolderDelUsers(self, names):
for user_id in names:
delattr(self, user_id)
class DummyTool(Implicit, ActionProviderBase):
"""
This is a Dummy Tool that behaves as a
a MemberShipTool, a URLTool and an
Action Provider
"""
def __init__(self, anon=1):
self.anon = anon
# IMembershipTool
def getAuthenticatedMember(self):
return DummyUser()
def isAnonymousUser(self):
return self.anon
def checkPermission(self, permissionName, object, subobjectName=None):
return True
# ITypesTool
_type_id = 'Dummy Content'
_type_actions = (('', 'dummy_view'),
('view', 'dummy_view'),
('(Default)', 'dummy_view'))
def getTypeInfo(self, contentType):
return DummyType(self._type_id, title=self._type_id,
actions=self._type_actions)
def listTypeInfo(self, container=None):
return (DummyType(self._type_id, title=self._type_id,
actions=self._type_actions),)
def listContentTypes(self, container=None, by_metatype=0):
return (self._type_id,)
# IURLTool
def __call__(self, relative=0):
return self.getPortalObject().absolute_url()
def getPortalObject(self):
return aq_parent(aq_inner(self))
getPortalPath = __call__
# IWorkflowTool
test_notified = None
def notifyCreated(self, ob):
self.test_notified = ob
def getCatalogVariablesFor(self, obj):
return {}
class DummyCachingManager:
def getHTTPCachingHeaders(self, content, view_name, keywords, time=None):
return (
('foo', 'Foo'), ('bar', 'Bar'),
('test_path', '/'.join(content.getPhysicalPath())),
)
def getModTimeAndETag(self, content, view_method, keywords, time=None):
return (None, None, False)
def getPhysicalPath(self):
return ('baz',)
FAKE_ETAG = None # '--FAKE ETAG--'
class DummyCachingManagerWithPolicy(DummyCachingManager):
# dummy fixture implementing a single policy:
# - always set the last-modified date if available
# - calculate the date using the modified method on content
def getHTTPCachingHeaders(self, content, view_name, keywords, time=None):
# if the object has a modified method, add it as last-modified
if hasattr(content, 'modified'):
headers = (('Last-modified', rfc1123_date(content.modified())),)
return headers
def getModTimeAndETag(self, content, view_method, keywords, time=None):
modified_date = None
if hasattr(content, 'modified'):
modified_date = content.modified()
set_last_modified = (modified_date is not None)
return (modified_date, FAKE_ETAG, set_last_modified)
| created | identifier_name |
player.go | package player
import (
gameevent "fgame/fgame/game/event"
"fgame/fgame/game/global"
"fgame/fgame/game/goldequip/dao"
goldequipeventtypes "fgame/fgame/game/goldequip/event/types"
goldequiptemplate "fgame/fgame/game/goldequip/template"
goldequiptypes "fgame/fgame/game/goldequip/types"
inventorytypes "fgame/fgame/game/inventory/types"
"fgame/fgame/game/item/item"
itemtypes "fgame/fgame/game/item/types"
"fgame/fgame/game/player"
"fgame/fgame/game/player/types"
"fgame/fgame/game/scene/scene"
"fgame/fgame/pkg/idutil"
log "github.com/Sirupsen/logrus"
)
const (
maxLogLen = 50
)
// manager for a player's Yuanshen gold equipment
type PlayerGoldEquipDataManager struct {
p player.Player
// gold-equipment bag
goldEquipBag *BodyBag
// decompose logs
logList []*PlayerGoldEquipLogObject
// gold-equipment settings
equipSettingObj *PlayerGoldEquipSettingObject
// gold-equipment data
goldEquipObject *PlayerGoldEquipObject
}
func (m *PlayerGoldEquipDataManager) Player() player.Player {
return m.p
}
// load
func (m *PlayerGoldEquipDataManager) Load() (err error) {
// load equipment data
err = m.loadGoldEquipSlot()
if err != nil {
return
}
err = m.loadLog()
if err != nil {
return
}
err = m.loadSetting()
if err != nil {
return
}
err = m.loadGoldEquipObject()
if err != nil {
return
}
return nil
}
// after load
func (m *PlayerGoldEquipDataManager) AfterLoad() (err error) {
return nil
}
// heartbeat
func (m *PlayerGoldEquipDataManager) Heartbeat() {
}
// load gold-equipment (decompose) logs
func (m *PlayerGoldEquipDataManager) loadLog() (err error) {
entityList, err := dao.GetGoldEquipDao().GetPlayerGoldEquipLogEntityList(m.p.GetId())
if err != nil {
return
}
for _, entity := range entityList {
logObj := NewPlayerGoldEquipLogObject(m.p)
logObj.FromEntity(entity)
m.logList = append(m.logList, logObj)
}
return
}
// load gold-equipment settings
func (m *PlayerGoldEquipDataManager) loadSetting() (err error) {
entity, err := dao.GetGoldEquipDao().GetPlayerGoldEquipSettingEntity(m.p.GetId())
if err != nil {
return
}
if entity != nil {
obj := NewPlayerGoldEquipSettingObject(m.p)
obj.FromEntity(entity)
m.equipSettingObj = obj
} else {
m.initEquipSeting()
}
return
}
// load gold-equipment data
func (m *PlayerGoldEquipDataManager) loadGoldEquipObject() (err error) {
entity, err := dao.GetGoldEquipDao().GetPlayerGoldEquipEntity(m.p.GetId())
if err != nil {
return
}
if entity != nil {
obj := NewPlayerGoldEquipObject(m.p)
obj.FromEntity(entity)
m.goldEquipObject = obj
} else {
m.initGoldEquipObject()
}
return
}
// initialize the settings object
func (m *PlayerGoldEquipDataManager) initEquipSeting() {
obj := NewPlayerGoldEquipSettingObject(m.p)
id, _ := idutil.GetId()
now := global.GetGame().GetTimeService().Now()
obj.id = id
obj.fenJieIsAuto = 0
obj.fenJieQuality = 0
// zrc: modified
// TODO: cjb - treated as already checked by default; remove this note after review
obj.isCheckOldSt = int32(0)
obj.createTime = now
obj.SetModified()
m.equipSettingObj = obj
return
}
// initialize the gold-equipment data object
func (m *PlayerGoldEquipDataManager) initGoldEquipObject() {
obj := NewPlayerGoldEquipObject(m.p)
id, _ := idutil.GetId()
now := global.GetGame().GetTimeService().Now()
obj.id = id
obj.power = 0
obj.createTime = now
obj.SetModified()
m.goldEquipObject = obj
return
}
// get the gold-equipment bag
func (m *PlayerGoldEquipDataManager) GetGoldEquipBag() *BodyBag {
return m.goldEquipBag
}
// load the gold equipment worn on the body
func (m *PlayerGoldEquipDataManager) loadGoldEquipSlot() (err error) {
// load the gold-equipment slots
goldEquipSlotList, err := dao.GetGoldEquipDao().GetGoldEquipSlotList(m.p.GetId())
if err != nil {
return
}
slotList := make([]*PlayerGoldEquipSlotObject, 0, len(goldEquipSlotList))
for _, slot := range goldEquipSlotList {
pio := NewPlayerGoldEquipSlotObject(m.p)
err := pio.FromEntity(slot)
if err != nil {
return err
}
slotList = append(slotList, pio)
}
m.fixUpstarLevel(slotList)
m.goldEquipBag = createBodyBag(m.p, slotList)
return
}
// fix up the up-star enhancement level
func (m *PlayerGoldEquipDataManager) fixUpstarLevel(itemObjList []*PlayerGoldEquipSlotObject) {
for _, itemObj := range itemObjList {
if itemObj.IsEmpty() {
continue
}
goldequipData, ok := itemObj.propertyData.(*goldequiptypes.GoldEquipPropertyData)
if !ok {
continue
}
itemTemp := item.GetItemService().GetItem(int(itemObj.itemId))
if itemTemp.GetGoldEquipTemplate() == nil {
log.Info("itemid:", itemObj.itemId)
continue
}
maxLeve := itemTemp.GetGoldEquipTemplate().GetMaxUpstarLevel()
goldequipData.FixUpstarLevel(maxLeve)
itemObj.SetModified()
}
}
// get equipment by body position
func (m *PlayerGoldEquipDataManager) GetGoldEquipByPos(pos inventorytypes.BodyPositionType) *PlayerGoldEquipSlotObject {
item := m.goldEquipBag.GetByPosition(pos)
if item == nil {
return nil
}
return item
}
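// Illustrative sketch only (not part of the original file): reading one slot
// through GetGoldEquipByPos; the weapon position constant name is an assumption.
func exampleWeaponItemId(m *PlayerGoldEquipDataManager) int32 {
slot := m.GetGoldEquipByPos(inventorytypes.BodyPositionTypeWeapon) // constant name assumed
if slot == nil || slot.IsEmpty() {
return 0
}
return slot.GetItemId()
}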
// put on equipment
func (m *PlayerGoldEquipDataManager) PutOn(pos inventorytypes.BodyPositionType, itemId int32, level int32, bind itemtypes.ItemBindType, propertyData inventorytypes.ItemPropertyData) (flag bool) {
flag = m.goldEquipBag.PutOn(pos, itemId, level, bind, propertyData)
if flag {
gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipPutOn, m.p, itemId)
}
return
}
// take off equipment
func (m *PlayerGoldEquipDataManager) TakeOff(pos inventorytypes.BodyPositionType) (itemId int32) {
// check whether it can be taken off first
flag := m.IfCanTakeOff(pos)
if !flag {
return
}
slot := m.goldEquipBag.GetByPosition(pos)
data, _ := slot.propertyData.(*goldequiptypes.GoldEquipPropertyData)
openlightlevel := data.OpenLightLevel
strengthlevel := slot.newStLevel
upstarlevel := slot.level
itemId = m.goldEquipBag.TakeOff(pos)
if itemId > 0 {
gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipTakeOff, m.p, itemId)
eventData := goldequipeventtypes.CreatePlayerGoldEquipStatusEventData(pos, openlightlevel, strengthlevel, upstarlevel)
gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipStatusWhenTakeOff, m.p, eventData)
}
return
}
// count equipped pieces per suit group
func (m *PlayerGoldEquipDataManager) GetGoldEquipGroupNum() map[int32]int32 {
curGroupMap := make(map[int32]int32)
for _, slot := range m.goldEquipBag.GetAll() {
if slot.IsEmpty() {
continue
}
itemTemp := item.GetItemService().GetItem(int(slot.GetItemId()))
groupId := itemTemp.GetGoldEquipTemplate().SuitGroup
if groupId == 0 {
continue
}
_, ok := curGroupMap[groupId]
if ok {
curGroupMap[groupId] += int32(1)
} else {
curGroupMap[groupId] = int32(1)
}
}
return curGroupMap
}
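// Sketch (not from the original source): checking how many pieces of one suit
// are equipped; the suit group id 3 is invented purely for the example.
func exampleSuitPieceCount(m *PlayerGoldEquipDataManager) int32 {
return m.GetGoldEquipGroupNum()[3] // missing keys yield zero
}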
// equipment changed: return changed slots and reset the change flags
func (pidm *PlayerGoldEquipDataManager) GetChangedEquipmentSlotAndReset() (itemList []*PlayerGoldEquipSlotObject) {
return pidm.goldEquipBag.GetChangedSlotAndReset()
}
// whether the equipment can be taken off
func (m *PlayerGoldEquipDataManager) IfCanTakeOff(pos inventorytypes.BodyPositionType) bool {
item := m.GetGoldEquipByPos(pos)
if item == nil {
return false
}
if item.IsEmpty() {
return false
}
return true
}
// open light (consecration)
func (m *PlayerGoldEquipDataManager) OpenLight(pos inventorytypes.BodyPositionType, isSuccess bool) bool {
item := m.GetGoldEquipByPos(pos)
if item == nil {
return false
}
if item.IsEmpty() {
return false
}
propertyData := item.propertyData.(*goldequiptypes.GoldEquipPropertyData)
if isSuccess {
propertyData.OpenLightLevel += 1
propertyData.OpenTimes = 0
} else {
propertyData.OpenTimes += 1
}
now := global.GetGame().GetTimeService().Now()
item.updateTime = now
item.SetModified()
return true
}
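// Sketch only: OpenLight resets OpenTimes on success and increments it on
// failure, so OpenTimes acts as a pity counter; this helper just reads it.
func exampleOpenLightFailures(m *PlayerGoldEquipDataManager, pos inventorytypes.BodyPositionType) int32 {
slot := m.GetGoldEquipByPos(pos)
if slot == nil || slot.IsEmpty() {
return 0
}
data, ok := slot.GetPropertyData().(*goldequiptypes.GoldEquipPropertyData)
if !ok {
return 0
}
return data.OpenTimes // field type assumed to be int32
}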
// total up-star enhancement level
func (m *PlayerGoldEquipDataManager) CountTotalUpstarLevel() int32 {
slotList := m.goldEquipBag.GetAll()
totalLevel := int32(0)
for _, slot := range slotList {
totalLevel += slot.newStLevel
}
return totalLevel
}
// total level of socketed gems
func (m *PlayerGoldEquipDataManager) CountTotalGemLevel() int32 {
slotList := m.goldEquipBag.GetAll()
totalLevel := int32(0)
for _, slot := range slotList {
for _, itemId := range slot.GemInfo {
itemTemp := item.GetItemService().GetItem(int(itemId))
totalLevel += itemTemp.TypeFlag2
}
}
return totalLevel
}
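// Sketch with an invented formula: combining the two totals into one score;
// the weights 10 and 5 are illustrative, not taken from the original code.
func exampleEnhancementScore(m *PlayerGoldEquipDataManager) int64 {
return int64(m.CountTotalUpstarLevel())*10 + int64(m.CountTotalGemLevel())*5
}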
func (m *PlayerGoldEquipDataManager) ToGoldEquipSlotList() (slotInfoList []*goldequiptypes.GoldEquipSlotInfo) {
for _, slot := range m.goldEquipBag.GetAll() {
slotInfo := &goldequiptypes.GoldEquipSlotInfo{}
slotInfo.SlotId = int32(slot.GetSlotId())
slotInfo.Level = slot.GetLevel()
slotInfo.NewStLevel = slot.GetNewStLevel()
slotInfo.ItemId = slot.GetItemId()
slotInfo.GemUnlockInfo = slot.GemUnlockInfo
slotInfo.Gems = slot.GemInfo
slotInfo.CastingSpiritInfo = slot.CastingSpiritInfo
slotInfo.ForgeSoulInfo = slot.ForgeSoulInfo
slotInfoList = append(slotInfoList, slotInfo)
data, ok := slot.GetPropertyData().(*goldequiptypes.GoldEquipPropertyData)
if !ok {
// TODO xzk: temporary workaround for a bug
slotInfo.PropertyData = goldequiptypes.NewGoldEquipPropertyData()
slotInfo.PropertyData.InitBase()
} else {
slotInfo.PropertyData = data
}
}
return
}
// append a decompose log entry, evicting the oldest once the cap is reached
func (m *PlayerGoldEquipDataManager) AddGoldEquipLog(fenJieItemIdList []int32, rewItemStr string) {
now := global.GetGame().GetTimeService().Now()
var obj *PlayerGoldEquipLogObject
if len(m.logList) >= int(maxLogLen) {
obj = m.logList[0]
m.logList = m.logList[1:]
} else {
obj = NewPlayerGoldEquipLogObject(m.p)
id, _ := idutil.GetId()
obj.id = id
obj.createTime = now
}
obj.fenJieItemIdList = fenJieItemIdList
obj.rewItemStr = rewItemStr
obj.updateTime = now
obj.SetModified()
m.logList = append(m.logList, obj)
}
// get the gold-equipment log list
func (m *PlayerGoldEquipDataManager) GetLogList() []*PlayerGoldEquipLogObject {
return m.logList
}
// set the auto-decompose options
func (m *PlayerGoldEquipDataManager) SetAutoFenJie(isAuto int32, quality itemtypes.ItemQualityType, zhuanShu int32) {
now := global.GetGame().GetTimeService().Now()
m.equipSettingObj.fenJieIsAuto = isAuto
m.equipSettingObj.fenJieQuality = quality
m.equipSettingObj.fenJieZhuanShu = zhuanShu
m.equipSettingObj.updateTime = now
m.equipSettingObj.SetModified()
// TODO: xzk25 backend audit log
}
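// Sketch: enabling auto-decompose; the quality constant name is an assumption,
// and the zhuanShu threshold 0 is used here purely for illustration.
func exampleEnableAutoDecompose(m *PlayerGoldEquipDataManager) {
m.SetAutoFenJie(1, itemtypes.ItemQualityTypeBlue, 0) // constant name assumed
}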
// get the gold-equipment settings
func (m *PlayerGoldEquipDataManager) GetGoldEquipSetting() *PlayerGoldEquipSettingObject | temId))
if itemTemplate == nil {
continue
}
goldequipTemplate := itemTemplate.GetGoldEquipTemplate()
if goldequipTemplate == nil {
continue
}
if !goldequipTemplate.IsGodCastingEquip() {
continue
}
Loop:
for soulType, info := range obj.ForgeSoulInfo {
forgeSoulTemplate := goldequiptemplate.GetGoldEquipTemplateService().GetForgeSoulTemplate(obj.GetSlotId(), soulType)
if forgeSoulTemplate == nil {
continue
}
soulLevelTemplate := forgeSoulTemplate.GetLevelTemplate(info.Level)
if soulLevelTemplate == nil {
continue
}
for _, skillObj := range skillList {
if skillObj.GetSkillId() == forgeSoulTemplate.GetTeshuSkillTemp().SkillId {
skillObj.AddRate(soulLevelTemplate.ChufaRate, soulLevelTemplate.DikangRate)
continue Loop
}
}
skillObj := scene.CreateTeshuSkillObject(forgeSoulTemplate.GetTeshuSkillTemp().SkillId, soulLevelTemplate.ChufaRate, soulLevelTemplate.DikangRate)
skillList = append(skillList, skillObj)
}
}
return skillList
}
// upgrade a forge-soul level
func (m *PlayerGoldEquipDataManager) UplevelSoul(bodyPos inventorytypes.BodyPositionType, soulType goldequiptypes.ForgeSoulType, success bool) {
m.goldEquipBag.UplevelSoul(bodyPos, soulType, success)
}
// get Yuanshen gold-equipment power
func (m *PlayerGoldEquipDataManager) GetPower() int64 {
return m.goldEquipObject.power
}
// set Yuanshen gold-equipment power
func (m *PlayerGoldEquipDataManager) SetPower(power int64) {
if power <= 0 {
return
}
if m.goldEquipObject.power == power {
return
}
now := global.GetGame().GetTimeService().Now()
m.goldEquipObject.power = power
m.goldEquipObject.updateTime = now
m.goldEquipObject.SetModified()
}
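// Sketch: SetPower is a guarded setter (non-positive or unchanged values are
// ignored), so callers may pass a recomputed value unconditionally; the
// formula below is invented for illustration.
func exampleRecomputePower(m *PlayerGoldEquipDataManager) {
m.SetPower(int64(m.CountTotalUpstarLevel()) * 100)
}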
func CreatePlayerGoldEquipDataManager(p player.Player) player.PlayerDataManager {
m := &PlayerGoldEquipDataManager{}
m.p = p
return m
}
func init() {
player.RegisterPlayerDataManager(types.PlayerGoldEquipDataManagerType, player.PlayerDataManagerFactoryFunc(CreatePlayerGoldEquipDataManager))
}
| {
return m.equipSettingObj
}
// get special (forge-soul) skills
func (m *PlayerGoldEquipDataManager) GetTeShuSkillList() (skillList []*scene.TeshuSkillObject) {
for _, obj := range m.goldEquipBag.GetAll() {
if obj.IsEmpty() {
continue
}
itemTemplate := item.GetItemService().GetItem(int(obj.i | identifier_body |
player.go | package player
import (
gameevent "fgame/fgame/game/event"
"fgame/fgame/game/global"
"fgame/fgame/game/goldequip/dao"
goldequipeventtypes "fgame/fgame/game/goldequip/event/types"
goldequiptemplate "fgame/fgame/game/goldequip/template"
goldequiptypes "fgame/fgame/game/goldequip/types"
inventorytypes "fgame/fgame/game/inventory/types"
"fgame/fgame/game/item/item"
itemtypes "fgame/fgame/game/item/types"
"fgame/fgame/game/player"
"fgame/fgame/game/player/types"
"fgame/fgame/game/scene/scene"
"fgame/fgame/pkg/idutil"
log "github.com/Sirupsen/logrus"
)
const (
maxLogLen = 50
)
// manager for a player's Yuanshen gold equipment
type PlayerGoldEquipDataManager struct {
p player.Player
// gold-equipment bag
goldEquipBag *BodyBag
// decompose logs
logList []*PlayerGoldEquipLogObject
// gold-equipment settings
equipSettingObj *PlayerGoldEquipSettingObject
// gold-equipment data
goldEquipObject *PlayerGoldEquipObject
}
func (m *PlayerGoldEquipDataManager) Player() player.Player {
return m.p
}
// load
func (m *PlayerGoldEquipDataManager) Load() (err error) {
// load equipment data
err = m.loadGoldEquipSlot()
if err != nil {
return
}
err = m.loadLog()
if err != nil {
return
}
err = m.loadSetting()
if err != nil {
return
}
err = m.loadGoldEquipObject()
if err != nil {
return
}
return nil
}
// after load
func (m *PlayerGoldEquipDataManager) AfterLoad() (err error) {
return nil
}
// heartbeat
func (m *PlayerGoldEquipDataManager) Heartbeat() {
}
// load gold-equipment (decompose) logs
func (m *PlayerGoldEquipDataManager) loadLog() (err error) {
entityList, err := dao.GetGoldEquipDao().GetPlayerGoldEquipLogEntityList(m.p.GetId())
if err != nil {
return
}
for _, entity := range entityList {
logObj := NewPlayerGoldEquipLogObject(m.p)
logObj.FromEntity(entity)
m.logList = append(m.logList, logObj)
}
return
}
// load gold-equipment settings
func (m *PlayerGoldEquipDataManager) loadSetting() (err error) {
entity, err := dao.GetGoldEquipDao().GetPlayerGoldEquipSettingEntity(m.p.GetId())
if err != nil {
return
}
if entity != nil {
obj := NewPlayerGoldEquipSettingObject(m.p)
obj.FromEntity(entity)
m.equipSettingObj = obj
} else {
m.initEquipSeting()
}
return
}
// load gold-equipment data
func (m *PlayerGoldEquipDataManager) loadGoldEquipObject() (err error) {
entity, err := dao.GetGoldEquipDao().GetPlayerGoldEquipEntity(m.p.GetId())
if err != nil {
return
}
if entity != nil {
obj := NewPlayerGoldEquipObject(m.p)
obj.FromEntity(entity)
m.goldEquipObject = | {
m.initGoldEquipObject()
}
return
}
// initialize the settings object
func (m *PlayerGoldEquipDataManager) initEquipSeting() {
obj := NewPlayerGoldEquipSettingObject(m.p)
id, _ := idutil.GetId()
now := global.GetGame().GetTimeService().Now()
obj.id = id
obj.fenJieIsAuto = 0
obj.fenJieQuality = 0
// zrc: modified
// TODO: cjb - treated as already checked by default; remove this note after review
obj.isCheckOldSt = int32(0)
obj.createTime = now
obj.SetModified()
m.equipSettingObj = obj
return
}
// initialize the gold-equipment data object
func (m *PlayerGoldEquipDataManager) initGoldEquipObject() {
obj := NewPlayerGoldEquipObject(m.p)
id, _ := idutil.GetId()
now := global.GetGame().GetTimeService().Now()
obj.id = id
obj.power = 0
obj.createTime = now
obj.SetModified()
m.goldEquipObject = obj
return
}
// get the gold-equipment bag
func (m *PlayerGoldEquipDataManager) GetGoldEquipBag() *BodyBag {
return m.goldEquipBag
}
// load the gold equipment worn on the body
func (m *PlayerGoldEquipDataManager) loadGoldEquipSlot() (err error) {
// load the gold-equipment slots
goldEquipSlotList, err := dao.GetGoldEquipDao().GetGoldEquipSlotList(m.p.GetId())
if err != nil {
return
}
slotList := make([]*PlayerGoldEquipSlotObject, 0, len(goldEquipSlotList))
for _, slot := range goldEquipSlotList {
pio := NewPlayerGoldEquipSlotObject(m.p)
err := pio.FromEntity(slot)
if err != nil {
return err
}
slotList = append(slotList, pio)
}
m.fixUpstarLevel(slotList)
m.goldEquipBag = createBodyBag(m.p, slotList)
return
}
// fix up the up-star enhancement level
func (m *PlayerGoldEquipDataManager) fixUpstarLevel(itemObjList []*PlayerGoldEquipSlotObject) {
for _, itemObj := range itemObjList {
if itemObj.IsEmpty() {
continue
}
goldequipData, ok := itemObj.propertyData.(*goldequiptypes.GoldEquipPropertyData)
if !ok {
continue
}
itemTemp := item.GetItemService().GetItem(int(itemObj.itemId))
if itemTemp.GetGoldEquipTemplate() == nil {
log.Info("itemid:", itemObj.itemId)
continue
}
maxLeve := itemTemp.GetGoldEquipTemplate().GetMaxUpstarLevel()
goldequipData.FixUpstarLevel(maxLeve)
itemObj.SetModified()
}
}
// get equipment by body position
func (m *PlayerGoldEquipDataManager) GetGoldEquipByPos(pos inventorytypes.BodyPositionType) *PlayerGoldEquipSlotObject {
item := m.goldEquipBag.GetByPosition(pos)
if item == nil {
return nil
}
return item
}
// put on equipment
func (m *PlayerGoldEquipDataManager) PutOn(pos inventorytypes.BodyPositionType, itemId int32, level int32, bind itemtypes.ItemBindType, propertyData inventorytypes.ItemPropertyData) (flag bool) {
flag = m.goldEquipBag.PutOn(pos, itemId, level, bind, propertyData)
if flag {
gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipPutOn, m.p, itemId)
}
return
}
// take off equipment
func (m *PlayerGoldEquipDataManager) TakeOff(pos inventorytypes.BodyPositionType) (itemId int32) {
// check whether it can be taken off first
flag := m.IfCanTakeOff(pos)
if !flag {
return
}
slot := m.goldEquipBag.GetByPosition(pos)
data, _ := slot.propertyData.(*goldequiptypes.GoldEquipPropertyData)
openlightlevel := data.OpenLightLevel
strengthlevel := slot.newStLevel
upstarlevel := slot.level
itemId = m.goldEquipBag.TakeOff(pos)
if itemId > 0 {
gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipTakeOff, m.p, itemId)
eventData := goldequipeventtypes.CreatePlayerGoldEquipStatusEventData(pos, openlightlevel, strengthlevel, upstarlevel)
gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipStatusWhenTakeOff, m.p, eventData)
}
return
}
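// Sketch: taking a piece off and logging the result; the helmet position
// constant name is assumed, and TakeOff returns 0 when nothing came off.
func exampleTakeOffHelmet(m *PlayerGoldEquipDataManager) {
if itemId := m.TakeOff(inventorytypes.BodyPositionTypeHelmet); itemId > 0 { // constant name assumed
log.Info("took off item:", itemId)
}
}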
// count equipped pieces per suit group
func (m *PlayerGoldEquipDataManager) GetGoldEquipGroupNum() map[int32]int32 {
curGroupMap := make(map[int32]int32)
for _, slot := range m.goldEquipBag.GetAll() {
if slot.IsEmpty() {
continue
}
itemTemp := item.GetItemService().GetItem(int(slot.GetItemId()))
groupId := itemTemp.GetGoldEquipTemplate().SuitGroup
if groupId == 0 {
continue
}
_, ok := curGroupMap[groupId]
if ok {
curGroupMap[groupId] += int32(1)
} else {
curGroupMap[groupId] = int32(1)
}
}
return curGroupMap
}
// equipment changed: return changed slots and reset the change flags
func (pidm *PlayerGoldEquipDataManager) GetChangedEquipmentSlotAndReset() (itemList []*PlayerGoldEquipSlotObject) {
return pidm.goldEquipBag.GetChangedSlotAndReset()
}
// whether the equipment can be taken off
func (m *PlayerGoldEquipDataManager) IfCanTakeOff(pos inventorytypes.BodyPositionType) bool {
item := m.GetGoldEquipByPos(pos)
if item == nil {
return false
}
if item.IsEmpty() {
return false
}
return true
}
// open light (consecration)
func (m *PlayerGoldEquipDataManager) OpenLight(pos inventorytypes.BodyPositionType, isSuccess bool) bool {
item := m.GetGoldEquipByPos(pos)
if item == nil {
return false
}
if item.IsEmpty() {
return false
}
propertyData := item.propertyData.(*goldequiptypes.GoldEquipPropertyData)
if isSuccess {
propertyData.OpenLightLevel += 1
propertyData.OpenTimes = 0
} else {
propertyData.OpenTimes += 1
}
now := global.GetGame().GetTimeService().Now()
item.updateTime = now
item.SetModified()
return true
}
// total up-star enhancement level
func (m *PlayerGoldEquipDataManager) CountTotalUpstarLevel() int32 {
slotList := m.goldEquipBag.GetAll()
totalLevel := int32(0)
for _, slot := range slotList {
totalLevel += slot.newStLevel
}
return totalLevel
}
// total level of socketed gems
func (m *PlayerGoldEquipDataManager) CountTotalGemLevel() int32 {
slotList := m.goldEquipBag.GetAll()
totalLevel := int32(0)
for _, slot := range slotList {
for _, itemId := range slot.GemInfo {
itemTemp := item.GetItemService().GetItem(int(itemId))
totalLevel += itemTemp.TypeFlag2
}
}
return totalLevel
}
func (m *PlayerGoldEquipDataManager) ToGoldEquipSlotList() (slotInfoList []*goldequiptypes.GoldEquipSlotInfo) {
for _, slot := range m.goldEquipBag.GetAll() {
slotInfo := &goldequiptypes.GoldEquipSlotInfo{}
slotInfo.SlotId = int32(slot.GetSlotId())
slotInfo.Level = slot.GetLevel()
slotInfo.NewStLevel = slot.GetNewStLevel()
slotInfo.ItemId = slot.GetItemId()
slotInfo.GemUnlockInfo = slot.GemUnlockInfo
slotInfo.Gems = slot.GemInfo
slotInfo.CastingSpiritInfo = slot.CastingSpiritInfo
slotInfo.ForgeSoulInfo = slot.ForgeSoulInfo
slotInfoList = append(slotInfoList, slotInfo)
data, ok := slot.GetPropertyData().(*goldequiptypes.GoldEquipPropertyData)
if !ok {
// TODO xzk: temporary workaround for a bug
slotInfo.PropertyData = goldequiptypes.NewGoldEquipPropertyData()
slotInfo.PropertyData.InitBase()
} else {
slotInfo.PropertyData = data
}
}
return
}
// append a decompose log entry, evicting the oldest once the cap is reached
func (m *PlayerGoldEquipDataManager) AddGoldEquipLog(fenJieItemIdList []int32, rewItemStr string) {
now := global.GetGame().GetTimeService().Now()
var obj *PlayerGoldEquipLogObject
if len(m.logList) >= int(maxLogLen) {
obj = m.logList[0]
m.logList = m.logList[1:]
} else {
obj = NewPlayerGoldEquipLogObject(m.p)
id, _ := idutil.GetId()
obj.id = id
obj.createTime = now
}
obj.fenJieItemIdList = fenJieItemIdList
obj.rewItemStr = rewItemStr
obj.updateTime = now
obj.SetModified()
m.logList = append(m.logList, obj)
}
// get the gold-equipment log list
func (m *PlayerGoldEquipDataManager) GetLogList() []*PlayerGoldEquipLogObject {
return m.logList
}
// set the auto-decompose options
func (m *PlayerGoldEquipDataManager) SetAutoFenJie(isAuto int32, quality itemtypes.ItemQualityType, zhuanShu int32) {
now := global.GetGame().GetTimeService().Now()
m.equipSettingObj.fenJieIsAuto = isAuto
m.equipSettingObj.fenJieQuality = quality
m.equipSettingObj.fenJieZhuanShu = zhuanShu
m.equipSettingObj.updateTime = now
m.equipSettingObj.SetModified()
// TODO: xzk25 backend audit log
}
// get the gold-equipment settings
func (m *PlayerGoldEquipDataManager) GetGoldEquipSetting() *PlayerGoldEquipSettingObject {
return m.equipSettingObj
}
// get special (forge-soul) skills
func (m *PlayerGoldEquipDataManager) GetTeShuSkillList() (skillList []*scene.TeshuSkillObject) {
for _, obj := range m.goldEquipBag.GetAll() {
if obj.IsEmpty() {
continue
}
itemTemplate := item.GetItemService().GetItem(int(obj.itemId))
if itemTemplate == nil {
continue
}
goldequipTemplate := itemTemplate.GetGoldEquipTemplate()
if goldequipTemplate == nil {
continue
}
if !goldequipTemplate.IsGodCastingEquip() {
continue
}
Loop:
for soulType, info := range obj.ForgeSoulInfo {
forgeSoulTemplate := goldequiptemplate.GetGoldEquipTemplateService().GetForgeSoulTemplate(obj.GetSlotId(), soulType)
if forgeSoulTemplate == nil {
continue
}
soulLevelTemplate := forgeSoulTemplate.GetLevelTemplate(info.Level)
if soulLevelTemplate == nil {
continue
}
for _, skillObj := range skillList {
if skillObj.GetSkillId() == forgeSoulTemplate.GetTeshuSkillTemp().SkillId {
skillObj.AddRate(soulLevelTemplate.ChufaRate, soulLevelTemplate.DikangRate)
continue Loop
}
}
skillObj := scene.CreateTeshuSkillObject(forgeSoulTemplate.GetTeshuSkillTemp().SkillId, soulLevelTemplate.ChufaRate, soulLevelTemplate.DikangRate)
skillList = append(skillList, skillObj)
}
}
return skillList
}
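// Sketch mirroring the merge logic above: accumulate rates on an existing
// skill or append a new one; the int32 parameter types are assumptions.
func exampleMergeTeshuSkill(list []*scene.TeshuSkillObject, skillId int32, chufaRate int32, dikangRate int32) []*scene.TeshuSkillObject {
for _, s := range list {
if s.GetSkillId() == skillId {
s.AddRate(chufaRate, dikangRate)
return list
}
}
return append(list, scene.CreateTeshuSkillObject(skillId, chufaRate, dikangRate))
}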
// upgrade a forge-soul level
func (m *PlayerGoldEquipDataManager) UplevelSoul(bodyPos inventorytypes.BodyPositionType, soulType goldequiptypes.ForgeSoulType, success bool) {
m.goldEquipBag.UplevelSoul(bodyPos, soulType, success)
}
// get Yuanshen gold-equipment power
func (m *PlayerGoldEquipDataManager) GetPower() int64 {
return m.goldEquipObject.power
}
// set Yuanshen gold-equipment power
func (m *PlayerGoldEquipDataManager) SetPower(power int64) {
if power <= 0 {
return
}
if m.goldEquipObject.power == power {
return
}
now := global.GetGame().GetTimeService().Now()
m.goldEquipObject.power = power
m.goldEquipObject.updateTime = now
m.goldEquipObject.SetModified()
}
func CreatePlayerGoldEquipDataManager(p player.Player) player.PlayerDataManager {
m := &PlayerGoldEquipDataManager{}
m.p = p
return m
}
func init() {
player.RegisterPlayerDataManager(types.PlayerGoldEquipDataManagerType, player.PlayerDataManagerFactoryFunc(CreatePlayerGoldEquipDataManager))
}
| obj
} else | conditional_block |
player.go | package player
import (
gameevent "fgame/fgame/game/event"
"fgame/fgame/game/global"
"fgame/fgame/game/goldequip/dao"
goldequipeventtypes "fgame/fgame/game/goldequip/event/types"
goldequiptemplate "fgame/fgame/game/goldequip/template"
goldequiptypes "fgame/fgame/game/goldequip/types"
inventorytypes "fgame/fgame/game/inventory/types"
"fgame/fgame/game/item/item"
itemtypes "fgame/fgame/game/item/types"
"fgame/fgame/game/player"
"fgame/fgame/game/player/types"
"fgame/fgame/game/scene/scene"
"fgame/fgame/pkg/idutil"
log "github.com/Sirupsen/logrus"
)
const (
maxLogLen = 50
)
// manager for a player's Yuanshen gold equipment
type PlayerGoldEquipDataManager struct {
p player.Player
// gold-equipment bag
goldEquipBag *BodyBag
// decompose logs
logList []*PlayerGoldEquipLogObject
// gold-equipment settings
equipSettingObj *PlayerGoldEquipSettingObject
// gold-equipment data
goldEquipObject *PlayerGoldEquipObject
}
func (m *PlayerGoldEquipDataManager) Player() player.Player {
return m.p
}
// load
func (m *PlayerGoldEquipDataManager) Load() (err error) {
// load equipment data
err = m.loadGoldEquipSlot()
if err != nil {
return
}
err = m.loadLog()
if err != nil {
return
}
err = m.loadSetting()
if err != nil {
return
}
err = m.loadGoldEquipObject()
if err != nil {
return
}
return nil
}
// after load
func (m *PlayerGoldEquipDataManager) AfterLoad() (err error) {
return nil
}
// heartbeat
func (m *PlayerGoldEquipDataManager) Heartbeat() {
}
// load gold-equipment (decompose) logs
func (m *PlayerGoldEquipDataManager) loadLog() (err error) {
entityList, err := dao.GetGoldEquipDao().GetPlayerGoldEquipLogEntityList(m.p.GetId())
if err != nil {
return
}
for _, entity := range entityList {
logObj := NewPlayerGoldEquipLogObject(m.p)
logObj.FromEntity(entity)
m.logList = append(m.logList, logObj)
}
return
}
// load gold-equipment settings
func (m *PlayerGoldEquipDataManager) loadSetting() (err error) {
entity, err := dao.GetGoldEquipDao().GetPlayerGoldEquipSettingEntity(m.p.GetId())
if err != nil {
return
}
if entity != nil {
obj := NewPlayerGoldEquipSettingObject(m.p)
obj.FromEntity(entity)
m.equipSettingObj = obj
} else {
m.initEquipSeting()
}
return
}
// load gold-equipment data
func (m *PlayerGoldEquipDataManager) loadGoldEquipObject() (err error) {
entity, err := dao.GetGoldEquipDao().GetPlayerGoldEquipEntity(m.p.GetId())
if err != nil {
return
}
if entity != nil {
obj := NewPlayerGoldEquipObject(m.p)
obj.FromEntity(entity)
m.goldEquipObject = obj
} else {
m.initGoldEquipObject()
}
return
}
// initialize the settings object
func (m *PlayerGoldEquipDataManager) initEquipSeting() {
obj := NewPlayerGoldEquipSettingObject(m.p)
id, _ := idutil.GetId()
now := global.GetGame().GetTimeService().Now()
obj.id = id
obj.fenJieIsAuto = 0
obj.fenJieQuality = 0
// zrc: modified
// TODO: cjb - treated as already checked by default; remove this note after review
obj.isCheckOldSt = int32(0)
obj.createTime = now
obj.SetModified()
m.equipSettingObj = obj
return
}
// initialize the gold-equipment data object
func (m *PlayerGoldEquipDataManager) initGoldEquipObject() {
obj := NewPlayerGoldEquipObject(m.p)
id, _ := idutil.GetId()
now := global.GetGame().GetTimeService().Now()
obj.id = id
obj.power = 0
obj.createTime = now
obj.SetModified()
m.goldEquipObject = obj
return
}
// get the gold-equipment bag
func (m *PlayerGoldEquipDataManager) GetGoldEquipBag() *BodyBag {
return m.goldEquipBag
}
// load the gold equipment worn on the body
func (m *PlayerGoldEquipDataManager) loadGoldEquipSlot() (err error) {
// load the gold-equipment slots
goldEquipSlotList, err := dao.GetGoldEquipDao().GetGoldEquipSlotList(m.p.GetId())
if err != nil {
return
}
slotList := make([]*PlayerGoldEquipSlotObject, 0, len(goldEquipSlotList))
for _, slot := range goldEquipSlotList {
pio := NewPlayerGoldEquipSlotObject(m.p)
err := pio.FromEntity(slot)
if err != nil {
return err
}
slotList = append(slotList, pio)
}
m.fixUpstarLevel(slotList)
m.goldEquipBag = createBodyBag(m.p, slotList)
return
}
// fix up the up-star enhancement level
func (m *PlayerGoldEquipDataManager) fixUpstarLevel(itemObjList []*PlayerGoldEquipSlotObject) {
for _, itemObj := range itemObjList {
if itemObj.IsEmpty() {
continue
}
goldequipData, ok := itemObj.propertyData.(*goldequiptypes.GoldEquipPropertyData)
if !ok {
continue
}
itemTemp := item.GetItemService().GetItem(int(itemObj.itemId))
if itemTemp.GetGoldEquipTemplate() == nil {
log.Info("itemid:", itemObj.itemId)
continue
}
maxLeve := itemTemp.GetGoldEquipTemplate().GetMaxUpstarLevel()
goldequipData.FixUpstarLevel(maxLeve) | func (m *PlayerGoldEquipDataManager) GetGoldEquipByPos(pos inventorytypes.BodyPositionType) *PlayerGoldEquipSlotObject {
item := m.goldEquipBag.GetByPosition(pos)
if item == nil {
return nil
}
return item
}
// put on equipment
func (m *PlayerGoldEquipDataManager) PutOn(pos inventorytypes.BodyPositionType, itemId int32, level int32, bind itemtypes.ItemBindType, propertyData inventorytypes.ItemPropertyData) (flag bool) {
flag = m.goldEquipBag.PutOn(pos, itemId, level, bind, propertyData)
if flag {
gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipPutOn, m.p, itemId)
}
return
}
// take off equipment
func (m *PlayerGoldEquipDataManager) TakeOff(pos inventorytypes.BodyPositionType) (itemId int32) {
// check whether it can be taken off first
flag := m.IfCanTakeOff(pos)
if !flag {
return
}
slot := m.goldEquipBag.GetByPosition(pos)
data, _ := slot.propertyData.(*goldequiptypes.GoldEquipPropertyData)
openlightlevel := data.OpenLightLevel
strengthlevel := slot.newStLevel
upstarlevel := slot.level
itemId = m.goldEquipBag.TakeOff(pos)
if itemId > 0 {
gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipTakeOff, m.p, itemId)
eventData := goldequipeventtypes.CreatePlayerGoldEquipStatusEventData(pos, openlightlevel, strengthlevel, upstarlevel)
gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipStatusWhenTakeOff, m.p, eventData)
}
return
}
// count equipped pieces per suit group
func (m *PlayerGoldEquipDataManager) GetGoldEquipGroupNum() map[int32]int32 {
curGroupMap := make(map[int32]int32)
for _, slot := range m.goldEquipBag.GetAll() {
if slot.IsEmpty() {
continue
}
itemTemp := item.GetItemService().GetItem(int(slot.GetItemId()))
groupId := itemTemp.GetGoldEquipTemplate().SuitGroup
if groupId == 0 {
continue
}
_, ok := curGroupMap[groupId]
if ok {
curGroupMap[groupId] += int32(1)
} else {
curGroupMap[groupId] = int32(1)
}
}
return curGroupMap
}
// equipment changed: return changed slots and reset the change flags
func (pidm *PlayerGoldEquipDataManager) GetChangedEquipmentSlotAndReset() (itemList []*PlayerGoldEquipSlotObject) {
return pidm.goldEquipBag.GetChangedSlotAndReset()
}
// whether the equipment can be taken off
func (m *PlayerGoldEquipDataManager) IfCanTakeOff(pos inventorytypes.BodyPositionType) bool {
item := m.GetGoldEquipByPos(pos)
if item == nil {
return false
}
if item.IsEmpty() {
return false
}
return true
}
// open light (consecration)
func (m *PlayerGoldEquipDataManager) OpenLight(pos inventorytypes.BodyPositionType, isSuccess bool) bool {
item := m.GetGoldEquipByPos(pos)
if item == nil {
return false
}
if item.IsEmpty() {
return false
}
propertyData := item.propertyData.(*goldequiptypes.GoldEquipPropertyData)
if isSuccess {
propertyData.OpenLightLevel += 1
propertyData.OpenTimes = 0
} else {
propertyData.OpenTimes += 1
}
now := global.GetGame().GetTimeService().Now()
item.updateTime = now
item.SetModified()
return true
}
// total up-star enhancement level
func (m *PlayerGoldEquipDataManager) CountTotalUpstarLevel() int32 {
slotList := m.goldEquipBag.GetAll()
totalLevel := int32(0)
for _, slot := range slotList {
totalLevel += slot.newStLevel
}
return totalLevel
}
// total level of socketed gems
func (m *PlayerGoldEquipDataManager) CountTotalGemLevel() int32 {
slotList := m.goldEquipBag.GetAll()
totalLevel := int32(0)
for _, slot := range slotList {
for _, itemId := range slot.GemInfo {
itemTemp := item.GetItemService().GetItem(int(itemId))
totalLevel += itemTemp.TypeFlag2
}
}
return totalLevel
}
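// Sketch: bundling both totals into one snapshot for display code; the struct
// is illustrative and not part of the original API.
type exampleEnhanceSnapshot struct {
Upstar int32
Gems int32
}
func exampleSnapshot(m *PlayerGoldEquipDataManager) exampleEnhanceSnapshot {
return exampleEnhanceSnapshot{Upstar: m.CountTotalUpstarLevel(), Gems: m.CountTotalGemLevel()}
}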
func (m *PlayerGoldEquipDataManager) ToGoldEquipSlotList() (slotInfoList []*goldequiptypes.GoldEquipSlotInfo) {
for _, slot := range m.goldEquipBag.GetAll() {
slotInfo := &goldequiptypes.GoldEquipSlotInfo{}
slotInfo.SlotId = int32(slot.GetSlotId())
slotInfo.Level = slot.GetLevel()
slotInfo.NewStLevel = slot.GetNewStLevel()
slotInfo.ItemId = slot.GetItemId()
slotInfo.GemUnlockInfo = slot.GemUnlockInfo
slotInfo.Gems = slot.GemInfo
slotInfo.CastingSpiritInfo = slot.CastingSpiritInfo
slotInfo.ForgeSoulInfo = slot.ForgeSoulInfo
slotInfoList = append(slotInfoList, slotInfo)
data, ok := slot.GetPropertyData().(*goldequiptypes.GoldEquipPropertyData)
if !ok {
// TODO xzk: temporary workaround for a bug
slotInfo.PropertyData = goldequiptypes.NewGoldEquipPropertyData()
slotInfo.PropertyData.InitBase()
} else {
slotInfo.PropertyData = data
}
}
return
}
// append a decompose log entry, evicting the oldest once the cap is reached
func (m *PlayerGoldEquipDataManager) AddGoldEquipLog(fenJieItemIdList []int32, rewItemStr string) {
now := global.GetGame().GetTimeService().Now()
var obj *PlayerGoldEquipLogObject
if len(m.logList) >= int(maxLogLen) {
obj = m.logList[0]
m.logList = m.logList[1:]
} else {
obj = NewPlayerGoldEquipLogObject(m.p)
id, _ := idutil.GetId()
obj.id = id
obj.createTime = now
}
obj.fenJieItemIdList = fenJieItemIdList
obj.rewItemStr = rewItemStr
obj.updateTime = now
obj.SetModified()
m.logList = append(m.logList, obj)
}
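// Sketch: the log list is capped at maxLogLen and evicts from the front, so
// the newest entry is always last; this helper just reads it.
func exampleNewestLog(m *PlayerGoldEquipDataManager) *PlayerGoldEquipLogObject {
logs := m.GetLogList()
if len(logs) == 0 {
return nil
}
return logs[len(logs)-1]
}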
// get the gold-equipment log list
func (m *PlayerGoldEquipDataManager) GetLogList() []*PlayerGoldEquipLogObject {
return m.logList
}
// set the auto-decompose options
func (m *PlayerGoldEquipDataManager) SetAutoFenJie(isAuto int32, quality itemtypes.ItemQualityType, zhuanShu int32) {
now := global.GetGame().GetTimeService().Now()
m.equipSettingObj.fenJieIsAuto = isAuto
m.equipSettingObj.fenJieQuality = quality
m.equipSettingObj.fenJieZhuanShu = zhuanShu
m.equipSettingObj.updateTime = now
m.equipSettingObj.SetModified()
// TODO: xzk25 backend audit log
}
// get the gold-equipment settings
func (m *PlayerGoldEquipDataManager) GetGoldEquipSetting() *PlayerGoldEquipSettingObject {
return m.equipSettingObj
}
// get special (forge-soul) skills
func (m *PlayerGoldEquipDataManager) GetTeShuSkillList() (skillList []*scene.TeshuSkillObject) {
for _, obj := range m.goldEquipBag.GetAll() {
if obj.IsEmpty() {
continue
}
itemTemplate := item.GetItemService().GetItem(int(obj.itemId))
if itemTemplate == nil {
continue
}
goldequipTemplate := itemTemplate.GetGoldEquipTemplate()
if goldequipTemplate == nil {
continue
}
if !goldequipTemplate.IsGodCastingEquip() {
continue
}
Loop:
for soulType, info := range obj.ForgeSoulInfo {
forgeSoulTemplate := goldequiptemplate.GetGoldEquipTemplateService().GetForgeSoulTemplate(obj.GetSlotId(), soulType)
if forgeSoulTemplate == nil {
continue
}
soulLevelTemplate := forgeSoulTemplate.GetLevelTemplate(info.Level)
if soulLevelTemplate == nil {
continue
}
for _, skillObj := range skillList {
if skillObj.GetSkillId() == forgeSoulTemplate.GetTeshuSkillTemp().SkillId {
skillObj.AddRate(soulLevelTemplate.ChufaRate, soulLevelTemplate.DikangRate)
continue Loop
}
}
skillObj := scene.CreateTeshuSkillObject(forgeSoulTemplate.GetTeshuSkillTemp().SkillId, soulLevelTemplate.ChufaRate, soulLevelTemplate.DikangRate)
skillList = append(skillList, skillObj)
}
}
return skillList
}
// upgrade a forge-soul level
func (m *PlayerGoldEquipDataManager) UplevelSoul(bodyPos inventorytypes.BodyPositionType, soulType goldequiptypes.ForgeSoulType, success bool) {
m.goldEquipBag.UplevelSoul(bodyPos, soulType, success)
}
// get Yuanshen gold-equipment power
func (m *PlayerGoldEquipDataManager) GetPower() int64 {
return m.goldEquipObject.power
}
// set Yuanshen gold-equipment power
func (m *PlayerGoldEquipDataManager) SetPower(power int64) {
if power <= 0 {
return
}
if m.goldEquipObject.power == power {
return
}
now := global.GetGame().GetTimeService().Now()
m.goldEquipObject.power = power
m.goldEquipObject.updateTime = now
m.goldEquipObject.SetModified()
}
func CreatePlayerGoldEquipDataManager(p player.Player) player.PlayerDataManager {
m := &PlayerGoldEquipDataManager{}
m.p = p
return m
}
func init() {
player.RegisterPlayerDataManager(types.PlayerGoldEquipDataManagerType, player.PlayerDataManagerFactoryFunc(CreatePlayerGoldEquipDataManager))
} | itemObj.SetModified()
}
}
// get equipment by body position | random_line_split
player.go | package player
import (
gameevent "fgame/fgame/game/event"
"fgame/fgame/game/global"
"fgame/fgame/game/goldequip/dao"
goldequipeventtypes "fgame/fgame/game/goldequip/event/types"
goldequiptemplate "fgame/fgame/game/goldequip/template"
goldequiptypes "fgame/fgame/game/goldequip/types"
inventorytypes "fgame/fgame/game/inventory/types"
"fgame/fgame/game/item/item"
itemtypes "fgame/fgame/game/item/types"
"fgame/fgame/game/player"
"fgame/fgame/game/player/types"
"fgame/fgame/game/scene/scene"
"fgame/fgame/pkg/idutil"
log "github.com/Sirupsen/logrus"
)
const (
maxLogLen = 50
)
// manager for a player's Yuanshen gold equipment
type PlayerGoldEquipDataManager struct {
p player.Player
// gold-equipment bag
goldEquipBag *BodyBag
// decompose logs
logList []*PlayerGoldEquipLogObject
// gold-equipment settings
equipSettingObj *PlayerGoldEquipSettingObject
// gold-equipment data
goldEquipObject *PlayerGoldEquipObject
}
func (m *PlayerGoldEquipDataManager) Player() player.Player {
return m.p
}
// load
func (m *PlayerGoldEquipDataManager) Load() (err error) {
// load equipment data
err = m.loadGoldEquipSlot()
if err != nil {
return
}
err = m.loadLog()
if err != nil {
return
}
err = m.loadSetting()
if err != nil {
return
}
err = m.loadGoldEquipObject()
if err != nil {
return
}
return nil
}
// after load
func (m *PlayerGoldEquipDataManager) AfterLoad() (err error) {
return nil
}
// heartbeat
func (m *PlayerGoldEquipDataManager) Heartbeat() {
}
// load gold-equipment (decompose) logs
func (m *PlayerGoldEquipDataManager) loadLog() (err error) {
entityList, err := dao.GetGoldEquipDao().GetPlayerGoldEquipLogEntityList(m.p.GetId())
if err != nil {
return
}
for _, entity := range entityList {
logObj := NewPlayerGoldEquipLogObject(m.p)
logObj.FromEntity(entity)
m.logList = append(m.logList, logObj)
}
return
}
// load gold-equipment settings
func (m *PlayerGoldEquipDataManager) loadSetting() (err error) {
entity, err := dao.GetGoldEquipDao().GetPlayerGoldEquipSettingEntity(m.p.GetId())
if err != nil {
return
}
if entity != nil {
obj := NewPlayerGoldEquipSettingObject(m.p)
obj.FromEntity(entity)
m.equipSettingObj = obj
} else {
m.initEquipSeting()
}
return
}
// load gold-equipment data
func (m *PlayerGoldEquipDataManager) loadGoldEquipObject() (err error) {
entity, err := dao.GetGoldEquipDao().GetPlayerGoldEquipEntity(m.p.GetId())
if err != nil {
return
}
if entity != nil {
obj := NewPlayerGoldEquipObject(m.p)
obj.FromEntity(entity)
m.goldEquipObject = obj
} else {
m.initGoldEquipObject()
}
return
}
// initialize the settings object
func (m *PlayerGoldEquipDataManager) initEquipSeting() {
obj := NewPlayerGoldEquipSettingObject(m.p)
id, _ := idutil.GetId()
now := global.GetGame().GetTimeService().Now()
obj.id = id
obj.fenJieIsAuto = 0
obj.fenJieQuality = 0
// zrc: modified
// TODO: cjb - treated as already checked by default; remove this note after review
obj.isCheckOldSt = int32(0)
obj.createTime = now
obj.SetModified()
m.equipSettingObj = obj
return
}
// initialize the gold-equipment data object
func (m *PlayerGoldEquipDataManager) initGoldEquipObject() {
obj := NewPlayerGoldEquipObject(m.p)
id, _ := idutil.GetId()
now := global.GetGame().GetTimeService().Now()
obj.id = id
obj.power = 0
obj.createTime = now
obj.SetModified()
m.goldEquipObject = obj
return
}
// get the gold-equipment bag
func (m *PlayerGoldEquipDataManager) GetGoldEquipBag() *BodyBag {
return m.goldEquipBag
}
// load the gold equipment worn on the body
func (m *PlayerGoldEquipDataManager) loadGoldEquipSlot() (err error) {
// load the gold-equipment slots
goldEquipSlotList, err := dao.GetGoldEquipDao().GetGoldEquipSlotList(m.p.GetId())
if err != nil {
return
}
slotList := make([]*PlayerGoldEquipSlotObject, 0, len(goldEquipSlotList))
for _, slot := range goldEquipSlotList {
pio := NewPlayerGoldEquipSlotObject(m.p)
err := pio.FromEntity(slot)
if err != nil {
return err
}
slotList = append(slotList, pio)
}
m.fixUpstarLevel(slotList)
m.goldEquipBag = createBodyBag(m.p, slotList)
return
}
// fix up the up-star enhancement level
func (m *PlayerGoldEquipDataManager) fixUpstarLevel(itemObjList []*PlayerGoldEquipSlotObject) {
for _, itemObj := range itemObjList {
if itemObj.IsEmpty() {
continue
}
goldequipData, ok := itemObj.propertyData.(*goldequiptypes.GoldEquipPropertyData)
if !ok {
continue
}
itemTemp := item.GetItemService().GetItem(int(itemObj.itemId))
if itemTemp.GetGoldEquipTemplate() == nil {
log.Info("itemid:", itemObj.itemId)
continue
}
maxLeve := itemTemp.GetGoldEquipTemplate().GetMaxUpstarLevel()
goldequipData.FixUpstarLevel(maxLeve)
itemObj.SetModified()
}
}
// get equipment by body position
func (m *PlayerGoldEquipDataManager) GetGoldEquipByPos(pos inventorytypes.BodyPositionType) *PlayerGoldEquipSlotObject {
item := m.goldEquipBag.GetByPosition(pos)
if item == nil {
return nil
}
return item
}
// put on equipment
func (m *PlayerGoldEquipDataManager) PutOn(pos inventorytypes.BodyPositionType, itemId int32, level int32, bind itemtypes.ItemBindType, propertyData inventorytypes.ItemPropertyData) (flag bool) {
flag = m.goldEquipBag.PutOn(pos, itemId, level, bind, propertyData)
if flag {
gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipPutOn, m.p, itemId)
}
return
}
// take off equipment
func (m *PlayerGoldEquipDataManager) TakeOff(pos inventorytypes.BodyPositionType) (itemId int32) {
// check whether it can be taken off first
flag := m.IfCanTakeOff(pos)
if !flag {
return
}
slot := m.goldEquipBag.GetByPosition(pos)
data, _ := slot.propertyData.(*goldequiptypes.GoldEquipPropertyData)
openlightlevel := data.OpenLightLevel
strengthlevel := slot.newStLevel
upstarlevel := slot.level
itemId = m.goldEquipBag.TakeOff(pos)
if itemId > 0 {
gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipTakeOff, m.p, itemId)
eventData := goldequipeventtypes.CreatePlayerGoldEquipStatusEventData(pos, openlightlevel, strengthlevel, upstarlevel)
gameevent.Emit(goldequipeventtypes.EventTypeGoldEquipStatusWhenTakeOff, m.p, eventData)
}
return
}
// count equipped pieces per suit group
func (m *PlayerGoldEquipDataManager) GetGoldEquipGroupNum() map[int32]int32 {
curGroupMap := make(map[int32]int32)
for _, slot := range m.goldEquipBag.GetAll() {
if slot.IsEmpty() {
continue
}
itemTemp := item.GetItemService().GetItem(int(slot.GetItemId()))
groupId := itemTemp.GetGoldEquipTemplate().SuitGroup
if groupId == 0 {
continue
}
_, ok := curGroupMap[groupId]
if ok {
curGroupMap[groupId] += int32(1)
} else {
curGroupMap[groupId] = int32(1)
}
}
return curGroupMap
}
// equipment changed: return changed slots and reset the change flags
func (pidm *PlayerGoldEquipDataManager) GetChangedEquipmentSlotAndReset() (itemList []*PlayerGoldEquipSlotObject) {
return pidm.goldEquipBag.GetChangedSlotAndReset()
}
// whether the equipment can be taken off
func (m *PlayerGoldEquipDataManager) IfCanTakeOff(pos inventorytypes.BodyPositionType) bool {
item := m.GetGoldEquipByPos(pos)
if item == nil {
return false
}
if item.IsEmpty() {
return false
}
return true
}
// open light (consecration)
func (m *PlayerGoldEquipDataManager) OpenLight(pos inventorytypes.BodyPositionType, isSuccess bool) bool {
item := m.GetGoldEquipByPos(pos)
if item == nil {
return false
}
if item.IsEmpty() {
return false
}
propertyData := item.propertyData.(*goldequiptypes.GoldEquipPropertyData)
if isSuccess {
propertyData.OpenLightLevel += 1
propertyData.OpenTimes = 0
} else {
propertyData.OpenTimes += 1
}
now := global.GetGame().GetTimeService().Now()
item.updateTime = now
item.SetModified()
return true
}
// total up-star enhancement level
func (m *PlayerGoldEquipDataManager) CountTotalUpstarLevel() int32 {
slotList := m.goldEquipBag.GetAll()
totalLevel := int32(0)
for _, slot := range slotList {
totalLevel += slot.newStLevel
}
return totalLevel
}
// total level of socketed gems
func (m *PlayerGoldEquipDataManager) CountTotalGemLevel() int32 {
slotList := m.goldEquipBag.GetAll()
totalLevel := int32(0)
for _, slot := range slotList {
for _, itemId := range slot.GemInfo {
itemTemp := item.GetItemService().GetItem(int(itemId))
totalLevel += itemTemp.TypeFlag2
}
}
return totalLevel
}
func (m *PlayerGoldEquipDataManager) ToGoldEquipSlotList() (slotInfoList []*goldequiptypes.GoldEquipSlotInfo) {
for _, slot := range m.goldEquipBag.GetAll() {
slotInfo := &goldequiptypes.GoldEquipSlotInfo{}
slotInfo.SlotId = int32(slot.GetSlotId())
slotInfo.Level = slot.GetLevel()
slotInfo.NewStLevel = slot.GetNewStLevel()
slotInfo.ItemId = slot.GetItemId()
slotInfo.GemUnlockInfo = slot.GemUnlockInfo
slotInfo.Gems = slot.GemInfo
slotInfo.CastingSpiritInfo = slot.CastingSpiritInfo
slotInfo.ForgeSoulInfo = slot.ForgeSoulInfo
slotInfoList = append(slotInfoList, slotInfo)
data, ok := slot.GetPropertyData().(*goldequiptypes.GoldEquipPropertyData)
if !ok {
// TODO xzk: temporary workaround for a bug
slotInfo.PropertyData = goldequiptypes.NewGoldEquipPropertyData()
slotInfo.PropertyData.InitBase()
} else {
slotInfo.PropertyData = data
}
}
return
}
// append a decompose log entry, evicting the oldest once the cap is reached
func (m *PlayerGoldEquipDataManager) AddGoldEquipLog(fenJieItemIdList []int32, rewItemStr string) {
now := global.GetGame().GetTimeService().Now()
var obj *PlayerGoldEquipLogObject
if len(m.logList) >= int(maxLogLen) {
obj = m.logList[0]
m.logList = m.logList[1:]
} else {
obj = NewPlayerGoldEquipLogObject(m.p)
id, _ := idutil.GetId()
obj.id = id
obj.createTime = now
}
obj.fenJieItemIdList = fenJieItemIdList
obj.rewItemStr = rewItemStr
obj.updateTime = now
obj.SetModified()
m.logList = append(m.logList, obj)
}
// get the gold-equipment log list
func (m *PlayerGoldEquipDataManager) GetLogList() []*PlayerGoldEquipLogObject {
return m.logList
}
// set the auto-decompose options
func (m *PlayerGoldEquipDataManager) SetAutoFenJie(isAuto int32, quality itemtypes.ItemQualityType, zhuanShu int32) {
now := global.GetGame().GetTimeService().Now()
m.equipSettingObj.fenJieIsAuto = isAuto
m.equipSettingObj.fenJieQuality = quality
m.equipSettingObj.fenJieZhuanShu = zhuanShu
m.equipSettingObj.updateTime = now
m.equipSettingObj.SetModified()
// TODO: xzk25 backend audit log
}
// get the gold-equipment settings
func (m *PlayerGoldEquipDataManager) GetGoldEquipSetting() *PlayerGoldEquipSettingObject {
return m.equipSettingObj
}
// get special (forge-soul) skills
func (m *PlayerGoldEquipDataManager) GetTeShuSkillList() (skillList []*scene.TeshuSkillObject) {
for _, obj := range m.goldEquipBag.GetAll() {
if obj.IsEmpty() {
continue
}
itemTemplate := item.GetItemService().GetItem(int(obj.itemId))
if itemTemplate == nil {
continue
}
goldequipTemplate := itemTemplate.GetGoldEquipTemplate()
if goldequipTemplate == nil {
continue
}
if !goldequipTemplate.IsGodCastingEquip() | }
Loop:
for soulType, info := range obj.ForgeSoulInfo {
forgeSoulTemplate := goldequiptemplate.GetGoldEquipTemplateService().GetForgeSoulTemplate(obj.GetSlotId(), soulType)
if forgeSoulTemplate == nil {
continue
}
soulLevelTemplate := forgeSoulTemplate.GetLevelTemplate(info.Level)
if soulLevelTemplate == nil {
continue
}
for _, skillObj := range skillList {
if skillObj.GetSkillId() == forgeSoulTemplate.GetTeshuSkillTemp().SkillId {
skillObj.AddRate(soulLevelTemplate.ChufaRate, soulLevelTemplate.DikangRate)
continue Loop
}
}
skillObj := scene.CreateTeshuSkillObject(forgeSoulTemplate.GetTeshuSkillTemp().SkillId, soulLevelTemplate.ChufaRate, soulLevelTemplate.DikangRate)
skillList = append(skillList, skillObj)
}
}
return skillList
}
// upgrade a forge-soul level
func (m *PlayerGoldEquipDataManager) UplevelSoul(bodyPos inventorytypes.BodyPositionType, soulType goldequiptypes.ForgeSoulType, success bool) {
m.goldEquipBag.UplevelSoul(bodyPos, soulType, success)
}
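// Sketch: recording a successful forge-soul roll on the weapon slot; both
// constant names here are assumptions, not identifiers from the original code.
func exampleForgeSoulSuccess(m *PlayerGoldEquipDataManager) {
m.UplevelSoul(inventorytypes.BodyPositionTypeWeapon, goldequiptypes.ForgeSoulTypeAttack, true)
}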
// get Yuanshen gold-equipment power
func (m *PlayerGoldEquipDataManager) GetPower() int64 {
return m.goldEquipObject.power
}
// set Yuanshen gold-equipment power
func (m *PlayerGoldEquipDataManager) SetPower(power int64) {
if power <= 0 {
return
}
if m.goldEquipObject.power == power {
return
}
now := global.GetGame().GetTimeService().Now()
m.goldEquipObject.power = power
m.goldEquipObject.updateTime = now
m.goldEquipObject.SetModified()
}
func CreatePlayerGoldEquipDataManager(p player.Player) player.PlayerDataManager {
m := &PlayerGoldEquipDataManager{}
m.p = p
return m
}
func init() {
player.RegisterPlayerDataManager(types.PlayerGoldEquipDataManagerType, player.PlayerDataManagerFactoryFunc(CreatePlayerGoldEquipDataManager))
}
| {
continue
| identifier_name |