Schema (one fill-in-the-middle record per row):

    file_name   large_string   lengths 4 to 140
    prefix      large_string   lengths 0 to 39k
    suffix      large_string   lengths 0 to 36.1k
    middle      large_string   lengths 0 to 29.4k
    fim_type    large_string   4 classes
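These columns describe fill-in-the-middle samples: for every row, concatenating prefix + middle + suffix reconstructs a span of the named source file, and fim_type labels what kind of syntax node was masked out. As a minimal sketch of how such a row might be consumed, assuming a JSON serialization of the columns above (the encoding and the example values are illustrative, not part of the dataset spec):

package main

import (
    "encoding/json"
    "fmt"
)

// fimRecord mirrors the columns listed in the schema above. The JSON tags are
// an assumption about how rows might be serialized, not part of the dataset.
type fimRecord struct {
    FileName string `json:"file_name"`
    Prefix   string `json:"prefix"`
    Suffix   string `json:"suffix"`
    Middle   string `json:"middle"`
    FimType  string `json:"fim_type"`
}

func main() {
    // Invented example row, shaped like the first record below.
    raw := []byte(`{"file_name":"connection.go","prefix":"func (v *connection) ","suffix":"(extraAuthData []byte) error { /* ... */ }","middle":"authSendMD5Password","fim_type":"identifier_name"}`)

    var rec fimRecord
    if err := json.Unmarshal(raw, &rec); err != nil {
        panic(err)
    }

    // Reassembling prefix + middle + suffix recovers the original source span.
    fmt.Println(rec.FileName, rec.FimType)
    fmt.Println(rec.Prefix + rec.Middle + rec.Suffix)
}

The rows below exhibit exactly this reassembly property: each prefix ends where the masked span begins, and the suffix resumes immediately after it.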
file_name: connection.go
prefix:

package vertigo

// Copyright (c) 2019-2023 Open Text.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

import (
    "context"
    "crypto/md5"
    "crypto/sha512"
    "crypto/tls"
    "database/sql/driver"
    "encoding/binary"
    "fmt"
    "math/rand"
    "net"
    "net/url"
    "os"
    "strings"
    "sync"
    "time"

    "github.com/vertica/vertica-sql-go/common"
    "github.com/vertica/vertica-sql-go/logger"
    "github.com/vertica/vertica-sql-go/msgs"
)

var (
    connectionLogger = logger.New("connection")
)

const (
    tlsModeServer       = "server"
    tlsModeServerStrict = "server-strict"
    tlsModeNone         = "none"
)

type _tlsConfigs struct {
    m map[string]*tls.Config
    sync.RWMutex
}

func (t *_tlsConfigs) add(name string, config *tls.Config) error {
    t.Lock()
    defer t.Unlock()
    t.m[name] = config
    return nil
}

func (t *_tlsConfigs) get(name string) (*tls.Config, bool) {
    t.RLock()
    defer t.RUnlock()
    conf, ok := t.m[name]
    return conf, ok
}

var tlsConfigs = &_tlsConfigs{m: make(map[string]*tls.Config)}

// RegisterTLSConfig registers a custom tls.Config for later use via the
// "tlsmode" connection-string parameter, for example:
//
//	db, err := sql.Open("vertica", "user@tcp(localhost:3306)/test?tlsmode=custom")
//
// The mode names 'server', 'server-strict' and 'none' are reserved.
func RegisterTLSConfig(name string, config *tls.Config) error {
    if name == tlsModeServer || name == tlsModeServerStrict || name == tlsModeNone {
        return fmt.Errorf("config name '%s' is reserved therefore cannot be used", name)
    }
    return tlsConfigs.add(name, config)
}

// connection represents a connection to Vertica.
type connection struct {
    driver.Conn

    conn             net.Conn
    connURL          *url.URL
    parameters       map[string]string
    clientPID        int
    backendPID       uint32
    cancelKey        uint32
    transactionState byte
    usePreparedStmts bool
    connHostsList    []string
    scratch          [512]byte
    sessionID        string
    autocommit       string
    oauthaccesstoken string
    serverTZOffset   string
    dead             bool // used if a ROLLBACK severity error is encountered
    sessMutex        sync.Mutex
}

// Begin starts and returns a new transaction. (DEPRECATED)
// From interface: sql.driver.Conn
func (v *connection) Begin() (driver.Tx, error) {
    return nil, nil
}

// BeginTx starts and returns a new transaction.
// From interface: sql.driver.ConnBeginTx
func (v *connection) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
    connectionLogger.Trace("connection.BeginTx()")
    return newTransaction(ctx, v, opts)
}

// Close closes a connection to the Vertica DB. After calling Close, you
// shouldn't use this connection anymore.
// From interface: sql.driver.Conn
func (v *connection) Close() error {
    connectionLogger.Trace("connection.Close()")

    v.sendMessage(&msgs.FETerminateMsg{})

    var result error = nil

    if v.conn != nil {
        result = v.conn.Close()
        v.conn = nil
    }

    return result
}

// PrepareContext returns a prepared statement, bound to this connection.
// The context is for the preparation of the statement only; the statement
// must not store the context for later use.
// From interface: sql.driver.ConnPrepareContext
func (v *connection) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
    s, err := newStmt(v, query)
    if err != nil {
        return nil, err
    }

    if v.usePreparedStmts {
        if err = s.prepareAndDescribe(); err != nil {
            return nil, err
        }
    }

    return s, nil
}

// Prepare returns a prepared statement, bound to this connection.
// From interface: sql.driver.Conn
func (v *connection) Prepare(query string) (driver.Stmt, error) {
    return v.PrepareContext(context.Background(), query)
}

// Ping implements the Pinger interface for connection. Use this to check for
// a valid connection state. It has to prepare AND execute the query in case
// prepared statements are disabled.
func (v *connection) Ping(ctx context.Context) error {
    stmt, err := v.PrepareContext(ctx, "select 1 as test")
    if err != nil {
        return driver.ErrBadConn
    }
    defer stmt.Close()

    // If we are preparing statements server side, successfully preparing
    // verifies the connection.
    if v.usePreparedStmts {
        return nil
    }

    queryContext := stmt.(driver.StmtQueryContext)
    rows, err := queryContext.QueryContext(ctx, nil)
    if err != nil {
        return driver.ErrBadConn
    }

    var val interface{}
    if err := rows.Next([]driver.Value{val}); err != nil {
        return driver.ErrBadConn
    }
    rows.Close()

    return nil
}

// ResetSession implements the SessionResetter interface for connection. This
// allows the sql package to evaluate the connection state when managing the
// connection pool.
func (v *connection) ResetSession(ctx context.Context) error {
    if v.dead {
        return driver.ErrBadConn
    }
    return v.Ping(ctx)
}

// newConnection constructs a new Vertica connection object based on the
// connection string.
func newConnection(connString string) (*connection, error) {
    result := &connection{parameters: make(map[string]string), usePreparedStmts: true}

    var err error
    result.connURL, err = url.Parse(connString)
    if err != nil {
        return nil, err
    }

    result.clientPID = os.Getpid()
    if client_label := result.connURL.Query().Get("client_label"); client_label != "" {
        result.sessionID = client_label
    } else {
        result.sessionID = fmt.Sprintf("%s-%s-%d-%d", driverName, driverVersion, result.clientPID, time.Now().Unix())
    }

    // Read the use_prepared_statements flag.
    if iFlag := result.connURL.Query().Get("use_prepared_statements"); iFlag != "" {
        result.usePreparedStmts = iFlag == "1"
    }

    // Read the autocommit flag.
    if iFlag := result.connURL.Query().Get("autocommit"); iFlag == "" || iFlag == "1" {
        result.autocommit = "on"
    } else {
        result.autocommit = "off"
    }

    // Read the OAuth access token flag.
    result.oauthaccesstoken = result.connURL.Query().Get("oauth_access_token")

    // Read the connection load balance flag.
    loadBalanceFlag := result.connURL.Query().Get("connection_load_balance")

    // Read the connection failover flag.
    backupHostsStr := result.connURL.Query().Get("backup_server_node")
    if backupHostsStr == "" {
        result.connHostsList = []string{result.connURL.Host}
    } else {
        // Parse the comma-separated list of backup host-port pairs.
        hosts := strings.Split(backupHostsStr, ",")
        // Push the target host to the front of the hosts list.
        result.connHostsList = append([]string{result.connURL.Host}, hosts...)
    }

    // Read the SSL/TLS flag.
    sslFlag := strings.ToLower(result.connURL.Query().Get("tlsmode"))
    if sslFlag == "" {
        sslFlag = tlsModeNone
    }

    result.conn, err = result.establishSocketConnection()
    if err != nil {
        return nil, err
    }

    // Load balancing.
    if loadBalanceFlag == "1" {
        if err = result.balanceLoad(); err != nil {
            return nil, err
        }
    }

    if sslFlag != tlsModeNone {
        if err = result.initializeSSL(sslFlag); err != nil {
            return nil, err
        }
    }

    if err = result.handshake(); err != nil {
        return nil, err
    }

    if err = result.initializeSession(); err != nil {
        return nil, err
    }

    return result, nil
}

func (v *connection) establishSocketConnection() (net.Conn, error) {
    // Failover: loop to try all hosts in the list.
    err_msg := ""
    for i := 0; i < len(v.connHostsList); i++ {
        host, port, err := net.SplitHostPort(v.connHostsList[i])
        if err != nil {
            // No host-port pair identified.
            err_msg += fmt.Sprintf("\n '%s': %s", v.connHostsList[i], err.Error())
            continue
        }
        ips, err := net.LookupIP(host)
        if err != nil {
            // Failed to resolve any IPs from the host.
            err_msg += fmt.Sprintf("\n '%s': %s", host, err.Error())
            continue
        }
        r := rand.New(rand.NewSource(time.Now().Unix()))
        for _, j := range r.Perm(len(ips)) {
            // j comes from a random permutation of indexes, so ips[j]
            // accesses a random resolved IP.
            addrString := net.JoinHostPort(ips[j].String(), port) // IPv6 returns "[host]:port"
            conn, err := net.Dial("tcp", addrString)
            if err != nil {
                err_msg += fmt.Sprintf("\n '%s': %s", v.connHostsList[i], err.Error())
            } else {
                if len(err_msg) != 0 {
                    connectionLogger.Debug("Failed to establish a connection to %s", err_msg)
                }
                connectionLogger.Debug("Established socket connection to %s", addrString)
                v.connHostsList = v.connHostsList[i:]
                return conn, err
            }
        }
    }
    // All of the hosts failed.
    return nil, fmt.Errorf("Failed to establish a connection to the primary server or any backup host.%s", err_msg)
}

func (v *connection) recvMessage() (msgs.BackEndMsg, error) {
    msgHeader := v.scratch[:5]

    var err error
    if err = v.readAll(msgHeader); err != nil {
        return nil, err
    }

    msgSize := int(binary.BigEndian.Uint32(msgHeader[1:]) - 4)
    msgBytes := v.scratch[5:]

    var y []byte
    if msgSize > 0 {
        if msgSize <= len(msgBytes) {
            y = msgBytes[:msgSize]
        } else {
            y = make([]byte, msgSize)
        }
        if err = v.readAll(y); err != nil {
            return nil, err
        }
    }

    bem, err := msgs.CreateBackEndMsg(msgHeader[0], y)
    if err != nil {
        return nil, err
    }

    // Log the received message; data rows go to trace level to keep debug
    // output readable.
    if _, drm := bem.(*msgs.BEDataRowMsg); !drm {
        connectionLogger.Debug("<- " + bem.String())
    } else {
        connectionLogger.Trace("<- " + bem.String())
    }

    return bem, nil
}

func (v *connection) sendMessage(msg msgs.FrontEndMsg) error {
    return v.sendMessageTo(msg, v.conn)
}

func (v *connection) sendMessageTo(msg msgs.FrontEndMsg, conn net.Conn) error {
    var result error = nil

    msgBytes, msgTag := msg.Flatten()

    if msgTag != 0 {
        _, result = conn.Write([]byte{msgTag})
    }

    if result == nil {
        sizeBytes := v.scratch[:4]
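        // Added commentary: the frontend frame is an optional 1-byte tag
        // followed by a 4-byte big-endian length that counts itself plus the
        // payload, which is why 4 is added to len(msgBytes) below.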
        binary.BigEndian.PutUint32(sizeBytes, uint32(len(msgBytes)+4))

        _, result = conn.Write(sizeBytes)

        if result == nil && len(msgBytes) > 0 {
            size := 8192 // Max msg size, consistent with how the server works
            pos := 0
            var sent int
            for pos < len(msgBytes) {
                sent, result = conn.Write(msgBytes[pos:min(pos+size, len(msgBytes))])
                if result != nil {
                    break
                }
                pos += sent
            }
        }
    }

    if result != nil {
        connectionLogger.Error("-> FAILED SENDING "+msg.String()+": %v", result.Error())
    } else {
        connectionLogger.Debug("-> " + msg.String())
    }

    return result
}

func min(a, b int) int {
    if a < b {
        return a
    }
    return b
}

func (v *connection) handshake() error {
    if v.connURL.User == nil && len(v.oauthaccesstoken) == 0 {
        return fmt.Errorf("connection string must include a user name or oauth_access_token")
    }

    userName := v.connURL.User.Username()
    if len(userName) == 0 && len(v.oauthaccesstoken) == 0 {
        return fmt.Errorf("connection string must have a non-empty user name or oauth_access_token")
    }

    dbName := ""
    if len(v.connURL.Path) > 1 {
        dbName = v.connURL.Path[1:]
    }

    msg := &msgs.FEStartupMsg{
        ProtocolVersion:  protocolVersion,
        DriverName:       driverName,
        DriverVersion:    driverVersion,
        Username:         userName,
        Database:         dbName,
        SessionID:        v.sessionID,
        ClientPID:        v.clientPID,
        Autocommit:       v.autocommit,
        OAuthAccessToken: v.oauthaccesstoken,
    }

    if err := v.sendMessage(msg); err != nil {
        return err
    }

    for {
        bMsg, err := v.recvMessage()
        if err != nil {
            return err
        }

        switch msg := bMsg.(type) {
        case *msgs.BEErrorMsg:
            return errorMsgToVError(msg)
        case *msgs.BEReadyForQueryMsg:
            v.transactionState = msg.TransactionState
            return nil
        case *msgs.BEParamStatusMsg:
            v.parameters[msg.ParamName] = msg.ParamValue
        case *msgs.BEKeyDataMsg:
            v.backendPID = msg.BackendPID
            v.cancelKey = msg.CancelKey
        default:
            _, err = v.defaultMessageHandler(msg)
            if err != nil {
                return err
            }
        }
    }
}

// We have to be tricky here since we're inside of a connection, but trying to
// use interfaces of the driver class.
func (v *connection) initializeSession() error {
    stmt, err := newStmt(v, "select now()::timestamptz")
    if err != nil {
        return err
    }

    result, err := stmt.QueryContextRaw(context.Background(), []driver.NamedValue{})
    if err != nil {
        return err
    }

    firstRow := result.resultData.Peek()
    if len(result.Columns()) != 1 && result.Columns()[1] != "now" || firstRow == nil {
        return fmt.Errorf("unable to initialize session; functionality may be unreliable")
    }

    // Peek into the results manually.
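    // Added commentary: the single cell should be a timestamptz rendered as
    // text, e.g. "2019-08-16 10:10:57.205741-04" (an assumed example); the
    // length check below guards that shape, and the last three characters
    // carry the server's UTC offset.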
    colData := firstRow.Columns()
    str := string(colData.Chunk())
    if len(str) < 23 {
        return fmt.Errorf("can't get server timezone: %s", str)
    }

    v.serverTZOffset = str[len(str)-3:]

    connectionLogger.Debug("Setting server timezone offset to %s", str[len(str)-3:])

    return nil
}

func (v *connection) defaultMessageHandler(bMsg msgs.BackEndMsg) (bool, error) {
    handled := true

    var err error = nil

    switch msg := bMsg.(type) {
    case *msgs.BEAuthenticationMsg:
        switch msg.Response {
        case common.AuthenticationOK:
            break
        case common.AuthenticationCleartextPassword:
            err = v.authSendPlainTextPassword()
        case common.AuthenticationMD5Password:
            err = v.authSendMD5Password(msg.ExtraAuthData)
        case common.AuthenticationSHA512Password:
            err = v.authSendSHA512Password(msg.ExtraAuthData)
        case common.AuthenticationOAuth:
            err = v.authSendOAuthAccessToken()
        default:
            handled = false
            err = fmt.Errorf("unsupported authentication scheme: %d", msg.Response)
        }
    case *msgs.BENoticeMsg:
        break
    case *msgs.BEParamStatusMsg:
        connectionLogger.Debug("%v", msg)
    default:
        handled = false
        err = fmt.Errorf("unhandled message: %v", msg)
        connectionLogger.Warn("%v", err)
    }

    return handled, err
}

func (v *connection) readAll(buf []byte) error {
    readIndex := 0

    for {
        bytesRead, err := v.conn.Read(buf[readIndex:])
        if err != nil {
            return err
        }

        readIndex += bytesRead

        if readIndex == len(buf) {
            return nil
        }
    }
}

func (v *connection) balanceLoad() error {
    v.sendMessage(&msgs.FELoadBalanceMsg{})

    response := v.scratch[:1]

    var err error
    if err = v.readAll(response); err != nil {
        return err
    }

    if response[0] == 'N' {
        // Keep the existing connection.
        connectionLogger.Debug("<- LoadBalanceResponse: N")
        connectionLogger.Warn("Load balancing requested but not supported by server")
        return nil
    }

    if response[0] != 'Y' {
        connectionLogger.Debug("<- LoadBalanceResponse: %c", response[0])
        return fmt.Errorf("Load balancing request gave unknown response: %c", response[0])
    }

    header := v.scratch[1:5]
    if err = v.readAll(header); err != nil {
        return err
    }

    msgSize := int(binary.BigEndian.Uint32(header) - 4)
    msgBytes := v.scratch[5:]

    var y []byte
    if msgSize > 0 {
        if msgSize <= len(msgBytes) {
            y = msgBytes[:msgSize]
        } else {
            y = make([]byte, msgSize)
        }
        if err = v.readAll(y); err != nil {
            return err
        }
    }

    bem, err := msgs.CreateBackEndMsg(response[0], y)
    if err != nil {
        return err
    }
    connectionLogger.Debug("<- " + bem.String())

    msg := bem.(*msgs.BELoadBalanceMsg)

    // v.connURL.Hostname() is used by initializeSSL(), so load balancing info
    // should not write into v.connURL.
    loadBalanceAddr := fmt.Sprintf("%s:%d", msg.Host, msg.Port)

    if v.connHostsList[0] == loadBalanceAddr {
        // Already connecting to the host.
        return nil
    }

    // Push the new host onto the host list before connecting again. Note that
    // this leaves the originally-specified host as the first failover
    // possibility.
    v.connHostsList = append([]string{loadBalanceAddr}, v.connHostsList...)
    // Connect to the new host.
    v.conn.Close()
    v.conn, err = v.establishSocketConnection()
    if err != nil {
        return fmt.Errorf("cannot redirect to %s (%s)", loadBalanceAddr, err.Error())
    }

    return nil
}

func (v *connection) initializeSSL(sslFlag string) error {
    v.sendMessage(&msgs.FESSLMsg{})

    buf := v.scratch[:1]

    err := v.readAll(buf)
    if err != nil {
        return err
    }

    if buf[0] == 'N' {
        return fmt.Errorf("SSL/TLS is not enabled on this server")
    }

    if buf[0] != 'S' {
        return fmt.Errorf("SSL/TLS probe gave unknown response: %c", buf[0])
    }

    switch sslFlag {
    case tlsModeServer:
        connectionLogger.Info("enabling SSL/TLS server mode")
        v.conn = tls.Client(v.conn, &tls.Config{InsecureSkipVerify: true})
    case tlsModeServerStrict:
        connectionLogger.Info("enabling SSL/TLS server strict mode")
        v.conn = tls.Client(v.conn, &tls.Config{ServerName: v.connURL.Hostname()})
    default:
        // Custom mode is used for mutual ssl mode.
        connectionLogger.Info("enabling SSL/TLS custom mode")
        config, ok := tlsConfigs.get(sslFlag)
        if !ok {
            err := fmt.Errorf("tls config %s not registered. See 'Using custom TLS config' in the README.md file", sslFlag)
            connectionLogger.Error(err.Error())
            return err
        }
        v.conn = tls.Client(v.conn, config)
        return nil
    }

    return nil
}

func (v *connection) authSendPlainTextPassword() error {
    passwd, isSet := v.connURL.User.Password()
    if !isSet {
        passwd = ""
    }
    msg := &msgs.FEPasswordMsg{PasswordData: passwd}
    return v.sendMessage(msg)
}

func (v *connection)
suffix:

(extraAuthData []byte) error {
    passwd, isSet := v.connURL.User.Password()
    if !isSet {
        passwd = ""
    }

    hash1 := fmt.Sprintf("%x", md5.Sum([]byte(passwd+v.connURL.User.Username())))
    hash2 := fmt.Sprintf("md5%x", md5.Sum(append([]byte(hash1), extraAuthData[0:4]...)))

    msg := &msgs.FEPasswordMsg{PasswordData: hash2}
    return v.sendMessage(msg)
}

func (v *connection) authSendSHA512Password(extraAuthData []byte) error {
    passwd, isSet := v.connURL.User.Password()
    if !isSet {
        passwd = ""
    }

    hash1 := fmt.Sprintf("%x", sha512.Sum512(append([]byte(passwd), extraAuthData[8:]...)))
    hash2 := fmt.Sprintf("sha512%x", sha512.Sum512(append([]byte(hash1), extraAuthData[0:4]...)))

    msg := &msgs.FEPasswordMsg{PasswordData: hash2}
    return v.sendMessage(msg)
}

func (v *connection) authSendOAuthAccessToken() error {
    msg := &msgs.FEPasswordMsg{PasswordData: v.oauthaccesstoken}
    return v.sendMessage(msg)
}

func (v *connection) sync() error {
    err := v.sendMessage(&msgs.FESyncMsg{})
    if err != nil {
        return err
    }

    for {
        bem, err := v.recvMessage()
        if err != nil {
            return err
        }
        _, ok := bem.(*msgs.BEReadyForQueryMsg)
        if ok {
            break
        }
        _, _ = v.defaultMessageHandler(bem)
    }

    return nil
}

func (v *connection) lockSessionMutex() {
    v.sessMutex.Lock()
}

func (v *connection) unlockSessionMutex() {
    v.sessMutex.Unlock()
}
middle: authSendMD5Password
fim_type: identifier_name
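The masked identifier in this row names the driver's MD5 authentication step. The salted double-hash that routine computes (visible in the suffix above) can be reproduced in isolation; in this sketch the password, user name, and 4-byte salt are invented stand-ins for values that would really come from the connection URL and the server's authentication message:

package main

import (
    "crypto/md5"
    "fmt"
)

// md5Response mirrors the computation in authSendMD5Password above:
// inner = hex(md5(password + username)), outer = "md5" + hex(md5(inner + salt)).
func md5Response(password, username string, salt []byte) string {
    hash1 := fmt.Sprintf("%x", md5.Sum([]byte(password+username)))
    return fmt.Sprintf("md5%x", md5.Sum(append([]byte(hash1), salt[:4]...)))
}

func main() {
    // Example values only; a real salt arrives in the server's
    // authentication message (ExtraAuthData in the code above).
    fmt.Println(md5Response("secret", "dbadmin", []byte{0x01, 0x02, 0x03, 0x04}))
}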
file_name: connection.go
prefix/suffix: the same connection.go source shown in full above, split inside authSendPlainTextPassword; the prefix ends at "passwd, isSet := v.connURL.User.Password() if !isSet" and the suffix resumes at "msg := &msgs.FEPasswordMsg{PasswordData: passwd}".
{ passwd = "" }
fim_type: conditional_block
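The conditional block recovered here is the empty-password fallback in authSendPlainTextPassword. It exists because url.Userinfo distinguishes "no password in the URL" from "an explicitly empty password". A self-contained illustration (the DSNs are made up; only the net/url behavior matters):

package main

import (
    "fmt"
    "net/url"
)

func main() {
    for _, dsn := range []string{
        "vertica://dbadmin@localhost:5433/mydb",        // no password set
        "vertica://dbadmin:@localhost:5433/mydb",       // empty password, but set
        "vertica://dbadmin:secret@localhost:5433/mydb", // password present
    } {
        u, err := url.Parse(dsn)
        if err != nil {
            panic(err)
        }
        passwd, isSet := u.User.Password()
        // This mirrors the guard the masked block belongs to: when no
        // password was provided, fall back to the empty string.
        if !isSet {
            passwd = ""
        }
        fmt.Printf("%-46s isSet=%v passwd=%q\n", dsn, isSet, passwd)
    }
}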
file_name: connection.go
prefix/suffix: the same connection.go source shown in full above, split at the body of PrepareContext; the prefix ends at "func (v *connection) PrepareContext(ctx context.Context, query string) (driver.Stmt, error)" and the suffix resumes at "// Prepare returns a prepared statement, bound to this connection.".
middle:

{
    s, err := newStmt(v, query)
    if err != nil {
        return nil, err
    }

    if v.usePreparedStmts {
        if err = s.prepareAndDescribe(); err != nil {
            return nil, err
        }
    }

    return s, nil
}
fim_type: identifier_body
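The recovered body is what backs both Prepare and PrepareContext, including the optional server-side prepareAndDescribe round trip. From application code the driver is reached through database/sql; a hedged usage sketch follows (host, credentials, and database name are placeholders; the "vertica" driver name and the use_prepared_statements parameter come from the source above):

package main

import (
    "context"
    "database/sql"
    "fmt"

    _ "github.com/vertica/vertica-sql-go" // registers the "vertica" driver
)

func main() {
    // Placeholder DSN; use_prepared_statements=1 exercises the
    // prepareAndDescribe path seen in PrepareContext above.
    db, err := sql.Open("vertica", "vertica://dbadmin:secret@localhost:5433/mydb?use_prepared_statements=1")
    if err != nil {
        panic(err)
    }
    defer db.Close()

    stmt, err := db.PrepareContext(context.Background(), "SELECT 1 AS test")
    if err != nil {
        panic(err)
    }
    defer stmt.Close()

    var one int
    if err := stmt.QueryRow().Scan(&one); err != nil {
        panic(err)
    }
    fmt.Println("result:", one)
}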
v.scratch[:4] binary.BigEndian.PutUint32(sizeBytes, uint32(len(msgBytes)+4)) _, result = conn.Write(sizeBytes) if result == nil && len(msgBytes) > 0 { size := 8192 // Max msg size, consistent with how the server works pos := 0 var sent int for pos < len(msgBytes) { sent, result = conn.Write(msgBytes[pos:min(pos+size, len(msgBytes))]) if result != nil { break } pos += sent } } } if result != nil { connectionLogger.Error("-> FAILED SENDING "+msg.String()+": %v", result.Error()) } else { connectionLogger.Debug("-> " + msg.String()) } return result } func min(a, b int) int { if a < b { return a } return b } func (v *connection) handshake() error { if v.connURL.User == nil && len(v.oauthaccesstoken) == 0 { return fmt.Errorf("connection string must include a user name or oauth_access_token") } userName := v.connURL.User.Username() if len(userName) == 0 && len(v.oauthaccesstoken) == 0 { return fmt.Errorf("connection string must have a non-empty user name or oauth_access_token") } dbName := "" if len(v.connURL.Path) > 1 { dbName = v.connURL.Path[1:] } msg := &msgs.FEStartupMsg{ ProtocolVersion: protocolVersion, DriverName: driverName, DriverVersion: driverVersion, Username: userName, Database: dbName, SessionID: v.sessionID, ClientPID: v.clientPID, Autocommit: v.autocommit, OAuthAccessToken: v.oauthaccesstoken, } if err := v.sendMessage(msg); err != nil { return err } for { bMsg, err := v.recvMessage() if err != nil { return err } switch msg := bMsg.(type) { case *msgs.BEErrorMsg: return errorMsgToVError(msg) case *msgs.BEReadyForQueryMsg: v.transactionState = msg.TransactionState return nil case *msgs.BEParamStatusMsg: v.parameters[msg.ParamName] = msg.ParamValue case *msgs.BEKeyDataMsg: v.backendPID = msg.BackendPID v.cancelKey = msg.CancelKey default: _, err = v.defaultMessageHandler(msg) if err != nil { return err } } } } // We have to be tricky here since we're inside of a connection, but trying to use interfaces of the // driver class. func (v *connection) initializeSession() error { stmt, err := newStmt(v, "select now()::timestamptz") if err != nil { return err } result, err := stmt.QueryContextRaw(context.Background(), []driver.NamedValue{}) if err != nil { return err } firstRow := result.resultData.Peek() if len(result.Columns()) != 1 || result.Columns()[0] != "now" || firstRow == nil { return fmt.Errorf("unable to initialize session; functionality may be unreliable") } // Peek into the results manually. 
colData := firstRow.Columns() str := string(colData.Chunk()) if len(str) < 23 { return fmt.Errorf("can't get server timezone: %s", str) } v.serverTZOffset = str[len(str)-3:] connectionLogger.Debug("Setting server timezone offset to %s", str[len(str)-3:]) return nil } func (v *connection) defaultMessageHandler(bMsg msgs.BackEndMsg) (bool, error) { handled := true var err error = nil switch msg := bMsg.(type) { case *msgs.BEAuthenticationMsg: switch msg.Response { case common.AuthenticationOK: break case common.AuthenticationCleartextPassword: err = v.authSendPlainTextPassword() case common.AuthenticationMD5Password: err = v.authSendMD5Password(msg.ExtraAuthData) case common.AuthenticationSHA512Password: err = v.authSendSHA512Password(msg.ExtraAuthData) case common.AuthenticationOAuth: err = v.authSendOAuthAccessToken() default: handled = false err = fmt.Errorf("unsupported authentication scheme: %d", msg.Response) } case *msgs.BENoticeMsg: break case *msgs.BEParamStatusMsg: connectionLogger.Debug("%v", msg) default: handled = false err = fmt.Errorf("unhandled message: %v", msg) connectionLogger.Warn("%v", err) } return handled, err } func (v *connection) readAll(buf []byte) error { readIndex := 0 for { bytesRead, err := v.conn.Read(buf[readIndex:]) if err != nil { return err } readIndex += bytesRead if readIndex == len(buf) { return nil } } } func (v *connection) balanceLoad() error { v.sendMessage(&msgs.FELoadBalanceMsg{}) response := v.scratch[:1] var err error if err = v.readAll(response); err != nil { return err } if response[0] == 'N' { // keep existing connection connectionLogger.Debug("<- LoadBalanceResponse: N") connectionLogger.Warn("Load balancing requested but not supported by server") return nil } if response[0] != 'Y' { connectionLogger.Debug("<- LoadBalanceResponse: %c", response[0]) return fmt.Errorf("Load balancing request gave unknown response: %c", response[0]) } header := v.scratch[1:5] if err = v.readAll(header); err != nil { return err } msgSize := int(binary.BigEndian.Uint32(header) - 4) msgBytes := v.scratch[5:] var y []byte if msgSize > 0 { if msgSize <= len(msgBytes) { y = msgBytes[:msgSize] } else { y = make([]byte, msgSize) } if err = v.readAll(y); err != nil { return err } } bem, err := msgs.CreateBackEndMsg(response[0], y) if err != nil { return err } connectionLogger.Debug("<- " + bem.String()) msg := bem.(*msgs.BELoadBalanceMsg) // v.connURL.Hostname() is used by initializeSSL(), so load balancing info should not write into v.connURL loadBalanceAddr := fmt.Sprintf("%s:%d", msg.Host, msg.Port) if v.connHostsList[0] == loadBalanceAddr { // Already connecting to the host return nil } // Push the new host onto the host list before connecting again. // Note that this leaves the originally-specified host as the first failover possibility v.connHostsList = append([]string{loadBalanceAddr}, v.connHostsList...) 
// Connect to new host v.conn.Close() v.conn, err = v.establishSocketConnection() if err != nil { return fmt.Errorf("cannot redirect to %s (%s)", loadBalanceAddr, err.Error()) } return nil } func (v *connection) initializeSSL(sslFlag string) error { v.sendMessage(&msgs.FESSLMsg{}) buf := v.scratch[:1] err := v.readAll(buf) if err != nil { return err } if buf[0] == 'N' { return fmt.Errorf("SSL/TLS is not enabled on this server") } if buf[0] != 'S' { return fmt.Errorf("SSL/TLS probe gave unknown response: %c", buf[0]) } switch sslFlag { case tlsModeServer: connectionLogger.Info("enabling SSL/TLS server mode") v.conn = tls.Client(v.conn, &tls.Config{InsecureSkipVerify: true}) case tlsModeServerStrict: connectionLogger.Info("enabling SSL/TLS server strict mode") v.conn = tls.Client(v.conn, &tls.Config{ServerName: v.connURL.Hostname()}) default: // Custom mode is used for mutual ssl mode connectionLogger.Info("enabling SSL/TLS custom mode") config, ok := tlsConfigs.get(sslFlag) if !ok { err := fmt.Errorf("tls config %s not registered. See 'Using custom TLS config' in the README.md file", sslFlag) connectionLogger.Error(err.Error()) return err } v.conn = tls.Client(v.conn, config) return nil } return nil } func (v *connection) authSendPlainTextPassword() error { passwd, isSet := v.connURL.User.Password() if !isSet { passwd = "" } msg := &msgs.FEPasswordMsg{PasswordData: passwd} return v.sendMessage(msg) } func (v *connection) authSendMD5Password(extraAuthData []byte) error { passwd, isSet := v.connURL.User.Password() if !isSet { passwd = "" } hash1 := fmt.Sprintf("%x", md5.Sum([]byte(passwd+v.connURL.User.Username()))) hash2 := fmt.Sprintf("md5%x", md5.Sum(append([]byte(hash1), extraAuthData[0:4]...))) msg := &msgs.FEPasswordMsg{PasswordData: hash2} return v.sendMessage(msg) } func (v *connection) authSendSHA512Password(extraAuthData []byte) error { passwd, isSet := v.connURL.User.Password() if !isSet { passwd = "" } hash1 := fmt.Sprintf("%x", sha512.Sum512(append([]byte(passwd), extraAuthData[8:]...))) hash2 := fmt.Sprintf("sha512%x", sha512.Sum512(append([]byte(hash1), extraAuthData[0:4]...))) msg := &msgs.FEPasswordMsg{PasswordData: hash2} return v.sendMessage(msg) } func (v *connection) authSendOAuthAccessToken() error { msg := &msgs.FEPasswordMsg{PasswordData: v.oauthaccesstoken} return v.sendMessage(msg) } func (v *connection) sync() error { err := v.sendMessage(&msgs.FESyncMsg{}) if err != nil { return err } for true { bem, err := v.recvMessage() if err != nil { return err } _, ok := bem.(*msgs.BEReadyForQueryMsg) if ok { break } _, _ = v.defaultMessageHandler(bem) } return nil } func (v *connection) lockSessionMutex() { v.sessMutex.Lock() } func (v *connection) unlockSessionMutex() { v.sessMutex.Unlock() }
random_line_split
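The tlsmode handling in this record supports custom configs via RegisterTLSConfig. Below is a minimal sketch of wiring one up, assuming a CA bundle on disk; the file path, DSN, and the import alias vertigo (the package name declared in connection.go) are illustrative, not prescribed by the dataset.

package main

import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"log"
	"os"

	vertigo "github.com/vertica/vertica-sql-go"
)

func main() {
	// Hypothetical CA bundle path.
	pem, err := os.ReadFile("/etc/ssl/certs/vertica-ca.pem")
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pem) {
		log.Fatal("no usable certificates in CA bundle")
	}

	// Any name except the reserved "server", "server-strict", and "none" is accepted.
	if err := vertigo.RegisterTLSConfig("custom", &tls.Config{RootCAs: pool}); err != nil {
		log.Fatal(err)
	}

	// tlsmode selects the registered config by name; initializeSSL falls through
	// to the registered config for any non-reserved mode.
	db, err := sql.Open("vertica", "vertica://dbadmin:secret@localhost:5433/testdb?tlsmode=custom")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}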
gensynet.py
#!/usr/bin/env python3 # # Generate Synthetic Networks # First Version: 8/3/2017 # # An interactive script that generates a JSON file that can be used for # creating imaginary (enterprise) network profiles # # INTERNAL USE ONLY; i.e., user input validation is nearly non-existent. # # Cyber Reboot # [email protected] # import argparse from datetime import datetime as dt import ipaddress import json import math from random import * import string import sys import time import uuid VERBOSE = False NET_SUMMARY = False VERSION = '0.81' DEBUG = False OLDVERSION = False def randstring(size): return ''.join(choice(string.ascii_lowercase + string.digits) for _ in range(size)) def divide(dividend, divisor): quotient = math.floor(dividend/divisor) remainder = dividend % divisor return (quotient, remainder) # you'll want to make sure that prefix is some string that # is prefixed by some number. def generate_ip(prefix, octets=4): ip = prefix if prefix[len(prefix)-1] != '.': prefix = prefix + '.' subn = 4 - prefix.count('.') if (subn > 0): ip = prefix + '.'.join(str(randint(1,252)) for _ in range(subn)) return ip def generate_uuid(): return str(uuid.uuid4()) def generate_fqdn(domain=None, subdomains=0): if domain is None: domain = randstring(randint(5,10)) + '.local' if subdomains == 0: return domain else: hostname = domain while (subdomains > 0): hostname = randstring(randint(3,5)) + '.' + hostname subdomains -= 1 return hostname def generate_os_type(devicetype): if ( devicetype == 'Business workstation' or devicetype == 'Developer workstation' or devicetype == 'Mail server' or devicetype == 'File server' or devicetype == 'Internal web server' or devicetype == 'Database server' or devicetype == 'Code repository' or devicetype == 'SSH server'): return choice(['Windows', 'Linux', 'Mac OS X', 'BSD']) elif devicetype == 'Smartphone': return choice(['iOS', 'Android', 'Blackberry', 'Unknown']) elif devicetype == 'DNS server': return choice(['Windows', 'Linux', 'Mac OS X', 'BSD', 'Cisco IOS']) elif ( devicetype == 'Printer' or devicetype == 'PBX'): return choice(['Linux', 'Unknown', 'Windows']) elif devicetype == 'DHCP server': return choice(['Linux', 'Unknown', 'Windows', 'BSD', 'Cisco IOS']) elif devicetype == 'Active Directory controller': return choice(['Unknown', 'Windows']) elif devicetype == 'VOIP phone': return choice(['Linux', 'Windows', 'Unknown']) elif devicetype == 'Unknown': return 'Unknown' return 'Unknown' def generate_mac(): mac = ':'.join(str(hex(randint(0,15))) + str(hex(randint(0,15))) for _ in range(6)) return mac.replace('0x', '') def record(records=None): records = [ 'p0f', 'nmap', 'BCF'] return choice(records) def calculate_subnets(total, breakdown): """Returns number of subnets, given the breakdown; or -1 if breakdown doesn't work.""" sanity_percent = 0 # if this isn't 100% by the end, we got issues. subnets = 0 for nodep, netp in breakdown: sanity_percent += nodep if (sanity_percent > 100): return -1 subtotal = int(total * .01 * nodep) groupby = int(254 * .01 *netp) subnets += math.ceil(subtotal/groupby) if (sanity_percent < 100):
return subnets def get_default_dev_distro(nodect, printout=True): """Prints device type breakdowns using default ratios and returns a count of each device.""" if (printout): print("Default Device Role Distribution for {} nodes".format(nodect)) dev_breakdown = { 'Business workstation': int(math.floor(nodect*.35)), 'Developer workstation': int(math.floor(nodect*.15)), 'Smartphone': int(math.floor(nodect*.28)), 'Printer': int(math.floor(nodect*.03)), 'Mail server': int(math.floor(nodect*.01)), 'File server': int(math.floor(nodect*.02)), 'Internal web server': int(math.floor(nodect*.06)), 'Database server': int(math.floor(nodect*.01)), 'Code repository': int(math.floor(nodect*.01)), 'DNS server': int(math.floor(nodect*.01)), 'DHCP server': int(math.floor(nodect*.01)), 'Active Directory controller': int(math.floor(nodect*.01)), 'SSH server': int(math.floor(nodect*.01)), 'VOIP phone': 0, 'PBX': 0, 'Unknown': int(math.floor(nodect*.04)) } # any nodes left over gets put into Unknown total = 0 for key, ct in sorted(dev_breakdown.items()): if (printout and key != 'Unknown'): print(" {:>30} : {}".format(key, ct)) total += ct if (nodect > total): dev_breakdown['Unknown'] += (nodect - total) if (printout): print(" {:>30} : {}".format('Unknown', dev_breakdown['Unknown'])) return dev_breakdown def build_configs(subnets, host_count, dev_div, domain=None): """Returns a json object of subnet specifications, or None upon error""" global VERBOSE jsons = [] # subnet breakdown unlabeled_hosts = [] # number of hosts in the network w/o roles ip_addr = [] # keeping track of the 2nd and 3rd octets in IP roles = dict.fromkeys(dev_div.keys(), 0) if len(subnets)/254 > 254: print("WARNING: You're about to see some really sick IPs. Have fun.") for n in subnets: addy = (randint(0,253), randint(0,253)) while addy in ip_addr: addy = (randint(0,253), randint(0,253)) ip_addr.append(addy) jsons.append({ "start_ip" : '10.{}.{}.2'.format(addy[0],addy[1]), "subnet" : '10.{}.{}.0/24'.format(addy[0], addy[1]), "hosts" : n, "roles" : roles.copy() }) unlabeled_hosts.append(n) if VERBOSE: print("start_ip: {}\t number of hosts: {}\t".format(jsons[-1]['start_ip'], jsons[-1]['hosts'])) # divvy up the roles, now that the subnets are defined labeled_hosts = 0 for dev in dev_div: dev_total = dev_div[dev] labeled_hosts += dev_total while dev_total > 0: while True: n = randrange(0, len(subnets)) if (unlabeled_hosts[n] > 0): jsons[n]['roles'][dev] += 1 unlabeled_hosts[n] -= 1 break dev_total -= 1 if labeled_hosts != host_count: print("WARNING: Labeled hosts ({}) didn't equal host count ({})".format(labeled_hosts, host_count)) return jsons def build_configs_deprecated(total, net_div, dev_div, domain=None): """Returns a json object of subnet specifications, or None upon error""" global VERBOSE total_subnets = calculate_subnets(total, net_div) if total_subnets < 1: if VERBOSE: print("WARNING: Could not break down nodes into the requested subnets.") return None jsons = [] host_counter = [] ncount = 0 roles = dict.fromkeys(dev_div.keys(), 0) class_b,class_c = divide(total_subnets, 254) for n in net_div: if VERBOSE: ncount += 1 print("Starting net_div {} of {}".format(ncount, len(net_div))) nodes = round(total * .01 * n[0]) grouped_nodes = round(252 * .01 * n[1]) q,r = divide(nodes, grouped_nodes) if class_b > 254: print("WARNING: You're about to see some really sick IPs. 
Have fun.") while q > 0: if class_c == 0: class_b -= 1 class_c = 255 class_c -= 1 start_ip = '10.{}.{}.1'.format(class_b, class_c) netmask = '10.{}.{}.0/24'.format(class_b, class_c) jsons.append({ "start_ip" : start_ip, "subnet" : netmask, "hosts" : grouped_nodes, "roles" : roles.copy() }) host_counter.append(grouped_nodes) if VERBOSE: print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), grouped_nodes, start_ip)) q -= 1 if r > 0: if class_c == 0: class_b -= 1 class_c = 255 class_c -= 1 start_ip = '10.{}.{}.1'.format(class_b, class_c) netmask = '10.{}.{}.0/24'.format(class_b,class_c) jsons.append({ "start_ip" : start_ip, "subnet" : netmask, "hosts" : r, "roles" : roles.copy() }) host_counter.append(r) if VERBOSE: print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), r, start_ip)) if len(jsons) != total_subnets: print("BUG: Number of subnets created not equal to predicted {}".format(total_subnets)) if DEBUG: print("DEBUG: host_counter = {}\ttotal subnets = {}".format(host_counter, total_subnets)) total_hosts = 0 for dev in dev_div: ct = dev_div[dev] total_hosts += ct if (DEBUG): print("DEBUG: dev = {}\tcount = {}\ttotal = {}\thost_counter = {}".format(dev, dev_div[dev], total_hosts, host_counter)) while ct > 0: randomnet = randrange(0, total_subnets) if host_counter[randomnet] > 0: jsons[randomnet]['roles'][dev] += 1 host_counter[randomnet] -= 1 ct -= 1 if total_hosts != total: print("BUG: Number of devices in breakdown did not add up to {}".format(total)) return jsons def randomize_subnet_breakdown(count, minimum, maximum): '''Returns an array of host counts (where index = subnet), or None if the input is ridiculous.''' subnets = [] nodes_left = count # I mean, this is tested for very large values of count; I haven't tested very small numbers yet. if count <= 0 or minimum > count or maximum > count or minimum < 0 or maximum < 0 or maximum <= minimum: return None # break count into subnets until count = 0 or < min while (nodes_left > 0): clients = randint(minimum, maximum) subnets.append(clients) nodes_left -= clients if DEBUG: print("DEBUG: subnet count: {}\tnodes left: {}".format(clients, nodes_left)) if minimum < nodes_left < maximum: subnets.append(nodes_left) nodes_left = 0 elif nodes_left < minimum: # i.e., if all the subnets are maxed out but don't add up to the requested count, # then start all over again, cuz there won't be any way to honor min/max requirement. 
if (len(subnets) * maximum < count): subnets.clear() nodes_left = count else: break # divvy up the rest of the nodes among the existing subnets subnetIDs = [x for x in iter(range(len(subnets)))] while (nodes_left > 0): s = choice(subnetIDs) # pick a randum subnet if DEBUG: print("DEBUG: looping with s={}, count={}, left={}".format(s, subnets[s], nodes_left)) if subnets[s] < maximum: subnets[s] += 1 nodes_left -= 1 else: subnetIDs.remove(s) return subnets def build_network(subnets, fname=None, randomspace=False, prettyprint=True): global VERBOSE outobj = [] subnets_togo = len(subnets) for n in subnets: start_ip = ipaddress.ip_address(n['start_ip']) role_ct = dict(n['roles']) hosts_togo = n['hosts'] ip_taken = [] subnets_togo -= 1 while (hosts_togo > 0): host = { 'uid':generate_uuid(), 'mac':generate_mac(), 'rDNS_host':randstring(randrange(4,9)), 'subnet':n['subnet'] } if 'domain' in n: host['rDNS_domain'] = n['domain'] host['record'] = { 'source':record(), 'timestamp': str(dt.now()) } while True: a_role = choice(list(role_ct.keys())) if role_ct[a_role] > 0: role_ct[a_role] -= 1 host['role'] = { 'role': a_role, 'confidence': randrange(55,100) } break else: del(role_ct[a_role]) host['os'] = { 'os': generate_os_type(host['role']['role']) } if host['os']['os'] != 'Unknown': host['os']['confidence'] = randrange(55,100) if (randomspace): while True: ip = start_ip + randrange(0, 254) if ip not in ip_taken: host['IP'] = str(ip) ip_taken.append(ip) break else: ip = start_ip + hosts_togo host['IP'] = str(ip) outobj.append(host) hosts_togo -= 1 indent = 2 if prettyprint else None if fname: with open(fname, 'w') as ofile: ofile.write("{}".format(json.dumps(outobj, indent=indent))) else: return json.dumps(outobj, indent=indent) def main(): global VERBOSE, VERSION, NET_SUMMARY, OLDVERSION parser = argparse.ArgumentParser() parser.add_argument('-v', '--verbose', help='Provide program feedback', action="store_true") parser.add_argument('-s', '--summarize', help='Prints network configurations to output', action="store_true") parser.add_argument('-d', '--deprecate', help='Use the deprecated version for building subnets', action='store_true') parser.add_argument('--version', help='Prints version', action="store_true") args = parser.parse_args() if args.version: print("{} v{}".format(sys.argv[0], VERSION)) sys.exit() if args.verbose: VERBOSE = True if args.summarize: NET_SUMMARY = True if args.deprecate: OLDVERSION = True outname = '{}.json'.format(time.strftime("%Y%m%d-%H%M%S")) print('\n\n\tSYNTHETIC NETWORK NODE GENERATOR\n') while True: nodect = int(input("How many network nodes? [500]: ") or "500") if nodect > 4000000: print("That ({}) is just exorbitant. Next time try less than {}.".format(nodect, 4000000)) sys.exit() # setting subnet breakdown ---------------- if OLDVERSION: if (nodect > 50): print('Default Node distribution of {} nodes across Class C subnets: '.format(nodect)) print(' 30% of the nodes will occupy subnets that are 70% populated') print(' 45% of the nodes will occupy subnets that are 20% populated') print(' 25% of the nodes will occupy subnets that are 90% populated') net_breakdown = [(30,70), (45,20), (25,90)] print('Total subnets: {}'.format(calculate_subnets(nodect, net_breakdown))) set_net = input("Manually set network node distribution? 
[No]: ") or "No" else: set_net = "No" net_breakdown = [(100, 100)] print('Total subnets: 1') if (set_net.lower() != 'no' and set_net.lower() != 'n'): net_breakdown = [] percent = 100 print("Please enter what percentage of the {} nodes would consume what percentage".format(nodect)) print("of the Class C address space...") while percent > 0: nodes = int(input(" Percent of nodes (MAX={}): ".format(percent)) or "100") density = int(input(" Percent of class C space occupied: ") or "100") if (nodes <= 100 and nodes > 1): percent = percent - nodes else: print("Illegal node percentage value ({})".format(nodes)) continue if (density > 100 or density < 1): print("Illegal density percentage value ({})".format(density)) continue net_breakdown.append((nodes, density)) subnets = calculate_subnets(nodect, net_breakdown) print('Total subnets: {}'.format(subnets)) else: MAX_max = MAX_min = -1 while True: subnets = [] if nodect <= 252: subnets.append(nodect) else: if MAX_max == -1: MAX_max = 150 while True: maximum = int(input('Max hosts in subnet (UP TO 252) [{}]: '.format(MAX_max)) or MAX_max) if (maximum < 3 or maximum > 252): print("Illegal 'maximum' value.") else: break if MAX_min == -1 or maximum != MAX_max: MAX_min = 254-maximum while True: minimum = int(input('Min hosts in subnet (UP TO {}) [{}]: '.format(MAX_min, MAX_min)) or MAX_min) if (minimum < 2 or minimum > MAX_min): print("Illegal 'minimum' value.") else: break MAX_min = minimum MAX_max = maximum subnets = randomize_subnet_breakdown(nodect, minimum, maximum) for i, _ in enumerate(subnets): print('\tSubnet #{} has {} hosts.'.format(i, subnets[i])) if (nodect > 252): subnets_finished = input("Is this breakout of subnets OK? [Yes]: ") or "Yes" if subnets_finished.lower() == 'yes' or subnets_finished.lower() == 'y': break else: break # setting device breakdown ---------------- dev_breakdown = get_default_dev_distro(nodect) dev_distr = input("Manually reset the above Device Role Distribution? [No]: ") or "No" if (dev_distr.lower() != 'no' and dev_distr.lower() != 'n'): remainder = nodect for category in sorted(dev_breakdown.keys()): if (remainder == 0): dev_breakdown[category] = 0 continue category_count = dev_breakdown[category] while (remainder > 0): if (remainder < category_count): category_count = remainder category_count = int(input(" {} (MAX={}) [{}]: ".format(category, remainder, category_count)) or category_count) remainder -= category_count if (remainder < 0 or category_count < 0): print("Illegal value '{}'".format(category_count)) remainder += category_count else: dev_breakdown[category] = category_count break; if (remainder > 0): dev_breakdown['Unknown'] += remainder domain = input("Domain name to use (press ENTER to auto-generate): ") or generate_fqdn() randomize = input("Randomize IP addresses in subnet? [Yes]: ") or "Yes" cont = input("Ready to generate json (No to start over)? [Yes]: ") or "Yes" if cont.lower() == 'yes' or cont.lower() == 'y': break if OLDVERSION: net_configs = build_configs_deprecated(nodect, net_breakdown, dev_breakdown, domain) else: net_configs = build_configs(subnets, nodect, dev_breakdown, domain) if NET_SUMMARY or VERBOSE: print("\nBased on the following config:\n") print(json.dumps(net_configs, indent=4)) print("\nSaved network profile to {}".format(outname)) else: print("\n Saved network profile to {}".format(outname)) if randomize.lower() == 'yes' or randomize.lower() == 'y': build_network(net_configs, outname, randomspace=True) else: build_network(net_configs, outname) if __name__ == "__main__": main()
return -1
conditional_block
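To make the conditional above concrete: calculate_subnets returns -1 when the node percentages overshoot or undershoot 100, and otherwise sums a ceiling division per tier. A small worked example of that arithmetic, written standalone so it runs without the script (the 500-node figure and the default breakdown come from main() above):

import math

breakdown = [(30, 70), (45, 20), (25, 90)]  # (percent of nodes, percent of class C density)
total = 500

subnets = 0
for nodep, netp in breakdown:
    subtotal = int(total * .01 * nodep)  # nodes in this tier
    groupby = int(254 * .01 * netp)      # hosts packed into each subnet
    tier = math.ceil(subtotal / groupby)
    subnets += tier
    print("{} nodes at {} hosts/subnet -> {} subnet(s)".format(subtotal, groupby, tier))

print("total subnets:", subnets)  # 150/177 -> 1, 225/50 -> 5, 125/228 -> 1, so 7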
gensynet.py
#!/usr/bin/env python3 # # Generate Synthetic Networks # First Version: 8/3/2017 # # An interactive script that generates a JSON file that can be used for # creating imaginary (enterprise) network profiles # # INTERNAL USE ONLY; i.e., user input validation is nearly non-existent. # # Cyber Reboot # [email protected] # import argparse from datetime import datetime as dt import ipaddress import json import math from random import * import string import sys import time import uuid VERBOSE = False NET_SUMMARY = False VERSION = '0.81' DEBUG = False OLDVERSION = False def randstring(size): return ''.join(choice(string.ascii_lowercase + string.digits) for _ in range(size)) def divide(dividend, divisor): quotient = math.floor(dividend/divisor) remainder = dividend % divisor return (quotient, remainder) # you'll want to make sure that prefix is some string that # is prefixed by some number. def generate_ip(prefix, octets=4): ip = prefix if prefix[len(prefix)-1] != '.': prefix = prefix + '.' subn = 4 - prefix.count('.') if (subn > 0): ip = prefix + '.'.join(str(randint(1,252)) for _ in range(subn)) return ip def generate_uuid(): return str(uuid.uuid4()) def generate_fqdn(domain=None, subdomains=0): if domain is None: domain = randstring(randint(5,10)) + '.local' if subdomains == 0: return domain else: hostname = domain while (subdomains > 0): hostname = randstring(randint(3,5)) + '.' + hostname subdomains -= 1 return hostname def generate_os_type(devicetype): if ( devicetype == 'Business workstation' or devicetype == 'Developer workstation' or devicetype == 'Mail server' or devicetype == 'File server' or devicetype == 'Internal web server' or devicetype == 'Database server' or devicetype == 'Code repository' or devicetype == 'SSH server'): return choice(['Windows', 'Linux', 'Mac OS X', 'BSD']) elif devicetype == 'Smartphone': return choice(['iOS', 'Android', 'Blackberry', 'Unknown']) elif devicetype == 'DNS server': return choice(['Windows', 'Linux', 'Mac OS X', 'BSD', 'Cisco IOS']) elif ( devicetype == 'Printer' or devicetype == 'PBX'): return choice(['Linux', 'Unknown', 'Windows']) elif devicetype == 'DHCP server': return choice(['Linux', 'Unknown', 'Windows', 'BSD', 'Cisco IOS']) elif devicetype == 'Active Directory controller': return choice(['Unknown', 'Windows']) elif devicetype == 'VOIP phone': return choice(['Linux', 'Windows', 'Unknown']) elif devicetype == 'Unknown': return 'Unknown' return 'Unknown' def generate_mac(): mac = ':'.join(str(hex(randint(0,15))) + str(hex(randint(0,15))) for _ in range(6)) return mac.replace('0x', '') def record(records=None): records = [ 'p0f', 'nmap', 'BCF'] return choice(records) def calculate_subnets(total, breakdown): """Returns number of subnets, given the breakdown; or -1 if breakdown doesn't work.""" sanity_percent = 0 # if this isn't 100% by the end, we got issues. 
subnets = 0 for nodep, netp in breakdown: sanity_percent += nodep if (sanity_percent > 100): return -1 subtotal = int(total * .01 * nodep) groupby = int(254 * .01 *netp) subnets += math.ceil(subtotal/groupby) if (sanity_percent < 100): return -1 return subnets def get_default_dev_distro(nodect, printout=True): """Prints device type breakdowns using default ratios and returns a count of each device.""" if (printout): print("Default Device Role Distribution for {} nodes".format(nodect)) dev_breakdown = { 'Business workstation': int(math.floor(nodect*.35)), 'Developer workstation': int(math.floor(nodect*.15)), 'Smartphone': int(math.floor(nodect*.28)), 'Printer': int(math.floor(nodect*.03)), 'Mail server': int(math.floor(nodect*.01)), 'File server': int(math.floor(nodect*.02)), 'Internal web server': int(math.floor(nodect*.06)), 'Database server': int(math.floor(nodect*.01)), 'Code repository': int(math.floor(nodect*.01)), 'DNS server': int(math.floor(nodect*.01)), 'DHCP server': int(math.floor(nodect*.01)), 'Active Directory controller': int(math.floor(nodect*.01)), 'SSH server': int(math.floor(nodect*.01)), 'VOIP phone': 0, 'PBX': 0, 'Unknown': int(math.floor(nodect*.04)) } # any nodes left over gets put into Unknown total = 0 for key, ct in sorted(dev_breakdown.items()): if (printout and key != 'Unknown'): print(" {:>30} : {}".format(key, ct)) total += ct if (nodect > total): dev_breakdown['Unknown'] += (nodect - total) if (printout): print(" {:>30} : {}".format('Unknown', dev_breakdown['Unknown'])) return dev_breakdown def build_configs(subnets, host_count, dev_div, domain=None): """Returns a json object of subnet specifications, or None upon error""" global VERBOSE jsons = [] # subnet breakdown unlabeled_hosts = [] # number of hosts in the network w/o roles ip_addr = [] # keeping track of the 2nd and 3rd octets in IP roles = dict.fromkeys(dev_div.keys(), 0) if len(subnets)/254 > 254: print("WARNING: You're about to see some really sick IPs. 
Have fun.") for n in subnets: addy = (randint(0,253), randint(0,253)) while addy in ip_addr: addy = (randint(0,253), randint(0,253)) ip_addr.append(addy) jsons.append({ "start_ip" : '10.{}.{}.2'.format(addy[0],addy[1]), "subnet" : '10.{}.{}.0/24'.format(addy[0], addy[1]), "hosts" : n, "roles" : roles.copy() }) unlabeled_hosts.append(n) if VERBOSE: print("start_ip: {}\t number of hosts: {}\t".format(jsons[-1]['start_ip'], jsons[-1]['hosts'])) # divvy up the roles, now that the subnets are defined labeled_hosts = 0 for dev in dev_div: dev_total = dev_div[dev] labeled_hosts += dev_total while dev_total > 0: while True: n = randrange(0, len(subnets)) if (unlabeled_hosts[n] > 0): jsons[n]['roles'][dev] += 1 unlabeled_hosts[n] -= 1 break dev_total -= 1 if labeled_hosts != host_count: print("WARNING: Labeled hosts ({}) didn't equal host count ({})".format(labeled_hosts, host_count)) return jsons def build_configs_deprecated(total, net_div, dev_div, domain=None): """Returns a json object of subnet specifications, or None upon error""" global VERBOSE total_subnets = calculate_subnets(total, net_div) if total_subnets < 1: if VERBOSE: print("WARNING: Could not break down nodes into the requested subnets.") return None jsons = [] host_counter = [] ncount = 0 roles = dict.fromkeys(dev_div.keys(), 0) class_b,class_c = divide(total_subnets, 254) for n in net_div: if VERBOSE: ncount += 1 print("Starting net_div {} of {}".format(ncount, len(net_div))) nodes = round(total * .01 * n[0]) grouped_nodes = round(252 * .01 * n[1]) q,r = divide(nodes, grouped_nodes) if class_b > 254: print("WARNING: You're about to see some really sick IPs. Have fun.") while q > 0: if class_c == 0: class_b -= 1 class_c = 255 class_c -= 1 start_ip = '10.{}.{}.1'.format(class_b, class_c) netmask = '10.{}.{}.0/24'.format(class_b, class_c) jsons.append({ "start_ip" : start_ip, "subnet" : netmask, "hosts" : grouped_nodes, "roles" : roles.copy() }) host_counter.append(grouped_nodes) if VERBOSE: print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), grouped_nodes, start_ip)) q -= 1 if r > 0: if class_c == 0: class_b -= 1 class_c = 255 class_c -= 1 start_ip = '10.{}.{}.1'.format(class_b, class_c) netmask = '10.{}.{}.0/24'.format(class_b,class_c) jsons.append({ "start_ip" : start_ip, "subnet" : netmask, "hosts" : r, "roles" : roles.copy() }) host_counter.append(r) if VERBOSE: print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), r, start_ip)) if len(jsons) != total_subnets: print("BUG: Number of subnets created not equal to predicted {}".format(total_subnets)) if DEBUG: print("DEBUG: host_counter = {}\ttotal subnets = {}".format(host_counter, total_subnets)) total_hosts = 0 for dev in dev_div: ct = dev_div[dev] total_hosts += ct if (DEBUG): print("DEBUG: dev = {}\tcount = {}\ttotal = {}\thost_counter = {}".format(dev, dev_div[dev], total_hosts, host_counter)) while ct > 0: randomnet = randrange(0, total_subnets) if host_counter[randomnet] > 0: jsons[randomnet]['roles'][dev] += 1 host_counter[randomnet] -= 1 ct -= 1 if total_hosts != total: print("BUG: Number of devices in breakdown did not add up to {}".format(total)) return jsons def
(count, minimum, maximum): '''Returns an array of host counts (where index = subnet), or None if the input is ridiculous.''' subnets = [] nodes_left = count # I mean, this is tested for very large values of count; I haven't tested very small numbers yet. if count <= 0 or minimum > count or maximum > count or minimum < 0 or maximum < 0 or maximum <= minimum: return None # break count into subnets until count = 0 or < min while (nodes_left > 0): clients = randint(minimum, maximum) subnets.append(clients) nodes_left -= clients if DEBUG: print("DEBUG: subnet count: {}\tnodes left: {}".format(clients, nodes_left)) if minimum < nodes_left < maximum: subnets.append(nodes_left) nodes_left = 0 elif nodes_left < minimum: # i.e., if all the subnets are maxed out but don't add up to the requested count, # then start all over again, cuz there won't be any way to honor min/max requirement. if (len(subnets) * maximum < count): subnets.clear() nodes_left = count else: break # divvy up the rest of the nodes among the existing subnets subnetIDs = [x for x in iter(range(len(subnets)))] while (nodes_left > 0): s = choice(subnetIDs) # pick a randum subnet if DEBUG: print("DEBUG: looping with s={}, count={}, left={}".format(s, subnets[s], nodes_left)) if subnets[s] < maximum: subnets[s] += 1 nodes_left -= 1 else: subnetIDs.remove(s) return subnets def build_network(subnets, fname=None, randomspace=False, prettyprint=True): global VERBOSE outobj = [] subnets_togo = len(subnets) for n in subnets: start_ip = ipaddress.ip_address(n['start_ip']) role_ct = dict(n['roles']) hosts_togo = n['hosts'] ip_taken = [] subnets_togo -= 1 while (hosts_togo > 0): host = { 'uid':generate_uuid(), 'mac':generate_mac(), 'rDNS_host':randstring(randrange(4,9)), 'subnet':n['subnet'] } if 'domain' in n: host['rDNS_domain'] = n['domain'] host['record'] = { 'source':record(), 'timestamp': str(dt.now()) } while True: a_role = choice(list(role_ct.keys())) if role_ct[a_role] > 0: role_ct[a_role] -= 1 host['role'] = { 'role': a_role, 'confidence': randrange(55,100) } break else: del(role_ct[a_role]) host['os'] = { 'os': generate_os_type(host['role']['role']) } if host['os']['os'] != 'Unknown': host['os']['confidence'] = randrange(55,100) if (randomspace): while True: ip = start_ip + randrange(0, 254) if ip not in ip_taken: host['IP'] = str(ip) ip_taken.append(ip) break else: ip = start_ip + hosts_togo host['IP'] = str(ip) outobj.append(host) hosts_togo -= 1 indent = 2 if prettyprint else None if fname: with open(fname, 'w') as ofile: ofile.write("{}".format(json.dumps(outobj, indent=indent))) else: return json.dumps(outobj, indent=indent) def main(): global VERBOSE, VERSION, NET_SUMMARY, OLDVERSION parser = argparse.ArgumentParser() parser.add_argument('-v', '--verbose', help='Provide program feedback', action="store_true") parser.add_argument('-s', '--summarize', help='Prints network configurations to output', action="store_true") parser.add_argument('-d', '--deprecate', help='Use the deprecated version for building subnets', action='store_true') parser.add_argument('--version', help='Prints version', action="store_true") args = parser.parse_args() if args.version: print("{} v{}".format(sys.argv[0], VERSION)) sys.exit() if args.verbose: VERBOSE = True if args.summarize: NET_SUMMARY = True if args.deprecate: OLDVERSION = True outname = '{}.json'.format(time.strftime("%Y%m%d-%H%M%S")) print('\n\n\tSYNTHETIC NETWORK NODE GENERATOR\n') while True: nodect = int(input("How many network nodes? 
[500]: ") or "500") if nodect > 4000000: print("That ({}) is just exorbitant. Next time try less than {}.".format(nodect, 4000000)) sys.exit() # setting subnet breakdown ---------------- if OLDVERSION: if (nodect > 50): print('Default Node distribution of {} nodes across Class C subnets: '.format(nodect)) print(' 30% of the nodes will occupy subnets that are 70% populated') print(' 45% of the nodes will occupy subnets that are 20% populated') print(' 25% of the nodes will occupy subnets that are 90% populated') net_breakdown = [(30,70), (45,20), (25,90)] print('Total subnets: {}'.format(calculate_subnets(nodect, net_breakdown))) set_net = input("Manually set network node distribution? [No]: ") or "No" else: set_net = "No" net_breakdown = [(100, 100)] print('Total subnets: 1') if (set_net.lower() != 'no' and set_net.lower() != 'n'): net_breakdown = [] percent = 100 print("Please enter what percentage of the {} nodes would consume what percentage".format(nodect)) print("of the Class C address space...") while percent > 0: nodes = int(input(" Percent of nodes (MAX={}): ".format(percent)) or "100") density = int(input(" Percent of class C space occupied: ") or "100") if (nodes <= 100 and nodes > 1): percent = percent - nodes else: print("Illegal node percentage value ({})".format(nodes)) continue if (density > 100 or density < 1): print("Illegal density percentage value ({})".format(density)) continue net_breakdown.append((nodes, density)) subnets = calculate_subnets(nodect, net_breakdown) print('Total subnets: {}'.format(subnets)) else: MAX_max = MAX_min = -1 while True: subnets = [] if nodect <= 252: subnets.append(nodect) else: if MAX_max == -1: MAX_max = 150 while True: maximum = int(input('Max hosts in subnet (UP TO 252) [{}]: '.format(MAX_max)) or MAX_max) if (maximum < 3 or maximum > 252): print("Illegal 'maximum' value.") else: break if MAX_min == -1 or maximum != MAX_max: MAX_min = 254-maximum while True: minimum = int(input('Min hosts in subnet (UP TO {}) [{}]: '.format(MAX_min, MAX_min)) or MAX_min) if (minimum < 2 or minimum > MAX_min): print("Illegal 'minimum' value.") else: break MAX_min = minimum MAX_max = maximum subnets = randomize_subnet_breakdown(nodect, minimum, maximum) for i, _ in enumerate(subnets): print('\tSubnet #{} has {} hosts.'.format(i, subnets[i])) if (nodect > 252): subnets_finished = input("Is this breakout of subnets OK? [Yes]: ") or "Yes" if subnets_finished.lower() == 'yes' or subnets_finished.lower() == 'y': break else: break # setting device breakdown ---------------- dev_breakdown = get_default_dev_distro(nodect) dev_distr = input("Manually reset the above Device Role Distribution? [No]: ") or "No" if (dev_distr.lower() != 'no' and dev_distr.lower() != 'n'): remainder = nodect for category in sorted(dev_breakdown.keys()): if (remainder == 0): dev_breakdown[category] = 0 continue category_count = dev_breakdown[category] while (remainder > 0): if (remainder < category_count): category_count = remainder category_count = int(input(" {} (MAX={}) [{}]: ".format(category, remainder, category_count)) or category_count) remainder -= category_count if (remainder < 0 or category_count < 0): print("Illegal value '{}'".format(category_count)) remainder += category_count else: dev_breakdown[category] = category_count break; if (remainder > 0): dev_breakdown['Unknown'] += remainder domain = input("Domain name to use (press ENTER to auto-generate): ") or generate_fqdn() randomize = input("Randomize IP addresses in subnet? 
[Yes]: ") or "Yes" cont = input("Ready to generate json (No to start over)? [Yes]: ") or "Yes" if cont.lower() == 'yes' or cont.lower() == 'y': break if OLDVERSION: net_configs = build_configs_deprecated(nodect, net_breakdown, dev_breakdown, domain) else: net_configs = build_configs(subnets, nodect, dev_breakdown, domain) if NET_SUMMARY or VERBOSE: print("\nBased on the following config:\n") print(json.dumps(net_configs, indent=4)) print("\nSaved network profile to {}".format(outname)) else: print("\n Saved network profile to {}".format(outname)) if randomize.lower() == 'yes' or randomize.lower() == 'y': build_network(net_configs, outname, randomspace=True) else: build_network(net_configs, outname) if __name__ == "__main__": main()
randomize_subnet_breakdown
identifier_name
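A brief usage sketch for the elided identifier, randomize_subnet_breakdown (the middle above). It assumes the script is importable as a module named gensynet, inferred from the file name; the seed and bounds are arbitrary:

from random import seed

from gensynet import randomize_subnet_breakdown  # module name assumed from the file name

seed(42)  # gensynet draws from the global random module, so seeding here makes runs repeatable
subnets = randomize_subnet_breakdown(500, 100, 150)
print(subnets, sum(subnets))  # each entry stays within [100, 150]; the sum normally equals 500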
gensynet.py
#!/usr/bin/env python3 # # Generate Synthetic Networks # First Version: 8/3/2017 # # An interactive script that generates a JSON file that can be used for # creating imaginary (enterprise) network profiles # # INTERNAL USE ONLY; i.e., user input validation is nearly non-existent. # # Cyber Reboot # [email protected] # import argparse from datetime import datetime as dt import ipaddress import json import math from random import * import string import sys import time import uuid VERBOSE = False NET_SUMMARY = False VERSION = '0.81' DEBUG = False OLDVERSION = False def randstring(size): return ''.join(choice(string.ascii_lowercase + string.digits) for _ in range(size)) def divide(dividend, divisor): quotient = math.floor(dividend/divisor) remainder = dividend % divisor return (quotient, remainder) # you'll want to make sure that prefix is some string that # is prefixed by some number. def generate_ip(prefix, octets=4): ip = prefix if prefix[len(prefix)-1] != '.': prefix = prefix + '.' subn = 4 - prefix.count('.') if (subn > 0): ip = prefix + '.'.join(str(randint(1,252)) for _ in range(subn)) return ip def generate_uuid(): return str(uuid.uuid4()) def generate_fqdn(domain=None, subdomains=0): if domain is None: domain = randstring(randint(5,10)) + '.local' if subdomains == 0: return domain else: hostname = domain while (subdomains > 0): hostname = randstring(randint(3,5)) + '.' + hostname subdomains -= 1 return hostname def generate_os_type(devicetype): if ( devicetype == 'Business workstation' or devicetype == 'Developer workstation' or devicetype == 'Mail server' or devicetype == 'File server' or devicetype == 'Internal web server' or devicetype == 'Database server' or devicetype == 'Code repository' or devicetype == 'SSH server'): return choice(['Windows', 'Linux', 'Mac OS X', 'BSD']) elif devicetype == 'Smartphone': return choice(['iOS', 'Android', 'Blackberry', 'Unknown']) elif devicetype == 'DNS server': return choice(['Windows', 'Linux', 'Mac OS X', 'BSD', 'Cisco IOS']) elif ( devicetype == 'Printer' or devicetype == 'PBX'): return choice(['Linux', 'Unknown', 'Windows']) elif devicetype == 'DHCP server': return choice(['Linux', 'Unknown', 'Windows', 'BSD', 'Cisco IOS']) elif devicetype == 'Active Directory controller': return choice(['Unknown', 'Windows']) elif devicetype == 'VOIP phone': return choice(['Linux', 'Windows', 'Unknown']) elif devicetype == 'Unknown': return 'Unknown' return 'Unknown' def generate_mac(): mac = ':'.join(str(hex(randint(0,15))) + str(hex(randint(0,15))) for _ in range(6)) return mac.replace('0x', '') def record(records=None): records = [ 'p0f', 'nmap', 'BCF'] return choice(records) def calculate_subnets(total, breakdown): """Returns number of subnets, given the breakdown; or -1 if breakdown doesn't work.""" sanity_percent = 0 # if this isn't 100% by the end, we got issues. 
subnets = 0 for nodep, netp in breakdown: sanity_percent += nodep if (sanity_percent > 100): return -1 subtotal = int(total * .01 * nodep) groupby = int(254 * .01 *netp) subnets += math.ceil(subtotal/groupby) if (sanity_percent < 100): return -1 return subnets def get_default_dev_distro(nodect, printout=True): """Prints device type breakdowns using default ratios and returns a count of each device.""" if (printout): print("Default Device Role Distribution for {} nodes".format(nodect)) dev_breakdown = { 'Business workstation': int(math.floor(nodect*.35)), 'Developer workstation': int(math.floor(nodect*.15)), 'Smartphone': int(math.floor(nodect*.28)), 'Printer': int(math.floor(nodect*.03)), 'Mail server': int(math.floor(nodect*.01)), 'File server': int(math.floor(nodect*.02)), 'Internal web server': int(math.floor(nodect*.06)), 'Database server': int(math.floor(nodect*.01)), 'Code repository': int(math.floor(nodect*.01)), 'DNS server': int(math.floor(nodect*.01)), 'DHCP server': int(math.floor(nodect*.01)), 'Active Directory controller': int(math.floor(nodect*.01)), 'SSH server': int(math.floor(nodect*.01)), 'VOIP phone': 0, 'PBX': 0, 'Unknown': int(math.floor(nodect*.04)) } # any nodes left over gets put into Unknown total = 0 for key, ct in sorted(dev_breakdown.items()): if (printout and key != 'Unknown'): print(" {:>30} : {}".format(key, ct)) total += ct if (nodect > total): dev_breakdown['Unknown'] += (nodect - total) if (printout): print(" {:>30} : {}".format('Unknown', dev_breakdown['Unknown'])) return dev_breakdown def build_configs(subnets, host_count, dev_div, domain=None): """Returns a json object of subnet specifications, or None upon error""" global VERBOSE jsons = [] # subnet breakdown unlabeled_hosts = [] # number of hosts in the network w/o roles ip_addr = [] # keeping track of the 2nd and 3rd octets in IP roles = dict.fromkeys(dev_div.keys(), 0) if len(subnets)/254 > 254: print("WARNING: You're about to see some really sick IPs. 
Have fun.") for n in subnets: addy = (randint(0,253), randint(0,253)) while addy in ip_addr: addy = (randint(0,253), randint(0,253)) ip_addr.append(addy) jsons.append({ "start_ip" : '10.{}.{}.2'.format(addy[0],addy[1]), "subnet" : '10.{}.{}.0/24'.format(addy[0], addy[1]), "hosts" : n, "roles" : roles.copy() }) unlabeled_hosts.append(n) if VERBOSE: print("start_ip: {}\t number of hosts: {}\t".format(jsons[-1]['start_ip'], jsons[-1]['hosts'])) # divvy up the roles, now that the subnets are defined labeled_hosts = 0 for dev in dev_div: dev_total = dev_div[dev] labeled_hosts += dev_total while dev_total > 0: while True: n = randrange(0, len(subnets)) if (unlabeled_hosts[n] > 0): jsons[n]['roles'][dev] += 1 unlabeled_hosts[n] -= 1 break dev_total -= 1 if labeled_hosts != host_count: print("WARNING: Labeled hosts ({}) didn't equal host count ({})".format(labeled_hosts, host_count)) return jsons def build_configs_deprecated(total, net_div, dev_div, domain=None): """Returns a json object of subnet specifications, or None upon error""" global VERBOSE total_subnets = calculate_subnets(total, net_div) if total_subnets < 1: if VERBOSE: print("WARNING: Could not break down nodes into the requested subnets.") return None jsons = [] host_counter = [] ncount = 0 roles = dict.fromkeys(dev_div.keys(), 0) class_b,class_c = divide(total_subnets, 254) for n in net_div: if VERBOSE: ncount += 1 print("Starting net_div {} of {}".format(ncount, len(net_div))) nodes = round(total * .01 * n[0]) grouped_nodes = round(252 * .01 * n[1]) q,r = divide(nodes, grouped_nodes) if class_b > 254: print("WARNING: You're about to see some really sick IPs. Have fun.") while q > 0: if class_c == 0: class_b -= 1 class_c = 255 class_c -= 1 start_ip = '10.{}.{}.1'.format(class_b, class_c) netmask = '10.{}.{}.0/24'.format(class_b, class_c) jsons.append({ "start_ip" : start_ip, "subnet" : netmask, "hosts" : grouped_nodes, "roles" : roles.copy() }) host_counter.append(grouped_nodes) if VERBOSE: print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), grouped_nodes, start_ip)) q -= 1 if r > 0: if class_c == 0: class_b -= 1 class_c = 255 class_c -= 1 start_ip = '10.{}.{}.1'.format(class_b, class_c) netmask = '10.{}.{}.0/24'.format(class_b,class_c) jsons.append({ "start_ip" : start_ip, "subnet" : netmask, "hosts" : r, "roles" : roles.copy() }) host_counter.append(r) if VERBOSE: print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), r, start_ip)) if len(jsons) != total_subnets: print("BUG: Number of subnets created not equal to predicted {}".format(total_subnets)) if DEBUG: print("DEBUG: host_counter = {}\ttotal subnets = {}".format(host_counter, total_subnets)) total_hosts = 0 for dev in dev_div: ct = dev_div[dev] total_hosts += ct if (DEBUG): print("DEBUG: dev = {}\tcount = {}\ttotal = {}\thost_counter = {}".format(dev, dev_div[dev], total_hosts, host_counter)) while ct > 0: randomnet = randrange(0, total_subnets) if host_counter[randomnet] > 0: jsons[randomnet]['roles'][dev] += 1 host_counter[randomnet] -= 1 ct -= 1 if total_hosts != total: print("BUG: Number of devices in breakdown did not add up to {}".format(total)) return jsons def randomize_subnet_breakdown(count, minimum, maximum): '''Returns an array of host counts (where index = subnet), or None if the input is ridiculous.''' subnets = [] nodes_left = count # I mean, this is tested for very large values of count; I haven't tested very small numbers yet. 
if count <= 0 or minimum > count or maximum > count or minimum < 0 or maximum < 0 or maximum <= minimum: return None # break count into subnets until count = 0 or < min while (nodes_left > 0): clients = randint(minimum, maximum) subnets.append(clients) nodes_left -= clients if DEBUG: print("DEBUG: subnet count: {}\tnodes left: {}".format(clients, nodes_left)) if minimum < nodes_left < maximum: subnets.append(nodes_left) nodes_left = 0 elif nodes_left < minimum: # i.e., if all the subnets are maxed out but don't add up to the requested count, # then start all over again, cuz there won't be any way to honor min/max requirement. if (len(subnets) * maximum < count): subnets.clear() nodes_left = count else: break # divvy up the rest of the nodes among the existing subnets subnetIDs = [x for x in iter(range(len(subnets)))] while (nodes_left > 0): s = choice(subnetIDs) # pick a randum subnet if DEBUG: print("DEBUG: looping with s={}, count={}, left={}".format(s, subnets[s], nodes_left)) if subnets[s] < maximum: subnets[s] += 1 nodes_left -= 1 else: subnetIDs.remove(s) return subnets def build_network(subnets, fname=None, randomspace=False, prettyprint=True): global VERBOSE outobj = [] subnets_togo = len(subnets) for n in subnets: start_ip = ipaddress.ip_address(n['start_ip']) role_ct = dict(n['roles']) hosts_togo = n['hosts'] ip_taken = [] subnets_togo -= 1 while (hosts_togo > 0): host = { 'uid':generate_uuid(), 'mac':generate_mac(), 'rDNS_host':randstring(randrange(4,9)), 'subnet':n['subnet'] } if 'domain' in n: host['rDNS_domain'] = n['domain'] host['record'] = { 'source':record(), 'timestamp': str(dt.now()) } while True: a_role = choice(list(role_ct.keys())) if role_ct[a_role] > 0: role_ct[a_role] -= 1 host['role'] = { 'role': a_role, 'confidence': randrange(55,100) } break else: del(role_ct[a_role]) host['os'] = { 'os': generate_os_type(host['role']['role']) } if host['os']['os'] != 'Unknown': host['os']['confidence'] = randrange(55,100) if (randomspace): while True: ip = start_ip + randrange(0, 254) if ip not in ip_taken: host['IP'] = str(ip) ip_taken.append(ip) break else: ip = start_ip + hosts_togo host['IP'] = str(ip) outobj.append(host) hosts_togo -= 1 indent = 2 if prettyprint else None if fname: with open(fname, 'w') as ofile: ofile.write("{}".format(json.dumps(outobj, indent=indent))) else: return json.dumps(outobj, indent=indent) def main(): global VERBOSE, VERSION, NET_SUMMARY, OLDVERSION parser = argparse.ArgumentParser() parser.add_argument('-v', '--verbose', help='Provide program feedback', action="store_true") parser.add_argument('-s', '--summarize', help='Prints network configurations to output', action="store_true") parser.add_argument('-d', '--deprecate', help='Use the deprecated version for building subnets', action='store_true') parser.add_argument('--version', help='Prints version', action="store_true") args = parser.parse_args() if args.version: print("{} v{}".format(sys.argv[0], VERSION)) sys.exit() if args.verbose: VERBOSE = True if args.summarize: NET_SUMMARY = True if args.deprecate: OLDVERSION = True outname = '{}.json'.format(time.strftime("%Y%m%d-%H%M%S")) print('\n\n\tSYNTHETIC NETWORK NODE GENERATOR\n') while True: nodect = int(input("How many network nodes? [500]: ") or "500") if nodect > 4000000: print("That ({}) is just exorbitant. 
Next time try less than {}.".format(nodect, 4000000)) sys.exit() # setting subnet breakdown ---------------- if OLDVERSION: if (nodect > 50): print('Default Node distribution of {} nodes across Class C subnets: '.format(nodect)) print(' 30% of the nodes will occupy subnets that are 70% populated') print(' 45% of the nodes will occupy subnets that are 20% populated') print(' 25% of the nodes will occupy subnets that are 90% populated') net_breakdown = [(30,70), (45,20), (25,90)] print('Total subnets: {}'.format(calculate_subnets(nodect, net_breakdown))) set_net = input("Manually set network node distribution? [No]: ") or "No" else: set_net = "No" net_breakdown = [(100, 100)] print('Total subnets: 1') if (set_net.lower() != 'no' and set_net.lower() != 'n'): net_breakdown = [] percent = 100 print("Please enter what percentage of the {} nodes would consume what percentage".format(nodect)) print("of the Class C address space...") while percent > 0: nodes = int(input(" Percent of nodes (MAX={}): ".format(percent)) or "100") density = int(input(" Percent of class C space occupied: ") or "100") if (nodes <= 100 and nodes > 1): percent = percent - nodes else: print("Illegal node percentage value ({})".format(nodes)) continue if (density > 100 or density < 1): print("Illegal density percentage value ({})".format(density)) continue net_breakdown.append((nodes, density)) subnets = calculate_subnets(nodect, net_breakdown) print('Total subnets: {}'.format(subnets)) else: MAX_max = MAX_min = -1 while True: subnets = [] if nodect <= 252:
subnets.append(nodect) else: if MAX_max == -1: MAX_max = 150 while True: maximum = int(input('Max hosts in subnet (UP TO 252) [{}]: '.format(MAX_max)) or MAX_max) if (maximum < 3 or maximum > 252): print("Illegal 'maximum' value.") else: break if MAX_min == -1 or maximum != MAX_max: MAX_min = 254-maximum while True: minimum = int(input('Min hosts in subnet (UP TO {}) [{}]: '.format(MAX_min, MAX_min)) or MAX_min) if (minimum < 2 or minimum > MAX_min): print("Illegal 'minimum' value.") else: break MAX_min = minimum MAX_max = maximum subnets = randomize_subnet_breakdown(nodect, minimum, maximum) for i, _ in enumerate(subnets): print('\tSubnet #{} has {} hosts.'.format(i, subnets[i])) if (nodect > 252): subnets_finished = input("Is this breakout of subnets OK? [Yes]: ") or "Yes" if subnets_finished.lower() == 'yes' or subnets_finished.lower() == 'y': break else: break # setting device breakdown ---------------- dev_breakdown = get_default_dev_distro(nodect) dev_distr = input("Manually reset the above Device Role Distribution? [No]: ") or "No" if (dev_distr.lower() != 'no' and dev_distr.lower() != 'n'): remainder = nodect for category in sorted(dev_breakdown.keys()): if (remainder == 0): dev_breakdown[category] = 0 continue category_count = dev_breakdown[category] while (remainder > 0): if (remainder < category_count): category_count = remainder category_count = int(input(" {} (MAX={}) [{}]: ".format(category, remainder, category_count)) or category_count) remainder -= category_count if (remainder < 0 or category_count < 0): print("Illegal value '{}'".format(category_count)) remainder += category_count else: dev_breakdown[category] = category_count break; if (remainder > 0): dev_breakdown['Unknown'] += remainder domain = input("Domain name to use (press ENTER to auto-generate): ") or generate_fqdn() randomize = input("Randomize IP addresses in subnet? [Yes]: ") or "Yes" cont = input("Ready to generate json (No to start over)? [Yes]: ") or "Yes" if cont.lower() == 'yes' or cont.lower() == 'y': break if OLDVERSION: net_configs = build_configs_deprecated(nodect, net_breakdown, dev_breakdown, domain) else: net_configs = build_configs(subnets, nodect, dev_breakdown, domain) if NET_SUMMARY or VERBOSE: print("\nBased on the following config:\n") print(json.dumps(net_configs, indent=4)) print("\nSaved network profile to {}".format(outname)) else: print("\n Saved network profile to {}".format(outname)) if randomize.lower() == 'yes' or randomize.lower() == 'y': build_network(net_configs, outname, randomspace=True) else: build_network(net_configs, outname) if __name__ == "__main__": main()
random_line_split
gensynet.py
#!/usr/bin/env python3
#
# Generate Synthetic Networks
# First Version: 8/3/2017
#
# An interactive script that generates a JSON file that can be used for
# creating imaginary (enterprise) network profiles
#
# INTERNAL USE ONLY; i.e., user input validation is nearly non-existent.
#
# Cyber Reboot
# [email protected]
#

import argparse
from datetime import datetime as dt
import ipaddress
import json
import math
from random import *
import string
import sys
import time
import uuid

VERBOSE = False
NET_SUMMARY = False
VERSION = '0.81'
DEBUG = False
OLDVERSION = False


def randstring(size):
    return ''.join(choice(string.ascii_lowercase + string.digits) for _ in range(size))


def divide(dividend, divisor):
    quotient = math.floor(dividend/divisor)
    remainder = dividend % divisor
    return (quotient, remainder)


# you'll want to make sure that prefix is some string that
# is prefixed by some number.
def generate_ip(prefix, octets=4):
    ip = prefix
    if prefix[len(prefix)-1] != '.':
        prefix = prefix + '.'
    subn = 4 - prefix.count('.')
    if (subn > 0):
        ip = prefix + '.'.join(str(randint(1,252)) for _ in range(subn))
    return ip


def generate_uuid():
    return str(uuid.uuid4())


def generate_fqdn(domain=None, subdomains=0):
    if domain is None:
        domain = randstring(randint(5,10)) + '.local'
    if subdomains == 0:
        return domain
    else:
        hostname = domain
        while (subdomains > 0):
            hostname = randstring(randint(3,5)) + '.' + hostname
            subdomains -= 1
        return hostname


def generate_os_type(devicetype):
    if ( devicetype == 'Business workstation'
            or devicetype == 'Developer workstation'
            or devicetype == 'Mail server'
            or devicetype == 'File server'
            or devicetype == 'Internal web server'
            or devicetype == 'Database server'
            or devicetype == 'Code repository'
            or devicetype == 'SSH server'):
        return choice(['Windows', 'Linux', 'Mac OS X', 'BSD'])
    elif devicetype == 'Smartphone':
        return choice(['iOS', 'Android', 'Blackberry', 'Unknown'])
    elif devicetype == 'DNS server':
        return choice(['Windows', 'Linux', 'Mac OS X', 'BSD', 'Cisco IOS'])
    elif ( devicetype == 'Printer'
            or devicetype == 'PBX'):
        return choice(['Linux', 'Unknown', 'Windows'])
    elif devicetype == 'DHCP server':
        return choice(['Linux', 'Unknown', 'Windows', 'BSD', 'Cisco IOS'])
    elif devicetype == 'Active Directory controller':
        return choice(['Unknown', 'Windows'])
    elif devicetype == 'VOIP phone':
        return choice(['Linux', 'Windows', 'Unknown'])
    elif devicetype == 'Unknown':
        return 'Unknown'
    return 'Unknown'


def generate_mac():
    mac = ':'.join(str(hex(randint(0,15))) + str(hex(randint(0,15))) for _ in range(6))
    return mac.replace('0x', '')


def record(records=None):
    records = [ 'p0f', 'nmap', 'BCF']
    return choice(records)


def calculate_subnets(total, breakdown):
    """Returns number of subnets, given the breakdown; or -1 if breakdown doesn't work."""
    sanity_percent = 0  # if this isn't 100% by the end, we got issues.
    subnets = 0
    for nodep, netp in breakdown:
        sanity_percent += nodep
        if (sanity_percent > 100):
            return -1
        subtotal = int(total * .01 * nodep)
        groupby = int(254 * .01 * netp)
        subnets += math.ceil(subtotal/groupby)
    if (sanity_percent < 100):
        return -1
    return subnets


def get_default_dev_distro(nodect, printout=True):
    """Prints device type breakdowns using default ratios and returns a count of each device."""
    if (printout):
        print("Default Device Role Distribution for {} nodes".format(nodect))
    dev_breakdown = {
        'Business workstation': int(math.floor(nodect*.35)),
        'Developer workstation': int(math.floor(nodect*.15)),
        'Smartphone': int(math.floor(nodect*.28)),
        'Printer': int(math.floor(nodect*.03)),
        'Mail server': int(math.floor(nodect*.01)),
        'File server': int(math.floor(nodect*.02)),
        'Internal web server': int(math.floor(nodect*.06)),
        'Database server': int(math.floor(nodect*.01)),
        'Code repository': int(math.floor(nodect*.01)),
        'DNS server': int(math.floor(nodect*.01)),
        'DHCP server': int(math.floor(nodect*.01)),
        'Active Directory controller': int(math.floor(nodect*.01)),
        'SSH server': int(math.floor(nodect*.01)),
        'VOIP phone': 0,
        'PBX': 0,
        'Unknown': int(math.floor(nodect*.04))
    }
    # any nodes left over gets put into Unknown
    total = 0
    for key, ct in sorted(dev_breakdown.items()):
        if (printout and key != 'Unknown'):
            print(" {:>30} : {}".format(key, ct))
        total += ct
    if (nodect > total):
        dev_breakdown['Unknown'] += (nodect - total)
    if (printout):
        print(" {:>30} : {}".format('Unknown', dev_breakdown['Unknown']))
    return dev_breakdown


def build_configs(subnets, host_count, dev_div, domain=None):
    """Returns a json object of subnet specifications, or None upon error"""
    global VERBOSE
    jsons = []            # subnet breakdown
    unlabeled_hosts = []  # number of hosts in the network w/o roles
    ip_addr = []          # keeping track of the 2nd and 3rd octets in IP
    roles = dict.fromkeys(dev_div.keys(), 0)

    if len(subnets)/254 > 254:
        print("WARNING: You're about to see some really sick IPs. Have fun.")

    for n in subnets:
        addy = (randint(0,253), randint(0,253))
        while addy in ip_addr:
            addy = (randint(0,253), randint(0,253))
        ip_addr.append(addy)
        jsons.append({
            "start_ip" : '10.{}.{}.2'.format(addy[0],addy[1]),
            "subnet" : '10.{}.{}.0/24'.format(addy[0], addy[1]),
            "hosts" : n,
            "roles" : roles.copy()
        })
        unlabeled_hosts.append(n)
        if VERBOSE:
            print("start_ip: {}\t number of hosts: {}\t".format(jsons[-1]['start_ip'], jsons[-1]['hosts']))

    # divvy up the roles, now that the subnets are defined
    labeled_hosts = 0
    for dev in dev_div:
        dev_total = dev_div[dev]
        labeled_hosts += dev_total
        while dev_total > 0:
            while True:
                n = randrange(0, len(subnets))
                if (unlabeled_hosts[n] > 0):
                    jsons[n]['roles'][dev] += 1
                    unlabeled_hosts[n] -= 1
                    break
            dev_total -= 1
    if labeled_hosts != host_count:
        print("WARNING: Labeled hosts ({}) didn't equal host count ({})".format(labeled_hosts, host_count))
    return jsons


def build_configs_deprecated(total, net_div, dev_div, domain=None):
    """Returns a json object of subnet specifications, or None upon error"""
    global VERBOSE
    total_subnets = calculate_subnets(total, net_div)
    if total_subnets < 1:
        if VERBOSE:
            print("WARNING: Could not break down nodes into the requested subnets.")
        return None
    jsons = []
    host_counter = []
    ncount = 0
    roles = dict.fromkeys(dev_div.keys(), 0)
    class_b,class_c = divide(total_subnets, 254)

    for n in net_div:
        if VERBOSE:
            ncount += 1
            print("Starting net_div {} of {}".format(ncount, len(net_div)))
        nodes = round(total * .01 * n[0])
        grouped_nodes = round(252 * .01 * n[1])
        q,r = divide(nodes, grouped_nodes)
        if class_b > 254:
            print("WARNING: You're about to see some really sick IPs. Have fun.")
        while q > 0:
            if class_c == 0:
                class_b -= 1
                class_c = 255
            class_c -= 1
            start_ip = '10.{}.{}.1'.format(class_b, class_c)
            netmask = '10.{}.{}.0/24'.format(class_b, class_c)
            jsons.append({
                "start_ip" : start_ip,
                "subnet" : netmask,
                "hosts" : grouped_nodes,
                "roles" : roles.copy()
            })
            host_counter.append(grouped_nodes)
            if VERBOSE:
                print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), grouped_nodes, start_ip))
            q -= 1
        if r > 0:
            if class_c == 0:
                class_b -= 1
                class_c = 255
            class_c -= 1
            start_ip = '10.{}.{}.1'.format(class_b, class_c)
            netmask = '10.{}.{}.0/24'.format(class_b,class_c)
            jsons.append({
                "start_ip" : start_ip,
                "subnet" : netmask,
                "hosts" : r,
                "roles" : roles.copy()
            })
            host_counter.append(r)
            if VERBOSE:
                print("Initialized subnet {} with {} hosts starting at {}".format(len(jsons), r, start_ip))

    if len(jsons) != total_subnets:
        print("BUG: Number of subnets created not equal to predicted {}".format(total_subnets))
    if DEBUG:
        print("DEBUG: host_counter = {}\ttotal subnets = {}".format(host_counter, total_subnets))

    total_hosts = 0
    for dev in dev_div:
        ct = dev_div[dev]
        total_hosts += ct
        if (DEBUG):
            print("DEBUG: dev = {}\tcount = {}\ttotal = {}\thost_counter = {}".format(dev, dev_div[dev], total_hosts, host_counter))
        while ct > 0:
            randomnet = randrange(0, total_subnets)
            if host_counter[randomnet] > 0:
                jsons[randomnet]['roles'][dev] += 1
                host_counter[randomnet] -= 1
                ct -= 1
    if total_hosts != total:
        print("BUG: Number of devices in breakdown did not add up to {}".format(total))
    return jsons


def randomize_subnet_breakdown(count, minimum, maximum):
    '''Returns an array of host counts (where index = subnet), or None if the input is ridiculous.'''
    subnets = []
    nodes_left = count

    # I mean, this is tested for very large values of count; I haven't tested very small numbers yet.
    if count <= 0 or minimum > count or maximum > count or minimum < 0 or maximum < 0 or maximum <= minimum:
        return None

    # break count into subnets until count = 0 or < min
    while (nodes_left > 0):
        clients = randint(minimum, maximum)
        subnets.append(clients)
        nodes_left -= clients
        if DEBUG:
            print("DEBUG: subnet count: {}\tnodes left: {}".format(clients, nodes_left))
        if minimum < nodes_left < maximum:
            subnets.append(nodes_left)
            nodes_left = 0
        elif nodes_left < minimum:
            # i.e., if all the subnets are maxed out but don't add up to the requested count,
            # then start all over again, cuz there won't be any way to honor min/max requirement.
            if (len(subnets) * maximum < count):
                subnets.clear()
                nodes_left = count
            else:
                break

    # divvy up the rest of the nodes among the existing subnets
    subnetIDs = [x for x in iter(range(len(subnets)))]
    while (nodes_left > 0):
        s = choice(subnetIDs)  # pick a random subnet
        if DEBUG:
            print("DEBUG: looping with s={}, count={}, left={}".format(s, subnets[s], nodes_left))
        if subnets[s] < maximum:
            subnets[s] += 1
            nodes_left -= 1
        else:
            subnetIDs.remove(s)
    return subnets


def build_network(subnets, fname=None, randomspace=False, prettyprint=True):
    global VERBOSE
    outobj = []
    subnets_togo = len(subnets)
    for n in subnets:
        start_ip = ipaddress.ip_address(n['start_ip'])
        role_ct = dict(n['roles'])
        hosts_togo = n['hosts']
        ip_taken = []
        subnets_togo -= 1
        while (hosts_togo > 0):
            host = {
                'uid':generate_uuid(),
                'mac':generate_mac(),
                'rDNS_host':randstring(randrange(4,9)),
                'subnet':n['subnet']
            }
            if 'domain' in n:
                host['rDNS_domain'] = n['domain']
            host['record'] = {
                'source':record(),
                'timestamp': str(dt.now())
            }
            while True:
                a_role = choice(list(role_ct.keys()))
                if role_ct[a_role] > 0:
                    role_ct[a_role] -= 1
                    host['role'] = {
                        'role': a_role,
                        'confidence': randrange(55,100)
                    }
                    break
                else:
                    del(role_ct[a_role])
            host['os'] = { 'os': generate_os_type(host['role']['role']) }
            if host['os']['os'] != 'Unknown':
                host['os']['confidence'] = randrange(55,100)
            if (randomspace):
                while True:
                    ip = start_ip + randrange(0, 254)
                    if ip not in ip_taken:
                        host['IP'] = str(ip)
                        ip_taken.append(ip)
                        break
            else:
                ip = start_ip + hosts_togo
                host['IP'] = str(ip)
            outobj.append(host)
            hosts_togo -= 1

    indent = 2 if prettyprint else None
    if fname:
        with open(fname, 'w') as ofile:
            ofile.write("{}".format(json.dumps(outobj, indent=indent)))
    else:
        return json.dumps(outobj, indent=indent)


def main():
    global VERBOSE, VERSION, NET_SUMMARY, OLDVERSION
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', help='Provide program feedback', action="store_true")
    parser.add_argument('-s', '--summarize', help='Prints network configurations to output', action="store_true")
    parser.add_argument('-d', '--deprecate', help='Use the deprecated version for building subnets', action='store_true')
    parser.add_argument('--version', help='Prints version', action="store_true")
    args = parser.parse_args()
    if args.version:
        print("{} v{}".format(sys.argv[0], VERSION))
        sys.exit()
    if args.verbose:
        VERBOSE = True
    if args.summarize:
        NET_SUMMARY = True
    if args.deprecate:
        OLDVERSION = True

    outname = '{}.json'.format(time.strftime("%Y%m%d-%H%M%S"))

    print('\n\n\tSYNTHETIC NETWORK NODE GENERATOR\n')
    while True:
        nodect = int(input("How many network nodes? [500]: ") or "500")
        if nodect > 4000000:
            print("That ({}) is just exorbitant. Next time try less than {}.".format(nodect, 4000000))
            sys.exit()

        # setting subnet breakdown ----------------
        if OLDVERSION:
            if (nodect > 50):
                print('Default Node distribution of {} nodes across Class C subnets: '.format(nodect))
                print(' 30% of the nodes will occupy subnets that are 70% populated')
                print(' 45% of the nodes will occupy subnets that are 20% populated')
                print(' 25% of the nodes will occupy subnets that are 90% populated')
                net_breakdown = [(30,70), (45,20), (25,90)]
                print('Total subnets: {}'.format(calculate_subnets(nodect, net_breakdown)))
                set_net = input("Manually set network node distribution? [No]: ") or "No"
            else:
                set_net = "No"
                net_breakdown = [(100, 100)]
                print('Total subnets: 1')

            if (set_net.lower() != 'no' and set_net.lower() != 'n'):
                net_breakdown = []
                percent = 100
                print("Please enter what percentage of the {} nodes would consume what percentage".format(nodect))
                print("of the Class C address space...")
                while percent > 0:
                    nodes = int(input(" Percent of nodes (MAX={}): ".format(percent)) or "100")
                    density = int(input(" Percent of class C space occupied: ") or "100")
                    if (nodes <= 100 and nodes > 1):
                        percent = percent - nodes
                    else:
                        print("Illegal node percentage value ({})".format(nodes))
                        continue
                    if (density > 100 or density < 1):
                        print("Illegal density percentage value ({})".format(density))
                        continue
                    net_breakdown.append((nodes, density))
                subnets = calculate_subnets(nodect, net_breakdown)
                print('Total subnets: {}'.format(subnets))
        else:
            MAX_max = MAX_min = -1
            while True:
                subnets = []
                if nodect <= 252:
                    subnets.append(nodect)
                else:
                    if MAX_max == -1:
                        MAX_max = 150
                    while True:
                        maximum = int(input('Max hosts in subnet (UP TO 252) [{}]: '.format(MAX_max)) or MAX_max)
                        if (maximum < 3 or maximum > 252):
                            print("Illegal 'maximum' value.")
                        else:
                            break
                    if MAX_min == -1 or maximum != MAX_max:
                        MAX_min = 254-maximum
                    while True:
                        minimum = int(input('Min hosts in subnet (UP TO {}) [{}]: '.format(MAX_min, MAX_min)) or MAX_min)
                        if (minimum < 2 or minimum > MAX_min):
                            print("Illegal 'minimum' value.")
                        else:
                            break
                    MAX_min = minimum
                    MAX_max = maximum
                    subnets = randomize_subnet_breakdown(nodect, minimum, maximum)
                for i, _ in enumerate(subnets):
                    print('\tSubnet #{} has {} hosts.'.format(i, subnets[i]))
                if (nodect > 252):
                    subnets_finished = input("Is this breakout of subnets OK? [Yes]: ") or "Yes"
                    if subnets_finished.lower() == 'yes' or subnets_finished.lower() == 'y':
                        break
                else:
                    break

        # setting device breakdown ----------------
        dev_breakdown = get_default_dev_distro(nodect)
        dev_distr = input("Manually reset the above Device Role Distribution? [No]: ") or "No"
        if (dev_distr.lower() != 'no' and dev_distr.lower() != 'n'):
            remainder = nodect
            for category in sorted(dev_breakdown.keys()):
                if (remainder == 0):
                    dev_breakdown[category] = 0
                    continue
                category_count = dev_breakdown[category]
                while (remainder > 0):
                    if (remainder < category_count):
                        category_count = remainder
                    category_count = int(input(" {} (MAX={}) [{}]: ".format(category, remainder, category_count)) or category_count)
                    remainder -= category_count
                    if (remainder < 0 or category_count < 0):
                        print("Illegal value '{}'".format(category_count))
                        remainder += category_count
                    else:
                        dev_breakdown[category] = category_count
                        break
            if (remainder > 0):
                dev_breakdown['Unknown'] += remainder

        domain = input("Domain name to use (press ENTER to auto-generate): ") or generate_fqdn()
        randomize = input("Randomize IP addresses in subnet? [Yes]: ") or "Yes"
        cont = input("Ready to generate json (No to start over)? [Yes]: ") or "Yes"
        if cont.lower() == 'yes' or cont.lower() == 'y':
            break

    if OLDVERSION:
        net_configs = build_configs_deprecated(nodect, net_breakdown, dev_breakdown, domain)
    else:
        net_configs = build_configs(subnets, nodect, dev_breakdown, domain)
    if NET_SUMMARY or VERBOSE:
        print("\nBased on the following config:\n")
        print(json.dumps(net_configs, indent=4))
        print("\nSaved network profile to {}".format(outname))
    else:
        print("\n Saved network profile to {}".format(outname))
    if randomize.lower() == 'yes' or randomize.lower() == 'y':
        build_network(net_configs, outname, randomspace=True)
    else:
        build_network(net_configs, outname)


if __name__ == "__main__":
    main()
base_command.py
# -*- coding: utf-8 -*-
import urllib2, json, traceback

from django.conf import settings
from django.db import models
from TkManager.order.models import User
from TkManager.juxinli.models import *
from TkManager.juxinli.error_no import *
from TkManager.common.tk_log import TkLog
from datetime import datetime
from django_gearman_commands import GearmanWorkerBaseCommand
from django.db import transaction
import objgraph


class JuxinliBaseCommand(GearmanWorkerBaseCommand):
    """
    Fetch JSON data from Juxinli, then store it in the database.

    init_config      configures how data is stored; subclasses must implement it
                     (see the docstring below for the config format)
    get_juxinli_data runs the fetch/parse/store operation
    """

    def __init__(self):
        super(JuxinliBaseCommand, self).__init__()
        self._org_name = settings.JUXINLI_CONF['org_name']
        self._client_secret = settings.JUXINLI_CONF['client_secret']
        self._access_report_data_api = settings.JUXINLI_CONF['access_report_data_api']
        self._access_raw_data_api = settings.JUXINLI_CONF['access_raw_data_api']
        self._access_report_token_api = settings.JUXINLI_CONF['access_report_token_api']
        self._access_e_business_raw_data_api = settings.JUXINLI_CONF['access_e_business_raw_data_api']
        self._options = {
            'update_days' : 21,
            'force_update' : False,
        }
        self.init_config()

    def init_config(self):
        '''
        Reference format:
        self._transformer = {
            'basic_transformer' : {
                'name' : 'PhoneBasic',  # name of the Django model class
                'path' : 'raw_data/members/transactions:0/basic',  # path into the JSON data
                'data_type' : 'map',  # 'map' for a single record, 'list' for multiple records
                'version' : True,     # if True, each pull gets a new version number; otherwise version stays 1
                'trans' : {  # transformation mapping: source_field (json) -> dest_field (db model)
                    "cell_phone": "cell_phone",
                    "idcard": "idcard",
                    "real_name": "real_name",
                    "reg_time": "reg_time",
                    "update_time": "update_time",
                    "receiver" : {  # a foreign key is expressed as a nested dict
                                    # (nested dicts need no 'path'; the current path is implied)
                        "name" : "Receiver",
                        "data_type" : "list",
                        "version" : True,
                        "trans": {
                            "name" : "name",
                            "phone_num_list" : "phone_num_list",
                            "amount" : "amount",
                            "count" : "count",
                        },
                    },
                },
            },
        }
        '''
        pass

    def test(self, user, data):
        if not data:
            return ERR_GET_RAW_DATA_FAILED
        ret_code = self._save_raw_data(data, user, self._options)
        return ret_code

    def get_juxinli_data(self, uid, url):
        try:
            user = User.objects.get(pk=uid)
            token = self._get_token()
            if not token:
                return ERR_CREATE_TOKEN_FAILED
            data = self._get_juxinli_data(token, user, url)
            if not data:
                return ERR_GET_RAW_DATA_FAILED
            ret_code = self._save_raw_data(data, user, self._options)
            if ret_code != 0:
                return ret_code
            #data = self._get_report_data(token, user)
            #print data
            #print "@@ print ret", ret_code
            return RETURN_SUCCESS
        except Exception, e:
            traceback.print_exc()
            TkLog().error("get juxinli call failed %s" % str(e))
            return ERR_OTHER_EXCEPTION

    def _open_url(self, url):
        '''
        Issue an HTTP GET request and return the parsed JSON.
        '''
        req1 = urllib2.Request(url=url)
        html = urllib2.urlopen(req1).read().decode('utf-8')
        return json.loads(html.encode("utf-8"))

    def _get_token(self):
        '''
        Create a new token for fetching data; returns None on failure.
        '''
        url = u"%s?client_secret=%s&hours=24&org_name=%s" % (self._access_report_token_api, self._client_secret, self._org_name)
        html = self._open_url(url)
        try:
            res = html['access_token']
            return res
        except KeyError, e:
            return None

    def _get_juxinli_data(self, access_token, user, url):
        '''
        Fetch Juxinli data; returns the JSON response, or None on failure.
        '''
        raw_url = u'%s?client_secret=%s&access_token=%s&name=%s&idcard=%s&phone=%s' % (url, self._client_secret, access_token, user.name, user.id_no, user.phone_no)
        try:
            res = self._open_url(raw_url.encode('utf-8'))
            success = res["success"]
            if success != "true":
                return None
            return res
        except KeyError, e:
            return None

    #def _get_report_data(self, access_token, user):
    #    report_url = u'%s?client_secret=%s&access_token=%s&name=%s&idcard=%s&phone=%s' % (self._access_report_token_api, self._client_secret, access_token, user.name, user.id_no, user.phone_no)
    #    print report_url
    #    res = self._open_url(report_url.encode('utf-8'))
    #    #print res
    #    #print res['raw_data']['members']['error_msg']
    #    return res

    def _allow_overwrite_data(self, user, options):
        return True

    def _get_data_from_path(self, data, path):
        '''
        Path syntax:
          /  separates path components
          :  selects an index within a list
        '''
        try:
            fields = path.split("/")
            res = data
            for field in fields:
                if field.find(":") != -1:
                    parts = field.split(":")
                    if len(parts) != 2:
                        TkLog().error("field format error %s" % (field))
                        return None
                    res = res[parts[0]][int(parts[1])]
                else:
                    res = res[field]
            return res
        except Exception, e:
            print e
            traceback.print_exc()
            TkLog().error("get data from path failed %s" % str(e))
            return None

    def _save_raw_data(self, data, user, options):
        """
        Re-entrant: if a user's data was updated less than options['update_days']
        days ago the db is not updated; otherwise a new record is added.
        """
        if not self._allow_overwrite_data(user, options):
            return RETURN_CAN_NOT_OVERWRITE
        for transtype in self._transformer.keys():
            adaptor = self._transformer[transtype]
            cls = eval(adaptor["name"])
            version = 0
            objs = cls.objects.filter(owner=user).order_by('-id')[:1]
            if len(objs) == 1:
                version = objs[0].version
            TkLog().info("update %s version %d" % (adaptor["name"], version))
            data_list = self._get_data_from_path(data, adaptor["path"])
            if not data_list:
                TkLog().warn("data not found %s:%s" % (adaptor["name"], adaptor["path"]))
                #return -4 #just skip
            ret_code = self._save_obj(data_list, cls, user, adaptor, version)
            if ret_code != 0:
                return ret_code
        return RETURN_SUCCESS

    @transaction.commit_manually
    def _save_obj(self, data_list, cls, user, adaptor, version=0, parent=None):
        '''
        Write one object to the database; data_type decides between map and list.
        '''
        if adaptor["data_type"] == "list":   # data_list holds multiple records
            for record in data_list:
                ret_code = self._save_single_obj(record, cls, user, adaptor, version, parent)
                if ret_code != 0:
                    return ret_code
        elif adaptor["data_type"] == "map":  # data_list holds a single record
            record = data_list
            ret_code = self._save_single_obj(record, cls, user, adaptor, version, parent)
            if ret_code != 0:
                return ret_code
        transaction.commit()
        return 0

    def _save_single_obj(self, record, cls, user, adaptor, version=0, parent=None):
        '''
        Write one entry to the database; if parent is not None, also set the
        parent's foreign key.
        record : a single JSON data entry
        cls    : the database Model
        '''
        obj = cls()
        for source_field, dest_field in adaptor['trans'].items():
            if isinstance(dest_field, str):
                field_type = obj._meta.get_field(dest_field)
                if "/" in source_field:
                    record[source_field] = self._get_data_from_path(record, source_field)
                if isinstance(field_type, models.CharField):
                    try:
                        if isinstance(record[source_field], list):
                            #setattr(obj, dest_field, "#".join(record[source_field]))
                            setattr(obj, dest_field, record[source_field][0])
                        else:
                            setattr(obj, dest_field, record[source_field])
                    except Exception, e:
                        TkLog().warn("set char field failed %s %s" % (str(e), record[source_field]))
                        return ERR_SETATTR_FAILED
                elif isinstance(field_type, models.IntegerField):
                    try:
                        if not record[source_field]:
                            setattr(obj, dest_field, 0)
                        else:
                            setattr(obj, dest_field, int(record[source_field]))
                    except Exception, e:
                        TkLog().warn("set int field failed %s %s" % (str(e), record[source_field]))
                        return ERR_SETATTR_FAILED
                elif isinstance(field_type, models.BigIntegerField):
                    try:
                        if not record[source_field]:
                            setattr(obj, dest_field, 0)
                        else:
                            setattr(obj, dest_field, long(record[source_field]))
                    except Exception, e:
                        TkLog().warn("set bigint field failed %s %s" % (str(e), record[source_field]))
                        return ERR_SETATTR_FAILED
                elif isinstance(field_type, models.FloatField):
                    try:
                        if not record[source_field]:
                            setattr(obj, dest_field, float(0))
                        else:
                            setattr(obj, dest_field, float(record[source_field]))
                    except Exception, e:
                        TkLog().warn("set float field failed %s %s" % (str(e), record[source_field]))
                        return ERR_SETATTR_FAILED
                elif isinstance(field_type, models.DateTimeField):
                    try:
                        if not record[source_field]:
                            setattr(obj, dest_field, None)
                        else:
                            setattr(obj, dest_field, datetime.strptime(record[source_field], "%Y-%m-%d %H:%M:%S"))
                    except Exception, e:
                        TkLog().warn("set datetime field failed %s %s" % (str(e), record[source_field]))
                        return ERR_SETATTR_FAILED
                elif isinstance(field_type, models.NullBooleanField):
                    try:
                        if not record[source_field]:
                            setattr(obj, dest_field, None)
                        else:
                            setattr(obj, dest_field, record[source_field])
                    except Exception, e:
                        TkLog().warn("set boolean field failed %s %s" % (str(e), record[source_field]))
                        return ERR_SETATTR_FAILED
                else:
                    TkLog().error("unsupported type field:%s" % dest_field)
                    return ERR_UNSUPPORTED_FILED_TYPE
        try:
            if adaptor['version']:
                obj.version = version + 1
            else:
                obj.version = 0
            #if parent:
            #    setattr(obj, parent["field"], parent["parent_obj"])
            obj.owner = user
            obj.save()
        except Exception, e:
            print "save error %s" % str(e)
            return ERR_SAVE_OBJECT
        for source_field, dest_field in adaptor['trans'].items():
            if isinstance(dest_field, dict):
                try:
                    sub_cls = eval(dest_field["name"])
                    self._save_obj(record[source_field], sub_cls, obj, dest_field, version, {"parent_obj":obj, "field":"owner"})
                except Exception, e:
                    TkLog().warn("set foreignkey field failed %s %s" % (str(e), record[source_field]))
        objgraph.show_most_common_types()
        return 0
rights.go
// Copyright (c) 2020-2022 Blockwatch Data Inc.
// Author: [email protected]

package rpc

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"strconv"

	"blockwatch.cc/tzgo/tezos"
)

// BakingRight holds simplified information about the right to bake a specific Tezos block
type BakingRight struct {
	Delegate string
	Level    int64
	Round    int
}

func (r BakingRight) Address() tezos.Address {
	a, _ := tezos.ParseAddress(r.Delegate)
	return a
}

func (r *BakingRight) UnmarshalJSON(data []byte) error

// EndorsingRight holds simplified information about the right to endorse
// a specific Tezos block
type EndorsingRight struct {
	Delegate string
	Level    int64
	Power    int
}

func (r EndorsingRight) Address() tezos.Address {
	a, _ := tezos.ParseAddress(r.Delegate)
	return a
}

type StakeInfo struct {
	ActiveStake int64         `json:"active_stake,string"`
	Baker       tezos.Address `json:"baker"`
}

type SnapshotInfo struct {
	LastRoll     []string    `json:"last_roll"`
	Nonces       []string    `json:"nonces"`
	RandomSeed   string      `json:"random_seed"`
	RollSnapshot int         `json:"roll_snapshot"`                         // until v011
	Cycle        int64       `json:"cycle"`                                 // added, not part of RPC response
	BakerStake   []StakeInfo `json:"selected_stake_distribution,omitempty"` // v012+
	TotalStake   int64       `json:"total_active_stake,string"`             // v012+
	// Slashed []??? "slashed_deposits"
}

type SnapshotIndex struct {
	Cycle int64 // the requested cycle that contains rights from the snapshot
	Base  int64 // the cycle where the snapshot happened
	Index int   // the index inside base where snapshot happened
}

type SnapshotOwners struct {
	Cycle int64          `json:"cycle"`
	Index int64          `json:"index"`
	Rolls []SnapshotRoll `json:"rolls"`
}

type SnapshotRoll struct {
	RollId   int64
	OwnerKey tezos.Key
}

func (r *SnapshotRoll) UnmarshalJSON(data []byte) error {
	if len(data) == 0 || bytes.Equal(data, []byte(`null`)) {
		return nil
	}
	if len(data) == 2 {
		return nil
	}
	if data[0] != '[' || data[len(data)-1] != ']' {
		return fmt.Errorf("SnapshotRoll: invalid json array '%s'", string(data))
	}
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.UseNumber()
	unpacked := make([]any, 0)
	err := dec.Decode(&unpacked)
	if err != nil {
		return err
	}
	return r.decode(unpacked)
}

func (r *SnapshotRoll) decode(unpacked []any) error {
	if l := len(unpacked); l != 2 {
		return fmt.Errorf("SnapshotRoll: invalid json array len %d", l)
	}
	id, err := strconv.ParseInt(unpacked[0].(json.Number).String(), 10, 64)
	if err != nil {
		return fmt.Errorf("SnapshotRoll: invalid roll id: %v", err)
	}
	if err = r.OwnerKey.UnmarshalText([]byte(unpacked[1].(string))); err != nil {
		return err
	}
	r.RollId = id
	return nil
}

// ListBakingRights returns information about baking rights at block id.
// Use max to set a max block priority (before Ithaca) or a max round (after Ithaca).
func (c *Client) ListBakingRights(ctx context.Context, id BlockID, max int, p *Params) ([]BakingRight, error) {
	maxSelector := "max_priority=%d"
	if p.Version >= 12 && p.IsPreIthacaNetworkAtStart() {
		maxSelector = "max_round=%d"
	}
	if p.Version < 6 && p.IsPreIthacaNetworkAtStart() {
		max++
	}
	rights := make([]BakingRight, 0)
	u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&"+maxSelector, id, max)
	if err := c.Get(ctx, u, &rights); err != nil {
		return nil, err
	}
	return rights, nil
}

// ListBakingRightsCycle returns information about baking rights for an entire cycle
// as seen from block id. Note block and cycle must be no further than preserved cycles
// away from each other. Use max to set a max block priority (before Ithaca) or a max
// round (after Ithaca).
func (c *Client) ListBakingRightsCycle(ctx context.Context, id BlockID, cycle int64, max int, p *Params) ([]BakingRight, error) {
	maxSelector := "max_round=%d"
	if p.Version < 12 && p.IsPreIthacaNetworkAtStart() {
		maxSelector = "max_priority=%d"
	}
	if p.Version < 6 && p.IsPreIthacaNetworkAtStart() {
		max++
	}
	rights := make([]BakingRight, 0)
	u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&cycle=%d&"+maxSelector, id, cycle, max)
	if err := c.Get(ctx, u, &rights); err != nil {
		return nil, err
	}
	return rights, nil
}

// ListEndorsingRights returns information about block endorsing rights.
func (c *Client) ListEndorsingRights(ctx context.Context, id BlockID, p *Params) ([]EndorsingRight, error) {
	u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true", id)
	rights := make([]EndorsingRight, 0)
	// Note: future cycles are seen from current protocol (!)
	if p.Version < 12 && p.IsPreIthacaNetworkAtStart() {
		type Rights struct {
			Level    int64  `json:"level"`
			Delegate string `json:"delegate"`
			Slots    []int  `json:"slots"`
		}
		list := make([]Rights, 0)
		if err := c.Get(ctx, u, &list); err != nil {
			return nil, err
		}
		for _, r := range list {
			rights = append(rights, EndorsingRight{
				Level:    r.Level,
				Delegate: r.Delegate,
				Power:    len(r.Slots),
			})
		}
	} else {
		type V12Rights struct {
			Level     int64 `json:"level"`
			Delegates []struct {
				Delegate string `json:"delegate"`
				Power    int    `json:"endorsing_power"`
			} `json:"delegates"`
		}
		v12rights := make([]V12Rights, 0)
		if err := c.Get(ctx, u, &v12rights); err != nil {
			return nil, err
		}
		for _, v := range v12rights {
			for _, r := range v.Delegates {
				rights = append(rights, EndorsingRight{
					Level:    v.Level,
					Delegate: r.Delegate,
					Power:    r.Power,
				})
			}
		}
	}
	return rights, nil
}

// ListEndorsingRightsCycle returns information about endorsing rights for an entire cycle
// as seen from block id. Note block and cycle must be no further than preserved cycles
// away. On protocol changes future rights must be refetched!
func (c *Client) ListEndorsingRightsCycle(ctx context.Context, id BlockID, cycle int64, p *Params) ([]EndorsingRight, error) {
	u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true&cycle=%d", id, cycle)
	rights := make([]EndorsingRight, 0)
	switch {
	case p.Version < 12 && p.IsPreIthacaNetworkAtStart():
		// until Ithaca v012
		type Rights struct {
			Level    int64  `json:"level"`
			Delegate string `json:"delegate"`
			Slots    []int  `json:"slots"`
		}
		list := make([]Rights, 0)
		if err := c.Get(ctx, u, &list); err != nil {
			return nil, err
		}
		for _, r := range list {
			rights = append(rights, EndorsingRight{
				Level:    r.Level,
				Delegate: r.Delegate,
				Power:    len(r.Slots),
			})
		}
	default:
		// FIXME: it seems this is still not removed
		// case p.Version >= 12 && p.Version <= 15: // until Lima v015
		type V12Rights struct {
			Level     int64 `json:"level"`
			Delegates []struct {
				Delegate string `json:"delegate"`
				Power    int    `json:"endorsing_power"`
			} `json:"delegates"`
		}
		v12rights := make([]V12Rights, 0)
		if err := c.Get(ctx, u, &v12rights); err != nil {
			return nil, err
		}
		for _, v := range v12rights {
			for _, r := range v.Delegates {
				rights = append(rights, EndorsingRight{
					Level:    v.Level,
					Delegate: r.Delegate,
					Power:    r.Power,
				})
			}
		}
		// default: // Lima+ v016 (cannot fetch full cycle of endorsing rights)
		// TODO: fetch per block in parallel
	}
	return rights, nil
}

// GetSnapshotInfoCycle returns information about a roll snapshot as seen from block id.
// Note block and cycle must be no further than preserved cycles away.
func (c *Client) GetSnapshotInfoCycle(ctx context.Context, id BlockID, cycle int64) (*SnapshotInfo, error) {
	idx := &SnapshotInfo{
		Cycle:        cycle,
		RollSnapshot: -1,
	}
	u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/cycle/%d", id, cycle)
	if err := c.Get(ctx, u, idx); err != nil {
		return nil, err
	}
	if idx.RandomSeed == "" {
		return nil, fmt.Errorf("missing snapshot for cycle %d at block %s", cycle, id)
	}
	return idx, nil
}

// GetSnapshotIndexCycle returns information about a roll snapshot as seen from block id.
// Note block and cycle must be no further than preserved cycles away.
func (c *Client) GetSnapshotIndexCycle(ctx context.Context, id BlockID, cycle int64, p *Params) (*SnapshotIndex, error) {
	idx := &SnapshotIndex{}
	if p.Version < 12 && p.IsPreIthacaNetworkAtStart() {
		// pre-Ithaca we can at most look PRESERVED_CYCLES into the future since
		// the snapshot happened 2 cycles back from the block we're looking from.
		var info SnapshotInfo
		u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/cycle/%d", id, cycle)
		// log.Infof("GET %s", u)
		if err := c.Get(ctx, u, &info); err != nil {
			return nil, err
		}
		if info.RandomSeed == "" {
			return nil, fmt.Errorf("missing snapshot for cycle %d at block %s", cycle, id)
		}
		idx.Cycle = cycle
		idx.Base = p.SnapshotBaseCycle(cycle)
		idx.Index = info.RollSnapshot
	} else {
		idx.Cycle = cycle
		idx.Base = p.SnapshotBaseCycle(cycle)
		idx.Index = 15
		// if cycle > p.PreservedCycles+1 {
		if idx.Base <= 0 {
			log.Debugf("No snapshot for cycle %d", cycle)
		} else {
			u := fmt.Sprintf("chains/main/blocks/%s/context/selected_snapshot?cycle=%d", id, cycle)
			// log.Infof("GET %s", u)
			if err := c.Get(ctx, u, &idx.Index); err != nil {
				return nil, err
			}
		}
	}
	return idx, nil
}

func (c *Client) FetchRightsByCycle(ctx context.Context, height, cycle int64, bundle *Bundle) error {
	level := BlockLevel(height)
	if bundle.Params == nil {
		p, err := c.GetParams(ctx, level)
		if err != nil {
			return fmt.Errorf("params: %v", err)
		}
		bundle.Params = p
		log.Debugf("Using fresh params for v%03d", p.Version)
	} else {
		log.Debugf("Using passed params for v%03d", bundle.Params.Version)
	}
	p := bundle.Params
	br, err := c.ListBakingRightsCycle(ctx, level, cycle, 0, p)
	if err != nil {
		return fmt.Errorf("baking: %v", err)
	}
	if len(br) == 0 {
		return fmt.Errorf("empty baking rights, make sure your Tezos node runs in archive mode")
	}
	log.Debugf("Fetched %d baking rights for cycle %d at height %d", len(br), cycle, height)
	bundle.Baking = append(bundle.Baking, br)
	er, err := c.ListEndorsingRightsCycle(ctx, level, cycle, p)
	if err != nil {
		return fmt.Errorf("endorsing: %v", err)
	}
	if len(er) == 0 {
		return fmt.Errorf("empty endorsing rights, make sure your Tezos node runs in archive mode")
	}
	log.Debugf("Fetched %d endorsing rights for cycle %d at height %d", len(er), cycle, height)
	bundle.Endorsing = append(bundle.Endorsing, er)
	// unavailable on genesis
	if height > 1 {
		prev, err := c.ListEndorsingRights(ctx, BlockLevel(height-1), p)
		if err != nil {
			return fmt.Errorf("last endorsing: %v", err)
		}
		if len(prev) == 0 {
			return fmt.Errorf("empty endorsing rights from last cycle end, make sure your Tezos node runs in archive mode")
		}
		bundle.PrevEndorsing = prev
	}
	// unavailable for the first preserved + 1 cycles (so 0..6 on mainnet)
	// post-Ithaca testnets have no snapshot for preserved + 1 cycles (0..4)
	snap, err := c.GetSnapshotIndexCycle(ctx, level, cycle, p)
	if err != nil {
		log.Errorf("Fetching snapshot index for c%d at block %d: %v", cycle, height, err)
		// return err
		snap = &SnapshotIndex{
			Cycle: cycle,
			Base:
p.SnapshotBaseCycle(cycle), Index: 15, // guess, just return something } } bundle.Snapshot = snap info, err := c.GetSnapshotInfoCycle(ctx, level, cycle) if err != nil { log.Errorf("Fetching snapshot info for c%d at block %d: %v", cycle, height, err) // return err } bundle.SnapInfo = info return nil } // ListSnapshotRollOwners returns information about a roll snapshot ownership. // Response is a nested array `[[roll_id, pubkey]]`. Deprecated in Ithaca. func (c *Client) ListSnapshotRollOwners(ctx context.Context, id BlockID, cycle, index int64) (*SnapshotOwners, error) { owners := &SnapshotOwners{Cycle: cycle, Index: index} u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/rolls/owner/snapshot/%d/%d?depth=1", id, cycle, index) if err := c.Get(ctx, u, &owners.Rolls); err != nil { return nil, err } return owners, nil }
{ type FullBakingRight struct { Delegate string `json:"delegate"` Level int64 `json:"level"` Priority int `json:"priority"` // until v011 Round int `json:"round"` // v012+ } var rr FullBakingRight err := json.Unmarshal(data, &rr) r.Delegate = rr.Delegate r.Level = rr.Level r.Round = rr.Priority + rr.Round return err }
identifier_body
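For reference, the identifier body masked in the record above is the version-bridging decoder of BakingRight: pre-Ithaca nodes (< v012) report a "priority" field while Ithaca+ nodes report "round", and the custom UnmarshalJSON folds whichever is present into the single Round field. Below is a minimal, self-contained Go sketch of that behavior; the delegate address and levels in the sample payloads are invented.

package main

import (
	"encoding/json"
	"fmt"
)

type BakingRight struct {
	Delegate string
	Level    int64
	Round    int
}

// UnmarshalJSON folds the pre-Ithaca "priority" field and the Ithaca+
// "round" field into the single Round value; only one of the two is ever
// present in a server response, so the sum keeps whichever was sent.
func (r *BakingRight) UnmarshalJSON(data []byte) error {
	type FullBakingRight struct {
		Delegate string `json:"delegate"`
		Level    int64  `json:"level"`
		Priority int    `json:"priority"` // until v011
		Round    int    `json:"round"`    // v012+
	}
	var rr FullBakingRight
	err := json.Unmarshal(data, &rr)
	r.Delegate = rr.Delegate
	r.Level = rr.Level
	r.Round = rr.Priority + rr.Round
	return err
}

func main() {
	var pre, post BakingRight
	json.Unmarshal([]byte(`{"delegate":"tz1abc","level":100,"priority":2}`), &pre)
	json.Unmarshal([]byte(`{"delegate":"tz1abc","level":100,"round":3}`), &post)
	fmt.Println(pre.Round, post.Round) // prints: 2 3
}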
rights.go
// Copyright (c) 2020-2022 Blockwatch Data Inc. // Author: [email protected] package rpc import ( "bytes" "context" "encoding/json" "fmt" "strconv" "blockwatch.cc/tzgo/tezos" ) // BakingRight holds simplified information about the right to bake a specific Tezos block type BakingRight struct { Delegate string Level int64 Round int } func (r BakingRight) Address() tezos.Address { a, _ := tezos.ParseAddress(r.Delegate) return a } func (r *BakingRight) UnmarshalJSON(data []byte) error { type FullBakingRight struct { Delegate string `json:"delegate"` Level int64 `json:"level"` Priority int `json:"priority"` // until v011 Round int `json:"round"` // v012+ } var rr FullBakingRight err := json.Unmarshal(data, &rr) r.Delegate = rr.Delegate r.Level = rr.Level r.Round = rr.Priority + rr.Round return err } // EndorsingRight holds simplified information about the right to endorse // a specific Tezos block type EndorsingRight struct { Delegate string Level int64 Power int } func (r EndorsingRight) Address() tezos.Address { a, _ := tezos.ParseAddress(r.Delegate) return a } type StakeInfo struct { ActiveStake int64 `json:"active_stake,string"` Baker tezos.Address `json:"baker"` } type SnapshotInfo struct { LastRoll []string `json:"last_roll"` Nonces []string `json:"nonces"` RandomSeed string `json:"random_seed"` RollSnapshot int `json:"roll_snapshot"` // until v011 Cycle int64 `json:"cycle"` // added, not part of RPC response BakerStake []StakeInfo `json:"selected_stake_distribution,omitempty"` // v012+ TotalStake int64 `json:"total_active_stake,string"` // v012+ // Slashed []??? "slashed_deposits" } type SnapshotIndex struct { Cycle int64 // the requested cycle that contains rights from the snapshot Base int64 // the cycle where the snapshot happened Index int // the index inside base where snapshot happened } type SnapshotOwners struct { Cycle int64 `json:"cycle"` Index int64 `json:"index"` Rolls []SnapshotRoll `json:"rolls"` } type SnapshotRoll struct { RollId int64 OwnerKey tezos.Key } func (r *SnapshotRoll) UnmarshalJSON(data []byte) error { if len(data) == 0 || bytes.Equal(data, []byte(`null`)) { return nil } if len(data) == 2 { return nil } if data[0] != '[' || data[len(data)-1] != ']' { return fmt.Errorf("SnapshotRoll: invalid json array '%s'", string(data)) } dec := json.NewDecoder(bytes.NewReader(data)) dec.UseNumber() unpacked := make([]any, 0) err := dec.Decode(&unpacked) if err != nil { return err } return r.decode(unpacked) } func (r *SnapshotRoll) decode(unpacked []any) error { if l := len(unpacked); l != 2 { return fmt.Errorf("SnapshotRoll: invalid json array len %d", l) } id, err := strconv.ParseInt(unpacked[0].(json.Number).String(), 10, 64) if err != nil { return fmt.Errorf("SnapshotRoll: invalid roll id: %v", err) } if err = r.OwnerKey.UnmarshalText([]byte(unpacked[1].(string))); err != nil { return err } r.RollId = id return nil } // ListBakingRights returns information about baking rights at block id. // Use max to set a max block priority (before Ithaca) or a max round (after Ithaca). 
func (c *Client) ListBakingRights(ctx context.Context, id BlockID, max int, p *Params) ([]BakingRight, error) { maxSelector := "max_priority=%d" if p.Version >= 12 && p.IsPreIthacaNetworkAtStart() { maxSelector = "max_round=%d" } if p.Version < 6 && p.IsPreIthacaNetworkAtStart() { max++ } rights := make([]BakingRight, 0) u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&"+maxSelector, id, max) if err := c.Get(ctx, u, &rights); err != nil { return nil, err } return rights, nil } // ListBakingRightsCycle returns information about baking rights for an entire cycle // as seen from block id. Note block and cycle must be no further than preserved cycles // away from each other. Use max to set a max block priority (before Ithaca) or a max // round (after Ithaca). func (c *Client) ListBakingRightsCycle(ctx context.Context, id BlockID, cycle int64, max int, p *Params) ([]BakingRight, error) { maxSelector := "max_round=%d" if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { maxSelector = "max_priority=%d" } if p.Version < 6 && p.IsPreIthacaNetworkAtStart() { max++ } rights := make([]BakingRight, 0) u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&cycle=%d&"+maxSelector, id, cycle, max) if err := c.Get(ctx, u, &rights); err != nil { return nil, err } return rights, nil } // ListEndorsingRights returns information about block endorsing rights. func (c *Client) ListEndorsingRights(ctx context.Context, id BlockID, p *Params) ([]EndorsingRight, error) { u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true", id) rights := make([]EndorsingRight, 0) // Note: future cycles are seen from current protocol (!) if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { type Rights struct { Level int64 `json:"level"` Delegate string `json:"delegate"` Slots []int `json:"slots"` } list := make([]Rights, 0) if err := c.Get(ctx, u, &list); err != nil
for _, r := range list { rights = append(rights, EndorsingRight{ Level: r.Level, Delegate: r.Delegate, Power: len(r.Slots), }) } } else { type V12Rights struct { Level int64 `json:"level"` Delegates []struct { Delegate string `json:"delegate"` Power int `json:"endorsing_power"` } `json:"delegates"` } v12rights := make([]V12Rights, 0) if err := c.Get(ctx, u, &v12rights); err != nil { return nil, err } for _, v := range v12rights { for _, r := range v.Delegates { rights = append(rights, EndorsingRight{ Level: v.Level, Delegate: r.Delegate, Power: r.Power, }) } } } return rights, nil } // ListEndorsingRightsCycle returns information about endorsing rights for an entire cycle // as seen from block id. Note block and cycle must be no further than preserved cycles // away. On protocol changes future rights must be refetched! func (c *Client) ListEndorsingRightsCycle(ctx context.Context, id BlockID, cycle int64, p *Params) ([]EndorsingRight, error) { u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true&cycle=%d", id, cycle) rights := make([]EndorsingRight, 0) // switch { case p.Version < 12 && p.IsPreIthacaNetworkAtStart(): // until Ithaca v012 type Rights struct { Level int64 `json:"level"` Delegate string `json:"delegate"` Slots []int `json:"slots"` } list := make([]Rights, 0) if err := c.Get(ctx, u, &list); err != nil { return nil, err } for _, r := range list { rights = append(rights, EndorsingRight{ Level: r.Level, Delegate: r.Delegate, Power: len(r.Slots), }) } default: // FIXME: it seems this is still not removed // case p.Version >= 12 && p.Version <= 15: // until Lima v015 type V12Rights struct { Level int64 `json:"level"` Delegates []struct { Delegate string `json:"delegate"` Power int `json:"endorsing_power"` } `json:"delegates"` } v12rights := make([]V12Rights, 0) if err := c.Get(ctx, u, &v12rights); err != nil { return nil, err } for _, v := range v12rights { for _, r := range v.Delegates { rights = append(rights, EndorsingRight{ Level: v.Level, Delegate: r.Delegate, Power: r.Power, }) } } // default: // Lima+ v016 (cannot fetch full cycle of endorsing rights) // TODO: fetch per block in parallel } return rights, nil } // GetSnapshotInfoCycle returns information about a roll snapshot as seen from block id. // Note block and cycle must be no further than preserved cycles away. func (c *Client) GetSnapshotInfoCycle(ctx context.Context, id BlockID, cycle int64) (*SnapshotInfo, error) { idx := &SnapshotInfo{ Cycle: cycle, RollSnapshot: -1, } u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/cycle/%d", id, cycle) if err := c.Get(ctx, u, idx); err != nil { return nil, err } if idx.RandomSeed == "" { return nil, fmt.Errorf("missing snapshot for cycle %d at block %s", cycle, id) } return idx, nil } // GetSnapshotIndexCycle returns information about a roll snapshot as seen from block id. // Note block and cycle must be no further than preserved cycles away. func (c *Client) GetSnapshotIndexCycle(ctx context.Context, id BlockID, cycle int64, p *Params) (*SnapshotIndex, error) { idx := &SnapshotIndex{} if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { // pre-Ithaca we can at most look PRESERVED_CYCLES into the future since // the snapshot happened 2 cycles back from the block we're looking from. 
var info SnapshotInfo u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/cycle/%d", id, cycle) // log.Infof("GET %s", u) if err := c.Get(ctx, u, &info); err != nil { return nil, err } if info.RandomSeed == "" { return nil, fmt.Errorf("missing snapshot for cycle %d at block %s", cycle, id) } idx.Cycle = cycle idx.Base = p.SnapshotBaseCycle(cycle) idx.Index = info.RollSnapshot } else { idx.Cycle = cycle idx.Base = p.SnapshotBaseCycle(cycle) idx.Index = 15 // if cycle > p.PreservedCycles+1 { if idx.Base <= 0 { log.Debugf("No snapshot for cycle %d", cycle) } else { u := fmt.Sprintf("chains/main/blocks/%s/context/selected_snapshot?cycle=%d", id, cycle) // log.Infof("GET %s", u) if err := c.Get(ctx, u, &idx.Index); err != nil { return nil, err } } } return idx, nil } func (c *Client) FetchRightsByCycle(ctx context.Context, height, cycle int64, bundle *Bundle) error { level := BlockLevel(height) if bundle.Params == nil { p, err := c.GetParams(ctx, level) if err != nil { return fmt.Errorf("params: %v", err) } bundle.Params = p log.Debugf("Using fresh params for v%03d", p.Version) } else { log.Debugf("Using passed params for v%03d", bundle.Params.Version) } p := bundle.Params br, err := c.ListBakingRightsCycle(ctx, level, cycle, 0, p) if err != nil { return fmt.Errorf("baking: %v", err) } if len(br) == 0 { return fmt.Errorf("empty baking rights, make sure your Tezos node runs in archive mode") } log.Debugf("Fetched %d baking rights for cycle %d at height %d", len(br), cycle, height) bundle.Baking = append(bundle.Baking, br) er, err := c.ListEndorsingRightsCycle(ctx, level, cycle, p) if err != nil { return fmt.Errorf("endorsing: %v", err) } if len(er) == 0 { return fmt.Errorf("empty endorsing rights, make sure your Tezos node runs in archive mode") } log.Debugf("Fetched %d endorsing rights for cycle %d at height %d", len(er), cycle, height) bundle.Endorsing = append(bundle.Endorsing, er) // unavailable on genesis if height > 1 { prev, err := c.ListEndorsingRights(ctx, BlockLevel(height-1), p) if err != nil { return fmt.Errorf("last endorsing: %v", err) } if len(prev) == 0 { return fmt.Errorf("empty endorsing rights from last cycle end, make sure your Tezos node runs in archive mode") } bundle.PrevEndorsing = prev } // unavailable for the first preserved + 1 cycles (so 0..6 on mainnet) // post-Ithaca testnets have no snapshot for preserved + 1 cycles (0..4) snap, err := c.GetSnapshotIndexCycle(ctx, level, cycle, p) if err != nil { log.Errorf("Fetching snapshot index for c%d at block %d: %v", cycle, height, err) // return err snap = &SnapshotIndex{ Cycle: cycle, Base: p.SnapshotBaseCycle(cycle), Index: 15, // guess, just return something } } bundle.Snapshot = snap info, err := c.GetSnapshotInfoCycle(ctx, level, cycle) if err != nil { log.Errorf("Fetching snapshot info for c%d at block %d: %v", cycle, height, err) // return err } bundle.SnapInfo = info return nil } // ListSnapshotRollOwners returns information about a roll snapshot ownership. // Response is a nested array `[[roll_id, pubkey]]`. Deprecated in Ithaca. func (c *Client) ListSnapshotRollOwners(ctx context.Context, id BlockID, cycle, index int64) (*SnapshotOwners, error) { owners := &SnapshotOwners{Cycle: cycle, Index: index} u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/rolls/owner/snapshot/%d/%d?depth=1", id, cycle, index) if err := c.Get(ctx, u, &owners.Rolls); err != nil { return nil, err } return owners, nil }
{ return nil, err }
conditional_block
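The conditional block masked in the record above is the usual `{ return nil, err }` early exit after c.Get. Since ListEndorsingRights exists mainly to normalize two different wire formats, here is a self-contained Go sketch (with invented sample payloads) of how the pre-v012 "slots" shape and the v012+ "delegates"/"endorsing_power" shape both reduce to the simplified EndorsingRight:

package main

import (
	"encoding/json"
	"fmt"
)

type EndorsingRight struct {
	Delegate string
	Level    int64
	Power    int
}

func main() {
	rights := make([]EndorsingRight, 0)

	// Pre-v012 shape: power is the number of assigned slots.
	var old []struct {
		Level    int64  `json:"level"`
		Delegate string `json:"delegate"`
		Slots    []int  `json:"slots"`
	}
	json.Unmarshal([]byte(`[{"level":7,"delegate":"tz1abc","slots":[0,4,9]}]`), &old)
	for _, r := range old {
		rights = append(rights, EndorsingRight{Level: r.Level, Delegate: r.Delegate, Power: len(r.Slots)})
	}

	// v012+ shape: rights are grouped per level with explicit endorsing_power.
	var v12 []struct {
		Level     int64 `json:"level"`
		Delegates []struct {
			Delegate string `json:"delegate"`
			Power    int    `json:"endorsing_power"`
		} `json:"delegates"`
	}
	json.Unmarshal([]byte(`[{"level":7,"delegates":[{"delegate":"tz1abc","endorsing_power":3}]}]`), &v12)
	for _, v := range v12 {
		for _, d := range v.Delegates {
			rights = append(rights, EndorsingRight{Level: v.Level, Delegate: d.Delegate, Power: d.Power})
		}
	}

	fmt.Println(rights) // both inputs reduce to Power 3
}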
rights.go
// Copyright (c) 2020-2022 Blockwatch Data Inc. // Author: [email protected] package rpc import ( "bytes" "context" "encoding/json" "fmt" "strconv" "blockwatch.cc/tzgo/tezos" ) // BakingRight holds simplified information about the right to bake a specific Tezos block type BakingRight struct { Delegate string Level int64 Round int } func (r BakingRight) Address() tezos.Address { a, _ := tezos.ParseAddress(r.Delegate) return a } func (r *BakingRight) UnmarshalJSON(data []byte) error { type FullBakingRight struct { Delegate string `json:"delegate"` Level int64 `json:"level"` Priority int `json:"priority"` // until v011 Round int `json:"round"` // v012+ } var rr FullBakingRight err := json.Unmarshal(data, &rr) r.Delegate = rr.Delegate r.Level = rr.Level r.Round = rr.Priority + rr.Round return err } // EndorsingRight holds simplified information about the right to endorse // a specific Tezos block type EndorsingRight struct { Delegate string Level int64 Power int } func (r EndorsingRight) Address() tezos.Address { a, _ := tezos.ParseAddress(r.Delegate) return a } type StakeInfo struct { ActiveStake int64 `json:"active_stake,string"` Baker tezos.Address `json:"baker"` } type SnapshotInfo struct { LastRoll []string `json:"last_roll"` Nonces []string `json:"nonces"` RandomSeed string `json:"random_seed"` RollSnapshot int `json:"roll_snapshot"` // until v011 Cycle int64 `json:"cycle"` // added, not part of RPC response BakerStake []StakeInfo `json:"selected_stake_distribution,omitempty"` // v012+ TotalStake int64 `json:"total_active_stake,string"` // v012+ // Slashed []??? "slashed_deposits" } type SnapshotIndex struct { Cycle int64 // the requested cycle that contains rights from the snapshot Base int64 // the cycle where the snapshot happened Index int // the index inside base where snapshot happened } type SnapshotOwners struct { Cycle int64 `json:"cycle"` Index int64 `json:"index"` Rolls []SnapshotRoll `json:"rolls"` } type SnapshotRoll struct { RollId int64 OwnerKey tezos.Key } func (r *SnapshotRoll) UnmarshalJSON(data []byte) error { if len(data) == 0 || bytes.Equal(data, []byte(`null`)) { return nil } if len(data) == 2 { return nil } if data[0] != '[' || data[len(data)-1] != ']' { return fmt.Errorf("SnapshotRoll: invalid json array '%s'", string(data)) } dec := json.NewDecoder(bytes.NewReader(data)) dec.UseNumber() unpacked := make([]any, 0) err := dec.Decode(&unpacked) if err != nil { return err } return r.decode(unpacked) } func (r *SnapshotRoll) decode(unpacked []any) error { if l := len(unpacked); l != 2 { return fmt.Errorf("SnapshotRoll: invalid json array len %d", l) } id, err := strconv.ParseInt(unpacked[0].(json.Number).String(), 10, 64) if err != nil { return fmt.Errorf("SnapshotRoll: invalid roll id: %v", err) } if err = r.OwnerKey.UnmarshalText([]byte(unpacked[1].(string))); err != nil { return err } r.RollId = id return nil } // ListBakingRights returns information about baking rights at block id. // Use max to set a max block priority (before Ithaca) or a max round (after Ithaca). 
func (c *Client) ListBakingRights(ctx context.Context, id BlockID, max int, p *Params) ([]BakingRight, error) { maxSelector := "max_priority=%d" if p.Version >= 12 && p.IsPreIthacaNetworkAtStart() { maxSelector = "max_round=%d" } if p.Version < 6 && p.IsPreIthacaNetworkAtStart() { max++ } rights := make([]BakingRight, 0) u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&"+maxSelector, id, max) if err := c.Get(ctx, u, &rights); err != nil { return nil, err } return rights, nil } // ListBakingRightsCycle returns information about baking rights for an entire cycle // as seen from block id. Note block and cycle must be no further than preserved cycles // away from each other. Use max to set a max block priority (before Ithaca) or a max // round (after Ithaca). func (c *Client) ListBakingRightsCycle(ctx context.Context, id BlockID, cycle int64, max int, p *Params) ([]BakingRight, error) { maxSelector := "max_round=%d" if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { maxSelector = "max_priority=%d" } if p.Version < 6 && p.IsPreIthacaNetworkAtStart() { max++ } rights := make([]BakingRight, 0) u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&cycle=%d&"+maxSelector, id, cycle, max) if err := c.Get(ctx, u, &rights); err != nil { return nil, err } return rights, nil } // ListEndorsingRights returns information about block endorsing rights. func (c *Client) ListEndorsingRights(ctx context.Context, id BlockID, p *Params) ([]EndorsingRight, error) { u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true", id) rights := make([]EndorsingRight, 0) // Note: future cycles are seen from current protocol (!) if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { type Rights struct { Level int64 `json:"level"` Delegate string `json:"delegate"` Slots []int `json:"slots"` } list := make([]Rights, 0) if err := c.Get(ctx, u, &list); err != nil { return nil, err } for _, r := range list { rights = append(rights, EndorsingRight{ Level: r.Level, Delegate: r.Delegate, Power: len(r.Slots), }) } } else { type V12Rights struct { Level int64 `json:"level"` Delegates []struct { Delegate string `json:"delegate"` Power int `json:"endorsing_power"` } `json:"delegates"` } v12rights := make([]V12Rights, 0) if err := c.Get(ctx, u, &v12rights); err != nil { return nil, err } for _, v := range v12rights { for _, r := range v.Delegates { rights = append(rights, EndorsingRight{ Level: v.Level, Delegate: r.Delegate, Power: r.Power, }) } } } return rights, nil } // ListEndorsingRightsCycle returns information about endorsing rights for an entire cycle // as seen from block id. Note block and cycle must be no further than preserved cycles // away. On protocol changes future rights must be refetched! 
func (c *Client) ListEndorsingRightsCycle(ctx context.Context, id BlockID, cycle int64, p *Params) ([]EndorsingRight, error) { u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true&cycle=%d", id, cycle) rights := make([]EndorsingRight, 0) // switch { case p.Version < 12 && p.IsPreIthacaNetworkAtStart(): // until Ithaca v012 type Rights struct { Level int64 `json:"level"` Delegate string `json:"delegate"` Slots []int `json:"slots"` } list := make([]Rights, 0) if err := c.Get(ctx, u, &list); err != nil { return nil, err } for _, r := range list { rights = append(rights, EndorsingRight{ Level: r.Level, Delegate: r.Delegate, Power: len(r.Slots), }) } default: // FIXME: it seems this is still not removed // case p.Version >= 12 && p.Version <= 15: // until Lima v015 type V12Rights struct { Level int64 `json:"level"` Delegates []struct { Delegate string `json:"delegate"` Power int `json:"endorsing_power"` } `json:"delegates"` } v12rights := make([]V12Rights, 0) if err := c.Get(ctx, u, &v12rights); err != nil { return nil, err } for _, v := range v12rights { for _, r := range v.Delegates { rights = append(rights, EndorsingRight{ Level: v.Level, Delegate: r.Delegate, Power: r.Power, }) } } // default: // Lima+ v016 (cannot fetch full cycle of endorsing rights) // TODO: fetch per block in parallel } return rights, nil } // GetSnapshotInfoCycle returns information about a roll snapshot as seen from block id. // Note block and cycle must be no further than preserved cycles away. func (c *Client) GetSnapshotInfoCycle(ctx context.Context, id BlockID, cycle int64) (*SnapshotInfo, error) { idx := &SnapshotInfo{ Cycle: cycle, RollSnapshot: -1, } u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/cycle/%d", id, cycle) if err := c.Get(ctx, u, idx); err != nil { return nil, err } if idx.RandomSeed == "" { return nil, fmt.Errorf("missing snapshot for cycle %d at block %s", cycle, id) } return idx, nil } // GetSnapshotIndexCycle returns information about a roll snapshot as seen from block id. // Note block and cycle must be no further than preserved cycles away. func (c *Client) GetSnapshotIndexCycle(ctx context.Context, id BlockID, cycle int64, p *Params) (*SnapshotIndex, error) { idx := &SnapshotIndex{} if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { // pre-Ithaca we can at most look PRESERVED_CYCLES into the future since // the snapshot happened 2 cycles back from the block we're looking from. var info SnapshotInfo u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/cycle/%d", id, cycle) // log.Infof("GET %s", u) if err := c.Get(ctx, u, &info); err != nil { return nil, err } if info.RandomSeed == "" { return nil, fmt.Errorf("missing snapshot for cycle %d at block %s", cycle, id) } idx.Cycle = cycle
idx.Index = 15 // if cycle > p.PreservedCycles+1 { if idx.Base <= 0 { log.Debugf("No snapshot for cycle %d", cycle) } else { u := fmt.Sprintf("chains/main/blocks/%s/context/selected_snapshot?cycle=%d", id, cycle) // log.Infof("GET %s", u) if err := c.Get(ctx, u, &idx.Index); err != nil { return nil, err } } } return idx, nil } func (c *Client) FetchRightsByCycle(ctx context.Context, height, cycle int64, bundle *Bundle) error { level := BlockLevel(height) if bundle.Params == nil { p, err := c.GetParams(ctx, level) if err != nil { return fmt.Errorf("params: %v", err) } bundle.Params = p log.Debugf("Using fresh params for v%03d", p.Version) } else { log.Debugf("Using passed params for v%03d", bundle.Params.Version) } p := bundle.Params br, err := c.ListBakingRightsCycle(ctx, level, cycle, 0, p) if err != nil { return fmt.Errorf("baking: %v", err) } if len(br) == 0 { return fmt.Errorf("empty baking rights, make sure your Tezos node runs in archive mode") } log.Debugf("Fetched %d baking rights for cycle %d at height %d", len(br), cycle, height) bundle.Baking = append(bundle.Baking, br) er, err := c.ListEndorsingRightsCycle(ctx, level, cycle, p) if err != nil { return fmt.Errorf("endorsing: %v", err) } if len(er) == 0 { return fmt.Errorf("empty endorsing rights, make sure your Tezos node runs in archive mode") } log.Debugf("Fetched %d endorsing rights for cycle %d at height %d", len(er), cycle, height) bundle.Endorsing = append(bundle.Endorsing, er) // unavailable on genesis if height > 1 { prev, err := c.ListEndorsingRights(ctx, BlockLevel(height-1), p) if err != nil { return fmt.Errorf("last endorsing: %v", err) } if len(prev) == 0 { return fmt.Errorf("empty endorsing rights from last cycle end, make sure your Tezos node runs in archive mode") } bundle.PrevEndorsing = prev } // unavailable for the first preserved + 1 cycles (so 0..6 on mainnet) // post-Ithaca testnets have no snapshot for preserved + 1 cycles (0..4) snap, err := c.GetSnapshotIndexCycle(ctx, level, cycle, p) if err != nil { log.Errorf("Fetching snapshot index for c%d at block %d: %v", cycle, height, err) // return err snap = &SnapshotIndex{ Cycle: cycle, Base: p.SnapshotBaseCycle(cycle), Index: 15, // guess, just return something } } bundle.Snapshot = snap info, err := c.GetSnapshotInfoCycle(ctx, level, cycle) if err != nil { log.Errorf("Fetching snapshot info for c%d at block %d: %v", cycle, height, err) // return err } bundle.SnapInfo = info return nil } // ListSnapshotRollOwners returns information about a roll snapshot ownership. // Response is a nested array `[[roll_id, pubkey]]`. Deprecated in Ithaca. func (c *Client) ListSnapshotRollOwners(ctx context.Context, id BlockID, cycle, index int64) (*SnapshotOwners, error) { owners := &SnapshotOwners{Cycle: cycle, Index: index} u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/rolls/owner/snapshot/%d/%d?depth=1", id, cycle, index) if err := c.Get(ctx, u, &owners.Rolls); err != nil { return nil, err } return owners, nil }
idx.Base = p.SnapshotBaseCycle(cycle) idx.Index = info.RollSnapshot } else { idx.Cycle = cycle idx.Base = p.SnapshotBaseCycle(cycle)
random_line_split
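The random line split in the record above falls inside GetSnapshotIndexCycle. A related decoding detail worth a worked example is SnapshotRoll.UnmarshalJSON, which parses the `[roll_id, pubkey]` pair format; the sketch below mirrors its json.Number handling but trims some input validation and substitutes a plain string for tezos.Key so it runs without dependencies. The sample roll id and key are invented.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strconv"
)

type SnapshotRoll struct {
	RollId   int64
	OwnerKey string // tezos.Key in the real code
}

// UnmarshalJSON decodes a two-element JSON array [roll_id, pubkey].
func (r *SnapshotRoll) UnmarshalJSON(data []byte) error {
	// Skip empty input, `null`, and the empty array `[]` (len 2).
	if len(data) == 0 || bytes.Equal(data, []byte(`null`)) || len(data) == 2 {
		return nil
	}
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.UseNumber() // keep the roll id as json.Number to avoid float64 rounding
	var unpacked []any
	if err := dec.Decode(&unpacked); err != nil {
		return err
	}
	if len(unpacked) != 2 {
		return fmt.Errorf("SnapshotRoll: invalid json array len %d", len(unpacked))
	}
	id, err := strconv.ParseInt(unpacked[0].(json.Number).String(), 10, 64)
	if err != nil {
		return fmt.Errorf("SnapshotRoll: invalid roll id: %v", err)
	}
	r.RollId = id
	r.OwnerKey = unpacked[1].(string)
	return nil
}

func main() {
	var roll SnapshotRoll
	json.Unmarshal([]byte(`[12345,"edpkuExample"]`), &roll)
	fmt.Println(roll.RollId, roll.OwnerKey) // prints: 12345 edpkuExample
}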
rights.go
// Copyright (c) 2020-2022 Blockwatch Data Inc. // Author: [email protected] package rpc import ( "bytes" "context" "encoding/json" "fmt" "strconv" "blockwatch.cc/tzgo/tezos" ) // BakingRight holds simplified information about the right to bake a specific Tezos block type BakingRight struct { Delegate string Level int64 Round int } func (r BakingRight) Address() tezos.Address { a, _ := tezos.ParseAddress(r.Delegate) return a } func (r *BakingRight) UnmarshalJSON(data []byte) error { type FullBakingRight struct { Delegate string `json:"delegate"` Level int64 `json:"level"` Priority int `json:"priority"` // until v011 Round int `json:"round"` // v012+ } var rr FullBakingRight err := json.Unmarshal(data, &rr) r.Delegate = rr.Delegate r.Level = rr.Level r.Round = rr.Priority + rr.Round return err } // EndorsingRight holds simplified information about the right to endorse // a specific Tezos block type EndorsingRight struct { Delegate string Level int64 Power int } func (r EndorsingRight) Address() tezos.Address { a, _ := tezos.ParseAddress(r.Delegate) return a } type StakeInfo struct { ActiveStake int64 `json:"active_stake,string"` Baker tezos.Address `json:"baker"` } type SnapshotInfo struct { LastRoll []string `json:"last_roll"` Nonces []string `json:"nonces"` RandomSeed string `json:"random_seed"` RollSnapshot int `json:"roll_snapshot"` // until v011 Cycle int64 `json:"cycle"` // added, not part of RPC response BakerStake []StakeInfo `json:"selected_stake_distribution,omitempty"` // v012+ TotalStake int64 `json:"total_active_stake,string"` // v012+ // Slashed []??? "slashed_deposits" } type SnapshotIndex struct { Cycle int64 // the requested cycle that contains rights from the snapshot Base int64 // the cycle where the snapshot happened Index int // the index inside base where snapshot happened } type SnapshotOwners struct { Cycle int64 `json:"cycle"` Index int64 `json:"index"` Rolls []SnapshotRoll `json:"rolls"` } type SnapshotRoll struct { RollId int64 OwnerKey tezos.Key } func (r *SnapshotRoll) UnmarshalJSON(data []byte) error { if len(data) == 0 || bytes.Equal(data, []byte(`null`)) { return nil } if len(data) == 2 { return nil } if data[0] != '[' || data[len(data)-1] != ']' { return fmt.Errorf("SnapshotRoll: invalid json array '%s'", string(data)) } dec := json.NewDecoder(bytes.NewReader(data)) dec.UseNumber() unpacked := make([]any, 0) err := dec.Decode(&unpacked) if err != nil { return err } return r.decode(unpacked) } func (r *SnapshotRoll) decode(unpacked []any) error { if l := len(unpacked); l != 2 { return fmt.Errorf("SnapshotRoll: invalid json array len %d", l) } id, err := strconv.ParseInt(unpacked[0].(json.Number).String(), 10, 64) if err != nil { return fmt.Errorf("SnapshotRoll: invalid roll id: %v", err) } if err = r.OwnerKey.UnmarshalText([]byte(unpacked[1].(string))); err != nil { return err } r.RollId = id return nil } // ListBakingRights returns information about baking rights at block id. // Use max to set a max block priority (before Ithaca) or a max round (after Ithaca). 
func (c *Client) ListBakingRights(ctx context.Context, id BlockID, max int, p *Params) ([]BakingRight, error) { maxSelector := "max_priority=%d" if p.Version >= 12 && p.IsPreIthacaNetworkAtStart() { maxSelector = "max_round=%d" } if p.Version < 6 && p.IsPreIthacaNetworkAtStart() { max++ } rights := make([]BakingRight, 0) u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&"+maxSelector, id, max) if err := c.Get(ctx, u, &rights); err != nil { return nil, err } return rights, nil } // ListBakingRightsCycle returns information about baking rights for an entire cycle // as seen from block id. Note block and cycle must be no further than preserved cycles // away from each other. Use max to set a max block priority (before Ithaca) or a max // round (after Ithaca). func (c *Client) ListBakingRightsCycle(ctx context.Context, id BlockID, cycle int64, max int, p *Params) ([]BakingRight, error) { maxSelector := "max_round=%d" if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { maxSelector = "max_priority=%d" } if p.Version < 6 && p.IsPreIthacaNetworkAtStart() { max++ } rights := make([]BakingRight, 0) u := fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&cycle=%d&"+maxSelector, id, cycle, max) if err := c.Get(ctx, u, &rights); err != nil { return nil, err } return rights, nil } // ListEndorsingRights returns information about block endorsing rights. func (c *Client)
(ctx context.Context, id BlockID, p *Params) ([]EndorsingRight, error) { u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true", id) rights := make([]EndorsingRight, 0) // Note: future cycles are seen from current protocol (!) if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { type Rights struct { Level int64 `json:"level"` Delegate string `json:"delegate"` Slots []int `json:"slots"` } list := make([]Rights, 0) if err := c.Get(ctx, u, &list); err != nil { return nil, err } for _, r := range list { rights = append(rights, EndorsingRight{ Level: r.Level, Delegate: r.Delegate, Power: len(r.Slots), }) } } else { type V12Rights struct { Level int64 `json:"level"` Delegates []struct { Delegate string `json:"delegate"` Power int `json:"endorsing_power"` } `json:"delegates"` } v12rights := make([]V12Rights, 0) if err := c.Get(ctx, u, &v12rights); err != nil { return nil, err } for _, v := range v12rights { for _, r := range v.Delegates { rights = append(rights, EndorsingRight{ Level: v.Level, Delegate: r.Delegate, Power: r.Power, }) } } } return rights, nil } // ListEndorsingRightsCycle returns information about endorsing rights for an entire cycle // as seen from block id. Note block and cycle must be no further than preserved cycles // away. On protocol changes future rights must be refetched! func (c *Client) ListEndorsingRightsCycle(ctx context.Context, id BlockID, cycle int64, p *Params) ([]EndorsingRight, error) { u := fmt.Sprintf("chains/main/blocks/%s/helpers/endorsing_rights?all=true&cycle=%d", id, cycle) rights := make([]EndorsingRight, 0) // switch { case p.Version < 12 && p.IsPreIthacaNetworkAtStart(): // until Ithaca v012 type Rights struct { Level int64 `json:"level"` Delegate string `json:"delegate"` Slots []int `json:"slots"` } list := make([]Rights, 0) if err := c.Get(ctx, u, &list); err != nil { return nil, err } for _, r := range list { rights = append(rights, EndorsingRight{ Level: r.Level, Delegate: r.Delegate, Power: len(r.Slots), }) } default: // FIXME: it seems this is still not removed // case p.Version >= 12 && p.Version <= 15: // until Lima v015 type V12Rights struct { Level int64 `json:"level"` Delegates []struct { Delegate string `json:"delegate"` Power int `json:"endorsing_power"` } `json:"delegates"` } v12rights := make([]V12Rights, 0) if err := c.Get(ctx, u, &v12rights); err != nil { return nil, err } for _, v := range v12rights { for _, r := range v.Delegates { rights = append(rights, EndorsingRight{ Level: v.Level, Delegate: r.Delegate, Power: r.Power, }) } } // default: // Lima+ v016 (cannot fetch full cycle of endorsing rights) // TODO: fetch per block in parallel } return rights, nil } // GetSnapshotInfoCycle returns information about a roll snapshot as seen from block id. // Note block and cycle must be no further than preserved cycles away. func (c *Client) GetSnapshotInfoCycle(ctx context.Context, id BlockID, cycle int64) (*SnapshotInfo, error) { idx := &SnapshotInfo{ Cycle: cycle, RollSnapshot: -1, } u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/cycle/%d", id, cycle) if err := c.Get(ctx, u, idx); err != nil { return nil, err } if idx.RandomSeed == "" { return nil, fmt.Errorf("missing snapshot for cycle %d at block %s", cycle, id) } return idx, nil } // GetSnapshotIndexCycle returns information about a roll snapshot as seen from block id. // Note block and cycle must be no further than preserved cycles away. 
func (c *Client) GetSnapshotIndexCycle(ctx context.Context, id BlockID, cycle int64, p *Params) (*SnapshotIndex, error) { idx := &SnapshotIndex{} if p.Version < 12 && p.IsPreIthacaNetworkAtStart() { // pre-Ithaca we can at most look PRESERVED_CYCLES into the future since // the snapshot happened 2 cycles back from the block we're looking from. var info SnapshotInfo u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/cycle/%d", id, cycle) // log.Infof("GET %s", u) if err := c.Get(ctx, u, &info); err != nil { return nil, err } if info.RandomSeed == "" { return nil, fmt.Errorf("missing snapshot for cycle %d at block %s", cycle, id) } idx.Cycle = cycle idx.Base = p.SnapshotBaseCycle(cycle) idx.Index = info.RollSnapshot } else { idx.Cycle = cycle idx.Base = p.SnapshotBaseCycle(cycle) idx.Index = 15 // if cycle > p.PreservedCycles+1 { if idx.Base <= 0 { log.Debugf("No snapshot for cycle %d", cycle) } else { u := fmt.Sprintf("chains/main/blocks/%s/context/selected_snapshot?cycle=%d", id, cycle) // log.Infof("GET %s", u) if err := c.Get(ctx, u, &idx.Index); err != nil { return nil, err } } } return idx, nil } func (c *Client) FetchRightsByCycle(ctx context.Context, height, cycle int64, bundle *Bundle) error { level := BlockLevel(height) if bundle.Params == nil { p, err := c.GetParams(ctx, level) if err != nil { return fmt.Errorf("params: %v", err) } bundle.Params = p log.Debugf("Using fresh params for v%03d", p.Version) } else { log.Debugf("Using passed params for v%03d", bundle.Params.Version) } p := bundle.Params br, err := c.ListBakingRightsCycle(ctx, level, cycle, 0, p) if err != nil { return fmt.Errorf("baking: %v", err) } if len(br) == 0 { return fmt.Errorf("empty baking rights, make sure your Tezos node runs in archive mode") } log.Debugf("Fetched %d baking rights for cycle %d at height %d", len(br), cycle, height) bundle.Baking = append(bundle.Baking, br) er, err := c.ListEndorsingRightsCycle(ctx, level, cycle, p) if err != nil { return fmt.Errorf("endorsing: %v", err) } if len(er) == 0 { return fmt.Errorf("empty endorsing rights, make sure your Tezos node runs in archive mode") } log.Debugf("Fetched %d endorsing rights for cycle %d at height %d", len(er), cycle, height) bundle.Endorsing = append(bundle.Endorsing, er) // unavailable on genesis if height > 1 { prev, err := c.ListEndorsingRights(ctx, BlockLevel(height-1), p) if err != nil { return fmt.Errorf("last endorsing: %v", err) } if len(prev) == 0 { return fmt.Errorf("empty endorsing rights from last cycle end, make sure your Tezos node runs in archive mode") } bundle.PrevEndorsing = prev } // unavailable for the first preserved + 1 cycles (so 0..6 on mainnet) // post-Ithaca testnets have no snapshot for preserved + 1 cycles (0..4) snap, err := c.GetSnapshotIndexCycle(ctx, level, cycle, p) if err != nil { log.Errorf("Fetching snapshot index for c%d at block %d: %v", cycle, height, err) // return err snap = &SnapshotIndex{ Cycle: cycle, Base: p.SnapshotBaseCycle(cycle), Index: 15, // guess, just return something } } bundle.Snapshot = snap info, err := c.GetSnapshotInfoCycle(ctx, level, cycle) if err != nil { log.Errorf("Fetching snapshot info for c%d at block %d: %v", cycle, height, err) // return err } bundle.SnapInfo = info return nil } // ListSnapshotRollOwners returns information about a roll snapshot ownership. // Response is a nested array `[[roll_id, pubkey]]`. Deprecated in Ithaca. 
func (c *Client) ListSnapshotRollOwners(ctx context.Context, id BlockID, cycle, index int64) (*SnapshotOwners, error) { owners := &SnapshotOwners{Cycle: cycle, Index: index} u := fmt.Sprintf("chains/main/blocks/%s/context/raw/json/rolls/owner/snapshot/%d/%d?depth=1", id, cycle, index) if err := c.Get(ctx, u, &owners.Rolls); err != nil { return nil, err } return owners, nil }
ListEndorsingRights
identifier_name
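The masked identifier in the record above is ListEndorsingRights. On the baking side, the only protocol-sensitive part of the request is the max selector; a tiny Go sketch of that URL construction follows. The helper name and its inputs are invented, and the pre-Ithaca check is reduced to a bare version comparison rather than the full IsPreIthacaNetworkAtStart condition.

package main

import "fmt"

// bakingRightsURL builds the cycle query, picking "max_priority" for
// pre-v012 protocols and "max_round" for Ithaca and later.
func bakingRightsURL(block string, cycle int64, max, version int) string {
	sel := "max_round=%d"
	if version < 12 {
		sel = "max_priority=%d"
	}
	return fmt.Sprintf("chains/main/blocks/%s/helpers/baking_rights?all=true&cycle=%d&"+sel,
		block, cycle, max)
}

func main() {
	fmt.Println(bakingRightsURL("head", 42, 0, 11)) // ...&max_priority=0
	fmt.Println(bakingRightsURL("head", 42, 0, 12)) // ...&max_round=0
}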
lib.rs
mod error; use std::fs::{self, File, OpenOptions}; use std::io::{self, stdin, stdout, Write}; use std::ops::Deref; use std::os::unix::fs::OpenOptionsExt; use std::path::{Path, PathBuf}; use error_chain::bail; use hex::FromHex; use lazy_static::lazy_static; use seckey::SecBytes; use serde::{Deserialize, Serialize}; use sodiumoxide::crypto::{ pwhash, secretbox, sign, }; use termion::input::TermRead; pub use crate::error::{ErrorKind, Error, ResultExt}; lazy_static! { static ref HOMEDIR: PathBuf = { dirs::home_dir() .unwrap_or("./".into()) }; /// The default location for pkgar to look for the user's public key. /// /// Defaults to `$HOME/.pkgar/keys/id_ed25519.pub.toml`. If `$HOME` is /// unset, `./.pkgar/keys/id_ed25519.pub.toml`. pub static ref DEFAULT_PUBKEY: PathBuf = { Path::join(&HOMEDIR, ".pkgar/keys/id_ed25519.pub.toml") }; /// The default location for pkgar to look for the user's secret key. /// /// Defaults to `$HOME/.pkgar/keys/id_ed25519.toml`. If `$HOME` is unset, /// `./.pkgar/keys/id_ed25519.toml`. pub static ref DEFAULT_SECKEY: PathBuf = { Path::join(&HOMEDIR, ".pkgar/keys/id_ed25519.toml") }; } mod ser { use hex::FromHex; use serde::{Deserialize, Deserializer}; use serde::de::Error; use sodiumoxide::crypto::{pwhash, secretbox, sign}; //TODO: Macro? pub(crate) fn to_salt<'d, D: Deserializer<'d>>(deser: D) -> Result<pwhash::Salt, D::Error> { String::deserialize(deser) .and_then(|s| <[u8; 32]>::from_hex(s) .map(|val| pwhash::Salt(val) ) .map_err(|err| Error::custom(err.to_string()) ) ) } pub(crate) fn to_nonce<'d, D: Deserializer<'d>>(deser: D) -> Result<secretbox::Nonce, D::Error> { String::deserialize(deser) .and_then(|s| <[u8; 24]>::from_hex(s) .map(|val| secretbox::Nonce(val) ) .map_err(|err| Error::custom(err.to_string()) ) ) } pub(crate) fn to_pubkey<'d, D: Deserializer<'d>>(deser: D) -> Result<sign::PublicKey, D::Error> { String::deserialize(deser) .and_then(|s| <[u8; 32]>::from_hex(s) .map(|val| sign::PublicKey(val) ) .map_err(|err| Error::custom(err.to_string()) ) ) } } /// Standard pkgar public key format definition. Use serde to serialize/deserialize /// files into this struct (helper methods available). #[derive(Deserialize, Serialize)] pub struct PublicKeyFile { #[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_pubkey")] pub pkey: sign::PublicKey, } impl PublicKeyFile { /// Parse a `PublicKeyFile` from `file` (in toml format). pub fn open(file: impl AsRef<Path>) -> Result<PublicKeyFile, Error> { let content = fs::read_to_string(&file) .chain_err(|| file.as_ref() )?; toml::from_str(&content) .chain_err(|| file.as_ref() ) } /// Write `self` serialized as toml to `w`. pub fn write(&self, mut w: impl Write) -> Result<(), Error> { w.write_all(toml::to_string(self)?.as_bytes())?; Ok(()) } /// Shortcut to write the public key to `file` pub fn save(&self, file: impl AsRef<Path>) -> Result<(), Error> { self.write( File::create(&file) .chain_err(|| file.as_ref() )? 
).chain_err(|| file.as_ref() ) } } enum SKey { Cipher([u8; 80]), Plain(sign::SecretKey), } impl SKey { fn encrypt(&mut self, passwd: Passwd, salt: pwhash::Salt, nonce: secretbox::Nonce) { if let SKey::Plain(skey) = self { if let Some(passwd_key) = passwd.gen_key(salt) { let mut buf = [0; 80]; buf.copy_from_slice(&secretbox::seal(skey.as_ref(), &nonce, &passwd_key)); *self = SKey::Cipher(buf); } } } fn decrypt(&mut self, passwd: Passwd, salt: pwhash::Salt, nonce: secretbox::Nonce) -> Result<(), Error> { if let SKey::Cipher(ciphertext) = self { if let Some(passwd_key) = passwd.gen_key(salt) { let skey_plain = secretbox::open(ciphertext.as_ref(), &nonce, &passwd_key) .map_err(|_| ErrorKind::PassphraseIncorrect )?; *self = SKey::Plain(sign::SecretKey::from_slice(&skey_plain) .ok_or(ErrorKind::KeyInvalid)?); } else { *self = SKey::Plain(sign::SecretKey::from_slice(&ciphertext[..64]) .ok_or(ErrorKind::KeyInvalid)?); } } Ok(()) } /// Returns `None` if encrypted fn skey(&self) -> Option<sign::SecretKey> { match &self { SKey::Plain(skey) => Some(skey.clone()), SKey::Cipher(_) => None, } } } impl AsRef<[u8]> for SKey { fn as_ref(&self) -> &[u8] { match self { SKey::Cipher(buf) => buf.as_ref(), SKey::Plain(skey) => skey.as_ref(), } } } impl FromHex for SKey { type Error = hex::FromHexError; fn from_hex<T: AsRef<[u8]>>(buf: T) -> Result<SKey, hex::FromHexError> { let bytes = hex::decode(buf)?; // Public key is only 64 bytes... if bytes.len() == 64 { Ok(SKey::Plain(sign::SecretKey::from_slice(&bytes) .expect("Somehow not the right number of bytes"))) } else { let mut buf = [0; 80]; buf.copy_from_slice(&bytes); Ok(SKey::Cipher(buf)) } } } /// Standard pkgar private key format definition. Use serde. /// Internally, this struct stores the encrypted state of the private key as an enum. /// Manipulate the state using the `encrypt()`, `decrypt()` and `is_encrypted()`. #[derive(Deserialize, Serialize)] pub struct SecretKeyFile { #[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_salt")] salt: pwhash::Salt, #[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_nonce")] nonce: secretbox::Nonce, #[serde(with = "hex")] skey: SKey, } impl SecretKeyFile { /// Generate a keypair with all the necessary info to save both keys. You /// must call `save()` on each object to persist them to disk. pub fn new() -> (PublicKeyFile, SecretKeyFile) { let (pkey, skey) = sign::gen_keypair(); let pkey_file = PublicKeyFile { pkey }; let skey_file = SecretKeyFile { salt: pwhash::gen_salt(), nonce: secretbox::gen_nonce(), skey: SKey::Plain(skey), }; (pkey_file, skey_file) } /// Parse a `SecretKeyFile` from `file` (in toml format). pub fn open(file: impl AsRef<Path>) -> Result<SecretKeyFile, Error> { let content = fs::read_to_string(&file) .chain_err(|| file.as_ref() )?; toml::from_str(&content) .chain_err(|| file.as_ref() ) } /// Write `self` serialized as toml to `w`. pub fn write(&self, mut w: impl Write) -> Result<(), Error> { w.write_all(toml::to_string(&self)?.as_bytes())?; Ok(()) } /// Shortcut to write the secret key to `file`. /// /// Make sure to call `encrypt()` in order to encrypt /// the private key, otherwise it will be stored as plain text. pub fn save(&self, file: impl AsRef<Path>) -> Result<(), Error> { self.write( OpenOptions::new() .write(true) .create(true) .mode(0o600) .open(&file) .chain_err(|| file.as_ref() )? ).chain_err(|| file.as_ref() ) } /// Ensure that the internal state of this struct is encrypted. /// Note that if passwd is empty, this function is a no-op.
pub fn encrypt(&mut self, passwd: Passwd) { self.skey.encrypt(passwd, self.salt, self.nonce) } /// Ensure that the internal state of this struct is decrypted. /// If the internal state is already decrypted, this function is a no-op. pub fn decrypt(&mut self, passwd: Passwd) -> Result<(), Error> { self.skey.decrypt(passwd, self.salt, self.nonce) } /// Status of the internal state. pub fn is_encrypted(&self) -> bool { match self.skey { SKey::Cipher(_) => true, SKey::Plain(_) => false, } } /// Returns `None` if the secret key is encrypted. pub fn key(&mut self) -> Option<sign::SecretKey> { match &self.skey { SKey::Plain(skey) => Some(skey.clone()), SKey::Cipher(_) => None, } } /// Returns `None` if the secret key is encrypted. pub fn public_key_file(&self) -> Option<PublicKeyFile> { Some(PublicKeyFile { pkey: self.skey.skey()?.public_key(), }) } } /// Secure in-memory representation of a password. pub struct Passwd { bytes: SecBytes, } impl Passwd { /// Create a new `Passwd` and zero the old string. pub fn new(passwd: &mut String) -> Passwd { let pwd = Passwd { bytes :SecBytes::with( passwd.len(), |buf| buf.copy_from_slice(passwd.as_bytes()) ), }; unsafe { seckey::zero(passwd.as_bytes_mut()); } pwd } /// Prompt the user for a `Passwd` on stdin. pub fn prompt(prompt: impl AsRef<str>) -> Result<Passwd, Error> { let stdout = stdout(); let mut stdout = stdout.lock(); let stdin = stdin(); let mut stdin = stdin.lock(); stdout.write_all(prompt.as_ref().as_bytes())?; stdout.flush()?; let mut passwd = stdin.read_passwd(&mut stdout)? .ok_or(ErrorKind::Io( io::Error::new( io::ErrorKind::UnexpectedEof, "Invalid Password Input", ) ))?; println!(); Ok(Passwd::new(&mut passwd)) } /// Prompt for a password on stdin and confirm it. For configurable /// prompts, use [`Passwd::prompt`](struct.Passwd.html#method.prompt). pub fn prompt_new() -> Result<Passwd, Error> { let passwd = Passwd::prompt( "Please enter a new passphrase (leave empty to store the key in plaintext): " )?; let confirm = Passwd::prompt("Please re-enter the passphrase: ")?; if passwd != confirm { bail!(ErrorKind::PassphraseMismatch); } Ok(passwd) } /// Get a key for symmetric key encryption from a password. fn gen_key(&self, salt: pwhash::Salt) -> Option<secretbox::Key> { if self.bytes.read().len() > 0 { let mut key = secretbox::Key([0; secretbox::KEYBYTES]); let secretbox::Key(ref mut binary_key) = key; pwhash::derive_key( binary_key, &self.bytes.read(), &salt, pwhash::OPSLIMIT_INTERACTIVE, pwhash::MEMLIMIT_INTERACTIVE, ).expect("Failed to get key from password"); Some(key) } else { None } } } impl PartialEq for Passwd { fn eq(&self, other: &Passwd) -> bool { self.bytes.read().deref() == other.bytes.read().deref() } } impl Eq for Passwd {} /// Generate a new keypair. The new keys will be saved to `file`. The user /// will be prompted on stdin for a password, empty passwords will cause the /// secret key to be stored in plain text. Note that parent /// directories will not be created. pub fn gen_keypair(pkey_path: &Path, skey_path: &Path) -> Result<(PublicKeyFile, SecretKeyFile), Error> {
skey_file.encrypt(passwd); skey_file.save(skey_path)?; pkey_file.save(pkey_path)?; println!("Generated {} and {}", pkey_path.display(), skey_path.display()); Ok((pkey_file, skey_file)) } fn prompt_skey(skey_path: &Path, prompt: impl AsRef<str>) -> Result<SecretKeyFile, Error> { let mut key_file = SecretKeyFile::open(skey_path)?; if key_file.is_encrypted() { let passwd = Passwd::prompt(&format!("{} {}: ", prompt.as_ref(), skey_path.display())) .chain_err(|| skey_path )?; key_file.decrypt(passwd) .chain_err(|| skey_path )?; } Ok(key_file) } /// Get a SecretKeyFile from a path. If the file is encrypted, prompt for a password on stdin. pub fn get_skey(skey_path: &Path) -> Result<SecretKeyFile, Error> { prompt_skey(skey_path, "Passphrase for") } /// Open, decrypt, re-encrypt with a different passphrase from stdin, and save the newly encrypted /// secret key at `skey_path`. pub fn re_encrypt(skey_path: &Path) -> Result<(), Error> { let mut skey_file = prompt_skey(skey_path, "Old passphrase for")?; let passwd = Passwd::prompt_new() .chain_err(|| skey_path )?; skey_file.encrypt(passwd); skey_file.save(skey_path) }
let passwd = Passwd::prompt_new() .chain_err(|| skey_path )?; let (pkey_file, mut skey_file) = SecretKeyFile::new();
random_line_split
lib.rs
mod error; use std::fs::{self, File, OpenOptions}; use std::io::{self, stdin, stdout, Write}; use std::ops::Deref; use std::os::unix::fs::OpenOptionsExt; use std::path::{Path, PathBuf}; use error_chain::bail; use hex::FromHex; use lazy_static::lazy_static; use seckey::SecBytes; use serde::{Deserialize, Serialize}; use sodiumoxide::crypto::{ pwhash, secretbox, sign, }; use termion::input::TermRead; pub use crate::error::{ErrorKind, Error, ResultExt}; lazy_static! { static ref HOMEDIR: PathBuf = { dirs::home_dir() .unwrap_or("./".into()) }; /// The default location for pkgar to look for the user's public key. /// /// Defaults to `$HOME/.pkgar/keys/id_ed25519.pub.toml`. If `$HOME` is /// unset, `./.pkgar/keys/id_ed25519.pub.toml`. pub static ref DEFAULT_PUBKEY: PathBuf = { Path::join(&HOMEDIR, ".pkgar/keys/id_ed25519.pub.toml") }; /// The default location for pkgar to look for the user's secret key. /// /// Defaults to `$HOME/.pkgar/keys/id_ed25519.toml`. If `$HOME` is unset, /// `./.pkgar/keys/id_ed25519.toml`. pub static ref DEFAULT_SECKEY: PathBuf = { Path::join(&HOMEDIR, ".pkgar/keys/id_ed25519.toml") }; } mod ser { use hex::FromHex; use serde::{Deserialize, Deserializer}; use serde::de::Error; use sodiumoxide::crypto::{pwhash, secretbox, sign}; //TODO: Macro? pub(crate) fn to_salt<'d, D: Deserializer<'d>>(deser: D) -> Result<pwhash::Salt, D::Error> { String::deserialize(deser) .and_then(|s| <[u8; 32]>::from_hex(s) .map(|val| pwhash::Salt(val) ) .map_err(|err| Error::custom(err.to_string()) ) ) } pub(crate) fn to_nonce<'d, D: Deserializer<'d>>(deser: D) -> Result<secretbox::Nonce, D::Error> { String::deserialize(deser) .and_then(|s| <[u8; 24]>::from_hex(s) .map(|val| secretbox::Nonce(val) ) .map_err(|err| Error::custom(err.to_string()) ) ) } pub(crate) fn to_pubkey<'d, D: Deserializer<'d>>(deser: D) -> Result<sign::PublicKey, D::Error> { String::deserialize(deser) .and_then(|s| <[u8; 32]>::from_hex(s) .map(|val| sign::PublicKey(val) ) .map_err(|err| Error::custom(err.to_string()) ) ) } } /// Standard pkgar public key format definition. Use serde to serialize/deserialize /// files into this struct (helper methods available). #[derive(Deserialize, Serialize)] pub struct PublicKeyFile { #[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_pubkey")] pub pkey: sign::PublicKey, } impl PublicKeyFile { /// Parse a `PublicKeyFile` from `file` (in toml format). pub fn open(file: impl AsRef<Path>) -> Result<PublicKeyFile, Error> { let content = fs::read_to_string(&file) .chain_err(|| file.as_ref() )?; toml::from_str(&content) .chain_err(|| file.as_ref() ) } /// Write `self` serialized as toml to `w`. pub fn write(&self, mut w: impl Write) -> Result<(), Error> { w.write_all(toml::to_string(self)?.as_bytes())?; Ok(()) } /// Shortcut to write the public key to `file` pub fn save(&self, file: impl AsRef<Path>) -> Result<(), Error> { self.write( File::create(&file) .chain_err(|| file.as_ref() )? 
        ).chain_err(|| file.as_ref() )
    }
}

enum SKey {
    Cipher([u8; 80]),
    Plain(sign::SecretKey),
}

impl SKey {
    fn encrypt(&mut self, passwd: Passwd, salt: pwhash::Salt, nonce: secretbox::Nonce) {
        if let SKey::Plain(skey) = self {
            if let Some(passwd_key) = passwd.gen_key(salt) {
                let mut buf = [0; 80];
                buf.copy_from_slice(&secretbox::seal(skey.as_ref(), &nonce, &passwd_key));
                *self = SKey::Cipher(buf);
            }
        }
    }

    fn decrypt(&mut self, passwd: Passwd, salt: pwhash::Salt, nonce: secretbox::Nonce) -> Result<(), Error> {
        if let SKey::Cipher(ciphertext) = self {
            if let Some(passwd_key) = passwd.gen_key(salt) {
                let skey_plain = secretbox::open(ciphertext.as_ref(), &nonce, &passwd_key)
                    .map_err(|_| ErrorKind::PassphraseIncorrect )?;
                *self = SKey::Plain(sign::SecretKey::from_slice(&skey_plain)
                    .ok_or(ErrorKind::KeyInvalid)?);
            } else {
                *self = SKey::Plain(sign::SecretKey::from_slice(&ciphertext[..64])
                    .ok_or(ErrorKind::KeyInvalid)?);
            }
        }
        Ok(())
    }

    /// Returns `None` if encrypted
    fn skey(&self) -> Option<sign::SecretKey> {
        match &self {
            SKey::Plain(skey) => Some(skey.clone()),
            SKey::Cipher(_) => None,
        }
    }
}

impl AsRef<[u8]> for SKey {
    fn as_ref(&self) -> &[u8] {
        match self {
            SKey::Cipher(buf) => buf.as_ref(),
            SKey::Plain(skey) => skey.as_ref(),
        }
    }
}

impl FromHex for SKey {
    type Error = hex::FromHexError;

    fn from_hex<T: AsRef<[u8]>>(buf: T) -> Result<SKey, hex::FromHexError> {
        let bytes = hex::decode(buf)?;
        // A plaintext secret key is only 64 bytes; the encrypted form is 80.
        if bytes.len() == 64 {
            Ok(SKey::Plain(sign::SecretKey::from_slice(&bytes)
                .expect("Somehow not the right number of bytes")))
        } else {
            let mut buf = [0; 80];
            buf.copy_from_slice(&bytes);
            Ok(SKey::Cipher(buf))
        }
    }
}

/// Standard pkgar secret key format definition. Use serde to serialize/deserialize
/// files into this struct (helper methods available).
/// Internally, this struct stores the encrypted state of the secret key as an enum.
/// Manipulate the state using the `encrypt()`, `decrypt()` and `is_encrypted()` methods.
#[derive(Deserialize, Serialize)]
pub struct SecretKeyFile {
    #[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_salt")]
    salt: pwhash::Salt,
    #[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_nonce")]
    nonce: secretbox::Nonce,
    #[serde(with = "hex")]
    skey: SKey,
}

impl SecretKeyFile {
    /// Generate a keypair with all the necessary info to save both keys. You
    /// must call `save()` on each object to persist them to disk.
    pub fn new() -> (PublicKeyFile, SecretKeyFile) {
        let (pkey, skey) = sign::gen_keypair();
        let pkey_file = PublicKeyFile { pkey };
        let skey_file = SecretKeyFile {
            salt: pwhash::gen_salt(),
            nonce: secretbox::gen_nonce(),
            skey: SKey::Plain(skey),
        };
        (pkey_file, skey_file)
    }

    /// Parse a `SecretKeyFile` from `file` (in toml format).
    pub fn open(file: impl AsRef<Path>) -> Result<SecretKeyFile, Error> {
        let content = fs::read_to_string(&file)
            .chain_err(|| file.as_ref() )?;
        toml::from_str(&content)
            .chain_err(|| file.as_ref() )
    }

    /// Write `self` serialized as toml to `w`.
    pub fn write(&self, mut w: impl Write) -> Result<(), Error> {
        w.write_all(toml::to_string(&self)?.as_bytes())?;
        Ok(())
    }

    /// Shortcut to write the secret key to `file`.
    ///
    /// Make sure to call `encrypt()` in order to encrypt
    /// the secret key, otherwise it will be stored as plain text.
    pub fn save(&self, file: impl AsRef<Path>) -> Result<(), Error> {
        self.write(
            OpenOptions::new()
                .write(true)
                .create(true)
                .mode(0o600)
                .open(&file)
                .chain_err(|| file.as_ref() )?
        ).chain_err(|| file.as_ref() )
    }

    /// Ensure that the internal state of this struct is encrypted.
    /// Note that if passwd is empty, this function is a no-op.
    pub fn encrypt(&mut self, passwd: Passwd) {
        self.skey.encrypt(passwd, self.salt, self.nonce)
    }

    /// Ensure that the internal state of this struct is decrypted.
    /// If the internal state is already decrypted, this function is a no-op.
    pub fn decrypt(&mut self, passwd: Passwd) -> Result<(), Error> {
        self.skey.decrypt(passwd, self.salt, self.nonce)
    }

    /// Status of the internal state.
    pub fn is_encrypted(&self) -> bool {
        match self.skey {
            SKey::Cipher(_) => true,
            SKey::Plain(_) => false,
        }
    }

    /// Returns `None` if the secret key is encrypted.
    pub fn key(&mut self) -> Option<sign::SecretKey> {
        match &self.skey {
            SKey::Plain(skey) => Some(skey.clone()),
            SKey::Cipher(_) => None,
        }
    }

    /// Returns `None` if the secret key is encrypted.
    pub fn public_key_file(&self) -> Option<PublicKeyFile> {
        Some(PublicKeyFile {
            pkey: self.skey.skey()?.public_key(),
        })
    }
}

/// Secure in-memory representation of a password.
pub struct Passwd {
    bytes: SecBytes,
}

impl Passwd {
    /// Create a new `Passwd` and zero the old string.
    pub fn new(passwd: &mut String) -> Passwd {
        let pwd = Passwd {
            bytes: SecBytes::with(
                passwd.len(),
                |buf| buf.copy_from_slice(passwd.as_bytes())
            ),
        };
        unsafe {
            seckey::zero(passwd.as_bytes_mut());
        }
        pwd
    }

    /// Prompt the user for a `Passwd` on stdin.
    pub fn prompt(prompt: impl AsRef<str>) -> Result<Passwd, Error> {
        let stdout = stdout();
        let mut stdout = stdout.lock();
        let stdin = stdin();
        let mut stdin = stdin.lock();

        stdout.write_all(prompt.as_ref().as_bytes())?;
        stdout.flush()?;

        let mut passwd = stdin.read_passwd(&mut stdout)?
            .ok_or(ErrorKind::Io(
                io::Error::new(
                    io::ErrorKind::UnexpectedEof,
                    "Invalid Password Input",
                )
            ))?;
        println!();

        Ok(Passwd::new(&mut passwd))
    }

    /// Prompt for a password on stdin and confirm it. For configurable
    /// prompts, use [`Passwd::prompt`](struct.Passwd.html#method.prompt).
    pub fn prompt_new() -> Result<Passwd, Error> {
        let passwd = Passwd::prompt(
            "Please enter a new passphrase (leave empty to store the key in plaintext): "
        )?;
        let confirm = Passwd::prompt("Please re-enter the passphrase: ")?;
        if passwd != confirm {
            bail!(ErrorKind::PassphraseMismatch);
        }
        Ok(passwd)
    }

    /// Get a key for symmetric key encryption from a password.
    fn gen_key(&self, salt: pwhash::Salt) -> Option<secretbox::Key> {
        if self.bytes.read().len() > 0 {
            let mut key = secretbox::Key([0; secretbox::KEYBYTES]);
            let secretbox::Key(ref mut binary_key) = key;
            pwhash::derive_key(
                binary_key,
                &self.bytes.read(),
                &salt,
                pwhash::OPSLIMIT_INTERACTIVE,
                pwhash::MEMLIMIT_INTERACTIVE,
            ).expect("Failed to get key from password");
            Some(key)
        } else {
            None
        }
    }
}

impl PartialEq for Passwd {
    fn eq(&self, other: &Passwd) -> bool {
        self.bytes.read().deref() == other.bytes.read().deref()
    }
}
impl Eq for Passwd {}

/// Generate a new keypair. The public key is saved to `pkey_path` and the
/// secret key to `skey_path`. The user will be prompted on stdin for a
/// password; an empty password causes the secret key to be stored in plain
/// text. Note that parent directories will not be created.
pub fn gen_keypair(pkey_path: &Path, skey_path: &Path) -> Result<(PublicKeyFile, SecretKeyFile), Error> {
    let passwd = Passwd::prompt_new()
        .chain_err(|| skey_path )?;
    let (pkey_file, mut skey_file) = SecretKeyFile::new();
    skey_file.encrypt(passwd);
    skey_file.save(skey_path)?;
    pkey_file.save(pkey_path)?;
    println!("Generated {} and {}", pkey_path.display(), skey_path.display());
    Ok((pkey_file, skey_file))
}

fn prompt_skey(skey_path: &Path, prompt: impl AsRef<str>) -> Result<SecretKeyFile, Error> {
    let mut key_file = SecretKeyFile::open(skey_path)?;
    if key_file.is_encrypted() {
        let passwd = Passwd::prompt(&format!("{} {}: ", prompt.as_ref(), skey_path.display()))
            .chain_err(|| skey_path )?;
        key_file.decrypt(passwd)
            .chain_err(|| skey_path )?;
    }
    Ok(key_file)
}

/// Get a SecretKeyFile from a path. If the file is encrypted, prompt for a password on stdin.
pub fn get_skey(skey_path: &Path) -> Result<SecretKeyFile, Error> {
    prompt_skey(skey_path, "Passphrase for")
}

/// Open, decrypt, re-encrypt with a different passphrase from stdin, and save the newly encrypted
/// secret key at `skey_path`.
pub fn re_encrypt(skey_path: &Path) -> Result<(), Error> {
    let mut skey_file = prompt_skey(skey_path, "Old passphrase for")?;
    let passwd = Passwd::prompt_new()
        .chain_err(|| skey_path )?;
    skey_file.encrypt(passwd);
    skey_file.save(skey_path)
}
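A minimal sketch of the in-memory key lifecycle above, assuming this crate is importable as `pkgar_keys` and that the caller has initialized sodiumoxide; the passphrase literal and the `demo_lifecycle` helper are illustrative only, not part of the original source:

// Sketch (assumption: crate name `pkgar_keys`; sodiumoxide already initialized).
use pkgar_keys::{Passwd, SecretKeyFile};

fn demo_lifecycle() -> Result<(), pkgar_keys::Error> {
    // Fresh keypair; the secret key starts out in plain (decrypted) state.
    let (pkey_file, mut skey_file) = SecretKeyFile::new();
    assert!(!skey_file.is_encrypted());

    // Passwd::new copies the passphrase into guarded memory and zeroes
    // the source String.
    let mut pass = String::from("hunter2");
    skey_file.encrypt(Passwd::new(&mut pass));
    assert!(skey_file.is_encrypted());
    // key() refuses to hand out the key while it is encrypted.
    assert!(skey_file.key().is_none());

    // Decrypting with the same passphrase restores the plain state.
    let mut pass = String::from("hunter2");
    skey_file.decrypt(Passwd::new(&mut pass))?;
    // The public key can be re-derived from the decrypted secret key.
    assert!(skey_file.public_key_file().map(|p| p.pkey) == Some(pkey_file.pkey));
    Ok(())
}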
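A sketch of the file-based flow under the same crate-name assumption; `gen_keypair` and `get_skey` prompt on stdin, so this is interactive, and the paths, message, and `demo_files` helper are illustrative. Signing itself is done directly with sodiumoxide's detached-signature API, which the original source does not wrap:

// Sketch (assumption: crate name `pkgar_keys`; runs interactively on a tty).
use std::path::Path;
use sodiumoxide::crypto::sign;
use pkgar_keys::{gen_keypair, get_skey, PublicKeyFile};

fn demo_files() -> Result<(), pkgar_keys::Error> {
    let pkey_path = Path::new("/tmp/id_ed25519.pub.toml");
    let skey_path = Path::new("/tmp/id_ed25519.toml");

    // Prompts for a new passphrase; an empty one stores the key in plain text.
    gen_keypair(pkey_path, skey_path)?;

    // Re-open from disk; get_skey only prompts if the file is encrypted.
    let mut skey_file = get_skey(skey_path)?;
    let skey = skey_file.key().expect("get_skey returns a decrypted key");

    // Sign and verify a message with the ed25519 pair.
    let msg = b"hello pkgar";
    let sig = sign::sign_detached(msg, &skey);
    let pkey = PublicKeyFile::open(pkey_path)?.pkey;
    assert!(sign::verify_detached(&sig, msg, &pkey));
    Ok(())
}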
fwliir.py
from deap import base
from deap import algorithms
from deap import tools
from deap import creator

import random
import functools

import numpy as np
import scipy.signal as signal


creator.create(
    'ResponseMismatch', base.Fitness, weights=(1.0,)
)
creator.create(
    'IIR', list, fitness=creator.ResponseMismatch, nbits=int
)

IIR = creator.IIR


def fitsos(sos, nbits):
    """
    Adjusts the gain of a fixed-point second-order stage so that its
    coefficients can be represented in 1.(`nbits`-1) format.

    :param sos: second-order stage of a fixed-point digital IIR filter.
    :param nbits: number of bits in the fixed-point representation of
      the stage coefficients.
    """
    # Compute the numeric limit of the signed integer representation.
    n = 2**(nbits - 1)
    # Find the coefficient with the largest absolute value.
    c = sos[np.argmax(np.abs(sos[-1] * sos[:-1]))]
    if abs(c) >= n:
        # Scale all coefficients so that the coefficient with the
        # largest absolute value becomes -1 in fixed point.
        sos[:-1] = -n * sos[-1] * sos[:-1] // abs(c)
        # Keep track of the scale factor that was applied.
        sos[-1] = min(-1, -abs(c) // n)
    return sos


def impulse(iir, ts, n):
    """
    Computes the impulse response of a fixed-point digital IIR filter.

    The filter is represented as an ordered sequence of second-order
    stages, each given by the coefficients of its difference equation
    plus a gain: [b0, b1, b2, a1, a2, k]. The a0 coefficient is assumed
    to be unity. First-order stages can be obtained by setting
    b2 = a2 = 0.

    :param iir: fixed-point digital IIR filter.
    :param ts: sampling period, in seconds.
    :param n: number of samples.
    :return: time and impulse response, as a tuple.
    """
    # Initialize to zero the output vectors of each of the M filter
    # stages plus the input vector. Each vector holds N samples plus
    # 2 extra samples that make up the delay line.
    y = np.zeros([len(iir) + 1, n + 2], dtype=int)
    # Initialize the input vector with a discrete delta.
    y[0, 2] = 2**(iir.nbits-1) - 1
    # For every discrete time instant, compute the output of each
    # stage, from the first to the last in that order.
    for j in range(2, n + 2):
        for i, sos in enumerate(iir, start=1):
            b0, b1, b2, a1, a2, k = sos
            # Compute the difference equation, truncating and
            # saturating the result so it can be represented in
            # fixed-point 1.(`nbits`-1) format.
            y[i, j] = np.clip((k * (
                b0 * y[i - 1, j]
                + b1 * y[i - 1, j - 1]
                + b2 * y[i - 1, j - 2]
                - a1 * y[i, j - 1]
                - a2 * y[i, j - 2]
            )) >> (iir.nbits - 1), -2**(iir.nbits - 1), 2**(iir.nbits - 1) - 1)
    # Return the impulse response, renormalizing the output of the
    # last stage to the interval [-1, 1).
    t = ts * np.arange(n)
    im = y[-1, 2:] / 2**(iir.nbits - 1)
    return t, im


def iir2sos(iir):
    """
    Converts a fixed-point digital IIR filter to its representation as
    a sequence of floating-point second-order sections (see
    `scipy.signal.sos2tf` for reference).

    :param iir: fixed-point digital IIR filter.
    :return: digital filter in SOS representation.
    """
    # Compute the numeric limit of the signed integer representation.
    n = 2**(iir.nbits - 1)
    # Scale the fixed-point digital filter according to its gain and
    # normalize to the interval [-1, 1) in floating point.
    return np.array([
        (*(sos[-1] * sos[:3] / n), 1., *(sos[-1] * sos[3:5] / n))
        for sos in iir
    ])


def genStablePrototype(nlimit, nbits=32):
    """
    Randomly generates a stable fixed-point digital IIR filter.

    :param nlimit: maximum admissible filter order.
    :param nbits: number of bits used for the numeric representation
      of the coefficients.
    :return: the generated fixed-point digital IIR filter.
    """
    iir = IIR()
    # Compute the numeric limit of the signed integer representation.
    n = 2 ** (nbits - 1)
    # Select the filter order at random from the interval [1, nlimit].
    order = max(int(random.random() * (nlimit + 1)), 1)
    # If the order is odd, introduce a first-order stage.
    if order % 2 != 0:
        # The zero and pole of the stage lie inside or on the
        # unit circle.
        b0 = n
        b1 = np.random.randint(-n, n-1)
        a1 = np.random.randint(-n, n-1)
        sos = np.array([b0, b1, 0, a1, 0, 1])
        # Adjust the stage gain so it can be represented.
        fitsos(sos, nbits)
        # Add the stage to the filter.
        iir.append(sos)
    # Introduce second-order stages until the selected order
    # is reached.
    for _ in range(order // 2):
        # The zeros and poles of the stage lie inside the unit circle.
        b0 = n
        b2 = np.random.randint(-n+1, n-1)
        a2 = np.random.randint(-n+1, n-1)
        b1 = np.random.randint(-b2-n, b2+n)
        a1 = np.random.randint(-a2-n, a2+n)
        sos = np.array([b0, b1, b2, a1, a2, 1])
        # Adjust the stage gain so it can be represented.
        fitsos(sos, nbits)
        # Add the stage to the filter.
        iir.append(sos)
    if hasattr(iir, 'nbits'):
        # Record the number of bits in the filter.
        iir.nbits = nbits
    return iir


def cxUniformND(iir1, iir2, ndpb):
    """
    Crosses numerators and denominators of two fixed-point digital IIR
    filters, potentially of different order, producing two offspring.
    The order of the stages to cross is shuffled at random. Variant of
    `deap.tools.cxUniform`.

    :param iir1: first parent filter.
    :param iir2: second parent filter.
    :param ndpb: probability of crossing numerator and/or denominator.
    """
    # Iterate over section pairs picked at random; pairing stops at
    # the lower-order candidate filter.
    for i, j in zip(
        random.sample(list(range(len(iir1))), len(iir1)),
        random.sample(list(range(len(iir2))), len(iir2))
    ):
        # Fetch the stages to cross from each filter.
        sos1 = iir1[i]
        sos2 = iir2[j]
        if random.random() < ndpb:
            # Cross the numerators of the stages.
            sos1[:3], sos2[:3] = sos2[:3], sos1[:3]
        if random.random() < ndpb:
            # Cross the denominators of the stages.
            sos1[3:5], sos2[3:5] = sos2[3:5], sos1[3:5]
        # Adjust the gain of the first stage so that its coefficients
        # can be represented in fixed point with the candidate
        # filter's number of bits.
        fitsos(sos1, iir1.nbits)
        # Likewise for the second stage.
        fitsos(sos2, iir2.nbits)
    return iir1, iir2


def evTimeResponse(iir, target, ts):
    """
    Evaluates the fitness of a fixed-point digital IIR filter according
    to how closely its time response matches the target response.

    :param iir: fixed-point digital IIR filter.
    :param target: target impulse response.
    :param ts: sampling period, in seconds.
    :return: fitness of the given filter.
    """
    # Compute the impulse response of the candidate filter from its
    # SOS representation.
    _, (im,) = signal.dimpulse(
        (*signal.sos2tf(iir2sos(iir)), ts), n=len(target)
    )
    # Compute the relative error between the candidate filter's
    # impulse response and the expected impulse response.
    et = (im - target) / np.max(np.abs(target))
    # Evaluate the fitness of the candidate filter as the reciprocal
    # of the relative error power.
    return (1.
            / (np.mean(et)**2 + np.var(et)),)


def mutCoeffGaussian(iir, mu, sigma, indpb):
    """
    Mutates the coefficients of a fixed-point digital IIR filter by
    applying numeric perturbations. Variant of
    `deap.tools.mutGaussian`.

    :param mu: mean of the Gaussian distribution the perturbations
      are drawn from.
    :param sigma: standard deviation of the Gaussian distribution the
      perturbations are drawn from.
    :param indpb: probability of perturbing a coefficient.
    """
    # Iterate over every section of the filter.
    for sos in iir:
        # Build a mask over the coefficients of the current filter
        # section, according to the given probability.
        mask = (np.random.random(len(sos)-1) < indpb)
        # Perturb the masked coefficients with draws from a normal
        # distribution with the given mean and standard deviation.
        sos[:-1][mask] += np.random.normal(
            mu, sigma, np.count_nonzero(mask)
        ).astype(int)
        # Adjust the section gain so that the filter coefficients can
        # be represented in fixed point with the filter's number of
        # bits.
        fitsos(sos, iir.nbits)
    return iir,


def eaSimplePlusElitism(population, toolbox, cxpb, mutpb, eprop, ngen,
                        stats=None, halloffame=None, verbose=__debug__):
    """
    Variant of `deap.algorithms.eaSimple` with an
    elitism proportion.
    """
    logbook = tools.Logbook()
    logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])

    # Evaluate the individuals with invalid fitness.
    invalid_ind = [ind for ind in population if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    if halloffame is not None:
        halloffame.update(population)

    record = stats.compile(population) if stats else {}
    logbook.record(gen=0, nevals=len(invalid_ind), **record)
    if verbose:
        print(logbook.stream)

    # Start the evolutionary process.
    for gen in range(1, ngen + 1):
        # Select the next generation of individuals.
        offspring = toolbox.select(population, len(population))

        # Vary the pool of individuals, applying crossover and mutation.
        offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)

        # Evaluate the individuals with invalid fitness.
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Update the group of best individuals.
        if halloffame is not None:
            halloffame.update(offspring)

        # Replace the current population with the best of the set
        # made up of its offspring and the elite.
        elite_count = int(len(population) * eprop)
        elite = tools.selBest(population, elite_count)
        population[:] = tools.selBest(offspring + elite, len(population))

        # Record the statistics of the current generation.
        record = stats.compile(population) if stats else {}
        logbook.record(gen=gen, nevals=len(invalid_ind), **record)
        if verbose:
            print(logbook.stream)

    return population, logbook


def configure_genetic_approx(*, nbits=16, nlimit=8, nsln=10,
                             cxpb=0.7, ndpb=0.5, mutpb=0.2,
                             mutmean=0.0, mutstd=0.3, coeffpb=0.1,
                             tournsize=3, nind=1000, eprop=0.005,
                             ngen=400, verbose=__debug__):
    """
    Configures an approximation function for fixed-point digital IIR
    filters with a given target impulse response.

    :param nbits: number of bits used for the numeric representation
      of the solution's coefficients.
    :param nlimit: maximum admissible order for the solution.
    :param nsln: number of fittest solutions to keep.
    :param cxpb: probability of genetic crossover between solutions.
    :param ndpb: probability of swapping the numerator and denominator
      of solutions selected for crossover.
    :param mutpb: probability of mutating a solution.
    :param mutmean: mean of the perturbations used for mutation, in
      the interval [-1, 1).
    :param mutstd: standard deviation of the perturbations used for
      mutation, in the interval [-1, 1).
    :param coeffpb: probability of perturbing a coefficient of a
      solution selected for mutation.
    :param tournsize: number of solutions entered into each selection
      tournament.
    :param nind: number of solutions in the genetic pool to explore.
    :param eprop: elitism proportion, i.e. the number of elite
      solutions relative to the total number of solutions in the pool.
    :param ngen: number of generations to evolve.
    :return: fixed-point digital IIR filter approximation function.
    """
    def approx(target, ts, nlimit=nlimit, nsln=nsln):
        """
        Approximates a fixed-point digital IIR filter so that it
        exhibits the target impulse response.

        :param target: target impulse response.
        :param ts: sampling period, in seconds.
        :param nlimit: maximum admissible order for the solution.
        :param nsln: number of fittest solutions to keep.
        :return: fixed-point digital IIR filter.
        """
        toolbox = base.Toolbox()
        toolbox.register(
            'individual', genStablePrototype,
            nlimit=nlimit, nbits=nbits
        )
        toolbox.register(
            'population', tools.initRepeat, list, toolbox.individual
        )
        toolbox.register('mate', cxUniformND, ndpb=ndpb)
        toolbox.register('select', tools.selTournament, tournsize=tournsize)
        toolbox.register(
            'mutate', mutCoeffGaussian,
            mu=mutmean*2**(nbits-1),
            sigma=mutstd*2**(nbits-1),
            indpb=coeffpb
        )
        toolbox.register('evaluate', evTimeResponse, target=target, ts=ts)

        stats = tools.Statistics(
            lambda individual: individual.fitness.values
        )
        stats.register('mean_fitness', np.mean)
        stats.register('fitness_stddev', np.std)
        stats.register('min_fitness', np.min)
        stats.register('max_fitness', np.max)

        hall = tools.HallOfFame(
            maxsize=nsln,
            similar=lambda x, y: (
                np.all(np.equal(np.shape(x), np.shape(y)))
                and np.all(np.equal(x, y))
            )
        )

        population = toolbox.population(nind)
        offspring, logbook = eaSimplePlusElitism(
            population, toolbox,
            cxpb=cxpb, mutpb=mutpb,
            eprop=eprop, ngen=ngen,
            stats=stats, halloffame=hall,
            verbose=verbose
        )
        return hall, offspring, logbook

    return approx


if __name__ == '__main__':
    n = 40
    fs = 1000
    ts = 1 / fs

    b_t, a_t = signal.iirdesign(
        wp=0.3, ws=0.6, gpass=1, gstop=40,
        ftype='butter', analog=False
    )
    w, h_t = signal.freqz(b_t, a_t)
    t, (im_t,) = signal.dimpulse((b_t, a_t, ts), n=n)

    approx = configure_genetic_approx(nbits=16, nlimit=8)
    best, pool, logbook = approx(im_t, ts)

    iir_min_err = best[0]
    print('Minimum error', iir_min_err, len(iir_min_err))
    sos_min_err = iir2sos(iir_min_err)
    _, h_min_err = signal.sosfreqz(sos_min_err, worN=w)
    _, im_min_err = impulse(iir_min_err, ts, n)

    iir_min_n = sorted(best, key=lambda iir: len(iir))[0]
    print('Minimum order', iir_min_n, len(iir_min_n))
    sos_min_n = iir2sos(iir_min_n)
    _, h_min_n = signal.sosfreqz(sos_min_n, worN=w)
    _, im_min_n = impulse(iir_min_n, ts, n)

    import matplotlib.pyplot as plt

    fig = plt.figure()
    ax = fig.add_subplot(311)
    ax.plot(w, 20 * np.log10(abs(h_min_n)))
    ax.plot(w, 20 * np.log10(abs(h_min_err)))
    ax.plot(w, 20 * np.log10(abs(h_t)))
    ax.set_xlabel('Frequency [radians / sample]')
    ax.set_ylabel('Amplitude [dB]')
    ax.grid(which='both', axis='both')
    ax = fig.add_subplot(312)
    ax.plot(w, np.angle(h_min_n))
    ax.plot(w, np.angle(h_min_err))
    ax.plot(w, np.angle(h_t))
    ax.set_xlabel('Frequency [radians / sample]')
    ax.set_ylabel('Angle [radians]')
    ax.grid(which='both', axis='both')
    ax = fig.add_subplot(313)
    ax.plot(t, im_min_n)
    ax.plot(t, im_min_err)
    ax.plot(t, im_t)
    ax.set_xlabel('Time [seconds]')
    ax.set_ylabel('Amplitude [ ]')
    ax.grid(which='both', axis='both')

    zeros_min_n, poles_min_n, gain_min_n = signal.sos2zpk(sos_min_n)
    zeros_min_err, poles_min_err, gain_min_err = signal.sos2zpk(sos_min_err)
    zeros_o, poles_o, gains_o = [], [], []
    for iir in pool:
        z, p, k = signal.sos2zpk(iir2sos(iir))
        zeros_o.extend(z)
        poles_o.extend(p)
        gains_o.append(k)
    zeros_t, poles_t, gain_t = signal.tf2zpk(b_t, a_t)

    fig = plt.figure()
    ax = fig.add_subplot(311)
    ax.scatter(x=np.real(zeros_t), y=np.imag(zeros_t), color='blue')
    ax.scatter(x=np.real(zeros_min_err), y=np.imag(zeros_min_err), color='red')
    ax.scatter(x=np.real(zeros_min_n), y=np.imag(zeros_min_n), color='green')
    ax.scatter(x=np.real(zeros_o), y=np.imag(zeros_o), color='yellow')
    ax.axis([-2, 2, -2, 2])
    ax = fig.add_subplot(312)
    ax.scatter(x=np.real(poles_t), y=np.imag(poles_t), marker='x', color='blue')
    ax.scatter(x=np.real(poles_o), y=np.imag(poles_o), color='yellow')
    ax.scatter(x=np.real(poles_min_err), y=np.imag(poles_min_err), marker='x', color='red')
    ax.scatter(x=np.real(poles_min_n), y=np.imag(poles_min_n), marker='x', color='green')
    ax.axis([-2, 2, -2, 2])
    ax = fig.add_subplot(313)
    ax.hist(gains_o, bins=100, density=True, color='yellow')
    ax.axvline(x=gain_t, color='blue')
    ax.axvline(x=gain_min_err, color='red')
    ax.axvline(x=gain_min_n, color='green')
    plt.show()
ax.scatter(x=np.real(poles_min_n), y=np.imag(poles_min_n), marker='x', color='green') ax.axis([-2, 2, -2, 2]) ax = fig.add_subplot(313) ax.hist(gains_o, bins=100, density=True, color='yellow') ax.axvline(x=gain_t, color='blue') ax.axvline(x=gain_min_err, color='red') ax.axvline(x=gain_min_n, color='green') plt.show()
enumerate(iir, start=1):
            b0, b1, b2, a1, a2, k = sos
            # Compute the difference equation, truncating and
            # saturating the result so that it can be represented
            # in 1.(`nbits`-1) fixed point.
            y[i, j] = np.clip((k * (
                b0 * y[i - 1, j] + b1 * y[i - 1, j - 1] + b2 * y[i - 1, j - 2]
                - a1 * y[i, j - 1] - a2 * y[i, j - 2]
            )) >> (iir.nbits - 1),
                -2**(iir.nbits - 1), 2**(iir.nbits - 1) - 1)
    # Return
conditional_block
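A note on the fitness computed by `evTimeResponse` above. NumPy's `np.var` is the biased, divide-by-N estimator by default, so the denominator `np.mean(et)**2 + np.var(et)` is exactly the mean square of the normalized error, and the score is its reciprocal. Writing e_k for the impulse-response error normalized by max|target| and N = len(target) (notation ours, not the author's):

```latex
\mathrm{fitness}
  = \frac{1}{\bar e^{\,2} + \operatorname{Var}(e)}
  = \frac{1}{\frac{1}{N}\sum_{k=0}^{N-1} e_k^{2}},
\qquad
\operatorname{Var}(e) = \frac{1}{N}\sum_{k=0}^{N-1} e_k^{2} - \bar e^{\,2}.
```

The same fixed-point convention runs through `fitsos`, `impulse` and `iir2sos`: a coefficient stored as the signed integer c in a section with gain k denotes

```latex
c_{\mathrm{float}} = \frac{k \, c}{2^{\,\mathrm{nbits}-1}} \in [-1, 1),
```

which is why `iir2sos` divides by 2**(nbits-1) and `impulse` shifts right by nbits-1 after each multiply-accumulate.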
main.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License.. #![allow(deprecated)] extern crate sgx_types; extern crate sgx_urts; use sgx_types::*; use sgx_urts::SgxEnclave; extern crate mio; use mio::tcp::TcpStream; use std::os::unix::io::AsRawFd; use std::ffi::CString; use std::net::SocketAddr; use std::str; use std::io::{self, Write}; const BUFFER_SIZE: usize = 1024; static ENCLAVE_FILE: &'static str = "enclave.signed.so"; extern { fn tls_client_new(eid: sgx_enclave_id_t, retval: *mut usize, fd: c_int, hostname: *const c_char, cert: *const c_char) -> sgx_status_t; fn tls_client_read(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize, buf: *mut c_void, cnt: c_int) -> sgx_status_t; fn tls_client_write(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize, buf: *const c_void, cnt: c_int) -> sgx_status_t; fn tls_client_wants_read(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize) -> sgx_status_t; fn tls_client_wants_write(eid: sgx_enclave_id_t, retval: *mut c_int, session_id: usize) -> sgx_status_t; fn tls_client_close(eid: sgx_enclave_id_t, session_id: usize) -> sgx_status_t; } fn init_enclave() -> SgxResult<SgxEnclave> { let mut launch_token: sgx_launch_token_t = [0; 1024]; let mut launch_token_updated: i32 = 0; // call sgx_create_enclave to initialize an enclave instance // Debug Support: set 2nd parameter to 1 let debug = 1; let mut misc_attr = sgx_misc_attribute_t {secs_attr: sgx_attributes_t { flags:0, xfrm:0}, misc_select:0}; SgxEnclave::create(ENCLAVE_FILE, debug, &mut launch_token, &mut launch_token_updated, &mut misc_attr) } const CLIENT: mio::Token = mio::Token(0); /// This encapsulates the TCP-level connection, some connection /// state, and the underlying TLS-level session. 
struct TlsClient { enclave_id: sgx_enclave_id_t, socket: TcpStream, closing: bool, tlsclient_id: usize, } impl TlsClient { fn ready(&mut self, poll: &mut mio::Poll, ev: &mio::Event) -> bool { assert_eq!(ev.token(), CLIENT); if ev.readiness().is_error() { println!("Error"); return false; } if ev.readiness().is_readable() { self.do_read(); } if ev.readiness().is_writable() { self.do_write(); } if self.is_closed() { println!("Connection closed"); return false; } self.reregister(poll); true } } impl TlsClient { fn new(enclave_id: sgx_enclave_id_t, sock: TcpStream, hostname: &str, cert: &str) -> Option<TlsClient> { println!("[+] TlsClient new {} {}", hostname, cert); let mut tlsclient_id: usize = 0xFFFF_FFFF_FFFF_FFFF; let c_host = CString::new(hostname.to_string()).unwrap(); let c_cert = CString::new(cert.to_string()).unwrap(); let retval = unsafe { tls_client_new(enclave_id, &mut tlsclient_id, sock.as_raw_fd(), c_host.as_ptr() as *const c_char, c_cert.as_ptr() as *const c_char) }; if retval != sgx_status_t::SGX_SUCCESS { println!("[-] ECALL Enclave [tls_client_new] Failed {}!", retval); return Option::None; } if tlsclient_id == 0xFFFF_FFFF_FFFF_FFFF { println!("[-] New enclave tlsclient error"); return Option::None; } Option::Some( TlsClient { enclave_id: enclave_id, socket: sock, closing: false, tlsclient_id: tlsclient_id, }) } fn close(&self)
    fn read_tls(&self, buf: &mut [u8]) -> isize {
        let mut retval = -1;
        let result = unsafe {
            tls_client_read(self.enclave_id,
                            &mut retval,
                            self.tlsclient_id,
                            buf.as_mut_ptr() as * mut c_void,
                            buf.len() as c_int)
        };

        match result {
            sgx_status_t::SGX_SUCCESS => { retval as isize }
            _ => {
                println!("[-] ECALL Enclave [tls_client_read] Failed {}!", result);
                -1
            }
        }
    }

    fn write_tls(&self, buf: &[u8]) -> isize {
        let mut retval = -1;
        let result = unsafe {
            tls_client_write(self.enclave_id,
                             &mut retval,
                             self.tlsclient_id,
                             buf.as_ptr() as * const c_void,
                             buf.len() as c_int)
        };

        match result {
            sgx_status_t::SGX_SUCCESS => { retval as isize }
            _ => {
                println!("[-] ECALL Enclave [tls_client_write] Failed {}!", result);
                -1
            }
        }
    }

    /// We're ready to do a read.
    fn do_read(&mut self) {
        // BUFFER_SIZE = 1024 is just for this test; to drain all of the
        // plaintext you would need further ecalls to query the available
        // size and fetch the remaining buffer.
        let mut plaintext = vec![0; BUFFER_SIZE];
        let rc = self.read_tls(plaintext.as_mut_slice());
        if rc == -1 {
            println!("TLS read error: {:?}", rc);
            self.closing = true;
            return;
        }
        plaintext.resize(rc as usize, 0);
        io::stdout().write_all(&plaintext).unwrap();
    }

    fn do_write(&mut self) {
        // No buffered data to send here; application writes go through
        // the io::Write impl below.
        let buf = Vec::new();
        self.write_tls(buf.as_slice());
    }

    fn register(&self, poll: &mut mio::Poll) {
        poll.register(&self.socket,
                      CLIENT,
                      self.ready_interest(),
                      mio::PollOpt::level() | mio::PollOpt::oneshot())
            .unwrap();
    }

    fn reregister(&self, poll: &mut mio::Poll) {
        poll.reregister(&self.socket,
                        CLIENT,
                        self.ready_interest(),
                        mio::PollOpt::level() | mio::PollOpt::oneshot())
            .unwrap();
    }

    fn wants_read(&self) -> bool {
        let mut retval = -1;
        let result = unsafe {
            tls_client_wants_read(self.enclave_id,
                                  &mut retval,
                                  self.tlsclient_id)
        };

        match result {
            sgx_status_t::SGX_SUCCESS => { },
            _ => {
                println!("[-] ECALL Enclave [tls_client_wants_read] Failed {}!", result);
                return false;
            }
        }

        match retval {
            0 => false,
            _ => true,
        }
    }

    fn wants_write(&self) -> bool {
        let mut retval = -1;
        let result = unsafe {
            tls_client_wants_write(self.enclave_id,
                                   &mut retval,
                                   self.tlsclient_id)
        };

        match result {
            sgx_status_t::SGX_SUCCESS => { },
            _ => {
                println!("[-] ECALL Enclave [tls_client_wants_write] Failed {}!", result);
                return false;
            }
        }

        match retval {
            0 => false,
            _ => true,
        }
    }

    // Use wants_read/wants_write to register for different mio-level
    // IO readiness events.
fn ready_interest(&self) -> mio::Ready { let rd = self.wants_read(); let wr = self.wants_write(); if rd && wr { mio::Ready::readable() | mio::Ready::writable() } else if wr { mio::Ready::writable() } else { mio::Ready::readable() } } fn is_closed(&self) -> bool { self.closing } } /// We implement `io::Write` and pass through to the TLS session impl io::Write for TlsClient { fn write(&mut self, bytes: &[u8]) -> io::Result<usize> { Ok(self.write_tls(bytes) as usize) } // unused fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl io::Read for TlsClient { fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> { Ok(self.read_tls(bytes) as usize) } } fn lookup_ipv4(host: &str, port: u16) -> SocketAddr { use std::net::ToSocketAddrs; let addrs = (host, port).to_socket_addrs().unwrap(); for addr in addrs { if let SocketAddr::V4(_) = addr { return addr; } } unreachable!("Cannot lookup address"); } fn main() { let enclave = match init_enclave() { Ok(r) => { println!("[+] Init Enclave Successful {}!", r.geteid()); r }, Err(x) => { println!("[-] Init Enclave Failed {}!", x.as_str()); return; }, }; println!("[+] Test tlsclient in enclave, start!"); let port = 8443; let hostname = "localhost"; let cert = "./ca.cert"; let addr = lookup_ipv4(hostname, port); let sock = TcpStream::connect(&addr).expect("[-] Connect tls server failed!"); let tlsclient = TlsClient::new(enclave.geteid(), sock, hostname, cert); if tlsclient.is_some() { println!("[+] Tlsclient new success!"); let mut tlsclient = tlsclient.unwrap(); let httpreq = format!("GET / HTTP/1.1\r\nHost: {}\r\nConnection: \ close\r\nAccept-Encoding: identity\r\n\r\n", hostname); tlsclient.write_all(httpreq.as_bytes()).unwrap(); let mut poll = mio::Poll::new() .unwrap(); let mut events = mio::Events::with_capacity(32); tlsclient.register(&mut poll); 'outer: loop { poll.poll(&mut events, None).unwrap(); for ev in events.iter() { if !tlsclient.ready(&mut poll, &ev) { tlsclient.close(); break 'outer ; } } } } else { println!("[-] Tlsclient new failed!"); } println!("[+] Test tlsclient in enclave, done!"); enclave.destroy(); }
{
        let retval = unsafe {
            tls_client_close(self.enclave_id, self.tlsclient_id)
        };

        if retval != sgx_status_t::SGX_SUCCESS {
            println!("[-] ECALL Enclave [tls_client_close] Failed {}!", retval);
        }
    }
identifier_body
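A note on the wrapper pattern used by `TlsClient` above: every ECALL reports its transport status through the function's return value and its logical result through an out-parameter, and each safe method folds the two into a single Rust value. Below is a minimal, self-contained sketch of that shape; `Status` and `fake_ecall` are mocks standing in for `sgx_status_t` and a generated ECALL stub, not SGX SDK items:

```rust
#[derive(Debug, PartialEq)]
#[allow(dead_code)]
enum Status {
    Success,
    Error(u32),
}

// Mock ECALL: the "enclave" writes its logical result through the
// out-parameter and reports transport success via the return value.
unsafe fn fake_ecall(retval: *mut i32) -> Status {
    *retval = 42;
    Status::Success
}

fn checked_call() -> Result<i32, String> {
    let mut retval: i32 = -1;
    // SAFETY: `retval` points at a live, writable i32 for the whole call.
    let status = unsafe { fake_ecall(&mut retval) };
    match status {
        Status::Success => Ok(retval),
        other => Err(format!("ECALL failed: {:?}", other)),
    }
}

fn main() {
    assert_eq!(checked_call(), Ok(42));
    println!("ecall wrapper pattern ok");
}
```

The real methods differ only in which ECALL they invoke and in mapping failure to a `-1` sentinel or `false` instead of an `Err`.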
mod.rs
// MIT/Apache2 License //! This module defines the `Display` object, which acts as a connection to the X11 server, and the //! `Connection` trait, which the `Display` object abstracts itself over. See the documentation for //! these objects for more information. use crate::{ auth_info::AuthInfo, auto::{ xproto::{Colormap, Screen, Setup, SetupRequest, Visualid, Visualtype, Window}, AsByteSequence, }, event::Event, util::cycled_zeroes, xid::XidGenerator, Request, XID, }; use alloc::{boxed::Box, collections::VecDeque}; use core::{fmt, iter, marker::PhantomData, mem, num::NonZeroU32}; use cty::c_int; use hashbrown::HashMap; use tinyvec::TinyVec; #[cfg(feature = "std")] use std::borrow::Cow; #[cfg(feature = "async")] use std::{future::Future, pin::Pin}; mod connection; pub use connection::*; #[cfg(feature = "std")] pub mod name; mod functions; mod input; mod output; pub use functions::*; pub(crate) const EXT_KEY_SIZE: usize = 24; /// The connection to the X11 server. Most operations done in breadx revolve around this object /// in some way, shape or form. /// /// Internally, this acts as a layer of abstraction over the inner `Conn` object that keeps track /// of the setup, outgoing and pending requests and replies, the event queue, et cetera. Orthodoxically, /// X11 usually takes place over a TCP stream or a Unix socket connection; however, `Display` is able /// to use any object implementing the `Connection` trait as a vehicle for the X11 protocol. /// /// Upon its instantiation, the `Display` sends bytes to the server requesting the setup information, and /// then stores it for later use. Afterwards, it awaits commands from the programmer to send requests, /// receive replies or process events. /// /// # Example /// /// Open a connection to the X11 server and get the screen resolution. /// /// ```rust,no_run /// use breadx::DisplayConnection; /// /// let mut conn = DisplayConnection::create(None, None).unwrap(); /// /// let default_screen = conn.default_screen(); /// println!("Default screen is {} x {}", default_screen.width_in_pixels, default_screen.height_in_pixels); /// ``` pub struct Display<Conn> { // the connection to the server pub(crate) connection: Conn, // the setup received from the server pub(crate) setup: Setup, // xid generator xid: XidGenerator, // the screen to be used by default default_screen: usize, // input variables pub(crate) event_queue: VecDeque<Event>, pub(crate) pending_requests: VecDeque<input::PendingRequest>, pub(crate) pending_replies: HashMap<u16, Box<[u8]>>, // output variables request_number: u64, // store the interned atoms pub(crate) wm_protocols_atom: Option<NonZeroU32>, // context db // context: HashMap<(XID, ContextID), NonNull<c_void>>, // hashmap linking extension names to major opcodes // we use byte arrays instead of static string pointers // here because cache locality leads to an overall speedup (todo: verify) extensions: HashMap<[u8; EXT_KEY_SIZE], u8>, } /// Unique identifier for a context. pub type ContextID = c_int; /// A cookie for a request. /// /// Requests usually take time to resolve into replies. Therefore, the `Display::send_request` method returns /// the `RequestCookie`, which is later used to block (or await) for the request's eventual result. 
#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Default, Eq, Hash)] #[repr(transparent)] pub struct RequestCookie<R: Request> { sequence: u16, _phantom: PhantomData<Option<R::Reply>>, } impl<R: Request> RequestCookie<R> { #[inline] pub(crate) fn from_sequence(sequence: u64) -> Self { Self { sequence: sequence as u16, // truncate to lower bits _phantom: PhantomData, } } } #[derive(Default, Debug)] pub(crate) struct PendingRequestFlags { pub discard_reply: bool, pub checked: bool, } impl<Conn: fmt::Debug> fmt::Debug for Display<Conn> { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Display") .field("connection", &self.connection) .field("setup", &self.setup) .field("xid", &self.xid) .field("default_screen", &self.default_screen) .field("event_queue", &self.event_queue) .field("pending_requests", &self.pending_requests) .field("pending_replies", &self.pending_replies) .field("request_number", &self.request_number) .finish() } } #[inline] const fn endian_byte() -> u8 { // Excerpt from the X Window System Protocol // // The client must send an initial byte of data to identify the byte order to be employed. // The value of the byte must be octal 102 or 154. The value 102 (ASCII uppercase B) means // values are transmitted most significant byte first, and value 154 (ASCII lowercase l) // means values are transmitted least significant byte first. #[cfg(not(target_endian = "little"))] { const BE_SIGNIFIER: u8 = b'B'; BE_SIGNIFIER } #[cfg(target_endian = "little")] { const LE_SIGNIFIER: u8 = b'l'; LE_SIGNIFIER } } impl<Conn: Connection> Display<Conn> { #[inline] fn decode_reply<R: Request>(reply: Box<[u8]>) -> crate::Result<R::Reply> { Ok(R::Reply::from_bytes(&reply) .ok_or(crate::BreadError::BadObjectRead(None))? .0) } /// Send a request object to the X11 server. /// /// Given a request object, this function sends it across the connection to the X11 server and returns /// a cookie used to determine when this request will resolve. Usually, the `Display` object has functions /// that act as a wrapper around this object; however, if you'd like to circumvent those, this is usually /// the best option. #[inline] pub fn send_request<R: Request>(&mut self, req: R) -> crate::Result<RequestCookie<R>> { self.send_request_internal(req) } /// Wait for a request from the X11 server. /// /// This function checks the `Display`'s queues to see if a reply matching the given `RequestCookie` /// has been processed by the X11 server. If not, it polls the server for new events until it has /// determined that the request has resolved. #[inline] pub fn resolve_request<R: Request>( &mut self, token: RequestCookie<R>, ) -> crate::Result<R::Reply> where R::Reply: Default, { if mem::size_of::<R::Reply>() == 0 { log::debug!("Immediately resolving for reply of size 0"); return Ok(Default::default()); } loop { log::trace!("Current replies: {:?}", &self.pending_replies); match self.pending_replies.remove(&token.sequence) { Some(reply) => break Self::decode_reply::<R>(reply), None => self.wait()?, } } } /// Send a request object to the X11 server, async redox. See the `send_request` function for more /// information. #[cfg(feature = "async")] #[inline] pub fn send_request_async<'future, R: Request + Send + 'future>( &'future mut self, req: R, ) -> Pin<Box<dyn Future<Output = crate::Result<RequestCookie<R>>> + Send + 'future>> { Box::pin(self.send_request_internal_async(req)) } /// Wait for a request from the X11 server, async redox. 
See the `resolve_request` function for more /// information. #[cfg(feature = "async")] #[inline] pub async fn resolve_request_async<R: Request>( &mut self, token: RequestCookie<R>, ) -> crate::Result<R::Reply> where R::Reply: Default, { if mem::size_of::<R::Reply>() == 0 { return Ok(Default::default()); } loop { match self.pending_replies.remove(&token.sequence) { Some(reply) => { break Self::decode_reply::<R>(reply); } None => self.wait_async().await?, } } } #[inline] fn from_connection_internal(connection: Conn) -> Self { Self { connection, setup: Default::default(), xid: Default::default(), default_screen: 0, event_queue: VecDeque::with_capacity(8), pending_requests: VecDeque::new(), pending_replies: HashMap::with_capacity(4), request_number: 1, wm_protocols_atom: None, // context: HashMap::new(), extensions: HashMap::with_capacity(8), } } /// Creates a new `Display` from a connection and authentication info. /// /// It is expected that the connection passed in has not had any information sent into it aside from /// what is necessary for the underlying protocol. After the object is created, the `Display` will poll /// the server for setup information. #[inline] pub fn from_connection(connection: Conn, auth: Option<AuthInfo>) -> crate::Result<Self> { let mut d = Self::from_connection_internal(connection); d.init(auth)?; Ok(d) } /// Creates a new `Display` from a connection and authentication info, async redox. See the `from_connection` /// function for more information. #[cfg(feature = "async")] #[inline] pub async fn from_connection_async( connection: Conn, auth: Option<AuthInfo>, ) -> crate::Result<Self> { let mut d = Self::from_connection_internal(connection); d.init_async(auth).await?; Ok(d) } /// Generate the setup from the authentication info. #[inline] fn create_setup(auth: AuthInfo) -> SetupRequest { let AuthInfo { name, data, .. } = auth; SetupRequest { byte_order: endian_byte(), protocol_major_version: 11, protocol_minor_version: 0, authorization_protocol_name: name, authorization_protocol_data: data, } } /// Initialize the setup. #[inline] fn init(&mut self, auth: Option<AuthInfo>) -> crate::Result { let setup = Self::create_setup(match auth { Some(auth) => auth, None => AuthInfo::get(), }); let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(setup.size()); let len = setup.as_bytes(&mut bytes); bytes.truncate(len); self.connection.send_packet(&bytes[0..len])?; let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(8); self.connection.read_packet(&mut bytes)?; match bytes[0] { 0 => return Err(crate::BreadError::FailedToConnect), 2 => return Err(crate::BreadError::FailedToAuthorize), _ => (), } // read in the rest of the bytes let length_bytes: [u8; 2] = [bytes[6], bytes[7]]; let length = (u16::from_ne_bytes(length_bytes) as usize) * 4; bytes.extend(iter::once(0).cycle().take(length)); self.connection.read_packet(&mut bytes[8..])?; let (setup, _) = Setup::from_bytes(&bytes).ok_or(crate::BreadError::BadObjectRead(Some("Setup")))?; self.setup = setup; self.xid = XidGenerator::new(self.setup.resource_id_base, self.setup.resource_id_mask); log::debug!("resource_id_base is {:#032b}", self.setup.resource_id_base); log::debug!("resource_id_mask is {:#032b}", self.setup.resource_id_mask); log::debug!( "resource_id inc. is {:#032b}", self.setup.resource_id_mask & self.setup.resource_id_mask.wrapping_neg() ); Ok(()) } /// Initialize the setup, async redox. 
/// /// TODO: lots of copy-pasted code, redo this at some point #[cfg(feature = "async")] #[inline] async fn init_async(&mut self, auth: Option<AuthInfo>) -> crate::Result { let setup = Self::create_setup(match auth { Some(auth) => auth, None => AuthInfo::get_async().await, }); let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(setup.size()); let len = setup.as_bytes(&mut bytes); bytes.truncate(len); self.connection.send_packet_async(&bytes[0..len]).await?; let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(8); self.connection.read_packet_async(&mut bytes).await?; match bytes[0] { 0 => return Err(crate::BreadError::FailedToConnect), 2 => return Err(crate::BreadError::FailedToAuthorize), _ => (), } // read in the rest of the bytes let length_bytes: [u8; 2] = [bytes[6], bytes[7]]; let length = (u16::from_ne_bytes(length_bytes) as usize) * 4; bytes.extend(iter::once(0).cycle().take(length)); self.connection.read_packet_async(&mut bytes[8..]).await?; let (setup, _) = Setup::from_bytes(&bytes) .ok_or_else(|| crate::BreadError::BadObjectRead(Some("Setup")))?; self.setup = setup; self.xid = XidGenerator::new(self.setup.resource_id_base, self.setup.resource_id_mask); log::debug!("resource_id_base is {:#032b}", self.setup.resource_id_base); log::debug!("resource_id_mask is {:#032b}", self.setup.resource_id_mask); log::debug!( "resource_id inc. is {:#032b}", self.setup.resource_id_mask & self.setup.resource_id_mask.wrapping_neg() ); Ok(()) } /// Get the setup associated with this display. #[inline] pub fn setup(&self) -> &Setup { &self.setup } #[inline] pub fn default_root(&self) -> Window { self.default_screen().root } #[inline] pub fn default_screen(&self) -> &Screen { &self.setup.roots[self.default_screen] } #[inline] pub fn default_white_pixel(&self) -> u32 { self.default_screen().white_pixel } #[inline] pub fn default_black_pixel(&self) -> u32 { self.default_screen().black_pixel } #[inline] pub fn default_visual_id(&self) -> Visualid { self.default_screen().root_visual } #[inline] pub fn default_visual(&self) -> &Visualtype { self.visual_id_to_visual(self.default_visual_id()).unwrap() } #[inline] pub fn default_colormap(&self) -> Colormap { self.default_screen().default_colormap } /// Get a visual type from a visual ID. #[inline] pub fn visual_id_to_visual(&self, id: Visualid) -> Option<&Visualtype> { self.setup .roots .iter() .flat_map(|s| s.allowed_depths.iter()) .flat_map(|d| d.visuals.iter()) .find(|v| v.visual_id == id) } /// Generate a unique X ID for a window, colormap, or other object. Usually, `Display`'s helper functions /// will generate this for you. If you'd like to circumvent them, this will generate IDs for you. #[inline] pub fn generate_xid(&mut self) -> crate::Result<XID> { Ok(self.xid.next().unwrap()) } /// Wait for an event to be generated by the X server. /// /// This checks the event queue for a new event. If the queue is empty, the `Display` will poll the /// server for new events. #[inline] pub fn wait_for_event(&mut self) -> crate::Result<Event> { log::debug!("Beginning event wait..."); loop { match self.event_queue.pop_front() { Some(event) => break Ok(event), None => self.wait()?, } } } /// Wait for an event to be generated by the X server, async redox. See the `wait_for_event` function for /// more information.
#[cfg(feature = "async")] #[inline] pub async fn wait_for_event_async(&mut self) -> crate::Result<Event> { loop { match self.event_queue.pop_front() { Some(event) => break Ok(event), None => self.wait_async().await?, } } } /// If there is an event currently in the queue that matches the predicate, returns true. #[inline] pub fn check_if_event<F: FnMut(&Event) -> bool>(&self, predicate: F) -> bool { self.event_queue.iter().any(predicate) } /* /// Save a pointer into this display's map of contexts. #[inline] pub fn save_context(&mut self, xid: XID, context: ContextID, data: NonNull<c_void>) { self.context.insert((xid, context), data); } /// Retrieve a pointer from the context. #[inline] pub fn find_context(&mut self, xid: XID, context: ContextID) -> Option<NonNull<c_void>> { self.context.get(&(xid, context)).copied() } /// Delete an entry in the context. #[inline] pub fn delete_context(&mut self, xid: XID, context: ContextID) { self.context.remove(&(xid, context)); } */ }
#[cfg(feature = "std")] impl DisplayConnection { /// Create a new connection to the X server, given an optional name and authorization information. #[inline] pub fn create(name: Option<Cow<'_, str>>, auth_info: Option<AuthInfo>) -> crate::Result<Self> { let connection = name::NameConnection::connect_internal(name)?; Self::from_connection(connection, auth_info) } /// Create a new connection to the X server, given an optional name and authorization information, async /// redox. #[cfg(feature = "async")] #[inline] pub async fn create_async( name: Option<Cow<'_, str>>, auth_info: Option<AuthInfo>, ) -> crate::Result<Self> { let connection = name::NameConnection::connect_internal_async(name).await?; Self::from_connection_async(connection, auth_info).await } }
/// A variant of `Display` that uses X11's default connection mechanisms to connect to the server. In /// most cases, you should be using this over any variant of `Display`. #[cfg(feature = "std")] pub type DisplayConnection = Display<name::NameConnection>;
random_line_split
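The request/reply split documented in the `mod.rs` row above is the core of this module's API: `send_request` writes the request and hands back a typed `RequestCookie` carrying the truncated 16-bit sequence number, and `resolve_request` later blocks (repeatedly calling `wait`) until `pending_replies` contains that sequence number. Below is a minimal sketch of how the two compose, written against the signatures shown in the module; the `pipelined` helper itself is hypothetical, not part of breadx.

/// Sketch only: send two copies of a request back to back, then resolve
/// both replies. Because the sends return immediately, the requests are
/// pipelined on the wire instead of ping-ponging with the server.
fn pipelined<Conn: Connection, R: Request + Clone>(
    dpy: &mut Display<Conn>,
    req: R,
) -> crate::Result<(R::Reply, R::Reply)>
where
    R::Reply: Default,
{
    // Neither call waits on the server; each cookie records its request's
    // truncated sequence number.
    let first = dpy.send_request(req.clone())?;
    let second = dpy.send_request(req)?;

    // Resolution order is up to the caller; each call polls the server
    // until the matching reply lands in `pending_replies`.
    let a = dpy.resolve_request(first)?;
    let b = dpy.resolve_request(second)?;
    Ok((a, b))
}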
solvers.py
""" Functions concerned with solving for decoders or full weight matrices. Many of the solvers in this file can solve for decoders or weight matrices, depending on whether the post-population encoders `E` are provided (see below). Solvers that are only intended to solve for either decoders or weights can remove the `E` parameter or make it manditory as they see fit. """ import collections import logging import numpy as np from nengo.params import Parameter import nengo.utils.numpy as npext from nengo.utils.compat import range, with_metaclass, iteritems from nengo.utils.magic import DocstringInheritor logger = logging.getLogger(__name__) def _rmses(A, X, Y): """Returns the root-mean-squared error (RMSE) of the solution X.""" return npext.rms(Y - np.dot(A, X), axis=0) def cholesky(A, y, sigma, transpose=None): """Solve the least-squares system using the Cholesky decomposition.""" m, n = A.shape if transpose is None: # transpose if matrix is fat, but not if we have sigmas for each neuron transpose = m < n and sigma.size == 1 if transpose: # substitution: x = A'*xbar, G*xbar = b where G = A*A' + lambda*I G = np.dot(A, A.T) b = y else: # multiplication by A': G*x = A'*b where G = A'*A + lambda*I G = np.dot(A.T, A) b = np.dot(A.T, y) # add L2 regularization term 'lambda' = m * sigma**2 np.fill_diagonal(G, G.diagonal() + m * sigma**2) try: import scipy.linalg factor = scipy.linalg.cho_factor(G, overwrite_a=True) x = scipy.linalg.cho_solve(factor, b) except ImportError: L = np.linalg.cholesky(G) L = np.linalg.inv(L.T) x = np.dot(L, np.dot(L.T, b)) x = np.dot(A.T, x) if transpose else x info = {'rmses': _rmses(A, x, y)} return x, info def conjgrad_scipy(A, Y, sigma, tol=1e-4): """Solve the least-squares system using Scipy's conjugate gradient.""" import scipy.sparse.linalg Y, m, n, d, matrix_in = _format_system(A, Y) damp = m * sigma**2 calcAA = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x G = scipy.sparse.linalg.LinearOperator( (n, n), matvec=calcAA, matmat=calcAA, dtype=A.dtype) B = np.dot(A.T, Y) X = np.zeros((n, d), dtype=B.dtype) infos = np.zeros(d, dtype='int') itns = np.zeros(d, dtype='int') for i in range(d): def callback(x): itns[i] += 1 # use the callback to count the number of iterations X[:, i], infos[i] = scipy.sparse.linalg.cg( G, B[:, i], tol=tol, callback=callback) info = {'rmses': _rmses(A, X, Y), 'iterations': itns, 'info': infos} return X if matrix_in else X.flatten(), info def lsmr_scipy(A, Y, sigma, tol=1e-4): """Solve the least-squares system using Scipy's LSMR.""" import scipy.sparse.linalg Y, m, n, d, matrix_in = _format_system(A, Y) damp = sigma * np.sqrt(m) X = np.zeros((n, d), dtype=Y.dtype) itns = np.zeros(d, dtype='int') for i in range(d): X[:, i], _, itns[i], _, _, _, _, _ = scipy.sparse.linalg.lsmr( A, Y[:, i], damp=damp, atol=tol, btol=tol) info = {'rmses': _rmses(A, X, Y), 'iterations': itns} return X if matrix_in else X.flatten(), info def _conjgrad_iters(calcAx, b, x, maxiters=None, rtol=1e-6): """Solve the single-RHS linear system using conjugate gradient.""" if maxiters is None: maxiters = b.shape[0] r = b - calcAx(x) p = r.copy() rsold = np.dot(r, r) for i in range(maxiters): Ap = calcAx(p) alpha = rsold / np.dot(p, Ap) x += alpha * p r -= alpha * Ap rsnew = np.dot(r, r) beta = rsnew / rsold if np.sqrt(rsnew) < rtol: break if beta < 1e-12: # no perceptible change in p break # p = r + beta*p p *= beta p += r rsold = rsnew return x, i+1 def conjgrad(A, Y, sigma, X0=None, maxiters=None, tol=1e-2): """Solve the least-squares system using conjugate gradient.""" Y, 
m, n, d, matrix_in = _format_system(A, Y) damp = m * sigma**2 rtol = tol * np.sqrt(m) G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x B = np.dot(A.T, Y) X = np.zeros((n, d)) if X0 is None else np.array(X0).reshape((n, d)) iters = -np.ones(d, dtype='int') for i in range(d): X[:, i], iters[i] = _conjgrad_iters( G, B[:, i], X[:, i], maxiters=maxiters, rtol=rtol) info = {'rmses': _rmses(A, X, Y), 'iterations': iters} return X if matrix_in else X.flatten(), info def block_conjgrad(A, Y, sigma, X0=None, tol=1e-2): """Solve a multiple-RHS least-squares system using block conjugate gradient. """ Y, m, n, d, matrix_in = _format_system(A, Y) sigma = np.asarray(sigma, dtype='float') sigma = sigma.reshape(sigma.size, 1) damp = m * sigma**2 rtol = tol * np.sqrt(m) G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x B = np.dot(A.T, Y) # --- conjugate gradient X = np.zeros((n, d)) if X0 is None else np.array(X0).reshape((n, d)) R = B - G(X) P = np.array(R) Rsold = np.dot(R.T, R) AP = np.zeros((n, d)) maxiters = int(n / d) for i in range(maxiters): AP = G(P) alpha = np.linalg.solve(np.dot(P.T, AP), Rsold) X += np.dot(P, alpha) R -= np.dot(AP, alpha) Rsnew = np.dot(R.T, R) if (np.diag(Rsnew) < rtol**2).all(): break beta = np.linalg.solve(Rsold, Rsnew) P = R + np.dot(P, beta) Rsold = Rsnew info = {'rmses': _rmses(A, X, Y), 'iterations': i + 1} return X if matrix_in else X.flatten(), info def _format_system(A, Y): m, n = A.shape matrix_in = Y.ndim > 1 d = Y.shape[1] if matrix_in else 1 Y = Y.reshape((Y.shape[0], d)) return Y, m, n, d, matrix_in class Solver(with_metaclass(DocstringInheritor)): """ Decoder or weight solver. """ def __call__(self, A, Y, rng=None, E=None): """Call the solver. Parameters ---------- A : array_like (M, N) Matrix of the N neurons' activities at the M evaluation points Y : array_like (M, D) Matrix of the target decoded values for each of the D dimensions, at each of the M evaluation points. rng : numpy.RandomState, optional A random number generator to use as required. If none is provided, numpy.random will be used. E : array_like (D, N2), optional Array of post-population encoders. Providing this tells the solver to return an array of connection weights rather than decoders. Returns ------- X : np.ndarray (N, D) or (N, N2) (N, D) array of decoders (if solver.weights == False) or (N, N2) array of weights (if solver.weights == True). info : dict A dictionary of information about the solve. All dictionaries have an 'rmses' key that contains RMS errors of the solve. Other keys are unique to particular solvers.
""" raise NotImplementedError("Solvers must implement '__call__'") def mul_encoders(self, Y, E, copy=False): if self.weights: if E is None: raise ValueError("Encoders must be provided for weight solver") return np.dot(Y, E) else: if E is not None: raise ValueError("Encoders must be 'None' for decoder solver") return Y.copy() if copy else Y def __hash__(self): items = list(self.__dict__.items()) items.sort(key=lambda item: item[0]) hashes = [] for k, v in items: if isinstance(v, np.ndarray): if v.size < 1e5: a = v[:] a.setflags(write=False) hashes.append(hash(a)) else: raise ValueError("array is too large to hash") elif isinstance(v, collections.Iterable): hashes.append(hash(tuple(v))) elif isinstance(v, collections.Callable): hashes.append(hash(v.__code__)) else: hashes.append(hash(v)) return hash(tuple(hashes)) def __str__(self): return "%s(%s)" % ( self.__class__.__name__, ', '.join("%s=%s" % (k, v) for k, v in iteritems(self.__dict__))) class Lstsq(Solver): """Unregularized least-squares""" def __init__(self, weights=False, rcond=0.01): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. rcond : float, optional Cut-off ratio for small singular values (see `numpy.linalg.lstsq`). """ self.rcond = rcond self.weights = weights def __call__(self, A, Y, rng=None, E=None): Y = self.mul_encoders(Y, E) X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=self.rcond) return X, {'rmses': _rmses(A, X, Y), 'residuals': np.sqrt(residuals2), 'rank': rank, 'singular_values': s} class _LstsqNoiseSolver(Solver): """Base for least-squares solvers with noise""" def __init__(self, weights=False, noise=0.1, solver=cholesky, **kwargs): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. noise : float, optional Amount of noise, as a fraction of the neuron activity. solver : callable, optional Subsolver to use for solving the least-squares problem. kwargs Additional arguments passed to `solver`. """ self.weights = weights self.noise = noise self.solver = solver self.kwargs = kwargs class LstsqNoise(_LstsqNoiseSolver): """Least-squares with additive Gaussian white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng sigma = self.noise * A.max() A = A + rng.normal(scale=sigma, size=A.shape) X, info = self.solver(A, Y, 0, **self.kwargs) return self.mul_encoders(X, E), info class LstsqMultNoise(_LstsqNoiseSolver): """Least-squares with multiplicative white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng A = A + rng.normal(scale=self.noise, size=A.shape) * A X, info = self.solver(A, Y, 0, **self.kwargs) return self.mul_encoders(X, E), info class _LstsqL2Solver(Solver): """Base for L2-regularized least-squares solvers""" def __init__(self, weights=False, reg=0.1, solver=cholesky, **kwargs): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. reg : float, optional Amount of regularization, as a fraction of the neuron activity. solver : callable, optional Subsolver to use for solving the least-squares problem. kwargs Additional arguments passed to `solver`. """ self.weights = weights self.reg = reg self.solver = solver self.kwargs = kwargs
def __call__(self, A, Y, rng=None, E=None): sigma = self.reg * A.max() X, info = self.solver(A, Y, sigma, **self.kwargs) return self.mul_encoders(X, E), info class LstsqL2nz(_LstsqL2Solver): """Least-squares with L2 regularization on non-zero components.""" def __call__(self, A, Y, rng=None, E=None): # Compute the equivalent noise standard deviation. This equals the # base amplitude (noise_amp times the overall max activation) times # the square-root of the fraction of non-zero components. sigma = (self.reg * A.max()) * np.sqrt((A > 0).mean(axis=0)) # sigma == 0 means the neuron is never active, so won't be used, but # we have to make sigma != 0 for numeric reasons. sigma[sigma == 0] = sigma.max() X, info = self.solver(A, Y, sigma, **self.kwargs) return self.mul_encoders(X, E), info class LstsqL1(Solver): """Least-squares with L1 and L2 regularization (elastic net). This method is well suited for creating sparse decoders or weight matrices. """ def __init__(self, weights=False, l1=1e-4, l2=1e-6): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. l1 : float, optional Amount of L1 regularization. l2 : float, optional Amount of L2 regularization. """ import sklearn.linear_model # noqa F401, import to check existence assert sklearn.linear_model self.weights = weights self.l1 = l1 self.l2 = l2 def __call__(self, A, Y, rng=None, E=None): import sklearn.linear_model Y = self.mul_encoders(Y, E, copy=True) # copy since 'fit' may modify Y # TODO: play around with regularization constants (I just guessed). # Do we need to scale regularization by number of neurons, to get # same level of sparsity? esp. with weights? Currently, setting # l1=1e-3 works well with weights when connecting 1D populations # with 100 neurons each. a = self.l1 * A.max() # L1 regularization b = self.l2 * A.max()**2 # L2 regularization alpha = a + b l1_ratio = a / (a + b) # --- solve least-squares A * X = Y model = sklearn.linear_model.ElasticNet( alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False, max_iter=1000) model.fit(A, Y) X = model.coef_.T X.shape = (A.shape[1], Y.shape[1]) if Y.ndim > 1 else (A.shape[1],) infos = {'rmses': _rmses(A, X, Y)} return X, infos class LstsqDrop(Solver): """Find sparser decoders/weights by dropping small values. This solver first solves for coefficients (decoders/weights) with L2 regularization, drops those nearest to zero, and retrains remaining. """ def __init__(self, weights=False, drop=0.25, solver1=LstsqL2nz(reg=0.1), solver2=LstsqL2nz(reg=0.01)): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. drop : float, optional Fraction of decoders or weights to set to zero. solver1 : Solver, optional Solver for finding the initial decoders. solver2 : Solver, optional Used for re-solving for the decoders after dropout. 
""" self.weights = weights self.drop = drop self.solver1 = solver1 self.solver2 = solver2 def __call__(self, A, Y, rng=None, E=None): Y, m, n, d, matrix_in = _format_system(A, Y) # solve for coefficients using standard solver X, info0 = self.solver1(A, Y, rng=rng) X = self.mul_encoders(X, E) # drop weights close to zero, based on `drop` ratio Xabs = np.sort(np.abs(X.flat)) threshold = Xabs[int(np.round(self.drop * Xabs.size))] X[np.abs(X) < threshold] = 0 # retrain nonzero weights Y = self.mul_encoders(Y, E) for i in range(X.shape[1]): nonzero = X[:, i] != 0 if nonzero.sum() > 0: X[nonzero, i], info1 = self.solver2( A[:, nonzero], Y[:, i], rng=rng) info = {'rmses': _rmses(A, X, Y), 'info0': info0, 'info1': info1} return X if matrix_in else X.flatten(), info class Nnls(Solver): """Non-negative least-squares without regularization. Similar to `lstsq`, except the output values are non-negative. """ def __init__(self, weights=False): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. """ import scipy.optimize # import here too to throw error early assert scipy.optimize self.weights = weights def __call__(self, A, Y, rng=None, E=None): import scipy.optimize Y, m, n, d, matrix_in = _format_system(A, Y) Y = self.mul_encoders(Y, E) X = np.zeros((n, d)) residuals = np.zeros(d) for i in range(d): X[:, i], residuals[i] = scipy.optimize.nnls(A, Y[:, i]) info = {'rmses': _rmses(A, X, Y), 'residuals': residuals} return X if matrix_in else X.flatten(), info class NnlsL2(Nnls): """Non-negative least-squares with L2 regularization. Similar to `lstsq_L2`, except the output values are non-negative. """ def __init__(self, weights=False, reg=0.1): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. reg : float, optional Amount of regularization, as a fraction of the neuron activity. """ super(NnlsL2, self).__init__(weights) self.reg = reg def _solve(self, A, Y, rng, E, sigma): # form Gram matrix so we can add regularization GA = np.dot(A.T, A) GY = np.dot(A.T, Y) np.fill_diagonal(GA, GA.diagonal() + A.shape[0] * sigma**2) X, info = super(NnlsL2, self).__call__(GA, GY, rng=rng, E=E) # recompute the RMSE in terms of the original matrices info = {'rmses': _rmses(A, X, Y), 'gram_info': info} return X, info def __call__(self, A, Y, rng=None, E=None): return self._solve(A, Y, rng, E, sigma=self.reg * A.max()) class NnlsL2nz(NnlsL2): """Non-negative least-squares with L2 regularization on nonzero components. Similar to `lstsq_L2nz`, except the output values are non-negative. """ def __call__(self, A, Y, rng=None, E=None): sigma = (self.reg * A.max()) * np.sqrt((A > 0).mean(axis=0)) sigma[sigma == 0] = 1 return self._solve(A, Y, rng, E, sigma=sigma) class SolverParam(Parameter): def validate(self, instance, solver): if solver is not None and not isinstance(solver, Solver): raise ValueError("'%s' is not a solver" % solver) super(SolverParam, self).validate(instance, solver)
class LstsqL2(_LstsqL2Solver): """Least-squares with L2 regularization."""
random_line_split
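Most of the subsolvers in the `solvers.py` row above (`cholesky`, `conjgrad_scipy`, `conjgrad`, `block_conjgrad`) are different factorizations of the same Tikhonov-regularized least-squares problem. With activities $A \in \mathbb{R}^{m \times n}$, targets $Y$, and noise level $\sigma$, the `m * sigma**2` damping added to the Gram diagonal corresponds to

$$ X = \arg\min_X \; \lVert A X - Y \rVert_2^2 + m\sigma^2 \lVert X \rVert_2^2 = \left(A^\top A + m\sigma^2 I\right)^{-1} A^\top Y, $$

and the `transpose` branch of `cholesky` solves the equivalent dual form $X = A^\top \left(A A^\top + m\sigma^2 I\right)^{-1} Y$, which trades an n-by-n solve for an m-by-m one and is therefore preferred when $m < n$ (a "fat" activity matrix).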
solvers.py
""" Functions concerned with solving for decoders or full weight matrices. Many of the solvers in this file can solve for decoders or weight matrices, depending on whether the post-population encoders `E` are provided (see below). Solvers that are only intended to solve for either decoders or weights can remove the `E` parameter or make it manditory as they see fit. """ import collections import logging import numpy as np from nengo.params import Parameter import nengo.utils.numpy as npext from nengo.utils.compat import range, with_metaclass, iteritems from nengo.utils.magic import DocstringInheritor logger = logging.getLogger(__name__) def _rmses(A, X, Y): """Returns the root-mean-squared error (RMSE) of the solution X.""" return npext.rms(Y - np.dot(A, X), axis=0) def cholesky(A, y, sigma, transpose=None): """Solve the least-squares system using the Cholesky decomposition.""" m, n = A.shape if transpose is None: # transpose if matrix is fat, but not if we have sigmas for each neuron transpose = m < n and sigma.size == 1 if transpose: # substitution: x = A'*xbar, G*xbar = b where G = A*A' + lambda*I G = np.dot(A, A.T) b = y else: # multiplication by A': G*x = A'*b where G = A'*A + lambda*I G = np.dot(A.T, A) b = np.dot(A.T, y) # add L2 regularization term 'lambda' = m * sigma**2 np.fill_diagonal(G, G.diagonal() + m * sigma**2) try: import scipy.linalg factor = scipy.linalg.cho_factor(G, overwrite_a=True) x = scipy.linalg.cho_solve(factor, b) except ImportError: L = np.linalg.cholesky(G) L = np.linalg.inv(L.T) x = np.dot(L, np.dot(L.T, b)) x = np.dot(A.T, x) if transpose else x info = {'rmses': _rmses(A, x, y)} return x, info def conjgrad_scipy(A, Y, sigma, tol=1e-4): """Solve the least-squares system using Scipy's conjugate gradient.""" import scipy.sparse.linalg Y, m, n, d, matrix_in = _format_system(A, Y) damp = m * sigma**2 calcAA = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x G = scipy.sparse.linalg.LinearOperator( (n, n), matvec=calcAA, matmat=calcAA, dtype=A.dtype) B = np.dot(A.T, Y) X = np.zeros((n, d), dtype=B.dtype) infos = np.zeros(d, dtype='int') itns = np.zeros(d, dtype='int') for i in range(d): def callback(x): itns[i] += 1 # use the callback to count the number of iterations X[:, i], infos[i] = scipy.sparse.linalg.cg( G, B[:, i], tol=tol, callback=callback) info = {'rmses': _rmses(A, X, Y), 'iterations': itns, 'info': infos} return X if matrix_in else X.flatten(), info def lsmr_scipy(A, Y, sigma, tol=1e-4): """Solve the least-squares system using Scipy's LSMR.""" import scipy.sparse.linalg Y, m, n, d, matrix_in = _format_system(A, Y) damp = sigma * np.sqrt(m) X = np.zeros((n, d), dtype=Y.dtype) itns = np.zeros(d, dtype='int') for i in range(d): X[:, i], _, itns[i], _, _, _, _, _ = scipy.sparse.linalg.lsmr( A, Y[:, i], damp=damp, atol=tol, btol=tol) info = {'rmses': _rmses(A, X, Y), 'iterations': itns} return X if matrix_in else X.flatten(), info def _conjgrad_iters(calcAx, b, x, maxiters=None, rtol=1e-6): """Solve the single-RHS linear system using conjugate gradient.""" if maxiters is None: maxiters = b.shape[0] r = b - calcAx(x) p = r.copy() rsold = np.dot(r, r) for i in range(maxiters): Ap = calcAx(p) alpha = rsold / np.dot(p, Ap) x += alpha * p r -= alpha * Ap rsnew = np.dot(r, r) beta = rsnew / rsold if np.sqrt(rsnew) < rtol: break if beta < 1e-12: # no perceptible change in p break # p = r + beta*p p *= beta p += r rsold = rsnew return x, i+1 def conjgrad(A, Y, sigma, X0=None, maxiters=None, tol=1e-2): """Solve the least-squares system using conjugate gradient.""" Y, 
m, n, d, matrix_in = _format_system(A, Y) damp = m * sigma**2 rtol = tol * np.sqrt(m) G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x B = np.dot(A.T, Y) X = np.zeros((n, d)) if X0 is None else np.array(X0).reshape((n, d)) iters = -np.ones(d, dtype='int') for i in range(d): X[:, i], iters[i] = _conjgrad_iters( G, B[:, i], X[:, i], maxiters=maxiters, rtol=rtol) info = {'rmses': _rmses(A, X, Y), 'iterations': iters} return X if matrix_in else X.flatten(), info def block_conjgrad(A, Y, sigma, X0=None, tol=1e-2): """Solve a multiple-RHS least-squares system using block conjuate gradient. """ Y, m, n, d, matrix_in = _format_system(A, Y) sigma = np.asarray(sigma, dtype='float') sigma = sigma.reshape(sigma.size, 1) damp = m * sigma**2 rtol = tol * np.sqrt(m) G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x B = np.dot(A.T, Y) # --- conjugate gradient X = np.zeros((n, d)) if X0 is None else np.array(X0).reshape((n, d)) R = B - G(X) P = np.array(R) Rsold = np.dot(R.T, R) AP = np.zeros((n, d)) maxiters = int(n / d) for i in range(maxiters): AP = G(P) alpha = np.linalg.solve(np.dot(P.T, AP), Rsold) X += np.dot(P, alpha) R -= np.dot(AP, alpha) Rsnew = np.dot(R.T, R) if (np.diag(Rsnew) < rtol**2).all(): break beta = np.linalg.solve(Rsold, Rsnew) P = R + np.dot(P, beta) Rsold = Rsnew info = {'rmses': _rmses(A, X, Y), 'iterations': i + 1} return X if matrix_in else X.flatten(), info def _format_system(A, Y): m, n = A.shape matrix_in = Y.ndim > 1 d = Y.shape[1] if matrix_in else 1 Y = Y.reshape((Y.shape[0], d)) return Y, m, n, d, matrix_in class Solver(with_metaclass(DocstringInheritor)): """ Decoder or weight solver. """ def __call__(self, A, Y, rng=None, E=None): """Call the solver. Parameters ---------- A : array_like (M, N) Matrix of the N neurons' activities at the M evaluation points Y : array_like (M, D) Matrix of the target decoded values for each of the D dimensions, at each of the M evaluation points. rng : numpy.RandomState, optional A random number generator to use as required. If none is provided, numpy.random will be used. E : array_like (D, N2), optional Array of post-population encoders. Providing this tells the solver to return an array of connection weights rather than decoders. Returns ------- X : np.ndarray (N, D) or (N, N2) (N, D) array of decoders (if solver.weights == False) or (N, N2) array of weights (if solver.weights == True). info : dict A dictionary of information about the solve. All dictionaries have an 'rmses' key that contains RMS errors of the solve. Other keys are unique to particular solvers. 
""" raise NotImplementedError("Solvers must implement '__call__'") def mul_encoders(self, Y, E, copy=False): if self.weights: if E is None: raise ValueError("Encoders must be provided for weight solver") return np.dot(Y, E) else: if E is not None: raise ValueError("Encoders must be 'None' for decoder solver") return Y.copy() if copy else Y def __hash__(self): items = list(self.__dict__.items()) items.sort(key=lambda item: item[0]) hashes = [] for k, v in items: if isinstance(v, np.ndarray): if v.size < 1e5: a = v[:] a.setflags(write=False) hashes.append(hash(a)) else: raise ValueError("array is too large to hash") elif isinstance(v, collections.Iterable): hashes.append(hash(tuple(v))) elif isinstance(v, collections.Callable): hashes.append(hash(v.__code__)) else: hashes.append(hash(v)) return hash(tuple(hashes)) def __str__(self): return "%s(%s)" % ( self.__class__.__name__, ', '.join("%s=%s" % (k, v) for k, v in iteritems(self.__dict__))) class Lstsq(Solver): """Unregularized least-squares""" def __init__(self, weights=False, rcond=0.01): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. rcond : float, optional Cut-off ratio for small singular values (see `numpy.linalg.lstsq`). """ self.rcond = rcond self.weights = weights def __call__(self, A, Y, rng=None, E=None): Y = self.mul_encoders(Y, E) X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=self.rcond) return X, {'rmses': _rmses(A, X, Y), 'residuals': np.sqrt(residuals2), 'rank': rank, 'singular_values': s} class _LstsqNoiseSolver(Solver): """Base for least-squares solvers with noise""" def __init__(self, weights=False, noise=0.1, solver=cholesky, **kwargs): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. noise : float, optional Amount of noise, as a fraction of the neuron activity. solver : callable, optional Subsolver to use for solving the least-squares problem. kwargs Additional arguments passed to `solver`. """ self.weights = weights self.noise = noise self.solver = solver self.kwargs = kwargs class LstsqNoise(_LstsqNoiseSolver): """Least-squares with additive Gaussian white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng sigma = self.noise * A.max() A = A + rng.normal(scale=sigma, size=A.shape) X, info = self.solver(A, Y, 0, **self.kwargs) return self.mul_encoders(X, E), info class LstsqMultNoise(_LstsqNoiseSolver): """Least-squares with multiplicative white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng A = A + rng.normal(scale=self.noise, size=A.shape) * A X, info = self.solver(A, Y, 0, **self.kwargs) return self.mul_encoders(X, E), info class _LstsqL2Solver(Solver): """Base for L2-regularized least-squares solvers""" def __init__(self, weights=False, reg=0.1, solver=cholesky, **kwargs): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. reg : float, optional Amount of regularization, as a fraction of the neuron activity. solver : callable, optional Subsolver to use for solving the least-squares problem. kwargs Additional arguments passed to `solver`. 
""" self.weights = weights self.reg = reg self.solver = solver self.kwargs = kwargs class LstsqL2(_LstsqL2Solver): """Least-squares with L2 regularization.""" def __call__(self, A, Y, rng=None, E=None): sigma = self.reg * A.max() X, info = self.solver(A, Y, sigma, **self.kwargs) return self.mul_encoders(X, E), info class LstsqL2nz(_LstsqL2Solver): """Least-squares with L2 regularization on non-zero components.""" def __call__(self, A, Y, rng=None, E=None): # Compute the equivalent noise standard deviation. This equals the # base amplitude (noise_amp times the overall max activation) times # the square-root of the fraction of non-zero components. sigma = (self.reg * A.max()) * np.sqrt((A > 0).mean(axis=0)) # sigma == 0 means the neuron is never active, so won't be used, but # we have to make sigma != 0 for numeric reasons. sigma[sigma == 0] = sigma.max() X, info = self.solver(A, Y, sigma, **self.kwargs) return self.mul_encoders(X, E), info class LstsqL1(Solver): """Least-squares with L1 and L2 regularization (elastic net). This method is well suited for creating sparse decoders or weight matrices. """ def __init__(self, weights=False, l1=1e-4, l2=1e-6): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. l1 : float, optional Amount of L1 regularization. l2 : float, optional Amount of L2 regularization. """ import sklearn.linear_model # noqa F401, import to check existence assert sklearn.linear_model self.weights = weights self.l1 = l1 self.l2 = l2 def __call__(self, A, Y, rng=None, E=None): import sklearn.linear_model Y = self.mul_encoders(Y, E, copy=True) # copy since 'fit' may modify Y # TODO: play around with regularization constants (I just guessed). # Do we need to scale regularization by number of neurons, to get # same level of sparsity? esp. with weights? Currently, setting # l1=1e-3 works well with weights when connecting 1D populations # with 100 neurons each. a = self.l1 * A.max() # L1 regularization b = self.l2 * A.max()**2 # L2 regularization alpha = a + b l1_ratio = a / (a + b) # --- solve least-squares A * X = Y model = sklearn.linear_model.ElasticNet( alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False, max_iter=1000) model.fit(A, Y) X = model.coef_.T X.shape = (A.shape[1], Y.shape[1]) if Y.ndim > 1 else (A.shape[1],) infos = {'rmses': _rmses(A, X, Y)} return X, infos class LstsqDrop(Solver): """Find sparser decoders/weights by dropping small values. This solver first solves for coefficients (decoders/weights) with L2 regularization, drops those nearest to zero, and retrains remaining. """ def __init__(self, weights=False, drop=0.25, solver1=LstsqL2nz(reg=0.1), solver2=LstsqL2nz(reg=0.01)):
def __call__(self, A, Y, rng=None, E=None): Y, m, n, d, matrix_in = _format_system(A, Y) # solve for coefficients using standard solver X, info0 = self.solver1(A, Y, rng=rng) X = self.mul_encoders(X, E) # drop weights close to zero, based on `drop` ratio Xabs = np.sort(np.abs(X.flat)) threshold = Xabs[int(np.round(self.drop * Xabs.size))] X[np.abs(X) < threshold] = 0 # retrain nonzero weights Y = self.mul_encoders(Y, E) for i in range(X.shape[1]): nonzero = X[:, i] != 0 if nonzero.sum() > 0: X[nonzero, i], info1 = self.solver2( A[:, nonzero], Y[:, i], rng=rng) info = {'rmses': _rmses(A, X, Y), 'info0': info0, 'info1': info1} return X if matrix_in else X.flatten(), info class Nnls(Solver): """Non-negative least-squares without regularization. Similar to `lstsq`, except the output values are non-negative. """ def __init__(self, weights=False): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. """ import scipy.optimize # import here too to throw error early assert scipy.optimize self.weights = weights def __call__(self, A, Y, rng=None, E=None): import scipy.optimize Y, m, n, d, matrix_in = _format_system(A, Y) Y = self.mul_encoders(Y, E) X = np.zeros((n, d)) residuals = np.zeros(d) for i in range(d): X[:, i], residuals[i] = scipy.optimize.nnls(A, Y[:, i]) info = {'rmses': _rmses(A, X, Y), 'residuals': residuals} return X if matrix_in else X.flatten(), info class NnlsL2(Nnls): """Non-negative least-squares with L2 regularization. Similar to `lstsq_L2`, except the output values are non-negative. """ def __init__(self, weights=False, reg=0.1): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. reg : float, optional Amount of regularization, as a fraction of the neuron activity. """ super(NnlsL2, self).__init__(weights) self.reg = reg def _solve(self, A, Y, rng, E, sigma): # form Gram matrix so we can add regularization GA = np.dot(A.T, A) GY = np.dot(A.T, Y) np.fill_diagonal(GA, GA.diagonal() + A.shape[0] * sigma**2) X, info = super(NnlsL2, self).__call__(GA, GY, rng=rng, E=E) # recompute the RMSE in terms of the original matrices info = {'rmses': _rmses(A, X, Y), 'gram_info': info} return X, info def __call__(self, A, Y, rng=None, E=None): return self._solve(A, Y, rng, E, sigma=self.reg * A.max()) class NnlsL2nz(NnlsL2): """Non-negative least-squares with L2 regularization on nonzero components. Similar to `lstsq_L2nz`, except the output values are non-negative. """ def __call__(self, A, Y, rng=None, E=None): sigma = (self.reg * A.max()) * np.sqrt((A > 0).mean(axis=0)) sigma[sigma == 0] = 1 return self._solve(A, Y, rng, E, sigma=sigma) class SolverParam(Parameter): def validate(self, instance, solver): if solver is not None and not isinstance(solver, Solver): raise ValueError("'%s' is not a solver" % solver) super(SolverParam, self).validate(instance, solver)
""" weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. drop : float, optional Fraction of decoders or weights to set to zero. solver1 : Solver, optional Solver for finding the initial decoders. solver2 : Solver, optional Used for re-solving for the decoders after dropout. """ self.weights = weights self.drop = drop self.solver1 = solver1 self.solver2 = solver2
identifier_body
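Each row in this dump pairs a file_name with a prefix, a suffix, the held-out middle, and a fim_type tag (identifier_body, identifier_name, conditional_block, or random_line_split) describing where the split was made. As a minimal sketch of how such a row is typically assembled into a fill-in-the-middle training string; the sentinel tokens below are an assumption for illustration, not something this dataset specifies:

    def to_fim_string(row, pre="<fim_prefix>", suf="<fim_suffix>", mid="<fim_middle>"):
        # PSM ordering: prefix, then suffix, then the middle the model must produce.
        return pre + row["prefix"] + suf + row["suffix"] + mid + row["middle"]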
solvers.py
""" Functions concerned with solving for decoders or full weight matrices. Many of the solvers in this file can solve for decoders or weight matrices, depending on whether the post-population encoders `E` are provided (see below). Solvers that are only intended to solve for either decoders or weights can remove the `E` parameter or make it manditory as they see fit. """ import collections import logging import numpy as np from nengo.params import Parameter import nengo.utils.numpy as npext from nengo.utils.compat import range, with_metaclass, iteritems from nengo.utils.magic import DocstringInheritor logger = logging.getLogger(__name__) def _rmses(A, X, Y): """Returns the root-mean-squared error (RMSE) of the solution X.""" return npext.rms(Y - np.dot(A, X), axis=0) def cholesky(A, y, sigma, transpose=None): """Solve the least-squares system using the Cholesky decomposition.""" m, n = A.shape if transpose is None: # transpose if matrix is fat, but not if we have sigmas for each neuron transpose = m < n and sigma.size == 1 if transpose: # substitution: x = A'*xbar, G*xbar = b where G = A*A' + lambda*I G = np.dot(A, A.T) b = y else: # multiplication by A': G*x = A'*b where G = A'*A + lambda*I G = np.dot(A.T, A) b = np.dot(A.T, y) # add L2 regularization term 'lambda' = m * sigma**2 np.fill_diagonal(G, G.diagonal() + m * sigma**2) try: import scipy.linalg factor = scipy.linalg.cho_factor(G, overwrite_a=True) x = scipy.linalg.cho_solve(factor, b) except ImportError: L = np.linalg.cholesky(G) L = np.linalg.inv(L.T) x = np.dot(L, np.dot(L.T, b)) x = np.dot(A.T, x) if transpose else x info = {'rmses': _rmses(A, x, y)} return x, info def conjgrad_scipy(A, Y, sigma, tol=1e-4): """Solve the least-squares system using Scipy's conjugate gradient.""" import scipy.sparse.linalg Y, m, n, d, matrix_in = _format_system(A, Y) damp = m * sigma**2 calcAA = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x G = scipy.sparse.linalg.LinearOperator( (n, n), matvec=calcAA, matmat=calcAA, dtype=A.dtype) B = np.dot(A.T, Y) X = np.zeros((n, d), dtype=B.dtype) infos = np.zeros(d, dtype='int') itns = np.zeros(d, dtype='int') for i in range(d): def callback(x): itns[i] += 1 # use the callback to count the number of iterations X[:, i], infos[i] = scipy.sparse.linalg.cg( G, B[:, i], tol=tol, callback=callback) info = {'rmses': _rmses(A, X, Y), 'iterations': itns, 'info': infos} return X if matrix_in else X.flatten(), info def lsmr_scipy(A, Y, sigma, tol=1e-4): """Solve the least-squares system using Scipy's LSMR.""" import scipy.sparse.linalg Y, m, n, d, matrix_in = _format_system(A, Y) damp = sigma * np.sqrt(m) X = np.zeros((n, d), dtype=Y.dtype) itns = np.zeros(d, dtype='int') for i in range(d): X[:, i], _, itns[i], _, _, _, _, _ = scipy.sparse.linalg.lsmr( A, Y[:, i], damp=damp, atol=tol, btol=tol) info = {'rmses': _rmses(A, X, Y), 'iterations': itns} return X if matrix_in else X.flatten(), info def _conjgrad_iters(calcAx, b, x, maxiters=None, rtol=1e-6): """Solve the single-RHS linear system using conjugate gradient.""" if maxiters is None: maxiters = b.shape[0] r = b - calcAx(x) p = r.copy() rsold = np.dot(r, r) for i in range(maxiters): Ap = calcAx(p) alpha = rsold / np.dot(p, Ap) x += alpha * p r -= alpha * Ap rsnew = np.dot(r, r) beta = rsnew / rsold if np.sqrt(rsnew) < rtol: break if beta < 1e-12: # no perceptible change in p break # p = r + beta*p p *= beta p += r rsold = rsnew return x, i+1 def conjgrad(A, Y, sigma, X0=None, maxiters=None, tol=1e-2): """Solve the least-squares system using conjugate gradient.""" Y, 
m, n, d, matrix_in = _format_system(A, Y)

    damp = m * sigma**2
    rtol = tol * np.sqrt(m)
    G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x
    B = np.dot(A.T, Y)

    X = np.zeros((n, d)) if X0 is None else np.array(X0).reshape((n, d))
    iters = -np.ones(d, dtype='int')
    for i in range(d):
        X[:, i], iters[i] = _conjgrad_iters(
            G, B[:, i], X[:, i], maxiters=maxiters, rtol=rtol)

    info = {'rmses': _rmses(A, X, Y), 'iterations': iters}
    return X if matrix_in else X.flatten(), info


def block_conjgrad(A, Y, sigma, X0=None, tol=1e-2):
    """Solve a multiple-RHS least-squares system using block conjugate gradient.
    """
    Y, m, n, d, matrix_in = _format_system(A, Y)
    sigma = np.asarray(sigma, dtype='float')
    sigma = sigma.reshape(sigma.size, 1)

    damp = m * sigma**2
    rtol = tol * np.sqrt(m)
    G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x
    B = np.dot(A.T, Y)

    # --- conjugate gradient
    X = np.zeros((n, d)) if X0 is None else np.array(X0).reshape((n, d))
    R = B - G(X)
    P = np.array(R)
    Rsold = np.dot(R.T, R)
    AP = np.zeros((n, d))

    maxiters = int(n / d)
    for i in range(maxiters):
        AP = G(P)
        alpha = np.linalg.solve(np.dot(P.T, AP), Rsold)
        X += np.dot(P, alpha)
        R -= np.dot(AP, alpha)

        Rsnew = np.dot(R.T, R)
        if (np.diag(Rsnew) < rtol**2).all():
            break

        beta = np.linalg.solve(Rsold, Rsnew)
        P = R + np.dot(P, beta)
        Rsold = Rsnew

    info = {'rmses': _rmses(A, X, Y), 'iterations': i + 1}
    return X if matrix_in else X.flatten(), info


def _format_system(A, Y):
    m, n = A.shape
    matrix_in = Y.ndim > 1
    d = Y.shape[1] if matrix_in else 1
    Y = Y.reshape((Y.shape[0], d))
    return Y, m, n, d, matrix_in


class Solver(with_metaclass(DocstringInheritor)):
    """
    Decoder or weight solver.
    """

    def __call__(self, A, Y, rng=None, E=None):
        """Call the solver.

        Parameters
        ----------
        A : array_like (M, N)
            Matrix of the N neurons' activities at the M evaluation points
        Y : array_like (M, D)
            Matrix of the target decoded values for each of the D dimensions,
            at each of the M evaluation points.
        rng : numpy.RandomState, optional
            A random number generator to use as required. If none is provided,
            numpy.random will be used.
        E : array_like (D, N2), optional
            Array of post-population encoders. Providing this tells the solver
            to return an array of connection weights rather than decoders.

        Returns
        -------
        X : np.ndarray (N, D) or (N, N2)
            (N, D) array of decoders (if solver.weights == False)
            or (N, N2) array of weights (if solver.weights == True).
        info : dict
            A dictionary of information about the solve. All dictionaries have
            an 'rmses' key that contains RMS errors of the solve. Other keys
            are unique to particular solvers.
""" raise NotImplementedError("Solvers must implement '__call__'") def mul_encoders(self, Y, E, copy=False): if self.weights: if E is None: raise ValueError("Encoders must be provided for weight solver") return np.dot(Y, E) else: if E is not None: raise ValueError("Encoders must be 'None' for decoder solver") return Y.copy() if copy else Y def __hash__(self): items = list(self.__dict__.items()) items.sort(key=lambda item: item[0]) hashes = [] for k, v in items: if isinstance(v, np.ndarray): if v.size < 1e5: a = v[:] a.setflags(write=False) hashes.append(hash(a)) else: raise ValueError("array is too large to hash") elif isinstance(v, collections.Iterable): hashes.append(hash(tuple(v))) elif isinstance(v, collections.Callable): hashes.append(hash(v.__code__)) else: hashes.append(hash(v)) return hash(tuple(hashes)) def __str__(self): return "%s(%s)" % ( self.__class__.__name__, ', '.join("%s=%s" % (k, v) for k, v in iteritems(self.__dict__))) class Lstsq(Solver): """Unregularized least-squares""" def __init__(self, weights=False, rcond=0.01): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. rcond : float, optional Cut-off ratio for small singular values (see `numpy.linalg.lstsq`). """ self.rcond = rcond self.weights = weights def __call__(self, A, Y, rng=None, E=None): Y = self.mul_encoders(Y, E) X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=self.rcond) return X, {'rmses': _rmses(A, X, Y), 'residuals': np.sqrt(residuals2), 'rank': rank, 'singular_values': s} class _LstsqNoiseSolver(Solver): """Base for least-squares solvers with noise""" def __init__(self, weights=False, noise=0.1, solver=cholesky, **kwargs): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. noise : float, optional Amount of noise, as a fraction of the neuron activity. solver : callable, optional Subsolver to use for solving the least-squares problem. kwargs Additional arguments passed to `solver`. """ self.weights = weights self.noise = noise self.solver = solver self.kwargs = kwargs class LstsqNoise(_LstsqNoiseSolver): """Least-squares with additive Gaussian white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng sigma = self.noise * A.max() A = A + rng.normal(scale=sigma, size=A.shape) X, info = self.solver(A, Y, 0, **self.kwargs) return self.mul_encoders(X, E), info class LstsqMultNoise(_LstsqNoiseSolver): """Least-squares with multiplicative white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng A = A + rng.normal(scale=self.noise, size=A.shape) * A X, info = self.solver(A, Y, 0, **self.kwargs) return self.mul_encoders(X, E), info class _LstsqL2Solver(Solver): """Base for L2-regularized least-squares solvers""" def __init__(self, weights=False, reg=0.1, solver=cholesky, **kwargs): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. reg : float, optional Amount of regularization, as a fraction of the neuron activity. solver : callable, optional Subsolver to use for solving the least-squares problem. kwargs Additional arguments passed to `solver`. """ self.weights = weights self.reg = reg self.solver = solver self.kwargs = kwargs class LstsqL2(_LstsqL2Solver): """Least-squares with L2 regularization.""" def __call__(self, A, Y, rng=None, E=None): sigma = self.reg * A.max() X, info = self.solver(A, Y, sigma, **self.kwargs) return self.mul_encoders(X, E), info class
(_LstsqL2Solver): """Least-squares with L2 regularization on non-zero components.""" def __call__(self, A, Y, rng=None, E=None): # Compute the equivalent noise standard deviation. This equals the # base amplitude (noise_amp times the overall max activation) times # the square-root of the fraction of non-zero components. sigma = (self.reg * A.max()) * np.sqrt((A > 0).mean(axis=0)) # sigma == 0 means the neuron is never active, so won't be used, but # we have to make sigma != 0 for numeric reasons. sigma[sigma == 0] = sigma.max() X, info = self.solver(A, Y, sigma, **self.kwargs) return self.mul_encoders(X, E), info class LstsqL1(Solver): """Least-squares with L1 and L2 regularization (elastic net). This method is well suited for creating sparse decoders or weight matrices. """ def __init__(self, weights=False, l1=1e-4, l2=1e-6): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. l1 : float, optional Amount of L1 regularization. l2 : float, optional Amount of L2 regularization. """ import sklearn.linear_model # noqa F401, import to check existence assert sklearn.linear_model self.weights = weights self.l1 = l1 self.l2 = l2 def __call__(self, A, Y, rng=None, E=None): import sklearn.linear_model Y = self.mul_encoders(Y, E, copy=True) # copy since 'fit' may modify Y # TODO: play around with regularization constants (I just guessed). # Do we need to scale regularization by number of neurons, to get # same level of sparsity? esp. with weights? Currently, setting # l1=1e-3 works well with weights when connecting 1D populations # with 100 neurons each. a = self.l1 * A.max() # L1 regularization b = self.l2 * A.max()**2 # L2 regularization alpha = a + b l1_ratio = a / (a + b) # --- solve least-squares A * X = Y model = sklearn.linear_model.ElasticNet( alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False, max_iter=1000) model.fit(A, Y) X = model.coef_.T X.shape = (A.shape[1], Y.shape[1]) if Y.ndim > 1 else (A.shape[1],) infos = {'rmses': _rmses(A, X, Y)} return X, infos class LstsqDrop(Solver): """Find sparser decoders/weights by dropping small values. This solver first solves for coefficients (decoders/weights) with L2 regularization, drops those nearest to zero, and retrains remaining. """ def __init__(self, weights=False, drop=0.25, solver1=LstsqL2nz(reg=0.1), solver2=LstsqL2nz(reg=0.01)): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. drop : float, optional Fraction of decoders or weights to set to zero. solver1 : Solver, optional Solver for finding the initial decoders. solver2 : Solver, optional Used for re-solving for the decoders after dropout. """ self.weights = weights self.drop = drop self.solver1 = solver1 self.solver2 = solver2 def __call__(self, A, Y, rng=None, E=None): Y, m, n, d, matrix_in = _format_system(A, Y) # solve for coefficients using standard solver X, info0 = self.solver1(A, Y, rng=rng) X = self.mul_encoders(X, E) # drop weights close to zero, based on `drop` ratio Xabs = np.sort(np.abs(X.flat)) threshold = Xabs[int(np.round(self.drop * Xabs.size))] X[np.abs(X) < threshold] = 0 # retrain nonzero weights Y = self.mul_encoders(Y, E) for i in range(X.shape[1]): nonzero = X[:, i] != 0 if nonzero.sum() > 0: X[nonzero, i], info1 = self.solver2( A[:, nonzero], Y[:, i], rng=rng) info = {'rmses': _rmses(A, X, Y), 'info0': info0, 'info1': info1} return X if matrix_in else X.flatten(), info class Nnls(Solver): """Non-negative least-squares without regularization. 
Similar to `lstsq`, except the output values are non-negative. """ def __init__(self, weights=False): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. """ import scipy.optimize # import here too to throw error early assert scipy.optimize self.weights = weights def __call__(self, A, Y, rng=None, E=None): import scipy.optimize Y, m, n, d, matrix_in = _format_system(A, Y) Y = self.mul_encoders(Y, E) X = np.zeros((n, d)) residuals = np.zeros(d) for i in range(d): X[:, i], residuals[i] = scipy.optimize.nnls(A, Y[:, i]) info = {'rmses': _rmses(A, X, Y), 'residuals': residuals} return X if matrix_in else X.flatten(), info class NnlsL2(Nnls): """Non-negative least-squares with L2 regularization. Similar to `lstsq_L2`, except the output values are non-negative. """ def __init__(self, weights=False, reg=0.1): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. reg : float, optional Amount of regularization, as a fraction of the neuron activity. """ super(NnlsL2, self).__init__(weights) self.reg = reg def _solve(self, A, Y, rng, E, sigma): # form Gram matrix so we can add regularization GA = np.dot(A.T, A) GY = np.dot(A.T, Y) np.fill_diagonal(GA, GA.diagonal() + A.shape[0] * sigma**2) X, info = super(NnlsL2, self).__call__(GA, GY, rng=rng, E=E) # recompute the RMSE in terms of the original matrices info = {'rmses': _rmses(A, X, Y), 'gram_info': info} return X, info def __call__(self, A, Y, rng=None, E=None): return self._solve(A, Y, rng, E, sigma=self.reg * A.max()) class NnlsL2nz(NnlsL2): """Non-negative least-squares with L2 regularization on nonzero components. Similar to `lstsq_L2nz`, except the output values are non-negative. """ def __call__(self, A, Y, rng=None, E=None): sigma = (self.reg * A.max()) * np.sqrt((A > 0).mean(axis=0)) sigma[sigma == 0] = 1 return self._solve(A, Y, rng, E, sigma=sigma) class SolverParam(Parameter): def validate(self, instance, solver): if solver is not None and not isinstance(solver, Solver): raise ValueError("'%s' is not a solver" % solver) super(SolverParam, self).validate(instance, solver)
LstsqL2nz
identifier_name
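The cholesky helper in the solvers.py rows solves the regularized normal equations (A'A + m*sigma**2*I) x = A'y. A NumPy-only sketch of the same computation, using numpy.linalg.solve in place of an explicit Cholesky factorization:

    import numpy as np

    def ridge_solve(A, y, sigma):
        m = A.shape[0]
        G = np.dot(A.T, A)  # Gram matrix A'A
        np.fill_diagonal(G, G.diagonal() + m * sigma**2)  # add the L2 term
        return np.linalg.solve(G, np.dot(A.T, y))

    A = np.random.uniform(size=(100, 20))
    x = ridge_solve(A, np.random.uniform(size=100), sigma=0.1 * A.max())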
solvers.py
""" Functions concerned with solving for decoders or full weight matrices. Many of the solvers in this file can solve for decoders or weight matrices, depending on whether the post-population encoders `E` are provided (see below). Solvers that are only intended to solve for either decoders or weights can remove the `E` parameter or make it manditory as they see fit. """ import collections import logging import numpy as np from nengo.params import Parameter import nengo.utils.numpy as npext from nengo.utils.compat import range, with_metaclass, iteritems from nengo.utils.magic import DocstringInheritor logger = logging.getLogger(__name__) def _rmses(A, X, Y): """Returns the root-mean-squared error (RMSE) of the solution X.""" return npext.rms(Y - np.dot(A, X), axis=0) def cholesky(A, y, sigma, transpose=None): """Solve the least-squares system using the Cholesky decomposition.""" m, n = A.shape if transpose is None: # transpose if matrix is fat, but not if we have sigmas for each neuron transpose = m < n and sigma.size == 1 if transpose: # substitution: x = A'*xbar, G*xbar = b where G = A*A' + lambda*I G = np.dot(A, A.T) b = y else: # multiplication by A': G*x = A'*b where G = A'*A + lambda*I G = np.dot(A.T, A) b = np.dot(A.T, y) # add L2 regularization term 'lambda' = m * sigma**2 np.fill_diagonal(G, G.diagonal() + m * sigma**2) try: import scipy.linalg factor = scipy.linalg.cho_factor(G, overwrite_a=True) x = scipy.linalg.cho_solve(factor, b) except ImportError: L = np.linalg.cholesky(G) L = np.linalg.inv(L.T) x = np.dot(L, np.dot(L.T, b)) x = np.dot(A.T, x) if transpose else x info = {'rmses': _rmses(A, x, y)} return x, info def conjgrad_scipy(A, Y, sigma, tol=1e-4): """Solve the least-squares system using Scipy's conjugate gradient.""" import scipy.sparse.linalg Y, m, n, d, matrix_in = _format_system(A, Y) damp = m * sigma**2 calcAA = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x G = scipy.sparse.linalg.LinearOperator( (n, n), matvec=calcAA, matmat=calcAA, dtype=A.dtype) B = np.dot(A.T, Y) X = np.zeros((n, d), dtype=B.dtype) infos = np.zeros(d, dtype='int') itns = np.zeros(d, dtype='int') for i in range(d): def callback(x): itns[i] += 1 # use the callback to count the number of iterations X[:, i], infos[i] = scipy.sparse.linalg.cg( G, B[:, i], tol=tol, callback=callback) info = {'rmses': _rmses(A, X, Y), 'iterations': itns, 'info': infos} return X if matrix_in else X.flatten(), info def lsmr_scipy(A, Y, sigma, tol=1e-4): """Solve the least-squares system using Scipy's LSMR.""" import scipy.sparse.linalg Y, m, n, d, matrix_in = _format_system(A, Y) damp = sigma * np.sqrt(m) X = np.zeros((n, d), dtype=Y.dtype) itns = np.zeros(d, dtype='int') for i in range(d): X[:, i], _, itns[i], _, _, _, _, _ = scipy.sparse.linalg.lsmr( A, Y[:, i], damp=damp, atol=tol, btol=tol) info = {'rmses': _rmses(A, X, Y), 'iterations': itns} return X if matrix_in else X.flatten(), info def _conjgrad_iters(calcAx, b, x, maxiters=None, rtol=1e-6): """Solve the single-RHS linear system using conjugate gradient.""" if maxiters is None: maxiters = b.shape[0] r = b - calcAx(x) p = r.copy() rsold = np.dot(r, r) for i in range(maxiters): Ap = calcAx(p) alpha = rsold / np.dot(p, Ap) x += alpha * p r -= alpha * Ap rsnew = np.dot(r, r) beta = rsnew / rsold if np.sqrt(rsnew) < rtol: break if beta < 1e-12: # no perceptible change in p break # p = r + beta*p p *= beta p += r rsold = rsnew return x, i+1 def conjgrad(A, Y, sigma, X0=None, maxiters=None, tol=1e-2): """Solve the least-squares system using conjugate gradient.""" Y, 
m, n, d, matrix_in = _format_system(A, Y)

    damp = m * sigma**2
    rtol = tol * np.sqrt(m)
    G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x
    B = np.dot(A.T, Y)

    X = np.zeros((n, d)) if X0 is None else np.array(X0).reshape((n, d))
    iters = -np.ones(d, dtype='int')
    for i in range(d):
        X[:, i], iters[i] = _conjgrad_iters(
            G, B[:, i], X[:, i], maxiters=maxiters, rtol=rtol)

    info = {'rmses': _rmses(A, X, Y), 'iterations': iters}
    return X if matrix_in else X.flatten(), info


def block_conjgrad(A, Y, sigma, X0=None, tol=1e-2):
    """Solve a multiple-RHS least-squares system using block conjugate gradient.
    """
    Y, m, n, d, matrix_in = _format_system(A, Y)
    sigma = np.asarray(sigma, dtype='float')
    sigma = sigma.reshape(sigma.size, 1)

    damp = m * sigma**2
    rtol = tol * np.sqrt(m)
    G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x
    B = np.dot(A.T, Y)

    # --- conjugate gradient
    X = np.zeros((n, d)) if X0 is None else np.array(X0).reshape((n, d))
    R = B - G(X)
    P = np.array(R)
    Rsold = np.dot(R.T, R)
    AP = np.zeros((n, d))

    maxiters = int(n / d)
    for i in range(maxiters):
        AP = G(P)
        alpha = np.linalg.solve(np.dot(P.T, AP), Rsold)
        X += np.dot(P, alpha)
        R -= np.dot(AP, alpha)

        Rsnew = np.dot(R.T, R)
        if (np.diag(Rsnew) < rtol**2).all():
            break

        beta = np.linalg.solve(Rsold, Rsnew)
        P = R + np.dot(P, beta)
        Rsold = Rsnew

    info = {'rmses': _rmses(A, X, Y), 'iterations': i + 1}
    return X if matrix_in else X.flatten(), info


def _format_system(A, Y):
    m, n = A.shape
    matrix_in = Y.ndim > 1
    d = Y.shape[1] if matrix_in else 1
    Y = Y.reshape((Y.shape[0], d))
    return Y, m, n, d, matrix_in


class Solver(with_metaclass(DocstringInheritor)):
    """
    Decoder or weight solver.
    """

    def __call__(self, A, Y, rng=None, E=None):
        """Call the solver.

        Parameters
        ----------
        A : array_like (M, N)
            Matrix of the N neurons' activities at the M evaluation points
        Y : array_like (M, D)
            Matrix of the target decoded values for each of the D dimensions,
            at each of the M evaluation points.
        rng : numpy.RandomState, optional
            A random number generator to use as required. If none is provided,
            numpy.random will be used.
        E : array_like (D, N2), optional
            Array of post-population encoders. Providing this tells the solver
            to return an array of connection weights rather than decoders.

        Returns
        -------
        X : np.ndarray (N, D) or (N, N2)
            (N, D) array of decoders (if solver.weights == False)
            or (N, N2) array of weights (if solver.weights == True).
        info : dict
            A dictionary of information about the solve. All dictionaries have
            an 'rmses' key that contains RMS errors of the solve. Other keys
            are unique to particular solvers.
        """
        raise NotImplementedError("Solvers must implement '__call__'")

    def mul_encoders(self, Y, E, copy=False):
        if self.weights:
            if E is None:
                raise ValueError("Encoders must be provided for weight solver")
            return np.dot(Y, E)
        else:
            if E is not None:
                raise ValueError("Encoders must be 'None' for decoder solver")

        return Y.copy() if copy else Y

    def __hash__(self):
        items = list(self.__dict__.items())
        items.sort(key=lambda item: item[0])

        hashes = []
        for k, v in items:
            if isinstance(v, np.ndarray):
                if v.size < 1e5:
                    a = v[:]
                    a.setflags(write=False)
                    hashes.append(hash(a))
                else:
elif isinstance(v, collections.Iterable): hashes.append(hash(tuple(v))) elif isinstance(v, collections.Callable): hashes.append(hash(v.__code__)) else: hashes.append(hash(v)) return hash(tuple(hashes)) def __str__(self): return "%s(%s)" % ( self.__class__.__name__, ', '.join("%s=%s" % (k, v) for k, v in iteritems(self.__dict__))) class Lstsq(Solver): """Unregularized least-squares""" def __init__(self, weights=False, rcond=0.01): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. rcond : float, optional Cut-off ratio for small singular values (see `numpy.linalg.lstsq`). """ self.rcond = rcond self.weights = weights def __call__(self, A, Y, rng=None, E=None): Y = self.mul_encoders(Y, E) X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=self.rcond) return X, {'rmses': _rmses(A, X, Y), 'residuals': np.sqrt(residuals2), 'rank': rank, 'singular_values': s} class _LstsqNoiseSolver(Solver): """Base for least-squares solvers with noise""" def __init__(self, weights=False, noise=0.1, solver=cholesky, **kwargs): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. noise : float, optional Amount of noise, as a fraction of the neuron activity. solver : callable, optional Subsolver to use for solving the least-squares problem. kwargs Additional arguments passed to `solver`. """ self.weights = weights self.noise = noise self.solver = solver self.kwargs = kwargs class LstsqNoise(_LstsqNoiseSolver): """Least-squares with additive Gaussian white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng sigma = self.noise * A.max() A = A + rng.normal(scale=sigma, size=A.shape) X, info = self.solver(A, Y, 0, **self.kwargs) return self.mul_encoders(X, E), info class LstsqMultNoise(_LstsqNoiseSolver): """Least-squares with multiplicative white noise.""" def __call__(self, A, Y, rng=None, E=None): rng = np.random if rng is None else rng A = A + rng.normal(scale=self.noise, size=A.shape) * A X, info = self.solver(A, Y, 0, **self.kwargs) return self.mul_encoders(X, E), info class _LstsqL2Solver(Solver): """Base for L2-regularized least-squares solvers""" def __init__(self, weights=False, reg=0.1, solver=cholesky, **kwargs): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. reg : float, optional Amount of regularization, as a fraction of the neuron activity. solver : callable, optional Subsolver to use for solving the least-squares problem. kwargs Additional arguments passed to `solver`. """ self.weights = weights self.reg = reg self.solver = solver self.kwargs = kwargs class LstsqL2(_LstsqL2Solver): """Least-squares with L2 regularization.""" def __call__(self, A, Y, rng=None, E=None): sigma = self.reg * A.max() X, info = self.solver(A, Y, sigma, **self.kwargs) return self.mul_encoders(X, E), info class LstsqL2nz(_LstsqL2Solver): """Least-squares with L2 regularization on non-zero components.""" def __call__(self, A, Y, rng=None, E=None): # Compute the equivalent noise standard deviation. This equals the # base amplitude (noise_amp times the overall max activation) times # the square-root of the fraction of non-zero components. sigma = (self.reg * A.max()) * np.sqrt((A > 0).mean(axis=0)) # sigma == 0 means the neuron is never active, so won't be used, but # we have to make sigma != 0 for numeric reasons. 
sigma[sigma == 0] = sigma.max() X, info = self.solver(A, Y, sigma, **self.kwargs) return self.mul_encoders(X, E), info class LstsqL1(Solver): """Least-squares with L1 and L2 regularization (elastic net). This method is well suited for creating sparse decoders or weight matrices. """ def __init__(self, weights=False, l1=1e-4, l2=1e-6): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. l1 : float, optional Amount of L1 regularization. l2 : float, optional Amount of L2 regularization. """ import sklearn.linear_model # noqa F401, import to check existence assert sklearn.linear_model self.weights = weights self.l1 = l1 self.l2 = l2 def __call__(self, A, Y, rng=None, E=None): import sklearn.linear_model Y = self.mul_encoders(Y, E, copy=True) # copy since 'fit' may modify Y # TODO: play around with regularization constants (I just guessed). # Do we need to scale regularization by number of neurons, to get # same level of sparsity? esp. with weights? Currently, setting # l1=1e-3 works well with weights when connecting 1D populations # with 100 neurons each. a = self.l1 * A.max() # L1 regularization b = self.l2 * A.max()**2 # L2 regularization alpha = a + b l1_ratio = a / (a + b) # --- solve least-squares A * X = Y model = sklearn.linear_model.ElasticNet( alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False, max_iter=1000) model.fit(A, Y) X = model.coef_.T X.shape = (A.shape[1], Y.shape[1]) if Y.ndim > 1 else (A.shape[1],) infos = {'rmses': _rmses(A, X, Y)} return X, infos class LstsqDrop(Solver): """Find sparser decoders/weights by dropping small values. This solver first solves for coefficients (decoders/weights) with L2 regularization, drops those nearest to zero, and retrains remaining. """ def __init__(self, weights=False, drop=0.25, solver1=LstsqL2nz(reg=0.1), solver2=LstsqL2nz(reg=0.01)): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. drop : float, optional Fraction of decoders or weights to set to zero. solver1 : Solver, optional Solver for finding the initial decoders. solver2 : Solver, optional Used for re-solving for the decoders after dropout. """ self.weights = weights self.drop = drop self.solver1 = solver1 self.solver2 = solver2 def __call__(self, A, Y, rng=None, E=None): Y, m, n, d, matrix_in = _format_system(A, Y) # solve for coefficients using standard solver X, info0 = self.solver1(A, Y, rng=rng) X = self.mul_encoders(X, E) # drop weights close to zero, based on `drop` ratio Xabs = np.sort(np.abs(X.flat)) threshold = Xabs[int(np.round(self.drop * Xabs.size))] X[np.abs(X) < threshold] = 0 # retrain nonzero weights Y = self.mul_encoders(Y, E) for i in range(X.shape[1]): nonzero = X[:, i] != 0 if nonzero.sum() > 0: X[nonzero, i], info1 = self.solver2( A[:, nonzero], Y[:, i], rng=rng) info = {'rmses': _rmses(A, X, Y), 'info0': info0, 'info1': info1} return X if matrix_in else X.flatten(), info class Nnls(Solver): """Non-negative least-squares without regularization. Similar to `lstsq`, except the output values are non-negative. """ def __init__(self, weights=False): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. 
""" import scipy.optimize # import here too to throw error early assert scipy.optimize self.weights = weights def __call__(self, A, Y, rng=None, E=None): import scipy.optimize Y, m, n, d, matrix_in = _format_system(A, Y) Y = self.mul_encoders(Y, E) X = np.zeros((n, d)) residuals = np.zeros(d) for i in range(d): X[:, i], residuals[i] = scipy.optimize.nnls(A, Y[:, i]) info = {'rmses': _rmses(A, X, Y), 'residuals': residuals} return X if matrix_in else X.flatten(), info class NnlsL2(Nnls): """Non-negative least-squares with L2 regularization. Similar to `lstsq_L2`, except the output values are non-negative. """ def __init__(self, weights=False, reg=0.1): """ weights : boolean, optional If false solve for decoders (default), otherwise solve for weights. reg : float, optional Amount of regularization, as a fraction of the neuron activity. """ super(NnlsL2, self).__init__(weights) self.reg = reg def _solve(self, A, Y, rng, E, sigma): # form Gram matrix so we can add regularization GA = np.dot(A.T, A) GY = np.dot(A.T, Y) np.fill_diagonal(GA, GA.diagonal() + A.shape[0] * sigma**2) X, info = super(NnlsL2, self).__call__(GA, GY, rng=rng, E=E) # recompute the RMSE in terms of the original matrices info = {'rmses': _rmses(A, X, Y), 'gram_info': info} return X, info def __call__(self, A, Y, rng=None, E=None): return self._solve(A, Y, rng, E, sigma=self.reg * A.max()) class NnlsL2nz(NnlsL2): """Non-negative least-squares with L2 regularization on nonzero components. Similar to `lstsq_L2nz`, except the output values are non-negative. """ def __call__(self, A, Y, rng=None, E=None): sigma = (self.reg * A.max()) * np.sqrt((A > 0).mean(axis=0)) sigma[sigma == 0] = 1 return self._solve(A, Y, rng, E, sigma=sigma) class SolverParam(Parameter): def validate(self, instance, solver): if solver is not None and not isinstance(solver, Solver): raise ValueError("'%s' is not a solver" % solver) super(SolverParam, self).validate(instance, solver)
raise ValueError("array is too large to hash")
conditional_block
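The Nnls solver in these rows calls scipy.optimize.nnls once per output dimension. A standalone sketch of that call on synthetic data:

    import numpy as np
    import scipy.optimize

    A = np.abs(np.random.randn(50, 10))      # activity matrix (M x N)
    y = np.abs(np.random.randn(50))          # one target dimension
    x, residual = scipy.optimize.nnls(A, y)  # x is elementwise non-negative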
models.py
from django.db import models
from django.db import transaction
from django.shortcuts import get_object_or_404

import datetime
import time
from django.utils import timezone

import backoff
import dramatiq

from django.utils.encoding import smart_text as smart_unicode
from django.utils.translation import ugettext_lazy as _

from rss_feeder_api.constants import ENTRY_UNREAD, ENTRY_READ
from rss_feeder_api import managers
from django.core.validators import URLValidator

import feedparser

from rss_feeder.settings import MAX_FEED_UPDATE_RETRIES

import json


def backoff_hdlr(details):
    print("Backing off {wait:0.1f} seconds after {tries} tries "
          "calling function {target} with args {args} and kwargs "
          "{kwargs}".format(**details))
    feed = details['args'][0]
    wait = details['wait']
    notification = Notification(feed=feed, owner=feed.owner, title='BackOff',
                                message=f'Feed: {feed.id}, {feed.link} failed to update, retrying in {wait:0.1f}',
                                is_error=True)
    notification.save()


@dramatiq.actor
def feed_update_failure(message_data, exception_data):
    """
    A dramatiq callback for a failed feed update attempt.

    The user will be notified by inserting a notification in the db only on
    the final failure.

    TODO: log all errors to somewhere for metrics and analysis
    """
    feed_id = message_data['args'][0]
    feed = Feed.objects.get(pk=feed_id)

    # mark feed as failed to update and stop updating it automatically
    feed.flagged = True
    feed.save()

    notification = Notification(feed=feed, owner=feed.owner,
                                title=exception_data['type'],
                                message=exception_data['message'] + f'[Feed: {feed.id}, {feed.link}]',
                                is_error=True)
    notification.save()
    print("dramatiq callback: feed update error")


@dramatiq.actor
def feed_update_success(message_data, result):
    """
    A dramatiq callback on a successful feed update attempt.

    The user will be notified by inserting a notification in the db, and the
    feed is marked as not flagged.

    TODO: maybe log this also for checking failure/success rates?
    """
    feed_id = message_data['args'][0]
    feed = Feed.objects.get(pk=feed_id)
    feed.flagged = False
    feed.save()

    notification = Notification(feed=feed, owner=feed.owner, title='FeedUpdated',
                                message=f'Feed: {feed.id}, {feed.link}, {feed.updated_at}',
                                is_error=False)
    notification.save()
    print("dramatiq callback: feed update success")


# Exceptions #################################################
class FeedError(Exception):
    """
    An error occurred when fetching the feed

    If it was parsed despite the error, the feed and entries will be available:
        e.feed      None if not parsed
        e.entries   Empty list if not parsed
    """
    def __init__(self, *args, **kwargs):
        super(FeedError, self).__init__(*args, **kwargs)

# End: Exceptions #################################################

# Feed #################################################
class Feed(models.Model):
    '''
    The feeds model describes a registered feed.
It contains feed-related information as well as user-related info and
    other metadata.
    '''
    link = models.URLField(max_length=200)
    title = models.CharField(max_length=200, null=True)
    subtitle = models.CharField(max_length=200, null=True)
    description = models.TextField(null=True)
    language = models.CharField(max_length=5, null=True)
    copyright = models.CharField(max_length=50, null=True)
    ttl = models.PositiveIntegerField(null=True)
    atomLogo = models.URLField(max_length=200, null=True)
    pubdate = models.DateTimeField(null=True)

    nickname = models.CharField(max_length=60)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    following = models.BooleanField(default=True)
    flagged = models.BooleanField(default=False)
    owner = models.ForeignKey('auth.User', related_name='feeds', on_delete=models.CASCADE)

    class Meta:
        verbose_name = ("Feed")
        verbose_name_plural = ("Feeds")
        ordering = ('-updated_at',)
        unique_together = ('link', 'owner')

    objects = managers.FeedManager()

    def __str__(self):
        return f'Nickname: {self.nickname}'

    def save(self, *args, **kwargs):
        # assure minimum required fields
        assert self.link
        assert self.nickname

        super(Feed, self).save(*args, **kwargs)
        assert self.id > 0
        return

    def force_update(self, *args, **kwargs):
        '''
        force updates a feed using an async call to the _updateFeed method
        '''
        print(f'Forcing update [Feed ID: {self.id}, Nickname: {self.nickname}] ...')
        self._updateFeed.send_with_options(args=(self.id,),
                                           on_failure=feed_update_failure,
                                           on_success=feed_update_success)
        return

    @backoff.on_exception(backoff.expo, FeedError, max_tries=MAX_FEED_UPDATE_RETRIES, on_backoff=backoff_hdlr)
    def _fetch_feed(self):
        '''
        internal method to get feed details from the link provided in self

        returns raw feed and entry details as returned by the feedparser library
        '''
        # Request and parse the feed
        link = self.link
        d = feedparser.parse(link)
        status = d.get('status', 200)
        feed = d.get('feed', None)
        entries = d.get('entries', None)

        if status in (200, 302, 304, 307):
            if (
                feed is None
                or 'title' not in feed
                or 'link' not in feed
            ):
                raise FeedError('Feed parsed but with invalid contents')
            return feed, entries

        if status in (404, 500, 502, 503, 504):
            raise FeedError('Temporary error %s' % status)

        # Follow permanent redirection
        if status == 301:
            # Avoid circular redirection
            self.link = d.get('href', self.link)
            return self._fetch_feed()

        if status == 410:
            raise FeedError('Feed has gone')

        # Unknown status
        raise FeedError('Unrecognised HTTP status %s' % status)

    @dramatiq.actor(max_retries=0, max_age=10000)  # , throws=FeedError)
    @transaction.atomic
    def _updateFeed(pk):
        """
        An internal function that fetches a feed and parses it into
        the Feed object for the DB
        """
        feed = get_object_or_404(Feed, pk=pk)

        rawFeed, entries = feed._fetch_feed()

        feed.title = rawFeed.get('title', None)
        feed.subtitle = rawFeed.get('subtitle', None)
        feed.copyright = rawFeed.get('rights', None)
        feed.ttl = rawFeed.get('ttl', None)
        feed.atomLogo = rawFeed.get('logo', None)

        # Try to find the updated time
        updated = rawFeed.get(
            'updated_parsed',
            rawFeed.get('published_parsed', None),
        )
        if updated:
            updated = datetime.datetime.fromtimestamp(
                time.mktime(updated)
            )
        feed.pubdate = updated

        super(Feed, feed).save()

        if entries:
            dbEntriesCreate = []
            dbEntriesupdate = []
            for raw_entry in entries:
                entry = Entry.objects.parseFromFeed(raw_entry)
                entry.feed = feed

                try:
                    newEntry = Entry.objects.get(guid=entry.guid, feed=feed)
                except Entry.DoesNotExist:
                    newEntry = None

                if newEntry:
                    # if it was updated, then mark it as unread,
# otherwise no need to do anything
                    if newEntry.date > entry.date:
                        entry.state = ENTRY_UNREAD
                    id = newEntry.id
                    newEntry = entry
                    newEntry.id = id
                    dbEntriesupdate.append(newEntry)
                else:
with transaction.atomic():
                if len(dbEntriesCreate) > 0:
                    Entry.objects.bulk_create(dbEntriesCreate)

                if len(dbEntriesupdate) > 0:
                    fields = ['feed', 'state', 'title', 'content',
                              'date', 'author', 'url', 'comments_url']
                    Entry.objects.bulk_update(dbEntriesupdate, fields)

        return


# Entry #################################################
class Entry(models.Model):
    """
    Represents a feed entry object

    If creating from a feedparser entry, use Entry.objects.parseFromFeed()
    """
    feed = models.ForeignKey(Feed, related_name='feed', on_delete=models.CASCADE)

    state = models.IntegerField(default=ENTRY_UNREAD, choices=(
        (ENTRY_UNREAD, 'Unread'),
        (ENTRY_READ, 'Read'),
    ))

    # Compulsory data fields
    title = models.TextField(blank=True)
    content = models.TextField(blank=True)
    date = models.DateTimeField(
        help_text="When this entry says it was published",
    )

    # Optional data fields
    author = models.TextField(blank=True)
    url = models.TextField(
        blank=True,
        validators=[URLValidator()],
        help_text="URL for the HTML for this entry",
    )
    comments_url = models.TextField(
        blank=True,
        validators=[URLValidator()],
        help_text="URL for HTML comment submission page",
    )
    guid = models.TextField(
        blank=True,
        help_text="GUID for the entry, according to the feed",
    )
    last_updated = models.DateTimeField(auto_now=True)

    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    objects = managers.EntryManager()

    def __unicode__(self):
        return self.title

    def save(self, *args, **kwargs):
        # Default the date
        if self.date is None:
            self.date = datetime.datetime.now()

        # Save
        super(Entry, self).save(*args, **kwargs)

    class Meta:
        ordering = ('-updated_at',)
        verbose_name_plural = 'entries'
        # two users can have the same feed but one might force update and the other
        # wants to keep the old version, so make it unique even though it makes entries redundant
        unique_together = ['feed', 'guid']


# Notification
class Notification(models.Model):
    '''
    Notifications for users.

    Currently used for feed update success/failure events
    '''
    owner = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    feed = models.ForeignKey(Feed, on_delete=models.CASCADE)

    state = models.IntegerField(default=ENTRY_UNREAD, choices=(
        (ENTRY_UNREAD, 'Unread'),
        (ENTRY_READ, 'Read'),
    ))

    title = models.CharField(max_length=200, null=True)
    message = models.CharField(max_length=200, null=True)

    is_error = models.BooleanField(default=False)

    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = ("Notification")
        verbose_name_plural = ("Notifications")
        ordering = ('-updated_at',)

    def __unicode__(self):
        return self.title

    def __str__(self):
        return f'feed: {self.feed}, owner: {self.owner}'
dbEntriesCreate.append(entry)
conditional_block
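The _fetch_feed method in the models.py rows retries transient failures via the backoff library. A minimal isolated sketch of that decorator pattern; FetchError, the retry count, and the handler are placeholders rather than values from the source:

    import backoff

    class FetchError(Exception):
        pass

    def log_backoff(details):
        # details includes 'tries' and 'wait' for the pending retry
        print("retry {tries}, waiting {wait:0.1f}s".format(**details))

    @backoff.on_exception(backoff.expo, FetchError, max_tries=3, on_backoff=log_backoff)
    def fetch():
        raise FetchError("simulated transient failure")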
models.py
from django.db import models
from django.db import transaction
from django.shortcuts import get_object_or_404

import datetime
import time
from django.utils import timezone

import backoff
import dramatiq

from django.utils.encoding import smart_text as smart_unicode
from django.utils.translation import ugettext_lazy as _

from rss_feeder_api.constants import ENTRY_UNREAD, ENTRY_READ
from rss_feeder_api import managers
from django.core.validators import URLValidator

import feedparser

from rss_feeder.settings import MAX_FEED_UPDATE_RETRIES

import json


def backoff_hdlr(details):
    print("Backing off {wait:0.1f} seconds after {tries} tries "
          "calling function {target} with args {args} and kwargs "
          "{kwargs}".format(**details))
    feed = details['args'][0]
    wait = details['wait']
    notification = Notification(feed=feed, owner=feed.owner, title='BackOff',
                                message=f'Feed: {feed.id}, {feed.link} failed to update, retrying in {wait:0.1f}',
                                is_error=True)
    notification.save()


@dramatiq.actor
def feed_update_failure(message_data, exception_data):
    """
    A dramatiq callback for a failed feed update attempt.

    The user will be notified by inserting a notification in the db only on
    the final failure.

    TODO: log all errors to somewhere for metrics and analysis
    """
    feed_id = message_data['args'][0]
    feed = Feed.objects.get(pk=feed_id)

    # mark feed as failed to update and stop updating it automatically
    feed.flagged = True
    feed.save()

    notification = Notification(feed=feed, owner=feed.owner,
                                title=exception_data['type'],
                                message=exception_data['message'] + f'[Feed: {feed.id}, {feed.link}]',
                                is_error=True)
    notification.save()
    print("dramatiq callback: feed update error")


@dramatiq.actor
def feed_update_success(message_data, result):
    """
    A dramatiq callback on a successful feed update attempt.

    The user will be notified by inserting a notification in the db, and the
    feed is marked as not flagged.

    TODO: maybe log this also for checking failure/success rates?
    """
    feed_id = message_data['args'][0]
    feed = Feed.objects.get(pk=feed_id)
    feed.flagged = False
    feed.save()

    notification = Notification(feed=feed, owner=feed.owner, title='FeedUpdated',
                                message=f'Feed: {feed.id}, {feed.link}, {feed.updated_at}',
                                is_error=False)
    notification.save()
    print("dramatiq callback: feed update success")


# Exceptions #################################################
class FeedError(Exception):
    """
    An error occurred when fetching the feed

    If it was parsed despite the error, the feed and entries will be available:
        e.feed      None if not parsed
        e.entries   Empty list if not parsed
    """
    def __init__(self, *args, **kwargs):
        super(FeedError, self).__init__(*args, **kwargs)

# End: Exceptions #################################################

# Feed #################################################
class Feed(models.Model):
    '''
    The feeds model describes a registered feed.
It contains feed-related information as well as user-related info and
    other metadata.
    '''
    link = models.URLField(max_length=200)
    title = models.CharField(max_length=200, null=True)
    subtitle = models.CharField(max_length=200, null=True)
    description = models.TextField(null=True)
    language = models.CharField(max_length=5, null=True)
    copyright = models.CharField(max_length=50, null=True)
    ttl = models.PositiveIntegerField(null=True)
    atomLogo = models.URLField(max_length=200, null=True)
    pubdate = models.DateTimeField(null=True)

    nickname = models.CharField(max_length=60)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    following = models.BooleanField(default=True)
    flagged = models.BooleanField(default=False)
    owner = models.ForeignKey('auth.User', related_name='feeds', on_delete=models.CASCADE)

    class Meta:
        verbose_name = ("Feed")
        verbose_name_plural = ("Feeds")
        ordering = ('-updated_at',)
        unique_together = ('link', 'owner')

    objects = managers.FeedManager()

    def __str__(self):
        return f'Nickname: {self.nickname}'

    def save(self, *args, **kwargs):
        # assure minimum required fields
        assert self.link
        assert self.nickname

        super(Feed, self).save(*args, **kwargs)
        assert self.id > 0
        return

    def force_update(self, *args, **kwargs):
        '''
        force updates a feed using an async call to the _updateFeed method
        '''
        print(f'Forcing update [Feed ID: {self.id}, Nickname: {self.nickname}] ...')
        self._updateFeed.send_with_options(args=(self.id,),
                                           on_failure=feed_update_failure,
                                           on_success=feed_update_success)
        return

    @backoff.on_exception(backoff.expo, FeedError, max_tries=MAX_FEED_UPDATE_RETRIES, on_backoff=backoff_hdlr)
    def _fetch_feed(self):
        '''
        internal method to get feed details from the link provided in self

        returns raw feed and entry details as returned by the feedparser library
        '''
        # Request and parse the feed
        link = self.link
        d = feedparser.parse(link)
        status = d.get('status', 200)
        feed = d.get('feed', None)
        entries = d.get('entries', None)

        if status in (200, 302, 304, 307):
            if (
                feed is None
                or 'title' not in feed
                or 'link' not in feed
            ):
                raise FeedError('Feed parsed but with invalid contents')
            return feed, entries

        if status in (404, 500, 502, 503, 504):
            raise FeedError('Temporary error %s' % status)

        # Follow permanent redirection
        if status == 301:
            # Avoid circular redirection
            self.link = d.get('href', self.link)
            return self._fetch_feed()

        if status == 410:
            raise FeedError('Feed has gone')

        # Unknown status
        raise FeedError('Unrecognised HTTP status %s' % status)

    @dramatiq.actor(max_retries=0, max_age=10000)  # , throws=FeedError)
    @transaction.atomic
    def _updateFeed(pk):
        """
        An internal function that fetches a feed and parses it into
        the Feed object for the DB
        """
        feed = get_object_or_404(Feed, pk=pk)

        rawFeed, entries = feed._fetch_feed()

        feed.title = rawFeed.get('title', None)
        feed.subtitle = rawFeed.get('subtitle', None)
        feed.copyright = rawFeed.get('rights', None)
        feed.ttl = rawFeed.get('ttl', None)
        feed.atomLogo = rawFeed.get('logo', None)

        # Try to find the updated time
        updated = rawFeed.get(
            'updated_parsed',
            rawFeed.get('published_parsed', None),
        )
        if updated:
            updated = datetime.datetime.fromtimestamp(
                time.mktime(updated)
            )
        feed.pubdate = updated

        super(Feed, feed).save()

        if entries:
            dbEntriesCreate = []
            dbEntriesupdate = []
            for raw_entry in entries:
                entry = Entry.objects.parseFromFeed(raw_entry)
                entry.feed = feed

                try:
                    newEntry = Entry.objects.get(guid=entry.guid, feed=feed)
                except Entry.DoesNotExist:
                    newEntry = None

                if newEntry:
                    # if it was updated, then mark it as unread,
# otherwise no need to do anything
                    if newEntry.date > entry.date:
                        entry.state = ENTRY_UNREAD
                    id = newEntry.id
                    newEntry = entry
                    newEntry.id = id
                    dbEntriesupdate.append(newEntry)
                else:
                    dbEntriesCreate.append(entry)

            with transaction.atomic():
                if len(dbEntriesCreate) > 0:
                    Entry.objects.bulk_create(dbEntriesCreate)

                if len(dbEntriesupdate) > 0:
                    fields = ['feed', 'state', 'title', 'content',
                              'date', 'author', 'url', 'comments_url']
                    Entry.objects.bulk_update(dbEntriesupdate, fields)

        return


# Entry #################################################
class Entry(models.Model):
    """
    Represents a feed entry object

    If creating from a feedparser entry, use Entry.objects.parseFromFeed()
    """
    feed = models.ForeignKey(Feed, related_name='feed', on_delete=models.CASCADE)

    state = models.IntegerField(default=ENTRY_UNREAD, choices=(
        (ENTRY_UNREAD, 'Unread'),
        (ENTRY_READ, 'Read'),
    ))

    # Compulsory data fields
    title = models.TextField(blank=True)
    content = models.TextField(blank=True)
    date = models.DateTimeField(
        help_text="When this entry says it was published",
    )

    # Optional data fields
    author = models.TextField(blank=True)
    url = models.TextField(
)
    comments_url = models.TextField(
        blank=True,
        validators=[URLValidator()],
        help_text="URL for HTML comment submission page",
    )
    guid = models.TextField(
        blank=True,
        help_text="GUID for the entry, according to the feed",
    )
    last_updated = models.DateTimeField(auto_now=True)

    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    objects = managers.EntryManager()

    def __unicode__(self):
        return self.title

    def save(self, *args, **kwargs):
        # Default the date
        if self.date is None:
            self.date = datetime.datetime.now()

        # Save
        super(Entry, self).save(*args, **kwargs)

    class Meta:
        ordering = ('-updated_at',)
        verbose_name_plural = 'entries'
        # two users can have the same feed but one might force update and the other
        # wants to keep the old version, so make it unique even though it makes entries redundant
        unique_together = ['feed', 'guid']


# Notification
class Notification(models.Model):
    '''
    Notifications for users.

    Currently used for feed update success/failure events
    '''
    owner = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    feed = models.ForeignKey(Feed, on_delete=models.CASCADE)

    state = models.IntegerField(default=ENTRY_UNREAD, choices=(
        (ENTRY_UNREAD, 'Unread'),
        (ENTRY_READ, 'Read'),
    ))

    title = models.CharField(max_length=200, null=True)
    message = models.CharField(max_length=200, null=True)

    is_error = models.BooleanField(default=False)

    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = ("Notification")
        verbose_name_plural = ("Notifications")
        ordering = ('-updated_at',)

    def __unicode__(self):
        return self.title

    def __str__(self):
        return f'feed: {self.feed}, owner: {self.owner}'
blank=True, validators=[URLValidator()], help_text="URL for the HTML for this entry",
random_line_split
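_fetch_feed wraps feedparser and branches on the HTTP status. A minimal sketch of the same status/feed/entries handling outside Django; the URL is illustrative:

    import feedparser

    d = feedparser.parse("https://example.com/rss.xml")
    status = d.get('status', 200)
    feed = d.get('feed', None)
    entries = d.get('entries', [])
    if status == 200 and feed is not None and 'title' in feed:
        print(feed['title'], len(entries))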
models.py
from django.db import models
from django.db import transaction
from django.shortcuts import get_object_or_404

import datetime
import time
from django.utils import timezone

import backoff
import dramatiq

from django.utils.encoding import smart_text as smart_unicode
from django.utils.translation import ugettext_lazy as _

from rss_feeder_api.constants import ENTRY_UNREAD, ENTRY_READ
from rss_feeder_api import managers
from django.core.validators import URLValidator

import feedparser

from rss_feeder.settings import MAX_FEED_UPDATE_RETRIES

import json


def backoff_hdlr(details):
    print("Backing off {wait:0.1f} seconds after {tries} tries "
          "calling function {target} with args {args} and kwargs "
          "{kwargs}".format(**details))
    feed = details['args'][0]
    wait = details['wait']
    notification = Notification(feed=feed, owner=feed.owner, title='BackOff',
                                message=f'Feed: {feed.id}, {feed.link} failed to update, retrying in {wait:0.1f}',
                                is_error=True)
    notification.save()


@dramatiq.actor
def feed_update_failure(message_data, exception_data):
    """
    A dramatiq callback for a failed feed update attempt.

    The user will be notified by inserting a notification in the db only on
    the final failure.

    TODO: log all errors to somewhere for metrics and analysis
    """
    feed_id = message_data['args'][0]
    feed = Feed.objects.get(pk=feed_id)

    # mark feed as failed to update and stop updating it automatically
    feed.flagged = True
    feed.save()

    notification = Notification(feed=feed, owner=feed.owner,
                                title=exception_data['type'],
                                message=exception_data['message'] + f'[Feed: {feed.id}, {feed.link}]',
                                is_error=True)
    notification.save()
    print("dramatiq callback: feed update error")


@dramatiq.actor
def feed_update_success(message_data, result):
    """
    A dramatiq callback on a successful feed update attempt.

    The user will be notified by inserting a notification in the db, and the
    feed is marked as not flagged.

    TODO: maybe log this also for checking failure/success rates?
    """
    feed_id = message_data['args'][0]
    feed = Feed.objects.get(pk=feed_id)
    feed.flagged = False
    feed.save()

    notification = Notification(feed=feed, owner=feed.owner, title='FeedUpdated',
                                message=f'Feed: {feed.id}, {feed.link}, {feed.updated_at}',
                                is_error=False)
    notification.save()
    print("dramatiq callback: feed update success")


# Exceptions #################################################
class FeedError(Exception):
    """
    An error occurred when fetching the feed

    If it was parsed despite the error, the feed and entries will be available:
        e.feed      None if not parsed
        e.entries   Empty list if not parsed
    """
    def __init__(self, *args, **kwargs):
        super(FeedError, self).__init__(*args, **kwargs)

# End: Exceptions #################################################

# Feed #################################################
class Feed(models.Model):
    '''
    The feeds model describes a registered feed.
It contains feed-related information as well as user-related info and
    other metadata.
    '''
    link = models.URLField(max_length=200)
    title = models.CharField(max_length=200, null=True)
    subtitle = models.CharField(max_length=200, null=True)
    description = models.TextField(null=True)
    language = models.CharField(max_length=5, null=True)
    copyright = models.CharField(max_length=50, null=True)
    ttl = models.PositiveIntegerField(null=True)
    atomLogo = models.URLField(max_length=200, null=True)
    pubdate = models.DateTimeField(null=True)

    nickname = models.CharField(max_length=60)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    following = models.BooleanField(default=True)
    flagged = models.BooleanField(default=False)
    owner = models.ForeignKey('auth.User', related_name='feeds', on_delete=models.CASCADE)

    class Meta:
        verbose_name = ("Feed")
        verbose_name_plural = ("Feeds")
        ordering = ('-updated_at',)
        unique_together = ('link', 'owner')

    objects = managers.FeedManager()

    def __str__(self):
        return f'Nickname: {self.nickname}'

    def save(self, *args, **kwargs):
        # assure minimum required fields
        assert self.link
        assert self.nickname

        super(Feed, self).save(*args, **kwargs)
        assert self.id > 0
        return

    def force_update(self, *args, **kwargs):
        '''
        force updates a feed using an async call to the _updateFeed method
        '''
        print(f'Forcing update [Feed ID: {self.id}, Nickname: {self.nickname}] ...')
        self._updateFeed.send_with_options(args=(self.id,),
                                           on_failure=feed_update_failure,
                                           on_success=feed_update_success)
        return

    @backoff.on_exception(backoff.expo, FeedError, max_tries=MAX_FEED_UPDATE_RETRIES, on_backoff=backoff_hdlr)
    def _fetch_feed(self):
        '''
        internal method to get feed details from the link provided in self

        returns raw feed and entry details as returned by the feedparser library
        '''
        # Request and parse the feed
        link = self.link
        d = feedparser.parse(link)
        status = d.get('status', 200)
        feed = d.get('feed', None)
        entries = d.get('entries', None)

        if status in (200, 302, 304, 307):
            if (
                feed is None
                or 'title' not in feed
                or 'link' not in feed
            ):
                raise FeedError('Feed parsed but with invalid contents')
            return feed, entries

        if status in (404, 500, 502, 503, 504):
            raise FeedError('Temporary error %s' % status)

        # Follow permanent redirection
        if status == 301:
            # Avoid circular redirection
            self.link = d.get('href', self.link)
            return self._fetch_feed()

        if status == 410:
            raise FeedError('Feed has gone')

        # Unknown status
        raise FeedError('Unrecognised HTTP status %s' % status)

    @dramatiq.actor(max_retries=0, max_age=10000)  # , throws=FeedError)
    @transaction.atomic
    def _updateFeed(pk):
        """
        An internal function that fetches a feed and parses it into
        the Feed object for the DB
        """
        feed = get_object_or_404(Feed, pk=pk)

        rawFeed, entries = feed._fetch_feed()

        feed.title = rawFeed.get('title', None)
        feed.subtitle = rawFeed.get('subtitle', None)
        feed.copyright = rawFeed.get('rights', None)
        feed.ttl = rawFeed.get('ttl', None)
        feed.atomLogo = rawFeed.get('logo', None)

        # Try to find the updated time
        updated = rawFeed.get(
            'updated_parsed',
            rawFeed.get('published_parsed', None),
        )
        if updated:
            updated = datetime.datetime.fromtimestamp(
                time.mktime(updated)
            )
        feed.pubdate = updated

        super(Feed, feed).save()

        if entries:
            dbEntriesCreate = []
            dbEntriesupdate = []
            for raw_entry in entries:
                entry = Entry.objects.parseFromFeed(raw_entry)
                entry.feed = feed

                try:
                    newEntry = Entry.objects.get(guid=entry.guid, feed=feed)
                except Entry.DoesNotExist:
                    newEntry = None

                if newEntry:
                    # if it was updated, then mark it as unread,
# otherwise no need to do anything
                    if newEntry.date > entry.date:
                        entry.state = ENTRY_UNREAD
                    id = newEntry.id
                    newEntry = entry
                    newEntry.id = id
                    dbEntriesupdate.append(newEntry)
                else:
                    dbEntriesCreate.append(entry)

            with transaction.atomic():
                if len(dbEntriesCreate) > 0:
                    Entry.objects.bulk_create(dbEntriesCreate)

                if len(dbEntriesupdate) > 0:
                    fields = ['feed', 'state', 'title', 'content',
                              'date', 'author', 'url', 'comments_url']
                    Entry.objects.bulk_update(dbEntriesupdate, fields)

        return


# Entry #################################################
class Entry(models.Model):
    """
    Represents a feed entry object

    If creating from a feedparser entry, use Entry.objects.parseFromFeed()
    """
    feed = models.ForeignKey(Feed, related_name='feed', on_delete=models.CASCADE)

    state = models.IntegerField(default=ENTRY_UNREAD, choices=(
        (ENTRY_UNREAD, 'Unread'),
        (ENTRY_READ, 'Read'),
    ))

    # Compulsory data fields
    title = models.TextField(blank=True)
    content = models.TextField(blank=True)
    date = models.DateTimeField(
        help_text="When this entry says it was published",
    )

    # Optional data fields
    author = models.TextField(blank=True)
    url = models.TextField(
        blank=True,
        validators=[URLValidator()],
        help_text="URL for the HTML for this entry",
    )
    comments_url = models.TextField(
        blank=True,
        validators=[URLValidator()],
        help_text="URL for HTML comment submission page",
    )
    guid = models.TextField(
        blank=True,
        help_text="GUID for the entry, according to the feed",
    )
    last_updated = models.DateTimeField(auto_now=True)

    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    objects = managers.EntryManager()

    def __unicode__(self):
        return self.title

    def save(self, *args, **kwargs):
        # Default the date
        if self.date is None:
            self.date = datetime.datetime.now()

        # Save
        super(Entry, self).save(*args, **kwargs)

    class Meta:
        ordering = ('-updated_at',)
        verbose_name_plural = 'entries'
        # two users can have the same feed but one might force update and the other
        # wants to keep the old version, so make it unique even though it makes entries redundant
        unique_together = ['feed', 'guid']


# Notification
class
(models.Model): ''' Notifications for users. Currently used for feed update success/failure events ''' owner = models.ForeignKey('auth.User', on_delete=models.CASCADE) feed = models.ForeignKey(Feed, on_delete=models.CASCADE) state = models.IntegerField(default=ENTRY_UNREAD, choices=( (ENTRY_UNREAD, 'Unread'), (ENTRY_READ, 'Read'), )) title = models.CharField(max_length=200, null=True) message = models.CharField(max_length=200, null=True) is_error = models.BooleanField(default=False) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) class Meta: verbose_name = ("Notification") verbose_name_plural = ("Notifications") ordering = ('-updated_at',) def __str__(self): return f'feed: {self.feed}, owner: {self.owner}'
Notification
identifier_name
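The _updateFeed actor in the models.py sample above splits parsed entries into a bulk-create list and a bulk-update list keyed on GUID. Below is a minimal standalone sketch of that split, using a hypothetical namedtuple in place of the project's Entry model; names and values are illustrative, and where the sample compares timestamps with ">" the sketch simply treats any date change as an update:

    from collections import namedtuple
    import datetime

    # Hypothetical stand-in for the Django Entry model, reduced to the fields the split needs.
    Entry = namedtuple('Entry', ['guid', 'date', 'state'])
    ENTRY_UNREAD = 0

    def split_entries(existing_by_guid, parsed):
        """Partition parsed feed entries into rows to insert and rows to overwrite."""
        to_create, to_update = [], []
        for entry in parsed:
            stored = existing_by_guid.get(entry.guid)
            if stored is None:
                to_create.append(entry)        # unseen GUID: goes to bulk_create
            else:
                if stored.date != entry.date:  # entry changed upstream: flag unread again
                    entry = entry._replace(state=ENTRY_UNREAD)
                to_update.append(entry)        # the real code also carries over the stored primary key
        return to_create, to_update

    # usage
    d1 = datetime.datetime(2021, 1, 1)
    d2 = datetime.datetime(2021, 2, 1)
    known = {'a': Entry('a', d1, 1)}
    create, update = split_entries(known, [Entry('a', d2, 1), Entry('b', d2, 1)])
    assert [e.guid for e in create] == ['b'] and update[0].state == ENTRY_UNREAD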
models.py
from django.db import models from django.db import transaction from django.shortcuts import get_object_or_404 import datetime import time from django.utils import timezone import backoff import dramatiq from django.utils.encoding import smart_text as smart_unicode from django.utils.translation import ugettext_lazy as _ from rss_feeder_api.constants import ENTRY_UNREAD, ENTRY_READ from rss_feeder_api import managers from django.core.validators import URLValidator import feedparser from rss_feeder.settings import MAX_FEED_UPDATE_RETRIES import json def backoff_hdlr(details): print("Backing off {wait:0.1f} seconds after {tries} tries " "calling function {target} with args {args} and kwargs " "{kwargs}".format(**details)) feed = details['args'][0] wait = details['wait'] notification = Notification(feed=feed, owner=feed.owner, title='BackOff', message=f'Feed: {feed.id}, {feed.link} failed to update, retrying in {wait:0.1f}', is_error=True) notification.save() @dramatiq.actor def feed_update_failure(message_data, exception_data): """ A dramatiq callback invoked when a feed update fails. The user will be notified by inserting a notification in the db only on the final failure. TODO: log all errors to somewhere for metrics and analysis """ feed_id = message_data['args'][0] feed = Feed.objects.get(pk=feed_id) # mark feed as failed to update and stop updating it automatically feed.flagged = True feed.save() notification = Notification(feed=feed, owner=feed.owner, title=exception_data['type'], message=exception_data['message']+f'[Feed: {feed.id}, {feed.link}]', is_error=True) notification.save() print("dramatiq callback: feed update error") @dramatiq.actor def feed_update_success(message_data, result): """ A dramatiq callback on a successful feed update. The user will be notified by inserting a notification in the db; marks the feed as not flagged. TODO: maybe log this also for checking failure/success rates? """ feed_id = message_data['args'][0] feed = Feed.objects.get(pk=feed_id) feed.flagged = False feed.save() notification = Notification(feed=feed, owner=feed.owner, title='FeedUpdated', message=f'Feed: {feed.id}, {feed.link}, {feed.updated_at}', is_error=False) notification.save() print("dramatiq callback: feed update success") # Exceptions ################################################# class FeedError(Exception): """ An error occurred when fetching the feed. If it was parsed despite the error, the feed and entries will be available: e.feed None if not parsed e.entries Empty list if not parsed """ def __init__(self, *args, **kwargs): super(FeedError, self).__init__(*args, **kwargs) # End: Exceptions ################################################# # Feed ################################################# class Feed(models.Model):
# Entry ################################################# class Entry(models.Model): """ Represents a feed entry object If creating from a feedparser entry, use Entry.objects.parseFromFeed() """ feed = models.ForeignKey(Feed, related_name='feed', on_delete=models.CASCADE) state = models.IntegerField(default=ENTRY_UNREAD, choices=( (ENTRY_UNREAD, 'Unread'), (ENTRY_READ, 'Read'), )) # Compulsory data fields title = models.TextField(blank=True) content = models.TextField(blank=True) date = models.DateTimeField( help_text="When this entry says it was published", ) # Optional data fields author = models.TextField(blank=True) url = models.TextField( blank=True, validators=[URLValidator()], help_text="URL for the HTML for this entry", ) comments_url = models.TextField( blank=True, validators=[URLValidator()], help_text="URL for HTML comment submission page", ) guid = models.TextField( blank=True, help_text="GUID for the entry, according to the feed", ) last_updated = models.DateTimeField(auto_now=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) objects = managers.EntryManager() def __str__(self): return self.title def save(self, *args, **kwargs): # Default the date if self.date is None: self.date = datetime.datetime.now() # Save super(Entry, self).save(*args, **kwargs) class Meta: ordering = ('-updated_at',) verbose_name_plural = 'entries' # two users can have the same feed but one might force update and the other # wants to keep old version, so make it unique even though it makes entries redundant unique_together = ['feed', 'guid'] # Notification class Notification(models.Model): ''' Notifications for users. Currently used for feed update success/failure events ''' owner = models.ForeignKey('auth.User', on_delete=models.CASCADE) feed = models.ForeignKey(Feed, on_delete=models.CASCADE) state = models.IntegerField(default=ENTRY_UNREAD, choices=( (ENTRY_UNREAD, 'Unread'), (ENTRY_READ, 'Read'), )) title = models.CharField(max_length=200, null=True) message = models.CharField(max_length=200, null=True) is_error = models.BooleanField(default=False) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) class Meta: verbose_name = ("Notification") verbose_name_plural = ("Notifications") ordering = ('-updated_at',) def __str__(self): return f'feed: {self.feed}, owner: {self.owner}'
''' The feeds model describes a registered feed. It contains feed-related information as well as user-related info and other metadata ''' link = models.URLField(max_length = 200) title = models.CharField(max_length=200, null=True) subtitle = models.CharField(max_length=200, null=True) description = models.TextField(null=True) language = models.CharField(max_length=5, null=True) copyright = models.CharField(max_length=50, null=True) ttl = models.PositiveIntegerField(null=True) atomLogo = models.URLField(max_length = 200, null=True) pubdate = models.DateTimeField(null=True) nickname = models.CharField(max_length=60) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) following = models.BooleanField(default=True) flagged = models.BooleanField(default=False) owner = models.ForeignKey('auth.User', related_name='feeds', on_delete=models.CASCADE) class Meta: verbose_name = ("Feed") verbose_name_plural = ("Feeds") ordering = ('-updated_at',) unique_together = ('link', 'owner') objects = managers.FeedManager() def __str__(self): return f'Nickname: {self.nickname}' def save(self, *args, **kwargs): # ensure minimum required fields assert self.link assert self.nickname super(Feed, self).save(*args, **kwargs) assert self.id > 0 return def force_update(self, *args, **kwargs): ''' force-updates a feed using an async call to the _updateFeed method ''' print(f'Forcing update [Feed ID: {self.id}, Nickname: {self.nickname}] ...') self._updateFeed.send_with_options(args=(self.id,), on_failure=feed_update_failure, on_success=feed_update_success) return @backoff.on_exception(backoff.expo, FeedError, max_tries=MAX_FEED_UPDATE_RETRIES, on_backoff=backoff_hdlr) def _fetch_feed(self): ''' internal method to get feed details from the link provided in self. Returns raw feed and entry details as returned by the feedparser library ''' # Request and parse the feed link = self.link d = feedparser.parse(link) status = d.get('status', 200) feed = d.get('feed', None) entries = d.get('entries', None) if status in (200, 302, 304, 307): if ( feed is None or 'title' not in feed or 'link' not in feed ): raise FeedError('Feed parsed but with invalid contents') return feed, entries if status in (404, 500, 502, 503, 504): raise FeedError('Temporary error %s' % status) # Follow permanent redirection if status == 301: # Avoid circular redirection self.link = d.get('href', self.link) return self._fetch_feed() if status == 410: raise FeedError('Feed has gone') # Unknown status raise FeedError('Unrecognised HTTP status %s' % status) @dramatiq.actor(max_retries=0, max_age=10000)#, throws=FeedError) @transaction.atomic def _updateFeed(pk): """ An internal function that fetches a feed and parses it into the Feed object for the DB """ feed = get_object_or_404(Feed, pk=pk) rawFeed, entries = feed._fetch_feed() feed.title = rawFeed.get('title', None) feed.subtitle = rawFeed.get('subtitle', None) feed.copyright = rawFeed.get('rights', None) feed.ttl = rawFeed.get('ttl', None) feed.atomLogo = rawFeed.get('logo', None) # Try to find the updated time updated = rawFeed.get( 'updated_parsed', rawFeed.get('published_parsed', None), ) if updated: updated = datetime.datetime.fromtimestamp( time.mktime(updated) ) feed.pubdate = updated super(Feed, feed).save() if entries: dbEntriesCreate = [] dbEntriesupdate = [] for raw_entry in entries: entry = Entry.objects.parseFromFeed(raw_entry) entry.feed = feed try: newEntry = Entry.objects.get(guid=entry.guid, feed=feed) except Entry.DoesNotExist: newEntry = None if newEntry: #
if it was updated, then mark it as unread, otherwise no need to do anything if newEntry.date > entry.date: entry.state = ENTRY_UNREAD id = newEntry.id newEntry = entry newEntry.id = id dbEntriesupdate.append(newEntry) else: dbEntriesCreate.append(entry) with transaction.atomic(): if len(dbEntriesCreate)>0: Entry.objects.bulk_create(dbEntriesCreate) if len(dbEntriesupdate)>0: fields = ['feed', 'state', 'title', 'content', 'date', 'author', 'url', 'comments_url'] Entry.objects.bulk_update(dbEntriesupdate, fields) return
identifier_body
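_fetch_feed above sorts feedparser statuses into retryable failures (raised as FeedError and retried by the backoff decorator) and terminal ones. A runnable reduction of that pattern, with a dummy fetch in place of feedparser.parse and made-up retry numbers:

    import backoff

    class FeedError(Exception):
        """Fetch failed; backoff retries it with exponential delays."""

    def on_backoff(details):
        # same hook shape that backoff_hdlr above receives
        print('retry {tries} in {wait:0.1f}s'.format(**details))

    @backoff.on_exception(backoff.expo, FeedError, max_tries=3, on_backoff=on_backoff)
    def fetch(status):
        # stand-in for feedparser.parse(); `status` mimics d.get('status', 200)
        if status in (404, 500, 502, 503, 504):
            raise FeedError('Temporary error %s' % status)
        if status == 410:
            raise FeedError('Feed has gone')
        return {'title': 't', 'link': 'l'}

    print(fetch(200))   # returns the payload immediately
    # fetch(503) raises FeedError after 3 attempts, logging two backoff waits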
server.rs
// // Copyright (c) Pirmin Kalberer. All rights reserved. // Licensed under the MIT License. See LICENSE file in the project root for full license information. // use crate::core::config::ApplicationCfg; use crate::mvt_service::MvtService; use crate::runtime_config::{config_from_args, service_from_args}; use crate::static_files::StaticFiles; use actix_cors::Cors; use actix_files as fs; use actix_web::http::header; use actix_web::middleware::Compress; use actix_web::{ guard, middleware, web, web::Data, App, HttpRequest, HttpResponse, HttpServer, Result, }; use clap::ArgMatches; use log::Level; use num_cpus; use open; use std::collections::HashMap; use std::str; use std::str::FromStr; static DINO: &'static str = " xxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxx xxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxx xxxxxxxxx xxxxxxx xxxxxx xxxxxxx"; async fn mvt_metadata(service: web::Data<MvtService>) -> Result<HttpResponse> { let json = service.get_mvt_metadata()?; Ok(HttpResponse::Ok().json(&json)) } /// Font list for Maputnik async fn fontstacks() -> Result<HttpResponse> { Ok(HttpResponse::Ok().json(&["Roboto Medium", "Roboto Regular"])) } // Include method fonts() which returns HashMap with embedded font files include!(concat!(env!("OUT_DIR"), "/fonts.rs")); /// Fonts for Maputnik /// Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf async fn fonts_pbf(params: web::Path<(String, String)>) -> Result<HttpResponse> { let fontpbfs = fonts(); let fontlist = &params.as_ref().0; let range = &params.as_ref().1; let mut fonts = fontlist.split(",").collect::<Vec<_>>(); fonts.push("Roboto Regular"); // Fallback let mut resp = HttpResponse::NotFound().finish(); for font in fonts { let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range); debug!("Font lookup: {}", key); if let Some(pbf) = fontpbfs.get(&key as &str) { resp = HttpResponse::Ok() .content_type("application/x-protobuf") // data is already gzip compressed .insert_header(header::ContentEncoding::Gzip) .body(*pbf); // TODO: chunked response break; } } Ok(resp) } fn req_baseurl(req: &HttpRequest) -> String { let conninfo = req.connection_info(); format!("{}://{}", conninfo.scheme(), conninfo.host()) } async fn tileset_tilejson( service: web::Data<MvtService>, tileset: web::Path<String>, req: HttpRequest, ) -> Result<HttpResponse> { let url = req_baseurl(&req); let json = web::block(move || service.get_tilejson(&url, &tileset, &service.grid).ok()).await?; Ok(HttpResponse::Ok().json(&json)) } async fn tileset_style_json( service: web::Data<MvtService>, tileset: web::Path<String>, req: HttpRequest, ) -> Result<HttpResponse> { let json = service.get_stylejson(&req_baseurl(&req), &tileset)?; Ok(HttpResponse::Ok().json(&json)) } async fn tileset_metadata_json( service: web::Data<MvtService>, tileset: web::Path<String>, ) -> Result<HttpResponse> { let json = web::block(move || service.get_mbtiles_metadata(&tileset, &service.grid).ok()).await?; 
Ok(HttpResponse::Ok().json(&json)) } async fn tile_pbf( config: web::Data<ApplicationCfg>, service: web::Data<MvtService>, params: web::Path<(String, u8, u32, u32)>, req: HttpRequest, ) -> Result<HttpResponse> { let params = params.into_inner(); let tileset = params.0; let z = params.1; let x = params.2; let y = params.3; let gzip = req .headers() .get(header::ACCEPT_ENCODING) .and_then(|headerval| { headerval .to_str() .ok() .and_then(|headerstr| Some(headerstr.contains("gzip"))) }) .unwrap_or(false); // rust-postgres starts its own Tokio runtime // without blocking we get 'Cannot start a runtime from within a runtime' let tile = web::block(move || service.tile_cached(&tileset, x, y, z, gzip, None)).await?; let resp = match tile { Some(tile) => { let mut r = HttpResponse::Ok(); r.content_type("application/x-protobuf"); if gzip { // data is already gzip compressed r.insert_header(header::ContentEncoding::Gzip); } let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300); r.insert_header((header::CACHE_CONTROL, format!("max-age={}", cache_max_age))); r.body(tile) // TODO: chunked response } None => HttpResponse::NoContent().finish(), }; Ok(resp) } lazy_static! { static ref STATIC_FILES: StaticFiles = StaticFiles::init(); } async fn static_file_handler(req: HttpRequest) -> Result<HttpResponse> { let key = req.path()[1..].to_string(); let resp = if let Some(ref content) = STATIC_FILES.content(None, key) { HttpResponse::Ok() .insert_header((header::ACCESS_CONTROL_ALLOW_ORIGIN, "*")) // TODO: use Actix middleware .content_type(content.1) .body(content.0) // TODO: chunked response } else { HttpResponse::NotFound().finish() }; Ok(resp) } #[derive(Deserialize)] struct DrilldownParams { minzoom: Option<u8>, maxzoom: Option<u8>, points: String, //x1,y1,x2,y2,..
} async fn drilldown_handler( service: web::Data<MvtService>, params: web::Query<DrilldownParams>, ) -> Result<HttpResponse> { let tileset = None; // all tilesets let progress = false; let points: Vec<f64> = params .points .split(",") .map(|v| { v.parse() .expect("Error parsing 'point' as pair of float values") //FIXME: map_err(|_| error::ErrorInternalServerError("...") }) .collect(); let stats = service.drilldown(tileset, params.minzoom, params.maxzoom, points, progress); let json = stats.as_json()?; Ok(HttpResponse::Ok().json(&json)) } #[actix_web::main] pub async fn webserver(args: ArgMatches<'static>) -> std::io::Result<()> { let config = config_from_args(&args); let host = config .webserver .bind .clone() .unwrap_or("127.0.0.1".to_string()); let port = config.webserver.port.unwrap_or(6767); let bind_addr = format!("{}:{}", host, port); let workers = config.webserver.threads.unwrap_or(num_cpus::get() as u8); let mvt_viewer = config.service.mvt.viewer; let openbrowser = bool::from_str(args.value_of("openbrowser").unwrap_or("true")).unwrap_or(false); let static_dirs = config.webserver.static_.clone(); let svc_config = config.clone(); let service = web::block(move || { let mut service = service_from_args(&svc_config, &args); service.prepare_feature_queries(); service.init_cache(); service }) .await .expect("service initialization failed"); let server = HttpServer::new(move || { let mut app = App::new() .app_data(Data::new(config.clone())) .app_data(Data::new(service.clone())) .wrap(middleware::Logger::new("%r %s %b %Dms %a")) .wrap(Compress::default()) .wrap( Cors::default() .allow_any_origin() .send_wildcard() .allowed_methods(vec!["GET"]), ) .service( web::resource("/index.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(mvt_metadata), ), ) .service( web::resource("/fontstacks.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fontstacks), ), ) .service( web::resource("/fonts.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fontstacks), ), ) .service( web::resource("/fonts/{fonts}/{range}.pbf").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fonts_pbf), ), ); for static_dir in &static_dirs { let dir = &static_dir.dir; if std::path::Path::new(dir).is_dir() { info!("Serving static files from directory '{}'", dir); app = app.service(fs::Files::new(&static_dir.path, dir)); } else { warn!("Static file directory '{}' not found", dir); } } app = app .service( web::resource("/{tileset}.style.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(tileset_style_json), ), ) .service( web::resource("/{tileset}/metadata.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(tileset_metadata_json), ), ) .service( web::resource("/{tileset}.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(tileset_tilejson), ), )
.service( web::resource("/{tileset}/{z}/{x}/{y}.pbf").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(tile_pbf), ), ); if mvt_viewer { app = app.service( web::resource("/drilldown").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(drilldown_handler), ), ); app = app.default_service(web::to(static_file_handler)); } app }) .workers(workers as usize) .bind(&bind_addr) .expect("Cannot start server on given IP/Port") .shutdown_timeout(3) // default: 30s .run(); if log_enabled!(Level::Info) { println!("{}", DINO); } if openbrowser && mvt_viewer { let _res = open::that(format!("http://{}:{}", &host, port)); } server.await }
random_line_split
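fonts_pbf in the server.rs sample above tries each requested font and pushes "Roboto Regular" as a last resort, serving the first glyph range it finds. The same first-hit-wins lookup expressed in Python (the font map and names here are illustrative, not part of the project):

    def lookup_glyphs(font_map, requested, range_):
        """Return the first matching glyph blob, trying the fallback font last."""
        for font in list(requested) + ['Roboto Regular']:   # fallback, as fonts_pbf pushes it
            key = 'fonts/{}/{}.pbf'.format(font.replace('%20', ' '), range_)
            if key in font_map:
                return font_map[key]
        return None                                         # maps to the 404 branch

    # usage
    font_map = {'fonts/Roboto Regular/0-255.pbf': b'<pbf bytes>'}
    assert lookup_glyphs(font_map, ['Open Sans Regular'], '0-255') == b'<pbf bytes>'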
server.rs
// // Copyright (c) Pirmin Kalberer. All rights reserved. // Licensed under the MIT License. See LICENSE file in the project root for full license information. // use crate::core::config::ApplicationCfg; use crate::mvt_service::MvtService; use crate::runtime_config::{config_from_args, service_from_args}; use crate::static_files::StaticFiles; use actix_cors::Cors; use actix_files as fs; use actix_web::http::header; use actix_web::middleware::Compress; use actix_web::{ guard, middleware, web, web::Data, App, HttpRequest, HttpResponse, HttpServer, Result, }; use clap::ArgMatches; use log::Level; use num_cpus; use open; use std::collections::HashMap; use std::str; use std::str::FromStr; static DINO: &'static str = " xxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxx xxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxx xxxxxxxxx xxxxxxx xxxxxx xxxxxxx"; async fn mvt_metadata(service: web::Data<MvtService>) -> Result<HttpResponse> { let json = service.get_mvt_metadata()?; Ok(HttpResponse::Ok().json(&json)) } /// Font list for Maputnik async fn fontstacks() -> Result<HttpResponse> { Ok(HttpResponse::Ok().json(&["Roboto Medium", "Roboto Regular"])) } // Include method fonts() which returns HashMap with embedded font files include!(concat!(env!("OUT_DIR"), "/fonts.rs")); /// Fonts for Maputnik /// Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf async fn fonts_pbf(params: web::Path<(String, String)>) -> Result<HttpResponse> { let fontpbfs = fonts(); let fontlist = &params.as_ref().0; let range = &params.as_ref().1; let mut fonts = fontlist.split(",").collect::<Vec<_>>(); fonts.push("Roboto Regular"); // Fallback let mut resp = HttpResponse::NotFound().finish(); for font in fonts { let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range); debug!("Font lookup: {}", key); if let Some(pbf) = fontpbfs.get(&key as &str) { resp = HttpResponse::Ok() .content_type("application/x-protobuf") // data is already gzip compressed .insert_header(header::ContentEncoding::Gzip) .body(*pbf); // TODO: chunked response break; } } Ok(resp) } fn req_baseurl(req: &HttpRequest) -> String { let conninfo = req.connection_info(); format!("{}://{}", conninfo.scheme(), conninfo.host()) } async fn tileset_tilejson( service: web::Data<MvtService>, tileset: web::Path<String>, req: HttpRequest, ) -> Result<HttpResponse> { let url = req_baseurl(&req); let json = web::block(move || service.get_tilejson(&url, &tileset, &service.grid).ok()).await?; Ok(HttpResponse::Ok().json(&json)) } async fn tileset_style_json( service: web::Data<MvtService>, tileset: web::Path<String>, req: HttpRequest, ) -> Result<HttpResponse> { let json = service.get_stylejson(&req_baseurl(&req), &tileset)?; Ok(HttpResponse::Ok().json(&json)) } async fn tileset_metadata_json( service: web::Data<MvtService>, tileset: web::Path<String>, ) -> Result<HttpResponse> { let json = web::block(move || service.get_mbtiles_metadata(&tileset, &service.grid).ok()).await?; 
Ok(HttpResponse::Ok().json(&json)) } async fn
( config: web::Data<ApplicationCfg>, service: web::Data<MvtService>, params: web::Path<(String, u8, u32, u32)>, req: HttpRequest, ) -> Result<HttpResponse> { let params = params.into_inner(); let tileset = params.0; let z = params.1; let x = params.2; let y = params.3; let gzip = req .headers() .get(header::ACCEPT_ENCODING) .and_then(|headerval| { headerval .to_str() .ok() .and_then(|headerstr| Some(headerstr.contains("gzip"))) }) .unwrap_or(false); // rust-postgres starts its own Tokio runtime // without blocking we get 'Cannot start a runtime from within a runtime' let tile = web::block(move || service.tile_cached(&tileset, x, y, z, gzip, None)).await?; let resp = match tile { Some(tile) => { let mut r = HttpResponse::Ok(); r.content_type("application/x-protobuf"); if gzip { // data is already gzip compressed r.insert_header(header::ContentEncoding::Gzip); } let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300); r.insert_header((header::CACHE_CONTROL, format!("max-age={}", cache_max_age))); r.body(tile) // TODO: chunked response } None => HttpResponse::NoContent().finish(), }; Ok(resp) } lazy_static! { static ref STATIC_FILES: StaticFiles = StaticFiles::init(); } async fn static_file_handler(req: HttpRequest) -> Result<HttpResponse> { let key = req.path()[1..].to_string(); let resp = if let Some(ref content) = STATIC_FILES.content(None, key) { HttpResponse::Ok() .insert_header((header::ACCESS_CONTROL_ALLOW_ORIGIN, "*")) // TODO: use Actix middleware .content_type(content.1) .body(content.0) // TODO: chunked response } else { HttpResponse::NotFound().finish() }; Ok(resp) } #[derive(Deserialize)] struct DrilldownParams { minzoom: Option<u8>, maxzoom: Option<u8>, points: String, //x1,y1,x2,y2,.. } async fn drilldown_handler( service: web::Data<MvtService>, params: web::Query<DrilldownParams>, ) -> Result<HttpResponse> { let tileset = None; // all tilesets let progress = false; let points: Vec<f64> = params .points .split(",") .map(|v| { v.parse() .expect("Error parsing 'point' as pair of float values") //FIXME: map_err(|_| error::ErrorInternalServerError("...") }) .collect(); let stats = service.drilldown(tileset, params.minzoom, params.maxzoom, points, progress); let json = stats.as_json()?; Ok(HttpResponse::Ok().json(&json)) } #[actix_web::main] pub async fn webserver(args: ArgMatches<'static>) -> std::io::Result<()> { let config = config_from_args(&args); let host = config .webserver .bind .clone() .unwrap_or("127.0.0.1".to_string()); let port = config.webserver.port.unwrap_or(6767); let bind_addr = format!("{}:{}", host, port); let workers = config.webserver.threads.unwrap_or(num_cpus::get() as u8); let mvt_viewer = config.service.mvt.viewer; let openbrowser = bool::from_str(args.value_of("openbrowser").unwrap_or("true")).unwrap_or(false); let static_dirs = config.webserver.static_.clone(); let svc_config = config.clone(); let service = web::block(move || { let mut service = service_from_args(&svc_config, &args); service.prepare_feature_queries(); service.init_cache(); service }) .await .expect("service initialization failed"); let server = HttpServer::new(move || { let mut app = App::new() .app_data(Data::new(config.clone())) .app_data(Data::new(service.clone())) .wrap(middleware::Logger::new("%r %s %b %Dms %a")) .wrap(Compress::default()) .wrap( Cors::default() .allow_any_origin() .send_wildcard() .allowed_methods(vec!["GET"]), ) .service( web::resource("/index.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(mvt_metadata), ), )
.service( web::resource("/fontstacks.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fontstacks), ), ) .service( web::resource("/fonts.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fontstacks), ), ) .service( web::resource("/fonts/{fonts}/{range}.pbf").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fonts_pbf), ), ); for static_dir in &static_dirs { let dir = &static_dir.dir; if std::path::Path::new(dir).is_dir() { info!("Serving static files from directory '{}'", dir); app = app.service(fs::Files::new(&static_dir.path, dir)); } else { warn!("Static file directory '{}' not found", dir); } } app = app .service( web::resource("/{tileset}.style.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(tileset_style_json), ), ) .service( web::resource("/{tileset}/metadata.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(tileset_metadata_json), ), ) .service( web::resource("/{tileset}.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(tileset_tilejson), ), ) .service( web::resource("/{tileset}/{z}/{x}/{y}.pbf").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(tile_pbf), ), ); if mvt_viewer { app = app.service( web::resource("/drilldown").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(drilldown_handler), ), ); app = app.default_service(web::to(static_file_handler)); } app }) .workers(workers as usize) .bind(&bind_addr) .expect("Cannot start server on given IP/Port") .shutdown_timeout(3) // default: 30s .run(); if log_enabled!(Level::Info) { println!("{}", DINO); } if openbrowser && mvt_viewer { let _res = open::that(format!("http://{}:{}", &host, port)); } server.await }
tile_pbf
identifier_name
server.rs
// // Copyright (c) Pirmin Kalberer. All rights reserved. // Licensed under the MIT License. See LICENSE file in the project root for full license information. // use crate::core::config::ApplicationCfg; use crate::mvt_service::MvtService; use crate::runtime_config::{config_from_args, service_from_args}; use crate::static_files::StaticFiles; use actix_cors::Cors; use actix_files as fs; use actix_web::http::header; use actix_web::middleware::Compress; use actix_web::{ guard, middleware, web, web::Data, App, HttpRequest, HttpResponse, HttpServer, Result, }; use clap::ArgMatches; use log::Level; use num_cpus; use open; use std::collections::HashMap; use std::str; use std::str::FromStr; static DINO: &'static str = " xxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxx xxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxx xxxxxxxxx xxxxxxx xxxxxx xxxxxxx"; async fn mvt_metadata(service: web::Data<MvtService>) -> Result<HttpResponse> { let json = service.get_mvt_metadata()?; Ok(HttpResponse::Ok().json(&json)) } /// Font list for Maputnik async fn fontstacks() -> Result<HttpResponse> { Ok(HttpResponse::Ok().json(&["Roboto Medium", "Roboto Regular"])) } // Include method fonts() which returns HashMap with embedded font files include!(concat!(env!("OUT_DIR"), "/fonts.rs")); /// Fonts for Maputnik /// Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf async fn fonts_pbf(params: web::Path<(String, String)>) -> Result<HttpResponse> { let fontpbfs = fonts(); let fontlist = &params.as_ref().0; let range = &params.as_ref().1; let mut fonts = fontlist.split(",").collect::<Vec<_>>(); fonts.push("Roboto Regular"); // Fallback let mut resp = HttpResponse::NotFound().finish(); for font in fonts { let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range); debug!("Font lookup: {}", key); if let Some(pbf) = fontpbfs.get(&key as &str) { resp = HttpResponse::Ok() .content_type("application/x-protobuf") // data is already gzip compressed .insert_header(header::ContentEncoding::Gzip) .body(*pbf); // TODO: chunked response break; } } Ok(resp) } fn req_baseurl(req: &HttpRequest) -> String { let conninfo = req.connection_info(); format!("{}://{}", conninfo.scheme(), conninfo.host()) } async fn tileset_tilejson( service: web::Data<MvtService>, tileset: web::Path<String>, req: HttpRequest, ) -> Result<HttpResponse> { let url = req_baseurl(&req); let json = web::block(move || service.get_tilejson(&url, &tileset, &service.grid).ok()).await?; Ok(HttpResponse::Ok().json(&json)) } async fn tileset_style_json( service: web::Data<MvtService>, tileset: web::Path<String>, req: HttpRequest, ) -> Result<HttpResponse> { let json = service.get_stylejson(&req_baseurl(&req), &tileset)?; Ok(HttpResponse::Ok().json(&json)) } async fn tileset_metadata_json( service: web::Data<MvtService>, tileset: web::Path<String>, ) -> Result<HttpResponse> { let json = web::block(move || service.get_mbtiles_metadata(&tileset, &service.grid).ok()).await?; 
Ok(HttpResponse::Ok().json(&json)) } async fn tile_pbf( config: web::Data<ApplicationCfg>, service: web::Data<MvtService>, params: web::Path<(String, u8, u32, u32)>, req: HttpRequest, ) -> Result<HttpResponse>
lazy_static! { static ref STATIC_FILES: StaticFiles = StaticFiles::init(); } async fn static_file_handler(req: HttpRequest) -> Result<HttpResponse> { let key = req.path()[1..].to_string(); let resp = if let Some(ref content) = STATIC_FILES.content(None, key) { HttpResponse::Ok() .insert_header((header::ACCESS_CONTROL_ALLOW_ORIGIN, "*")) // TODO: use Actix middleware .content_type(content.1) .body(content.0) // TODO: chunked response } else { HttpResponse::NotFound().finish() }; Ok(resp) } #[derive(Deserialize)] struct DrilldownParams { minzoom: Option<u8>, maxzoom: Option<u8>, points: String, //x1,y1,x2,y2,.. } async fn drilldown_handler( service: web::Data<MvtService>, params: web::Query<DrilldownParams>, ) -> Result<HttpResponse> { let tileset = None; // all tilesets let progress = false; let points: Vec<f64> = params .points .split(",") .map(|v| { v.parse() .expect("Error parsing 'point' as pair of float values") //FIXME: map_err(|_| error::ErrorInternalServerError("...") }) .collect(); let stats = service.drilldown(tileset, params.minzoom, params.maxzoom, points, progress); let json = stats.as_json()?; Ok(HttpResponse::Ok().json(&json)) } #[actix_web::main] pub async fn webserver(args: ArgMatches<'static>) -> std::io::Result<()> { let config = config_from_args(&args); let host = config .webserver .bind .clone() .unwrap_or("127.0.0.1".to_string()); let port = config.webserver.port.unwrap_or(6767); let bind_addr = format!("{}:{}", host, port); let workers = config.webserver.threads.unwrap_or(num_cpus::get() as u8); let mvt_viewer = config.service.mvt.viewer; let openbrowser = bool::from_str(args.value_of("openbrowser").unwrap_or("true")).unwrap_or(false); let static_dirs = config.webserver.static_.clone(); let svc_config = config.clone(); let service = web::block(move || { let mut service = service_from_args(&svc_config, &args); service.prepare_feature_queries(); service.init_cache(); service }) .await .expect("service initialization failed"); let server = HttpServer::new(move || { let mut app = App::new() .app_data(Data::new(config.clone())) .app_data(Data::new(service.clone())) .wrap(middleware::Logger::new("%r %s %b %Dms %a")) .wrap(Compress::default()) .wrap( Cors::default() .allow_any_origin() .send_wildcard() .allowed_methods(vec!["GET"]), ) .service( web::resource("/index.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(mvt_metadata), ), ) .service( web::resource("/fontstacks.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fontstacks), ), ) .service( web::resource("/fonts.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fontstacks), ), ) .service( web::resource("/fonts/{fonts}/{range}.pbf").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(fonts_pbf), ), ); for static_dir in &static_dirs { let dir = &static_dir.dir; if std::path::Path::new(dir).is_dir() { info!("Serving static files from directory '{}'", dir); app = app.service(fs::Files::new(&static_dir.path, dir)); } else { warn!("Static file directory '{}' not found", dir); } } app = app .service( web::resource("/{tileset}.style.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(tileset_style_json), ), ) .service( web::resource("/{tileset}/metadata.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(tileset_metadata_json), ), ) .service( web::resource("/{tileset}.json").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head()))
.to(tileset_tilejson), ), ) .service( web::resource("/{tileset}/{z}/{x}/{y}.pbf").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(tile_pbf), ), ); if mvt_viewer { app = app.service( web::resource("/drilldown").route( web::route() .guard(guard::Any(guard::Get()).or(guard::Head())) .to(drilldown_handler), ), ); app = app.default_service(web::to(static_file_handler)); } app }) .workers(workers as usize) .bind(&bind_addr) .expect("Cannot start server on given IP/Port") .shutdown_timeout(3) // default: 30s .run(); if log_enabled!(Level::Info) { println!("{}", DINO); } if openbrowser && mvt_viewer { let _res = open::that(format!("http://{}:{}", &host, port)); } server.await }
{ let params = params.into_inner(); let tileset = params.0; let z = params.1; let x = params.2; let y = params.3; let gzip = req .headers() .get(header::ACCEPT_ENCODING) .and_then(|headerval| { headerval .to_str() .ok() .and_then(|headerstr| Some(headerstr.contains("gzip"))) }) .unwrap_or(false); // rust-postgres starts its own Tokio runtime // without blocking we get 'Cannot start a runtime from within a runtime' let tile = web::block(move || service.tile_cached(&tileset, x, y, z, gzip, None)).await?; let resp = match tile { Some(tile) => { let mut r = HttpResponse::Ok(); r.content_type("application/x-protobuf"); if gzip { // data is already gzip compressed r.insert_header(header::ContentEncoding::Gzip); } let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300); r.insert_header((header::CACHE_CONTROL, format!("max-age={}", cache_max_age))); r.body(tile) // TODO: chunked response } None => HttpResponse::NoContent().finish(), }; Ok(resp) }
identifier_body
NeighborChainCli.py
import base64 import hashlib import json import os import subprocess import sys import typing import pexpect from Configs.Constants import PBNB_ID, PBTC_ID from Drivers.Connections import RpcConnection from Helpers.Logging import config_logger from Helpers.TestHelper import l6, json_extract from Helpers.Time import WAIT logger = config_logger(__name__) class NeighborChainCli: @staticmethod def new(token_id): if token_id == PBNB_ID: return BnbCli() elif token_id == PBTC_ID: return BtcGo() class NeighborChainError(BaseException): pass class BnbCli: _bnb_host = 'data-seed-pre-0-s1.binance.org' _bnb_rpc_port = 443 _bnb_rpc_protocol = 'https' _path = f'{os.getcwd()}/bin' _binary = {'darwin': f'{_path}/tbnbcli-mac', 'linux': f'{_path}/tbnbcli-linux', '*': f'{_path}/tbnbcli-win'} tbnbcli = _binary.get(sys.platform, _binary["*"]) def __init__(self, cmd=tbnbcli, chain_id="Binance-Chain-Ganges", node=None): self.node = node or f'tcp://{BnbCli._bnb_host}:80' self.cmd = cmd self.chain_id = chain_id self.trust = '--trust-node' self.stdout = subprocess.PIPE self.stderr = subprocess.PIPE def get_default_conn(self): return ['--chain-id', self.chain_id, '--node', self.node, self.trust] def get_balance(self, key): process = subprocess.Popen([self.cmd, 'account', key] + self.get_default_conn(), stdout=self.stdout, stderr=self.stderr, universal_newlines=True) stdout, stderr = process.communicate() out = json_extract(stdout) bal = int(BnbCli.BnbResponse(out).get_balance()) logger.debug(f"out: {stdout.strip()}") return bal def send_to(self, sender, receiver, amount, password, memo): memo_encoded = BnbCli.encode_memo(memo) logger.info(f'Bnbcli | send {amount} from {l6(sender)} to {l6(receiver)} | memo: {memo_encoded}') command = [self.cmd, 'send', '--from', sender, '--to', receiver, '--amount', f'{amount}:BNB', '--json', '--memo', memo_encoded] # connection args are appended by _exe_bnb_cli return self._exe_bnb_cli(command, password) def send_to_multi(self, sender, receiver_amount_dict: dict, password, memo): """ :param sender: sender addr or account name :param receiver_amount_dict: dict { receiver_addr : amount to send, ...} :param password: :param memo: :return: """ memo_encoded = BnbCli.encode_memo(memo) logger.info( f'Bnbcli | send from {l6(sender)} to {json.dumps(receiver_amount_dict, indent=3)} | memo: {memo_encoded}') bnb_output = '[' for key, value in receiver_amount_dict.items(): bnb_output += "{\"to\":\"%s\",\"amount\":\"%s:BNB\"}," % (key, value) bnb_output = bnb_output[:-1] + ']' command = [self.cmd, 'token', 'multi-send', '--from', sender, '--transfers', bnb_output, '--json', '--memo', memo_encoded] return self._exe_bnb_cli(command, password) def _spawn(self, command, timeout=15, local=False): if not local: command += ' ' + ' '.join(self.get_default_conn()) # command is a shell string here, not a list logger.info(command) child = pexpect.spawn(command, encoding='utf-8', timeout=timeout) child.logfile = sys.stdout return child def _exe_bnb_cli(self, command, more_input): command += self.get_default_conn() process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) WAIT(7) stdout, stderr = process.communicate(f'{more_input}\n') logger.debug(f"\n" f"+++ command: {' '.join(command)}\n" f"+++ out: {stdout}\n" f"+++ err: {stderr}") out = json_extract(stdout) err = json_extract(stderr) if out is not None: return BnbCli.BnbResponse(out) elif err is not None: return BnbCli.BnbResponse(err) else:
@staticmethod def get_bnb_rpc_url(): return f'{BnbCli._bnb_rpc_protocol}://{BnbCli._bnb_host}:{BnbCli._bnb_rpc_port}' @staticmethod def encode_memo(info): """ @param info: Expect porting id string, or tuple/list of (redeem id,incognito address) @return: """ if type(info) is str: return BnbCli.encode_porting_memo(info) if (type(info) is tuple or type(info) is list) and len(info) == 2: return BnbCli.encode_redeem_memo(info[0], info[1]) raise Exception(f'Expect porting id string, or tuple/list of (redeem id,incognito address), ' f'got {type(info)}: {info} ') @staticmethod def encode_porting_memo(porting_id): logger.info(f"""Encoding porting memo Porting id: {porting_id}""") memo_struct = '{"PortingID":"%s"}' % porting_id byte_ascii = memo_struct.encode('ascii') b64_encode = base64.b64encode(byte_ascii) encode_memo_str_output = b64_encode.decode('utf-8') return encode_memo_str_output @staticmethod def encode_redeem_memo(redeem_id, custodian_incognito_addr): logger.info(f"""Encoding redeem memo Redeem id: {redeem_id} Incognito addr: {custodian_incognito_addr}""") memo_struct = '{"RedeemID":"%s","CustodianIncognitoAddress":"%s"}' % (redeem_id, custodian_incognito_addr) byte_ascii = memo_struct.encode('ascii') sha3_256 = hashlib.sha3_256(byte_ascii) b64_encode = base64.b64encode(sha3_256.digest()) encode_memo_str_output = b64_encode.decode('utf-8') return encode_memo_str_output def import_mnemonics(self, username, pass_phrase, mnemonic, overwrite=True): """ :param overwrite: option to overwrite existing username :param username: user name prefix :param pass_phrase: pass phrase for all users (all users will have the same pass phrase) :param mnemonic: could be a string or a list of strings of mnemonic :return: """ mnemonic_list = [mnemonic] if type(mnemonic) is str else mnemonic i = 1 for m in mnemonic_list: name = f'{username}{i}' i += 1 logger.info(f'Importing key with passphrase: {pass_phrase} | {m}') command = f"{self.cmd} keys add --recover {name}" child = self._spawn(command, local=True) try: child.expect('override the existing name', timeout=2) if overwrite: child.sendline('y') else: child.sendline('n') child.close() return except pexpect.exceptions.TIMEOUT: pass child.expect('Enter a passphrase for your key:') child.sendline(pass_phrase) child.expect('Repeat the passphrase:') child.sendline(pass_phrase) child.expect('> Enter your recovery seed phrase:') child.sendline(m) child.expect(pexpect.EOF) child.close() def delete_local_address(self, user=None, password=None): if user is None: users_to_del = self.list_user_addresses() elif type(user) is str: users_to_del = [user] elif type(user) is list: users_to_del = user else: raise Exception('unsupported type for <user> arg') password = '123123Az' if password is None else password for user in users_to_del: command = f'{self.cmd} keys delete {user}' child = self._spawn(command, local=True) try: child.expect('not found', timeout=2) child.close() continue except pexpect.exceptions.TIMEOUT: pass child.expect('DANGER - enter password to permanently delete key:') child.sendline(password) child.expect(pexpect.EOF) child.close() def list_user_addresses(self): user_addresses = {} command = f"{self.cmd} keys list" child = self._spawn(command, local=True) line = child.readline() while line != '': list_ = line.strip('\r\n').split('\t') if 'bnb' in list_[2]: user = list_[0] address = list_[2] user_addresses[user] = address line = child.readline() return user_addresses class BnbResponse: def __init__(self, stdout): self.data = stdout if self.data is None: raise
ValueError('Response data must not be None') def get_coins(self): try: return self.data['value']['base']['coins'] except TypeError: return 0 def get_amount(self, denom): coins = self.get_coins() for coin in coins: if coin['denom'] == denom: return coin['amount'] def get_balance(self): return self.get_amount('BNB') def get_tx_hash(self): try: return self.data['hash'] except KeyError as ke: raise Exception(f'Response data does not contain hash: {ke} :{self.data}') def build_proof(self, tx_hash=None): tx_hash = self.get_tx_hash() if tx_hash is None else tx_hash logger.info('') logger.info(f'Portal | Building proof | tx {tx_hash}') bnb_get_block_url = f"{BnbCli.get_bnb_rpc_url()}/tx?hash=0x{tx_hash}&prove=true" block_response = RpcConnection(bnb_get_block_url, id_num='', json_rpc='2.0'). \ with_params([]).with_method('').execute() block_height = int(block_response.data()['result']['height']) proof = {"Proof": block_response.data()['result']['proof'], "BlockHeight": block_height} proof['Proof']['Proof']['total'] = int(proof['Proof']['Proof']['total']) # convert to int proof['Proof']['Proof']['index'] = int(proof['Proof']['Proof']['index']) # convert to int proof_string = json.dumps(proof, separators=(',', ':')) # separators=(',', ':') to remove all spaces proof_ascii = proof_string.encode('ascii') # convert to byte string_base64 = base64.b64encode(proof_ascii) # encode string_base64_utf8 = string_base64.decode('utf-8') # convert to string logger.debug(f""" Proof: ================= \n{proof}""") return string_base64_utf8 # ============= BTC =================== class BtcGo: if sys.platform == 'darwin': btc_go_path = f'{os.getcwd()}/IncognitoChain/bin/btcGo/mac/' elif sys.platform == 'linux': btc_go_path = f'{os.getcwd()}/IncognitoChain/bin/btcGo/linux/' else: btc_go_path = f'{os.getcwd()}/IncognitoChain/bin/btcGo/win/' btc_build_proof_cli = btc_go_path + 'buildProof' btc_get_tx_cli = btc_go_path + 'getTxBTC' btc_send_porting_cli = btc_go_path + 'txportingBTC' btc_send_redeem_cli = btc_go_path + 'txRedeemBTC' if sys.platform == 'win32': btc_build_proof_cli += '.exe' btc_get_tx_cli += '.exe' btc_send_porting_cli += '.exe' btc_send_redeem_cli += '.exe' def __init__(self): pass @staticmethod def get_balance(addr): pass @staticmethod def send_to(sender, receiver, amount, password, memo): """ :param sender: dummy, just added here to match with bnbcli signature. sender is hardcoded = miERaVjAsBriPmAEHSkfymRUo3xjaEoM2r in btcGo command :param receiver: :param amount: :param password: dummy, just added here to match with bnbcli signature :param memo: :return: """ if isinstance(memo, tuple): # send redeem tx send_cli = BtcGo.btc_send_redeem_cli redeem_id = memo[0] custodian_addr = memo[1] command = [send_cli, '-amt', f"{amount}", '-userAdd', f"{receiver}", '-redeemId', f"{redeem_id}", '-custIncAdd', f"{custodian_addr}"] else: # send porting tx send_cli = BtcGo.btc_send_porting_cli porting_id = memo command = [send_cli, '-amtAdd1', f'{amount}', '-outAdd1', f'{receiver}', '-portingId', f'{porting_id}'] return BtcGo._exe_command(command) @staticmethod def send_to_multi(sender, receiver_amount_dict: dict, password, *memo): """ :param receiver_amount_dict: :param sender: dummy, just added here to match with bnbcli signature.
sender is hardcoded = miERaVjAsBriPmAEHSkfymRUo3xjaEoM2r in btcGo command :param password: dummy, just added here to match with bnbcli signature :param memo: :return: """ command = [BtcGo.btc_send_porting_cli] i = 1 for receiver, amount in receiver_amount_dict.items(): command.append(f'-amtAdd{i}') command.append(f'{amount}') command.append(f'-outAdd{i}') command.append(f'{receiver}') i += 1 command.append('-portingId') command.append(f'{memo[0]}') return BtcGo._exe_command(command) @staticmethod def get_tx_by_hash(tx_hash): command = [BtcGo.btc_get_tx_cli, '-txhash', tx_hash] return BtcGo._exe_command(command) @staticmethod def _exe_command(command): process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) stdout, stderr = process.communicate() logger.info(f"\n" f"+++ command: {' '.join(command)}\n\n" f"+++ out: {stdout}\n\n" f"+++ err: {stderr}") dict_response = json_extract(stdout) return BtcGo.BtcResponse(dict_response) class BtcResponse: def __init__(self, data): self.data = data def get_tx_hash(self): try: return self.data['tx']['hash'] except KeyError: return self.data['hash'] def get_block_height(self): try: return self.data['tx']["block_height"] except KeyError: return self.data["block_height"] def build_proof(self): logger.info('') logger.info(f'Portal | Building proof | tx {self.get_tx_hash()}') tx_by_hash = BtcGo.get_tx_by_hash(self.get_tx_hash()) height = tx_by_hash.get_block_height() timeout = 2 * 60 * 60 # 2 hours interval = 120 while timeout > 0: if height != -1: break WAIT(interval) tx_by_hash = BtcGo.get_tx_by_hash(self.get_tx_hash()) height = tx_by_hash.get_block_height() timeout -= interval WAIT(30) command = [BtcGo.btc_build_proof_cli, '-blockHeight', f'{height}', '-txhash', f'{self.get_tx_hash()}'] process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) stdout, stderr = process.communicate() logger.info(f"\n" f"+++ command: {' '.join(command)}\n\n" f"+++ out: {stdout}\n\n" f"+++ err: {stderr}") proof = stdout.split()[1] logger.debug(f""" Proof: ================= \n{proof}""") return proof
raise NeighborChainError(stderr)
conditional_block
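encode_porting_memo in the NeighborChainCli.py row above packs the porting id into a one-field JSON struct and base64-encodes the ASCII bytes, so the memo is fully reversible. A round-trip sketch with a made-up porting id:

    import base64
    import json

    def encode_porting_memo(porting_id):
        memo_struct = '{"PortingID":"%s"}' % porting_id
        return base64.b64encode(memo_struct.encode('ascii')).decode('utf-8')

    def decode_porting_memo(memo):
        # reverse direction, handy when checking what actually went on-chain
        return json.loads(base64.b64decode(memo))['PortingID']

    assert decode_porting_memo(encode_porting_memo('porting-123')) == 'porting-123'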
NeighborChainCli.py
import base64 import hashlib import json import os import subprocess import sys import typing import pexpect from Configs.Constants import PBNB_ID, PBTC_ID from Drivers.Connections import RpcConnection from Helpers.Logging import config_logger from Helpers.TestHelper import l6, json_extract from Helpers.Time import WAIT logger = config_logger(__name__) class NeighborChainCli: @staticmethod def new(token_id): if token_id == PBNB_ID: return BnbCli() elif token_id == PBTC_ID: return BtcGo() class NeighborChainError(BaseException): pass class BnbCli: _bnb_host = 'data-seed-pre-0-s1.binance.org' _bnb_rpc_port = 443 _bnb_rpc_protocol = 'https' _path = f'{os.getcwd()}/bin' _binary = {'darwin': f'{_path}/tbnbcli-mac', 'linux': f'{_path}/tbnbcli-linux', '*': f'{_path}/tbnbcli-win'} tbnbcli = _binary.get(sys.platform, _binary["*"]) def __init__(self, cmd=tbnbcli, chain_id="Binance-Chain-Ganges", node=None): self.node = node or f'tcp://{BnbCli._bnb_host}:80' self.cmd = cmd self.chain_id = chain_id self.trust = '--trust-node' self.stdout = subprocess.PIPE self.stderr = subprocess.PIPE def
(self): return ['--chain-id', self.chain_id, '--node', self.node, self.trust] def get_balance(self, key): process = subprocess.Popen([self.cmd, 'account', key] + self.get_default_conn(), stdout=self.stdout, stderr=self.stderr, universal_newlines=True) stdout, stderr = process.communicate() out = json_extract(stdout) bal = int(BnbCli.BnbResponse(out).get_balance()) logger.debug(f"out: {stdout.strip()}") return bal def send_to(self, sender, receiver, amount, password, memo): memo_encoded = BnbCli.encode_memo(memo) logger.info(f'Bnbcli | send {amount} from {l6(sender)} to {l6(receiver)} | memo: {memo_encoded}') command = [self.cmd, 'send', '--from', sender, '--to', receiver, '--amount', f'{amount}:BNB', '--json', '--memo', memo_encoded] # connection args are appended by _exe_bnb_cli return self._exe_bnb_cli(command, password) def send_to_multi(self, sender, receiver_amount_dict: dict, password, memo): """ :param sender: sender addr or account name :param receiver_amount_dict: dict { receiver_addr : amount to send, ...} :param password: :param memo: :return: """ memo_encoded = BnbCli.encode_memo(memo) logger.info( f'Bnbcli | send from {l6(sender)} to {json.dumps(receiver_amount_dict, indent=3)} | memo: {memo_encoded}') bnb_output = '[' for key, value in receiver_amount_dict.items(): bnb_output += "{\"to\":\"%s\",\"amount\":\"%s:BNB\"}," % (key, value) bnb_output = bnb_output[:-1] + ']' command = [self.cmd, 'token', 'multi-send', '--from', sender, '--transfers', bnb_output, '--json', '--memo', memo_encoded] return self._exe_bnb_cli(command, password) def _spawn(self, command, timeout=15, local=False): if not local: command += ' ' + ' '.join(self.get_default_conn()) # command is a shell string here, not a list logger.info(command) child = pexpect.spawn(command, encoding='utf-8', timeout=timeout) child.logfile = sys.stdout return child def _exe_bnb_cli(self, command, more_input): command += self.get_default_conn() process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) WAIT(7) stdout, stderr = process.communicate(f'{more_input}\n') logger.debug(f"\n" f"+++ command: {' '.join(command)}\n" f"+++ out: {stdout}\n" f"+++ err: {stderr}") out = json_extract(stdout) err = json_extract(stderr) if out is not None: return BnbCli.BnbResponse(out) elif err is not None: return BnbCli.BnbResponse(err) else: raise NeighborChainError(stderr) @staticmethod def get_bnb_rpc_url(): return f'{BnbCli._bnb_rpc_protocol}://{BnbCli._bnb_host}:{BnbCli._bnb_rpc_port}' @staticmethod def encode_memo(info): """ @param info: Expect porting id string, or tuple/list of (redeem id,incognito address) @return: """ if type(info) is str: return BnbCli.encode_porting_memo(info) if (type(info) is tuple or type(info) is list) and len(info) == 2: return BnbCli.encode_redeem_memo(info[0], info[1]) raise Exception(f'Expect porting id string, or tuple/list of (redeem id,incognito address), ' f'got {type(info)}: {info} ') @staticmethod def encode_porting_memo(porting_id): logger.info(f"""Encoding porting memo Porting id: {porting_id}""") memo_struct = '{"PortingID":"%s"}' % porting_id byte_ascii = memo_struct.encode('ascii') b64_encode = base64.b64encode(byte_ascii) encode_memo_str_output = b64_encode.decode('utf-8') return encode_memo_str_output @staticmethod def encode_redeem_memo(redeem_id, custodian_incognito_addr): logger.info(f"""Encoding redeem memo Redeem id: {redeem_id} Incognito addr: {custodian_incognito_addr}""") memo_struct = '{"RedeemID":"%s","CustodianIncognitoAddress":"%s"}' % (redeem_id, custodian_incognito_addr) byte_ascii =
memo_struct.encode('ascii') sha3_256 = hashlib.sha3_256(byte_ascii) b64_encode = base64.b64encode(sha3_256.digest()) encode_memo_str_output = b64_encode.decode('utf-8') return encode_memo_str_output def import_mnemonics(self, username, pass_phrase, mnemonic, overwrite=True): """ :param overwrite: option to overwrite existing username :param username: user name prefix :param pass_phrase: pass phrase for all users (all users will have the same pass phrase) :param mnemonic: could be a string or a list of strings of mnemonic :return: """ mnemonic_list = [mnemonic] if type(mnemonic) is str else mnemonic i = 1 for m in mnemonic_list: name = f'{username}{i}' i += 1 logger.info(f'Importing key with passphrase: {pass_phrase} | {m}') command = f"{self.cmd} keys add --recover {name}" child = self._spawn(command, local=True) try: child.expect('override the existing name', timeout=2) if overwrite: child.sendline('y') else: child.sendline('n') child.close() return except pexpect.exceptions.TIMEOUT: pass child.expect('Enter a passphrase for your key:') child.sendline(pass_phrase) child.expect('Repeat the passphrase:') child.sendline(pass_phrase) child.expect('> Enter your recovery seed phrase:') child.sendline(m) child.expect(pexpect.EOF) child.close() def delete_local_address(self, user=None, password=None): if user is None: users_to_del = self.list_user_addresses() elif type(user) is str: users_to_del = [user] elif type(user) is list: users_to_del = user else: raise Exception('unsupported type for <user> arg') password = '123123Az' if password is None else password for user in users_to_del: command = f'{self.cmd} keys delete {user}' child = self._spawn(command, local=True) try: child.expect('not found', timeout=2) child.close() continue except pexpect.exceptions.TIMEOUT: pass child.expect('DANGER - enter password to permanently delete key:') child.sendline(password) child.expect(pexpect.EOF) child.close() def list_user_addresses(self): user_addresses = {} command = f"{self.cmd} keys list" child = self._spawn(command, local=True) line = child.readline() while line != '': list_ = line.strip('\r\n').split('\t') if 'bnb' in list_[2]: user = list_[0] address = list_[2] user_addresses[user] = address line = child.readline() return user_addresses class BnbResponse: def __init__(self, stdout): self.data = stdout if self.data is None: raise ValueError('Response data must not be None') def get_coins(self): try: return self.data['value']['base']['coins'] except TypeError: return 0 def get_amount(self, denom): coins = self.get_coins() for coin in coins: if coin['denom'] == denom: return coin['amount'] def get_balance(self): return self.get_amount('BNB') def get_tx_hash(self): try: return self.data['hash'] except KeyError as ke: raise Exception(f'Response data does not contain hash: {ke} :{self.data}') def build_proof(self, tx_hash=None): tx_hash = self.get_tx_hash() if tx_hash is None else tx_hash logger.info('') logger.info(f'Portal | Building proof | tx {tx_hash}') bnb_get_block_url = f"{BnbCli.get_bnb_rpc_url()}/tx?hash=0x{tx_hash}&prove=true" block_response = RpcConnection(bnb_get_block_url, id_num='', json_rpc='2.0').
\ with_params([]).with_method('').execute() block_height = int(block_response.data()['result']['height']) proof = {"Proof": block_response.data()['result']['proof'], "BlockHeight": block_height} proof['Proof']['Proof']['total'] = int(proof['Proof']['Proof']['total']) # convert to int proof['Proof']['Proof']['index'] = int(proof['Proof']['Proof']['index']) # convert to int proof_string = json.dumps(proof, separators=(',', ':')) # separators=(',', ':') to remove all spaces proof_ascii = proof_string.encode('ascii') # convert to byte string_base64 = base64.b64encode(proof_ascii) # encode string_base64_utf8 = string_base64.decode('utf-8') # convert to string logger.debug(f""" Proof: ================= \n{proof}""") return string_base64_utf8 # ============= BTC =================== class BtcGo: if sys.platform == 'darwin': btc_go_path = f'{os.getcwd()}/IncognitoChain/bin/btcGo/mac/' elif sys.platform == 'linux': btc_go_path = f'{os.getcwd()}/IncognitoChain/bin/btcGo/linux/' else: btc_go_path = f'{os.getcwd()}/IncognitoChain/bin/btcGo/win/' btc_build_proof_cli = btc_go_path + 'buildProof' btc_get_tx_cli = btc_go_path + 'getTxBTC' btc_send_porting_cli = btc_go_path + 'txportingBTC' btc_send_redeem_cli = btc_go_path + 'txRedeemBTC' if sys.platform == 'windows': btc_build_proof_cli += '.exe' btc_get_tx_cli += '.exe' btc_send_porting_cli += '.exe' btc_send_redeem_cli += '.exe' def __init__(self): pass @staticmethod def get_balance(addr): pass @staticmethod def send_to(sender, receiver, amount, password, memo): """ :param sender: dummy, just add here to match with bnbcli signature. sender is hardcoded = miERaVjAsBriPmAEHSkfymRUo3xjaEoM2r in btcGo command :param receiver: :param amount: :param password: dummy, just add here to match with bnbcli signature :param memo: :return: """ if type(memo) is typing.Tuple: # send redeem tx send_cli = BtcGo.btc_send_redeem_cli redeem_id = memo[0] custodian_addr = memo[1] command = [send_cli, '-amt', f"{amount}", '-userAdd', f"{receiver}", '-redeemId', f"{redeem_id}", '-custIncAdd', f"{custodian_addr}"] else: # send porting tx send_cli = BtcGo.btc_send_porting_cli porting_id = memo command = [send_cli, '-amtAdd1', f'{amount}', '-outAdd1', f'{receiver}', '-portingId', f'{porting_id}'] return BtcGo._exe_command(command) @staticmethod def send_to_multi(sender, receiver_amount_dict: dict, password, *memo): """ :param receiver_amount_dict: :param sender: dummy, just add here to match with bnbcli signature. 
sender is hardcoded = miERaVjAsBriPmAEHSkfymRUo3xjaEoM2r in btcGo command :param password: dummy, just add here to match with bnbcli signature :param memo: :return: """ command = [BtcGo.btc_send_porting_cli] i = 1 for receiver, amount in receiver_amount_dict.items(): command.append(f'-amtAdd{i}') command.append(f'{amount}') command.append(f'-outAdd{i}') command.append(f'{receiver}') i += 1 command.append('-portingId') command.append(f'{memo[0]}') return BtcGo._exe_command(command) @staticmethod def get_tx_by_hash(tx_hash): command = [BtcGo.btc_get_tx_cli, '-txhash', tx_hash] return BtcGo._exe_command(command) @staticmethod def _exe_command(command): process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) stdout, stderr = process.communicate() logger.info(f"\n" f"+++ command: {' '.join(command)}\n\n" f"+++ out: {stdout}\n\n" f"+++ err: {stderr}") dict_response = json_extract(stdout) return BtcGo.BtcResponse(dict_response) class BtcResponse: def __init__(self, data): self.data = data def get_tx_hash(self): try: return self.data['tx']['hash'] except KeyError: return self.data['hash'] def get_block_height(self): try: return self.data['tx']["block_height"] except KeyError: return self.data["block_height"] def build_proof(self): logger.info() logger.info(f'Portal | Building proof | tx {self.get_tx_hash()}') tx_by_hash = BtcGo.get_tx_by_hash(self.get_tx_hash()) height = tx_by_hash.get_block_height() timeout = 2 * 60 * 60 # 2hours interval = 120 while timeout > 0: if height != -1: break WAIT(interval) tx_by_hash = BtcGo.get_tx_by_hash(self.get_tx_hash()) height = tx_by_hash.get_block_height() timeout -= interval WAIT(30) command = [BtcGo.btc_build_proof_cli, '-blockHeight', f'{height}', '-txhash', f'{self.get_tx_hash()}'] process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) stdout, stderr = process.communicate() logger.info(f"\n" f"+++ command: {' '.join(command)}\n\n" f"+++ out: {stdout}\n\n" f"+++ err: {stderr}") proof = stdout.split()[1] logger.debug(f""" Proof: ================= \n{proof}""") return proof
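For reference, the two memo encodings above differ only in what gets base64-encoded: a porting memo is the raw JSON struct, while a redeem memo is the SHA3-256 digest of the JSON struct. A minimal standalone sketch of both (the porting/redeem ids and the address are made-up sample values):

import base64
import hashlib

# Porting memo: base64(json) -- mirrors BnbCli.encode_porting_memo
porting_memo = base64.b64encode(b'{"PortingID":"porting-0001"}').decode('utf-8')
print(porting_memo)  # eyJQb3J0aW5nSUQiOiJwb3J0aW5nLTAwMDEifQ==

# Redeem memo: base64(sha3_256(json)) -- mirrors BnbCli.encode_redeem_memo
redeem_struct = '{"RedeemID":"%s","CustodianIncognitoAddress":"%s"}' % ('redeem-0001', '12RsampleIncognitoAddr')
redeem_memo = base64.b64encode(hashlib.sha3_256(redeem_struct.encode('ascii')).digest()).decode('utf-8')
print(redeem_memo)  # 44-char base64 string (32-byte digest)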
get_default_conn
identifier_name
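Both build_proof implementations package proofs the same way: coerce the numeric fields the RPC returns as strings to int, dump the dict as compact JSON (separators=(',', ':') strips all spaces), then base64 the bytes. A self-contained sketch of just that packaging step, with a dummy stand-in for the RPC result:

import base64
import json

result = {'height': '12345', 'proof': {'Proof': {'total': '1', 'index': '0'}}}  # dummy RPC result

proof = {"Proof": result['proof'], "BlockHeight": int(result['height'])}
proof['Proof']['Proof']['total'] = int(proof['Proof']['Proof']['total'])  # RPC returns these as strings
proof['Proof']['Proof']['index'] = int(proof['Proof']['Proof']['index'])

compact = json.dumps(proof, separators=(',', ':'))  # no spaces, smallest payload
encoded = base64.b64encode(compact.encode('ascii')).decode('utf-8')
print(encoded)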
dots.py
#!/usr/bin/env python
#dots.py
import sys
import os
import pickle
import time
import random

from VisionEgg.Textures import *
from VisionEgg.Core import *
from VisionEgg.FlowControl import TIME_INDEPENDENT
from VisionEgg.FlowControl import Presentation, FunctionController, TIME_SEC_ABSOLUTE, FRAMES_ABSOLUTE

sys.path.append(os.path.split(os.getcwd())[0])
import experiments
import subject
import shuffler
import CBalance

myArgs = sys.argv
try:
    number = int(myArgs[1])
except:
    number = 666

sub = subject.Subject(number, experiment = "dots")

###BEGIN SETTINGS
#total trials
trials = 240
#of the total trials, how many do you want to run (good for testing), put -1 for all
subtrials = 30
#blocks to be displayed
blocks = ["sequential", "paired", "overlapping"]
#the text presented when a break is given
breakText = "Time for a break.\nPRESS SPACE TO CONTINUE."
#take a break after this many trials
break_trial = 60
#total duration of each dot array, in seconds
dot_duration = .750
#total duration of each mask
mask_dur = 0.5
mask_img = Image.open("mask.BMP")
#size of fixation cross
crossSize = 80
#duration of fixation cross
cross_duration = .750
###END SETTINGS

if os.path.exists("cb.pck"):
    f = open("cb.pck")
    cb = pickle.load(f)
    f.close()
else:
    cb = CBalance.Counterbalance(blocks)
blockOrder = cb.advance()
f = open("cb.pck", "w")
pickle.dump(cb, f)
f.close()

stimLib = "stimuli"
screen = get_default_screen()
screen.parameters.bgcolor = (.52, .51, .52)
pygame.init()
fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0))
trial = 1

#HANDLERS
def put_image_sequential(t_abs):
    global phase
    global start
    if not phase:
        start = t_abs
        phase = "dots1"
    t = t_abs - start
    if t >= dot_duration and phase == "dots1":
        texture_object.put_sub_image(Image.open(os.path.join(stimLib,fname2)))
        phase = "dots2"
    elif t >= (dot_duration * 2) and phase == "dots2":
        texture_object.put_sub_image(mask_img)
        phase = "mask"
    elif t >= (dot_duration * 2 + mask_dur) and phase == "mask":
        p.parameters.viewports = [fixCross]
        phase = "cross"
    elif t >= (dot_duration * 2 + mask_dur + cross_duration):
        p.parameters.go_duration = (0, 'frames')  # tuple, for consistency with the other handlers

def
(t_abs): global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object1.put_sub_image(mask_img) texture_object2.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames') def put_image_overlapping(t_abs): global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames') def keyFunc(event): global color global cDict global yellowB global blueB global trial global block global side global pressed RT = p.time_sec_since_go * 1000 if block == "sequential": RT-= (dot_duration * 1000) correct = cDict[color] sub.inputData(trial, "RT", RT) if event.key == pygame.locals.K_LCTRL: sub.inputData(trial, "key", "L_CTRL") elif event.key == pygame.locals.K_RCTRL: sub.inputData(trial, "key", "R_CTRL") else: sub.inputData(trial, "key", "NA") if not pressed: if block == "paired": if event.key == pygame.locals.K_LCTRL: if side == "large": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if side == "large": sub.inputData(trial, "ACC", 0) else: sub.inputData(trial, "ACC", 1) else: if event.key == pygame.locals.K_LCTRL: if yellowB == "Left CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Left CTRL" and correct == "blue": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if yellowB == "Right CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Right CTRL" and correct == "blue": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) if RT <= 0: sub.inputData(trial, "ACC", 3) else: sub.inputData(trial, "ACC", 2) pressed = True #fixation pause #add response handlers fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0)) blockIns = {} blockIns['paired'] = "The groups will both appear at the same time." blockIns['sequential'] = "The groups will appear one after the other." blockIns['overlapping'] = "The groups will both appear at the same time." for block in blockOrder: print "creating instructions..." if os.path.exists("colButton.pck"): f = open("colButton.pck", "r") col = pickle.load(f) col.reverse() f.close() else: col = ["Left CTRL", "Right CTRL"] f = open("colButton.pck", "w") pickle.dump(col, f) f.close() yellowB = col[0] blueB = col[1] if block == "paired": instructionText = "In this stage you will see 2 groups of dots.\n%s\n Press LEFT CTRL when there are more dots on the left side of the screen.\n Press RIGHT CTRL when there are more dots on the right side of the screen.\n\nPRESS SPACE TO CONTINUE." % (blockIns[block]) else: instructionText = "In this stage you will see 2 groups of dots.\n%s\n Each group will be either yellow or blue. Your job is to choose which group has more dots in it.\n\nPress %s for yellow.\nPress %s for blue.\n\nPRESS SPACE TO CONTINUE." 
% (blockIns[block], yellowB, blueB) print "entering block %s" % block ratios = shuffler.Condition([.9, .75, .66, .5, .33, .25], "ratio", 6) seeds = shuffler.Condition([6, 7, 8, 9, 10], "seed", 6) size = shuffler.Condition(["con", "incon"], "size", 5) exemplars = shuffler.Condition([1, 2, 3, 4], "exemplar", 20) order = ["large", "small"] color = ["C1", "C2"] cDict = {} cDict["C1"] = "blue" cDict["C2"] = "yellow" print "loading ratio/seed/size/exemplar order..." myShuffler = shuffler.MultiShuffler([ratios, seeds, size, exemplars], trials) stimList = myShuffler.shuffle() sides = shuffler.Condition(order, "sides", 5) colors = shuffler.Condition(color, "colors", 6) print "loading sides/colors order..." csShuffler = shuffler.MultiShuffler([sides, colors], trials) csList = csShuffler.shuffle() print "configuring stimulus displays windows..." if block == "overlapping" or block == "sequential": x = screen.size[0] / 2 y = screen.size[1] / 2 else: x = screen.size[0] / 4 y = screen.size[1] / 2 print "Beginning block now..." experiments.showInstructions(screen, instructionText, textcolor=(0, 0, 0)) if subtrials == -1: stimList = stimList csList = csList else: stimList = stimList[0:subtrials] csList = csList[0:subtrials] for stim, cs in zip(stimList, csList): pressed = False ratio = getattr(stim, "ratio") n1 = getattr(stim, "seed") n2 = int(round(n1 * 1/ratio, 0)) size = getattr(stim, "size") exemplar = getattr(stim, "exemplar") side = getattr(cs, "sides") color = getattr(cs, "colors") sub.inputData(trial, "ACC", "NA") sub.inputData(trial, "RT", "NA") sub.inputData(trial, "block", block) sub.inputData(trial, "ratio", ratio) sub.inputData(trial, "n1", n1) sub.inputData(trial, "n2", n2) sub.inputData(trial, "sizectrl", size) sub.inputData(trial, "exemplar", exemplar) sub.inputData(trial, "order", side) sub.inputData(trial, "largecolor", cDict[color]) sub.inputData(trial, "yellowButton", yellowB) sub.inputData(trial, "blueButton", blueB) print color if block == "overlapping": phase = "" fname = "%s_%s_%s_%s_%s_OL.bmp" % (ratio, n1, color, size, exemplar) t = Texture(Image.open(os.path.join(stimLib, fname))) s = TextureStimulus(texture = t, position = (x, y), anchor = 'center') texture_object = s.parameters.texture.get_texture_object() v = Viewport(screen=screen, stimuli=[s]) p = Presentation(go_duration = ('forever', ), viewports=[v]) p.add_controller(None, None, FunctionController(during_go_func=put_image_overlapping, temporal_variables = TIME_SEC_ABSOLUTE)) p.parameters.handle_event_callbacks=[(pygame.locals.KEYDOWN, keyFunc)] p.go() else: if side == "large": fname1 = "%s_%s_%s_%s_%s_S2.bmp" % (ratio, n1, color, size, exemplar) fname2 = "%s_%s_%s_%s_%s_S1.bmp" % (ratio, n1, color, size, exemplar) else: fname1 = "%s_%s_%s_%s_%s_S1.bmp" % (ratio, n1, color, size, exemplar) fname2 = "%s_%s_%s_%s_%s_S2.bmp" % (ratio, n1, color, size, exemplar) #### t1 = Texture(Image.open(os.path.join(stimLib,fname1))) t2 = Texture(Image.open(os.path.join(stimLib,fname2))) if block == "sequential": phase = "" s = TextureStimulus(texture = t1, position = (x, y), anchor = 'center') texture_object = s.parameters.texture.get_texture_object() v = Viewport(screen=screen, stimuli=[s]) p = Presentation(go_duration=('forever', ), viewports=[v]) p.add_controller(None, None, FunctionController(during_go_func=put_image_sequential, temporal_variables = TIME_SEC_ABSOLUTE)) p.parameters.handle_event_callbacks=[(pygame.locals.KEYDOWN, keyFunc)] p.go() else: phase = "" s1 = TextureStimulus(texture = t1, position = (x, y), anchor = 'center') 
s2 = TextureStimulus(texture = t2, position = (x * 3, y), anchor = 'center') texture_object1 = s1.parameters.texture.get_texture_object() texture_object2 = s2.parameters.texture.get_texture_object() v = Viewport(screen=screen, stimuli=[s1,s2]) p = Presentation(go_duration=('forever', ), viewports=[v]) p.add_controller(None, None, FunctionController(during_go_func=put_image_dual, temporal_variables = TIME_SEC_ABSOLUTE)) p.parameters.handle_event_callbacks=[(pygame.locals.KEYDOWN, keyFunc)] p.go() if trial % break_trial == 0 and trial != trials: print trial, "BREAK TIME" experiments.showInstructions(screen, breakText, textcolor = [0, 0, 0]) trial += 1 sub.printData()
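All three put_image_* handlers above implement the same controller pattern: called once per frame with an absolute clock, they latch the start time on the first call and advance through dots, mask, and cross as thresholds pass. A framework-free sketch of the pattern (show is a hypothetical callback standing in for the texture/viewport swaps):

dot_duration, mask_dur, cross_duration = 0.750, 0.5, 0.750

phase = ""
start = 0.0

def tick(t_abs, show):
    global phase, start
    if not phase:                      # first frame: latch the start time
        start = t_abs
        phase = "dots"
        show("dots")
    t = t_abs - start
    if t >= dot_duration and phase == "dots":
        phase = "mask"
        show("mask")
    elif t >= dot_duration + mask_dur and phase == "mask":
        phase = "cross"
        show("cross")
    elif t >= dot_duration + mask_dur + cross_duration and phase == "cross":
        phase = "done"
        show("done")                   # dots.py ends the trial here via go_duration = (0, 'frames')

for ms in range(0, 2500, 100):         # drive the machine with a fake 10 Hz clock
    tick(ms / 1000.0, lambda name: None)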
put_image_dual
identifier_name
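Two bits of state in dots.py persist across runs via pickle: the counterbalanced block order in cb.pck and the yellow/blue button mapping in colButton.pck, which is reversed on each load so the mapping alternates. A minimal sketch of that load-flip-save pattern (CBalance itself is not needed for the idea):

import os
import pickle

def alternating(path, default):
    # Load the previous value and flip it; fall back to the default on first run.
    if os.path.exists(path):
        with open(path, 'rb') as f:
            value = pickle.load(f)
        value.reverse()
    else:
        value = list(default)
    with open(path, 'wb') as f:  # persist so the next load sees the flipped order
        pickle.dump(value, f)
    return value

yellowB, blueB = alternating('colButton.pck', ['Left CTRL', 'Right CTRL'])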
dots.py
#!/usr/bin/env python #dots.py import sys import os import pickle import time import random from VisionEgg.Textures import * from VisionEgg.Core import * from VisionEgg.FlowControl import TIME_INDEPENDENT from VisionEgg.FlowControl import Presentation, FunctionController, TIME_SEC_ABSOLUTE, FRAMES_ABSOLUTE sys.path.append(os.path.split(os.getcwd())[0]) import experiments import subject import shuffler import CBalance myArgs = sys.argv try: number = int(myArgs[1]) except: number = 666 sub = subject.Subject(number, experiment = "dots") ###BEGIN SETTINGS #total trials trials = 240 #of the total trials, how many do you want to run (good for testing), put -1 for all subtrials = 30 #blocks to be displyaed blocks = ["sequential", "paired", "overlapping"] #the text presented when a break is given breakText = "Time for a break.\nPRESS SPACE TO CONTINUE." #take a break after this many trials break_trial = 60 #total duration of each dot array, in seconds dot_duration = .750 #total duration of each mask mask_dur = 0.5 mask_img = Image.open("mask.BMP") #size of fixation cross crossSize = 80 #duration of fixation cross cross_duration = .750 ###END SETTINGS if os.path.exists("cb.pck"): f = open("cb.pck") cb = pickle.load(f) f.close() else: cb = CBalance.Counterbalance(blocks) blockOrder = cb.advance() f = open("cb.pck", "w") pickle.dump(cb, f) f.close() stimLib = "stimuli" screen = get_default_screen() screen.parameters.bgcolor = (.52, .51, .52) pygame.init() fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0)) trial = 1 #HANDLERS def put_image_sequential(t_abs): global phase global start if not phase: start = t_abs phase = "dots1" t = t_abs - start if t >= dot_duration and phase == "dots1": texture_object.put_sub_image(Image.open(os.path.join(stimLib,fname2))) phase = "dots2" elif t >= (dot_duration * 2) and phase == "dots2": texture_object.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration * 2 + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration * 2 + mask_dur + cross_duration): p.parameters.go_duration = [0, 'frames'] def put_image_dual(t_abs): global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object1.put_sub_image(mask_img) texture_object2.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames') def put_image_overlapping(t_abs): global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames') def keyFunc(event): global color global cDict global yellowB global blueB global trial global block global side global pressed RT = p.time_sec_since_go * 1000 if block == "sequential": RT-= (dot_duration * 1000) correct = cDict[color] sub.inputData(trial, "RT", RT) if event.key == pygame.locals.K_LCTRL: sub.inputData(trial, "key", "L_CTRL") elif event.key == pygame.locals.K_RCTRL: sub.inputData(trial, "key", "R_CTRL") else: sub.inputData(trial, "key", "NA") if not pressed: if block == "paired": if event.key == pygame.locals.K_LCTRL: if 
side == "large": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if side == "large": sub.inputData(trial, "ACC", 0) else: sub.inputData(trial, "ACC", 1) else: if event.key == pygame.locals.K_LCTRL: if yellowB == "Left CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Left CTRL" and correct == "blue": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if yellowB == "Right CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Right CTRL" and correct == "blue":
else: sub.inputData(trial, "ACC", 0) if RT <= 0: sub.inputData(trial, "ACC", 3) else: sub.inputData(trial, "ACC", 2) pressed = True #fixation pause #add response handlers fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0)) blockIns = {} blockIns['paired'] = "The groups will both appear at the same time." blockIns['sequential'] = "The groups will appear one after the other." blockIns['overlapping'] = "The groups will both appear at the same time." for block in blockOrder: print "creating instructions..." if os.path.exists("colButton.pck"): f = open("colButton.pck", "r") col = pickle.load(f) col.reverse() f.close() else: col = ["Left CTRL", "Right CTRL"] f = open("colButton.pck", "w") pickle.dump(col, f) f.close() yellowB = col[0] blueB = col[1] if block == "paired": instructionText = "In this stage you will see 2 groups of dots.\n%s\n Press LEFT CTRL when there are more dots on the left side of the screen.\n Press RIGHT CTRL when there are more dots on the right side of the screen.\n\nPRESS SPACE TO CONTINUE." % (blockIns[block]) else: instructionText = "In this stage you will see 2 groups of dots.\n%s\n Each group will be either yellow or blue. Your job is to choose which group has more dots in it.\n\nPress %s for yellow.\nPress %s for blue.\n\nPRESS SPACE TO CONTINUE." % (blockIns[block], yellowB, blueB) print "entering block %s" % block ratios = shuffler.Condition([.9, .75, .66, .5, .33, .25], "ratio", 6) seeds = shuffler.Condition([6, 7, 8, 9, 10], "seed", 6) size = shuffler.Condition(["con", "incon"], "size", 5) exemplars = shuffler.Condition([1, 2, 3, 4], "exemplar", 20) order = ["large", "small"] color = ["C1", "C2"] cDict = {} cDict["C1"] = "blue" cDict["C2"] = "yellow" print "loading ratio/seed/size/exemplar order..." myShuffler = shuffler.MultiShuffler([ratios, seeds, size, exemplars], trials) stimList = myShuffler.shuffle() sides = shuffler.Condition(order, "sides", 5) colors = shuffler.Condition(color, "colors", 6) print "loading sides/colors order..." csShuffler = shuffler.MultiShuffler([sides, colors], trials) csList = csShuffler.shuffle() print "configuring stimulus displays windows..." if block == "overlapping" or block == "sequential": x = screen.size[0] / 2 y = screen.size[1] / 2 else: x = screen.size[0] / 4 y = screen.size[1] / 2 print "Beginning block now..." 
experiments.showInstructions(screen, instructionText, textcolor=(0, 0, 0)) if subtrials == -1: stimList = stimList csList = csList else: stimList = stimList[0:subtrials] csList = csList[0:subtrials] for stim, cs in zip(stimList, csList): pressed = False ratio = getattr(stim, "ratio") n1 = getattr(stim, "seed") n2 = int(round(n1 * 1/ratio, 0)) size = getattr(stim, "size") exemplar = getattr(stim, "exemplar") side = getattr(cs, "sides") color = getattr(cs, "colors") sub.inputData(trial, "ACC", "NA") sub.inputData(trial, "RT", "NA") sub.inputData(trial, "block", block) sub.inputData(trial, "ratio", ratio) sub.inputData(trial, "n1", n1) sub.inputData(trial, "n2", n2) sub.inputData(trial, "sizectrl", size) sub.inputData(trial, "exemplar", exemplar) sub.inputData(trial, "order", side) sub.inputData(trial, "largecolor", cDict[color]) sub.inputData(trial, "yellowButton", yellowB) sub.inputData(trial, "blueButton", blueB) print color if block == "overlapping": phase = "" fname = "%s_%s_%s_%s_%s_OL.bmp" % (ratio, n1, color, size, exemplar) t = Texture(Image.open(os.path.join(stimLib, fname))) s = TextureStimulus(texture = t, position = (x, y), anchor = 'center') texture_object = s.parameters.texture.get_texture_object() v = Viewport(screen=screen, stimuli=[s]) p = Presentation(go_duration = ('forever', ), viewports=[v]) p.add_controller(None, None, FunctionController(during_go_func=put_image_overlapping, temporal_variables = TIME_SEC_ABSOLUTE)) p.parameters.handle_event_callbacks=[(pygame.locals.KEYDOWN, keyFunc)] p.go() else: if side == "large": fname1 = "%s_%s_%s_%s_%s_S2.bmp" % (ratio, n1, color, size, exemplar) fname2 = "%s_%s_%s_%s_%s_S1.bmp" % (ratio, n1, color, size, exemplar) else: fname1 = "%s_%s_%s_%s_%s_S1.bmp" % (ratio, n1, color, size, exemplar) fname2 = "%s_%s_%s_%s_%s_S2.bmp" % (ratio, n1, color, size, exemplar) #### t1 = Texture(Image.open(os.path.join(stimLib,fname1))) t2 = Texture(Image.open(os.path.join(stimLib,fname2))) if block == "sequential": phase = "" s = TextureStimulus(texture = t1, position = (x, y), anchor = 'center') texture_object = s.parameters.texture.get_texture_object() v = Viewport(screen=screen, stimuli=[s]) p = Presentation(go_duration=('forever', ), viewports=[v]) p.add_controller(None, None, FunctionController(during_go_func=put_image_sequential, temporal_variables = TIME_SEC_ABSOLUTE)) p.parameters.handle_event_callbacks=[(pygame.locals.KEYDOWN, keyFunc)] p.go() else: phase = "" s1 = TextureStimulus(texture = t1, position = (x, y), anchor = 'center') s2 = TextureStimulus(texture = t2, position = (x * 3, y), anchor = 'center') texture_object1 = s1.parameters.texture.get_texture_object() texture_object2 = s2.parameters.texture.get_texture_object() v = Viewport(screen=screen, stimuli=[s1,s2]) p = Presentation(go_duration=('forever', ), viewports=[v]) p.add_controller(None, None, FunctionController(during_go_func=put_image_dual, temporal_variables = TIME_SEC_ABSOLUTE)) p.parameters.handle_event_callbacks=[(pygame.locals.KEYDOWN, keyFunc)] p.go() if trial % break_trial == 0 and trial != trials: print trial, "BREAK TIME" experiments.showInstructions(screen, breakText, textcolor = [0, 0, 0]) trial += 1 sub.printData()
sub.inputData(trial, "ACC", 1)
random_line_split
dots.py
#!/usr/bin/env python #dots.py import sys import os import pickle import time import random from VisionEgg.Textures import * from VisionEgg.Core import * from VisionEgg.FlowControl import TIME_INDEPENDENT from VisionEgg.FlowControl import Presentation, FunctionController, TIME_SEC_ABSOLUTE, FRAMES_ABSOLUTE sys.path.append(os.path.split(os.getcwd())[0]) import experiments import subject import shuffler import CBalance myArgs = sys.argv try: number = int(myArgs[1]) except: number = 666 sub = subject.Subject(number, experiment = "dots") ###BEGIN SETTINGS #total trials trials = 240 #of the total trials, how many do you want to run (good for testing), put -1 for all subtrials = 30 #blocks to be displyaed blocks = ["sequential", "paired", "overlapping"] #the text presented when a break is given breakText = "Time for a break.\nPRESS SPACE TO CONTINUE." #take a break after this many trials break_trial = 60 #total duration of each dot array, in seconds dot_duration = .750 #total duration of each mask mask_dur = 0.5 mask_img = Image.open("mask.BMP") #size of fixation cross crossSize = 80 #duration of fixation cross cross_duration = .750 ###END SETTINGS if os.path.exists("cb.pck"): f = open("cb.pck") cb = pickle.load(f) f.close() else: cb = CBalance.Counterbalance(blocks) blockOrder = cb.advance() f = open("cb.pck", "w") pickle.dump(cb, f) f.close() stimLib = "stimuli" screen = get_default_screen() screen.parameters.bgcolor = (.52, .51, .52) pygame.init() fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0)) trial = 1 #HANDLERS def put_image_sequential(t_abs): global phase global start if not phase: start = t_abs phase = "dots1" t = t_abs - start if t >= dot_duration and phase == "dots1": texture_object.put_sub_image(Image.open(os.path.join(stimLib,fname2))) phase = "dots2" elif t >= (dot_duration * 2) and phase == "dots2": texture_object.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration * 2 + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration * 2 + mask_dur + cross_duration): p.parameters.go_duration = [0, 'frames'] def put_image_dual(t_abs): global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object1.put_sub_image(mask_img) texture_object2.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames') def put_image_overlapping(t_abs):
def keyFunc(event): global color global cDict global yellowB global blueB global trial global block global side global pressed RT = p.time_sec_since_go * 1000 if block == "sequential": RT-= (dot_duration * 1000) correct = cDict[color] sub.inputData(trial, "RT", RT) if event.key == pygame.locals.K_LCTRL: sub.inputData(trial, "key", "L_CTRL") elif event.key == pygame.locals.K_RCTRL: sub.inputData(trial, "key", "R_CTRL") else: sub.inputData(trial, "key", "NA") if not pressed: if block == "paired": if event.key == pygame.locals.K_LCTRL: if side == "large": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if side == "large": sub.inputData(trial, "ACC", 0) else: sub.inputData(trial, "ACC", 1) else: if event.key == pygame.locals.K_LCTRL: if yellowB == "Left CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Left CTRL" and correct == "blue": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if yellowB == "Right CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Right CTRL" and correct == "blue": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) if RT <= 0: sub.inputData(trial, "ACC", 3) else: sub.inputData(trial, "ACC", 2) pressed = True #fixation pause #add response handlers fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0)) blockIns = {} blockIns['paired'] = "The groups will both appear at the same time." blockIns['sequential'] = "The groups will appear one after the other." blockIns['overlapping'] = "The groups will both appear at the same time." for block in blockOrder: print "creating instructions..." if os.path.exists("colButton.pck"): f = open("colButton.pck", "r") col = pickle.load(f) col.reverse() f.close() else: col = ["Left CTRL", "Right CTRL"] f = open("colButton.pck", "w") pickle.dump(col, f) f.close() yellowB = col[0] blueB = col[1] if block == "paired": instructionText = "In this stage you will see 2 groups of dots.\n%s\n Press LEFT CTRL when there are more dots on the left side of the screen.\n Press RIGHT CTRL when there are more dots on the right side of the screen.\n\nPRESS SPACE TO CONTINUE." % (blockIns[block]) else: instructionText = "In this stage you will see 2 groups of dots.\n%s\n Each group will be either yellow or blue. Your job is to choose which group has more dots in it.\n\nPress %s for yellow.\nPress %s for blue.\n\nPRESS SPACE TO CONTINUE." % (blockIns[block], yellowB, blueB) print "entering block %s" % block ratios = shuffler.Condition([.9, .75, .66, .5, .33, .25], "ratio", 6) seeds = shuffler.Condition([6, 7, 8, 9, 10], "seed", 6) size = shuffler.Condition(["con", "incon"], "size", 5) exemplars = shuffler.Condition([1, 2, 3, 4], "exemplar", 20) order = ["large", "small"] color = ["C1", "C2"] cDict = {} cDict["C1"] = "blue" cDict["C2"] = "yellow" print "loading ratio/seed/size/exemplar order..." myShuffler = shuffler.MultiShuffler([ratios, seeds, size, exemplars], trials) stimList = myShuffler.shuffle() sides = shuffler.Condition(order, "sides", 5) colors = shuffler.Condition(color, "colors", 6) print "loading sides/colors order..." csShuffler = shuffler.MultiShuffler([sides, colors], trials) csList = csShuffler.shuffle() print "configuring stimulus displays windows..." if block == "overlapping" or block == "sequential": x = screen.size[0] / 2 y = screen.size[1] / 2 else: x = screen.size[0] / 4 y = screen.size[1] / 2 print "Beginning block now..." 
experiments.showInstructions(screen, instructionText, textcolor=(0, 0, 0)) if subtrials == -1: stimList = stimList csList = csList else: stimList = stimList[0:subtrials] csList = csList[0:subtrials] for stim, cs in zip(stimList, csList): pressed = False ratio = getattr(stim, "ratio") n1 = getattr(stim, "seed") n2 = int(round(n1 * 1/ratio, 0)) size = getattr(stim, "size") exemplar = getattr(stim, "exemplar") side = getattr(cs, "sides") color = getattr(cs, "colors") sub.inputData(trial, "ACC", "NA") sub.inputData(trial, "RT", "NA") sub.inputData(trial, "block", block) sub.inputData(trial, "ratio", ratio) sub.inputData(trial, "n1", n1) sub.inputData(trial, "n2", n2) sub.inputData(trial, "sizectrl", size) sub.inputData(trial, "exemplar", exemplar) sub.inputData(trial, "order", side) sub.inputData(trial, "largecolor", cDict[color]) sub.inputData(trial, "yellowButton", yellowB) sub.inputData(trial, "blueButton", blueB) print color if block == "overlapping": phase = "" fname = "%s_%s_%s_%s_%s_OL.bmp" % (ratio, n1, color, size, exemplar) t = Texture(Image.open(os.path.join(stimLib, fname))) s = TextureStimulus(texture = t, position = (x, y), anchor = 'center') texture_object = s.parameters.texture.get_texture_object() v = Viewport(screen=screen, stimuli=[s]) p = Presentation(go_duration = ('forever', ), viewports=[v]) p.add_controller(None, None, FunctionController(during_go_func=put_image_overlapping, temporal_variables = TIME_SEC_ABSOLUTE)) p.parameters.handle_event_callbacks=[(pygame.locals.KEYDOWN, keyFunc)] p.go() else: if side == "large": fname1 = "%s_%s_%s_%s_%s_S2.bmp" % (ratio, n1, color, size, exemplar) fname2 = "%s_%s_%s_%s_%s_S1.bmp" % (ratio, n1, color, size, exemplar) else: fname1 = "%s_%s_%s_%s_%s_S1.bmp" % (ratio, n1, color, size, exemplar) fname2 = "%s_%s_%s_%s_%s_S2.bmp" % (ratio, n1, color, size, exemplar) #### t1 = Texture(Image.open(os.path.join(stimLib,fname1))) t2 = Texture(Image.open(os.path.join(stimLib,fname2))) if block == "sequential": phase = "" s = TextureStimulus(texture = t1, position = (x, y), anchor = 'center') texture_object = s.parameters.texture.get_texture_object() v = Viewport(screen=screen, stimuli=[s]) p = Presentation(go_duration=('forever', ), viewports=[v]) p.add_controller(None, None, FunctionController(during_go_func=put_image_sequential, temporal_variables = TIME_SEC_ABSOLUTE)) p.parameters.handle_event_callbacks=[(pygame.locals.KEYDOWN, keyFunc)] p.go() else: phase = "" s1 = TextureStimulus(texture = t1, position = (x, y), anchor = 'center') s2 = TextureStimulus(texture = t2, position = (x * 3, y), anchor = 'center') texture_object1 = s1.parameters.texture.get_texture_object() texture_object2 = s2.parameters.texture.get_texture_object() v = Viewport(screen=screen, stimuli=[s1,s2]) p = Presentation(go_duration=('forever', ), viewports=[v]) p.add_controller(None, None, FunctionController(during_go_func=put_image_dual, temporal_variables = TIME_SEC_ABSOLUTE)) p.parameters.handle_event_callbacks=[(pygame.locals.KEYDOWN, keyFunc)] p.go() if trial % break_trial == 0 and trial != trials: print trial, "BREAK TIME" experiments.showInstructions(screen, breakText, textcolor = [0, 0, 0]) trial += 1 sub.printData()
global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames')
identifier_body
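The `identifier_body` middle above restores the body of one of the `put_image_*` handlers: a timing state machine that advances dots -> mask -> fixation cross as absolute time elapses. A minimal sketch of that logic, re-expressed in Rust (the enum, function names, and the driver loop are illustrative; the original mutates a live VisionEgg Presentation instead of returning the next phase):

```rust
// Sketch of the dots -> mask -> cross timing used by the handlers above.
// Durations mirror the script's settings; the original swaps textures and
// viewports on a Presentation rather than returning the next phase.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Phase { Dots, Mask, Cross, Done }

fn next_phase(elapsed: f32, phase: Phase, dot: f32, mask: f32, cross: f32) -> Phase {
    match phase {
        Phase::Dots if elapsed >= dot => Phase::Mask,                 // blank dots with the mask image
        Phase::Mask if elapsed >= dot + mask => Phase::Cross,         // switch viewport to fixation cross
        Phase::Cross if elapsed >= dot + mask + cross => Phase::Done, // stop the go loop
        p => p,
    }
}

fn main() {
    let mut phase = Phase::Dots;
    for ms in 0..2200 {
        phase = next_phase(ms as f32 / 1000.0, phase, 0.750, 0.5, 0.750);
    }
    assert_eq!(phase, Phase::Done); // 0.75 + 0.5 + 0.75 = 2.0 s total
}
```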
dots.py
#!/usr/bin/env python #dots.py import sys import os import pickle import time import random from VisionEgg.Textures import * from VisionEgg.Core import * from VisionEgg.FlowControl import TIME_INDEPENDENT from VisionEgg.FlowControl import Presentation, FunctionController, TIME_SEC_ABSOLUTE, FRAMES_ABSOLUTE sys.path.append(os.path.split(os.getcwd())[0]) import experiments import subject import shuffler import CBalance myArgs = sys.argv try: number = int(myArgs[1]) except: number = 666 sub = subject.Subject(number, experiment = "dots") ###BEGIN SETTINGS #total trials trials = 240 #of the total trials, how many do you want to run (good for testing), put -1 for all subtrials = 30 #blocks to be displyaed blocks = ["sequential", "paired", "overlapping"] #the text presented when a break is given breakText = "Time for a break.\nPRESS SPACE TO CONTINUE." #take a break after this many trials break_trial = 60 #total duration of each dot array, in seconds dot_duration = .750 #total duration of each mask mask_dur = 0.5 mask_img = Image.open("mask.BMP") #size of fixation cross crossSize = 80 #duration of fixation cross cross_duration = .750 ###END SETTINGS if os.path.exists("cb.pck"): f = open("cb.pck") cb = pickle.load(f) f.close() else: cb = CBalance.Counterbalance(blocks) blockOrder = cb.advance() f = open("cb.pck", "w") pickle.dump(cb, f) f.close() stimLib = "stimuli" screen = get_default_screen() screen.parameters.bgcolor = (.52, .51, .52) pygame.init() fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0)) trial = 1 #HANDLERS def put_image_sequential(t_abs): global phase global start if not phase: start = t_abs phase = "dots1" t = t_abs - start if t >= dot_duration and phase == "dots1": texture_object.put_sub_image(Image.open(os.path.join(stimLib,fname2))) phase = "dots2" elif t >= (dot_duration * 2) and phase == "dots2": texture_object.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration * 2 + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration * 2 + mask_dur + cross_duration): p.parameters.go_duration = [0, 'frames'] def put_image_dual(t_abs): global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object1.put_sub_image(mask_img) texture_object2.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames') def put_image_overlapping(t_abs): global phase global start if not phase: start = t_abs phase = "dots" t = t_abs - start if t >= dot_duration and phase == "dots": texture_object.put_sub_image(mask_img) phase = "mask" elif t >= (dot_duration + mask_dur) and phase == "mask": p.parameters.viewports = [fixCross] phase = "cross" elif t >= (dot_duration + mask_dur + cross_duration): p.parameters.go_duration = (0, 'frames') def keyFunc(event): global color global cDict global yellowB global blueB global trial global block global side global pressed RT = p.time_sec_since_go * 1000 if block == "sequential": RT-= (dot_duration * 1000) correct = cDict[color] sub.inputData(trial, "RT", RT) if event.key == pygame.locals.K_LCTRL: sub.inputData(trial, "key", "L_CTRL") elif event.key == pygame.locals.K_RCTRL: sub.inputData(trial, "key", "R_CTRL") else: sub.inputData(trial, "key", "NA") if not pressed: if block == "paired": if event.key == pygame.locals.K_LCTRL: if 
side == "large": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if side == "large": sub.inputData(trial, "ACC", 0) else: sub.inputData(trial, "ACC", 1) else: if event.key == pygame.locals.K_LCTRL: if yellowB == "Left CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Left CTRL" and correct == "blue": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) elif event.key == pygame.locals.K_RCTRL: if yellowB == "Right CTRL" and correct == "yellow": sub.inputData(trial, "ACC", 1) elif blueB == "Right CTRL" and correct == "blue": sub.inputData(trial, "ACC", 1) else: sub.inputData(trial, "ACC", 0) if RT <= 0: sub.inputData(trial, "ACC", 3) else: sub.inputData(trial, "ACC", 2) pressed = True #fixation pause #add response handlers fixText, fixCross = experiments.printWord(screen, '+', crossSize, (0, 0, 0)) blockIns = {} blockIns['paired'] = "The groups will both appear at the same time." blockIns['sequential'] = "The groups will appear one after the other." blockIns['overlapping'] = "The groups will both appear at the same time." for block in blockOrder: print "creating instructions..." if os.path.exists("colButton.pck"): f = open("colButton.pck", "r") col = pickle.load(f) col.reverse() f.close() else: col = ["Left CTRL", "Right CTRL"] f = open("colButton.pck", "w") pickle.dump(col, f) f.close() yellowB = col[0] blueB = col[1] if block == "paired": instructionText = "In this stage you will see 2 groups of dots.\n%s\n Press LEFT CTRL when there are more dots on the left side of the screen.\n Press RIGHT CTRL when there are more dots on the right side of the screen.\n\nPRESS SPACE TO CONTINUE." % (blockIns[block]) else:
print "entering block %s" % block ratios = shuffler.Condition([.9, .75, .66, .5, .33, .25], "ratio", 6) seeds = shuffler.Condition([6, 7, 8, 9, 10], "seed", 6) size = shuffler.Condition(["con", "incon"], "size", 5) exemplars = shuffler.Condition([1, 2, 3, 4], "exemplar", 20) order = ["large", "small"] color = ["C1", "C2"] cDict = {} cDict["C1"] = "blue" cDict["C2"] = "yellow" print "loading ratio/seed/size/exemplar order..." myShuffler = shuffler.MultiShuffler([ratios, seeds, size, exemplars], trials) stimList = myShuffler.shuffle() sides = shuffler.Condition(order, "sides", 5) colors = shuffler.Condition(color, "colors", 6) print "loading sides/colors order..." csShuffler = shuffler.MultiShuffler([sides, colors], trials) csList = csShuffler.shuffle() print "configuring stimulus displays windows..." if block == "overlapping" or block == "sequential": x = screen.size[0] / 2 y = screen.size[1] / 2 else: x = screen.size[0] / 4 y = screen.size[1] / 2 print "Beginning block now..." experiments.showInstructions(screen, instructionText, textcolor=(0, 0, 0)) if subtrials == -1: stimList = stimList csList = csList else: stimList = stimList[0:subtrials] csList = csList[0:subtrials] for stim, cs in zip(stimList, csList): pressed = False ratio = getattr(stim, "ratio") n1 = getattr(stim, "seed") n2 = int(round(n1 * 1/ratio, 0)) size = getattr(stim, "size") exemplar = getattr(stim, "exemplar") side = getattr(cs, "sides") color = getattr(cs, "colors") sub.inputData(trial, "ACC", "NA") sub.inputData(trial, "RT", "NA") sub.inputData(trial, "block", block) sub.inputData(trial, "ratio", ratio) sub.inputData(trial, "n1", n1) sub.inputData(trial, "n2", n2) sub.inputData(trial, "sizectrl", size) sub.inputData(trial, "exemplar", exemplar) sub.inputData(trial, "order", side) sub.inputData(trial, "largecolor", cDict[color]) sub.inputData(trial, "yellowButton", yellowB) sub.inputData(trial, "blueButton", blueB) print color if block == "overlapping": phase = "" fname = "%s_%s_%s_%s_%s_OL.bmp" % (ratio, n1, color, size, exemplar) t = Texture(Image.open(os.path.join(stimLib, fname))) s = TextureStimulus(texture = t, position = (x, y), anchor = 'center') texture_object = s.parameters.texture.get_texture_object() v = Viewport(screen=screen, stimuli=[s]) p = Presentation(go_duration = ('forever', ), viewports=[v]) p.add_controller(None, None, FunctionController(during_go_func=put_image_overlapping, temporal_variables = TIME_SEC_ABSOLUTE)) p.parameters.handle_event_callbacks=[(pygame.locals.KEYDOWN, keyFunc)] p.go() else: if side == "large": fname1 = "%s_%s_%s_%s_%s_S2.bmp" % (ratio, n1, color, size, exemplar) fname2 = "%s_%s_%s_%s_%s_S1.bmp" % (ratio, n1, color, size, exemplar) else: fname1 = "%s_%s_%s_%s_%s_S1.bmp" % (ratio, n1, color, size, exemplar) fname2 = "%s_%s_%s_%s_%s_S2.bmp" % (ratio, n1, color, size, exemplar) #### t1 = Texture(Image.open(os.path.join(stimLib,fname1))) t2 = Texture(Image.open(os.path.join(stimLib,fname2))) if block == "sequential": phase = "" s = TextureStimulus(texture = t1, position = (x, y), anchor = 'center') texture_object = s.parameters.texture.get_texture_object() v = Viewport(screen=screen, stimuli=[s]) p = Presentation(go_duration=('forever', ), viewports=[v]) p.add_controller(None, None, FunctionController(during_go_func=put_image_sequential, temporal_variables = TIME_SEC_ABSOLUTE)) p.parameters.handle_event_callbacks=[(pygame.locals.KEYDOWN, keyFunc)] p.go() else: phase = "" s1 = TextureStimulus(texture = t1, position = (x, y), anchor = 'center') s2 = TextureStimulus(texture = t2, 
position = (x * 3, y), anchor = 'center') texture_object1 = s1.parameters.texture.get_texture_object() texture_object2 = s2.parameters.texture.get_texture_object() v = Viewport(screen=screen, stimuli=[s1,s2]) p = Presentation(go_duration=('forever', ), viewports=[v]) p.add_controller(None, None, FunctionController(during_go_func=put_image_dual, temporal_variables = TIME_SEC_ABSOLUTE)) p.parameters.handle_event_callbacks=[(pygame.locals.KEYDOWN, keyFunc)] p.go() if trial % break_trial == 0 and trial != trials: print trial, "BREAK TIME" experiments.showInstructions(screen, breakText, textcolor = [0, 0, 0]) trial += 1 sub.printData()
instructionText = "In this stage you will see 2 groups of dots.\n%s\n Each group will be either yellow or blue. Your job is to choose which group has more dots in it.\n\nPress %s for yellow.\nPress %s for blue.\n\nPRESS SPACE TO CONTINUE." % (blockIns[block], yellowB, blueB)
conditional_block
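This row's `conditional_block` middle is the else-branch instruction text; the yellow/blue key mapping it interpolates comes from a two-item button list that is pickled to colButton.pck and reversed when reloaded. A hedged Rust sketch of that idea — note this variant persists every flip so the mapping alternates run to run, whereas the original only writes the file when first creating it; the plain-text on-disk format is also an assumption (the original uses pickle):

```rust
// Flip-on-each-run counterbalancing in the style of the colButton.pck
// logic above. Stores the button order as two plain-text lines; the file
// name and format are assumptions for this sketch.
use std::fs;

fn yellow_blue_buttons(path: &str) -> (String, String) {
    let mut order: Vec<String> = match fs::read_to_string(path) {
        Ok(s) => {
            let mut v: Vec<String> = s.lines().map(str::to_owned).collect();
            v.reverse(); // alternate the assignment relative to last run
            v
        }
        Err(_) => vec!["Left CTRL".to_owned(), "Right CTRL".to_owned()],
    };
    let _ = fs::write(path, order.join("\n")); // persist for the next run
    let blue = order.pop().expect("two entries");
    let yellow = order.pop().expect("two entries");
    (yellow, blue) // first entry maps to yellow, second to blue
}
```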
interaction.rs
use bevy_math::{Mat4, Quat, Vec2, Vec3}; use bevy_utils::Duration; use serde::{Deserialize, Serialize}; use std::{ collections::HashMap, ops::{Deref, Mul}, }; // Note: indices follow WebXR convention. OpenXR's palm joint is missing, but it can be retrieved // using `XrTrackingSource::hands_pose()`. pub const XR_HAND_JOINT_WRIST: usize = 0; pub const XR_HAND_JOINT_THUMB_METACARPAL: usize = 1; pub const XR_HAND_JOINT_THUMB_PROXIMAL: usize = 2; pub const XR_HAND_JOINT_THUMB_DISTAL: usize = 3; pub const XR_HAND_JOINT_THUMB_TIP: usize = 4; pub const XR_HAND_JOINT_INDEX_METACARPAL: usize = 5; pub const XR_HAND_JOINT_INDEX_PROXIMAL: usize = 6; pub const XR_HAND_JOINT_INDEX_INTERMEDIATE: usize = 7; pub const XR_HAND_JOINT_INDEX_DISTAL: usize = 8; pub const XR_HAND_JOINT_INDEX_TIP: usize = 9; pub const XR_HAND_JOINT_MIDDLE_METACARPAL: usize = 10; pub const XR_HAND_JOINT_MIDDLE_PROXIMAL: usize = 11; pub const XR_HAND_JOINT_MIDDLE_INTERMEDIATE: usize = 12; pub const XR_HAND_JOINT_MIDDLE_DISTAL: usize = 13; pub const XR_HAND_JOINT_MIDDLE_TIP: usize = 14; pub const XR_HAND_JOINT_RING_METACARPAL: usize = 15; pub const XR_HAND_JOINT_RING_PROXIMAL: usize = 16; pub const XR_HAND_JOINT_RING_INTERMEDIATE: usize = 17; pub const XR_HAND_JOINT_RING_DISTAL: usize = 18; pub const XR_HAND_JOINT_RING_TIP: usize = 19; pub const XR_HAND_JOINT_LITTLE_METACARPAL: usize = 20; pub const XR_HAND_JOINT_LITTLE_PROXIMAL: usize = 21; pub const XR_HAND_JOINT_LITTLE_INTERMEDIATE: usize = 22; pub const XR_HAND_JOINT_LITTLE_DISTAL: usize = 23; pub const XR_HAND_JOINT_LITTLE_TIP: usize = 24; // To be verified: in all useful instances, when the orientation is valid, the position is also // valid. In case of 3DOF headsets, position should always be emulated using a neck and arm model. // In case of hand tracking, when a joint is estimated, both pose and orientation are available. #[derive(Clone, Copy, Default, Debug, Serialize, Deserialize)] pub struct XrRigidTransform { pub position: Vec3, pub orientation: Quat, } impl Mul for XrRigidTransform { type Output = XrRigidTransform; fn mul(self, rhs: Self) -> Self::Output { XrRigidTransform { position: self.position + self.orientation * rhs.position, orientation: self.orientation * rhs.orientation, } } } impl XrRigidTransform { pub fn to_mat4(&self) -> Mat4 { todo!() } } #[derive(Clone, Default, Debug, Serialize, Deserialize)] pub struct XrPose { pub transform: XrRigidTransform, pub linear_velocity: Option<Vec3>, pub angular_velocity: Option<Vec3>, pub emulated_position: bool, } impl Deref for XrPose { type Target = XrRigidTransform; fn deref(&self) -> &Self::Target { &self.transform } } #[derive(Clone, Default, Debug, Serialize, Deserialize)] pub struct XrJointPose { pub pose: XrPose, /// Radius of a sphere placed at the center of the joint that roughly touches the skin on both /// sides of the hand. pub radius: f32, } impl Deref for XrJointPose { type Target = XrPose; fn deref(&self) -> &Self::Target { &self.pose } } #[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] pub enum XrReferenceSpaceType { /// The coordinate system (position and orientation) is set as the headset pose at startup or /// after a recenter. This should be used only for experiences where the user is laid down. Viewer, /// The coordinate system (position and gravity-aligned orientation) is calculated from the /// headset pose at startup or after a recenter. This is for seated experiences. 
Local, /// The coordinate system (position and orientation) corresponds to the center of a rectangle at /// floor level, with +Y up. This is for stading or room-scale experiences. Stage, } pub mod implementation { use super::XrReferenceSpaceType; use crate::{interaction::XrPose, XrJointPose}; use bevy_math::Vec3; pub trait XrTrackingSourceBackend: Send + Sync { fn reference_space_type(&self) -> XrReferenceSpaceType; fn set_reference_space_type(&self, reference_space_type: XrReferenceSpaceType) -> bool; fn bounds_geometry(&self) -> Option<Vec<Vec3>>; fn views_poses(&self) -> Vec<XrPose>; fn hands_pose(&self) -> [Option<XrPose>; 2]; fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2]; fn hands_target_ray(&self) -> [Option<XrPose>; 2]; fn viewer_target_ray(&self) -> XrPose; } } /// Component used to poll tracking data. Tracking data is obtained "on-demand" to get the best /// precision possible. Poses are predicted for the next V-Sync. To obtain poses for an arbitrary /// point in time, `bevy_openxr` backend provides this functionality with OpenXrTrackingState. pub struct XrTrackingSource { inner: Box<dyn implementation::XrTrackingSourceBackend>, } impl XrTrackingSource { pub fn new(backend: Box<dyn implementation::XrTrackingSourceBackend>) -> Self { Self { inner: backend } } pub fn reference_space_type(&self) -> XrReferenceSpaceType { self.inner.reference_space_type() } /// Returns true if the tracking mode has been set correctly. If false is returned the tracking /// mode is not supported and another one must be chosen. pub fn set_reference_space_type(&mut self, reference_space_type: XrReferenceSpaceType) -> bool { self.inner.set_reference_space_type(reference_space_type) } pub fn just_reset_reference_space(&mut self) -> bool { todo!() } /// Returns a list of points, ordered clockwise, that define the playspace boundary. Only /// available when the reference space is set to `BoundedFloor`. Y component is always 0. pub fn bounds_geometry(&self) -> Option<Vec<Vec3>> { self.inner.bounds_geometry() } pub fn views_poses(&self) -> Vec<XrPose> { self.inner.views_poses() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_pose(&self) -> [Option<XrPose>; 2] { self.inner.hands_pose() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2]
/// Returns poses that can be used to render a target ray or cursor. The ray is along -Z. The /// behavior is vendor-specific. Index 0 corresponds to the left hand, index 1 corresponds to /// the right hand. pub fn hand_target_ray(&self) -> [Option<XrPose>; 2] { self.inner.hands_target_ray() } /// Returns a pose that can be used to render a target ray or cursor. The ray is along -Z. The /// origin is between the eyes for head-mounted displays and the center of the screen for /// handheld devices. pub fn viewer_target_ray(&self) -> XrPose { self.inner.viewer_target_ray() } // future extensions: // * eye tracking // * lower face tracking // * AR face tracking // * body/skeletal trackers // * scene understanding (anchors, planes, meshes) } #[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] pub enum XrHandType { Left, Right, } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)] pub enum XrButtonState { Default, Touched, Pressed, } impl Default for XrButtonState { fn default() -> Self { Self::Default } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionType { /// Convenience type that groups click, touch and value actions for a single button. /// The last segment of the path (`/click`, `/touch` or `/value`) must be omitted. Button { touch: bool, }, Binary, Scalar, /// Convenience type that groups x and y axes for a touchpad or thumbstick action. /// The last segment of the path (`/x` or `/y`) must be omitted. Vec2D, } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionState { Button { state: XrButtonState, value: f32 }, Binary(bool), Scalar(f32), Vec2D(Vec2), } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct XrActionDescriptor { pub name: String, pub action_type: XrActionType, } /// List bindings related to a single interaction profile. `tracked` and `has_haptics` can always be /// set to false but if they are set to true and the interaction profile does not support them, the /// the profile will be disabled completely. pub struct XrProfileDescriptor { pub profile: String, pub bindings: Vec<(XrActionDescriptor, String)>, pub tracked: bool, pub has_haptics: bool, } pub struct XrActionSet { current_states: HashMap<String, XrActionState>, previous_states: HashMap<String, XrActionState>, } impl XrActionSet { pub fn state(&self, action: &str) -> Option<XrActionState> { self.current_states.get(action).cloned() } pub fn button_state(&self, action: &str) -> XrButtonState { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state } else { XrButtonState::Default } } pub fn button_touched(&self, action: &str) -> bool { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state != XrButtonState::Default } else { false } } pub fn button_pressed(&self, action: &str) -> bool { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state == XrButtonState::Pressed } else { false } } fn button_states(&self, action: &str) -> Option<(XrButtonState, XrButtonState)> { if let ( Some(XrActionState::Button { state: current_state, .. }), Some(XrActionState::Button { state: previous_state, .. 
}), ) = ( self.current_states.get(action), self.previous_states.get(action), ) { Some((*current_state, *previous_state)) } else { None } } pub fn button_just_touched(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur != XrButtonState::Default && prev == XrButtonState::Default) .unwrap_or(false) } pub fn button_just_untouched(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur == XrButtonState::Default && prev != XrButtonState::Default) .unwrap_or(false) } pub fn button_just_pressed(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur == XrButtonState::Pressed && prev != XrButtonState::Pressed) .unwrap_or(false) } pub fn button_just_unpressed(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur != XrButtonState::Pressed && prev == XrButtonState::Pressed) .unwrap_or(false) } pub fn binary_value(&self, action: &str) -> bool { if let Some(XrActionState::Binary(value)) = self.current_states.get(action) { *value } else { self.button_pressed(action) } } pub fn scalar_value(&self, action: &str) -> f32 { if let Some(XrActionState::Scalar(value) | XrActionState::Button { value, .. }) = self.current_states.get(action) { *value } else { 0.0 } } pub fn vec_2d_value(&self, action: &str) -> Vec2 { if let Some(XrActionState::Vec2D(value)) = self.current_states.get(action) { *value } else { Vec2::ZERO } } pub fn set(&mut self, states: HashMap<String, XrActionState>) { self.previous_states = self.current_states.clone(); self.current_states = states; } pub fn clear(&mut self) { self.current_states.clear(); self.previous_states.clear(); } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrVibrationEventType { Apply { duration: Duration, frequency: f32, amplitude: f32, }, Stop, } #[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] pub struct XrVibrationEvent { pub hand: XrHandType, pub command: XrVibrationEventType, } /// Active interaction profiles. The format is backend-specific. They can be used to choose the /// controller 3D models to display. /// Note: in case skeletal hand tracking is active, the profiles still point to controller profiles. /// The correct 3D model to display can be decided depending on if skeletal hand tracking data is /// available or not. #[derive(Clone, PartialEq, Default, Debug, Serialize, Deserialize)] pub struct XrProfiles { pub left_hand: Option<String>, pub right_hand: Option<String>, }
{ self.inner.hands_skeleton_pose() }
identifier_body
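In this row the masked `identifier_body` is just the one-line delegation `{ self.inner.hands_skeleton_pose() }`: `XrTrackingSource` is a thin forwarder over the `XrTrackingSourceBackend` trait. That makes a no-op backend enough to construct one for tests; a sketch assuming the sample's types (`XrPose`, `XrJointPose`, `XrReferenceSpaceType`, the `implementation` module) are in scope:

```rust
// No-op backend for XrTrackingSourceBackend, handy for exercising code
// that polls XrTrackingSource without a headset attached. All poses are
// reported as unavailable and the reference space is fixed to Local.
use bevy_math::Vec3;

struct NullBackend;

impl implementation::XrTrackingSourceBackend for NullBackend {
    fn reference_space_type(&self) -> XrReferenceSpaceType { XrReferenceSpaceType::Local }
    fn set_reference_space_type(&self, _ty: XrReferenceSpaceType) -> bool { false }
    fn bounds_geometry(&self) -> Option<Vec<Vec3>> { None }
    fn views_poses(&self) -> Vec<XrPose> { Vec::new() }
    fn hands_pose(&self) -> [Option<XrPose>; 2] { [None, None] }
    fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2] { [None, None] }
    fn hands_target_ray(&self) -> [Option<XrPose>; 2] { [None, None] }
    fn viewer_target_ray(&self) -> XrPose { XrPose::default() }
}

// let source = XrTrackingSource::new(Box::new(NullBackend));
```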
interaction.rs
use bevy_math::{Mat4, Quat, Vec2, Vec3}; use bevy_utils::Duration; use serde::{Deserialize, Serialize}; use std::{ collections::HashMap, ops::{Deref, Mul}, }; // Note: indices follow WebXR convention. OpenXR's palm joint is missing, but it can be retrieved // using `XrTrackingSource::hands_pose()`. pub const XR_HAND_JOINT_WRIST: usize = 0; pub const XR_HAND_JOINT_THUMB_METACARPAL: usize = 1; pub const XR_HAND_JOINT_THUMB_PROXIMAL: usize = 2; pub const XR_HAND_JOINT_THUMB_DISTAL: usize = 3; pub const XR_HAND_JOINT_THUMB_TIP: usize = 4; pub const XR_HAND_JOINT_INDEX_METACARPAL: usize = 5; pub const XR_HAND_JOINT_INDEX_PROXIMAL: usize = 6; pub const XR_HAND_JOINT_INDEX_INTERMEDIATE: usize = 7; pub const XR_HAND_JOINT_INDEX_DISTAL: usize = 8; pub const XR_HAND_JOINT_INDEX_TIP: usize = 9; pub const XR_HAND_JOINT_MIDDLE_METACARPAL: usize = 10; pub const XR_HAND_JOINT_MIDDLE_PROXIMAL: usize = 11; pub const XR_HAND_JOINT_MIDDLE_INTERMEDIATE: usize = 12; pub const XR_HAND_JOINT_MIDDLE_DISTAL: usize = 13; pub const XR_HAND_JOINT_MIDDLE_TIP: usize = 14; pub const XR_HAND_JOINT_RING_METACARPAL: usize = 15; pub const XR_HAND_JOINT_RING_PROXIMAL: usize = 16; pub const XR_HAND_JOINT_RING_INTERMEDIATE: usize = 17; pub const XR_HAND_JOINT_RING_DISTAL: usize = 18; pub const XR_HAND_JOINT_RING_TIP: usize = 19; pub const XR_HAND_JOINT_LITTLE_METACARPAL: usize = 20; pub const XR_HAND_JOINT_LITTLE_PROXIMAL: usize = 21; pub const XR_HAND_JOINT_LITTLE_INTERMEDIATE: usize = 22; pub const XR_HAND_JOINT_LITTLE_DISTAL: usize = 23; pub const XR_HAND_JOINT_LITTLE_TIP: usize = 24; // To be verified: in all useful instances, when the orientation is valid, the position is also // valid. In case of 3DOF headsets, position should always be emulated using a neck and arm model. // In case of hand tracking, when a joint is estimated, both pose and orientation are available. #[derive(Clone, Copy, Default, Debug, Serialize, Deserialize)] pub struct XrRigidTransform { pub position: Vec3, pub orientation: Quat, } impl Mul for XrRigidTransform { type Output = XrRigidTransform; fn mul(self, rhs: Self) -> Self::Output { XrRigidTransform { position: self.position + self.orientation * rhs.position, orientation: self.orientation * rhs.orientation, } } } impl XrRigidTransform { pub fn to_mat4(&self) -> Mat4 { todo!() } } #[derive(Clone, Default, Debug, Serialize, Deserialize)] pub struct XrPose { pub transform: XrRigidTransform, pub linear_velocity: Option<Vec3>, pub angular_velocity: Option<Vec3>, pub emulated_position: bool, } impl Deref for XrPose { type Target = XrRigidTransform; fn deref(&self) -> &Self::Target { &self.transform } } #[derive(Clone, Default, Debug, Serialize, Deserialize)] pub struct XrJointPose { pub pose: XrPose, /// Radius of a sphere placed at the center of the joint that roughly touches the skin on both /// sides of the hand. pub radius: f32, } impl Deref for XrJointPose { type Target = XrPose; fn deref(&self) -> &Self::Target { &self.pose } } #[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] pub enum XrReferenceSpaceType { /// The coordinate system (position and orientation) is set as the headset pose at startup or /// after a recenter. This should be used only for experiences where the user is laid down. Viewer, /// The coordinate system (position and gravity-aligned orientation) is calculated from the /// headset pose at startup or after a recenter. This is for seated experiences. 
Local, /// The coordinate system (position and orientation) corresponds to the center of a rectangle at /// floor level, with +Y up. This is for stading or room-scale experiences. Stage, } pub mod implementation { use super::XrReferenceSpaceType; use crate::{interaction::XrPose, XrJointPose}; use bevy_math::Vec3; pub trait XrTrackingSourceBackend: Send + Sync { fn reference_space_type(&self) -> XrReferenceSpaceType; fn set_reference_space_type(&self, reference_space_type: XrReferenceSpaceType) -> bool; fn bounds_geometry(&self) -> Option<Vec<Vec3>>; fn views_poses(&self) -> Vec<XrPose>; fn hands_pose(&self) -> [Option<XrPose>; 2]; fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2]; fn hands_target_ray(&self) -> [Option<XrPose>; 2]; fn viewer_target_ray(&self) -> XrPose; } } /// Component used to poll tracking data. Tracking data is obtained "on-demand" to get the best /// precision possible. Poses are predicted for the next V-Sync. To obtain poses for an arbitrary /// point in time, `bevy_openxr` backend provides this functionality with OpenXrTrackingState. pub struct XrTrackingSource { inner: Box<dyn implementation::XrTrackingSourceBackend>, } impl XrTrackingSource { pub fn new(backend: Box<dyn implementation::XrTrackingSourceBackend>) -> Self { Self { inner: backend } } pub fn reference_space_type(&self) -> XrReferenceSpaceType { self.inner.reference_space_type() } /// Returns true if the tracking mode has been set correctly. If false is returned the tracking /// mode is not supported and another one must be chosen. pub fn set_reference_space_type(&mut self, reference_space_type: XrReferenceSpaceType) -> bool { self.inner.set_reference_space_type(reference_space_type) } pub fn just_reset_reference_space(&mut self) -> bool { todo!() } /// Returns a list of points, ordered clockwise, that define the playspace boundary. Only /// available when the reference space is set to `BoundedFloor`. Y component is always 0. pub fn bounds_geometry(&self) -> Option<Vec<Vec3>> { self.inner.bounds_geometry() } pub fn views_poses(&self) -> Vec<XrPose> { self.inner.views_poses() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_pose(&self) -> [Option<XrPose>; 2] { self.inner.hands_pose() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2] { self.inner.hands_skeleton_pose() } /// Returns poses that can be used to render a target ray or cursor. The ray is along -Z. The /// behavior is vendor-specific. Index 0 corresponds to the left hand, index 1 corresponds to /// the right hand. pub fn hand_target_ray(&self) -> [Option<XrPose>; 2] { self.inner.hands_target_ray() } /// Returns a pose that can be used to render a target ray or cursor. The ray is along -Z. The /// origin is between the eyes for head-mounted displays and the center of the screen for /// handheld devices. 
pub fn viewer_target_ray(&self) -> XrPose { self.inner.viewer_target_ray() } // future extensions: // * eye tracking // * lower face tracking // * AR face tracking // * body/skeletal trackers // * scene understanding (anchors, planes, meshes) } #[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] pub enum XrHandType { Left, Right, } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)] pub enum XrButtonState { Default, Touched, Pressed, } impl Default for XrButtonState { fn default() -> Self { Self::Default } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionType { /// Convenience type that groups click, touch and value actions for a single button. /// The last segment of the path (`/click`, `/touch` or `/value`) must be omitted. Button { touch: bool, }, Binary, Scalar, /// Convenience type that groups x and y axes for a touchpad or thumbstick action. /// The last segment of the path (`/x` or `/y`) must be omitted. Vec2D, } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionState { Button { state: XrButtonState, value: f32 }, Binary(bool), Scalar(f32), Vec2D(Vec2), } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct XrActionDescriptor { pub name: String, pub action_type: XrActionType, } /// List bindings related to a single interaction profile. `tracked` and `has_haptics` can always be /// set to false but if they are set to true and the interaction profile does not support them, the /// the profile will be disabled completely. pub struct XrProfileDescriptor { pub profile: String, pub bindings: Vec<(XrActionDescriptor, String)>, pub tracked: bool, pub has_haptics: bool, } pub struct XrActionSet { current_states: HashMap<String, XrActionState>, previous_states: HashMap<String, XrActionState>, } impl XrActionSet { pub fn state(&self, action: &str) -> Option<XrActionState> { self.current_states.get(action).cloned() } pub fn button_state(&self, action: &str) -> XrButtonState { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action)
else { XrButtonState::Default } } pub fn button_touched(&self, action: &str) -> bool { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state != XrButtonState::Default } else { false } } pub fn button_pressed(&self, action: &str) -> bool { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state == XrButtonState::Pressed } else { false } } fn button_states(&self, action: &str) -> Option<(XrButtonState, XrButtonState)> { if let ( Some(XrActionState::Button { state: current_state, .. }), Some(XrActionState::Button { state: previous_state, .. }), ) = ( self.current_states.get(action), self.previous_states.get(action), ) { Some((*current_state, *previous_state)) } else { None } } pub fn button_just_touched(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur != XrButtonState::Default && prev == XrButtonState::Default) .unwrap_or(false) } pub fn button_just_untouched(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur == XrButtonState::Default && prev != XrButtonState::Default) .unwrap_or(false) } pub fn button_just_pressed(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur == XrButtonState::Pressed && prev != XrButtonState::Pressed) .unwrap_or(false) } pub fn button_just_unpressed(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur != XrButtonState::Pressed && prev == XrButtonState::Pressed) .unwrap_or(false) } pub fn binary_value(&self, action: &str) -> bool { if let Some(XrActionState::Binary(value)) = self.current_states.get(action) { *value } else { self.button_pressed(action) } } pub fn scalar_value(&self, action: &str) -> f32 { if let Some(XrActionState::Scalar(value) | XrActionState::Button { value, .. }) = self.current_states.get(action) { *value } else { 0.0 } } pub fn vec_2d_value(&self, action: &str) -> Vec2 { if let Some(XrActionState::Vec2D(value)) = self.current_states.get(action) { *value } else { Vec2::ZERO } } pub fn set(&mut self, states: HashMap<String, XrActionState>) { self.previous_states = self.current_states.clone(); self.current_states = states; } pub fn clear(&mut self) { self.current_states.clear(); self.previous_states.clear(); } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrVibrationEventType { Apply { duration: Duration, frequency: f32, amplitude: f32, }, Stop, } #[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] pub struct XrVibrationEvent { pub hand: XrHandType, pub command: XrVibrationEventType, } /// Active interaction profiles. The format is backend-specific. They can be used to choose the /// controller 3D models to display. /// Note: in case skeletal hand tracking is active, the profiles still point to controller profiles. /// The correct 3D model to display can be decided depending on if skeletal hand tracking data is /// available or not. #[derive(Clone, PartialEq, Default, Debug, Serialize, Deserialize)] pub struct XrProfiles { pub left_hand: Option<String>, pub right_hand: Option<String>, }
{ *state }
conditional_block
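The `conditional_block` masked here is the matching arm of `button_state`. Around it, `XrActionSet` detects press/release edges by diffing `current_states` against `previous_states`, which `set()` rotates each frame. A usage sketch, assuming an `XrActionSet` obtained from the XR backend and a hypothetical action named "trigger":

```rust
// Per-frame polling pattern for XrActionSet. `actions` comes from the XR
// backend; "trigger" is a hypothetical action name bound elsewhere.
use std::collections::HashMap;

fn poll(actions: &mut XrActionSet, fresh: HashMap<String, XrActionState>) {
    // set() moves the old map into previous_states, enabling edge queries.
    actions.set(fresh);

    if actions.button_just_pressed("trigger") {
        // runs exactly once per press, not every frame the button is held
    }
    let squeeze: f32 = actions.scalar_value("trigger"); // 0.0 when unbound
    let _ = squeeze;
}
```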
interaction.rs
use bevy_math::{Mat4, Quat, Vec2, Vec3}; use bevy_utils::Duration; use serde::{Deserialize, Serialize}; use std::{ collections::HashMap, ops::{Deref, Mul}, }; // Note: indices follow WebXR convention. OpenXR's palm joint is missing, but it can be retrieved // using `XrTrackingSource::hands_pose()`. pub const XR_HAND_JOINT_WRIST: usize = 0; pub const XR_HAND_JOINT_THUMB_METACARPAL: usize = 1; pub const XR_HAND_JOINT_THUMB_PROXIMAL: usize = 2; pub const XR_HAND_JOINT_THUMB_DISTAL: usize = 3; pub const XR_HAND_JOINT_THUMB_TIP: usize = 4; pub const XR_HAND_JOINT_INDEX_METACARPAL: usize = 5; pub const XR_HAND_JOINT_INDEX_PROXIMAL: usize = 6; pub const XR_HAND_JOINT_INDEX_INTERMEDIATE: usize = 7; pub const XR_HAND_JOINT_INDEX_DISTAL: usize = 8; pub const XR_HAND_JOINT_INDEX_TIP: usize = 9; pub const XR_HAND_JOINT_MIDDLE_METACARPAL: usize = 10; pub const XR_HAND_JOINT_MIDDLE_PROXIMAL: usize = 11; pub const XR_HAND_JOINT_MIDDLE_INTERMEDIATE: usize = 12; pub const XR_HAND_JOINT_MIDDLE_DISTAL: usize = 13; pub const XR_HAND_JOINT_MIDDLE_TIP: usize = 14; pub const XR_HAND_JOINT_RING_METACARPAL: usize = 15; pub const XR_HAND_JOINT_RING_PROXIMAL: usize = 16; pub const XR_HAND_JOINT_RING_INTERMEDIATE: usize = 17; pub const XR_HAND_JOINT_RING_DISTAL: usize = 18; pub const XR_HAND_JOINT_RING_TIP: usize = 19; pub const XR_HAND_JOINT_LITTLE_METACARPAL: usize = 20; pub const XR_HAND_JOINT_LITTLE_PROXIMAL: usize = 21; pub const XR_HAND_JOINT_LITTLE_INTERMEDIATE: usize = 22; pub const XR_HAND_JOINT_LITTLE_DISTAL: usize = 23; pub const XR_HAND_JOINT_LITTLE_TIP: usize = 24; // To be verified: in all useful instances, when the orientation is valid, the position is also // valid. In case of 3DOF headsets, position should always be emulated using a neck and arm model. // In case of hand tracking, when a joint is estimated, both pose and orientation are available. #[derive(Clone, Copy, Default, Debug, Serialize, Deserialize)] pub struct XrRigidTransform { pub position: Vec3, pub orientation: Quat, } impl Mul for XrRigidTransform { type Output = XrRigidTransform; fn mul(self, rhs: Self) -> Self::Output { XrRigidTransform { position: self.position + self.orientation * rhs.position, orientation: self.orientation * rhs.orientation, } } } impl XrRigidTransform { pub fn to_mat4(&self) -> Mat4 { todo!() } } #[derive(Clone, Default, Debug, Serialize, Deserialize)] pub struct XrPose { pub transform: XrRigidTransform, pub linear_velocity: Option<Vec3>, pub angular_velocity: Option<Vec3>, pub emulated_position: bool, } impl Deref for XrPose { type Target = XrRigidTransform; fn deref(&self) -> &Self::Target { &self.transform } } #[derive(Clone, Default, Debug, Serialize, Deserialize)] pub struct XrJointPose { pub pose: XrPose, /// Radius of a sphere placed at the center of the joint that roughly touches the skin on both /// sides of the hand. pub radius: f32, } impl Deref for XrJointPose { type Target = XrPose; fn deref(&self) -> &Self::Target { &self.pose } } #[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] pub enum XrReferenceSpaceType { /// The coordinate system (position and orientation) is set as the headset pose at startup or /// after a recenter. This should be used only for experiences where the user is laid down. Viewer, /// The coordinate system (position and gravity-aligned orientation) is calculated from the /// headset pose at startup or after a recenter. This is for seated experiences. 
Local, /// The coordinate system (position and orientation) corresponds to the center of a rectangle at /// floor level, with +Y up. This is for stading or room-scale experiences. Stage, } pub mod implementation { use super::XrReferenceSpaceType; use crate::{interaction::XrPose, XrJointPose}; use bevy_math::Vec3; pub trait XrTrackingSourceBackend: Send + Sync { fn reference_space_type(&self) -> XrReferenceSpaceType; fn set_reference_space_type(&self, reference_space_type: XrReferenceSpaceType) -> bool; fn bounds_geometry(&self) -> Option<Vec<Vec3>>; fn views_poses(&self) -> Vec<XrPose>; fn hands_pose(&self) -> [Option<XrPose>; 2]; fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2]; fn hands_target_ray(&self) -> [Option<XrPose>; 2]; fn viewer_target_ray(&self) -> XrPose; } } /// Component used to poll tracking data. Tracking data is obtained "on-demand" to get the best /// precision possible. Poses are predicted for the next V-Sync. To obtain poses for an arbitrary /// point in time, `bevy_openxr` backend provides this functionality with OpenXrTrackingState. pub struct XrTrackingSource { inner: Box<dyn implementation::XrTrackingSourceBackend>, } impl XrTrackingSource { pub fn new(backend: Box<dyn implementation::XrTrackingSourceBackend>) -> Self { Self { inner: backend } } pub fn reference_space_type(&self) -> XrReferenceSpaceType { self.inner.reference_space_type() } /// Returns true if the tracking mode has been set correctly. If false is returned the tracking /// mode is not supported and another one must be chosen. pub fn set_reference_space_type(&mut self, reference_space_type: XrReferenceSpaceType) -> bool { self.inner.set_reference_space_type(reference_space_type) } pub fn just_reset_reference_space(&mut self) -> bool { todo!() } /// Returns a list of points, ordered clockwise, that define the playspace boundary. Only /// available when the reference space is set to `BoundedFloor`. Y component is always 0. pub fn bounds_geometry(&self) -> Option<Vec<Vec3>> { self.inner.bounds_geometry() } pub fn views_poses(&self) -> Vec<XrPose> { self.inner.views_poses() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_pose(&self) -> [Option<XrPose>; 2] { self.inner.hands_pose() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2] { self.inner.hands_skeleton_pose() } /// Returns poses that can be used to render a target ray or cursor. The ray is along -Z. The /// behavior is vendor-specific. Index 0 corresponds to the left hand, index 1 corresponds to /// the right hand. pub fn hand_target_ray(&self) -> [Option<XrPose>; 2] { self.inner.hands_target_ray() } /// Returns a pose that can be used to render a target ray or cursor. The ray is along -Z. The /// origin is between the eyes for head-mounted displays and the center of the screen for /// handheld devices. 
pub fn viewer_target_ray(&self) -> XrPose { self.inner.viewer_target_ray() } // future extensions: // * eye tracking // * lower face tracking // * AR face tracking // * body/skeletal trackers // * scene understanding (anchors, planes, meshes) } #[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] pub enum XrHandType { Left, Right, } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)] pub enum XrButtonState { Default, Touched, Pressed, } impl Default for XrButtonState { fn default() -> Self { Self::Default } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionType { /// Convenience type that groups click, touch and value actions for a single button. /// The last segment of the path (`/click`, `/touch` or `/value`) must be omitted. Button { touch: bool, }, Binary, Scalar, /// Convenience type that groups x and y axes for a touchpad or thumbstick action. /// The last segment of the path (`/x` or `/y`) must be omitted. Vec2D, } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionState { Button { state: XrButtonState, value: f32 }, Binary(bool), Scalar(f32), Vec2D(Vec2), } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct XrActionDescriptor { pub name: String, pub action_type: XrActionType, } /// List bindings related to a single interaction profile. `tracked` and `has_haptics` can always be /// set to false but if they are set to true and the interaction profile does not support them, the /// the profile will be disabled completely. pub struct XrProfileDescriptor { pub profile: String, pub bindings: Vec<(XrActionDescriptor, String)>, pub tracked: bool, pub has_haptics: bool, } pub struct XrActionSet { current_states: HashMap<String, XrActionState>, previous_states: HashMap<String, XrActionState>, } impl XrActionSet { pub fn state(&self, action: &str) -> Option<XrActionState> { self.current_states.get(action).cloned() } pub fn button_state(&self, action: &str) -> XrButtonState { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state } else { XrButtonState::Default } } pub fn button_touched(&self, action: &str) -> bool { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state != XrButtonState::Default } else { false } } pub fn button_pressed(&self, action: &str) -> bool { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state == XrButtonState::Pressed } else { false } } fn button_states(&self, action: &str) -> Option<(XrButtonState, XrButtonState)> { if let ( Some(XrActionState::Button { state: current_state, .. }), Some(XrActionState::Button { state: previous_state, .. 
}), ) = ( self.current_states.get(action), self.previous_states.get(action), ) { Some((*current_state, *previous_state)) } else { None } } pub fn button_just_touched(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur != XrButtonState::Default && prev == XrButtonState::Default) .unwrap_or(false) } pub fn button_just_untouched(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur == XrButtonState::Default && prev != XrButtonState::Default) .unwrap_or(false) } pub fn button_just_pressed(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur == XrButtonState::Pressed && prev != XrButtonState::Pressed) .unwrap_or(false) } pub fn button_just_unpressed(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur != XrButtonState::Pressed && prev == XrButtonState::Pressed) .unwrap_or(false) } pub fn binary_value(&self, action: &str) -> bool { if let Some(XrActionState::Binary(value)) = self.current_states.get(action) { *value } else { self.button_pressed(action) } } pub fn scalar_value(&self, action: &str) -> f32 { if let Some(XrActionState::Scalar(value) | XrActionState::Button { value, .. }) = self.current_states.get(action) { *value } else { 0.0 } } pub fn vec_2d_value(&self, action: &str) -> Vec2 { if let Some(XrActionState::Vec2D(value)) = self.current_states.get(action) { *value } else { Vec2::ZERO } } pub fn set(&mut self, states: HashMap<String, XrActionState>) { self.previous_states = self.current_states.clone(); self.current_states = states; } pub fn clear(&mut self) { self.current_states.clear(); self.previous_states.clear(); } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrVibrationEventType { Apply { duration: Duration, frequency: f32, amplitude: f32, }, Stop, } #[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] pub struct XrVibrationEvent { pub hand: XrHandType, pub command: XrVibrationEventType, } /// Active interaction profiles. The format is backend-specific. They can be used to choose the /// controller 3D models to display. /// Note: in case skeletal hand tracking is active, the profiles still point to controller profiles. /// The correct 3D model to display can be decided depending on if skeletal hand tracking data is /// available or not. #[derive(Clone, PartialEq, Default, Debug, Serialize, Deserialize)] pub struct
{ pub left_hand: Option<String>, pub right_hand: Option<String>, }
XrProfiles
identifier_name
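The masked `identifier_name` is `XrProfiles`, whose doc comment says the backend-specific profile strings exist to drive controller-model selection, with hand models preferred whenever skeletal tracking data is present. A hypothetical helper in that spirit (the matched substrings and asset paths are assumptions, not values the sample guarantees):

```rust
// Hypothetical mapping from an interaction profile to a 3D model asset.
// Per the doc comment, profiles still name controllers while skeletal hand
// tracking is active, so callers should check for skeleton data first.
fn controller_model(profile: &Option<String>, has_hand_skeleton: bool) -> &'static str {
    if has_hand_skeleton {
        return "models/hand.glb"; // skeletal tracking active: draw a hand
    }
    match profile.as_deref() {
        Some(p) if p.contains("touch") => "models/touch_controller.glb",
        Some(_) => "models/generic_controller.glb",
        None => "models/none.glb", // no device connected on this side
    }
}
```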
interaction.rs
use bevy_math::{Mat4, Quat, Vec2, Vec3}; use bevy_utils::Duration; use serde::{Deserialize, Serialize}; use std::{ collections::HashMap, ops::{Deref, Mul}, }; // Note: indices follow WebXR convention. OpenXR's palm joint is missing, but it can be retrieved // using `XrTrackingSource::hands_pose()`. pub const XR_HAND_JOINT_WRIST: usize = 0; pub const XR_HAND_JOINT_THUMB_METACARPAL: usize = 1; pub const XR_HAND_JOINT_THUMB_PROXIMAL: usize = 2; pub const XR_HAND_JOINT_THUMB_DISTAL: usize = 3; pub const XR_HAND_JOINT_THUMB_TIP: usize = 4; pub const XR_HAND_JOINT_INDEX_METACARPAL: usize = 5; pub const XR_HAND_JOINT_INDEX_PROXIMAL: usize = 6; pub const XR_HAND_JOINT_INDEX_INTERMEDIATE: usize = 7; pub const XR_HAND_JOINT_INDEX_DISTAL: usize = 8; pub const XR_HAND_JOINT_INDEX_TIP: usize = 9; pub const XR_HAND_JOINT_MIDDLE_METACARPAL: usize = 10; pub const XR_HAND_JOINT_MIDDLE_PROXIMAL: usize = 11; pub const XR_HAND_JOINT_MIDDLE_INTERMEDIATE: usize = 12; pub const XR_HAND_JOINT_MIDDLE_DISTAL: usize = 13; pub const XR_HAND_JOINT_MIDDLE_TIP: usize = 14; pub const XR_HAND_JOINT_RING_METACARPAL: usize = 15; pub const XR_HAND_JOINT_RING_PROXIMAL: usize = 16; pub const XR_HAND_JOINT_RING_INTERMEDIATE: usize = 17; pub const XR_HAND_JOINT_RING_DISTAL: usize = 18; pub const XR_HAND_JOINT_RING_TIP: usize = 19; pub const XR_HAND_JOINT_LITTLE_METACARPAL: usize = 20; pub const XR_HAND_JOINT_LITTLE_PROXIMAL: usize = 21; pub const XR_HAND_JOINT_LITTLE_INTERMEDIATE: usize = 22; pub const XR_HAND_JOINT_LITTLE_DISTAL: usize = 23; pub const XR_HAND_JOINT_LITTLE_TIP: usize = 24; // To be verified: in all useful instances, when the orientation is valid, the position is also // valid. In case of 3DOF headsets, position should always be emulated using a neck and arm model. // In case of hand tracking, when a joint is estimated, both pose and orientation are available. #[derive(Clone, Copy, Default, Debug, Serialize, Deserialize)] pub struct XrRigidTransform { pub position: Vec3, pub orientation: Quat, } impl Mul for XrRigidTransform { type Output = XrRigidTransform; fn mul(self, rhs: Self) -> Self::Output { XrRigidTransform { position: self.position + self.orientation * rhs.position, orientation: self.orientation * rhs.orientation, } } } impl XrRigidTransform { pub fn to_mat4(&self) -> Mat4 { todo!() } } #[derive(Clone, Default, Debug, Serialize, Deserialize)] pub struct XrPose { pub transform: XrRigidTransform, pub linear_velocity: Option<Vec3>, pub angular_velocity: Option<Vec3>, pub emulated_position: bool, } impl Deref for XrPose { type Target = XrRigidTransform; fn deref(&self) -> &Self::Target { &self.transform } } #[derive(Clone, Default, Debug, Serialize, Deserialize)] pub struct XrJointPose { pub pose: XrPose, /// Radius of a sphere placed at the center of the joint that roughly touches the skin on both /// sides of the hand. pub radius: f32, } impl Deref for XrJointPose { type Target = XrPose; fn deref(&self) -> &Self::Target { &self.pose } } #[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] pub enum XrReferenceSpaceType { /// The coordinate system (position and orientation) is set as the headset pose at startup or /// after a recenter. This should be used only for experiences where the user is laid down. Viewer, /// The coordinate system (position and gravity-aligned orientation) is calculated from the /// headset pose at startup or after a recenter. This is for seated experiences. 
Local, /// The coordinate system (position and orientation) corresponds to the center of a rectangle at /// floor level, with +Y up. This is for stading or room-scale experiences. Stage, } pub mod implementation { use super::XrReferenceSpaceType; use crate::{interaction::XrPose, XrJointPose}; use bevy_math::Vec3; pub trait XrTrackingSourceBackend: Send + Sync { fn reference_space_type(&self) -> XrReferenceSpaceType; fn set_reference_space_type(&self, reference_space_type: XrReferenceSpaceType) -> bool; fn bounds_geometry(&self) -> Option<Vec<Vec3>>; fn views_poses(&self) -> Vec<XrPose>; fn hands_pose(&self) -> [Option<XrPose>; 2]; fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2]; fn hands_target_ray(&self) -> [Option<XrPose>; 2]; fn viewer_target_ray(&self) -> XrPose; } } /// Component used to poll tracking data. Tracking data is obtained "on-demand" to get the best /// precision possible. Poses are predicted for the next V-Sync. To obtain poses for an arbitrary /// point in time, `bevy_openxr` backend provides this functionality with OpenXrTrackingState. pub struct XrTrackingSource { inner: Box<dyn implementation::XrTrackingSourceBackend>, } impl XrTrackingSource { pub fn new(backend: Box<dyn implementation::XrTrackingSourceBackend>) -> Self { Self { inner: backend } } pub fn reference_space_type(&self) -> XrReferenceSpaceType { self.inner.reference_space_type() } /// Returns true if the tracking mode has been set correctly. If false is returned the tracking /// mode is not supported and another one must be chosen. pub fn set_reference_space_type(&mut self, reference_space_type: XrReferenceSpaceType) -> bool { self.inner.set_reference_space_type(reference_space_type) } pub fn just_reset_reference_space(&mut self) -> bool { todo!() } /// Returns a list of points, ordered clockwise, that define the playspace boundary. Only /// available when the reference space is set to `BoundedFloor`. Y component is always 0. pub fn bounds_geometry(&self) -> Option<Vec<Vec3>> { self.inner.bounds_geometry() } pub fn views_poses(&self) -> Vec<XrPose> { self.inner.views_poses() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_pose(&self) -> [Option<XrPose>; 2] { self.inner.hands_pose() } /// Index 0 corresponds to the left hand, index 1 corresponds to the right hand. pub fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2] { self.inner.hands_skeleton_pose() } /// Returns poses that can be used to render a target ray or cursor. The ray is along -Z. The /// behavior is vendor-specific. Index 0 corresponds to the left hand, index 1 corresponds to /// the right hand. pub fn hand_target_ray(&self) -> [Option<XrPose>; 2] { self.inner.hands_target_ray() } /// Returns a pose that can be used to render a target ray or cursor. The ray is along -Z. The /// origin is between the eyes for head-mounted displays and the center of the screen for /// handheld devices. 
pub fn viewer_target_ray(&self) -> XrPose { self.inner.viewer_target_ray() } // future extensions: // * eye tracking // * lower face tracking // * AR face tracking // * body/skeletal trackers // * scene understanding (anchors, planes, meshes) } #[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] pub enum XrHandType { Left, Right, } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)] pub enum XrButtonState { Default, Touched, Pressed, } impl Default for XrButtonState { fn default() -> Self { Self::Default } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionType { /// Convenience type that groups click, touch and value actions for a single button. /// The last segment of the path (`/click`, `/touch` or `/value`) must be omitted. Button { touch: bool, }, Binary, Scalar, /// Convenience type that groups x and y axes for a touchpad or thumbstick action. /// The last segment of the path (`/x` or `/y`) must be omitted. Vec2D, } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrActionState { Button { state: XrButtonState, value: f32 }, Binary(bool), Scalar(f32), Vec2D(Vec2), } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct XrActionDescriptor { pub name: String, pub action_type: XrActionType, } /// List bindings related to a single interaction profile. `tracked` and `has_haptics` can always be /// set to false but if they are set to true and the interaction profile does not support them, the /// the profile will be disabled completely. pub struct XrProfileDescriptor { pub profile: String, pub bindings: Vec<(XrActionDescriptor, String)>, pub tracked: bool, pub has_haptics: bool, } pub struct XrActionSet { current_states: HashMap<String, XrActionState>, previous_states: HashMap<String, XrActionState>, } impl XrActionSet { pub fn state(&self, action: &str) -> Option<XrActionState> { self.current_states.get(action).cloned() } pub fn button_state(&self, action: &str) -> XrButtonState { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state } else { XrButtonState::Default } } pub fn button_touched(&self, action: &str) -> bool { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state != XrButtonState::Default } else { false } } pub fn button_pressed(&self, action: &str) -> bool { if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) { *state == XrButtonState::Pressed } else { false } } fn button_states(&self, action: &str) -> Option<(XrButtonState, XrButtonState)> { if let ( Some(XrActionState::Button { state: current_state, .. }), Some(XrActionState::Button { state: previous_state, .. }), ) = ( self.current_states.get(action), self.previous_states.get(action), ) { Some((*current_state, *previous_state)) } else { None } } pub fn button_just_touched(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur != XrButtonState::Default && prev == XrButtonState::Default) .unwrap_or(false) } pub fn button_just_untouched(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur == XrButtonState::Default && prev != XrButtonState::Default) .unwrap_or(false) } pub fn button_just_pressed(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur == XrButtonState::Pressed && prev != XrButtonState::Pressed) .unwrap_or(false) }
} pub fn binary_value(&self, action: &str) -> bool { if let Some(XrActionState::Binary(value)) = self.current_states.get(action) { *value } else { self.button_pressed(action) } } pub fn scalar_value(&self, action: &str) -> f32 { if let Some(XrActionState::Scalar(value) | XrActionState::Button { value, .. }) = self.current_states.get(action) { *value } else { 0.0 } } pub fn vec_2d_value(&self, action: &str) -> Vec2 { if let Some(XrActionState::Vec2D(value)) = self.current_states.get(action) { *value } else { Vec2::ZERO } } pub fn set(&mut self, states: HashMap<String, XrActionState>) { self.previous_states = self.current_states.clone(); self.current_states = states; } pub fn clear(&mut self) { self.current_states.clear(); self.previous_states.clear(); } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] pub enum XrVibrationEventType { Apply { duration: Duration, frequency: f32, amplitude: f32, }, Stop, } #[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] pub struct XrVibrationEvent { pub hand: XrHandType, pub command: XrVibrationEventType, } /// Active interaction profiles. The format is backend-specific. They can be used to choose the /// controller 3D models to display. /// Note: in case skeletal hand tracking is active, the profiles still point to controller profiles. /// The correct 3D model to display can be decided depending on if skeletal hand tracking data is /// available or not. #[derive(Clone, PartialEq, Default, Debug, Serialize, Deserialize)] pub struct XrProfiles { pub left_hand: Option<String>, pub right_hand: Option<String>, }
pub fn button_just_unpressed(&self, action: &str) -> bool { self.button_states(action) .map(|(cur, prev)| cur != XrButtonState::Pressed && prev == XrButtonState::Pressed) .unwrap_or(false)
random_line_split
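This `random_line_split` row carves out `button_just_unpressed`, but the prefix also carries the file's one piece of real math: `Mul for XrRigidTransform` composes poses as rotate-then-translate. A small worked example of that composition, assuming `XrRigidTransform` from the sample is in scope (the poses and expected result are illustrative):

```rust
// Composing rigid transforms with the Mul impl shown above:
// (a * b).position    == a.position + a.orientation * b.position
// (a * b).orientation == a.orientation * b.orientation
use bevy_math::{Quat, Vec3};

fn main() {
    let head = XrRigidTransform {
        position: Vec3::new(0.0, 1.6, 0.0), // eye height in stage space
        orientation: Quat::from_rotation_y(std::f32::consts::FRAC_PI_2),
    };
    let cursor = XrRigidTransform {
        position: Vec3::new(0.0, 0.0, -1.0), // one meter ahead of the viewer
        orientation: Quat::IDENTITY,
    };
    let in_stage = head * cursor;
    // Rotating -Z by +90 degrees about Y gives -X, so the cursor lands at
    // roughly (-1.0, 1.6, 0.0) in stage space.
    assert!((in_stage.position - Vec3::new(-1.0, 1.6, 0.0)).length() < 1e-5);
}
```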
recorder_test.go
// Copyright 2015 The Cockroach Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. // // Author: Matt Tracy ([email protected]) package status import ( "reflect" "regexp" "sort" "strconv" "testing" "time" "github.com/kr/pretty" "github.com/cockroachdb/cockroach/roachpb" "github.com/cockroachdb/cockroach/storage" "github.com/cockroachdb/cockroach/storage/engine" "github.com/cockroachdb/cockroach/ts" "github.com/cockroachdb/cockroach/util/hlc" "github.com/cockroachdb/cockroach/util/leaktest" "github.com/cockroachdb/cockroach/util/metric" ) const sep = "-" // byTimeAndName is a slice of ts.TimeSeriesData. type byTimeAndName []ts.TimeSeriesData // implement sort.Interface for byTimeAndName func (a byTimeAndName) Len() int { return len(a) } func (a byTimeAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byTimeAndName) Less(i, j int) bool { if a[i].Name != a[j].Name { return a[i].Name < a[j].Name } if a[i].Datapoints[0].TimestampNanos != a[j].Datapoints[0].TimestampNanos { return a[i].Datapoints[0].TimestampNanos < a[j].Datapoints[0].TimestampNanos } return a[i].Source < a[j].Source } var _ sort.Interface = byTimeAndName{} // byStoreID is a slice of roachpb.StoreID. type byStoreID []roachpb.StoreID // implement sort.Interface for byStoreID func (a byStoreID) Len() int { return len(a) } func (a byStoreID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreID) Less(i, j int) bool { return a[i] < a[j] } var _ sort.Interface = byStoreID{} // byStoreDescID is a slice of storage.StoreStatus type byStoreDescID []storage.StoreStatus // implement sort.Interface for byStoreDescID. func (a byStoreDescID)
() int { return len(a) } func (a byStoreDescID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreDescID) Less(i, j int) bool { return a[i].Desc.StoreID < a[j].Desc.StoreID } var _ sort.Interface = byStoreDescID{} // fakeStore implements only the methods of store needed by MetricsRecorder to // interact with stores. type fakeStore struct { storeID roachpb.StoreID stats engine.MVCCStats desc roachpb.StoreDescriptor registry *metric.Registry } func (fs fakeStore) StoreID() roachpb.StoreID { return fs.storeID } func (fs fakeStore) Descriptor() (*roachpb.StoreDescriptor, error) { return &fs.desc, nil } func (fs fakeStore) MVCCStats() engine.MVCCStats { return fs.stats } func (fs fakeStore) Registry() *metric.Registry { return fs.registry } // TestMetricsRecorder verifies that the metrics recorder properly formats the // statistics from various registries, both for Time Series and for Status // Summaries. func TestMetricsRecorder(t *testing.T) { defer leaktest.AfterTest(t)() // Fake descriptors and stats for status summaries. nodeDesc := roachpb.NodeDescriptor{ NodeID: roachpb.NodeID(1), } storeDesc1 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(1), Capacity: roachpb.StoreCapacity{ Capacity: 100, Available: 50, }, } storeDesc2 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(2), Capacity: roachpb.StoreCapacity{ Capacity: 200, Available: 75, }, } stats := engine.MVCCStats{ LiveBytes: 1, KeyBytes: 2, ValBytes: 3, IntentBytes: 4, LiveCount: 5, KeyCount: 6, ValCount: 7, IntentCount: 8, IntentAge: 9, GCBytesAge: 10, LastUpdateNanos: 1 * 1E9, } // Create some registries and add them to the recorder (two at node-level, // two at store-level). reg1 := metric.NewRegistry() reg2 := metric.NewRegistry() store1 := fakeStore{ storeID: roachpb.StoreID(1), stats: stats, desc: storeDesc1, registry: metric.NewRegistry(), } store2 := fakeStore{ storeID: roachpb.StoreID(2), stats: stats, desc: storeDesc2, registry: metric.NewRegistry(), } manual := hlc.NewManualClock(100) recorder := NewMetricsRecorder(hlc.NewClock(manual.UnixNano)) recorder.AddNodeRegistry("one.%s", reg1) recorder.AddNodeRegistry("two.%s", reg1) recorder.AddStore(store1) recorder.AddStore(store2) recorder.NodeStarted(nodeDesc, 50) // Ensure the metric system's view of time does not advance during this test // as the test expects time to not advance too far which would age the actual // data (e.g. in histogram's) unexpectedly. defer metric.TestingSetNow(func() time.Time { return time.Unix(0, manual.UnixNano()).UTC() })() // Create a flat array of registries, along with metadata for each, to help // generate expected results. regList := []struct { reg *metric.Registry prefix string source int64 }{ { reg: reg1, prefix: "cr.node.one.", source: 1, }, { reg: reg2, prefix: "cr.node.two.", source: 1, }, { reg: store1.registry, prefix: "cr.store.", source: int64(store1.storeID), }, { reg: store2.registry, prefix: "cr.store.", source: int64(store2.storeID), }, } // Every registry will have the following metrics. metricNames := []struct { name string typ string val int64 }{ {"testGauge", "gauge", 20}, {"testCounter", "counter", 5}, {"testRate", "rate", 2}, {"testHistogram", "histogram", 10}, {"testLatency", "latency", 10}, // Stats needed for store summaries. {"ranges", "counter", 1}, {"ranges.leader", "gauge", 1}, {"ranges.replicated", "gauge", 1}, {"ranges.available", "gauge", 1}, } // Add the above metrics to each registry. At the same time, generate // expected time series results. 
var expected []ts.TimeSeriesData addExpected := func(prefix, name string, source, time, val int64) { expect := ts.TimeSeriesData{ Name: prefix + name, Source: strconv.FormatInt(source, 10), Datapoints: []*ts.TimeSeriesDatapoint{ { TimestampNanos: time, Value: float64(val), }, }, } expected = append(expected, expect) } for _, data := range metricNames { for _, reg := range regList { switch data.typ { case "gauge": reg.reg.Gauge(data.name).Update(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "counter": reg.reg.Counter(data.name).Inc(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "rate": reg.reg.Rates(data.name).Add(data.val) addExpected(reg.prefix, data.name+"-count", reg.source, 100, data.val) for _, scale := range metric.DefaultTimeScales { // Rate data is subject to timing errors in tests. Zero out // these values. addExpected(reg.prefix, data.name+sep+scale.Name(), reg.source, 100, 0) } case "histogram": reg.reg.Histogram(data.name, time.Second, 1000, 2).RecordValue(data.val) for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, data.val) } case "latency": reg.reg.Latency(data.name).RecordValue(data.val) // Latency is simply three histograms (at different resolution // time scales). for _, scale := range metric.DefaultTimeScales { for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+sep+scale.Name()+q.suffix, reg.source, 100, data.val) } } } } } actual := recorder.GetTimeSeriesData() // Zero-out timing-sensitive rate values from actual data. for _, act := range actual { match, err := regexp.MatchString(`testRate-\d+m`, act.Name) if err != nil { t.Fatal(err) } if match { act.Datapoints[0].Value = 0.0 } } // Actual comparison is simple: sort the resulting arrays by time and name, // and use reflect.DeepEqual. sort.Sort(byTimeAndName(actual)) sort.Sort(byTimeAndName(expected)) if a, e := actual, expected; !reflect.DeepEqual(a, e) { t.Errorf("recorder did not yield expected time series collection; diff:\n %v", pretty.Diff(e, a)) } // **** STATUS SUMMARY TESTING // Generate an expected node summary and two store summaries. The // information here is relatively simple in our test. 
expectedNodeSummary := &NodeStatus{ Desc: nodeDesc, StartedAt: 50, UpdatedAt: 100, StoreIDs: []roachpb.StoreID{ roachpb.StoreID(1), roachpb.StoreID(2), }, RangeCount: 2, LeaderRangeCount: 2, AvailableRangeCount: 2, ReplicatedRangeCount: 2, } expectedStoreSummaries := []storage.StoreStatus{ { Desc: storeDesc1, NodeID: roachpb.NodeID(1), StartedAt: 50, UpdatedAt: 100, RangeCount: 1, LeaderRangeCount: 1, AvailableRangeCount: 1, ReplicatedRangeCount: 1, Stats: stats, }, { Desc: storeDesc2, NodeID: roachpb.NodeID(1), StartedAt: 50, UpdatedAt: 100, RangeCount: 1, LeaderRangeCount: 1, AvailableRangeCount: 1, ReplicatedRangeCount: 1, Stats: stats, }, } for _, ss := range expectedStoreSummaries { expectedNodeSummary.Stats.Add(ss.Stats) } nodeSummary, storeSummaries := recorder.GetStatusSummaries() if nodeSummary == nil { t.Fatalf("recorder did not return nodeSummary.") } if storeSummaries == nil { t.Fatalf("recorder did not return storeSummaries.") } sort.Sort(byStoreDescID(storeSummaries)) sort.Sort(byStoreID(nodeSummary.StoreIDs)) if a, e := nodeSummary, expectedNodeSummary; !reflect.DeepEqual(a, e) { t.Errorf("recorder did not produce expected NodeSummary; diff:\n %v", pretty.Diff(e, a)) } if a, e := storeSummaries, expectedStoreSummaries; !reflect.DeepEqual(a, e) { t.Errorf("recorder did not produce expected StoreSummaries; diff:\n %v", pretty.Diff(e, a)) } }
Len
identifier_name
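The masked identifier in this sample is Len, one of the three methods of Go's sort.Interface. A self-contained sketch of the same pattern on a hypothetical element type, including the compile-time assertion the test file uses:

package main

import (
	"fmt"
	"sort"
)

// byID sorts a plain int slice; these three methods are the whole of
// sort.Interface, just as in the byStoreID type above.
type byID []int

func (a byID) Len() int           { return len(a) }
func (a byID) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byID) Less(i, j int) bool { return a[i] < a[j] }

// Compile-time assertion, in the same style the test file uses.
var _ sort.Interface = byID{}

func main() {
	ids := byID{3, 1, 2}
	sort.Sort(ids)
	fmt.Println(ids) // [1 2 3]
}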
recorder_test.go
// Copyright 2015 The Cockroach Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. // // Author: Matt Tracy ([email protected]) package status import ( "reflect" "regexp" "sort" "strconv" "testing" "time" "github.com/kr/pretty" "github.com/cockroachdb/cockroach/roachpb" "github.com/cockroachdb/cockroach/storage" "github.com/cockroachdb/cockroach/storage/engine" "github.com/cockroachdb/cockroach/ts" "github.com/cockroachdb/cockroach/util/hlc" "github.com/cockroachdb/cockroach/util/leaktest" "github.com/cockroachdb/cockroach/util/metric" ) const sep = "-" // byTimeAndName is a slice of ts.TimeSeriesData. type byTimeAndName []ts.TimeSeriesData // implement sort.Interface for byTimeAndName func (a byTimeAndName) Len() int { return len(a) } func (a byTimeAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byTimeAndName) Less(i, j int) bool { if a[i].Name != a[j].Name { return a[i].Name < a[j].Name } if a[i].Datapoints[0].TimestampNanos != a[j].Datapoints[0].TimestampNanos { return a[i].Datapoints[0].TimestampNanos < a[j].Datapoints[0].TimestampNanos } return a[i].Source < a[j].Source } var _ sort.Interface = byTimeAndName{} // byStoreID is a slice of roachpb.StoreID. type byStoreID []roachpb.StoreID // implement sort.Interface for byStoreID func (a byStoreID) Len() int { return len(a) } func (a byStoreID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreID) Less(i, j int) bool { return a[i] < a[j] } var _ sort.Interface = byStoreID{} // byStoreDescID is a slice of storage.StoreStatus type byStoreDescID []storage.StoreStatus // implement sort.Interface for byStoreDescID. func (a byStoreDescID) Len() int { return len(a) } func (a byStoreDescID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreDescID) Less(i, j int) bool { return a[i].Desc.StoreID < a[j].Desc.StoreID } var _ sort.Interface = byStoreDescID{} // fakeStore implements only the methods of store needed by MetricsRecorder to // interact with stores. type fakeStore struct { storeID roachpb.StoreID stats engine.MVCCStats desc roachpb.StoreDescriptor registry *metric.Registry } func (fs fakeStore) StoreID() roachpb.StoreID { return fs.storeID } func (fs fakeStore) Descriptor() (*roachpb.StoreDescriptor, error) { return &fs.desc, nil } func (fs fakeStore) MVCCStats() engine.MVCCStats { return fs.stats } func (fs fakeStore) Registry() *metric.Registry { return fs.registry } // TestMetricsRecorder verifies that the metrics recorder properly formats the // statistics from various registries, both for Time Series and for Status // Summaries. func TestMetricsRecorder(t *testing.T) { defer leaktest.AfterTest(t)() // Fake descriptors and stats for status summaries. 
nodeDesc := roachpb.NodeDescriptor{ NodeID: roachpb.NodeID(1), } storeDesc1 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(1), Capacity: roachpb.StoreCapacity{ Capacity: 100, Available: 50, }, } storeDesc2 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(2), Capacity: roachpb.StoreCapacity{ Capacity: 200, Available: 75, }, } stats := engine.MVCCStats{ LiveBytes: 1, KeyBytes: 2, ValBytes: 3, IntentBytes: 4, LiveCount: 5, KeyCount: 6, ValCount: 7, IntentCount: 8, IntentAge: 9, GCBytesAge: 10, LastUpdateNanos: 1 * 1E9, } // Create some registries and add them to the recorder (two at node-level, // two at store-level). reg1 := metric.NewRegistry() reg2 := metric.NewRegistry() store1 := fakeStore{ storeID: roachpb.StoreID(1), stats: stats, desc: storeDesc1, registry: metric.NewRegistry(), } store2 := fakeStore{ storeID: roachpb.StoreID(2), stats: stats, desc: storeDesc2, registry: metric.NewRegistry(), } manual := hlc.NewManualClock(100) recorder := NewMetricsRecorder(hlc.NewClock(manual.UnixNano)) recorder.AddNodeRegistry("one.%s", reg1) recorder.AddNodeRegistry("two.%s", reg1) recorder.AddStore(store1) recorder.AddStore(store2) recorder.NodeStarted(nodeDesc, 50) // Ensure the metric system's view of time does not advance during this test // as the test expects time to not advance too far which would age the actual // data (e.g. in histogram's) unexpectedly. defer metric.TestingSetNow(func() time.Time { return time.Unix(0, manual.UnixNano()).UTC() })() // Create a flat array of registries, along with metadata for each, to help // generate expected results. regList := []struct { reg *metric.Registry prefix string source int64 }{ { reg: reg1, prefix: "cr.node.one.", source: 1, }, { reg: reg2, prefix: "cr.node.two.", source: 1, }, { reg: store1.registry, prefix: "cr.store.", source: int64(store1.storeID), }, { reg: store2.registry, prefix: "cr.store.", source: int64(store2.storeID), }, } // Every registry will have the following metrics. metricNames := []struct { name string typ string val int64 }{ {"testGauge", "gauge", 20}, {"testCounter", "counter", 5}, {"testRate", "rate", 2}, {"testHistogram", "histogram", 10}, {"testLatency", "latency", 10}, // Stats needed for store summaries. {"ranges", "counter", 1}, {"ranges.leader", "gauge", 1}, {"ranges.replicated", "gauge", 1}, {"ranges.available", "gauge", 1}, } // Add the above metrics to each registry. At the same time, generate // expected time series results. var expected []ts.TimeSeriesData addExpected := func(prefix, name string, source, time, val int64) { expect := ts.TimeSeriesData{ Name: prefix + name, Source: strconv.FormatInt(source, 10), Datapoints: []*ts.TimeSeriesDatapoint{ { TimestampNanos: time, Value: float64(val), }, }, } expected = append(expected, expect) } for _, data := range metricNames { for _, reg := range regList { switch data.typ { case "gauge": reg.reg.Gauge(data.name).Update(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "counter": reg.reg.Counter(data.name).Inc(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "rate": reg.reg.Rates(data.name).Add(data.val) addExpected(reg.prefix, data.name+"-count", reg.source, 100, data.val) for _, scale := range metric.DefaultTimeScales { // Rate data is subject to timing errors in tests. Zero out // these values. 
addExpected(reg.prefix, data.name+sep+scale.Name(), reg.source, 100, 0) } case "histogram": reg.reg.Histogram(data.name, time.Second, 1000, 2).RecordValue(data.val) for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, data.val) } case "latency": reg.reg.Latency(data.name).RecordValue(data.val) // Latency is simply three histograms (at different resolution // time scales). for _, scale := range metric.DefaultTimeScales { for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+sep+scale.Name()+q.suffix, reg.source, 100, data.val) } } } } } actual := recorder.GetTimeSeriesData() // Zero-out timing-sensitive rate values from actual data. for _, act := range actual { match, err := regexp.MatchString(`testRate-\d+m`, act.Name) if err != nil
if match { act.Datapoints[0].Value = 0.0 } } // Actual comparison is simple: sort the resulting arrays by time and name, // and use reflect.DeepEqual. sort.Sort(byTimeAndName(actual)) sort.Sort(byTimeAndName(expected)) if a, e := actual, expected; !reflect.DeepEqual(a, e) { t.Errorf("recorder did not yield expected time series collection; diff:\n %v", pretty.Diff(e, a)) } // **** STATUS SUMMARY TESTING // Generate an expected node summary and two store summaries. The // information here is relatively simple in our test. expectedNodeSummary := &NodeStatus{ Desc: nodeDesc, StartedAt: 50, UpdatedAt: 100, StoreIDs: []roachpb.StoreID{ roachpb.StoreID(1), roachpb.StoreID(2), }, RangeCount: 2, LeaderRangeCount: 2, AvailableRangeCount: 2, ReplicatedRangeCount: 2, } expectedStoreSummaries := []storage.StoreStatus{ { Desc: storeDesc1, NodeID: roachpb.NodeID(1), StartedAt: 50, UpdatedAt: 100, RangeCount: 1, LeaderRangeCount: 1, AvailableRangeCount: 1, ReplicatedRangeCount: 1, Stats: stats, }, { Desc: storeDesc2, NodeID: roachpb.NodeID(1), StartedAt: 50, UpdatedAt: 100, RangeCount: 1, LeaderRangeCount: 1, AvailableRangeCount: 1, ReplicatedRangeCount: 1, Stats: stats, }, } for _, ss := range expectedStoreSummaries { expectedNodeSummary.Stats.Add(ss.Stats) } nodeSummary, storeSummaries := recorder.GetStatusSummaries() if nodeSummary == nil { t.Fatalf("recorder did not return nodeSummary.") } if storeSummaries == nil { t.Fatalf("recorder did not return storeSummaries.") } sort.Sort(byStoreDescID(storeSummaries)) sort.Sort(byStoreID(nodeSummary.StoreIDs)) if a, e := nodeSummary, expectedNodeSummary; !reflect.DeepEqual(a, e) { t.Errorf("recorder did not produce expected NodeSummary; diff:\n %v", pretty.Diff(e, a)) } if a, e := storeSummaries, expectedStoreSummaries; !reflect.DeepEqual(a, e) { t.Errorf("recorder did not produce expected StoreSummaries; diff:\n %v", pretty.Diff(e, a)) } }
{ t.Fatal(err) }
conditional_block
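The masked block here is the t.Fatal(err) branch guarding regexp.MatchString. A small standalone sketch of the surrounding idea, zeroing timing-sensitive values whose names match a pattern before comparison; it assumes a toy datapoint type and uses MustCompile, which trades the per-call error branch for a startup panic on a bad pattern:

package main

import (
	"fmt"
	"regexp"
)

type datapoint struct {
	Name  string
	Value float64
}

func main() {
	// Compile once instead of calling regexp.MatchString per element; the
	// pattern mirrors the testRate-\d+m name check in the test above.
	rateName := regexp.MustCompile(`testRate-\d+m`)

	actual := []datapoint{
		{Name: "testRate-10m", Value: 1.9}, // timing-sensitive
		{Name: "testGauge", Value: 20},
	}
	for i := range actual {
		if rateName.MatchString(actual[i].Name) {
			actual[i].Value = 0 // zero before the DeepEqual-style comparison
		}
	}
	fmt.Println(actual)
}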
recorder_test.go
// Copyright 2015 The Cockroach Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. // // Author: Matt Tracy ([email protected]) package status import ( "reflect" "regexp" "sort" "strconv" "testing" "time" "github.com/kr/pretty" "github.com/cockroachdb/cockroach/roachpb" "github.com/cockroachdb/cockroach/storage" "github.com/cockroachdb/cockroach/storage/engine" "github.com/cockroachdb/cockroach/ts" "github.com/cockroachdb/cockroach/util/hlc" "github.com/cockroachdb/cockroach/util/leaktest" "github.com/cockroachdb/cockroach/util/metric" ) const sep = "-" // byTimeAndName is a slice of ts.TimeSeriesData. type byTimeAndName []ts.TimeSeriesData // implement sort.Interface for byTimeAndName func (a byTimeAndName) Len() int
func (a byTimeAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byTimeAndName) Less(i, j int) bool { if a[i].Name != a[j].Name { return a[i].Name < a[j].Name } if a[i].Datapoints[0].TimestampNanos != a[j].Datapoints[0].TimestampNanos { return a[i].Datapoints[0].TimestampNanos < a[j].Datapoints[0].TimestampNanos } return a[i].Source < a[j].Source } var _ sort.Interface = byTimeAndName{} // byStoreID is a slice of roachpb.StoreID. type byStoreID []roachpb.StoreID // implement sort.Interface for byStoreID func (a byStoreID) Len() int { return len(a) } func (a byStoreID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreID) Less(i, j int) bool { return a[i] < a[j] } var _ sort.Interface = byStoreID{} // byStoreDescID is a slice of storage.StoreStatus type byStoreDescID []storage.StoreStatus // implement sort.Interface for byStoreDescID. func (a byStoreDescID) Len() int { return len(a) } func (a byStoreDescID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreDescID) Less(i, j int) bool { return a[i].Desc.StoreID < a[j].Desc.StoreID } var _ sort.Interface = byStoreDescID{} // fakeStore implements only the methods of store needed by MetricsRecorder to // interact with stores. type fakeStore struct { storeID roachpb.StoreID stats engine.MVCCStats desc roachpb.StoreDescriptor registry *metric.Registry } func (fs fakeStore) StoreID() roachpb.StoreID { return fs.storeID } func (fs fakeStore) Descriptor() (*roachpb.StoreDescriptor, error) { return &fs.desc, nil } func (fs fakeStore) MVCCStats() engine.MVCCStats { return fs.stats } func (fs fakeStore) Registry() *metric.Registry { return fs.registry } // TestMetricsRecorder verifies that the metrics recorder properly formats the // statistics from various registries, both for Time Series and for Status // Summaries. func TestMetricsRecorder(t *testing.T) { defer leaktest.AfterTest(t)() // Fake descriptors and stats for status summaries. nodeDesc := roachpb.NodeDescriptor{ NodeID: roachpb.NodeID(1), } storeDesc1 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(1), Capacity: roachpb.StoreCapacity{ Capacity: 100, Available: 50, }, } storeDesc2 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(2), Capacity: roachpb.StoreCapacity{ Capacity: 200, Available: 75, }, } stats := engine.MVCCStats{ LiveBytes: 1, KeyBytes: 2, ValBytes: 3, IntentBytes: 4, LiveCount: 5, KeyCount: 6, ValCount: 7, IntentCount: 8, IntentAge: 9, GCBytesAge: 10, LastUpdateNanos: 1 * 1E9, } // Create some registries and add them to the recorder (two at node-level, // two at store-level). reg1 := metric.NewRegistry() reg2 := metric.NewRegistry() store1 := fakeStore{ storeID: roachpb.StoreID(1), stats: stats, desc: storeDesc1, registry: metric.NewRegistry(), } store2 := fakeStore{ storeID: roachpb.StoreID(2), stats: stats, desc: storeDesc2, registry: metric.NewRegistry(), } manual := hlc.NewManualClock(100) recorder := NewMetricsRecorder(hlc.NewClock(manual.UnixNano)) recorder.AddNodeRegistry("one.%s", reg1) recorder.AddNodeRegistry("two.%s", reg1) recorder.AddStore(store1) recorder.AddStore(store2) recorder.NodeStarted(nodeDesc, 50) // Ensure the metric system's view of time does not advance during this test // as the test expects time to not advance too far which would age the actual // data (e.g. in histogram's) unexpectedly. defer metric.TestingSetNow(func() time.Time { return time.Unix(0, manual.UnixNano()).UTC() })() // Create a flat array of registries, along with metadata for each, to help // generate expected results. 
regList := []struct { reg *metric.Registry prefix string source int64 }{ { reg: reg1, prefix: "cr.node.one.", source: 1, }, { reg: reg2, prefix: "cr.node.two.", source: 1, }, { reg: store1.registry, prefix: "cr.store.", source: int64(store1.storeID), }, { reg: store2.registry, prefix: "cr.store.", source: int64(store2.storeID), }, } // Every registry will have the following metrics. metricNames := []struct { name string typ string val int64 }{ {"testGauge", "gauge", 20}, {"testCounter", "counter", 5}, {"testRate", "rate", 2}, {"testHistogram", "histogram", 10}, {"testLatency", "latency", 10}, // Stats needed for store summaries. {"ranges", "counter", 1}, {"ranges.leader", "gauge", 1}, {"ranges.replicated", "gauge", 1}, {"ranges.available", "gauge", 1}, } // Add the above metrics to each registry. At the same time, generate // expected time series results. var expected []ts.TimeSeriesData addExpected := func(prefix, name string, source, time, val int64) { expect := ts.TimeSeriesData{ Name: prefix + name, Source: strconv.FormatInt(source, 10), Datapoints: []*ts.TimeSeriesDatapoint{ { TimestampNanos: time, Value: float64(val), }, }, } expected = append(expected, expect) } for _, data := range metricNames { for _, reg := range regList { switch data.typ { case "gauge": reg.reg.Gauge(data.name).Update(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "counter": reg.reg.Counter(data.name).Inc(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "rate": reg.reg.Rates(data.name).Add(data.val) addExpected(reg.prefix, data.name+"-count", reg.source, 100, data.val) for _, scale := range metric.DefaultTimeScales { // Rate data is subject to timing errors in tests. Zero out // these values. addExpected(reg.prefix, data.name+sep+scale.Name(), reg.source, 100, 0) } case "histogram": reg.reg.Histogram(data.name, time.Second, 1000, 2).RecordValue(data.val) for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, data.val) } case "latency": reg.reg.Latency(data.name).RecordValue(data.val) // Latency is simply three histograms (at different resolution // time scales). for _, scale := range metric.DefaultTimeScales { for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+sep+scale.Name()+q.suffix, reg.source, 100, data.val) } } } } } actual := recorder.GetTimeSeriesData() // Zero-out timing-sensitive rate values from actual data. for _, act := range actual { match, err := regexp.MatchString(`testRate-\d+m`, act.Name) if err != nil { t.Fatal(err) } if match { act.Datapoints[0].Value = 0.0 } } // Actual comparison is simple: sort the resulting arrays by time and name, // and use reflect.DeepEqual. sort.Sort(byTimeAndName(actual)) sort.Sort(byTimeAndName(expected)) if a, e := actual, expected; !reflect.DeepEqual(a, e) { t.Errorf("recorder did not yield expected time series collection; diff:\n %v", pretty.Diff(e, a)) } // **** STATUS SUMMARY TESTING // Generate an expected node summary and two store summaries. The // information here is relatively simple in our test. 
expectedNodeSummary := &NodeStatus{ Desc: nodeDesc, StartedAt: 50, UpdatedAt: 100, StoreIDs: []roachpb.StoreID{ roachpb.StoreID(1), roachpb.StoreID(2), }, RangeCount: 2, LeaderRangeCount: 2, AvailableRangeCount: 2, ReplicatedRangeCount: 2, } expectedStoreSummaries := []storage.StoreStatus{ { Desc: storeDesc1, NodeID: roachpb.NodeID(1), StartedAt: 50, UpdatedAt: 100, RangeCount: 1, LeaderRangeCount: 1, AvailableRangeCount: 1, ReplicatedRangeCount: 1, Stats: stats, }, { Desc: storeDesc2, NodeID: roachpb.NodeID(1), StartedAt: 50, UpdatedAt: 100, RangeCount: 1, LeaderRangeCount: 1, AvailableRangeCount: 1, ReplicatedRangeCount: 1, Stats: stats, }, } for _, ss := range expectedStoreSummaries { expectedNodeSummary.Stats.Add(ss.Stats) } nodeSummary, storeSummaries := recorder.GetStatusSummaries() if nodeSummary == nil { t.Fatalf("recorder did not return nodeSummary.") } if storeSummaries == nil { t.Fatalf("recorder did not return storeSummaries.") } sort.Sort(byStoreDescID(storeSummaries)) sort.Sort(byStoreID(nodeSummary.StoreIDs)) if a, e := nodeSummary, expectedNodeSummary; !reflect.DeepEqual(a, e) { t.Errorf("recorder did not produce expected NodeSummary; diff:\n %v", pretty.Diff(e, a)) } if a, e := storeSummaries, expectedStoreSummaries; !reflect.DeepEqual(a, e) { t.Errorf("recorder did not produce expected StoreSummaries; diff:\n %v", pretty.Diff(e, a)) } }
{ return len(a) }
identifier_body
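The masked body, { return len(a) }, again belongs to a Len method; the more interesting idiom in this record is fakeStore, which implements only the methods the recorder consumes. A compact sketch of that test-double idiom with a hypothetical consumer-side interface (none of these names come from the CockroachDB code):

package main

import "fmt"

// storeSource is a hypothetical consumer-side view of a store: the recorder
// only needs these two methods, so a test fake only has to supply them.
type storeSource interface {
	StoreID() int
	Capacity() (total, available int64)
}

type fakeStore struct {
	id               int
	total, available int64
}

func (fs fakeStore) StoreID() int { return fs.id }

func (fs fakeStore) Capacity() (total, available int64) {
	return fs.total, fs.available
}

func main() {
	var s storeSource = fakeStore{id: 1, total: 100, available: 50}
	total, available := s.Capacity()
	fmt.Println(s.StoreID(), total, available)
}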
recorder_test.go
// Copyright 2015 The Cockroach Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. // // Author: Matt Tracy ([email protected]) package status import ( "reflect" "regexp" "sort" "strconv" "testing" "time" "github.com/kr/pretty" "github.com/cockroachdb/cockroach/roachpb" "github.com/cockroachdb/cockroach/storage" "github.com/cockroachdb/cockroach/storage/engine" "github.com/cockroachdb/cockroach/ts" "github.com/cockroachdb/cockroach/util/hlc" "github.com/cockroachdb/cockroach/util/leaktest" "github.com/cockroachdb/cockroach/util/metric" ) const sep = "-"
func (a byTimeAndName) Len() int { return len(a) } func (a byTimeAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byTimeAndName) Less(i, j int) bool { if a[i].Name != a[j].Name { return a[i].Name < a[j].Name } if a[i].Datapoints[0].TimestampNanos != a[j].Datapoints[0].TimestampNanos { return a[i].Datapoints[0].TimestampNanos < a[j].Datapoints[0].TimestampNanos } return a[i].Source < a[j].Source } var _ sort.Interface = byTimeAndName{} // byStoreID is a slice of roachpb.StoreID. type byStoreID []roachpb.StoreID // implement sort.Interface for byStoreID func (a byStoreID) Len() int { return len(a) } func (a byStoreID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreID) Less(i, j int) bool { return a[i] < a[j] } var _ sort.Interface = byStoreID{} // byStoreDescID is a slice of storage.StoreStatus type byStoreDescID []storage.StoreStatus // implement sort.Interface for byStoreDescID. func (a byStoreDescID) Len() int { return len(a) } func (a byStoreDescID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byStoreDescID) Less(i, j int) bool { return a[i].Desc.StoreID < a[j].Desc.StoreID } var _ sort.Interface = byStoreDescID{} // fakeStore implements only the methods of store needed by MetricsRecorder to // interact with stores. type fakeStore struct { storeID roachpb.StoreID stats engine.MVCCStats desc roachpb.StoreDescriptor registry *metric.Registry } func (fs fakeStore) StoreID() roachpb.StoreID { return fs.storeID } func (fs fakeStore) Descriptor() (*roachpb.StoreDescriptor, error) { return &fs.desc, nil } func (fs fakeStore) MVCCStats() engine.MVCCStats { return fs.stats } func (fs fakeStore) Registry() *metric.Registry { return fs.registry } // TestMetricsRecorder verifies that the metrics recorder properly formats the // statistics from various registries, both for Time Series and for Status // Summaries. func TestMetricsRecorder(t *testing.T) { defer leaktest.AfterTest(t)() // Fake descriptors and stats for status summaries. nodeDesc := roachpb.NodeDescriptor{ NodeID: roachpb.NodeID(1), } storeDesc1 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(1), Capacity: roachpb.StoreCapacity{ Capacity: 100, Available: 50, }, } storeDesc2 := roachpb.StoreDescriptor{ StoreID: roachpb.StoreID(2), Capacity: roachpb.StoreCapacity{ Capacity: 200, Available: 75, }, } stats := engine.MVCCStats{ LiveBytes: 1, KeyBytes: 2, ValBytes: 3, IntentBytes: 4, LiveCount: 5, KeyCount: 6, ValCount: 7, IntentCount: 8, IntentAge: 9, GCBytesAge: 10, LastUpdateNanos: 1 * 1E9, } // Create some registries and add them to the recorder (two at node-level, // two at store-level). reg1 := metric.NewRegistry() reg2 := metric.NewRegistry() store1 := fakeStore{ storeID: roachpb.StoreID(1), stats: stats, desc: storeDesc1, registry: metric.NewRegistry(), } store2 := fakeStore{ storeID: roachpb.StoreID(2), stats: stats, desc: storeDesc2, registry: metric.NewRegistry(), } manual := hlc.NewManualClock(100) recorder := NewMetricsRecorder(hlc.NewClock(manual.UnixNano)) recorder.AddNodeRegistry("one.%s", reg1) recorder.AddNodeRegistry("two.%s", reg1) recorder.AddStore(store1) recorder.AddStore(store2) recorder.NodeStarted(nodeDesc, 50) // Ensure the metric system's view of time does not advance during this test // as the test expects time to not advance too far which would age the actual // data (e.g. in histogram's) unexpectedly. 
defer metric.TestingSetNow(func() time.Time { return time.Unix(0, manual.UnixNano()).UTC() })() // Create a flat array of registries, along with metadata for each, to help // generate expected results. regList := []struct { reg *metric.Registry prefix string source int64 }{ { reg: reg1, prefix: "cr.node.one.", source: 1, }, { reg: reg2, prefix: "cr.node.two.", source: 1, }, { reg: store1.registry, prefix: "cr.store.", source: int64(store1.storeID), }, { reg: store2.registry, prefix: "cr.store.", source: int64(store2.storeID), }, } // Every registry will have the following metrics. metricNames := []struct { name string typ string val int64 }{ {"testGauge", "gauge", 20}, {"testCounter", "counter", 5}, {"testRate", "rate", 2}, {"testHistogram", "histogram", 10}, {"testLatency", "latency", 10}, // Stats needed for store summaries. {"ranges", "counter", 1}, {"ranges.leader", "gauge", 1}, {"ranges.replicated", "gauge", 1}, {"ranges.available", "gauge", 1}, } // Add the above metrics to each registry. At the same time, generate // expected time series results. var expected []ts.TimeSeriesData addExpected := func(prefix, name string, source, time, val int64) { expect := ts.TimeSeriesData{ Name: prefix + name, Source: strconv.FormatInt(source, 10), Datapoints: []*ts.TimeSeriesDatapoint{ { TimestampNanos: time, Value: float64(val), }, }, } expected = append(expected, expect) } for _, data := range metricNames { for _, reg := range regList { switch data.typ { case "gauge": reg.reg.Gauge(data.name).Update(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "counter": reg.reg.Counter(data.name).Inc(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val) case "rate": reg.reg.Rates(data.name).Add(data.val) addExpected(reg.prefix, data.name+"-count", reg.source, 100, data.val) for _, scale := range metric.DefaultTimeScales { // Rate data is subject to timing errors in tests. Zero out // these values. addExpected(reg.prefix, data.name+sep+scale.Name(), reg.source, 100, 0) } case "histogram": reg.reg.Histogram(data.name, time.Second, 1000, 2).RecordValue(data.val) for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, data.val) } case "latency": reg.reg.Latency(data.name).RecordValue(data.val) // Latency is simply three histograms (at different resolution // time scales). for _, scale := range metric.DefaultTimeScales { for _, q := range recordHistogramQuantiles { addExpected(reg.prefix, data.name+sep+scale.Name()+q.suffix, reg.source, 100, data.val) } } } } } actual := recorder.GetTimeSeriesData() // Zero-out timing-sensitive rate values from actual data. for _, act := range actual { match, err := regexp.MatchString(`testRate-\d+m`, act.Name) if err != nil { t.Fatal(err) } if match { act.Datapoints[0].Value = 0.0 } } // Actual comparison is simple: sort the resulting arrays by time and name, // and use reflect.DeepEqual. sort.Sort(byTimeAndName(actual)) sort.Sort(byTimeAndName(expected)) if a, e := actual, expected; !reflect.DeepEqual(a, e) { t.Errorf("recorder did not yield expected time series collection; diff:\n %v", pretty.Diff(e, a)) } // **** STATUS SUMMARY TESTING // Generate an expected node summary and two store summaries. The // information here is relatively simple in our test. 
expectedNodeSummary := &NodeStatus{ Desc: nodeDesc, StartedAt: 50, UpdatedAt: 100, StoreIDs: []roachpb.StoreID{ roachpb.StoreID(1), roachpb.StoreID(2), }, RangeCount: 2, LeaderRangeCount: 2, AvailableRangeCount: 2, ReplicatedRangeCount: 2, } expectedStoreSummaries := []storage.StoreStatus{ { Desc: storeDesc1, NodeID: roachpb.NodeID(1), StartedAt: 50, UpdatedAt: 100, RangeCount: 1, LeaderRangeCount: 1, AvailableRangeCount: 1, ReplicatedRangeCount: 1, Stats: stats, }, { Desc: storeDesc2, NodeID: roachpb.NodeID(1), StartedAt: 50, UpdatedAt: 100, RangeCount: 1, LeaderRangeCount: 1, AvailableRangeCount: 1, ReplicatedRangeCount: 1, Stats: stats, }, } for _, ss := range expectedStoreSummaries { expectedNodeSummary.Stats.Add(ss.Stats) } nodeSummary, storeSummaries := recorder.GetStatusSummaries() if nodeSummary == nil { t.Fatalf("recorder did not return nodeSummary.") } if storeSummaries == nil { t.Fatalf("recorder did not return storeSummaries.") } sort.Sort(byStoreDescID(storeSummaries)) sort.Sort(byStoreID(nodeSummary.StoreIDs)) if a, e := nodeSummary, expectedNodeSummary; !reflect.DeepEqual(a, e) { t.Errorf("recorder did not produce expected NodeSummary; diff:\n %v", pretty.Diff(e, a)) } if a, e := storeSummaries, expectedStoreSummaries; !reflect.DeepEqual(a, e) { t.Errorf("recorder did not produce expected StoreSummaries; diff:\n %v", pretty.Diff(e, a)) } }
// byTimeAndName is a slice of ts.TimeSeriesData. type byTimeAndName []ts.TimeSeriesData // implement sort.Interface for byTimeAndName
random_line_split
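This sample masks the byTimeAndName type declaration. Elsewhere in the same file, the expected series are accumulated through a small append-closure (addExpected); the pattern in isolation, with simplified types, looks like this:

package main

import "fmt"

type series struct {
	Name   string
	Source string
	Value  float64
}

func main() {
	var expected []series
	// The closure captures the slice, so each expectation is one call,
	// just like addExpected in the test.
	addExpected := func(prefix, name, source string, val float64) {
		expected = append(expected, series{Name: prefix + name, Source: source, Value: val})
	}

	addExpected("cr.node.one.", "testGauge", "1", 20)
	addExpected("cr.store.", "ranges", "1", 1)
	fmt.Println(expected)
}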
TransformExpression.ts
import IDocumentWriter from '../../documentWriter/IDocumentWriter'; import { ConcreteDocumentNode, ConcreteElementNode, ConcreteNode, NODE_TYPES } from '../../domFacade/ConcreteNode'; import IWrappingDomFacade from '../../domFacade/IWrappingDomFacade'; import INodesFactory from '../../nodesFactory/INodesFactory'; import createNodeValue from '../dataTypes/createNodeValue'; import isSubtypeOf from '../dataTypes/isSubtypeOf'; import Value from '../dataTypes/Value'; import sequenceFactory from '../dataTypes/sequenceFactory'; import QName from '../dataTypes/valueTypes/QName'; import DynamicContext from '../DynamicContext'; import ExecutionParameters from '../ExecutionParameters'; import Expression, { RESULT_ORDERINGS } from '../Expression'; import Specificity from '../Specificity'; import StaticContext from '../StaticContext'; import UpdatingExpressionResult from '../UpdatingExpressionResult'; import { DONE_TOKEN, IAsyncIterator, IterationHint, notReady, ready } from '../util/iterators'; import { applyUpdates, mergeUpdates } from './pulRoutines'; import UpdatingExpression from './UpdatingExpression'; import { errXUDY0014, errXUDY0037, errXUTY0013 } from './XQueryUpdateFacilityErrors'; import { IPendingUpdate } from './IPendingUpdate'; import ISequence from '../dataTypes/ISequence'; import { separateXDMValueFromUpdatingExpressionResult } from '../PossiblyUpdatingExpression'; function deepCloneNode( node: ConcreteNode, domFacade: IWrappingDomFacade, nodesFactory: INodesFactory, documentWriter: IDocumentWriter ) { // Each copied node receives a new node identity. The parent, children, and attributes properties of the copied nodes are set so as to preserve their inter-node relationships. The parent property of the copy of $node is set to empty. Other properties of the copied nodes are determined as follows: // For a copied document node, the document-uri property is set to empty. // For a copied element node, the type-name property is set to xs:untyped, and the nilled, is-id, and is-idrefs properties are set to false. // For a copied attribute node, the type-name property is set to xs:untypedAtomic and the is-idrefs property is set to false. The is-id property is set to true if the qualified name of the attribute node is xml:id; otherwise it is set to false. // The string-value of each copied element and attribute node remains unchanged, and its typed value becomes equal to its string value as an instance of xs:untypedAtomic. // Note:Implementations that store only the typed value of a node are required at this point to convert the typed value to a string form. // If copy-namespaces mode in the static context specifies preserve, all in-scope-namespaces of the original element are retained in the new copy. If copy-namespaces mode specifies no-preserve, the new copy retains only those in-scope namespaces of the original element that are used in the names of the element and its attributes. // All other properties of the copied nodes are preserved. 
switch (node.nodeType) { case NODE_TYPES.ELEMENT_NODE: const cloneElem = nodesFactory.createElementNS(node.namespaceURI, node.nodeName); domFacade .getAllAttributes(node) .forEach(attr => documentWriter.setAttributeNS( cloneElem, attr.namespaceURI, attr.name, attr.value ) ); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneElem as ConcreteElementNode, descendant, null); } return cloneElem; case NODE_TYPES.ATTRIBUTE_NODE: const cloneAttr = nodesFactory.createAttributeNS(node.namespaceURI, node.nodeName); cloneAttr.value = node.value; return cloneAttr; case NODE_TYPES.CDATA_SECTION_NODE: return nodesFactory.createCDATASection(node.data); case NODE_TYPES.COMMENT_NODE: return nodesFactory.createComment(node.data); case NODE_TYPES.DOCUMENT_NODE: const cloneDoc = nodesFactory.createDocument(); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneDoc as ConcreteDocumentNode, descendant, null); } return cloneDoc; case NODE_TYPES.PROCESSING_INSTRUCTION_NODE: return nodesFactory.createProcessingInstruction(node.target, node.data); case NODE_TYPES.TEXT_NODE: return nodesFactory.createTextNode(node.data); } } function isCreatedNode(node, createdNodes, domFacade) { if (createdNodes.includes(node)) { return true; } const parent = domFacade.getParentNode(node); return parent ? isCreatedNode(parent, createdNodes, domFacade) : false; } type VariableBinding = { registeredVariable?: string; sourceExpr: Expression; varRef: QName }; class TransformExpression extends UpdatingExpression { public _modifyExpr: Expression; public _returnExpr: Expression; public _variableBindings: VariableBinding[]; constructor( variableBindings: VariableBinding[], modifyExpr: Expression, returnExpr: Expression ) { super( new Specificity({}), variableBindings.reduce( (childExpressions, variableBinding) => { childExpressions.push(variableBinding.sourceExpr); return childExpressions; }, [modifyExpr, returnExpr] ), { canBeStaticallyEvaluated: false, resultOrder: RESULT_ORDERINGS.UNSORTED } ); this._variableBindings = variableBindings; this._modifyExpr = modifyExpr; this._returnExpr = returnExpr; } public evaluateWithUpdateList( dynamicContext: DynamicContext, executionParameters: ExecutionParameters ): IAsyncIterator<UpdatingExpressionResult> { const { domFacade, nodesFactory, documentWriter } = executionParameters; const sourceValueIterators: IAsyncIterator<UpdatingExpressionResult>[] = []; let modifyValueIterator: IAsyncIterator<UpdatingExpressionResult>; let returnValueIterator: IAsyncIterator<UpdatingExpressionResult>; let modifyPul: IPendingUpdate[]; const createdNodes = []; const toMergePuls = []; return { next: () => { if (createdNodes.length !== this._variableBindings.length) { // The copy clause contains one or more variable bindings, each of which consists of a variable name and an expression called the source expression. 
for (let i = createdNodes.length; i < this._variableBindings.length; i++) { const variableBinding = this._variableBindings[i]; let sourceValueIterator: IAsyncIterator<UpdatingExpressionResult> = sourceValueIterators[i]; // Each variable binding is processed as follows: if (!sourceValueIterator) { sourceValueIterators[ i ] = sourceValueIterator = this.ensureUpdateListWrapper( variableBinding.sourceExpr )(dynamicContext, executionParameters); } const sv = sourceValueIterator.next(IterationHint.NONE); if (!sv.ready) { return sv; } // The result of evaluating the source expression must be a single node [err:XUTY0013]. Let $node be this single node. if ( sv.value.xdmValue.length !== 1 || !isSubtypeOf(sv.value.xdmValue[0].type, 'node()') ) { throw errXUTY0013(); } const node = sv.value.xdmValue[0]; // A new copy is made of $node and all nodes that have $node as an ancestor, collectively referred to as copied nodes. const copiedNodes = createNodeValue( deepCloneNode(node.value, domFacade, nodesFactory, documentWriter) ); createdNodes.push(copiedNodes.value); toMergePuls.push(sv.value.pendingUpdateList); // The variable name is bound to the top-level copied node generated in the previous step. The scope of this variable binding includes all subexpressions of the containing copy modify expression that appear after the variable binding clause, including the source expressions of later variable bindings, but it does not include the source expression to which the current variable name is bound. dynamicContext = dynamicContext.scopeWithVariableBindings({ [variableBinding.registeredVariable]: () => sequenceFactory.singleton(copiedNodes) }); } } if (!modifyPul) { // The expression in the modify clause is evaluated, if (!modifyValueIterator) { modifyValueIterator = this.ensureUpdateListWrapper(this._modifyExpr)( dynamicContext, executionParameters ); } const mv = modifyValueIterator.next(IterationHint.NONE); if (!mv.ready) { return mv; } // resulting in a pending update list (denoted $pul) and an XDM instance. The XDM instance is discarded, and does not form part of the result of the copy modify expression. modifyPul = mv.value.pendingUpdateList; } modifyPul.forEach(pu => { // If the target node of any update primitive in $pul is a node that was not newly created in Step 1, a dynamic error is raised [err:XUDY0014]. if (pu.target && !isCreatedNode(pu.target, createdNodes, domFacade)) { throw errXUDY0014(pu.target); } // If $pul contains a upd:put update primitive, a dynamic error is raised [err:XUDY0037]. if (pu.type === 'put') { throw errXUDY0037(); } }); // Let $revalidation-mode be the value of the revalidation mode in the static context of the library or main module containing the copy modify expression, and $inherit-namespaces be the value of inherit-namespaces in the static context of the copy modify expression. The following update operation is invoked: upd:applyUpdates($pul, $revalidation-mode, $inherit-namespaces). applyUpdates(modifyPul, null, null, domFacade, nodesFactory, documentWriter); // The return clause is evaluated, resulting in a pending update list and an XDM instance. 
if (!returnValueIterator) { returnValueIterator = this.ensureUpdateListWrapper(this._returnExpr)( dynamicContext, executionParameters ); } const rv = returnValueIterator.next(IterationHint.NONE); if (!rv.ready) { return rv; } // The result of the copy modify expression is the XDM instance returned, as well as a pending update list constructed by merging the pending update lists returned by any of the copy modify expression's copy or return clause operand expressions using upd:mergeUpdates. During evaluation of the return clause, changes applied to copied nodes by the preceding step are visible. return ready({ xdmValue: rv.value.xdmValue, pendingUpdateList: mergeUpdates(rv.value.pendingUpdateList, ...toMergePuls) }); } }; } public performStaticEvaluation(staticContext: StaticContext) { staticContext.introduceScope(); this._variableBindings.forEach( variableBinding => (variableBinding.registeredVariable = staticContext.registerVariable( variableBinding.varRef.namespaceURI, variableBinding.varRef.localName )) ); super.performStaticEvaluation(staticContext); staticContext.removeScope(); // If all of the copy modify expression's copy and return clauses have operand expressions // that are simple expressions, then the copy modify expression is a simple expression. // If any of the copy modify expression's copy or return clauses have operand expressions // that are updating expressions, then the copy modify expression is a updating expression. this.isUpdating = this._variableBindings.some(varBinding => varBinding.sourceExpr.isUpdating) || this._returnExpr.isUpdating; } public
( dynamicContext: DynamicContext, executionParameters: ExecutionParameters ): ISequence { // If we were updating, the calling code would have called the evaluateWithUpdateList // method. We can assume we're not actually updating const pendingUpdateIterator = this.evaluateWithUpdateList( dynamicContext, executionParameters ); return separateXDMValueFromUpdatingExpressionResult(pendingUpdateIterator, _pul => {}); } } export default TransformExpression;
evaluate
identifier_name
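The masked identifier is evaluate. The heart of this file, though, is deepCloneNode: every copied node gets a fresh identity while parent/child structure is preserved, which is what lets the modify clause mutate copies safely. A generic Go sketch of that recursive deep copy over a toy tree type (not the DOM facade API):

package main

import "fmt"

type node struct {
	name     string
	children []*node
}

// deepClone returns a structurally identical tree that shares no pointers
// with the original, so mutations of the clone leave the source intact.
func deepClone(n *node) *node {
	c := &node{name: n.name}
	for _, child := range n.children {
		c.children = append(c.children, deepClone(child))
	}
	return c
}

func main() {
	root := &node{name: "doc", children: []*node{{name: "elem"}}}
	clone := deepClone(root)
	clone.children[0].name = "modified"
	fmt.Println(root.children[0].name, clone.children[0].name) // elem modified
}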
TransformExpression.ts
import IDocumentWriter from '../../documentWriter/IDocumentWriter'; import { ConcreteDocumentNode, ConcreteElementNode, ConcreteNode, NODE_TYPES } from '../../domFacade/ConcreteNode'; import IWrappingDomFacade from '../../domFacade/IWrappingDomFacade'; import INodesFactory from '../../nodesFactory/INodesFactory'; import createNodeValue from '../dataTypes/createNodeValue'; import isSubtypeOf from '../dataTypes/isSubtypeOf'; import Value from '../dataTypes/Value'; import sequenceFactory from '../dataTypes/sequenceFactory'; import QName from '../dataTypes/valueTypes/QName'; import DynamicContext from '../DynamicContext'; import ExecutionParameters from '../ExecutionParameters'; import Expression, { RESULT_ORDERINGS } from '../Expression'; import Specificity from '../Specificity'; import StaticContext from '../StaticContext'; import UpdatingExpressionResult from '../UpdatingExpressionResult'; import { DONE_TOKEN, IAsyncIterator, IterationHint, notReady, ready } from '../util/iterators'; import { applyUpdates, mergeUpdates } from './pulRoutines'; import UpdatingExpression from './UpdatingExpression'; import { errXUDY0014, errXUDY0037, errXUTY0013 } from './XQueryUpdateFacilityErrors'; import { IPendingUpdate } from './IPendingUpdate'; import ISequence from '../dataTypes/ISequence'; import { separateXDMValueFromUpdatingExpressionResult } from '../PossiblyUpdatingExpression'; function deepCloneNode( node: ConcreteNode, domFacade: IWrappingDomFacade, nodesFactory: INodesFactory, documentWriter: IDocumentWriter ) { // Each copied node receives a new node identity. The parent, children, and attributes properties of the copied nodes are set so as to preserve their inter-node relationships. The parent property of the copy of $node is set to empty. Other properties of the copied nodes are determined as follows: // For a copied document node, the document-uri property is set to empty. // For a copied element node, the type-name property is set to xs:untyped, and the nilled, is-id, and is-idrefs properties are set to false. // For a copied attribute node, the type-name property is set to xs:untypedAtomic and the is-idrefs property is set to false. The is-id property is set to true if the qualified name of the attribute node is xml:id; otherwise it is set to false. // The string-value of each copied element and attribute node remains unchanged, and its typed value becomes equal to its string value as an instance of xs:untypedAtomic. // Note:Implementations that store only the typed value of a node are required at this point to convert the typed value to a string form. // If copy-namespaces mode in the static context specifies preserve, all in-scope-namespaces of the original element are retained in the new copy. If copy-namespaces mode specifies no-preserve, the new copy retains only those in-scope namespaces of the original element that are used in the names of the element and its attributes. // All other properties of the copied nodes are preserved. 
switch (node.nodeType) { case NODE_TYPES.ELEMENT_NODE: const cloneElem = nodesFactory.createElementNS(node.namespaceURI, node.nodeName); domFacade .getAllAttributes(node) .forEach(attr => documentWriter.setAttributeNS( cloneElem, attr.namespaceURI, attr.name, attr.value ) ); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneElem as ConcreteElementNode, descendant, null); } return cloneElem; case NODE_TYPES.ATTRIBUTE_NODE: const cloneAttr = nodesFactory.createAttributeNS(node.namespaceURI, node.nodeName); cloneAttr.value = node.value; return cloneAttr; case NODE_TYPES.CDATA_SECTION_NODE: return nodesFactory.createCDATASection(node.data); case NODE_TYPES.COMMENT_NODE: return nodesFactory.createComment(node.data); case NODE_TYPES.DOCUMENT_NODE: const cloneDoc = nodesFactory.createDocument(); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneDoc as ConcreteDocumentNode, descendant, null); } return cloneDoc; case NODE_TYPES.PROCESSING_INSTRUCTION_NODE: return nodesFactory.createProcessingInstruction(node.target, node.data); case NODE_TYPES.TEXT_NODE: return nodesFactory.createTextNode(node.data); } } function isCreatedNode(node, createdNodes, domFacade) { if (createdNodes.includes(node)) { return true; } const parent = domFacade.getParentNode(node); return parent ? isCreatedNode(parent, createdNodes, domFacade) : false; } type VariableBinding = { registeredVariable?: string; sourceExpr: Expression; varRef: QName }; class TransformExpression extends UpdatingExpression { public _modifyExpr: Expression; public _returnExpr: Expression; public _variableBindings: VariableBinding[]; constructor( variableBindings: VariableBinding[], modifyExpr: Expression, returnExpr: Expression ) { super( new Specificity({}), variableBindings.reduce( (childExpressions, variableBinding) => { childExpressions.push(variableBinding.sourceExpr); return childExpressions; }, [modifyExpr, returnExpr] ), { canBeStaticallyEvaluated: false, resultOrder: RESULT_ORDERINGS.UNSORTED } ); this._variableBindings = variableBindings; this._modifyExpr = modifyExpr; this._returnExpr = returnExpr; } public evaluateWithUpdateList( dynamicContext: DynamicContext, executionParameters: ExecutionParameters ): IAsyncIterator<UpdatingExpressionResult> { const { domFacade, nodesFactory, documentWriter } = executionParameters; const sourceValueIterators: IAsyncIterator<UpdatingExpressionResult>[] = []; let modifyValueIterator: IAsyncIterator<UpdatingExpressionResult>; let returnValueIterator: IAsyncIterator<UpdatingExpressionResult>; let modifyPul: IPendingUpdate[]; const createdNodes = []; const toMergePuls = []; return { next: () => { if (createdNodes.length !== this._variableBindings.length) { // The copy clause contains one or more variable bindings, each of which consists of a variable name and an expression called the source expression. 
for (let i = createdNodes.length; i < this._variableBindings.length; i++) { const variableBinding = this._variableBindings[i]; let sourceValueIterator: IAsyncIterator<UpdatingExpressionResult> = sourceValueIterators[i]; // Each variable binding is processed as follows: if (!sourceValueIterator) { sourceValueIterators[ i ] = sourceValueIterator = this.ensureUpdateListWrapper( variableBinding.sourceExpr )(dynamicContext, executionParameters); } const sv = sourceValueIterator.next(IterationHint.NONE); if (!sv.ready) { return sv; } // The result of evaluating the source expression must be a single node [err:XUTY0013]. Let $node be this single node. if ( sv.value.xdmValue.length !== 1 || !isSubtypeOf(sv.value.xdmValue[0].type, 'node()') ) { throw errXUTY0013(); } const node = sv.value.xdmValue[0]; // A new copy is made of $node and all nodes that have $node as an ancestor, collectively referred to as copied nodes. const copiedNodes = createNodeValue( deepCloneNode(node.value, domFacade, nodesFactory, documentWriter) ); createdNodes.push(copiedNodes.value); toMergePuls.push(sv.value.pendingUpdateList); // The variable name is bound to the top-level copied node generated in the previous step. The scope of this variable binding includes all subexpressions of the containing copy modify expression that appear after the variable binding clause, including the source expressions of later variable bindings, but it does not include the source expression to which the current variable name is bound. dynamicContext = dynamicContext.scopeWithVariableBindings({ [variableBinding.registeredVariable]: () => sequenceFactory.singleton(copiedNodes) }); } } if (!modifyPul) { // The expression in the modify clause is evaluated, if (!modifyValueIterator) { modifyValueIterator = this.ensureUpdateListWrapper(this._modifyExpr)( dynamicContext, executionParameters ); } const mv = modifyValueIterator.next(IterationHint.NONE); if (!mv.ready)
// resulting in a pending update list (denoted $pul) and an XDM instance. The XDM instance is discarded, and does not form part of the result of the copy modify expression. modifyPul = mv.value.pendingUpdateList; } modifyPul.forEach(pu => { // If the target node of any update primitive in $pul is a node that was not newly created in Step 1, a dynamic error is raised [err:XUDY0014]. if (pu.target && !isCreatedNode(pu.target, createdNodes, domFacade)) { throw errXUDY0014(pu.target); } // If $pul contains a upd:put update primitive, a dynamic error is raised [err:XUDY0037]. if (pu.type === 'put') { throw errXUDY0037(); } }); // Let $revalidation-mode be the value of the revalidation mode in the static context of the library or main module containing the copy modify expression, and $inherit-namespaces be the value of inherit-namespaces in the static context of the copy modify expression. The following update operation is invoked: upd:applyUpdates($pul, $revalidation-mode, $inherit-namespaces). applyUpdates(modifyPul, null, null, domFacade, nodesFactory, documentWriter); // The return clause is evaluated, resulting in a pending update list and an XDM instance. if (!returnValueIterator) { returnValueIterator = this.ensureUpdateListWrapper(this._returnExpr)( dynamicContext, executionParameters ); } const rv = returnValueIterator.next(IterationHint.NONE); if (!rv.ready) { return rv; } // The result of the copy modify expression is the XDM instance returned, as well as a pending update list constructed by merging the pending update lists returned by any of the copy modify expression's copy or return clause operand expressions using upd:mergeUpdates. During evaluation of the return clause, changes applied to copied nodes by the preceding step are visible. return ready({ xdmValue: rv.value.xdmValue, pendingUpdateList: mergeUpdates(rv.value.pendingUpdateList, ...toMergePuls) }); } }; } public performStaticEvaluation(staticContext: StaticContext) { staticContext.introduceScope(); this._variableBindings.forEach( variableBinding => (variableBinding.registeredVariable = staticContext.registerVariable( variableBinding.varRef.namespaceURI, variableBinding.varRef.localName )) ); super.performStaticEvaluation(staticContext); staticContext.removeScope(); // If all of the copy modify expression's copy and return clauses have operand expressions // that are simple expressions, then the copy modify expression is a simple expression. // If any of the copy modify expression's copy or return clauses have operand expressions // that are updating expressions, then the copy modify expression is a updating expression. this.isUpdating = this._variableBindings.some(varBinding => varBinding.sourceExpr.isUpdating) || this._returnExpr.isUpdating; } public evaluate( dynamicContext: DynamicContext, executionParameters: ExecutionParameters ): ISequence { // If we were updating, the calling code would have called the evaluateWithUpdateList // method. We can assume we're not actually updating const pendingUpdateIterator = this.evaluateWithUpdateList( dynamicContext, executionParameters ); return separateXDMValueFromUpdatingExpressionResult(pendingUpdateIterator, _pul => {}); } } export default TransformExpression;
{ return mv; }
conditional_block
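The sample above implements the copy and modify clause steps of XQuery Update's copy modify (transform) expression. The import paths and err:XU* helpers suggest this file is fontoxpath's implementation; the sketch below assumes fontoxpath's documented `evaluateUpdatingExpression` / `executePendingUpdateList` entry points (an assumption, not confirmed by the sample itself):

```typescript
import { evaluateUpdatingExpression, executePendingUpdateList } from 'fontoxpath';

// A copy modify expression: $d is bound to a fresh copy of the context node,
// the modify clause may only target that copy (err:XUDY0014 otherwise), and
// the return clause sees the already-modified copy.
const script = `copy $d := . modify rename node $d as "renamed" return $d`;

async function runTransform(contextNode: Node) {
	const { xdmValue, pendingUpdateList } = await evaluateUpdatingExpression(script, contextNode);
	// Updates against the copy were applied internally; this list only merges
	// updates produced by the copy/return operand expressions themselves.
	executePendingUpdateList(pendingUpdateList);
	return xdmValue;
}
```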
TransformExpression.ts
import IDocumentWriter from '../../documentWriter/IDocumentWriter'; import { ConcreteDocumentNode, ConcreteElementNode, ConcreteNode, NODE_TYPES } from '../../domFacade/ConcreteNode'; import IWrappingDomFacade from '../../domFacade/IWrappingDomFacade'; import INodesFactory from '../../nodesFactory/INodesFactory'; import createNodeValue from '../dataTypes/createNodeValue'; import isSubtypeOf from '../dataTypes/isSubtypeOf'; import Value from '../dataTypes/Value'; import sequenceFactory from '../dataTypes/sequenceFactory'; import QName from '../dataTypes/valueTypes/QName'; import DynamicContext from '../DynamicContext'; import ExecutionParameters from '../ExecutionParameters'; import Expression, { RESULT_ORDERINGS } from '../Expression'; import Specificity from '../Specificity'; import StaticContext from '../StaticContext'; import UpdatingExpressionResult from '../UpdatingExpressionResult'; import { DONE_TOKEN, IAsyncIterator, IterationHint, notReady, ready } from '../util/iterators'; import { applyUpdates, mergeUpdates } from './pulRoutines'; import UpdatingExpression from './UpdatingExpression'; import { errXUDY0014, errXUDY0037, errXUTY0013 } from './XQueryUpdateFacilityErrors'; import { IPendingUpdate } from './IPendingUpdate'; import ISequence from '../dataTypes/ISequence'; import { separateXDMValueFromUpdatingExpressionResult } from '../PossiblyUpdatingExpression'; function deepCloneNode( node: ConcreteNode, domFacade: IWrappingDomFacade, nodesFactory: INodesFactory, documentWriter: IDocumentWriter ) { // Each copied node receives a new node identity. The parent, children, and attributes properties of the copied nodes are set so as to preserve their inter-node relationships. The parent property of the copy of $node is set to empty. Other properties of the copied nodes are determined as follows: // For a copied document node, the document-uri property is set to empty. // For a copied element node, the type-name property is set to xs:untyped, and the nilled, is-id, and is-idrefs properties are set to false. // For a copied attribute node, the type-name property is set to xs:untypedAtomic and the is-idrefs property is set to false. The is-id property is set to true if the qualified name of the attribute node is xml:id; otherwise it is set to false. // The string-value of each copied element and attribute node remains unchanged, and its typed value becomes equal to its string value as an instance of xs:untypedAtomic. // Note: Implementations that store only the typed value of a node are required at this point to convert the typed value to a string form. // If copy-namespaces mode in the static context specifies preserve, all in-scope-namespaces of the original element are retained in the new copy. If copy-namespaces mode specifies no-preserve, the new copy retains only those in-scope namespaces of the original element that are used in the names of the element and its attributes. // All other properties of the copied nodes are preserved.
switch (node.nodeType) { case NODE_TYPES.ELEMENT_NODE: const cloneElem = nodesFactory.createElementNS(node.namespaceURI, node.nodeName); domFacade .getAllAttributes(node) .forEach(attr => documentWriter.setAttributeNS( cloneElem, attr.namespaceURI, attr.name, attr.value ) ); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneElem as ConcreteElementNode, descendant, null); } return cloneElem; case NODE_TYPES.ATTRIBUTE_NODE: const cloneAttr = nodesFactory.createAttributeNS(node.namespaceURI, node.nodeName); cloneAttr.value = node.value; return cloneAttr; case NODE_TYPES.CDATA_SECTION_NODE: return nodesFactory.createCDATASection(node.data); case NODE_TYPES.COMMENT_NODE: return nodesFactory.createComment(node.data); case NODE_TYPES.DOCUMENT_NODE: const cloneDoc = nodesFactory.createDocument(); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneDoc as ConcreteDocumentNode, descendant, null); } return cloneDoc; case NODE_TYPES.PROCESSING_INSTRUCTION_NODE: return nodesFactory.createProcessingInstruction(node.target, node.data); case NODE_TYPES.TEXT_NODE: return nodesFactory.createTextNode(node.data); } } function isCreatedNode(node, createdNodes, domFacade) { if (createdNodes.includes(node)) { return true; } const parent = domFacade.getParentNode(node); return parent ? isCreatedNode(parent, createdNodes, domFacade) : false; } type VariableBinding = { registeredVariable?: string; sourceExpr: Expression; varRef: QName }; class TransformExpression extends UpdatingExpression { public _modifyExpr: Expression; public _returnExpr: Expression; public _variableBindings: VariableBinding[]; constructor( variableBindings: VariableBinding[], modifyExpr: Expression, returnExpr: Expression )
public evaluateWithUpdateList( dynamicContext: DynamicContext, executionParameters: ExecutionParameters ): IAsyncIterator<UpdatingExpressionResult> { const { domFacade, nodesFactory, documentWriter } = executionParameters; const sourceValueIterators: IAsyncIterator<UpdatingExpressionResult>[] = []; let modifyValueIterator: IAsyncIterator<UpdatingExpressionResult>; let returnValueIterator: IAsyncIterator<UpdatingExpressionResult>; let modifyPul: IPendingUpdate[]; const createdNodes = []; const toMergePuls = []; return { next: () => { if (createdNodes.length !== this._variableBindings.length) { // The copy clause contains one or more variable bindings, each of which consists of a variable name and an expression called the source expression. for (let i = createdNodes.length; i < this._variableBindings.length; i++) { const variableBinding = this._variableBindings[i]; let sourceValueIterator: IAsyncIterator<UpdatingExpressionResult> = sourceValueIterators[i]; // Each variable binding is processed as follows: if (!sourceValueIterator) { sourceValueIterators[ i ] = sourceValueIterator = this.ensureUpdateListWrapper( variableBinding.sourceExpr )(dynamicContext, executionParameters); } const sv = sourceValueIterator.next(IterationHint.NONE); if (!sv.ready) { return sv; } // The result of evaluating the source expression must be a single node [err:XUTY0013]. Let $node be this single node. if ( sv.value.xdmValue.length !== 1 || !isSubtypeOf(sv.value.xdmValue[0].type, 'node()') ) { throw errXUTY0013(); } const node = sv.value.xdmValue[0]; // A new copy is made of $node and all nodes that have $node as an ancestor, collectively referred to as copied nodes. const copiedNodes = createNodeValue( deepCloneNode(node.value, domFacade, nodesFactory, documentWriter) ); createdNodes.push(copiedNodes.value); toMergePuls.push(sv.value.pendingUpdateList); // The variable name is bound to the top-level copied node generated in the previous step. The scope of this variable binding includes all subexpressions of the containing copy modify expression that appear after the variable binding clause, including the source expressions of later variable bindings, but it does not include the source expression to which the current variable name is bound. dynamicContext = dynamicContext.scopeWithVariableBindings({ [variableBinding.registeredVariable]: () => sequenceFactory.singleton(copiedNodes) }); } } if (!modifyPul) { // The expression in the modify clause is evaluated, if (!modifyValueIterator) { modifyValueIterator = this.ensureUpdateListWrapper(this._modifyExpr)( dynamicContext, executionParameters ); } const mv = modifyValueIterator.next(IterationHint.NONE); if (!mv.ready) { return mv; } // resulting in a pending update list (denoted $pul) and an XDM instance. The XDM instance is discarded, and does not form part of the result of the copy modify expression. modifyPul = mv.value.pendingUpdateList; } modifyPul.forEach(pu => { // If the target node of any update primitive in $pul is a node that was not newly created in Step 1, a dynamic error is raised [err:XUDY0014]. if (pu.target && !isCreatedNode(pu.target, createdNodes, domFacade)) { throw errXUDY0014(pu.target); } // If $pul contains a upd:put update primitive, a dynamic error is raised [err:XUDY0037]. 
if (pu.type === 'put') { throw errXUDY0037(); } }); // Let $revalidation-mode be the value of the revalidation mode in the static context of the library or main module containing the copy modify expression, and $inherit-namespaces be the value of inherit-namespaces in the static context of the copy modify expression. The following update operation is invoked: upd:applyUpdates($pul, $revalidation-mode, $inherit-namespaces). applyUpdates(modifyPul, null, null, domFacade, nodesFactory, documentWriter); // The return clause is evaluated, resulting in a pending update list and an XDM instance. if (!returnValueIterator) { returnValueIterator = this.ensureUpdateListWrapper(this._returnExpr)( dynamicContext, executionParameters ); } const rv = returnValueIterator.next(IterationHint.NONE); if (!rv.ready) { return rv; } // The result of the copy modify expression is the XDM instance returned, as well as a pending update list constructed by merging the pending update lists returned by any of the copy modify expression's copy or return clause operand expressions using upd:mergeUpdates. During evaluation of the return clause, changes applied to copied nodes by the preceding step are visible. return ready({ xdmValue: rv.value.xdmValue, pendingUpdateList: mergeUpdates(rv.value.pendingUpdateList, ...toMergePuls) }); } }; } public performStaticEvaluation(staticContext: StaticContext) { staticContext.introduceScope(); this._variableBindings.forEach( variableBinding => (variableBinding.registeredVariable = staticContext.registerVariable( variableBinding.varRef.namespaceURI, variableBinding.varRef.localName )) ); super.performStaticEvaluation(staticContext); staticContext.removeScope(); // If all of the copy modify expression's copy and return clauses have operand expressions // that are simple expressions, then the copy modify expression is a simple expression. // If any of the copy modify expression's copy or return clauses have operand expressions // that are updating expressions, then the copy modify expression is an updating expression. this.isUpdating = this._variableBindings.some(varBinding => varBinding.sourceExpr.isUpdating) || this._returnExpr.isUpdating; } public evaluate( dynamicContext: DynamicContext, executionParameters: ExecutionParameters ): ISequence { // If we were updating, the calling code would have called the evaluateWithUpdateList // method. We can assume we're not actually updating const pendingUpdateIterator = this.evaluateWithUpdateList( dynamicContext, executionParameters ); return separateXDMValueFromUpdatingExpressionResult(pendingUpdateIterator, _pul => {}); } } export default TransformExpression;
{ super( new Specificity({}), variableBindings.reduce( (childExpressions, variableBinding) => { childExpressions.push(variableBinding.sourceExpr); return childExpressions; }, [modifyExpr, returnExpr] ), { canBeStaticallyEvaluated: false, resultOrder: RESULT_ORDERINGS.UNSORTED } ); this._variableBindings = variableBindings; this._modifyExpr = modifyExpr; this._returnExpr = returnExpr; }
identifier_body
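The scope comment in this sample (a binding is visible to later source expressions, but not to its own) is easiest to see with two bindings in one copy clause. A hypothetical script, for illustration only:

```typescript
// $b's source expression may reference the earlier binding $a, but $a's own
// source expression could not reference $a itself. Both variables are bound
// to fresh copies, so the delete below passes the err:XUDY0014 check.
const twoBindings = `
	copy $a := /inventory,
	     $b := $a/item[1]
	modify delete node $b/@price
	return $b
`;
```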
TransformExpression.ts
import IDocumentWriter from '../../documentWriter/IDocumentWriter'; import { ConcreteDocumentNode, ConcreteElementNode, ConcreteNode, NODE_TYPES } from '../../domFacade/ConcreteNode'; import IWrappingDomFacade from '../../domFacade/IWrappingDomFacade'; import INodesFactory from '../../nodesFactory/INodesFactory'; import createNodeValue from '../dataTypes/createNodeValue'; import isSubtypeOf from '../dataTypes/isSubtypeOf'; import Value from '../dataTypes/Value'; import sequenceFactory from '../dataTypes/sequenceFactory'; import QName from '../dataTypes/valueTypes/QName'; import DynamicContext from '../DynamicContext'; import ExecutionParameters from '../ExecutionParameters'; import Expression, { RESULT_ORDERINGS } from '../Expression'; import Specificity from '../Specificity'; import StaticContext from '../StaticContext'; import UpdatingExpressionResult from '../UpdatingExpressionResult'; import { DONE_TOKEN, IAsyncIterator, IterationHint, notReady, ready } from '../util/iterators'; import { applyUpdates, mergeUpdates } from './pulRoutines'; import UpdatingExpression from './UpdatingExpression'; import { errXUDY0014, errXUDY0037, errXUTY0013 } from './XQueryUpdateFacilityErrors'; import { IPendingUpdate } from './IPendingUpdate'; import ISequence from '../dataTypes/ISequence'; import { separateXDMValueFromUpdatingExpressionResult } from '../PossiblyUpdatingExpression'; function deepCloneNode( node: ConcreteNode, domFacade: IWrappingDomFacade, nodesFactory: INodesFactory, documentWriter: IDocumentWriter ) { // Each copied node receives a new node identity. The parent, children, and attributes properties of the copied nodes are set so as to preserve their inter-node relationships. The parent property of the copy of $node is set to empty. Other properties of the copied nodes are determined as follows: // For a copied document node, the document-uri property is set to empty. // For a copied element node, the type-name property is set to xs:untyped, and the nilled, is-id, and is-idrefs properties are set to false. // For a copied attribute node, the type-name property is set to xs:untypedAtomic and the is-idrefs property is set to false. The is-id property is set to true if the qualified name of the attribute node is xml:id; otherwise it is set to false. // The string-value of each copied element and attribute node remains unchanged, and its typed value becomes equal to its string value as an instance of xs:untypedAtomic. // Note: Implementations that store only the typed value of a node are required at this point to convert the typed value to a string form. // If copy-namespaces mode in the static context specifies preserve, all in-scope-namespaces of the original element are retained in the new copy. If copy-namespaces mode specifies no-preserve, the new copy retains only those in-scope namespaces of the original element that are used in the names of the element and its attributes. // All other properties of the copied nodes are preserved.
switch (node.nodeType) { case NODE_TYPES.ELEMENT_NODE: const cloneElem = nodesFactory.createElementNS(node.namespaceURI, node.nodeName); domFacade .getAllAttributes(node) .forEach(attr => documentWriter.setAttributeNS( cloneElem, attr.namespaceURI, attr.name, attr.value ) ); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneElem as ConcreteElementNode, descendant, null); } return cloneElem; case NODE_TYPES.ATTRIBUTE_NODE: const cloneAttr = nodesFactory.createAttributeNS(node.namespaceURI, node.nodeName); cloneAttr.value = node.value; return cloneAttr; case NODE_TYPES.CDATA_SECTION_NODE: return nodesFactory.createCDATASection(node.data); case NODE_TYPES.COMMENT_NODE: return nodesFactory.createComment(node.data); case NODE_TYPES.DOCUMENT_NODE: const cloneDoc = nodesFactory.createDocument(); for (const child of domFacade.getChildNodes(node)) { const descendant = deepCloneNode(child, domFacade, nodesFactory, documentWriter); documentWriter.insertBefore(cloneDoc as ConcreteDocumentNode, descendant, null); } return cloneDoc; case NODE_TYPES.PROCESSING_INSTRUCTION_NODE: return nodesFactory.createProcessingInstruction(node.target, node.data); case NODE_TYPES.TEXT_NODE: return nodesFactory.createTextNode(node.data);
function isCreatedNode(node, createdNodes, domFacade) { if (createdNodes.includes(node)) { return true; } const parent = domFacade.getParentNode(node); return parent ? isCreatedNode(parent, createdNodes, domFacade) : false; } type VariableBinding = { registeredVariable?: string; sourceExpr: Expression; varRef: QName }; class TransformExpression extends UpdatingExpression { public _modifyExpr: Expression; public _returnExpr: Expression; public _variableBindings: VariableBinding[]; constructor( variableBindings: VariableBinding[], modifyExpr: Expression, returnExpr: Expression ) { super( new Specificity({}), variableBindings.reduce( (childExpressions, variableBinding) => { childExpressions.push(variableBinding.sourceExpr); return childExpressions; }, [modifyExpr, returnExpr] ), { canBeStaticallyEvaluated: false, resultOrder: RESULT_ORDERINGS.UNSORTED } ); this._variableBindings = variableBindings; this._modifyExpr = modifyExpr; this._returnExpr = returnExpr; } public evaluateWithUpdateList( dynamicContext: DynamicContext, executionParameters: ExecutionParameters ): IAsyncIterator<UpdatingExpressionResult> { const { domFacade, nodesFactory, documentWriter } = executionParameters; const sourceValueIterators: IAsyncIterator<UpdatingExpressionResult>[] = []; let modifyValueIterator: IAsyncIterator<UpdatingExpressionResult>; let returnValueIterator: IAsyncIterator<UpdatingExpressionResult>; let modifyPul: IPendingUpdate[]; const createdNodes = []; const toMergePuls = []; return { next: () => { if (createdNodes.length !== this._variableBindings.length) { // The copy clause contains one or more variable bindings, each of which consists of a variable name and an expression called the source expression. for (let i = createdNodes.length; i < this._variableBindings.length; i++) { const variableBinding = this._variableBindings[i]; let sourceValueIterator: IAsyncIterator<UpdatingExpressionResult> = sourceValueIterators[i]; // Each variable binding is processed as follows: if (!sourceValueIterator) { sourceValueIterators[ i ] = sourceValueIterator = this.ensureUpdateListWrapper( variableBinding.sourceExpr )(dynamicContext, executionParameters); } const sv = sourceValueIterator.next(IterationHint.NONE); if (!sv.ready) { return sv; } // The result of evaluating the source expression must be a single node [err:XUTY0013]. Let $node be this single node. if ( sv.value.xdmValue.length !== 1 || !isSubtypeOf(sv.value.xdmValue[0].type, 'node()') ) { throw errXUTY0013(); } const node = sv.value.xdmValue[0]; // A new copy is made of $node and all nodes that have $node as an ancestor, collectively referred to as copied nodes. const copiedNodes = createNodeValue( deepCloneNode(node.value, domFacade, nodesFactory, documentWriter) ); createdNodes.push(copiedNodes.value); toMergePuls.push(sv.value.pendingUpdateList); // The variable name is bound to the top-level copied node generated in the previous step. The scope of this variable binding includes all subexpressions of the containing copy modify expression that appear after the variable binding clause, including the source expressions of later variable bindings, but it does not include the source expression to which the current variable name is bound. 
dynamicContext = dynamicContext.scopeWithVariableBindings({ [variableBinding.registeredVariable]: () => sequenceFactory.singleton(copiedNodes) }); } } if (!modifyPul) { // The expression in the modify clause is evaluated, if (!modifyValueIterator) { modifyValueIterator = this.ensureUpdateListWrapper(this._modifyExpr)( dynamicContext, executionParameters ); } const mv = modifyValueIterator.next(IterationHint.NONE); if (!mv.ready) { return mv; } // resulting in a pending update list (denoted $pul) and an XDM instance. The XDM instance is discarded, and does not form part of the result of the copy modify expression. modifyPul = mv.value.pendingUpdateList; } modifyPul.forEach(pu => { // If the target node of any update primitive in $pul is a node that was not newly created in Step 1, a dynamic error is raised [err:XUDY0014]. if (pu.target && !isCreatedNode(pu.target, createdNodes, domFacade)) { throw errXUDY0014(pu.target); } // If $pul contains a upd:put update primitive, a dynamic error is raised [err:XUDY0037]. if (pu.type === 'put') { throw errXUDY0037(); } }); // Let $revalidation-mode be the value of the revalidation mode in the static context of the library or main module containing the copy modify expression, and $inherit-namespaces be the value of inherit-namespaces in the static context of the copy modify expression. The following update operation is invoked: upd:applyUpdates($pul, $revalidation-mode, $inherit-namespaces). applyUpdates(modifyPul, null, null, domFacade, nodesFactory, documentWriter); // The return clause is evaluated, resulting in a pending update list and an XDM instance. if (!returnValueIterator) { returnValueIterator = this.ensureUpdateListWrapper(this._returnExpr)( dynamicContext, executionParameters ); } const rv = returnValueIterator.next(IterationHint.NONE); if (!rv.ready) { return rv; } // The result of the copy modify expression is the XDM instance returned, as well as a pending update list constructed by merging the pending update lists returned by any of the copy modify expression's copy or return clause operand expressions using upd:mergeUpdates. During evaluation of the return clause, changes applied to copied nodes by the preceding step are visible. return ready({ xdmValue: rv.value.xdmValue, pendingUpdateList: mergeUpdates(rv.value.pendingUpdateList, ...toMergePuls) }); } }; } public performStaticEvaluation(staticContext: StaticContext) { staticContext.introduceScope(); this._variableBindings.forEach( variableBinding => (variableBinding.registeredVariable = staticContext.registerVariable( variableBinding.varRef.namespaceURI, variableBinding.varRef.localName )) ); super.performStaticEvaluation(staticContext); staticContext.removeScope(); // If all of the copy modify expression's copy and return clauses have operand expressions // that are simple expressions, then the copy modify expression is a simple expression. // If any of the copy modify expression's copy or return clauses have operand expressions // that are updating expressions, then the copy modify expression is an updating expression. this.isUpdating = this._variableBindings.some(varBinding => varBinding.sourceExpr.isUpdating) || this._returnExpr.isUpdating; } public evaluate( dynamicContext: DynamicContext, executionParameters: ExecutionParameters ): ISequence { // If we were updating, the calling code would have called the evaluateWithUpdateList // method.
We can assume we're not actually updating const pendingUpdateIterator = this.evaluateWithUpdateList( dynamicContext, executionParameters ); return separateXDMValueFromUpdatingExpressionResult(pendingUpdateIterator, _pul => {}); } } export default TransformExpression;
} }
random_line_split
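deepCloneNode in these samples gives every copy a new node identity with an empty parent, which is exactly what the isCreatedNode / err:XUDY0014 guard relies on. The standard DOM cloneNode exhibits the same identity rules (a browser or DOM-shim environment is assumed; the sample's clone additionally resets type annotations and namespace handling):

```typescript
const orig = document.createElement('item');
orig.setAttribute('xml:id', 'i1');

const copy = orig.cloneNode(true) as Element;
console.log(copy === orig); // false: the copy is a new node identity
console.log(copy.parentNode); // null: the parent property of the copy is empty
console.log(copy.getAttribute('xml:id')); // 'i1': string values are preserved
```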
index.js
window.onload = function () { // Initialize the home page getHomePageList(200); // Initialize the topics page getTopic(); // Initialize the booklets page getBrochure(); }; // Toggle panel visibility when a main menu tab is opened function openMainMenu(event) { var topic = document.querySelector('.content-container .topic-box'); var homepage = document.querySelector('.content-container .content-box'); var brochure = document.querySelector('.content-container .brochure-box'); var ad = document.querySelector('.content-container .ad-box'); var homePageSubtitleBox = document.querySelector( '.header-box .homepage-sub-title' ); var brochureSubtitleBox = document.querySelector( '.header-box .brochure-sub-title' ); var titles = document.querySelectorAll('.header-box .title-list li'); var container = document.querySelector('.container'); var addGroup = document.querySelector('.title-list .add-group'); var classNameArr = ['home-page', 'topic', 'brochure']; if (event.target.className) { var e = event.target; } else { e = event.target.parentNode; } var className = e.className; if (className && classNameArr.indexOf(className) >= 0) { titles.forEach(function (item) { item.classList.remove('active'); }); topic.style.display = 'none'; ad.style.display = 'none'; homepage.style.display = 'none'; brochureSubtitleBox.style.display = 'none'; homePageSubtitleBox.style.display = 'none'; brochure.style.display = 'none'; if (className == 'home-page') { ad.style.display = 'block'; homepage.style.display = 'block'; homePageSubtitleBox.style.display = 'block'; addGroup.innerText = '写文章'; e.classList.add('active'); } if (className == 'topic') { topic.style.display = 'block'; document.body.style.background = '#fff'; addGroup.innerText = '发沸点'; e.classList.add('active'); } if (className == 'brochure') { brochureSubtitleBox.style.display = 'block'; brochure.style.display = 'block'; addGroup.innerText = '写文章'; e.classList.add('active'); } } } // Open a submenu function openSubMenu(sort_type, e) { var lis = document.querySelectorAll('.content-title li'); getHomePageList(sort_type); lis.forEach(function (item) { item.classList.remove('active'); }); e.target.classList.add('active'); } // Fetch home page data // hot: sort_type=200, newest: sort_type=300, trending: sort_type=3 function getHomePageList(sort_type) { var obj = { client_type: 2608, cursor: '0', id_type: 2, limit: 20, sort_type: sort_type, }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/getRecommend', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { init(result.data); }, error: function (error) { console.log(error); }, }); } // Render the home page function init(data) { if (data.length > 0) { var ul = '<ul class="content-list-ul">'; data.forEach(function (item, index) { if (item.item_type == 2) { var author_user_info = item.item_info.author_user_info; var article_info = item.item_info.article_info; var category = ''; var cover_image = ''; item.item_info.tags.forEach(function (tagItem) { var tag_name = '<a class="tagname" target="_blank" href="https://juejin.im/tag/' + tagItem.tag_name + '">' + tagItem.tag_name + '</a>'; category += tag_name; }); if (article_info.cover_image) { cover_image = '<div style="background-image:url(' + article_info.cover_image + ')" class="cover-image"><a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"></a></div>'; } var li = '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"><object>' + '<li class="content-list">' + '<div class="detail-list">' + '<div class="left-box">' + '<div class="detail-title">' + '<ul><li>' + '<a target="_blank" href="https://juejin.im/user/' +
article_info.user_id + '">' + author_user_info.user_name + '</a></li><li>' + dayjs(parseInt(article_info.ctime + '000')).fromNow() + '</li><li>' + category + '</li></ul></div><div class="detail-content">' + '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '">' + article_info.title + '</a></div><div class="detail-action">' + '<ul><li><a href="">' + '<i class="iconfont icon-good"></i>' + '<span>' + article_info.digg_count + '</span></a></li><li><a href="">' + '<i class="iconfont icon-message-reply"></i>' + '<span>' + article_info.comment_count + '</span></a></li><li class="share"><a href="">' + '<i class="iconfont icon-share"></i>' + '</a></li></ul></div></div><div class="right-box">' + cover_image + '</div></div></li></object></a>'; ul += li; } }); ul += '</ul>'; document.querySelector('.content-box .content').innerHTML = ul; } } // Fetch topics data function getTopic() { var obj = { cursor: '0', limit: 21, sort_type: 7, }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/getTopic', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initTopic(result.data); }, error: function (error) { console.log(error); }, }); } // Render the topics page function initTopic(data) { if (data.length > 0) { var topicBox = '<div class="topic-box-div">' + '<div class="topic-content">' + '<div class="topic-title">全部话题</div>' + '<div class="topic-list">'; data.forEach(function (item) { var topicItem = '<div class="topic-item">' + '<div class="pic" style="background-image: url(' + item.topic.icon + ')">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '"></a>' + '</div>' + '<div class="detail">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '">' + item.topic.title + '</a>' + '<span>' + item.topic.follower_count + ' 关注 · ' + item.topic.msg_count + ' 沸点</span>' + '<span> + 关注 </span>' + '</div>' + '</div>'; topicBox += topicItem; }); document.querySelector( '.content-container .topic-box' ).innerHTML = topicBox; } } // Fetch booklets data function getBrochure() { var obj = { cursor: '0', limit: 20, category_id: '0', }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/listbycategory', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initBrochure(result.data); }, error: function (err) { console.log(err); }, }); } // Render the booklets page function initBrochure(data) { var brochureList = ''; data.forEach(function (item) { var info = item.base_info; var userInfo = item.user_info; var presell = ''; var levelPic = ''; var tooltip = ''; var levelArr = [1, 2, 3, 4, 5]; if (info.is_finished == 0) { presell = '<span class="presell">预售</span>'; } if (levelArr.indexOf(userInfo.level) >= 0) { levelPic = '<a target="_blank" href="https://juejin.im/book/6844733795329900551/section/6844733795371843597" class="rank"><img src="./img/lv' + userInfo.level + '.svg" alt="" /></a>'; } if (userInfo.company) { userInfo.company = ' @ ' + userInfo.company; } if (item.event_discount) { var endTime = item.event_discount.end_time * 1000; countdown(endTime); var tooltip = '<div class="tooltip">' + '<span class="pre-text">' + '<img src="//s3.pstatp.com/toutiao/xitu_juejin_web/img/gift.9a8f3aa.png" alt=""/>' + '<span class="time-limit-price">限时优惠价 ' + (info.price / 1000) * item.event_discount.discount_rate + ' 元</span></span>' + '<span class="counnt-down">' + '<span class="endTime">' + '</span>' + '</span>' + '</div>'; } var brochureItem = '<a target="_blank" href="https://juejin.im/book/' + item.booklet_id +
'"><object>' + '<div class="brochure-item">' + '<div class="pic" style="background-image: url(' + info.cover_img + ');"></div>' + '<div class="content">' + '<div class="title">' + presell + '<span class="title-detail">' + info.title + '</span>' + '</div>' + '<div class="detail">' + info.summary + '</div>' + '<div class="author">' + '<div class="author-info"><a target="_blank" href="https://juejin.im/user/' + userInfo.user_id + '"><div class="profile-photo" style="' + 'background-image: url(' + userInfo.avatar_large + ');"></div>' + '<a class="author-name" target="_blank" href="http://juejin.im/user/' + userInfo.user_id + '">' + userInfo.user_name + '</a>' + levelPic + '</a>' + '</div>' + '<div class="author-desc">' + '<span>' + userInfo.job_title + userInfo.company + '</span>' + '</div>' + '</div>' + '<div class="other">' + '<div class="price">' + '<a href="https://juejin.im/books/payment/' + item.booklet_id + '">' + '<div class="price-text">¥' + parseInt(info.price) / 100 + '</div>' + '</a>' + tooltip + '</div>' + '<div class="messages">' + '<span class="message">' + '<span>' + info.section_count + '小节</span>' + '</span>' + '<span class="message">' + '<span>' + ' ' + info.buy_count + '</span>' + '<span> 人已购买</span>' + '</span>' + '</div>' + '</div>' + '</div>' + '</div>' + '</object></a>'; brochureList += brochureItem; }); document.querySelector('.brochure-list').innerHTML = brochureList; } // Countdown timer function countdown(endTime) { var time; var timer = setInterval(function () { const msec = endTime - +new Date(); if (msec > 0) { // Compute days, hours, minutes, seconds let day = parseInt(msec / 1000 / 60 / 60 / 24); let hr = parseInt((msec / 1000 /
% 24); let min = parseInt((msec / 1000 / 60) % 60); let sec = parseInt((msec / 1000) % 60); let minsec = parseInt((msec % 1000) / 10); // Zero-pad single-digit values hr = hr > 9 ? hr : '0' + hr; min = min > 9 ? min : '0' + min; sec = sec > 9 ? sec : '0' + sec; minsec = minsec > 9 ? minsec : '0' + minsec; time = day + '天 ' + hr + ':' + min + ':' + sec + '.' + minsec; document.querySelector('.endTime').innerText = '倒计时 ' + time; } else { clearInterval(timer); } }, 10); } // Scroll position listener function initScroll() { if (document.documentElement.scrollTop > 660) { document.querySelector('.header-box').classList.add('visible'); document.querySelector('.content-container').classList.add('top'); } else { document.querySelector('.header-box').classList.remove('visible'); document.querySelector('.content-container').classList.remove('top'); } }
60 / 60)
identifier_name
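The middle of this sample falls inside countdown()'s time decomposition. A quick sanity check of that arithmetic, using a hypothetical remaining time (Math.floor is the idiomatic equivalent of the parseInt coercion the sample uses):

```javascript
// 1 day, 1 hour, 1 minute, 1 second expressed in milliseconds:
const msec = ((24 + 1) * 3600 + 60 + 1) * 1000; // 90061000
const day = Math.floor(msec / 1000 / 60 / 60 / 24); // 1
const hr = Math.floor((msec / 1000 / 60 / 60) % 24); // 1
const min = Math.floor((msec / 1000 / 60) % 60); // 1
const sec = Math.floor((msec / 1000) % 60); // 1
console.log(day + '天 ' + hr + ':' + min + ':' + sec); // "1天 1:1:1"
```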
index.js
window.onload = function () { // Initialize the home page getHomePageList(200); // Initialize the topics page getTopic(); // Initialize the booklets page getBrochure(); }; // Toggle panel visibility when a main menu tab is opened function openMainMenu(event) { var topic = document.querySelector('.content-container .topic-box'); var homepage = document.querySelector('.content-container .content-box'); var brochure = document.querySelector('.content-container .brochure-box'); var ad = document.querySelector('.content-container .ad-box'); var homePageSubtitleBox = document.querySelector( '.header-box .homepage-sub-title' ); var brochureSubtitleBox = document.querySelector( '.header-box .brochure-sub-title' ); var titles = document.querySelectorAll('.header-box .title-list li'); var container = document.querySelector('.container'); var addGroup = document.querySelector('.title-list .add-group'); var classNameArr = ['home-page', 'topic', 'brochure']; if (event.target.className) { var e = event.target; } else { e = event.target.parentNode; } var className = e.className
xOf(className) >= 0) { titles.forEach(function (item) { item.classList.remove('active'); }); topic.style.display = 'none'; ad.style.display = 'none'; homepage.style.display = 'none'; brochureSubtitleBox.style.display = 'none'; homePageSubtitleBox.style.display = 'none'; brochure.style.display = 'none'; if (className == 'home-page') { ad.style.display = 'block'; homepage.style.display = 'block'; homePageSubtitleBox.style.display = 'block'; addGroup.innerText = '写文章'; e.classList.add('active'); } if (className == 'topic') { topic.style.display = 'block'; document.body.style.background = '#fff'; addGroup.innerText = '发沸点'; e.classList.add('active'); } if (className == 'brochure') { brochureSubtitleBox.style.display = 'block'; brochure.style.display = 'block'; addGroup.innerText = '写文章'; e.classList.add('active'); } } } // Open a submenu function openSubMenu(sort_type, e) { var lis = document.querySelectorAll('.content-title li'); getHomePageList(sort_type); lis.forEach(function (item) { item.classList.remove('active'); }); e.target.classList.add('active'); } // Fetch home page data // hot: sort_type=200, newest: sort_type=300, trending: sort_type=3 function getHomePageList(sort_type) { var obj = { client_type: 2608, cursor: '0', id_type: 2, limit: 20, sort_type: sort_type, }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/getRecommend', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { init(result.data); }, error: function (error) { console.log(error); }, }); } // Render the home page function init(data) { if (data.length > 0) { var ul = '<ul class="content-list-ul">'; data.forEach(function (item, index) { if (item.item_type == 2) { var author_user_info = item.item_info.author_user_info; var article_info = item.item_info.article_info; var category = ''; var cover_image = ''; item.item_info.tags.forEach(function (tagItem) { var tag_name = '<a class="tagname" target="_blank" href="https://juejin.im/tag/' + tagItem.tag_name + '">' + tagItem.tag_name + '</a>'; category += tag_name; }); if (article_info.cover_image) { cover_image = '<div style="background-image:url(' + article_info.cover_image + ')" class="cover-image"><a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"></a></div>'; } var li = '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"><object>' + '<li class="content-list">' + '<div class="detail-list">' + '<div class="left-box">' + '<div class="detail-title">' + '<ul><li>' + '<a target="_blank" href="https://juejin.im/user/' + article_info.user_id + '">' + author_user_info.user_name + '</a></li><li>' + dayjs(parseInt(article_info.ctime + '000')).fromNow() + '</li><li>' + category + '</li></ul></div><div class="detail-content">' + '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '">' + article_info.title + '</a></div><div class="detail-action">' + '<ul><li><a href="">' + '<i class="iconfont icon-good"></i>' + '<span>' + article_info.digg_count + '</span></a></li><li><a href="">' + '<i class="iconfont icon-message-reply"></i>' + '<span>' + article_info.comment_count + '</span></a></li><li class="share"><a href="">' + '<i class="iconfont icon-share"></i>' + '</a></li></ul></div></div><div class="right-box">' + cover_image + '</div></div></li></object></a>'; ul += li; } }); ul += '</ul>'; document.querySelector('.content-box .content').innerHTML = ul; } } // Fetch topics data function getTopic() { var obj = { cursor: '0', limit: 21, sort_type: 7, }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/getTopic', type: 'POST',
contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initTopic(result.data); }, error: function (error) { console.log(error); }, }); } // Render the topics page function initTopic(data) { if (data.length > 0) { var topicBox = '<div class="topic-box-div">' + '<div class="topic-content">' + '<div class="topic-title">全部话题</div>' + '<div class="topic-list">'; data.forEach(function (item) { var topicItem = '<div class="topic-item">' + '<div class="pic" style="background-image: url(' + item.topic.icon + ')">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '"></a>' + '</div>' + '<div class="detail">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '">' + item.topic.title + '</a>' + '<span>' + item.topic.follower_count + ' 关注 · ' + item.topic.msg_count + ' 沸点</span>' + '<span> + 关注 </span>' + '</div>' + '</div>'; topicBox += topicItem; }); document.querySelector( '.content-container .topic-box' ).innerHTML = topicBox; } } // Fetch booklets data function getBrochure() { var obj = { cursor: '0', limit: 20, category_id: '0', }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/listbycategory', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initBrochure(result.data); }, error: function (err) { console.log(err); }, }); } // Render the booklets page function initBrochure(data) { var brochureList = ''; data.forEach(function (item) { var info = item.base_info; var userInfo = item.user_info; var presell = ''; var levelPic = ''; var tooltip = ''; var levelArr = [1, 2, 3, 4, 5]; if (info.is_finished == 0) { presell = '<span class="presell">预售</span>'; } if (levelArr.indexOf(userInfo.level) >= 0) { levelPic = '<a target="_blank" href="https://juejin.im/book/6844733795329900551/section/6844733795371843597" class="rank"><img src="./img/lv' + userInfo.level + '.svg" alt="" /></a>'; } if (userInfo.company) { userInfo.company = ' @ ' + userInfo.company; } if (item.event_discount) { var endTime = item.event_discount.end_time * 1000; countdown(endTime); var tooltip = '<div class="tooltip">' + '<span class="pre-text">' + '<img src="//s3.pstatp.com/toutiao/xitu_juejin_web/img/gift.9a8f3aa.png" alt=""/>' + '<span class="time-limit-price">限时优惠价 ' + (info.price / 1000) * item.event_discount.discount_rate + ' 元</span></span>' + '<span class="counnt-down">' + '<span class="endTime">' + '</span>' + '</span>' + '</div>'; } var brochureItem = '<a target="_blank" href="https://juejin.im/book/' + item.booklet_id + '"><object>' + '<div class="brochure-item">' + '<div class="pic" style="background-image: url(' + info.cover_img + ');"></div>' + '<div class="content">' + '<div class="title">' + presell + '<span class="title-detail">' + info.title + '</span>' + '</div>' + '<div class="detail">' + info.summary + '</div>' + '<div class="author">' + '<div class="author-info"><a target="_blank" href="https://juejin.im/user/' + userInfo.user_id + '"><div class="profile-photo" style="' + 'background-image: url(' + userInfo.avatar_large + ');"></div>' + '<a class="author-name" target="_blank" href="http://juejin.im/user/' + userInfo.user_id + '">' + userInfo.user_name + '</a>' + levelPic + '</a>' + '</div>' + '<div class="author-desc">' + '<span>' + userInfo.job_title + userInfo.company + '</span>' + '</div>' + '</div>' + '<div class="other">' + '<div class="price">' + '<a href="https://juejin.im/books/payment/' + item.booklet_id + '">' + '<div class="price-text">¥' + parseInt(info.price) / 100 + '</div>' + '</a>' + tooltip + '</div>' +
'<div class="messages">' + '<span class="message">' + '<span>' + info.section_count + '小节</span>' + '</span>' + '<span class="message">' + '<span>' + ' ' + info.buy_count + '</span>' + '<span> 人已购买</span>' + '</span>' + '</div>' + '</div>' + '</div>' + '</div>' + '</object></a>'; brochureList += brochureItem; }); document.querySelector('.brochure-list').innerHTML = brochureList; } // Countdown timer function countdown(endTime) { var time; var timer = setInterval(function () { const msec = endTime - +new Date(); if (msec > 0) { // Compute days, hours, minutes, seconds let day = parseInt(msec / 1000 / 60 / 60 / 24); let hr = parseInt((msec / 1000 / 60 / 60) % 24); let min = parseInt((msec / 1000 / 60) % 60); let sec = parseInt((msec / 1000) % 60); let minsec = parseInt((msec % 1000) / 10); // Zero-pad single-digit values hr = hr > 9 ? hr : '0' + hr; min = min > 9 ? min : '0' + min; sec = sec > 9 ? sec : '0' + sec; minsec = minsec > 9 ? minsec : '0' + minsec; time = day + '天 ' + hr + ':' + min + ':' + sec + '.' + minsec; document.querySelector('.endTime').innerText = '倒计时 ' + time; } else { clearInterval(timer); } }, 10); } // Scroll position listener function initScroll() { if (document.documentElement.scrollTop > 660) { document.querySelector('.header-box').classList.add('visible'); document.querySelector('.content-container').classList.add('top'); } else { document.querySelector('.header-box').classList.remove('visible'); document.querySelector('.content-container').classList.remove('top'); } }
; if (className && classNameArr.inde
conditional_block
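These index.js samples all POST a JSON body through jQuery's $.ajax against a local proxy. For reference, the equivalent request with the standard fetch API, a sketch reusing the endpoint and payload from the samples above:

```javascript
fetch('http://127.0.0.1:3000/api/juejin/getRecommend', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ client_type: 2608, cursor: '0', id_type: 2, limit: 20, sort_type: 200 }),
})
  .then(function (res) { return res.json(); })
  .then(function (result) { init(result.data); })
  .catch(function (error) { console.log(error); });
```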
index.js
window.onload = function () { // Initialize the home page getHomePageList(200); // Initialize the topics page getTopic(); // Initialize the booklets page getBrochure(); }; // Toggle panel visibility when a main menu tab is opened function openMainMenu(event) { var topic = document.querySelector('.content-container .topic-box'); var homepage = document.querySelector('.content-container .content-box'); var brochure = document.querySelector('.content-container .brochure-box'); var ad = document.querySelector('.content-container .ad-box'); var homePageSubtitleBox = document.querySelector( '.header-box .homepage-sub-title' ); var brochureSubtitleBox = document.querySelector( '.header-box .brochure-sub-title' ); var titles = document.querySelectorAll('.header-box .title-list li'); var container = document.querySelector('.container'); var addGroup = document.querySelector('.title-list .add-group'); var classNameArr = ['home-page', 'topic', 'brochure']; if (event.target.className) { var e = event.target; } else { e = event.target.parentNode; } var className = e.className; if (className && classNameArr.indexOf(className) >= 0) { titles.forEach(function (item) { item.classList.remove('active'); }); topic.style.display = 'none'; ad.style.display = 'none'; homepage.style.display = 'none'; brochureSubtitleBox.style.display = 'none'; homePageSubtitleBox.style.display = 'none'; brochure.style.display = 'none'; if (className == 'home-page') { ad.style.display = 'block'; homepage.style.display = 'block'; homePageSubtitleBox.style.display = 'block'; addGroup.innerText = '写文章'; e.classList.add('active'); } if (className == 'topic') { topic.style.display = 'block'; document.body.style.background = '#fff'; addGroup.innerText = '发沸点'; e.classList.add('active'); } if (className == 'brochure') { brochureSubtitleBox.style.display = 'block'; brochure.style.display = 'block'; addGroup.innerText = '写文章'; e.classList.add('active'); } } } // Open a submenu function openSubMenu(sort_type, e) { var lis = document.querySelectorAll('.content-title li'); getHomePageList(sort_type); lis.forEach(function (item) { item.classList.remove('active'); }); e.target.classList.add('active'); } // Fetch home page data // hot: sort_type=200, newest: sort_type=300, trending: sort_type=3 function getHomePageList(sort_type) { var obj = { client_type: 2608, cursor: '0', id_type: 2, limit: 20, sort_type: sort_type, }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/getRecommend', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { init(result.data); }, error: function (error) { console.log(error); }, }); } // Render the home page function init(data) { if (data.length > 0) { var ul = '<ul class="content-list-ul">'; data.forEach(function (item, index) { if (item.item_type == 2) { var author_user_info = item.item_info.author_user_info; var article_info = item.item_info.article_info; var category = ''; var cover_image = ''; item.item_info.tags.forEach(function (tagItem) { var tag_name = '<a class="tagname" target="_blank" href="https://juejin.im/tag/' + tagItem.tag_name + '">' + tagItem.tag_name + '</a>'; category += tag_name; }); if (article_info.cover_image) { cover_image = '<div style="background-image:url(' + article_info.cover_image + ')" class="cover-image"><a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"></a></div>'; } var li = '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"><object>' + '<li class="content-list">' + '<div class="detail-list">' + '<div class="left-box">' + '<div class="detail-title">' + '<ul><li>' + '<a target="_blank" href="https://juejin.im/user/' +
article_info.user_id + '">' + author_user_info.user_name + '</a></li><li>' + dayjs(parseInt(article_info.ctime + '000')).fromNow() + '</li><li>' + category + '</li></ul></div><div class="detail-content">' + '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '">' + article_info.title + '</a></div><div class="detail-action">' + '<ul><li><a href="">' + '<i class="iconfont icon-good"></i>' + '<span>' + article_info.digg_count + '</span></a></li><li><a href="">' + '<i class="iconfont icon-message-reply"></i>' + '<span>' + article_info.comment_count + '</span></a></li><li class="share"><a href="">' + '<i class="iconfont icon-share"></i>' + '</a></li></ul></div></div><div class="right-box">' + cover_image + '</div></div></li></object></a>'; ul += li; } }); ul += '</ul>'; document.querySelector('.content-box .content').innerHTML = ul; } } // Fetch topics data function getTopic() { var obj = { cursor: '0', limit: 21, sort_type: 7, }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/getTopic', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initTopic(result.data); }, error: function (error) { console.log(error); }, }); } // Render the topics page function initTopic(data) { if (data.length > 0) { var topicBox = '<div class="topic-box-div">' + '<div class="topic-content">' + '<div class="topic-title">全部话题</
type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initBrochure(result.data); }, error: function (err) { console.log(err); }, }); } // Render the booklets page function initBrochure(data) { var brochureList = ''; data.forEach(function (item) { var info = item.base_info; var userInfo = item.user_info; var presell = ''; var levelPic = ''; var tooltip = ''; var levelArr = [1, 2, 3, 4, 5]; if (info.is_finished == 0) { presell = '<span class="presell">预售</span>'; } if (levelArr.indexOf(userInfo.level) >= 0) { levelPic = '<a target="_blank" href="https://juejin.im/book/6844733795329900551/section/6844733795371843597" class="rank"><img src="./img/lv' + userInfo.level + '.svg" alt="" /></a>'; } if (userInfo.company) { userInfo.company = ' @ ' + userInfo.company; } if (item.event_discount) { var endTime = item.event_discount.end_time * 1000; countdown(endTime); var tooltip = '<div class="tooltip">' + '<span class="pre-text">' + '<img src="//s3.pstatp.com/toutiao/xitu_juejin_web/img/gift.9a8f3aa.png" alt=""/>' + '<span class="time-limit-price">限时优惠价 ' + (info.price / 1000) * item.event_discount.discount_rate + ' 元</span></span>' + '<span class="counnt-down">' + '<span class="endTime">' + '</span>' + '</span>' + '</div>'; } var brochureItem = '<a target="_blank" href="https://juejin.im/book/' + item.booklet_id + '"><object>' + '<div class="brochure-item">' + '<div class="pic" style="background-image: url(' + info.cover_img + ');"></div>' + '<div class="content">' + '<div class="title">' + presell + '<span class="title-detail">' + info.title + '</span>' + '</div>' + '<div class="detail">' + info.summary + '</div>' + '<div class="author">' + '<div class="author-info"><a target="_blank" href="https://juejin.im/user/' + userInfo.user_id + '"><div class="profile-photo" style="' + 'background-image: url(' + userInfo.avatar_large + ');"></div>' + '<a class="author-name" target="_blank" href="http://juejin.im/user/' + userInfo.user_id + '">' + userInfo.user_name + '</a>' + levelPic + '</a>' + '</div>' + '<div class="author-desc">' + '<span>' + userInfo.job_title + userInfo.company + '</span>' + '</div>' + '</div>' + '<div class="other">' + '<div class="price">' + '<a href="https://juejin.im/books/payment/' + item.booklet_id + '">' + '<div class="price-text">¥' + parseInt(info.price) / 100 + '</div>' + '</a>' + tooltip + '</div>' + '<div class="messages">' + '<span class="message">' + '<span>' + info.section_count + '小节</span>' + '</span>' + '<span class="message">' + '<span>' + ' ' + info.buy_count + '</span>' + '<span> 人已购买</span>' + '</span>' + '</div>' + '</div>' + '</div>' + '</div>' + '</object></a>'; brochureList += brochureItem; }); document.querySelector('.brochure-list').innerHTML = brochureList; } // Countdown timer function countdown(endTime) { var time; var timer = setInterval(function () { const msec = endTime - +new Date(); if (msec > 0) { // Compute days, hours, minutes, seconds let day = parseInt(msec / 1000 / 60 / 60 / 24); let hr = parseInt((msec / 1000 / 60 / 60) % 24); let min = parseInt((msec / 1000 / 60) % 60); let sec = parseInt((msec / 1000) % 60); let minsec = parseInt((msec % 1000) / 10); // Zero-pad single-digit values hr = hr > 9 ? hr : '0' + hr; min = min > 9 ? min : '0' + min; sec = sec > 9 ? sec : '0' + sec; minsec = minsec > 9 ? minsec : '0' + minsec; time = day + '天 ' + hr + ':' + min + ':' + sec + '.'
+ minsec; document.querySelector('.endTime').innerText = '倒计时 ' + time; } else { clearInterval(timer); } }, 10); } // Scroll position listener function initScroll() { if (document.documentElement.scrollTop > 660) { document.querySelector('.header-box').classList.add('visible'); document.querySelector('.content-container').classList.add('top'); } else { document.querySelector('.header-box').classList.remove('visible'); document.querySelector('.content-container').classList.remove('top'); } }
div>' + '<div class="topic-list">'; data.forEach(function (item) { var topicItem = '<div class="topic-item">' + '<div class="pic" style="background-image: url(' + item.topic.icon + ')">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '"></a>' + '</div>' + '<div class="detail">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '">' + item.topic.title + '</a>' + '<span>' + item.topic.follower_count + ' 关注 · ' + item.topic.msg_count + ' 沸点</span>' + '<span> + 关注 </span>' + '</div>' + '</div>'; topicBox += topicItem; }); document.querySelector( '.content-container .topic-box' ).innerHTML = topicBox; } } // Fetch booklets data function getBrochure() { var obj = { cursor: '0', limit: 20, category_id: '0', }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/listbycategory',
identifier_body
index.js
window.onload = function () { // Initialize the home page getHomePageList(200); // Initialize the topics page getTopic(); // Initialize the booklets page getBrochure(); }; // Toggle panel visibility when a main menu tab is opened function openMainMenu(event) { var topic = document.querySelector('.content-container .topic-box'); var homepage = document.querySelector('.content-container .content-box'); var brochure = document.querySelector('.content-container .brochure-box'); var ad = document.querySelector('.content-container .ad-box'); var homePageSubtitleBox = document.querySelector( '.header-box .homepage-sub-title' ); var brochureSubtitleBox = document.querySelector( '.header-box .brochure-sub-title' ); var titles = document.querySelectorAll('.header-box .title-list li'); var container = document.querySelector('.container'); var addGroup = document.querySelector('.title-list .add-group'); var classNameArr = ['home-page', 'topic', 'brochure']; if (event.target.className) { var e = event.target; } else { e = event.target.parentNode; } var className = e.className; if (className && classNameArr.indexOf(className) >= 0) { titles.forEach(function (item) { item.classList.remove('active'); }); topic.style.display = 'none'; ad.style.display = 'none'; homepage.style.display = 'none'; brochureSubtitleBox.style.display = 'none'; homePageSubtitleBox.style.display = 'none'; brochure.style.display = 'none'; if (className == 'home-page') { ad.style.display = 'block'; homepage.style.display = 'block'; homePageSubtitleBox.style.display = 'block'; addGroup.innerText = '写文章'; e.classList.add('active'); } if (className == 'topic') { topic.style.display = 'block'; document.body.style.background = '#fff'; addGroup.innerText = '发沸点'; e.classList.add('active'); } if (className == 'brochure') { brochureSubtitleBox.style.display = 'block'; brochure.style.display = 'block'; addGroup.innerText = '写文章'; e.classList.add('active'); } } } // Open a submenu function openSubMenu(sort_type, e) { var lis = document.querySelectorAll('.content-title li'); getHomePageList(sort_type); lis.forEach(function (item) { item.classList.remove('active'); }); e.target.classList.add('active'); } // Fetch home page data // hot: sort_type=200, newest: sort_type=300, trending: sort_type=3 function getHomePageList(sort_type) { var obj = { client_type: 2608, cursor: '0', id_type: 2, limit: 20, sort_type: sort_type, }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/getRecommend', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { init(result.data); }, error: function (error) { console.log(error); }, }); } // Render the home page function init(data) { if (data.length > 0) { var ul = '<ul class="content-list-ul">'; data.forEach(function (item, index) { if (item.item_type == 2) { var author_user_info = item.item_info.author_user_info; var article_info = item.item_info.article_info; var category = ''; var cover_image = ''; item.item_info.tags.forEach(function (tagItem) { var tag_name = '<a class="tagname" target="_blank" href="https://juejin.im/tag/' + tagItem.tag_name + '">' + tagItem.tag_name + '</a>'; category += tag_name; }); if (article_info.cover_image) { cover_image = '<div style="background-image:url(' + article_info.cover_image + ')" class="cover-image"><a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"></a></div>'; } var li = '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '"><object>' + '<li class="content-list">' + '<div class="detail-list">' + '<div class="left-box">' + '<div class="detail-title">' + '<ul><li>' + '<a target="_blank" href="https://juejin.im/user/' +
article_info.user_id + '">' + author_user_info.user_name + '</a></li><li>' + dayjs(parseInt(article_info.ctime + '000')).fromNow() + '</li><li>' + category + '</li></ul></div><div class="detail-content">' + '<a target="_blank" href="https://juejin.im/post/' + article_info.article_id + '">' + article_info.title + '</a></div><div class="detail-action">' + '<ul><li><a href="">' + '<i class="iconfont icon-good"></i>' + '<span>' + article_info.digg_count + '</span></a></li><li><a href="">' + '<i class="iconfont icon-message-reply"></i>' + '<span>' + article_info.comment_count + '</span></a></li><li class="share"><a href="">' + '<i class="iconfont icon-share"></i>' + '</a></li></ul></div></div><div class="right-box">' + cover_image + '</div></div></li></object></a>'; ul += li; } }); ul += '</ul>'; document.querySelector('.content-box .content').innerHTML = ul; } } // Fetch topics data function getTopic() { var obj = { cursor: '0', limit: 21, sort_type: 7, }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/getTopic', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initTopic(result.data);
console.log(error); }, }); } // Render the topics page function initTopic(data) { if (data.length > 0) { var topicBox = '<div class="topic-box-div">' + '<div class="topic-content">' + '<div class="topic-title">全部话题</div>' + '<div class="topic-list">'; data.forEach(function (item) { var topicItem = '<div class="topic-item">' + '<div class="pic" style="background-image: url(' + item.topic.icon + ')">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '"></a>' + '</div>' + '<div class="detail">' + '<a target="_blank" href="https://juejin.im/topic/' + item.topic.topic_id + '">' + item.topic.title + '</a>' + '<span>' + item.topic.follower_count + ' 关注 · ' + item.topic.msg_count + ' 沸点</span>' + '<span> + 关注 </span>' + '</div>' + '</div>'; topicBox += topicItem; }); document.querySelector( '.content-container .topic-box' ).innerHTML = topicBox; } } // Fetch booklets data function getBrochure() { var obj = { cursor: '0', limit: 20, category_id: '0', }; $.ajax({ url: 'http://127.0.0.1:3000/api/juejin/listbycategory', type: 'POST', contentType: 'application/json', data: JSON.stringify(obj), success: function (result) { initBrochure(result.data); }, error: function (err) { console.log(err); }, }); } // Render the booklets page function initBrochure(data) { var brochureList = ''; data.forEach(function (item) { var info = item.base_info; var userInfo = item.user_info; var presell = ''; var levelPic = ''; var tooltip = ''; var levelArr = [1, 2, 3, 4, 5]; if (info.is_finished == 0) { presell = '<span class="presell">预售</span>'; } if (levelArr.indexOf(userInfo.level) >= 0) { levelPic = '<a target="_blank" href="https://juejin.im/book/6844733795329900551/section/6844733795371843597" class="rank"><img src="./img/lv' + userInfo.level + '.svg" alt="" /></a>'; } if (userInfo.company) { userInfo.company = ' @ ' + userInfo.company; } if (item.event_discount) { var endTime = item.event_discount.end_time * 1000; countdown(endTime); var tooltip = '<div class="tooltip">' + '<span class="pre-text">' + '<img src="//s3.pstatp.com/toutiao/xitu_juejin_web/img/gift.9a8f3aa.png" alt=""/>' + '<span class="time-limit-price">限时优惠价 ' + (info.price / 1000) * item.event_discount.discount_rate + ' 元</span></span>' + '<span class="counnt-down">' + '<span class="endTime">' + '</span>' + '</span>' + '</div>'; } var brochureItem = '<a target="_blank" href="https://juejin.im/book/' + item.booklet_id + '"><object>' + '<div class="brochure-item">' + '<div class="pic" style="background-image: url(' + info.cover_img + ');"></div>' + '<div class="content">' + '<div class="title">' + presell + '<span class="title-detail">' + info.title + '</span>' + '</div>' + '<div class="detail">' + info.summary + '</div>' + '<div class="author">' + '<div class="author-info"><a target="_blank" href="https://juejin.im/user/' + userInfo.user_id + '"><div class="profile-photo" style="' + 'background-image: url(' + userInfo.avatar_large + ');"></div>' + '<a class="author-name" target="_blank" href="http://juejin.im/user/' + userInfo.user_id + '">' + userInfo.user_name + '</a>' + levelPic + '</a>' + '</div>' + '<div class="author-desc">' + '<span>' + userInfo.job_title + userInfo.company + '</span>' + '</div>' + '</div>' + '<div class="other">' + '<div class="price">' + '<a href="https://juejin.im/books/payment/' + item.booklet_id + '">' + '<div class="price-text">¥' + parseInt(info.price) / 100 + '</div>' + '</a>' + tooltip + '</div>' + '<div class="messages">' + '<span class="message">' + '<span>' + info.section_count + '小节</span>' + '</span>' + '<span class="message">' +
'<span>' + ' ' + info.buy_count + '</span>' + '<span> 人已购买</span>' + '</span>' + '</div>' + '</div>' + '</div>' + '</div>' + '</object></a>'; brochureList += brochureItem; }); document.querySelector('.brochure-list').innerHTML = brochureList; } // Countdown function countdown(endTime) { var time; var timer = setInterval(function () { const msec = endTime - +new Date(); if (msec > 0) { // Compute days, hours, minutes and seconds let day = parseInt(msec / 1000 / 60 / 60 / 24); let hr = parseInt((msec / 1000 / 60 / 60) % 24); let min = parseInt((msec / 1000 / 60) % 60); let sec = parseInt((msec / 1000) % 60); let minsec = parseInt((msec % 1000) / 10); // Zero-pad single-digit values hr = hr > 9 ? hr : '0' + hr; min = min > 9 ? min : '0' + min; sec = sec > 9 ? sec : '0' + sec; minsec = minsec > 9 ? minsec : '0' + minsec; time = day + '天 ' + hr + ':' + min + ':' + sec + '.' + minsec; document.querySelector('.endTime').innerText = '倒计时 ' + time; } else { clearInterval(timer); } }, 10); } // Watch the scroll position function initScroll() { if (document.documentElement.scrollTop > 660) { document.querySelector('.header-box').classList.add('visible'); document.querySelector('.content-container').classList.add('top'); } else { document.querySelector('.header-box').classList.remove('visible'); document.querySelector('.content-container').classList.remove('top'); } }
}, error: function (error) {
random_line_split
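For reference, the countdown helper above reduces a millisecond delta to days, hours, minutes, seconds and centiseconds, zero-padding each piece. Below is a minimal sketch of the same arithmetic, written in Rust (the language of the next file in this dump) so that all added examples share one language; the function name format_countdown is illustrative, not part of the original page.

/// Decompose a millisecond delta into a zero-padded
/// "D天 HH:MM:SS.CC" string, mirroring the JS countdown helper.
fn format_countdown(msec: u64) -> String {
    let day = msec / 1000 / 60 / 60 / 24;
    let hr = (msec / 1000 / 60 / 60) % 24;
    let min = (msec / 1000 / 60) % 60;
    let sec = (msec / 1000) % 60;
    let minsec = (msec % 1000) / 10; // centiseconds, as in the JS version
    format!("{}天 {:02}:{:02}:{:02}.{:02}", day, hr, min, sec, minsec)
}

fn main() {
    // 90,061,230 ms = 1 day, 1 hour, 1 minute, 1 second, 230 ms
    assert_eq!(format_countdown(90_061_230), "1天 01:01:01.23");
}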
futures.rs
//! A futures executor as an event source //! //! Only available with the `executor` cargo feature of `calloop`. //! //! This executor is intended for light futures, which will be polled as part of your //! event loop. Such futures may be waiting for IO, or for some external computation on //! another thread, for example. //! //! You can create a new executor using the `executor` function, which creates a pair //! `(Executor<T>, Scheduler<T>)` to handle futures that all evaluate to type `T`. The //! executor should be inserted into your event loop, and will yield the return values of //! the futures as they finish into your callback. The scheduler can be cloned and used //! to send futures into the executor for execution. A generic executor can be obtained //! by choosing `T = ()` and letting futures handle the forwarding of their return values //! (if any) by their own means. //! //! **Note:** The futures must have their own means of being woken up, as this executor is, //! by itself, not I/O aware. See [`LoopHandle::adapt_io`](crate::LoopHandle#method.adapt_io) //! for that, or you can use some other mechanism if you prefer. use async_task::{Builder, Runnable}; use slab::Slab; use std::{ cell::RefCell, future::Future, rc::Rc, sync::{ atomic::{AtomicBool, Ordering}, mpsc, Arc, Mutex, }, task::Waker, }; use crate::{ sources::{ channel::ChannelError, ping::{make_ping, Ping, PingError, PingSource}, EventSource, }, Poll, PostAction, Readiness, Token, TokenFactory, }; /// A future executor as an event source #[derive(Debug)] pub struct Executor<T> { /// Shared state between the executor and the scheduler. state: Rc<State<T>>, /// Notifies us when the executor is woken up. ping: PingSource, } /// A scheduler to send futures to an executor #[derive(Clone, Debug)] pub struct Scheduler<T> { /// Shared state between the executor and the scheduler. state: Rc<State<T>>, } /// The inner state of the executor. #[derive(Debug)] struct State<T> { /// The incoming queue of runnables to be executed. incoming: mpsc::Receiver<Runnable<usize>>, /// The sender corresponding to `incoming`. sender: Arc<Sender>, /// The list of currently active tasks. /// /// This is set to `None` when the executor is destroyed. active_tasks: RefCell<Option<Slab<Active<T>>>>, } /// Send a future to an executor. /// /// This needs to be thread-safe, as it is called from a `Waker` that may be on a different thread. #[derive(Debug)] struct Sender { /// The sender used to send runnables to the executor. /// /// `mpsc::Sender` is `!Sync`, wrapping it in a `Mutex` makes it `Sync`. sender: Mutex<mpsc::Sender<Runnable<usize>>>, /// The ping source used to wake up the executor. wake_up: Ping, /// Whether the executor has already been woken. notified: AtomicBool, } /// An active future or its result. #[derive(Debug)] enum Active<T> { /// The future is currently being polled. /// /// Waking this waker will insert the runnable into `incoming`. Future(Waker), /// The future has finished polling, and its result is stored here. Finished(T), } impl<T> Active<T> { fn is_finished(&self) -> bool { matches!(self, Active::Finished(_)) } } impl<T> Scheduler<T> { /// Sends the given future to the executor associated with this scheduler /// /// Returns an error if the executor no longer exists. pub fn schedule<Fut: 'static>(&self, future: Fut) -> Result<(), ExecutorDestroyed> where Fut: Future<Output = T>, T: 'static, { /// Store this future's result in the executor.
struct StoreOnDrop<'a, T> { index: usize, value: Option<T>, state: &'a State<T>, } impl<T> Drop for StoreOnDrop<'_, T> { fn drop(&mut self) { let mut active_tasks = self.state.active_tasks.borrow_mut(); if let Some(active_tasks) = active_tasks.as_mut() { if let Some(value) = self.value.take() { active_tasks[self.index] = Active::Finished(value); } else { // The future was dropped before it finished. // Remove it from the active list. active_tasks.remove(self.index); } } } } fn assert_send_and_sync<T: Send + Sync>(_: &T) {} let mut active_guard = self.state.active_tasks.borrow_mut(); let active_tasks = active_guard.as_mut().ok_or(ExecutorDestroyed)?; // Wrap the future in another future that polls it and stores the result. let index = active_tasks.vacant_key(); let future = { let state = self.state.clone(); async move { let mut guard = StoreOnDrop { index, value: None, state: &state, }; // Get the value of the future. let value = future.await; // Store it in the executor. guard.value = Some(value); } }; // A schedule function that inserts the runnable into the incoming queue. let schedule = { let sender = self.state.sender.clone(); move |runnable| sender.send(runnable) }; assert_send_and_sync(&schedule); // Spawn the future. let (runnable, task) = Builder::new() .metadata(index) .spawn_local(move |_| future, schedule); // Insert the runnable into the set of active tasks. active_tasks.insert(Active::Future(runnable.waker())); drop(active_guard); // Schedule the runnable and detach the task so it isn't cancellable. runnable.schedule(); task.detach(); Ok(()) } } impl Sender { /// Send a runnable to the executor. fn send(&self, runnable: Runnable<usize>) { // Send on the channel. // // All we do with the lock is call `send`, so there's no chance of any state being corrupted on // panic. Therefore it's safe to ignore the mutex poison. if let Err(e) = self .sender .lock() .unwrap_or_else(|e| e.into_inner()) .send(runnable) { // The runnable must be dropped on its origin thread, since the original future might be // !Send. This channel immediately sends it back to the Executor, which is pinned to the // origin thread. The executor's Drop implementation will force all of the runnables to be // dropped, therefore the channel should always be available. If we can't send the runnable, // it indicates that the above behavior is broken and that unsoundness has occurred. The // only option at this stage is to forget the runnable and leak the future. std::mem::forget(e); unreachable!("Attempted to send runnable to a stopped executor"); } // If the executor is already awake, don't bother waking it up again. if self.notified.swap(true, Ordering::SeqCst) { return; } // Wake the executor. self.wake_up.ping(); } } impl<T> Drop for Executor<T> { fn drop(&mut self) { let active_tasks = self.state.active_tasks.borrow_mut().take().unwrap(); // Wake all of the active tasks in order to destroy their runnables. for (_, task) in active_tasks { if let Active::Future(waker) = task { // Don't let a panicking waker blow everything up. // // There is a chance that a future will panic and, during the unwinding process, // drop this executor. However, since the future panicked, there is a possibility // that the internal state of the waker will be invalid in such a way that the waker // panics as well. Since this would be a panic during a panic, Rust will upgrade it // into an abort. // // In the interest of not aborting without a good reason, we just drop the panic here. 
std::panic::catch_unwind(|| waker.wake()).ok(); } } // Drain the queue in order to drop all of the runnables. while self.state.incoming.try_recv().is_ok() {} } } /// Error generated when trying to schedule a future after the /// executor was destroyed. #[derive(thiserror::Error, Debug)] #[error("the executor was destroyed")] pub struct ExecutorDestroyed; /// Create a new executor, and its associated scheduler /// /// May fail due to OS errors preventing calloop from setting up its internal pipes (if your /// process has reached its file descriptor limit, for example). pub fn executor<T>() -> crate::Result<(Executor<T>, Scheduler<T>)> { let (sender, incoming) = mpsc::channel(); let (wake_up, ping) = make_ping()?; let state = Rc::new(State { incoming, active_tasks: RefCell::new(Some(Slab::new())), sender: Arc::new(Sender { sender: Mutex::new(sender), wake_up, notified: AtomicBool::new(false), }), }); Ok(( Executor { state: state.clone(), ping, }, Scheduler { state }, )) } impl<T> EventSource for Executor<T> { type Event = T; type Metadata = (); type Ret = (); type Error = ExecutorError; fn process_events<F>( &mut self, readiness: Readiness, token: Token, mut callback: F, ) -> Result<PostAction, Self::Error> where F: FnMut(T, &mut ()), { let state = &self.state; // Set to the unnotified state. state.sender.notified.store(false, Ordering::SeqCst); let clear_readiness = { let mut clear_readiness = false; // Process runnables, but not too many at a time; better to move onto the next event quickly! for _ in 0..1024 { let runnable = match state.incoming.try_recv() { Ok(runnable) => runnable, Err(_) => { // Make sure to clear the readiness if there are no more runnables. clear_readiness = true; break; } }; // Run the runnable. let index = *runnable.metadata(); runnable.run(); // If the runnable finished with a result, call the callback. let mut active_guard = state.active_tasks.borrow_mut(); let active_tasks = active_guard.as_mut().unwrap(); if let Some(state) = active_tasks.get(index) { if state.is_finished() { // Take out the state and provide it to the caller. let result = match active_tasks.remove(index) { Active::Finished(result) => result, _ => unreachable!(), }; callback(result, &mut ()); } } } clear_readiness }; // Clear the readiness of the ping source if there are no more runnables. if clear_readiness { self.ping .process_events(readiness, token, |(), &mut ()| {}) .map_err(ExecutorError::WakeError)?; } Ok(PostAction::Continue) } fn register(&mut self, poll: &mut Poll, token_factory: &mut TokenFactory) -> crate::Result<()> { self.ping.register(poll, token_factory)?; Ok(()) } fn reregister( &mut self, poll: &mut Poll, token_factory: &mut TokenFactory, ) -> crate::Result<()> { self.ping.reregister(poll, token_factory)?; Ok(()) } fn
(&mut self, poll: &mut Poll) -> crate::Result<()> { self.ping.unregister(poll)?; Ok(()) } } /// An error arising from processing events in an async executor event source. #[derive(thiserror::Error, Debug)] pub enum ExecutorError { /// Error while reading new futures added via [`Scheduler::schedule()`]. #[error("error adding new futures")] NewFutureError(ChannelError), /// Error while processing wake events from existing futures. #[error("error processing wake events")] WakeError(PingError), } #[cfg(test)] mod tests { use super::*; #[test] fn ready() { let mut event_loop = crate::EventLoop::<u32>::try_new().unwrap(); let handle = event_loop.handle(); let (exec, sched) = executor::<u32>().unwrap(); handle .insert_source(exec, move |ret, &mut (), got| { *got = ret; }) .unwrap(); let mut got = 0; let fut = async { 42 }; event_loop .dispatch(Some(::std::time::Duration::ZERO), &mut got) .unwrap(); // the future is not yet inserted, and thus has not yet run assert_eq!(got, 0); sched.schedule(fut).unwrap(); event_loop .dispatch(Some(::std::time::Duration::ZERO), &mut got) .unwrap(); // the future has run assert_eq!(got, 42); } }
unregister
identifier_name
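The module docs above describe the intended wiring: insert the Executor into a calloop event loop and submit futures through the cloned Scheduler. A minimal usage sketch follows, closely mirroring the ready() test at the end of the file; it assumes calloop's `executor` cargo feature is enabled and that the module is reachable as calloop::futures, and the u32 payload and zero dispatch timeout are illustrative.

use calloop::EventLoop;

fn main() {
    let mut event_loop = EventLoop::<u32>::try_new().unwrap();
    let handle = event_loop.handle();

    // One executor/scheduler pair; every scheduled future resolves to u32.
    let (exec, sched) = calloop::futures::executor::<u32>().unwrap();

    // Finished futures are handed to this callback along with the loop data.
    handle
        .insert_source(exec, |ret, &mut (), got: &mut u32| {
            *got = ret;
        })
        .unwrap();

    // The scheduler can be cloned; send a future into the executor.
    sched.schedule(async { 42 }).unwrap();

    let mut got = 0;
    event_loop
        .dispatch(Some(std::time::Duration::ZERO), &mut got)
        .unwrap();
    assert_eq!(got, 42);
}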
manifests.go
/****************************************************************************** * * Copyright 2020 SAP SE * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ package tasks import ( "context" "database/sql" "fmt" "time" "github.com/opencontainers/go-digest" "github.com/sapcc/go-bits/logg" "github.com/sapcc/keppel/internal/keppel" ) //query that finds the next manifest to be validated var outdatedManifestSearchQuery = keppel.SimplifyWhitespaceInSQL(` SELECT * FROM manifests WHERE validated_at < $1 ORDER BY validated_at ASC -- oldest manifests first LIMIT 1 -- one at a time `) //ValidateNextManifest validates manifests that have not been validated for more //than 6 hours. At most one manifest is validated per call. If no manifest //needs to be validated, sql.ErrNoRows is returned. func (j *Janitor) ValidateNextManifest() (returnErr error) { defer func() { if returnErr == nil { validateManifestSuccessCounter.Inc() } else if returnErr != sql.ErrNoRows { validateManifestFailedCounter.Inc() returnErr = fmt.Errorf("while validating a manifest: %s", returnErr.Error()) } }() //find manifest var manifest keppel.Manifest maxValidatedAt := j.timeNow().Add(-6 * time.Hour) err := j.db.SelectOne(&manifest, outdatedManifestSearchQuery, maxValidatedAt) if err != nil { if err == sql.ErrNoRows { logg.Debug("no manifests to validate - slowing down...") return sql.ErrNoRows } return err } //find corresponding account and repo var repo keppel.Repository err = j.db.SelectOne(&repo, `SELECT * FROM repos WHERE id = $1`, manifest.RepositoryID) if err != nil { return fmt.Errorf("cannot find repo %d for manifest %s: %s", manifest.RepositoryID, manifest.Digest, err.Error()) } account, err := keppel.FindAccount(j.db, repo.AccountName) if err != nil { return fmt.Errorf("cannot find account for manifest %s/%s: %s", repo.FullName(), manifest.Digest, err.Error()) } //perform validation ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() err = retry(ctx, defaultRetryOpts, func() error { return j.processor().ValidateExistingManifest(*account, repo, &manifest, j.timeNow()) }) if err == nil { //update `validated_at` and reset error message _, err := j.db.Exec(` UPDATE manifests SET validated_at = $1, validation_error_message = '' WHERE repo_id = $2 AND digest = $3`, j.timeNow(), repo.ID, manifest.Digest, ) if err != nil { return err } } else { //attempt to log the error message, and also update the `validated_at` //timestamp to ensure that the ValidateNextManifest() loop does not get //stuck on this one _, updateErr := j.db.Exec(` UPDATE manifests SET validated_at = $1, validation_error_message = $2 WHERE repo_id = $3 AND digest = $4`, j.timeNow(), err.Error(), repo.ID, manifest.Digest, ) if updateErr != nil { err = fmt.Errorf("%s (additional error encountered while recording validation error: %s)", err.Error(), updateErr.Error()) } return err } return nil } var syncManifestRepoSelectQuery = keppel.SimplifyWhitespaceInSQL(` SELECT 
r.* FROM repos r JOIN accounts a ON r.account_name = a.name WHERE (r.next_manifest_sync_at IS NULL OR r.next_manifest_sync_at < $1) -- only consider repos in replica accounts AND a.upstream_peer_hostname != '' -- repos without any syncs first, then sorted by last sync ORDER BY r.next_manifest_sync_at IS NULL DESC, r.next_manifest_sync_at ASC -- only one repo at a time LIMIT 1 `) var syncManifestEnumerateRefsQuery = keppel.SimplifyWhitespaceInSQL(` SELECT parent_digest, child_digest FROM manifest_manifest_refs WHERE repo_id = $1 `) var syncManifestDoneQuery = keppel.SimplifyWhitespaceInSQL(` UPDATE repos SET next_manifest_sync_at = $2 WHERE id = $1 `) //SyncManifestsInNextRepo finds the next repository in a replica account where //manifests have not been synced for more than an hour, and syncs its manifests. //Syncing involves checking with the primary account which manifests have been //deleted there, and replicating the deletions on our side. // //If no repo needs syncing, sql.ErrNoRows is returned. func (j *Janitor) SyncManifestsInNextRepo() (returnErr error) { defer func() { if returnErr == nil { syncManifestsSuccessCounter.Inc() } else if returnErr != sql.ErrNoRows { syncManifestsFailedCounter.Inc() returnErr = fmt.Errorf("while syncing manifests in a replica repo: %s", returnErr.Error()) } }() //find repository to sync var repo keppel.Repository
err := j.db.SelectOne(&repo, syncManifestRepoSelectQuery, j.timeNow()) if err != nil { if err == sql.ErrNoRows { logg.Debug("no accounts to sync manifests in - slowing down...") return sql.ErrNoRows } return err } //find corresponding account account, err := keppel.FindAccount(j.db, repo.AccountName) if err != nil { return fmt.Errorf("cannot find account for repo %s: %s", repo.FullName(), err.Error()) } //do not perform manifest sync while account is in maintenance (maintenance mode blocks all kinds of replication) if !account.InMaintenance { err = j.performManifestSync(*account, repo) if err != nil { return err } } _, err = j.db.Exec(syncManifestDoneQuery, repo.ID, j.timeNow().Add(1*time.Hour)) return err } func (j *Janitor) performManifestSync(account keppel.Account, repo keppel.Repository) error { //enumerate manifests in this repo var manifests []keppel.Manifest _, err := j.db.Select(&manifests, `SELECT * FROM manifests WHERE repo_id = $1`, repo.ID) if err != nil { return fmt.Errorf("cannot list manifests in repo %s: %s", repo.FullName(), err.Error()) } //check which manifests need to be deleted shallDeleteManifest := make(map[string]bool) p := j.processor() for _, manifest := range manifests { ref := keppel.ManifestReference{Digest: digest.Digest(manifest.Digest)} exists, err := p.CheckManifestOnPrimary(account, repo, ref) if err != nil { return fmt.Errorf("cannot check existence of manifest %s/%s on primary account: %s", repo.FullName(), manifest.Digest, err.Error()) } if !exists { shallDeleteManifest[manifest.Digest] = true } } //enumerate manifest-manifest refs in this repo parentDigestsOf := make(map[string][]string) err = keppel.ForeachRow(j.db, syncManifestEnumerateRefsQuery, []interface{}{repo.ID}, func(rows *sql.Rows) error { var ( parentDigest string childDigest string ) err = rows.Scan(&parentDigest, &childDigest) if err != nil { return err } parentDigestsOf[childDigest] = append(parentDigestsOf[childDigest], parentDigest) return nil }) if err != nil { return fmt.Errorf("cannot enumerate manifest-manifest refs in repo %s: %s", repo.FullName(), err.Error()) } //delete manifests in correct order (if there is a parent-child relationship, //we always need to delete the parent manifest first, otherwise the database //will complain because of its consistency checks) if len(shallDeleteManifest) > 0 { logg.Info("deleting %d manifests in repo %s that were deleted on corresponding primary account", len(shallDeleteManifest), repo.FullName()) } manifestWasDeleted := make(map[string]bool) for len(shallDeleteManifest) > 0 { deletedSomething := false MANIFEST: for digest := range shallDeleteManifest { for _, parentDigest := range parentDigestsOf[digest] { if !manifestWasDeleted[parentDigest] { //cannot delete this manifest yet because it's still being referenced - retry in next iteration continue MANIFEST } } //no manifests left that reference this one - we can delete it // //The ordering is important: The DELETE statement could fail if some concurrent //process created a manifest reference in the meantime. If that happens, //and we have already deleted the manifest in the backing storage, we've //caused an inconsistency that we cannot recover from. To avoid that //risk, we do it the other way around. In this way, we could have an //inconsistency where the manifest is deleted from the database, but still //present in the backing storage. But this inconsistency is easier to //recover from: SweepStorageInNextAccount will take care of it soon //enough. 
Also the user will not notice this inconsistency because the DB //is our primary source of truth. _, err := j.db.Delete(&keppel.Manifest{RepositoryID: repo.ID, Digest: digest}) //without transaction: we need this committed right now if err != nil { return fmt.Errorf("cannot remove deleted manifest %s in repo %s from DB: %s", digest, repo.FullName(), err.Error()) } err = j.sd.DeleteManifest(account, repo.Name, digest) if err != nil { return fmt.Errorf("cannot remove deleted manifest %s in repo %s from storage: %s", digest, repo.FullName(), err.Error()) } //remove deletion from work queue (so that we can eventually exit from the outermost loop) delete(shallDeleteManifest, digest) //track deletion (so that we can eventually start deleting manifests referenced by this one) manifestWasDeleted[digest] = true //track that we're making progress deletedSomething = true } //we should be deleting something in each iteration, otherwise we will get stuck in an infinite loop if !deletedSomething { undeletedDigests := make([]string, 0, len(shallDeleteManifest)) for digest := range shallDeleteManifest { undeletedDigests = append(undeletedDigests, digest) } return fmt.Errorf("cannot remove deleted manifests %v in repo %s because they are still being referenced by other manifests (this smells like an inconsistency on the primary account)", undeletedDigests, repo.FullName()) } } return nil }
random_line_split
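The manifest sync above must delete parent manifests before the children they reference, retrying the rest of the work queue each pass and failing if a full pass deletes nothing. Below is a language-neutral sketch of that worklist loop, written in Rust to match the other added examples in this document; the types and the delete callback are illustrative and not Keppel's actual API.

use std::collections::{HashMap, HashSet};

/// Delete `to_delete` in parent-first order. `parents_of` maps a child digest
/// to the digests of the manifests that reference it. Returns Err with the
/// undeletable digests if a full pass makes no progress (i.e. something is
/// still referenced, as on an inconsistent primary account).
fn delete_parent_first(
    mut to_delete: HashSet<String>,
    parents_of: &HashMap<String, Vec<String>>,
    mut delete: impl FnMut(&str),
) -> Result<(), Vec<String>> {
    let mut deleted: HashSet<String> = HashSet::new();
    while !to_delete.is_empty() {
        // A digest is ready once every parent referencing it has been deleted.
        let ready: Vec<String> = to_delete
            .iter()
            .filter(|d| {
                parents_of
                    .get(*d)
                    .map_or(true, |ps| ps.iter().all(|p| deleted.contains(p)))
            })
            .cloned()
            .collect();
        if ready.is_empty() {
            // No progress in this pass: bail out instead of looping forever.
            return Err(to_delete.into_iter().collect());
        }
        for digest in ready {
            delete(&digest); // DB row first, then backing storage, as above
            to_delete.remove(&digest);
            deleted.insert(digest);
        }
    }
    Ok(())
}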
_, err := j.db.Delete(&keppel.Manifest{RepositoryID: repo.ID, Digest: digest}) //without transaction: we need this committed right now if err != nil { return fmt.Errorf("cannot remove deleted manifest %s in repo %s from DB: %s", digest, repo.FullName(), err.Error()) } err = j.sd.DeleteManifest(account, repo.Name, digest) if err != nil { return fmt.Errorf("cannot remove deleted manifest %s in repo %s from storage: %s", digest, repo.FullName(), err.Error()) } //remove deletion from work queue (so that we can eventually exit from the outermost loop) delete(shallDeleteManifest, digest) //track deletion (so that we can eventually start deleting manifests referenced by this one) manifestWasDeleted[digest] = true //track that we're making progress deletedSomething = true } //we should be deleting something in each iteration, otherwise we will get stuck in an infinite loop if !deletedSomething { undeletedDigests := make([]string, 0, len(shallDeleteManifest)) for digest := range shallDeleteManifest { undeletedDigests = append(undeletedDigests, digest) } return fmt.Errorf("cannot remove deleted manifests %v in repo %s because they are still being referenced by other manifests (this smells like an inconsistency on the primary account)", undeletedDigests, repo.FullName()) } } return nil }
{ err = j.performManifestSync(*account, repo) if err != nil { return err } }
conditional_block
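Note on the record above: performManifestSync deletes manifests with a worklist loop so that a manifest is only removed after every manifest that references it is gone. Below is a minimal Go sketch of that ordering logic under assumed names (deleteInOrder, del, and the package name are illustrative, not part of keppel):

package keppelsketch

import "fmt"

// deleteInOrder mirrors the worklist loop in performManifestSync: a digest
// may only be deleted once every manifest that references it (its "parents")
// has been deleted. parentsOf maps a child digest to its referencing digests;
// del performs the actual DB and storage deletion.
func deleteInOrder(toDelete map[string]bool, parentsOf map[string][]string, del func(string) error) error {
	deleted := make(map[string]bool)
	for len(toDelete) > 0 {
		progress := false
	next:
		for d := range toDelete {
			for _, p := range parentsOf[d] {
				if !deleted[p] {
					continue next // still referenced - retry in a later pass
				}
			}
			if err := del(d); err != nil {
				return err
			}
			delete(toDelete, d)
			deleted[d] = true
			progress = true
		}
		if !progress {
			// no pass can make progress: some pending digest has a live parent
			return fmt.Errorf("%d manifests are still referenced by other manifests", len(toDelete))
		}
	}
	return nil
}

If a pass makes no progress, some pending digest still has a parent that was never deleted, which is exactly the "still being referenced" error path in the original.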
manifests.go
/****************************************************************************** * * Copyright 2020 SAP SE * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ package tasks import ( "context" "database/sql" "fmt" "time" "github.com/opencontainers/go-digest" "github.com/sapcc/go-bits/logg" "github.com/sapcc/keppel/internal/keppel" ) //query that finds the next manifest to be validated var outdatedManifestSearchQuery = keppel.SimplifyWhitespaceInSQL(` SELECT * FROM manifests WHERE validated_at < $1 ORDER BY validated_at ASC -- oldest manifests first LIMIT 1 -- one at a time `) //ValidateNextManifest validates manifests that have not been validated for more //than 6 hours. At most one manifest is validated per call. If no manifest //needs to be validated, sql.ErrNoRows is returned. func (j *Janitor) ValidateNextManifest() (returnErr error) { defer func() { if returnErr == nil { validateManifestSuccessCounter.Inc() } else if returnErr != sql.ErrNoRows { validateManifestFailedCounter.Inc() returnErr = fmt.Errorf("while validating a manifest: %s", returnErr.Error()) } }() //find manifest var manifest keppel.Manifest maxValidatedAt := j.timeNow().Add(-6 * time.Hour) err := j.db.SelectOne(&manifest, outdatedManifestSearchQuery, maxValidatedAt) if err != nil { if err == sql.ErrNoRows { logg.Debug("no manifests to validate - slowing down...") return sql.ErrNoRows } return err } //find corresponding account and repo var repo keppel.Repository err = j.db.SelectOne(&repo, `SELECT * FROM repos WHERE id = $1`, manifest.RepositoryID) if err != nil { return fmt.Errorf("cannot find repo %d for manifest %s: %s", manifest.RepositoryID, manifest.Digest, err.Error()) } account, err := keppel.FindAccount(j.db, repo.AccountName) if err != nil { return fmt.Errorf("cannot find account for manifest %s/%s: %s", repo.FullName(), manifest.Digest, err.Error()) } //perform validation ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() err = retry(ctx, defaultRetryOpts, func() error { return j.processor().ValidateExistingManifest(*account, repo, &manifest, j.timeNow()) }) if err == nil { //update `validated_at` and reset error message _, err := j.db.Exec(` UPDATE manifests SET validated_at = $1, validation_error_message = '' WHERE repo_id = $2 AND digest = $3`, j.timeNow(), repo.ID, manifest.Digest, ) if err != nil { return err } } else { //attempt to log the error message, and also update the `validated_at` //timestamp to ensure that the ValidateNextManifest() loop does not get //stuck on this one _, updateErr := j.db.Exec(` UPDATE manifests SET validated_at = $1, validation_error_message = $2 WHERE repo_id = $3 AND digest = $4`, j.timeNow(), err.Error(), repo.ID, manifest.Digest, ) if updateErr != nil { err = fmt.Errorf("%s (additional error encountered while recording validation error: %s)", err.Error(), updateErr.Error()) } return err } return nil } var syncManifestRepoSelectQuery = keppel.SimplifyWhitespaceInSQL(` SELECT 
r.* FROM repos r JOIN accounts a ON r.account_name = a.name WHERE (r.next_manifest_sync_at IS NULL OR r.next_manifest_sync_at < $1) -- only consider repos in replica accounts AND a.upstream_peer_hostname != '' -- repos without any syncs first, then sorted by last sync ORDER BY r.next_manifest_sync_at IS NULL DESC, r.next_manifest_sync_at ASC -- only one repo at a time LIMIT 1 `) var syncManifestEnumerateRefsQuery = keppel.SimplifyWhitespaceInSQL(` SELECT parent_digest, child_digest FROM manifest_manifest_refs WHERE repo_id = $1 `) var syncManifestDoneQuery = keppel.SimplifyWhitespaceInSQL(` UPDATE repos SET next_manifest_sync_at = $2 WHERE id = $1 `) //SyncManifestsInNextRepo finds the next repository in a replica account where //manifests have not been synced for more than an hour, and syncs its manifests. //Syncing involves checking with the primary account which manifests have been //deleted there, and replicating the deletions on our side. // //If no repo needs syncing, sql.ErrNoRows is returned. func (j *Janitor)
() (returnErr error) { defer func() { if returnErr == nil { syncManifestsSuccessCounter.Inc() } else if returnErr != sql.ErrNoRows { syncManifestsFailedCounter.Inc() returnErr = fmt.Errorf("while syncing manifests in a replica repo: %s", returnErr.Error()) } }() //find repository to sync var repo keppel.Repository err := j.db.SelectOne(&repo, syncManifestRepoSelectQuery, j.timeNow()) if err != nil { if err == sql.ErrNoRows { logg.Debug("no accounts to sync manifests in - slowing down...") return sql.ErrNoRows } return err } //find corresponding account account, err := keppel.FindAccount(j.db, repo.AccountName) if err != nil { return fmt.Errorf("cannot find account for repo %s: %s", repo.FullName(), err.Error()) } //do not perform manifest sync while account is in maintenance (maintenance mode blocks all kinds of replication) if !account.InMaintenance { err = j.performManifestSync(*account, repo) if err != nil { return err } } _, err = j.db.Exec(syncManifestDoneQuery, repo.ID, j.timeNow().Add(1*time.Hour)) return err } func (j *Janitor) performManifestSync(account keppel.Account, repo keppel.Repository) error { //enumerate manifests in this repo var manifests []keppel.Manifest _, err := j.db.Select(&manifests, `SELECT * FROM manifests WHERE repo_id = $1`, repo.ID) if err != nil { return fmt.Errorf("cannot list manifests in repo %s: %s", repo.FullName(), err.Error()) } //check which manifests need to be deleted shallDeleteManifest := make(map[string]bool) p := j.processor() for _, manifest := range manifests { ref := keppel.ManifestReference{Digest: digest.Digest(manifest.Digest)} exists, err := p.CheckManifestOnPrimary(account, repo, ref) if err != nil { return fmt.Errorf("cannot check existence of manifest %s/%s on primary account: %s", repo.FullName(), manifest.Digest, err.Error()) } if !exists { shallDeleteManifest[manifest.Digest] = true } } //enumerate manifest-manifest refs in this repo parentDigestsOf := make(map[string][]string) err = keppel.ForeachRow(j.db, syncManifestEnumerateRefsQuery, []interface{}{repo.ID}, func(rows *sql.Rows) error { var ( parentDigest string childDigest string ) err = rows.Scan(&parentDigest, &childDigest) if err != nil { return err } parentDigestsOf[childDigest] = append(parentDigestsOf[childDigest], parentDigest) return nil }) if err != nil { return fmt.Errorf("cannot enumerate manifest-manifest refs in repo %s: %s", repo.FullName(), err.Error()) } //delete manifests in correct order (if there is a parent-child relationship, //we always need to delete the parent manifest first, otherwise the database //will complain because of its consistency checks) if len(shallDeleteManifest) > 0 { logg.Info("deleting %d manifests in repo %s that were deleted on corresponding primary account", len(shallDeleteManifest), repo.FullName()) } manifestWasDeleted := make(map[string]bool) for len(shallDeleteManifest) > 0 { deletedSomething := false MANIFEST: for digest := range shallDeleteManifest { for _, parentDigest := range parentDigestsOf[digest] { if !manifestWasDeleted[parentDigest] { //cannot delete this manifest yet because it's still being referenced - retry in next iteration continue MANIFEST } } //no manifests left that reference this one - we can delete it // //The ordering is important: The DELETE statement could fail if some concurrent //process created a manifest reference in the meantime. If that happens, //and we have already deleted the manifest in the backing storage, we've //caused an inconsistency that we cannot recover from. 
To avoid that //risk, we do it the other way around. In this way, we could have an //inconsistency where the manifest is deleted from the database, but still //present in the backing storage. But this inconsistency is easier to //recover from: SweepStorageInNextAccount will take care of it soon //enough. Also the user will not notice this inconsistency because the DB //is our primary source of truth. _, err := j.db.Delete(&keppel.Manifest{RepositoryID: repo.ID, Digest: digest}) //without transaction: we need this committed right now if err != nil { return fmt.Errorf("cannot remove deleted manifest %s in repo %s from DB: %s", digest, repo.FullName(), err.Error()) } err = j.sd.DeleteManifest(account, repo.Name, digest) if err != nil { return fmt.Errorf("cannot remove deleted manifest %s in repo %s from storage: %s", digest, repo.FullName(), err.Error()) } //remove deletion from work queue (so that we can eventually exit from the outermost loop) delete(shallDeleteManifest, digest) //track deletion (so that we can eventually start deleting manifests referenced by this one) manifestWasDeleted[digest] = true //track that we're making progress deletedSomething = true } //we should be deleting something in each iteration, otherwise we will get stuck in an infinite loop if !deletedSomething { undeletedDigests := make([]string, 0, len(shallDeleteManifest)) for digest := range shallDeleteManifest { undeletedDigests = append(undeletedDigests, digest) } return fmt.Errorf("cannot remove deleted manifests %v in repo %s because they are still being referenced by other manifests (this smells like an inconsistency on the primary account)", undeletedDigests, repo.FullName()) } } return nil }
SyncManifestsInNextRepo
identifier_name
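For readers of this dump: each record stores one source file split into a prefix, a middle, and a suffix, with fim_type naming the kind of span that was cut out (conditional_block, identifier_name, identifier_body, and random_line_split all appear in this section). A small, hypothetical Go sketch of how such a record reassembles into the original file (fimRecord and reassemble are illustrative names, not part of any dataset tooling):

package main

import "fmt"

// fimRecord models one row of this dataset: the original source file is
// recovered as prefix + middle + suffix, and fimType labels the middle span.
type fimRecord struct {
	fileName, prefix, middle, suffix, fimType string
}

func (r fimRecord) reassemble() string { return r.prefix + r.middle + r.suffix }

func main() {
	// values taken from the identifier_name record above
	r := fimRecord{
		fileName: "manifests.go",
		prefix:   "func (j *Janitor) ",
		middle:   "SyncManifestsInNextRepo",
		suffix:   "() (returnErr error) { /* ... */ }",
		fimType:  "identifier_name",
	}
	fmt.Println(r.reassemble()) // prints the stitched declaration
}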
manifests.go
/****************************************************************************** * * Copyright 2020 SAP SE * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ package tasks import ( "context" "database/sql" "fmt" "time" "github.com/opencontainers/go-digest" "github.com/sapcc/go-bits/logg" "github.com/sapcc/keppel/internal/keppel" ) //query that finds the next manifest to be validated var outdatedManifestSearchQuery = keppel.SimplifyWhitespaceInSQL(` SELECT * FROM manifests WHERE validated_at < $1 ORDER BY validated_at ASC -- oldest manifests first LIMIT 1 -- one at a time `) //ValidateNextManifest validates manifests that have not been validated for more //than 6 hours. At most one manifest is validated per call. If no manifest //needs to be validated, sql.ErrNoRows is returned. func (j *Janitor) ValidateNextManifest() (returnErr error)
var syncManifestRepoSelectQuery = keppel.SimplifyWhitespaceInSQL(` SELECT r.* FROM repos r JOIN accounts a ON r.account_name = a.name WHERE (r.next_manifest_sync_at IS NULL OR r.next_manifest_sync_at < $1) -- only consider repos in replica accounts AND a.upstream_peer_hostname != '' -- repos without any syncs first, then sorted by last sync ORDER BY r.next_manifest_sync_at IS NULL DESC, r.next_manifest_sync_at ASC -- only one repo at a time LIMIT 1 `) var syncManifestEnumerateRefsQuery = keppel.SimplifyWhitespaceInSQL(` SELECT parent_digest, child_digest FROM manifest_manifest_refs WHERE repo_id = $1 `) var syncManifestDoneQuery = keppel.SimplifyWhitespaceInSQL(` UPDATE repos SET next_manifest_sync_at = $2 WHERE id = $1 `) //SyncManifestsInNextRepo finds the next repository in a replica account where //manifests have not been synced for more than an hour, and syncs its manifests. //Syncing involves checking with the primary account which manifests have been //deleted there, and replicating the deletions on our side. // //If no repo needs syncing, sql.ErrNoRows is returned. func (j *Janitor) SyncManifestsInNextRepo() (returnErr error) { defer func() { if returnErr == nil { syncManifestsSuccessCounter.Inc() } else if returnErr != sql.ErrNoRows { syncManifestsFailedCounter.Inc() returnErr = fmt.Errorf("while syncing manifests in a replica repo: %s", returnErr.Error()) } }() //find repository to sync var repo keppel.Repository err := j.db.SelectOne(&repo, syncManifestRepoSelectQuery, j.timeNow()) if err != nil { if err == sql.ErrNoRows { logg.Debug("no accounts to sync manifests in - slowing down...") return sql.ErrNoRows } return err } //find corresponding account account, err := keppel.FindAccount(j.db, repo.AccountName) if err != nil { return fmt.Errorf("cannot find account for repo %s: %s", repo.FullName(), err.Error()) } //do not perform manifest sync while account is in maintenance (maintenance mode blocks all kinds of replication) if !account.InMaintenance { err = j.performManifestSync(*account, repo) if err != nil { return err } } _, err = j.db.Exec(syncManifestDoneQuery, repo.ID, j.timeNow().Add(1*time.Hour)) return err } func (j *Janitor) performManifestSync(account keppel.Account, repo keppel.Repository) error { //enumerate manifests in this repo var manifests []keppel.Manifest _, err := j.db.Select(&manifests, `SELECT * FROM manifests WHERE repo_id = $1`, repo.ID) if err != nil { return fmt.Errorf("cannot list manifests in repo %s: %s", repo.FullName(), err.Error()) } //check which manifests need to be deleted shallDeleteManifest := make(map[string]bool) p := j.processor() for _, manifest := range manifests { ref := keppel.ManifestReference{Digest: digest.Digest(manifest.Digest)} exists, err := p.CheckManifestOnPrimary(account, repo, ref) if err != nil { return fmt.Errorf("cannot check existence of manifest %s/%s on primary account: %s", repo.FullName(), manifest.Digest, err.Error()) } if !exists { shallDeleteManifest[manifest.Digest] = true } } //enumerate manifest-manifest refs in this repo parentDigestsOf := make(map[string][]string) err = keppel.ForeachRow(j.db, syncManifestEnumerateRefsQuery, []interface{}{repo.ID}, func(rows *sql.Rows) error { var ( parentDigest string childDigest string ) err = rows.Scan(&parentDigest, &childDigest) if err != nil { return err } parentDigestsOf[childDigest] = append(parentDigestsOf[childDigest], parentDigest) return nil }) if err != nil { return fmt.Errorf("cannot enumerate manifest-manifest refs in repo %s: %s", repo.FullName(), err.Error()) 
} //delete manifests in correct order (if there is a parent-child relationship, //we always need to delete the parent manifest first, otherwise the database //will complain because of its consistency checks) if len(shallDeleteManifest) > 0 { logg.Info("deleting %d manifests in repo %s that were deleted on corresponding primary account", len(shallDeleteManifest), repo.FullName()) } manifestWasDeleted := make(map[string]bool) for len(shallDeleteManifest) > 0 { deletedSomething := false MANIFEST: for digest := range shallDeleteManifest { for _, parentDigest := range parentDigestsOf[digest] { if !manifestWasDeleted[parentDigest] { //cannot delete this manifest yet because it's still being referenced - retry in next iteration continue MANIFEST } } //no manifests left that reference this one - we can delete it // //The ordering is important: The DELETE statement could fail if some concurrent //process created a manifest reference in the meantime. If that happens, //and we have already deleted the manifest in the backing storage, we've //caused an inconsistency that we cannot recover from. To avoid that //risk, we do it the other way around. In this way, we could have an //inconsistency where the manifest is deleted from the database, but still //present in the backing storage. But this inconsistency is easier to //recover from: SweepStorageInNextAccount will take care of it soon //enough. Also the user will not notice this inconsistency because the DB //is our primary source of truth. _, err := j.db.Delete(&keppel.Manifest{RepositoryID: repo.ID, Digest: digest}) //without transaction: we need this committed right now if err != nil { return fmt.Errorf("cannot remove deleted manifest %s in repo %s from DB: %s", digest, repo.FullName(), err.Error()) } err = j.sd.DeleteManifest(account, repo.Name, digest) if err != nil { return fmt.Errorf("cannot remove deleted manifest %s in repo %s from storage: %s", digest, repo.FullName(), err.Error()) } //remove deletion from work queue (so that we can eventually exit from the outermost loop) delete(shallDeleteManifest, digest) //track deletion (so that we can eventually start deleting manifests referenced by this one) manifestWasDeleted[digest] = true //track that we're making progress deletedSomething = true } //we should be deleting something in each iteration, otherwise we will get stuck in an infinite loop if !deletedSomething { undeletedDigests := make([]string, 0, len(shallDeleteManifest)) for digest := range shallDeleteManifest { undeletedDigests = append(undeletedDigests, digest) } return fmt.Errorf("cannot remove deleted manifests %v in repo %s because they are still being referenced by other manifests (this smells like an inconsistency on the primary account)", undeletedDigests, repo.FullName()) } } return nil }
{ defer func() { if returnErr == nil { validateManifestSuccessCounter.Inc() } else if returnErr != sql.ErrNoRows { validateManifestFailedCounter.Inc() returnErr = fmt.Errorf("while validating a manifest: %s", returnErr.Error()) } }() //find manifest var manifest keppel.Manifest maxValidatedAt := j.timeNow().Add(-6 * time.Hour) err := j.db.SelectOne(&manifest, outdatedManifestSearchQuery, maxValidatedAt) if err != nil { if err == sql.ErrNoRows { logg.Debug("no manifests to validate - slowing down...") return sql.ErrNoRows } return err } //find corresponding account and repo var repo keppel.Repository err = j.db.SelectOne(&repo, `SELECT * FROM repos WHERE id = $1`, manifest.RepositoryID) if err != nil { return fmt.Errorf("cannot find repo %d for manifest %s: %s", manifest.RepositoryID, manifest.Digest, err.Error()) } account, err := keppel.FindAccount(j.db, repo.AccountName) if err != nil { return fmt.Errorf("cannot find account for manifest %s/%s: %s", repo.FullName(), manifest.Digest, err.Error()) } //perform validation ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() err = retry(ctx, defaultRetryOpts, func() error { return j.processor().ValidateExistingManifest(*account, repo, &manifest, j.timeNow()) }) if err == nil { //update `validated_at` and reset error message _, err := j.db.Exec(` UPDATE manifests SET validated_at = $1, validation_error_message = '' WHERE repo_id = $2 AND digest = $3`, j.timeNow(), repo.ID, manifest.Digest, ) if err != nil { return err } } else { //attempt to log the error message, and also update the `validated_at` //timestamp to ensure that the ValidateNextManifest() loop does not get //stuck on this one _, updateErr := j.db.Exec(` UPDATE manifests SET validated_at = $1, validation_error_message = $2 WHERE repo_id = $3 AND digest = $4`, j.timeNow(), err.Error(), repo.ID, manifest.Digest, ) if updateErr != nil { err = fmt.Errorf("%s (additional error encountered while recording validation error: %s)", err.Error(), updateErr.Error()) } return err } return nil }
identifier_body
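The ValidateNextManifest body above wraps the actual validation in retry(ctx, defaultRetryOpts, ...) under a 30-second context timeout. A sketch of that pattern in Go follows; the function name, attempt count, and backoff are assumptions, not keppel's real defaultRetryOpts:

package retrysketch

import (
	"context"
	"fmt"
	"time"
)

// retryWithContext re-runs fn with a fixed backoff until it succeeds, the
// context deadline expires, or the attempts are used up.
func retryWithContext(ctx context.Context, attempts int, backoff time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			// deadline hit while backing off: report both errors
			return fmt.Errorf("%s (giving up: %s)", err.Error(), ctx.Err().Error())
		case <-time.After(backoff):
		}
	}
	return err
}

A caller would wrap it the way the validation loop does: create the context with context.WithTimeout(context.Background(), 30*time.Second), defer cancel(), then pass the validation closure as fn. The original then records success or failure in validated_at and validation_error_message, which is what keeps the janitor loop from getting stuck on a permanently broken manifest.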
path_through.rs
#![allow(clippy::unnecessary_mut_passed)] #![warn(clippy::unimplemented, clippy::todo)] // This example is another version of `passthrough.rs` that uses the // path strings instead of the file descriptors with `O_PATH` flag // for referencing the underlying file entries. // It has the advantage of being able to use the // *standard* filesystem APIs directly, but it also incurs an additional // path resolution cost for each operation. // // This example is intended to be used as a template for implementing // path-based filesystems such as libfuse's highlevel API. use pico_args::Arguments; use polyfuse::{ io::{Reader, Writer}, op, reply::{Reply, ReplyAttr, ReplyEntry, ReplyOpen, ReplyWrite}, Context, DirEntry, FileAttr, Filesystem, Forget, Operation, }; use slab::Slab; use std::{ collections::hash_map::{Entry, HashMap}, convert::TryInto, io, os::unix::prelude::*, path::{Path, PathBuf}, sync::Arc, }; use tokio::{ fs::{File, OpenOptions, ReadDir}, sync::Mutex, }; #[tokio::main] async fn main() -> anyhow::Result<()> { tracing_subscriber::fmt::init(); let mut args = Arguments::from_env(); let source: PathBuf = args .opt_value_from_str(["-s", "--source"])? .unwrap_or_else(|| std::env::current_dir().unwrap()); anyhow::ensure!(source.is_dir(), "the source path must be a directory"); let mountpoint: PathBuf = args .free_from_str()? .ok_or_else(|| anyhow::anyhow!("missing mountpoint"))?; anyhow::ensure!(mountpoint.is_dir(), "the mountpoint must be a directory"); let fs = PathThrough::new(source)?; polyfuse_tokio::mount(fs, mountpoint, &[]).await?; Ok(()) } type Ino = u64; struct INode { ino: Ino, path: PathBuf, refcount: u64, } struct INodeTable { map: HashMap<Ino, Arc<Mutex<INode>>>, path_to_ino: HashMap<PathBuf, Ino>, next_ino: u64, } impl INodeTable { fn new() -> Self { INodeTable { map: HashMap::new(), path_to_ino: HashMap::new(), next_ino: 1, // inode numbering starts at 1 and the first node is the root. 
} } fn vacant_entry(&mut self) -> VacantEntry<'_> { let ino = self.next_ino; VacantEntry { table: self, ino } } fn get(&self, ino: Ino) -> Option<Arc<Mutex<INode>>> { self.map.get(&ino).cloned() } fn get_path(&self, path: &Path) -> Option<Arc<Mutex<INode>>> { let ino = self.path_to_ino.get(path).copied()?; self.get(ino) } } struct VacantEntry<'a> { table: &'a mut INodeTable, ino: Ino, } impl VacantEntry<'_> { fn insert(mut self, inode: INode) { let path = inode.path.clone(); self.table.map.insert(self.ino, Arc::new(Mutex::new(inode))); self.table.path_to_ino.insert(path, self.ino); self.table.next_ino += 1; } } struct DirHandle { read_dir: ReadDir, last_entry: Option<DirEntry>, offset: u64, } struct FileHandle { file: File, } struct PathThrough { source: PathBuf, inodes: Mutex<INodeTable>, dirs: Mutex<Slab<Arc<Mutex<DirHandle>>>>, files: Mutex<Slab<Arc<Mutex<FileHandle>>>>, } impl PathThrough { fn new(source: PathBuf) -> io::Result<Self> { let source = source.canonicalize()?; let mut inodes = INodeTable::new(); inodes.vacant_entry().insert(INode { ino: 1, path: PathBuf::new(), refcount: u64::max_value() / 2, }); Ok(Self { source, inodes: Mutex::new(inodes), dirs: Mutex::default(), files: Mutex::default(), }) } fn make_entry_out(&self, ino: Ino, attr: FileAttr) -> io::Result<ReplyEntry> { let mut reply = ReplyEntry::default(); reply.ino(ino); reply.attr(attr); Ok(reply) } async fn get_attr(&self, path: impl AsRef<Path>) -> io::Result<FileAttr> { let metadata = tokio::fs::symlink_metadata(self.source.join(path)).await?; metadata .try_into() .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) } async fn do_lookup(&self, op: &op::Lookup<'_>) -> io::Result<ReplyEntry> { let mut inodes = self.inodes.lock().await; let parent = inodes.get(op.parent()).ok_or_else(no_entry)?; let parent = parent.lock().await; let path = parent.path.join(op.name()); let metadata = self.get_attr(&path).await?; let ino; match inodes.get_path(&path) { Some(inode) => { let mut inode = inode.lock().await; ino = inode.ino; inode.refcount += 1; } None => { let entry = inodes.vacant_entry(); ino = entry.ino; entry.insert(INode { ino, path, refcount: 1, }) } } self.make_entry_out(ino, metadata) } async fn do_forget(&self, forgets: &[Forget]) { let mut inodes = self.inodes.lock().await; for forget in forgets { if let Entry::Occupied(mut entry) = inodes.map.entry(forget.ino()) { let refcount = { let mut inode = entry.get_mut().lock().await; inode.refcount = inode.refcount.saturating_sub(forget.nlookup()); inode.refcount }; if refcount == 0 { tracing::debug!("remove ino={}", entry.key()); drop(entry.remove()); } } } } async fn do_getattr(&self, op: &op::Getattr<'_>) -> io::Result<ReplyAttr> { let inodes = self.inodes.lock().await; let inode = inodes.get(op.ino()).ok_or_else(no_entry)?; let inode = inode.lock().await; let attr = self.get_attr(&inode.path).await?; Ok(ReplyAttr::new(attr)) } async fn do_setattr(&self, op: &op::Setattr<'_>) -> io::Result<ReplyAttr> { let file = match op.fh() { Some(fh) => { let files = self.files.lock().await; files.get(fh as usize).cloned() } None => None, }; let mut file = match file { Some(ref file) => { let mut file = file.lock().await; file.file.sync_all().await?; Some(file) // keep file lock } None => None, }; let inode = { let inodes = self.inodes.lock().await; inodes.get(op.ino()).ok_or_else(no_entry)? 
}; let inode = inode.lock().await; let path = Arc::new(self.source.join(&inode.path)); enum FileRef<'a> { Borrowed(&'a mut File), Owned(File), } impl AsMut<File> for FileRef<'_> { fn as_mut(&mut self) -> &mut File { match self { Self::Borrowed(file) => file, Self::Owned(file) => file, } } } let mut file = match file { Some(ref mut file) => FileRef::Borrowed(&mut file.file), None => FileRef::Owned(File::open(&*path).await?), }; // chmod if let Some(mode) = op.mode() { let perm = std::fs::Permissions::from_mode(mode); file.as_mut().set_permissions(perm).await?; } // truncate if let Some(size) = op.size() { file.as_mut().set_len(size).await?; } // chown match (op.uid(), op.gid()) { (None, None) => (), (uid, gid) => { let path = path.clone(); let uid = uid.map(nix::unistd::Uid::from_raw); let gid = gid.map(nix::unistd::Gid::from_raw); tokio::task::spawn_blocking(move || nix::unistd::chown(&*path, uid, gid)) .await? .map_err(nix_to_io_error)?; } } // TODO: utimes let attr = self.get_attr(&inode.path).await?; Ok(ReplyAttr::new(attr)) } async fn do_readlink(&self, op: &op::Readlink<'_>) -> io::Result<PathBuf> { let inodes = self.inodes.lock().await; let inode = inodes.get(op.ino()).ok_or_else(no_entry)?; let inode = inode.lock().await; tokio::fs::read_link(self.source.join(&inode.path)).await } async fn do_opendir(&self, op: &op::Opendir<'_>) -> io::Result<ReplyOpen> { let inodes = self.inodes.lock().await; let inode = inodes.get(op.ino()).ok_or_else(no_entry)?; let inode = inode.lock().await; let dir = DirHandle { read_dir: tokio::fs::read_dir(self.source.join(&inode.path)).await?, last_entry: None, offset: 1, }; let mut dirs = self.dirs.lock().await; let key = dirs.insert(Arc::new(Mutex::new(dir))); Ok(ReplyOpen::new(key as u64)) } async fn do_readdir(&self, op: &op::Readdir<'_>) -> io::Result<impl Reply> { let dirs = self.dirs.lock().await; let dir = dirs .get(op.fh() as usize) .cloned() .ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?; let mut dir = dir.lock().await; let dir = &mut *dir; let mut entries = vec![]; let mut total_len = 0; if let Some(mut entry) = dir.last_entry.take() { if total_len + entry.as_ref().len() > op.size() as usize { return Err(io::Error::from_raw_os_error(libc::ERANGE)); } entry.set_offset(dir.offset); total_len += entry.as_ref().len(); dir.offset += 1; entries.push(entry); } while let Some(entry) = dir.read_dir.next_entry().await? { match entry.file_name() { name if name.as_bytes() == b"." || name.as_bytes() == b".." 
=> continue, _ => (), } let metadata = entry.metadata().await?; let mut entry = DirEntry::new(entry.file_name(), metadata.ino(), 0); if total_len + entry.as_ref().len() <= op.size() as usize { entry.set_offset(dir.offset); total_len += entry.as_ref().len(); dir.offset += 1; entries.push(entry); } else { dir.last_entry.replace(entry); } } Ok(entries) } async fn do_releasedir(&self, op: &op::Releasedir<'_>) -> io::Result<()> { let mut dirs = self.dirs.lock().await; let dir = dirs.remove(op.fh() as usize); drop(dir); Ok(()) } async fn do_open(&self, op: &op::Open<'_>) -> io::Result<ReplyOpen> { let inodes = self.inodes.lock().await; let inode = inodes.get(op.ino()).ok_or_else(no_entry)?; let inode = inode.lock().await; let options = OpenOptions::from({ let mut options = std::fs::OpenOptions::new(); match op.flags() as i32 & libc::O_ACCMODE { libc::O_RDONLY => { options.read(true); } libc::O_WRONLY => { options.write(true); } libc::O_RDWR => { options.read(true).write(true); } _ => (), } options.custom_flags(op.flags() as i32 & !libc::O_NOFOLLOW); options }); let file = FileHandle { file: options.open(self.source.join(&inode.path)).await?, }; let mut files = self.files.lock().await; let key = files.insert(Arc::new(Mutex::new(file))); Ok(ReplyOpen::new(key as u64)) } async fn do_read(&self, op: &op::Read<'_>) -> io::Result<impl Reply> { let files = self.files.lock().await; let file = files .get(op.fh() as usize) .cloned() .ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?; let mut file = file.lock().await; let file = &mut file.file; file.seek(io::SeekFrom::Start(op.offset())).await?; let mut buf = Vec::<u8>::with_capacity(op.size() as usize); use tokio::io::AsyncReadExt; tokio::io::copy(&mut file.take(op.size() as u64), &mut buf).await?; Ok(buf) } async fn do_write<R: ?Sized>( &self, op: &op::Write<'_>, reader: &mut R, ) -> io::Result<ReplyWrite> where R: Reader + Unpin, { let files = self.files.lock().await; let file = files .get(op.fh() as usize) .cloned() .ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?; let mut file = file.lock().await; let file = &mut file.file; file.seek(io::SeekFrom::Start(op.offset())).await?; // Here, the data is transferred via a temporary buffer due to // the incompatibility between the I/O abstractions in `futures` and // `tokio`. // // In order to transfer large files efficiently, both zero-copy // support in `polyfuse` and a resolution of the impedance mismatch // between `futures::io` and `tokio::io` are required.
{ use futures::io::AsyncReadExt; reader.read_to_end(&mut buf).await?; } use tokio::io::AsyncReadExt; let mut buf = &buf[..]; let mut buf = (&mut buf).take(op.size() as u64); let written = tokio::io::copy(&mut buf, &mut *file).await?; Ok(ReplyWrite::new(written as u32)) } async fn do_flush(&self, op: &op::Flush<'_>) -> io::Result<()> { let files = self.files.lock().await; let file = files .get(op.fh() as usize) .cloned() .ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?; let file = file.lock().await; file.file.try_clone().await?; Ok(()) } async fn do_fsync(&self, op: &op::Fsync<'_>) -> io::Result<()> { let files = self.files.lock().await; let file = files .get(op.fh() as usize) .cloned() .ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?; let mut file = file.lock().await; let file = &mut file.file; if op.datasync() { file.sync_data().await?; } else { file.sync_all().await?; } Ok(()) } async fn do_release(&self, op: &op::Release<'_>) -> io::Result<()> { let mut files = self.files.lock().await; let file = files.remove(op.fh() as usize); drop(file); Ok(()) } } #[polyfuse::async_trait] impl Filesystem for PathThrough { #[allow(clippy::cognitive_complexity)] async fn call<'a, 'cx, T: ?Sized>( &'a self, cx: &'a mut Context<'cx, T>, op: Operation<'cx>, ) -> io::Result<()> where T: Reader + Writer + Send + Unpin, { macro_rules! try_reply { ($e:expr) => { match ($e).await { Ok(reply) => cx.reply(reply).await, Err(err) => cx.reply_err(err.raw_os_error().unwrap_or(libc::EIO)).await, } }; } match op { Operation::Lookup(op) => try_reply!(self.do_lookup(&op)), Operation::Forget(forgets) => { self.do_forget(forgets.as_ref()).await; Ok(()) } Operation::Getattr(op) => try_reply!(self.do_getattr(&op)), Operation::Setattr(op) => try_reply!(self.do_setattr(&op)), Operation::Readlink(op) => try_reply!(self.do_readlink(&op)), Operation::Opendir(op) => try_reply!(self.do_opendir(&op)), Operation::Readdir(op) => try_reply!(self.do_readdir(&op)), Operation::Releasedir(op) => try_reply!(self.do_releasedir(&op)), Operation::Open(op) => try_reply!(self.do_open(&op)), Operation::Read(op) => try_reply!(self.do_read(&op)), Operation::Write(op) => { let res = self.do_write(&op, &mut cx.reader()).await; try_reply!(async { res }) } Operation::Flush(op) => try_reply!(self.do_flush(&op)), Operation::Fsync(op) => try_reply!(self.do_fsync(&op)), Operation::Release(op) => try_reply!(self.do_release(&op)), _ => Ok(()), } } } #[inline] fn no_entry() -> io::Error { io::Error::from_raw_os_error(libc::ENOENT) } fn nix_to_io_error(err: nix::Error) -> io::Error { let errno = err.as_errno().map_or(libc::EIO, |errno| errno as i32); io::Error::from_raw_os_error(errno) }
let mut buf = Vec::with_capacity(op.size() as usize);
random_line_split
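The do_write comment in the record above explains why the payload takes a detour through a temporary buffer before reaching the file. The same two-step flow, sketched in Go (writeAt and the package name are illustrative; the real code is the Rust shown above):

package fusesketch

import (
	"bytes"
	"io"
	"os"
)

// writeAt drains up to size bytes from r into a temporary buffer, then
// copies that buffer to the file at the requested offset, mirroring the
// buffered workaround described in the do_write comment.
func writeAt(f *os.File, r io.Reader, offset int64, size uint32) (uint32, error) {
	buf := &bytes.Buffer{}
	if _, err := io.Copy(buf, io.LimitReader(r, int64(size))); err != nil {
		return 0, err
	}
	if _, err := f.Seek(offset, io.SeekStart); err != nil {
		return 0, err
	}
	n, err := io.Copy(f, buf)
	return uint32(n), err
}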
path_through.rs
#![allow(clippy::unnecessary_mut_passed)] #![warn(clippy::unimplemented, clippy::todo)] // This example is another version of `passthrough.rs` that uses the // path strings instead of the file descriptors with `O_PATH` flag // for referencing the underlying file entries. // It has the advantage of being able to use the // *standard* filesystem APIs directly, but it also incurs an additional // path resolution cost for each operation. // // This example is intended to be used as a template for implementing // path-based filesystems such as libfuse's highlevel API. use pico_args::Arguments; use polyfuse::{ io::{Reader, Writer}, op, reply::{Reply, ReplyAttr, ReplyEntry, ReplyOpen, ReplyWrite}, Context, DirEntry, FileAttr, Filesystem, Forget, Operation, }; use slab::Slab; use std::{ collections::hash_map::{Entry, HashMap}, convert::TryInto, io, os::unix::prelude::*, path::{Path, PathBuf}, sync::Arc, }; use tokio::{ fs::{File, OpenOptions, ReadDir}, sync::Mutex, }; #[tokio::main] async fn main() -> anyhow::Result<()> { tracing_subscriber::fmt::init(); let mut args = Arguments::from_env(); let source: PathBuf = args .opt_value_from_str(["-s", "--source"])? .unwrap_or_else(|| std::env::current_dir().unwrap()); anyhow::ensure!(source.is_dir(), "the source path must be a directory"); let mountpoint: PathBuf = args .free_from_str()? .ok_or_else(|| anyhow::anyhow!("missing mountpoint"))?; anyhow::ensure!(mountpoint.is_dir(), "the mountpoint must be a directory"); let fs = PathThrough::new(source)?; polyfuse_tokio::mount(fs, mountpoint, &[]).await?; Ok(()) } type Ino = u64; struct INode { ino: Ino, path: PathBuf, refcount: u64, } struct INodeTable { map: HashMap<Ino, Arc<Mutex<INode>>>, path_to_ino: HashMap<PathBuf, Ino>, next_ino: u64, } impl INodeTable { fn new() -> Self { INodeTable { map: HashMap::new(), path_to_ino: HashMap::new(), next_ino: 1, // inode numbering starts at 1 and the first node is the root. } } fn vacant_entry(&mut self) -> VacantEntry<'_> { let ino = self.next_ino; VacantEntry { table: self, ino } } fn get(&self, ino: Ino) -> Option<Arc<Mutex<INode>>> { self.map.get(&ino).cloned() } fn
(&self, path: &Path) -> Option<Arc<Mutex<INode>>> { let ino = self.path_to_ino.get(path).copied()?; self.get(ino) } } struct VacantEntry<'a> { table: &'a mut INodeTable, ino: Ino, } impl VacantEntry<'_> { fn insert(mut self, inode: INode) { let path = inode.path.clone(); self.table.map.insert(self.ino, Arc::new(Mutex::new(inode))); self.table.path_to_ino.insert(path, self.ino); self.table.next_ino += 1; } } struct DirHandle { read_dir: ReadDir, last_entry: Option<DirEntry>, offset: u64, } struct FileHandle { file: File, } struct PathThrough { source: PathBuf, inodes: Mutex<INodeTable>, dirs: Mutex<Slab<Arc<Mutex<DirHandle>>>>, files: Mutex<Slab<Arc<Mutex<FileHandle>>>>, } impl PathThrough { fn new(source: PathBuf) -> io::Result<Self> { let source = source.canonicalize()?; let mut inodes = INodeTable::new(); inodes.vacant_entry().insert(INode { ino: 1, path: PathBuf::new(), refcount: u64::max_value() / 2, }); Ok(Self { source, inodes: Mutex::new(inodes), dirs: Mutex::default(), files: Mutex::default(), }) } fn make_entry_out(&self, ino: Ino, attr: FileAttr) -> io::Result<ReplyEntry> { let mut reply = ReplyEntry::default(); reply.ino(ino); reply.attr(attr); Ok(reply) } async fn get_attr(&self, path: impl AsRef<Path>) -> io::Result<FileAttr> { let metadata = tokio::fs::symlink_metadata(self.source.join(path)).await?; metadata .try_into() .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) } async fn do_lookup(&self, op: &op::Lookup<'_>) -> io::Result<ReplyEntry> { let mut inodes = self.inodes.lock().await; let parent = inodes.get(op.parent()).ok_or_else(no_entry)?; let parent = parent.lock().await; let path = parent.path.join(op.name()); let metadata = self.get_attr(&path).await?; let ino; match inodes.get_path(&path) { Some(inode) => { let mut inode = inode.lock().await; ino = inode.ino; inode.refcount += 1; } None => { let entry = inodes.vacant_entry(); ino = entry.ino; entry.insert(INode { ino, path, refcount: 1, }) } } self.make_entry_out(ino, metadata) } async fn do_forget(&self, forgets: &[Forget]) { let mut inodes = self.inodes.lock().await; for forget in forgets { if let Entry::Occupied(mut entry) = inodes.map.entry(forget.ino()) { let refcount = { let mut inode = entry.get_mut().lock().await; inode.refcount = inode.refcount.saturating_sub(forget.nlookup()); inode.refcount }; if refcount == 0 { tracing::debug!("remove ino={}", entry.key()); drop(entry.remove()); } } } } async fn do_getattr(&self, op: &op::Getattr<'_>) -> io::Result<ReplyAttr> { let inodes = self.inodes.lock().await; let inode = inodes.get(op.ino()).ok_or_else(no_entry)?; let inode = inode.lock().await; let attr = self.get_attr(&inode.path).await?; Ok(ReplyAttr::new(attr)) } async fn do_setattr(&self, op: &op::Setattr<'_>) -> io::Result<ReplyAttr> { let file = match op.fh() { Some(fh) => { let files = self.files.lock().await; files.get(fh as usize).cloned() } None => None, }; let mut file = match file { Some(ref file) => { let mut file = file.lock().await; file.file.sync_all().await?; Some(file) // keep file lock } None => None, }; let inode = { let inodes = self.inodes.lock().await; inodes.get(op.ino()).ok_or_else(no_entry)? 
}; let inode = inode.lock().await; let path = Arc::new(self.source.join(&inode.path)); enum FileRef<'a> { Borrowed(&'a mut File), Owned(File), } impl AsMut<File> for FileRef<'_> { fn as_mut(&mut self) -> &mut File { match self { Self::Borrowed(file) => file, Self::Owned(file) => file, } } } let mut file = match file { Some(ref mut file) => FileRef::Borrowed(&mut file.file), None => FileRef::Owned(File::open(&*path).await?), }; // chmod if let Some(mode) = op.mode() { let perm = std::fs::Permissions::from_mode(mode); file.as_mut().set_permissions(perm).await?; } // truncate if let Some(size) = op.size() { file.as_mut().set_len(size).await?; } // chown match (op.uid(), op.gid()) { (None, None) => (), (uid, gid) => { let path = path.clone(); let uid = uid.map(nix::unistd::Uid::from_raw); let gid = gid.map(nix::unistd::Gid::from_raw); tokio::task::spawn_blocking(move || nix::unistd::chown(&*path, uid, gid)) .await? .map_err(nix_to_io_error)?; } } // TODO: utimes let attr = self.get_attr(&inode.path).await?; Ok(ReplyAttr::new(attr)) } async fn do_readlink(&self, op: &op::Readlink<'_>) -> io::Result<PathBuf> { let inodes = self.inodes.lock().await; let inode = inodes.get(op.ino()).ok_or_else(no_entry)?; let inode = inode.lock().await; tokio::fs::read_link(self.source.join(&inode.path)).await } async fn do_opendir(&self, op: &op::Opendir<'_>) -> io::Result<ReplyOpen> { let inodes = self.inodes.lock().await; let inode = inodes.get(op.ino()).ok_or_else(no_entry)?; let inode = inode.lock().await; let dir = DirHandle { read_dir: tokio::fs::read_dir(self.source.join(&inode.path)).await?, last_entry: None, offset: 1, }; let mut dirs = self.dirs.lock().await; let key = dirs.insert(Arc::new(Mutex::new(dir))); Ok(ReplyOpen::new(key as u64)) } async fn do_readdir(&self, op: &op::Readdir<'_>) -> io::Result<impl Reply> { let dirs = self.dirs.lock().await; let dir = dirs .get(op.fh() as usize) .cloned() .ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?; let mut dir = dir.lock().await; let dir = &mut *dir; let mut entries = vec![]; let mut total_len = 0; if let Some(mut entry) = dir.last_entry.take() { if total_len + entry.as_ref().len() > op.size() as usize { return Err(io::Error::from_raw_os_error(libc::ERANGE)); } entry.set_offset(dir.offset); total_len += entry.as_ref().len(); dir.offset += 1; entries.push(entry); } while let Some(entry) = dir.read_dir.next_entry().await? { match entry.file_name() { name if name.as_bytes() == b"." || name.as_bytes() == b".." 
=> continue, _ => (), } let metadata = entry.metadata().await?; let mut entry = DirEntry::new(entry.file_name(), metadata.ino(), 0); if total_len + entry.as_ref().len() <= op.size() as usize { entry.set_offset(dir.offset); total_len += entry.as_ref().len(); dir.offset += 1; entries.push(entry); } else { dir.last_entry.replace(entry); } } Ok(entries) } async fn do_releasedir(&self, op: &op::Releasedir<'_>) -> io::Result<()> { let mut dirs = self.dirs.lock().await; let dir = dirs.remove(op.fh() as usize); drop(dir); Ok(()) } async fn do_open(&self, op: &op::Open<'_>) -> io::Result<ReplyOpen> { let inodes = self.inodes.lock().await; let inode = inodes.get(op.ino()).ok_or_else(no_entry)?; let inode = inode.lock().await; let options = OpenOptions::from({ let mut options = std::fs::OpenOptions::new(); match op.flags() as i32 & libc::O_ACCMODE { libc::O_RDONLY => { options.read(true); } libc::O_WRONLY => { options.write(true); } libc::O_RDWR => { options.read(true).write(true); } _ => (), } options.custom_flags(op.flags() as i32 & !libc::O_NOFOLLOW); options }); let file = FileHandle { file: options.open(self.source.join(&inode.path)).await?, }; let mut files = self.files.lock().await; let key = files.insert(Arc::new(Mutex::new(file))); Ok(ReplyOpen::new(key as u64)) } async fn do_read(&self, op: &op::Read<'_>) -> io::Result<impl Reply> { let files = self.files.lock().await; let file = files .get(op.fh() as usize) .cloned() .ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?; let mut file = file.lock().await; let file = &mut file.file; file.seek(io::SeekFrom::Start(op.offset())).await?; let mut buf = Vec::<u8>::with_capacity(op.size() as usize); use tokio::io::AsyncReadExt; tokio::io::copy(&mut file.take(op.size() as u64), &mut buf).await?; Ok(buf) } async fn do_write<R: ?Sized>( &self, op: &op::Write<'_>, reader: &mut R, ) -> io::Result<ReplyWrite> where R: Reader + Unpin, { let files = self.files.lock().await; let file = files .get(op.fh() as usize) .cloned() .ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?; let mut file = file.lock().await; let file = &mut file.file; file.seek(io::SeekFrom::Start(op.offset())).await?; // Here, the data is transferred via a temporary buffer due to // the incompatibility between the I/O abstractions in `futures` and // `tokio`. // // In order to transfer large files efficiently, both zero-copy // support in `polyfuse` and a resolution of the impedance mismatch // between `futures::io` and `tokio::io` are required. 
let mut buf = Vec::with_capacity(op.size() as usize); { use futures::io::AsyncReadExt; reader.read_to_end(&mut buf).await?; } use tokio::io::AsyncReadExt; let mut buf = &buf[..]; let mut buf = (&mut buf).take(op.size() as u64); let written = tokio::io::copy(&mut buf, &mut *file).await?; Ok(ReplyWrite::new(written as u32)) } async fn do_flush(&self, op: &op::Flush<'_>) -> io::Result<()> { let files = self.files.lock().await; let file = files .get(op.fh() as usize) .cloned() .ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?; let file = file.lock().await; file.file.try_clone().await?; Ok(()) } async fn do_fsync(&self, op: &op::Fsync<'_>) -> io::Result<()> { let files = self.files.lock().await; let file = files .get(op.fh() as usize) .cloned() .ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?; let mut file = file.lock().await; let file = &mut file.file; if op.datasync() { file.sync_data().await?; } else { file.sync_all().await?; } Ok(()) } async fn do_release(&self, op: &op::Release<'_>) -> io::Result<()> { let mut files = self.files.lock().await; let file = files.remove(op.fh() as usize); drop(file); Ok(()) } } #[polyfuse::async_trait] impl Filesystem for PathThrough { #[allow(clippy::cognitive_complexity)] async fn call<'a, 'cx, T: ?Sized>( &'a self, cx: &'a mut Context<'cx, T>, op: Operation<'cx>, ) -> io::Result<()> where T: Reader + Writer + Send + Unpin, { macro_rules! try_reply { ($e:expr) => { match ($e).await { Ok(reply) => cx.reply(reply).await, Err(err) => cx.reply_err(err.raw_os_error().unwrap_or(libc::EIO)).await, } }; } match op { Operation::Lookup(op) => try_reply!(self.do_lookup(&op)), Operation::Forget(forgets) => { self.do_forget(forgets.as_ref()).await; Ok(()) } Operation::Getattr(op) => try_reply!(self.do_getattr(&op)), Operation::Setattr(op) => try_reply!(self.do_setattr(&op)), Operation::Readlink(op) => try_reply!(self.do_readlink(&op)), Operation::Opendir(op) => try_reply!(self.do_opendir(&op)), Operation::Readdir(op) => try_reply!(self.do_readdir(&op)), Operation::Releasedir(op) => try_reply!(self.do_releasedir(&op)), Operation::Open(op) => try_reply!(self.do_open(&op)), Operation::Read(op) => try_reply!(self.do_read(&op)), Operation::Write(op) => { let res = self.do_write(&op, &mut cx.reader()).await; try_reply!(async { res }) } Operation::Flush(op) => try_reply!(self.do_flush(&op)), Operation::Fsync(op) => try_reply!(self.do_fsync(&op)), Operation::Release(op) => try_reply!(self.do_release(&op)), _ => Ok(()), } } } #[inline] fn no_entry() -> io::Error { io::Error::from_raw_os_error(libc::ENOENT) } fn nix_to_io_error(err: nix::Error) -> io::Error { let errno = err.as_errno().map_or(libc::EIO, |errno| errno as i32); io::Error::from_raw_os_error(errno) }
get_path
identifier_name
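Both path_through.rs records revolve around INodeTable: a bidirectional ino-to-path map where do_lookup bumps a per-inode refcount and do_forget decrements it until the inode can be dropped. A self-contained Go sketch of that bookkeeping, with all names illustrative (polyfuse itself has no such type):

package fusesketch

import "sync"

// inodeTable mirrors INodeTable from the Rust example: one map keyed by
// inode number, one keyed by path, plus a counter for fresh numbers.
type inodeTable struct {
	mu        sync.Mutex
	byIno     map[uint64]*inode
	inoByPath map[string]uint64
	nextIno   uint64
}

type inode struct {
	ino      uint64
	path     string
	refcount uint64
}

func newInodeTable() *inodeTable {
	t := &inodeTable{byIno: map[uint64]*inode{}, inoByPath: map[string]uint64{}, nextIno: 2}
	// ino 1 is the root; pinning it with a huge refcount mirrors the
	// u64::max_value()/2 trick in the Rust example.
	t.byIno[1] = &inode{ino: 1, refcount: 1 << 62}
	t.inoByPath[""] = 1
	return t
}

// lookup reuses the inode for a known path (bumping its refcount, as
// do_lookup does) or allocates a fresh inode number.
func (t *inodeTable) lookup(path string) *inode {
	t.mu.Lock()
	defer t.mu.Unlock()
	if ino, ok := t.inoByPath[path]; ok {
		n := t.byIno[ino]
		n.refcount++
		return n
	}
	n := &inode{ino: t.nextIno, path: path, refcount: 1}
	t.byIno[n.ino] = n
	t.inoByPath[path] = n.ino
	t.nextIno++
	return n
}

// forget applies a FUSE forget: subtract nlookup with saturation and drop
// the inode from both maps once the count reaches zero, like do_forget.
func (t *inodeTable) forget(ino, nlookup uint64) {
	t.mu.Lock()
	defer t.mu.Unlock()
	n, ok := t.byIno[ino]
	if !ok {
		return
	}
	if n.refcount > nlookup {
		n.refcount -= nlookup
		return
	}
	delete(t.byIno, ino)
	delete(t.inoByPath, n.path)
}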
vvMakeVjetsShapes.py
#!/usr/bin/env python import ROOT from array import array from CMGTools.VVResonances.plotting.TreePlotter import TreePlotter from CMGTools.VVResonances.plotting.MergedPlotter import MergedPlotter from CMGTools.VVResonances.plotting.StackPlotter import StackPlotter from CMGTools.VVResonances.statistics.Fitter import Fitter from math import log import os, sys, re, optparse,pickle,shutil,json ROOT.gROOT.SetBatch(True) ROOT.gStyle.SetOptStat(0) parser = optparse.OptionParser() parser.add_option("-s","--sample",dest="sample",default='',help="Type of sample") parser.add_option("-c","--cut",dest="cut",help="Cut to apply for shape",default='') parser.add_option("-o","--output",dest="output",help="Output JSON",default='') parser.add_option("-m","--min",dest="mini",type=float,help="min MJJ",default=40) parser.add_option("-M","--max",dest="maxi",type=float,help="max MJJ",default=160) parser.add_option("--store",dest="store",type=str,help="store fitted parameters in this file",default="") parser.add_option("--corrFactorW",dest="corrFactorW",type=float,help="add correction factor xsec",default=0.205066345) parser.add_option("--corrFactorZ",dest="corrFactorZ",type=float,help="add correction factor xsec",default=0.09811023622) parser.add_option("-f","--fix",dest="fixPars",help="Fixed parameters",default="1") parser.add_option("--minMVV","--minMVV",dest="minMVV",type=float,help="mVV variable",default=1) parser.add_option("--maxMVV","--maxMVV",dest="maxMVV",type=float, help="mVV variable",default=1) parser.add_option("--binsMVV",dest="binsMVV",help="use special binning",default="") parser.add_option("-t","--triggerweight",dest="triggerW",action="store_true",help="Use trigger weights",default=False) (options,args) = parser.parse_args() samples={} def getBinning(binsMVV,minx,maxx,bins): l=[] if binsMVV=="": for i in range(0,bins+1): l.append(minx + i* (maxx - minx)/bins) else: s = binsMVV.split(",") for w in s: l.append(int(w)) return l def returnString(func,ftype,varname): if ftype.find("pol")!=-1: st='0' for i in range(0,func.GetNpar()): st=st+"+("+str(func.GetParameter(i))+")"+("*{varname}".format(varname=varname)*i) return st else: return "" def doFit(fitter,histo,histo_nonRes,label,leg): params={} print "fitting "+histo.GetName()+" contribution " exp = ROOT.TF1("gaus" ,"gaus",55,215) histo_nonRes.Fit(exp,"R") gauss = ROOT.TF1("gauss" ,"gaus",74,94) if histo.GetName().find("Z")!=-1: gauss = ROOT.TF1("gauss","gaus",80,100) histo.Fit(gauss,"R") mean = gauss.GetParameter(1) sigma = gauss.GetParameter(2) print "____________________________________" print "mean "+str(mean) print "sigma "+str(sigma) print "set parameters of the double CB constant around the ones from the gaussian fit" fitter.w.var("mean").setVal(mean) fitter.w.var("mean").setConstant(1) #fitter.w.var("sigma").setVal(sigma) #fitter.w.var("sigma").setConstant(1) print "_____________________________________" fitter.importBinnedData(histo,['x'],'data') fitter.fit('model','data',[ROOT.RooFit.SumW2Error(1),ROOT.RooFit.Save(1),ROOT.RooFit.Range(55,120)]) #55,140 works well with fitting only the resonant part #ROOT.RooFit.Minos(ROOT.kTRUE) fitter.projection("model","data","x","debugJ"+leg+"_"+label+"_Res.pdf",0,False,"m_{jet}") c= getCanvas(label) histo_nonRes.SetMarkerStyle(1) histo_nonRes.SetMarkerColor(ROOT.kBlack) histo_nonRes.GetXaxis().SetTitle("m_{jet}") histo_nonRes.GetYaxis().SetTitleOffset(1.5) histo_nonRes.GetYaxis().SetTitle("events") histo_nonRes.Draw("p") exp.SetLineColor(ROOT.kRed) exp.Draw("same") text = ROOT.TLatex() 
text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debugJ"+leg+"_"+label+"_nonRes.pdf") params[label+"_Res_"+leg]={"mean": {"val": fitter.w.var("mean").getVal(), "err": fitter.w.var("mean").getError()}, "sigma": {"val": fitter.w.var("sigma").getVal(), "err": fitter.w.var("sigma").getError()}, "alpha":{ "val": fitter.w.var("alpha").getVal(), "err": fitter.w.var("alpha").getError()},"alpha2":{"val": fitter.w.var("alpha2").getVal(),"err": fitter.w.var("alpha2").getError()},"n":{ "val": fitter.w.var("n").getVal(), "err": fitter.w.var("n").getError()},"n2": {"val": fitter.w.var("n2").getVal(), "err": fitter.w.var("n2").getError()}} params[label+"_nonRes_"+leg]={"mean": {"val":exp.GetParameter(1),"err":exp.GetParError(1)},"sigma": {"val":exp.GetParameter(2),"err":exp.GetParError(2)}} return params def getCanvas(name): c=ROOT.TCanvas(name,name) c.cd() c.SetFillColor(0) c.SetBorderMode(0) c.SetFrameFillStyle(0) c.SetFrameBorderMode(0) c.SetLeftMargin(0.13) c.SetRightMargin(0.08) c.SetTopMargin( 0.1 ) c.SetBottomMargin( 0.12 ) return c label = options.output.split(".root")[0] t = label.split("_") el="" for words in t: if words.find("HP")!=-1 or words.find("LP")!=-1: continue el+=words+"_" label = el samplenames = options.sample.split(",") for filename in os.listdir(args[0]): for samplename in samplenames: if not (filename.find(samplename)!=-1): continue fnameParts=filename.split('.') fname=fnameParts[0] ext=fnameParts[1] if ext.find("root") ==-1: continue name = fname.split('_')[0] samples[name] = fname print 'found',filename sigmas=[] params={} legs=["l1","l2"] plotters=[] names = [] for name in samples.keys(): plotters.append(TreePlotter(args[0]+'/'+samples[name]+'.root','tree')) plotters[-1].setupFromFile(args[0]+'/'+samples[name]+'.pck') plotters[-1].addCorrectionFactor('xsec','tree') plotters[-1].addCorrectionFactor('genWeight','tree') plotters[-1].addCorrectionFactor('puWeight','tree') if options.triggerW: plotters[-1].addCorrectionFactor('triggerWeight','tree') corrFactor = options.corrFactorW if samples[name].find('Z') != -1: corrFactor = options.corrFactorZ if samples[name].find('W') != -1: corrFactor = options.corrFactorW plotters[-1].addCorrectionFactor(corrFactor,'flat') names.append(samples[name]) print 'Fitting Mjet:' histos2D_l2={} histos2D={} histos2D_nonRes={} histos2D_nonRes_l2={} for p in range(0,len(plotters)): key ="Wjets" if str(names[p]).find("ZJets")!=-1: key = "Zjets" if str(names[p]).find("TT")!=-1: key = "TTbar" print "make histo for "+key histos2D_nonRes [key] = plotters[p].drawTH2("jj_l1_softDrop_mass:jj_l2_softDrop_mass",options.cut+"*(jj_l1_mergedVTruth==0)*(jj_l1_softDrop_mass>55&&jj_l1_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_nonRes [key].SetName(key+"_nonResl1") histos2D [key] = plotters[p].drawTH2("jj_l1_softDrop_mass:jj_l2_softDrop_mass",options.cut+"*(jj_l1_mergedVTruth==1)*(jj_l1_softDrop_mass>55&&jj_l1_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D [key].SetName(key+"_Resl1") histos2D_nonRes_l2 [key] = plotters[p].drawTH2("jj_l2_softDrop_mass:jj_l1_softDrop_mass",options.cut+"*(jj_l2_mergedVTruth==0)*(jj_l2_softDrop_mass>55&&jj_l2_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_nonRes_l2 [key].SetName(key+"_nonResl2") histos2D_l2 [key] = plotters[p].drawTH2("jj_l2_softDrop_mass:jj_l1_softDrop_mass",options.cut+"*(jj_l2_mergedVTruth==1)*(jj_l2_softDrop_mass>55&&jj_l2_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_l2 [key].SetName(key+"_Resl2") histos2D[key].Scale(35900.) histos2D_l2[key].Scale(35900.) 
histos2D_nonRes[key].Scale(35900.) histos2D_nonRes_l2[key].Scale(35900.) ############################ tmpfile = ROOT.TFile("test.root","RECREATE") for key in histos2D.keys(): histos2D_l2[key].Write() histos2D_nonRes[key].Write() histos2D_nonRes_l2[key].Write() histos2D[key].Write() ########################### for leg in legs: histos = {} histos_nonRes = {} scales={} scales_nonRes={} purity = "LPLP" if options.output.find("HPHP")!=-1:purity = "HPHP" if options.output.find("HPLP")!=-1:purity = "HPLP" fitter=Fitter(['x']) fitter.jetResonanceVjets('model','x') if options.fixPars!="1": fixedPars =options.fixPars.split(',') if len(fixedPars) > 0: print " - Fix parameters: ", fixedPars for par in fixedPars: parVal = par.split(':') fitter.w.var(parVal[0]).setVal(float(parVal[1])) fitter.w.var(parVal[0]).setConstant(1) for key in histos2D.keys(): if leg=="l1": histos_nonRes [key] = histos2D_nonRes[key].ProjectionY() histos [key] = histos2D[key].ProjectionY() else: histos_nonRes [key] = histos2D_nonRes_l2[key].ProjectionY() histos [key] = histos2D_l2[key].ProjectionY() histos_nonRes[key].SetName(key+"_nonRes") histos [key].SetName(key) scales [key] = histos[key].Integral() scales_nonRes [key] = histos_nonRes[key].Integral() # combine ttbar and wjets contributions: Wjets = histos["Wjets"] Wjets_nonRes = histos_nonRes["Wjets"] if 'TTbar' in histos.keys(): Wjets.Add(histos["TTbar"]); Wjets_nonRes.Add(histos_nonRes["TTbar"]) keys = ["Wjets"] Wjets_params = doFit(fitter,Wjets,Wjets_nonRes,"Wjets_TTbar",leg) params.update(Wjets_params) params["ratio_Res_nonRes_"+leg]= {'ratio':scales["Wjets"]/scales_nonRes["Wjets"] } if 'Zjets' in histos.keys(): keys.append("Zjets") fitterZ=Fitter(['x']) fitterZ.jetResonanceVjets('model','x') Zjets_params = doFit(fitterZ,histos["Zjets"],histos_nonRes["Zjets"],"Zjets",leg) params.update(Wjets_params) params.update(Zjets_params) params["ratio_Res_nonRes_"+leg]= {'ratio': scales["Wjets"]/scales_nonRes["Wjets"] , 'ratio_Z': scales["Zjets"]/scales_nonRes["Zjets"]} if "Zjets" in histos.keys() and "TTbar" in histos.keys(): params["ratio_Res_nonRes_"+leg]= {'ratio': scales["Wjets"]/scales_nonRes["Wjets"] , 'ratio_Z': scales["Zjets"]/scales_nonRes["Zjets"],'ratio_TT': scales["TTbar"]/scales_nonRes["TTbar"]} fitter.drawVjets("Vjets_mjetRes_"+leg+"_"+purity+".pdf",histos,histos_nonRes,scales,scales_nonRes) del histos,histos_nonRes,fitter,fitterZ graphs={} projections=[[1,3],[4,6],[7,10],[11,15],[16,20],[21,26],[27,35],[36,50],[51,61],[62,75],[76,80]] for key in keys: graphs[key]=ROOT.TGraphErrors() n=0 for p in projections: i1 = histos2D[key].ProjectionY("tmp1",p[0],p[1]).Integral() i2 = histos2D_nonRes_l2[key].ProjectionY("tmp2",p[0],p[1]).Integral() i1_l2 = histos2D_l2[key].ProjectionY("tmp1",p[0],p[1]).Integral() i2_l2 = histos2D_nonRes[key].ProjectionY("tmp2",p[0],p[1]).Integral() graphs[key].SetPoint(n,55+p[0]*2+(p[1]-p[0]),(i1/i2 +i1_l2/i2_l2)/2.) if (key=="Wjets") and ("TTbar" in histos2D.keys()): norm = histos2D["TTbar"].Integral()/histos2D["Wjets"].Integral() tt_i1 = histos2D["TTbar"].ProjectionY("tmp1",p[0],p[1]).Integral()*norm tt_i2 = histos2D_nonRes_l2["TTbar"].ProjectionY("tmp2",p[0],p[1]).Integral() tt_i1_l2 = histos2D_l2["TTbar"].ProjectionY("tmp1",p[0],p[1]) .Integral()*norm tt_i2_l2 = histos2D_nonRes["TTbar"].ProjectionY("tmp2",p[0],p[1]).Integral() graphs[key].SetPoint(n,55+p[0]*2+(p[1]-p[0]),(i1/i2 +i1_l2/i2_l2)/2.+(tt_i1/tt_i2 + tt_i1_l2/tt_i2_l2)/2.) 
err = ROOT.TMath.Sqrt(pow(ROOT.TMath.Sqrt(i1)/i2 + ROOT.TMath.Sqrt(i2)*i1/(i2*i2),2)+pow(ROOT.TMath.Sqrt(i1_l2)/i2_l2 + ROOT.TMath.Sqrt(i2_l2)*i1_l2/(i2_l2*i2_l2),2)) graphs[key].SetPointError(n,0,err) print "set point errors "+str(err) n+=1 func=ROOT.TF1("pol","pol6",55,215) func2=ROOT.TF1("pol","pol6",55,215) l="ratio" for key in graphs.keys(): if key.find("Z")!=-1: l="ratio_Z" if key.find("T")!=-1: l="ratio_TT" if key.find("W")!=-1: l="ratio" if key.find("W")!=-1: graphs[key].Fit(func) st = returnString(func,"pol","MJ2") params["ratio_Res_nonRes_l1"][l] = st st = returnString(func,"pol","MJ1") params["ratio_Res_nonRes_l2"][l] = st else: graphs[key].Fit(func2) st = returnString(func2,"pol","MJ2") params["ratio_Res_nonRes_l1"][l] = st st = returnString(func2,"pol","MJ1") params["ratio_Res_nonRes_l2"][l] = st graphs[key].SetMarkerColor(ROOT.kBlack) graphs[key].SetMarkerStyle(1) graphs[key].SetMarkerColor(ROOT.kBlue) graphs[key].SetMarkerStyle(2) graphs[key].GetXaxis().SetTitle("m_{jet1}") graphs[key].GetYaxis().SetTitle("res/nonRes") graphs[key].GetFunction("pol").SetLineColor(ROOT.kBlack) graphs[key].GetXaxis().SetRangeUser(55,215) graphs[key].SetMinimum(0) c =getCanvas("c") graphs["Wjets"].Draw("AP") graphs["Wjets"].GetFunction("pol").Draw("same") legend = ROOT.TLegend(0.5607383,0.2063123,0.85,0.3089701) legend.SetLineWidth(2) legend.SetBorderSize(0) legend.SetFillColor(0) legend.SetTextFont(42) legend.SetTextSize(0.04) legend.SetTextAlign(12) legend.AddEntry(graphs["Wjets"],"ratio W+jets + t#bar{t}","lp") legend.AddEntry(graphs["Wjets"].GetFunction("pol"),"fit ","lp") legend.Draw("same") text = ROOT.TLatex() text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debug_corr_l1_l2_Wjets.pdf") if 'Zjets' in graphs.keys(): c = getCanvas("zjets") graphs["Zjets"].Draw("AP") graphs["Zjets"].GetFunction("pol").Draw("same") legend = ROOT.TLegend(0.5607383,0.2063123,0.85,0.3089701) legend.SetLineWidth(2) legend.SetBorderSize(0)
legend.SetFillColor(0) legend.SetTextFont(42) legend.SetTextSize(0.04) legend.SetTextAlign(12) legend.AddEntry(graphs["Zjets"],"ratio Z+jets m_{jet1}","lp") legend.AddEntry(graphs["Zjets"].GetFunction("pol"),"fit m_{jet1}","lp") legend.Draw("same") text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debug_corr_l1_l2_Zjets.pdf") if 'TTbar' in graphs.keys(): c=getCanvas("ttbar") graphs["TTbar"].Draw("AP") graphs["TTbar"].GetFunction("pol").Draw("same") legend = ROOT.TLegend(0.5607383,0.2063123,0.85,0.3089701) legend.SetLineWidth(2) legend.SetBorderSize(0) legend.SetFillColor(0) legend.SetTextFont(42) legend.SetTextSize(0.04) legend.SetTextAlign(12) legend.AddEntry(graphs["TTbar"],"ratio TTbar m_{jet1}","lp") legend.AddEntry(graphs["TTbar"].GetFunction("pol"),"fit m_{jet1}","lp") legend.Draw("same") text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debug_corr_l1_l2_TTbar.pdf") if options.store!="": print "write to file "+options.store f=open(options.store,"w") for par in params: f.write(str(par)+ " = " +str(params[par])+"\n")
random_line_split
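Assuming the conventional fill-in-the-middle layout (each record carrying file_name, prefix, suffix, middle and a fim_type tag such as the random_line_split above), the original source file is recovered by splicing the middle back between prefix and suffix; fim_type only records how the middle span was carved out. A minimal sketch of that reconstruction, with a hypothetical row dict standing in for one record:

def reconstruct(row):
    # row is a plain dict standing in for one dataset record (assumption)
    return row["prefix"] + row["middle"] + row["suffix"]

row = {"prefix": "def f(x):\n    return ", "middle": "x + 1", "suffix": "\n"}
assert reconstruct(row) == "def f(x):\n    return x + 1\n"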
vvMakeVjetsShapes.py
#!/usr/bin/env python import ROOT from array import array from CMGTools.VVResonances.plotting.TreePlotter import TreePlotter from CMGTools.VVResonances.plotting.MergedPlotter import MergedPlotter from CMGTools.VVResonances.plotting.StackPlotter import StackPlotter from CMGTools.VVResonances.statistics.Fitter import Fitter from math import log import os, sys, re, optparse,pickle,shutil,json ROOT.gROOT.SetBatch(True) ROOT.gStyle.SetOptStat(0) parser = optparse.OptionParser() parser.add_option("-s","--sample",dest="sample",default='',help="Type of sample") parser.add_option("-c","--cut",dest="cut",help="Cut to apply for shape",default='') parser.add_option("-o","--output",dest="output",help="Output JSON",default='') parser.add_option("-m","--min",dest="mini",type=float,help="min MJJ",default=40) parser.add_option("-M","--max",dest="maxi",type=float,help="max MJJ",default=160) parser.add_option("--store",dest="store",type=str,help="store fitted parameters in this file",default="") parser.add_option("--corrFactorW",dest="corrFactorW",type=float,help="add correction factor xsec",default=0.205066345) parser.add_option("--corrFactorZ",dest="corrFactorZ",type=float,help="add correction factor xsec",default=0.09811023622) parser.add_option("-f","--fix",dest="fixPars",help="Fixed parameters",default="1") parser.add_option("--minMVV","--minMVV",dest="minMVV",type=float,help="mVV variable",default=1) parser.add_option("--maxMVV","--maxMVV",dest="maxMVV",type=float, help="mVV variable",default=1) parser.add_option("--binsMVV",dest="binsMVV",help="use special binning",default="") parser.add_option("-t","--triggerweight",dest="triggerW",action="store_true",help="Use trigger weights",default=False) (options,args) = parser.parse_args() samples={} def getBinning(binsMVV,minx,maxx,bins): l=[] if binsMVV=="": for i in range(0,bins+1): l.append(minx + i* (maxx - minx)/bins) else: s = binsMVV.split(",") for w in s: l.append(int(w)) return l def returnString(func,ftype,varname): if ftype.find("pol")!=-1: st='0' for i in range(0,func.GetNpar()): st=st+"+("+str(func.GetParameter(i))+")"+("*{varname}".format(varname=varname)*i) return st else: return "" def doFit(fitter,histo,histo_nonRes,label,leg): params={} print "fitting "+histo.GetName()+" contribution " exp = ROOT.TF1("gaus" ,"gaus",55,215) histo_nonRes.Fit(exp,"R") gauss = ROOT.TF1("gauss" ,"gaus",74,94) if histo.GetName().find("Z")!=-1: gauss = ROOT.TF1("gauss","gaus",80,100) histo.Fit(gauss,"R") mean = gauss.GetParameter(1) sigma = gauss.GetParameter(2) print "____________________________________" print "mean "+str(mean) print "sigma "+str(sigma) print "set parameters of double CB constant around the ones from the Gaussian fit" fitter.w.var("mean").setVal(mean) fitter.w.var("mean").setConstant(1) #fitter.w.var("sigma").setVal(sigma) #fitter.w.var("sigma").setConstant(1) print "_____________________________________" fitter.importBinnedData(histo,['x'],'data') fitter.fit('model','data',[ROOT.RooFit.SumW2Error(1),ROOT.RooFit.Save(1),ROOT.RooFit.Range(55,120)]) #55,140 works well with fitting only the resonant part #ROOT.RooFit.Minos(ROOT.kTRUE) fitter.projection("model","data","x","debugJ"+leg+"_"+label+"_Res.pdf",0,False,"m_{jet}") c= getCanvas(label) histo_nonRes.SetMarkerStyle(1) histo_nonRes.SetMarkerColor(ROOT.kBlack) histo_nonRes.GetXaxis().SetTitle("m_{jet}") histo_nonRes.GetYaxis().SetTitleOffset(1.5) histo_nonRes.GetYaxis().SetTitle("events") histo_nonRes.Draw("p") exp.SetLineColor(ROOT.kRed) exp.Draw("same") text = ROOT.TLatex() 
text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debugJ"+leg+"_"+label+"_nonRes.pdf") params[label+"_Res_"+leg]={"mean": {"val": fitter.w.var("mean").getVal(), "err": fitter.w.var("mean").getError()}, "sigma": {"val": fitter.w.var("sigma").getVal(), "err": fitter.w.var("sigma").getError()}, "alpha":{ "val": fitter.w.var("alpha").getVal(), "err": fitter.w.var("alpha").getError()},"alpha2":{"val": fitter.w.var("alpha2").getVal(),"err": fitter.w.var("alpha2").getError()},"n":{ "val": fitter.w.var("n").getVal(), "err": fitter.w.var("n").getError()},"n2": {"val": fitter.w.var("n2").getVal(), "err": fitter.w.var("n2").getError()}} params[label+"_nonRes_"+leg]={"mean": {"val":exp.GetParameter(1),"err":exp.GetParError(1)},"sigma": {"val":exp.GetParameter(2),"err":exp.GetParError(2)}} return params def getCanvas(name):
label = options.output.split(".root")[0] t = label.split("_") el="" for words in t: if words.find("HP")!=-1 or words.find("LP")!=-1: continue el+=words+"_" label = el samplenames = options.sample.split(",") for filename in os.listdir(args[0]): for samplename in samplenames: if not (filename.find(samplename)!=-1): continue fnameParts=filename.split('.') fname=fnameParts[0] ext=fnameParts[1] if ext.find("root") ==-1: continue name = fname.split('_')[0] samples[name] = fname print 'found',filename sigmas=[] params={} legs=["l1","l2"] plotters=[] names = [] for name in samples.keys(): plotters.append(TreePlotter(args[0]+'/'+samples[name]+'.root','tree')) plotters[-1].setupFromFile(args[0]+'/'+samples[name]+'.pck') plotters[-1].addCorrectionFactor('xsec','tree') plotters[-1].addCorrectionFactor('genWeight','tree') plotters[-1].addCorrectionFactor('puWeight','tree') if options.triggerW: plotters[-1].addCorrectionFactor('triggerWeight','tree') corrFactor = options.corrFactorW if samples[name].find('Z') != -1: corrFactor = options.corrFactorZ if samples[name].find('W') != -1: corrFactor = options.corrFactorW plotters[-1].addCorrectionFactor(corrFactor,'flat') names.append(samples[name]) print 'Fitting Mjet:' histos2D_l2={} histos2D={} histos2D_nonRes={} histos2D_nonRes_l2={} for p in range(0,len(plotters)): key ="Wjets" if str(names[p]).find("ZJets")!=-1: key = "Zjets" if str(names[p]).find("TT")!=-1: key = "TTbar" print "make histo for "+key histos2D_nonRes [key] = plotters[p].drawTH2("jj_l1_softDrop_mass:jj_l2_softDrop_mass",options.cut+"*(jj_l1_mergedVTruth==0)*(jj_l1_softDrop_mass>55&&jj_l1_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_nonRes [key].SetName(key+"_nonResl1") histos2D [key] = plotters[p].drawTH2("jj_l1_softDrop_mass:jj_l2_softDrop_mass",options.cut+"*(jj_l1_mergedVTruth==1)*(jj_l1_softDrop_mass>55&&jj_l1_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D [key].SetName(key+"_Resl1") histos2D_nonRes_l2 [key] = plotters[p].drawTH2("jj_l2_softDrop_mass:jj_l1_softDrop_mass",options.cut+"*(jj_l2_mergedVTruth==0)*(jj_l2_softDrop_mass>55&&jj_l2_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_nonRes_l2 [key].SetName(key+"_nonResl2") histos2D_l2 [key] = plotters[p].drawTH2("jj_l2_softDrop_mass:jj_l1_softDrop_mass",options.cut+"*(jj_l2_mergedVTruth==1)*(jj_l2_softDrop_mass>55&&jj_l2_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_l2 [key].SetName(key+"_Resl2") histos2D[key].Scale(35900.) histos2D_l2[key].Scale(35900.) histos2D_nonRes[key].Scale(35900.) histos2D_nonRes_l2[key].Scale(35900.) 
############################ tmpfile = ROOT.TFile("test.root","RECREATE") for key in histos2D.keys(): histos2D_l2[key].Write() histos2D_nonRes[key].Write() histos2D_nonRes_l2[key].Write() histos2D[key].Write() ########################### for leg in legs: histos = {} histos_nonRes = {} scales={} scales_nonRes={} purity = "LPLP" if options.output.find("HPHP")!=-1:purity = "HPHP" if options.output.find("HPLP")!=-1:purity = "HPLP" fitter=Fitter(['x']) fitter.jetResonanceVjets('model','x') if options.fixPars!="1": fixedPars =options.fixPars.split(',') if len(fixedPars) > 0: print " - Fix parameters: ", fixedPars for par in fixedPars: parVal = par.split(':') fitter.w.var(parVal[0]).setVal(float(parVal[1])) fitter.w.var(parVal[0]).setConstant(1) for key in histos2D.keys(): if leg=="l1": histos_nonRes [key] = histos2D_nonRes[key].ProjectionY() histos [key] = histos2D[key].ProjectionY() else: histos_nonRes [key] = histos2D_nonRes_l2[key].ProjectionY() histos [key] = histos2D_l2[key].ProjectionY() histos_nonRes[key].SetName(key+"_nonRes") histos [key].SetName(key) scales [key] = histos[key].Integral() scales_nonRes [key] = histos_nonRes[key].Integral() # combine ttbar and wjets contributions: Wjets = histos["Wjets"] Wjets_nonRes = histos_nonRes["Wjets"] if 'TTbar' in histos.keys(): Wjets.Add(histos["TTbar"]); Wjets_nonRes.Add(histos_nonRes["TTbar"]) keys = ["Wjets"] Wjets_params = doFit(fitter,Wjets,Wjets_nonRes,"Wjets_TTbar",leg) params.update(Wjets_params) params["ratio_Res_nonRes_"+leg]= {'ratio':scales["Wjets"]/scales_nonRes["Wjets"] } if 'Zjets' in histos.keys(): keys.append("Zjets") fitterZ=Fitter(['x']) fitterZ.jetResonanceVjets('model','x') Zjets_params = doFit(fitterZ,histos["Zjets"],histos_nonRes["Zjets"],"Zjets",leg) params.update(Wjets_params) params.update(Zjets_params) params["ratio_Res_nonRes_"+leg]= {'ratio': scales["Wjets"]/scales_nonRes["Wjets"] , 'ratio_Z': scales["Zjets"]/scales_nonRes["Zjets"]} if "Zjets" in histos.keys() and "TTbar" in histos.keys(): params["ratio_Res_nonRes_"+leg]= {'ratio': scales["Wjets"]/scales_nonRes["Wjets"] , 'ratio_Z': scales["Zjets"]/scales_nonRes["Zjets"],'ratio_TT': scales["TTbar"]/scales_nonRes["TTbar"]} fitter.drawVjets("Vjets_mjetRes_"+leg+"_"+purity+".pdf",histos,histos_nonRes,scales,scales_nonRes) del histos,histos_nonRes,fitter,fitterZ graphs={} projections=[[1,3],[4,6],[7,10],[11,15],[16,20],[21,26],[27,35],[36,50],[51,61],[62,75],[76,80]] for key in keys: graphs[key]=ROOT.TGraphErrors() n=0 for p in projections: i1 = histos2D[key].ProjectionY("tmp1",p[0],p[1]).Integral() i2 = histos2D_nonRes_l2[key].ProjectionY("tmp2",p[0],p[1]).Integral() i1_l2 = histos2D_l2[key].ProjectionY("tmp1",p[0],p[1]).Integral() i2_l2 = histos2D_nonRes[key].ProjectionY("tmp2",p[0],p[1]).Integral() graphs[key].SetPoint(n,55+p[0]*2+(p[1]-p[0]),(i1/i2 +i1_l2/i2_l2)/2.) if (key=="Wjets") and ("TTbar" in histos2D.keys()): norm = histos2D["TTbar"].Integral()/histos2D["Wjets"].Integral() tt_i1 = histos2D["TTbar"].ProjectionY("tmp1",p[0],p[1]).Integral()*norm tt_i2 = histos2D_nonRes_l2["TTbar"].ProjectionY("tmp2",p[0],p[1]).Integral() tt_i1_l2 = histos2D_l2["TTbar"].ProjectionY("tmp1",p[0],p[1]) .Integral()*norm tt_i2_l2 = histos2D_nonRes["TTbar"].ProjectionY("tmp2",p[0],p[1]).Integral() graphs[key].SetPoint(n,55+p[0]*2+(p[1]-p[0]),(i1/i2 +i1_l2/i2_l2)/2.+(tt_i1/tt_i2 + tt_i1_l2/tt_i2_l2)/2.) 
err = ROOT.TMath.Sqrt(pow(ROOT.TMath.Sqrt(i1)/i2 + ROOT.TMath.Sqrt(i2)*i1/(i2*i2),2)+pow(ROOT.TMath.Sqrt(i1_l2)/i2_l2 + ROOT.TMath.Sqrt(i2_l2)*i1_l2/(i2_l2*i2_l2),2)) graphs[key].SetPointError(n,0,err) print "set point errors "+str(err) n+=1 func=ROOT.TF1("pol","pol6",55,215) func2=ROOT.TF1("pol","pol6",55,215) l="ratio" for key in graphs.keys(): if key.find("Z")!=-1: l="ratio_Z" if key.find("T")!=-1: l="ratio_TT" if key.find("W")!=-1: l="ratio" if key.find("W")!=-1: graphs[key].Fit(func) st = returnString(func,"pol","MJ2") params["ratio_Res_nonRes_l1"][l] = st st = returnString(func,"pol","MJ1") params["ratio_Res_nonRes_l2"][l] = st else: graphs[key].Fit(func2) st = returnString(func2,"pol","MJ2") params["ratio_Res_nonRes_l1"][l] = st st = returnString(func2,"pol","MJ1") params["ratio_Res_nonRes_l2"][l] = st graphs[key].SetMarkerColor(ROOT.kBlack) graphs[key].SetMarkerStyle(1) graphs[key].SetMarkerColor(ROOT.kBlue) graphs[key].SetMarkerStyle(2) graphs[key].GetXaxis().SetTitle("m_{jet1}") graphs[key].GetYaxis().SetTitle("res/nonRes") graphs[key].GetFunction("pol").SetLineColor(ROOT.kBlack) graphs[key].GetXaxis().SetRangeUser(55,215) graphs[key].SetMinimum(0) c =getCanvas("c") graphs["Wjets"].Draw("AP") graphs["Wjets"].GetFunction("pol").Draw("same") legend = ROOT.TLegend(0.5607383,0.2063123,0.85,0.3089701) legend.SetLineWidth(2) legend.SetBorderSize(0) legend.SetFillColor(0) legend.SetTextFont(42) legend.SetTextSize(0.04) legend.SetTextAlign(12) legend.AddEntry(graphs["Wjets"],"ratio W+jets + t#bar{t}","lp") legend.AddEntry(graphs["Wjets"].GetFunction("pol"),"fit ","lp") legend.Draw("same") text = ROOT.TLatex() text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debug_corr_l1_l2_Wjets.pdf") if 'Zjets' in graphs.keys(): c = getCanvas("zjets") graphs["Zjets"].Draw("AP") graphs["Zjets"].GetFunction("pol").Draw("same") legend = ROOT.TLegend(0.5607383,0.2063123,0.85,0.3089701) legend.SetLineWidth(2) legend.SetBorderSize(0) legend.SetFillColor(0) legend.SetTextFont(42) legend.SetTextSize(0.04) legend.SetTextAlign(12) legend.AddEntry(graphs["Zjets"],"ratio Z+jets m_{jet1}","lp") legend.AddEntry(graphs["Zjets"].GetFunction("pol"),"fit m_{jet1}","lp") legend.Draw("same") text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debug_corr_l1_l2_Zjets.pdf") if 'TTbar' in graphs.keys(): c=getCanvas("ttbar") graphs["TTbar"].Draw("AP") graphs["TTbar"].GetFunction("pol").Draw("same") legend = ROOT.TLegend(0.5607383,0.2063123,0.85,0.3089701) legend.SetLineWidth(2) legend.SetBorderSize(0) legend.SetFillColor(0) legend.SetTextFont(42) legend.SetTextSize(0.04) legend.SetTextAlign(12) legend.AddEntry(graphs["TTbar"],"ratio TTbar m_{jet1}","lp") legend.AddEntry(graphs["TTbar"].GetFunction("pol"),"fit m_{jet1}","lp") legend.Draw("same") text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debug_corr_l1_l2_TTbar.pdf") if options.store!="": print "write to file "+options.store f=open(options.store,"w") for par in params: f.write(str(par)+ " = " +str(params[par])+"\n")
c=ROOT.TCanvas(name,name) c.cd() c.SetFillColor(0) c.SetBorderMode(0) c.SetFrameFillStyle(0) c.SetFrameBorderMode(0) c.SetLeftMargin(0.13) c.SetRightMargin(0.08) c.SetTopMargin( 0.1 ) c.SetBottomMargin( 0.12 ) return c
identifier_body
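The doFit helper in the record above follows a common two-stage seeding pattern: a quick Gaussian pre-fit locates the resonance peak, its mean is frozen via fitter.w.var("mean").setVal(mean) and setConstant(1), and only then is the fuller double-Crystal-Ball model fit over 55-120 GeV. A ROOT-free sketch of the same idea, using numpy/scipy purely as stand-ins (gauss, mean_seed and the toy peak values are illustrative, not from the script):

import numpy as np
from scipy.optimize import curve_fit

def gauss(x, norm, mean, sigma):
    return norm * np.exp(-0.5 * ((x - mean) / sigma) ** 2)

rng = np.random.default_rng(0)
data = rng.normal(80.4, 8.0, 10000)              # toy W-like jet-mass peak
hist, edges = np.histogram(data, bins=80, range=(55, 215))
centers = 0.5 * (edges[:-1] + edges[1:])

popt, _ = curve_fit(gauss, centers, hist, p0=(hist.max(), 80.0, 10.0))
mean_seed = popt[1]   # analogue of fixing "mean" before the double-CB fit
print("seeded mean:", round(mean_seed, 2))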
vvMakeVjetsShapes.py
#!/usr/bin/env python import ROOT from array import array from CMGTools.VVResonances.plotting.TreePlotter import TreePlotter from CMGTools.VVResonances.plotting.MergedPlotter import MergedPlotter from CMGTools.VVResonances.plotting.StackPlotter import StackPlotter from CMGTools.VVResonances.statistics.Fitter import Fitter from math import log import os, sys, re, optparse,pickle,shutil,json ROOT.gROOT.SetBatch(True) ROOT.gStyle.SetOptStat(0) parser = optparse.OptionParser() parser.add_option("-s","--sample",dest="sample",default='',help="Type of sample") parser.add_option("-c","--cut",dest="cut",help="Cut to apply for shape",default='') parser.add_option("-o","--output",dest="output",help="Output JSON",default='') parser.add_option("-m","--min",dest="mini",type=float,help="min MJJ",default=40) parser.add_option("-M","--max",dest="maxi",type=float,help="max MJJ",default=160) parser.add_option("--store",dest="store",type=str,help="store fitted parameters in this file",default="") parser.add_option("--corrFactorW",dest="corrFactorW",type=float,help="add correction factor xsec",default=0.205066345) parser.add_option("--corrFactorZ",dest="corrFactorZ",type=float,help="add correction factor xsec",default=0.09811023622) parser.add_option("-f","--fix",dest="fixPars",help="Fixed parameters",default="1") parser.add_option("--minMVV","--minMVV",dest="minMVV",type=float,help="mVV variable",default=1) parser.add_option("--maxMVV","--maxMVV",dest="maxMVV",type=float, help="mVV variable",default=1) parser.add_option("--binsMVV",dest="binsMVV",help="use special binning",default="") parser.add_option("-t","--triggerweight",dest="triggerW",action="store_true",help="Use trigger weights",default=False) (options,args) = parser.parse_args() samples={} def getBinning(binsMVV,minx,maxx,bins): l=[] if binsMVV=="": for i in range(0,bins+1): l.append(minx + i* (maxx - minx)/bins) else: s = binsMVV.split(",") for w in s: l.append(int(w)) return l def
(func,ftype,varname): if ftype.find("pol")!=-1: st='0' for i in range(0,func.GetNpar()): st=st+"+("+str(func.GetParameter(i))+")"+("*{varname}".format(varname=varname)*i) return st else: return "" def doFit(fitter,histo,histo_nonRes,label,leg): params={} print "fitting "+histo.GetName()+" contribution " exp = ROOT.TF1("gaus" ,"gaus",55,215) histo_nonRes.Fit(exp,"R") gauss = ROOT.TF1("gauss" ,"gaus",74,94) if histo.GetName().find("Z")!=-1: gauss = ROOT.TF1("gauss","gaus",80,100) histo.Fit(gauss,"R") mean = gauss.GetParameter(1) sigma = gauss.GetParameter(2) print "____________________________________" print "mean "+str(mean) print "sigma "+str(sigma) print "set parameters of double CB constant around the ones from the Gaussian fit" fitter.w.var("mean").setVal(mean) fitter.w.var("mean").setConstant(1) #fitter.w.var("sigma").setVal(sigma) #fitter.w.var("sigma").setConstant(1) print "_____________________________________" fitter.importBinnedData(histo,['x'],'data') fitter.fit('model','data',[ROOT.RooFit.SumW2Error(1),ROOT.RooFit.Save(1),ROOT.RooFit.Range(55,120)]) #55,140 works well with fitting only the resonant part #ROOT.RooFit.Minos(ROOT.kTRUE) fitter.projection("model","data","x","debugJ"+leg+"_"+label+"_Res.pdf",0,False,"m_{jet}") c= getCanvas(label) histo_nonRes.SetMarkerStyle(1) histo_nonRes.SetMarkerColor(ROOT.kBlack) histo_nonRes.GetXaxis().SetTitle("m_{jet}") histo_nonRes.GetYaxis().SetTitleOffset(1.5) histo_nonRes.GetYaxis().SetTitle("events") histo_nonRes.Draw("p") exp.SetLineColor(ROOT.kRed) exp.Draw("same") text = ROOT.TLatex() text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debugJ"+leg+"_"+label+"_nonRes.pdf") params[label+"_Res_"+leg]={"mean": {"val": fitter.w.var("mean").getVal(), "err": fitter.w.var("mean").getError()}, "sigma": {"val": fitter.w.var("sigma").getVal(), "err": fitter.w.var("sigma").getError()}, "alpha":{ "val": fitter.w.var("alpha").getVal(), "err": fitter.w.var("alpha").getError()},"alpha2":{"val": fitter.w.var("alpha2").getVal(),"err": fitter.w.var("alpha2").getError()},"n":{ "val": fitter.w.var("n").getVal(), "err": fitter.w.var("n").getError()},"n2": {"val": fitter.w.var("n2").getVal(), "err": fitter.w.var("n2").getError()}} params[label+"_nonRes_"+leg]={"mean": {"val":exp.GetParameter(1),"err":exp.GetParError(1)},"sigma": {"val":exp.GetParameter(2),"err":exp.GetParError(2)}} return params def getCanvas(name): c=ROOT.TCanvas(name,name) c.cd() c.SetFillColor(0) c.SetBorderMode(0) c.SetFrameFillStyle(0) c.SetFrameBorderMode(0) c.SetLeftMargin(0.13) c.SetRightMargin(0.08) c.SetTopMargin( 0.1 ) c.SetBottomMargin( 0.12 ) return c label = options.output.split(".root")[0] t = label.split("_") el="" for words in t: if words.find("HP")!=-1 or words.find("LP")!=-1: continue el+=words+"_" label = el samplenames = options.sample.split(",") for filename in os.listdir(args[0]): for samplename in samplenames: if not (filename.find(samplename)!=-1): continue fnameParts=filename.split('.') fname=fnameParts[0] ext=fnameParts[1] if ext.find("root") ==-1: continue name = fname.split('_')[0] samples[name] = fname print 'found',filename sigmas=[] params={} legs=["l1","l2"] plotters=[] names = [] for name in samples.keys(): plotters.append(TreePlotter(args[0]+'/'+samples[name]+'.root','tree')) plotters[-1].setupFromFile(args[0]+'/'+samples[name]+'.pck') plotters[-1].addCorrectionFactor('xsec','tree') plotters[-1].addCorrectionFactor('genWeight','tree') plotters[-1].addCorrectionFactor('puWeight','tree') if options.triggerW: 
plotters[-1].addCorrectionFactor('triggerWeight','tree') corrFactor = options.corrFactorW if samples[name].find('Z') != -1: corrFactor = options.corrFactorZ if samples[name].find('W') != -1: corrFactor = options.corrFactorW plotters[-1].addCorrectionFactor(corrFactor,'flat') names.append(samples[name]) print 'Fitting Mjet:' histos2D_l2={} histos2D={} histos2D_nonRes={} histos2D_nonRes_l2={} for p in range(0,len(plotters)): key ="Wjets" if str(names[p]).find("ZJets")!=-1: key = "Zjets" if str(names[p]).find("TT")!=-1: key = "TTbar" print "make histo for "+key histos2D_nonRes [key] = plotters[p].drawTH2("jj_l1_softDrop_mass:jj_l2_softDrop_mass",options.cut+"*(jj_l1_mergedVTruth==0)*(jj_l1_softDrop_mass>55&&jj_l1_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_nonRes [key].SetName(key+"_nonResl1") histos2D [key] = plotters[p].drawTH2("jj_l1_softDrop_mass:jj_l2_softDrop_mass",options.cut+"*(jj_l1_mergedVTruth==1)*(jj_l1_softDrop_mass>55&&jj_l1_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D [key].SetName(key+"_Resl1") histos2D_nonRes_l2 [key] = plotters[p].drawTH2("jj_l2_softDrop_mass:jj_l1_softDrop_mass",options.cut+"*(jj_l2_mergedVTruth==0)*(jj_l2_softDrop_mass>55&&jj_l2_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_nonRes_l2 [key].SetName(key+"_nonResl2") histos2D_l2 [key] = plotters[p].drawTH2("jj_l2_softDrop_mass:jj_l1_softDrop_mass",options.cut+"*(jj_l2_mergedVTruth==1)*(jj_l2_softDrop_mass>55&&jj_l2_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_l2 [key].SetName(key+"_Resl2") histos2D[key].Scale(35900.) histos2D_l2[key].Scale(35900.) histos2D_nonRes[key].Scale(35900.) histos2D_nonRes_l2[key].Scale(35900.) ############################ tmpfile = ROOT.TFile("test.root","RECREATE") for key in histos2D.keys(): histos2D_l2[key].Write() histos2D_nonRes[key].Write() histos2D_nonRes_l2[key].Write() histos2D[key].Write() ########################### for leg in legs: histos = {} histos_nonRes = {} scales={} scales_nonRes={} purity = "LPLP" if options.output.find("HPHP")!=-1:purity = "HPHP" if options.output.find("HPLP")!=-1:purity = "HPLP" fitter=Fitter(['x']) fitter.jetResonanceVjets('model','x') if options.fixPars!="1": fixedPars =options.fixPars.split(',') if len(fixedPars) > 0: print " - Fix parameters: ", fixedPars for par in fixedPars: parVal = par.split(':') fitter.w.var(parVal[0]).setVal(float(parVal[1])) fitter.w.var(parVal[0]).setConstant(1) for key in histos2D.keys(): if leg=="l1": histos_nonRes [key] = histos2D_nonRes[key].ProjectionY() histos [key] = histos2D[key].ProjectionY() else: histos_nonRes [key] = histos2D_nonRes_l2[key].ProjectionY() histos [key] = histos2D_l2[key].ProjectionY() histos_nonRes[key].SetName(key+"_nonRes") histos [key].SetName(key) scales [key] = histos[key].Integral() scales_nonRes [key] = histos_nonRes[key].Integral() # combine ttbar and wjets contributions: Wjets = histos["Wjets"] Wjets_nonRes = histos_nonRes["Wjets"] if 'TTbar' in histos.keys(): Wjets.Add(histos["TTbar"]); Wjets_nonRes.Add(histos_nonRes["TTbar"]) keys = ["Wjets"] Wjets_params = doFit(fitter,Wjets,Wjets_nonRes,"Wjets_TTbar",leg) params.update(Wjets_params) params["ratio_Res_nonRes_"+leg]= {'ratio':scales["Wjets"]/scales_nonRes["Wjets"] } if 'Zjets' in histos.keys(): keys.append("Zjets") fitterZ=Fitter(['x']) fitterZ.jetResonanceVjets('model','x') Zjets_params = doFit(fitterZ,histos["Zjets"],histos_nonRes["Zjets"],"Zjets",leg) params.update(Wjets_params) params.update(Zjets_params) params["ratio_Res_nonRes_"+leg]= {'ratio': scales["Wjets"]/scales_nonRes["Wjets"] , 
'ratio_Z': scales["Zjets"]/scales_nonRes["Zjets"]} if "Zjets" in histos.keys() and "TTbar" in histos.keys(): params["ratio_Res_nonRes_"+leg]= {'ratio': scales["Wjets"]/scales_nonRes["Wjets"] , 'ratio_Z': scales["Zjets"]/scales_nonRes["Zjets"],'ratio_TT': scales["TTbar"]/scales_nonRes["TTbar"]} fitter.drawVjets("Vjets_mjetRes_"+leg+"_"+purity+".pdf",histos,histos_nonRes,scales,scales_nonRes) del histos,histos_nonRes,fitter,fitterZ graphs={} projections=[[1,3],[4,6],[7,10],[11,15],[16,20],[21,26],[27,35],[36,50],[51,61],[62,75],[76,80]] for key in keys: graphs[key]=ROOT.TGraphErrors() n=0 for p in projections: i1 = histos2D[key].ProjectionY("tmp1",p[0],p[1]).Integral() i2 = histos2D_nonRes_l2[key].ProjectionY("tmp2",p[0],p[1]).Integral() i1_l2 = histos2D_l2[key].ProjectionY("tmp1",p[0],p[1]).Integral() i2_l2 = histos2D_nonRes[key].ProjectionY("tmp2",p[0],p[1]).Integral() graphs[key].SetPoint(n,55+p[0]*2+(p[1]-p[0]),(i1/i2 +i1_l2/i2_l2)/2.) if (key=="Wjets") and ("TTbar" in histos2D.keys()): norm = histos2D["TTbar"].Integral()/histos2D["Wjets"].Integral() tt_i1 = histos2D["TTbar"].ProjectionY("tmp1",p[0],p[1]).Integral()*norm tt_i2 = histos2D_nonRes_l2["TTbar"].ProjectionY("tmp2",p[0],p[1]).Integral() tt_i1_l2 = histos2D_l2["TTbar"].ProjectionY("tmp1",p[0],p[1]) .Integral()*norm tt_i2_l2 = histos2D_nonRes["TTbar"].ProjectionY("tmp2",p[0],p[1]).Integral() graphs[key].SetPoint(n,55+p[0]*2+(p[1]-p[0]),(i1/i2 +i1_l2/i2_l2)/2.+(tt_i1/tt_i2 + tt_i1_l2/tt_i2_l2)/2.) err = ROOT.TMath.Sqrt(pow(ROOT.TMath.Sqrt(i1)/i2 + ROOT.TMath.Sqrt(i2)*i1/(i2*i2),2)+pow(ROOT.TMath.Sqrt(i1_l2)/i2_l2 + ROOT.TMath.Sqrt(i2_l2)*i1_l2/(i2_l2*i2_l2),2)) graphs[key].SetPointError(n,0,err) print "set point errors "+str(err) n+=1 func=ROOT.TF1("pol","pol6",55,215) func2=ROOT.TF1("pol","pol6",55,215) l="ratio" for key in graphs.keys(): if key.find("Z")!=-1: l="ratio_Z" if key.find("T")!=-1: l="ratio_TT" if key.find("W")!=-1: l="ratio" if key.find("W")!=-1: graphs[key].Fit(func) st = returnString(func,"pol","MJ2") params["ratio_Res_nonRes_l1"][l] = st st = returnString(func,"pol","MJ1") params["ratio_Res_nonRes_l2"][l] = st else: graphs[key].Fit(func2) st = returnString(func2,"pol","MJ2") params["ratio_Res_nonRes_l1"][l] = st st = returnString(func2,"pol","MJ1") params["ratio_Res_nonRes_l2"][l] = st graphs[key].SetMarkerColor(ROOT.kBlack) graphs[key].SetMarkerStyle(1) graphs[key].SetMarkerColor(ROOT.kBlue) graphs[key].SetMarkerStyle(2) graphs[key].GetXaxis().SetTitle("m_{jet1}") graphs[key].GetYaxis().SetTitle("res/nonRes") graphs[key].GetFunction("pol").SetLineColor(ROOT.kBlack) graphs[key].GetXaxis().SetRangeUser(55,215) graphs[key].SetMinimum(0) c =getCanvas("c") graphs["Wjets"].Draw("AP") graphs["Wjets"].GetFunction("pol").Draw("same") legend = ROOT.TLegend(0.5607383,0.2063123,0.85,0.3089701) legend.SetLineWidth(2) legend.SetBorderSize(0) legend.SetFillColor(0) legend.SetTextFont(42) legend.SetTextSize(0.04) legend.SetTextAlign(12) legend.AddEntry(graphs["Wjets"],"ratio W+jets + t#bar{t}","lp") legend.AddEntry(graphs["Wjets"].GetFunction("pol"),"fit ","lp") legend.Draw("same") text = ROOT.TLatex() text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debug_corr_l1_l2_Wjets.pdf") if 'Zjets' in graphs.keys(): c = getCanvas("zjets") graphs["Zjets"].Draw("AP") graphs["Zjets"].GetFunction("pol").Draw("same") legend = ROOT.TLegend(0.5607383,0.2063123,0.85,0.3089701) legend.SetLineWidth(2) legend.SetBorderSize(0) legend.SetFillColor(0) legend.SetTextFont(42) legend.SetTextSize(0.04) legend.SetTextAlign(12) 
legend.AddEntry(graphs["Zjets"],"ratio Z+jets m_{jet1}","lp") legend.AddEntry(graphs["Zjets"].GetFunction("pol"),"fit m_{jet1}","lp") legend.Draw("same") text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debug_corr_l1_l2_Zjets.pdf") if 'TTbar' in graphs.keys(): c=getCanvas("ttbar") graphs["TTbar"].Draw("AP") graphs["TTbar"].GetFunction("pol").Draw("same") legend = ROOT.TLegend(0.5607383,0.2063123,0.85,0.3089701) legend.SetLineWidth(2) legend.SetBorderSize(0) legend.SetFillColor(0) legend.SetTextFont(42) legend.SetTextSize(0.04) legend.SetTextAlign(12) legend.AddEntry(graphs["TTbar"],"ratio TTbar m_{jet1}","lp") legend.AddEntry(graphs["TTbar"].GetFunction("pol"),"fit m_{jet1}","lp") legend.Draw("same") text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debug_corr_l1_l2_TTbar.pdf") if options.store!="": print "write to file "+options.store f=open(options.store,"w") for par in params: f.write(str(par)+ " = " +str(params[par])+"\n")
returnString
identifier_name
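The held-out identifier above is returnString, which serializes a fitted polynomial TF1 into an evaluable string: parameter i contributes a term (p_i) followed by *varname repeated i times, i.e. p_i*varname^i. A pure-Python mimic that shows the exact string shape it emits (pars stands in for the TF1 parameters; illustrative only):

def return_string(pars, varname):
    # pars plays the role of [func.GetParameter(i) for i in range(func.GetNpar())]
    st = '0'
    for i, p in enumerate(pars):
        st = st + "+(" + str(p) + ")" + ("*{v}".format(v=varname) * i)
    return st

print(return_string([1.5, -0.02], "MJ1"))   # -> 0+(1.5)+(-0.02)*MJ1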
vvMakeVjetsShapes.py
#!/usr/bin/env python import ROOT from array import array from CMGTools.VVResonances.plotting.TreePlotter import TreePlotter from CMGTools.VVResonances.plotting.MergedPlotter import MergedPlotter from CMGTools.VVResonances.plotting.StackPlotter import StackPlotter from CMGTools.VVResonances.statistics.Fitter import Fitter from math import log import os, sys, re, optparse,pickle,shutil,json ROOT.gROOT.SetBatch(True) ROOT.gStyle.SetOptStat(0) parser = optparse.OptionParser() parser.add_option("-s","--sample",dest="sample",default='',help="Type of sample") parser.add_option("-c","--cut",dest="cut",help="Cut to apply for shape",default='') parser.add_option("-o","--output",dest="output",help="Output JSON",default='') parser.add_option("-m","--min",dest="mini",type=float,help="min MJJ",default=40) parser.add_option("-M","--max",dest="maxi",type=float,help="max MJJ",default=160) parser.add_option("--store",dest="store",type=str,help="store fitted parameters in this file",default="") parser.add_option("--corrFactorW",dest="corrFactorW",type=float,help="add correction factor xsec",default=0.205066345) parser.add_option("--corrFactorZ",dest="corrFactorZ",type=float,help="add correction factor xsec",default=0.09811023622) parser.add_option("-f","--fix",dest="fixPars",help="Fixed parameters",default="1") parser.add_option("--minMVV","--minMVV",dest="minMVV",type=float,help="mVV variable",default=1) parser.add_option("--maxMVV","--maxMVV",dest="maxMVV",type=float, help="mVV variable",default=1) parser.add_option("--binsMVV",dest="binsMVV",help="use special binning",default="") parser.add_option("-t","--triggerweight",dest="triggerW",action="store_true",help="Use trigger weights",default=False) (options,args) = parser.parse_args() samples={} def getBinning(binsMVV,minx,maxx,bins): l=[] if binsMVV=="": for i in range(0,bins+1): l.append(minx + i* (maxx - minx)/bins) else: s = binsMVV.split(",") for w in s: l.append(int(w)) return l def returnString(func,ftype,varname): if ftype.find("pol")!=-1: st='0' for i in range(0,func.GetNpar()): st=st+"+("+str(func.GetParameter(i))+")"+("*{varname}".format(varname=varname)*i) return st else: return "" def doFit(fitter,histo,histo_nonRes,label,leg): params={} print "fitting "+histo.GetName()+" contribution " exp = ROOT.TF1("gaus" ,"gaus",55,215) histo_nonRes.Fit(exp,"R") gauss = ROOT.TF1("gauss" ,"gaus",74,94) if histo.GetName().find("Z")!=-1: gauss = ROOT.TF1("gauss","gaus",80,100) histo.Fit(gauss,"R") mean = gauss.GetParameter(1) sigma = gauss.GetParameter(2) print "____________________________________" print "mean "+str(mean) print "sigma "+str(sigma) print "set parameters of double CB constant around the ones from the Gaussian fit" fitter.w.var("mean").setVal(mean) fitter.w.var("mean").setConstant(1) #fitter.w.var("sigma").setVal(sigma) #fitter.w.var("sigma").setConstant(1) print "_____________________________________" fitter.importBinnedData(histo,['x'],'data') fitter.fit('model','data',[ROOT.RooFit.SumW2Error(1),ROOT.RooFit.Save(1),ROOT.RooFit.Range(55,120)]) #55,140 works well with fitting only the resonant part #ROOT.RooFit.Minos(ROOT.kTRUE) fitter.projection("model","data","x","debugJ"+leg+"_"+label+"_Res.pdf",0,False,"m_{jet}") c= getCanvas(label) histo_nonRes.SetMarkerStyle(1) histo_nonRes.SetMarkerColor(ROOT.kBlack) histo_nonRes.GetXaxis().SetTitle("m_{jet}") histo_nonRes.GetYaxis().SetTitleOffset(1.5) histo_nonRes.GetYaxis().SetTitle("events") histo_nonRes.Draw("p") exp.SetLineColor(ROOT.kRed) exp.Draw("same") text = ROOT.TLatex() 
text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debugJ"+leg+"_"+label+"_nonRes.pdf") params[label+"_Res_"+leg]={"mean": {"val": fitter.w.var("mean").getVal(), "err": fitter.w.var("mean").getError()}, "sigma": {"val": fitter.w.var("sigma").getVal(), "err": fitter.w.var("sigma").getError()}, "alpha":{ "val": fitter.w.var("alpha").getVal(), "err": fitter.w.var("alpha").getError()},"alpha2":{"val": fitter.w.var("alpha2").getVal(),"err": fitter.w.var("alpha2").getError()},"n":{ "val": fitter.w.var("n").getVal(), "err": fitter.w.var("n").getError()},"n2": {"val": fitter.w.var("n2").getVal(), "err": fitter.w.var("n2").getError()}} params[label+"_nonRes_"+leg]={"mean": {"val":exp.GetParameter(1),"err":exp.GetParError(1)},"sigma": {"val":exp.GetParameter(2),"err":exp.GetParError(2)}} return params def getCanvas(name): c=ROOT.TCanvas(name,name) c.cd() c.SetFillColor(0) c.SetBorderMode(0) c.SetFrameFillStyle(0) c.SetFrameBorderMode(0) c.SetLeftMargin(0.13) c.SetRightMargin(0.08) c.SetTopMargin( 0.1 ) c.SetBottomMargin( 0.12 ) return c label = options.output.split(".root")[0] t = label.split("_") el="" for words in t: if words.find("HP")!=-1 or words.find("LP")!=-1: continue el+=words+"_" label = el samplenames = options.sample.split(",") for filename in os.listdir(args[0]): for samplename in samplenames: if not (filename.find(samplename)!=-1): continue fnameParts=filename.split('.') fname=fnameParts[0] ext=fnameParts[1] if ext.find("root") ==-1: continue name = fname.split('_')[0] samples[name] = fname print 'found',filename sigmas=[] params={} legs=["l1","l2"] plotters=[] names = [] for name in samples.keys(): plotters.append(TreePlotter(args[0]+'/'+samples[name]+'.root','tree')) plotters[-1].setupFromFile(args[0]+'/'+samples[name]+'.pck') plotters[-1].addCorrectionFactor('xsec','tree') plotters[-1].addCorrectionFactor('genWeight','tree') plotters[-1].addCorrectionFactor('puWeight','tree') if options.triggerW: plotters[-1].addCorrectionFactor('triggerWeight','tree') corrFactor = options.corrFactorW if samples[name].find('Z') != -1: corrFactor = options.corrFactorZ if samples[name].find('W') != -1: corrFactor = options.corrFactorW plotters[-1].addCorrectionFactor(corrFactor,'flat') names.append(samples[name]) print 'Fitting Mjet:' histos2D_l2={} histos2D={} histos2D_nonRes={} histos2D_nonRes_l2={} for p in range(0,len(plotters)): key ="Wjets" if str(names[p]).find("ZJets")!=-1: key = "Zjets" if str(names[p]).find("TT")!=-1: key = "TTbar" print "make histo for "+key histos2D_nonRes [key] = plotters[p].drawTH2("jj_l1_softDrop_mass:jj_l2_softDrop_mass",options.cut+"*(jj_l1_mergedVTruth==0)*(jj_l1_softDrop_mass>55&&jj_l1_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_nonRes [key].SetName(key+"_nonResl1") histos2D [key] = plotters[p].drawTH2("jj_l1_softDrop_mass:jj_l2_softDrop_mass",options.cut+"*(jj_l1_mergedVTruth==1)*(jj_l1_softDrop_mass>55&&jj_l1_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D [key].SetName(key+"_Resl1") histos2D_nonRes_l2 [key] = plotters[p].drawTH2("jj_l2_softDrop_mass:jj_l1_softDrop_mass",options.cut+"*(jj_l2_mergedVTruth==0)*(jj_l2_softDrop_mass>55&&jj_l2_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_nonRes_l2 [key].SetName(key+"_nonResl2") histos2D_l2 [key] = plotters[p].drawTH2("jj_l2_softDrop_mass:jj_l1_softDrop_mass",options.cut+"*(jj_l2_mergedVTruth==1)*(jj_l2_softDrop_mass>55&&jj_l2_softDrop_mass<215)","1",80,55,215,80,55,215) histos2D_l2 [key].SetName(key+"_Resl2") histos2D[key].Scale(35900.) histos2D_l2[key].Scale(35900.) 
histos2D_nonRes[key].Scale(35900.) histos2D_nonRes_l2[key].Scale(35900.) ############################ tmpfile = ROOT.TFile("test.root","RECREATE") for key in histos2D.keys(): histos2D_l2[key].Write() histos2D_nonRes[key].Write() histos2D_nonRes_l2[key].Write() histos2D[key].Write() ########################### for leg in legs: histos = {} histos_nonRes = {} scales={} scales_nonRes={} purity = "LPLP" if options.output.find("HPHP")!=-1:purity = "HPHP" if options.output.find("HPLP")!=-1:purity = "HPLP" fitter=Fitter(['x']) fitter.jetResonanceVjets('model','x') if options.fixPars!="1": fixedPars =options.fixPars.split(',') if len(fixedPars) > 0: print " - Fix parameters: ", fixedPars for par in fixedPars: parVal = par.split(':') fitter.w.var(parVal[0]).setVal(float(parVal[1])) fitter.w.var(parVal[0]).setConstant(1) for key in histos2D.keys(): if leg=="l1": histos_nonRes [key] = histos2D_nonRes[key].ProjectionY() histos [key] = histos2D[key].ProjectionY() else: histos_nonRes [key] = histos2D_nonRes_l2[key].ProjectionY() histos [key] = histos2D_l2[key].ProjectionY() histos_nonRes[key].SetName(key+"_nonRes") histos [key].SetName(key) scales [key] = histos[key].Integral() scales_nonRes [key] = histos_nonRes[key].Integral() # combine ttbar and wjets contributions: Wjets = histos["Wjets"] Wjets_nonRes = histos_nonRes["Wjets"] if 'TTbar' in histos.keys(): Wjets.Add(histos["TTbar"]); Wjets_nonRes.Add(histos_nonRes["TTbar"]) keys = ["Wjets"] Wjets_params = doFit(fitter,Wjets,Wjets_nonRes,"Wjets_TTbar",leg) params.update(Wjets_params) params["ratio_Res_nonRes_"+leg]= {'ratio':scales["Wjets"]/scales_nonRes["Wjets"] } if 'Zjets' in histos.keys():
if "Zjets" in histos.keys() and "TTbar" in histos.keys(): params["ratio_Res_nonRes_"+leg]= {'ratio': scales["Wjets"]/scales_nonRes["Wjets"] , 'ratio_Z': scales["Zjets"]/scales_nonRes["Zjets"],'ratio_TT': scales["TTbar"]/scales_nonRes["TTbar"]} fitter.drawVjets("Vjets_mjetRes_"+leg+"_"+purity+".pdf",histos,histos_nonRes,scales,scales_nonRes) del histos,histos_nonRes,fitter,fitterZ graphs={} projections=[[1,3],[4,6],[7,10],[11,15],[16,20],[21,26],[27,35],[36,50],[51,61],[62,75],[76,80]] for key in keys: graphs[key]=ROOT.TGraphErrors() n=0 for p in projections: i1 = histos2D[key].ProjectionY("tmp1",p[0],p[1]).Integral() i2 = histos2D_nonRes_l2[key].ProjectionY("tmp2",p[0],p[1]).Integral() i1_l2 = histos2D_l2[key].ProjectionY("tmp1",p[0],p[1]).Integral() i2_l2 = histos2D_nonRes[key].ProjectionY("tmp2",p[0],p[1]).Integral() graphs[key].SetPoint(n,55+p[0]*2+(p[1]-p[0]),(i1/i2 +i1_l2/i2_l2)/2.) if (key=="Wjets") and ("TTbar" in histos2D.keys()): norm = histos2D["TTbar"].Integral()/histos2D["Wjets"].Integral() tt_i1 = histos2D["TTbar"].ProjectionY("tmp1",p[0],p[1]).Integral()*norm tt_i2 = histos2D_nonRes_l2["TTbar"].ProjectionY("tmp2",p[0],p[1]).Integral() tt_i1_l2 = histos2D_l2["TTbar"].ProjectionY("tmp1",p[0],p[1]) .Integral()*norm tt_i2_l2 = histos2D_nonRes["TTbar"].ProjectionY("tmp2",p[0],p[1]).Integral() graphs[key].SetPoint(n,55+p[0]*2+(p[1]-p[0]),(i1/i2 +i1_l2/i2_l2)/2.+(tt_i1/tt_i2 + tt_i1_l2/tt_i2_l2)/2.) err = ROOT.TMath.Sqrt(pow(ROOT.TMath.Sqrt(i1)/i2 + ROOT.TMath.Sqrt(i2)*i1/(i2*i2),2)+pow(ROOT.TMath.Sqrt(i1_l2)/i2_l2 + ROOT.TMath.Sqrt(i2_l2)*i1_l2/(i2_l2*i2_l2),2)) graphs[key].SetPointError(n,0,err) print "set point errors "+str(err) n+=1 func=ROOT.TF1("pol","pol6",55,215) func2=ROOT.TF1("pol","pol6",55,215) l="ratio" for key in graphs.keys(): if key.find("Z")!=-1: l="ratio_Z" if key.find("T")!=-1: l="ratio_TT" if key.find("W")!=-1: l="ratio" if key.find("W")!=-1: graphs[key].Fit(func) st = returnString(func,"pol","MJ2") params["ratio_Res_nonRes_l1"][l] = st st = returnString(func,"pol","MJ1") params["ratio_Res_nonRes_l2"][l] = st else: graphs[key].Fit(func2) st = returnString(func2,"pol","MJ2") params["ratio_Res_nonRes_l1"][l] = st st = returnString(func2,"pol","MJ1") params["ratio_Res_nonRes_l2"][l] = st graphs[key].SetMarkerColor(ROOT.kBlack) graphs[key].SetMarkerStyle(1) graphs[key].SetMarkerColor(ROOT.kBlue) graphs[key].SetMarkerStyle(2) graphs[key].GetXaxis().SetTitle("m_{jet1}") graphs[key].GetYaxis().SetTitle("res/nonRes") graphs[key].GetFunction("pol").SetLineColor(ROOT.kBlack) graphs[key].GetXaxis().SetRangeUser(55,215) graphs[key].SetMinimum(0) c =getCanvas("c") graphs["Wjets"].Draw("AP") graphs["Wjets"].GetFunction("pol").Draw("same") legend = ROOT.TLegend(0.5607383,0.2063123,0.85,0.3089701) legend.SetLineWidth(2) legend.SetBorderSize(0) legend.SetFillColor(0) legend.SetTextFont(42) legend.SetTextSize(0.04) legend.SetTextAlign(12) legend.AddEntry(graphs["Wjets"],"ratio W+jets + t#bar{t}","lp") legend.AddEntry(graphs["Wjets"].GetFunction("pol"),"fit ","lp") legend.Draw("same") text = ROOT.TLatex() text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debug_corr_l1_l2_Wjets.pdf") if 'Zjets' in graphs.keys(): c = getCanvas("zjets") graphs["Zjets"].Draw("AP") graphs["Zjets"].GetFunction("pol").Draw("same") legend = ROOT.TLegend(0.5607383,0.2063123,0.85,0.3089701) legend.SetLineWidth(2) legend.SetBorderSize(0) legend.SetFillColor(0) legend.SetTextFont(42) legend.SetTextSize(0.04) legend.SetTextAlign(12) legend.AddEntry(graphs["Zjets"],"ratio Z+jets 
m_{jet1}","lp") legend.AddEntry(graphs["Zjets"].GetFunction("pol"),"fit m_{jet1}","lp") legend.Draw("same") text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debug_corr_l1_l2_Zjets.pdf") if 'TTbar' in graphs.keys(): c=getCanvas("ttbar") graphs["TTbar"].Draw("AP") graphs["TTbar"].GetFunction("pol").Draw("same") legend = ROOT.TLegend(0.5607383,0.2063123,0.85,0.3089701) legend.SetLineWidth(2) legend.SetBorderSize(0) legend.SetFillColor(0) legend.SetTextFont(42) legend.SetTextSize(0.04) legend.SetTextAlign(12) legend.AddEntry(graphs["TTbar"],"ratio TTbar m_{jet1}","lp") legend.AddEntry(graphs["TTbar"].GetFunction("pol"),"fit m_{jet1}","lp") legend.Draw("same") text.DrawLatexNDC(0.13,0.92,"#font[62]{CMS} #font[52]{Simulation}") c.SaveAs("debug_corr_l1_l2_TTbar.pdf") if options.store!="": print "write to file "+options.store f=open(options.store,"w") for par in params: f.write(str(par)+ " = " +str(params[par])+"\n")
keys.append("Zjets") fitterZ=Fitter(['x']) fitterZ.jetResonanceVjets('model','x') Zjets_params = doFit(fitterZ,histos["Zjets"],histos_nonRes["Zjets"],"Zjets",leg) params.update(Wjets_params) params.update(Zjets_params) params["ratio_Res_nonRes_"+leg]= {'ratio': scales["Wjets"]/scales_nonRes["Wjets"] , 'ratio_Z': scales["Zjets"]/scales_nonRes["Zjets"]}
conditional_block
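The per-point uncertainty assigned to the res/nonRes graphs in this record treats each slice integral as a Poisson count: within one leg the ratio error is approximated by the linear sum sqrt(i1)/i2 + sqrt(i2)*i1/i2^2, and the two legs are then combined in quadrature. A small numeric check of exactly that formula (the toy integrals are illustrative values only):

from math import sqrt

def leg_err(i1, i2):
    # linear-sum approximation used in the script, assuming Poisson counts
    return sqrt(i1) / i2 + sqrt(i2) * i1 / (i2 * i2)

i1, i2 = 400.0, 1600.0        # toy res / nonRes integrals, leg 1
i1_l2, i2_l2 = 380.0, 1500.0  # toy integrals, leg 2
err = sqrt(leg_err(i1, i2) ** 2 + leg_err(i1_l2, i2_l2) ** 2)
print(round(err, 4))          # ~0.0271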
reset.go
// Copyright 2020 Dolthub, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package actions import ( "context" "fmt" "time" "github.com/dolthub/dolt/go/store/datas" "github.com/dolthub/dolt/go/libraries/doltcore/doltdb" "github.com/dolthub/dolt/go/libraries/doltcore/env" "github.com/dolthub/dolt/go/libraries/doltcore/ref" "github.com/dolthub/dolt/go/libraries/doltcore/schema" "github.com/dolthub/dolt/go/libraries/utils/argparser" ) // resetHardTables resolves a new HEAD commit from a refSpec and updates working set roots by // resetting the table contexts for tracked tables. New tables are ignored. Returns new HEAD // Commit and Roots. func
(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) { ddb := dbData.Ddb rsr := dbData.Rsr var newHead *doltdb.Commit if cSpecStr != "" { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return nil, doltdb.Roots{}, err } headRef, err := rsr.CWBHeadRef() if err != nil { return nil, doltdb.Roots{}, err } newHead, err = ddb.Resolve(ctx, cs, headRef) if err != nil { return nil, doltdb.Roots{}, err } roots.Head, err = newHead.GetRootValue(ctx) if err != nil { return nil, doltdb.Roots{}, err } } // mirroring Git behavior, untracked tables are ignored on 'reset --hard', // save the state of these tables and apply them to |newHead|'s root. // // as a special case, if an untracked table has a tag collision with any // tables in |newHead| we silently drop it from the new working set. // this tag collision is typically caused by table renames (bug #751). untracked, err := roots.Working.GetAllSchemas(ctx) if err != nil { return nil, doltdb.Roots{}, err } // untracked tables exist in |working| but not in |staged| staged, err := roots.Staged.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, name := range staged { delete(untracked, name) } newWkRoot := roots.Head ws, err := newWkRoot.GetAllSchemas(ctx) if err != nil { return nil, doltdb.Roots{}, err } tags := mapColumnTags(ws) for name, sch := range untracked { for _, pk := range sch.GetAllCols().GetColumns() { if _, ok := tags[pk.Tag]; ok { // |pk.Tag| collides with a schema in |newWkRoot| delete(untracked, name) } } } for name := range untracked { tbl, _, err := roots.Working.GetTable(ctx, name) if err != nil { return nil, doltdb.Roots{}, err } newWkRoot, err = newWkRoot.PutTable(ctx, name, tbl) if err != nil { return nil, doltdb.Roots{}, fmt.Errorf("failed to write table back to database: %s", err) } } // need to save the state of tables that aren't tracked untrackedTables := make(map[string]*doltdb.Table) wTblNames, err := roots.Working.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range wTblNames { untrackedTables[tblName], _, err = roots.Working.GetTable(ctx, tblName) if err != nil { return nil, doltdb.Roots{}, err } } headTblNames, err := roots.Staged.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range headTblNames { delete(untrackedTables, tblName) } roots.Working = newWkRoot roots.Staged = roots.Head return newHead, roots, nil } // ResetHardTables resets the tables in working, staged, and head based on the given parameters. Returns the new // head commit and resulting roots func ResetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) { return resetHardTables(ctx, dbData, cSpecStr, roots) } // ResetHard resets the working, staged, and head to the ones in the provided roots and head ref. // The reset can be performed on a non-current branch and working set. // Returns an error if the reset fails. 
func ResetHard( ctx context.Context, dbData env.DbData, doltDb *doltdb.DoltDB, username, email string, cSpecStr string, roots doltdb.Roots, headRef ref.DoltRef, ws *doltdb.WorkingSet, ) error { newHead, roots, err := resetHardTables(ctx, dbData, cSpecStr, roots) if err != nil { return err } currentWs, err := doltDb.ResolveWorkingSet(ctx, ws.Ref()) if err != nil { return err } h, err := currentWs.HashOf() if err != nil { return err } err = doltDb.UpdateWorkingSet(ctx, ws.Ref(), ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged).ClearMerge(), h, &datas.WorkingSetMeta{ Name: username, Email: email, Timestamp: uint64(time.Now().Unix()), Description: "reset hard", }, nil) if err != nil { return err } if newHead != nil { err = doltDb.SetHeadToCommit(ctx, headRef, newHead) if err != nil { return err } } return nil } func ResetSoftTables(ctx context.Context, dbData env.DbData, apr *argparser.ArgParseResults, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, apr.Args, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } roots.Staged, err = MoveTablesBetweenRoots(ctx, tables, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } return roots, nil } // ResetSoft resets the staged value from HEAD for the tables given and returns the updated roots. func ResetSoft(ctx context.Context, dbData env.DbData, tables []string, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } return resetStaged(ctx, roots, tables) } // ResetSoftToRef matches the `git reset --soft <REF>` pattern. It returns a new Roots with the Staged and Head values // set to the commit specified by the spec string. 
The Working root is not set func ResetSoftToRef(ctx context.Context, dbData env.DbData, cSpecStr string) (doltdb.Roots, error) { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return doltdb.Roots{}, err } headRef, err := dbData.Rsr.CWBHeadRef() if err != nil { return doltdb.Roots{}, err } newHead, err := dbData.Ddb.Resolve(ctx, cs, headRef) if err != nil { return doltdb.Roots{}, err } foundRoot, err := newHead.GetRootValue(ctx) if err != nil { return doltdb.Roots{}, err } // Update the head to this commit if err = dbData.Ddb.SetHeadToCommit(ctx, headRef, newHead); err != nil { return doltdb.Roots{}, err } return doltdb.Roots{ Head: foundRoot, Staged: foundRoot, }, err } func getUnionedTables(ctx context.Context, tables []string, stagedRoot, headRoot *doltdb.RootValue) ([]string, error) { if len(tables) == 0 || (len(tables) == 1 && tables[0] == ".") { var err error tables, err = doltdb.UnionTableNames(ctx, stagedRoot, headRoot) if err != nil { return nil, err } } return tables, nil } func resetStaged(ctx context.Context, roots doltdb.Roots, tbls []string) (doltdb.Roots, error) { newStaged, err := MoveTablesBetweenRoots(ctx, tbls, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } roots.Staged = newStaged return roots, nil } // IsValidRef validates whether the input parameter is a valid commit spec string // TODO: this doesn't belong in this package func IsValidRef(ctx context.Context, cSpecStr string, ddb *doltdb.DoltDB, rsr env.RepoStateReader) (bool, error) { // The error return value is only for propagating unhandled errors from rsr.CWBHeadRef() // All other errors merely indicate an invalid ref spec. // TODO: It's much better to enumerate the expected errors, to make sure we don't suppress any unexpected ones. cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return false, nil } headRef, err := rsr.CWBHeadRef() if err == doltdb.ErrOperationNotSupportedInDetachedHead { // This is safe because ddb.Resolve checks if headRef is nil, but only when the value is actually needed. // Basically, this guarantees that resolving "HEAD" or similar will return an error but other resolves will work. headRef = nil } else if err != nil { return false, err } _, err = ddb.Resolve(ctx, cs, headRef) if err != nil { return false, nil } return true, nil } // CleanUntracked deletes untracked tables from the working root. // Evaluates untracked tables as: all working tables - all staged tables. func CleanUntracked(ctx context.Context, roots doltdb.Roots, tables []string, dryrun bool, force bool) (doltdb.Roots, error) { untrackedTables := make(map[string]struct{}) var err error if len(tables) == 0 { tables, err = roots.Working.GetTableNames(ctx) if err != nil { return doltdb.Roots{}, err } } for i := range tables { name := tables[i] _, _, err = roots.Working.GetTable(ctx, name) if err != nil { return doltdb.Roots{}, err } untrackedTables[name] = struct{}{} } // untracked tables = working tables - staged tables headTblNames, err := roots.Staged.GetTableNames(ctx) if err != nil { return doltdb.Roots{}, err } for _, name := range headTblNames { delete(untrackedTables, name) } newRoot := roots.Working var toDelete []string for t := range untrackedTables { toDelete = append(toDelete, t) } newRoot, err = newRoot.RemoveTables(ctx, force, force, toDelete...) 
if err != nil { return doltdb.Roots{}, fmt.Errorf("failed to remove tables; %w", err) } if dryrun { return roots, nil } roots.Working = newRoot return roots, nil } // mapColumnTags takes a map from table name to schema.Schema and generates // a map from column tags to table names (see RootValue.GetAllSchemas). func mapColumnTags(tables map[string]schema.Schema) (m map[uint64]string) { m = make(map[uint64]string, len(tables)) for tbl, sch := range tables { for _, tag := range sch.GetAllCols().Tags { m[tag] = tbl } } return }
resetHardTables
identifier_name
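Note: a minimal sketch of how the exported reset --hard plumbing in the record above composes, assuming a caller has already loaded env.DbData and the current roots. resetToCommit is a hypothetical wrapper for illustration, not part of the Dolt API; it reuses only calls that appear in the source (ResetHardTables, CWBHeadRef, SetHeadToCommit).

package actions

import (
	"context"

	"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
	"github.com/dolthub/dolt/go/libraries/doltcore/env"
)

// resetToCommit resolves |spec|, rewrites Head/Staged/Working via
// ResetHardTables, then moves HEAD to the resolved commit.
func resetToCommit(ctx context.Context, dbData env.DbData, spec string, roots doltdb.Roots) (doltdb.Roots, error) {
	newHead, roots, err := ResetHardTables(ctx, dbData, spec, roots)
	if err != nil {
		return doltdb.Roots{}, err
	}
	// newHead is nil when |spec| was empty; only move HEAD when it resolved.
	if newHead != nil {
		headRef, err := dbData.Rsr.CWBHeadRef()
		if err != nil {
			return doltdb.Roots{}, err
		}
		if err := dbData.Ddb.SetHeadToCommit(ctx, headRef, newHead); err != nil {
			return doltdb.Roots{}, err
		}
	}
	return roots, nil
}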
reset.go
// Copyright 2020 Dolthub, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package actions import ( "context" "fmt" "time" "github.com/dolthub/dolt/go/store/datas" "github.com/dolthub/dolt/go/libraries/doltcore/doltdb" "github.com/dolthub/dolt/go/libraries/doltcore/env" "github.com/dolthub/dolt/go/libraries/doltcore/ref" "github.com/dolthub/dolt/go/libraries/doltcore/schema" "github.com/dolthub/dolt/go/libraries/utils/argparser" ) // resetHardTables resolves a new HEAD commit from a refSpec and updates working set roots by // resetting the table contents for tracked tables. New tables are ignored. Returns new HEAD // Commit and Roots. func resetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) { ddb := dbData.Ddb rsr := dbData.Rsr var newHead *doltdb.Commit if cSpecStr != "" { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return nil, doltdb.Roots{}, err } headRef, err := rsr.CWBHeadRef() if err != nil { return nil, doltdb.Roots{}, err } newHead, err = ddb.Resolve(ctx, cs, headRef) if err != nil { return nil, doltdb.Roots{}, err } roots.Head, err = newHead.GetRootValue(ctx) if err != nil { return nil, doltdb.Roots{}, err } } // mirroring Git behavior, untracked tables are ignored on 'reset --hard', // save the state of these tables and apply them to |newHead|'s root. // // as a special case, if an untracked table has a tag collision with any // tables in |newHead| we silently drop it from the new working set. // this tag collision is typically caused by table renames (bug #751).
untracked, err := roots.Working.GetAllSchemas(ctx) if err != nil { return nil, doltdb.Roots{}, err } // untracked tables exist in |working| but not in |staged| staged, err := roots.Staged.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, name := range staged { delete(untracked, name) } newWkRoot := roots.Head ws, err := newWkRoot.GetAllSchemas(ctx) if err != nil { return nil, doltdb.Roots{}, err } tags := mapColumnTags(ws) for name, sch := range untracked { for _, pk := range sch.GetAllCols().GetColumns() { if _, ok := tags[pk.Tag]; ok { // |pk.Tag| collides with a schema in |newWkRoot| delete(untracked, name) } } } for name := range untracked { tbl, _, err := roots.Working.GetTable(ctx, name) if err != nil { return nil, doltdb.Roots{}, err } newWkRoot, err = newWkRoot.PutTable(ctx, name, tbl) if err != nil { return nil, doltdb.Roots{}, fmt.Errorf("failed to write table back to database: %s", err) } } // need to save the state of files that aren't tracked untrackedTables := make(map[string]*doltdb.Table) wTblNames, err := roots.Working.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range wTblNames { untrackedTables[tblName], _, err = roots.Working.GetTable(ctx, tblName) if err != nil { return nil, doltdb.Roots{}, err } } headTblNames, err := roots.Staged.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range headTblNames { delete(untrackedTables, tblName) } roots.Working = newWkRoot roots.Staged = roots.Head return newHead, roots, nil } // ResetHardTables resets the tables in working, staged, and head based on the given parameters. Returns the new // head commit and resulting roots func ResetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) { return resetHardTables(ctx, dbData, cSpecStr, roots) } // ResetHard resets the working, staged, and head to the ones in the provided roots and head ref. // The reset can be performed on a non-current branch and working set. // Returns an error if the reset fails. 
func ResetHard( ctx context.Context, dbData env.DbData, doltDb *doltdb.DoltDB, username, email string, cSpecStr string, roots doltdb.Roots, headRef ref.DoltRef, ws *doltdb.WorkingSet, ) error { newHead, roots, err := resetHardTables(ctx, dbData, cSpecStr, roots) if err != nil { return err } currentWs, err := doltDb.ResolveWorkingSet(ctx, ws.Ref()) if err != nil { return err } h, err := currentWs.HashOf() if err != nil { return err } err = doltDb.UpdateWorkingSet(ctx, ws.Ref(), ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged).ClearMerge(), h, &datas.WorkingSetMeta{ Name: username, Email: email, Timestamp: uint64(time.Now().Unix()), Description: "reset hard", }, nil) if err != nil { return err } if newHead != nil { err = doltDb.SetHeadToCommit(ctx, headRef, newHead) if err != nil { return err } } return nil } func ResetSoftTables(ctx context.Context, dbData env.DbData, apr *argparser.ArgParseResults, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, apr.Args, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } roots.Staged, err = MoveTablesBetweenRoots(ctx, tables, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } return roots, nil } // ResetSoft resets the staged value from HEAD for the tables given and returns the updated roots. func ResetSoft(ctx context.Context, dbData env.DbData, tables []string, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } return resetStaged(ctx, roots, tables) } // ResetSoftToRef matches the `git reset --soft <REF>` pattern. It returns a new Roots with the Staged and Head values // set to the commit specified by the spec string. The Working root is not set func ResetSoftToRef(ctx context.Context, dbData env.DbData, cSpecStr string) (doltdb.Roots, error) { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return doltdb.Roots{}, err } headRef, err := dbData.Rsr.CWBHeadRef() if err != nil { return doltdb.Roots{}, err } newHead, err := dbData.Ddb.Resolve(ctx, cs, headRef) if err != nil { return doltdb.Roots{}, err } foundRoot, err := newHead.GetRootValue(ctx) if err != nil { return doltdb.Roots{}, err } // Update the head to this commit if err = dbData.Ddb.SetHeadToCommit(ctx, headRef, newHead); err != nil { return doltdb.Roots{}, err } return doltdb.Roots{ Head: foundRoot, Staged: foundRoot, }, err } func getUnionedTables(ctx context.Context, tables []string, stagedRoot, headRoot *doltdb.RootValue) ([]string, error) { if len(tables) == 0 || (len(tables) == 1 && tables[0] == ".") { var err error tables, err = doltdb.UnionTableNames(ctx, stagedRoot, headRoot) if err != nil { return nil, err } } return tables, nil } func resetStaged(ctx context.Context, roots doltdb.Roots, tbls []string) (doltdb.Roots, error)
// IsValidRef validates whether the input parameter is a valid commit spec string // TODO: this doesn't belong in this package func IsValidRef(ctx context.Context, cSpecStr string, ddb *doltdb.DoltDB, rsr env.RepoStateReader) (bool, error) { // The error return value is only for propagating unhandled errors from rsr.CWBHeadRef() // All other errors merely indicate an invalid ref spec. // TODO: It's much better to enumerate the expected errors, to make sure we don't suppress any unexpected ones. cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return false, nil } headRef, err := rsr.CWBHeadRef() if err == doltdb.ErrOperationNotSupportedInDetachedHead { // This is safe because ddb.Resolve checks if headRef is nil, but only when the value is actually needed. // Basically, this guarantees that resolving "HEAD" or similar will return an error but other resolves will work. headRef = nil } else if err != nil { return false, err } _, err = ddb.Resolve(ctx, cs, headRef) if err != nil { return false, nil } return true, nil } // CleanUntracked deletes untracked tables from the working root. // Evaluates untracked tables as: all working tables - all staged tables. func CleanUntracked(ctx context.Context, roots doltdb.Roots, tables []string, dryrun bool, force bool) (doltdb.Roots, error) { untrackedTables := make(map[string]struct{}) var err error if len(tables) == 0 { tables, err = roots.Working.GetTableNames(ctx) if err != nil { return doltdb.Roots{}, err } } for i := range tables { name := tables[i] _, _, err = roots.Working.GetTable(ctx, name) if err != nil { return doltdb.Roots{}, err } untrackedTables[name] = struct{}{} } // untracked tables = working tables - staged tables headTblNames, err := roots.Staged.GetTableNames(ctx) if err != nil { return doltdb.Roots{}, err } for _, name := range headTblNames { delete(untrackedTables, name) } newRoot := roots.Working var toDelete []string for t := range untrackedTables { toDelete = append(toDelete, t) } newRoot, err = newRoot.RemoveTables(ctx, force, force, toDelete...) if err != nil { return doltdb.Roots{}, fmt.Errorf("failed to remove tables; %w", err) } if dryrun { return roots, nil } roots.Working = newRoot return roots, nil } // mapColumnTags takes a map from table name to schema.Schema and generates // a map from column tags to table names (see RootValue.GetAllSchemas). func mapColumnTags(tables map[string]schema.Schema) (m map[uint64]string) { m = make(map[uint64]string, len(tables)) for tbl, sch := range tables { for _, tag := range sch.GetAllCols().Tags { m[tag] = tbl } } return }
{ newStaged, err := MoveTablesBetweenRoots(ctx, tbls, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } roots.Staged = newStaged return roots, nil }
identifier_body
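Note: both resetHardTables and CleanUntracked in the records above compute untracked tables as a set difference (working minus staged) using a map. A self-contained toy of that pattern, with plain strings standing in for table names; untrackedNames is an illustrative helper, not part of the Dolt codebase.

package main

import "fmt"

// untrackedNames mirrors the working-minus-staged computation:
// collect candidates in a set, then delete every staged name.
func untrackedNames(working, staged []string) []string {
	candidates := make(map[string]struct{}, len(working))
	for _, name := range working {
		candidates[name] = struct{}{}
	}
	for _, name := range staged {
		delete(candidates, name)
	}
	out := make([]string, 0, len(candidates))
	for name := range candidates {
		out = append(out, name)
	}
	return out
}

func main() {
	fmt.Println(untrackedNames([]string{"users", "orders", "scratch"}, []string{"users", "orders"})) // [scratch]
}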
reset.go
// Copyright 2020 Dolthub, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package actions import ( "context" "fmt" "time" "github.com/dolthub/dolt/go/store/datas" "github.com/dolthub/dolt/go/libraries/doltcore/doltdb" "github.com/dolthub/dolt/go/libraries/doltcore/env" "github.com/dolthub/dolt/go/libraries/doltcore/ref" "github.com/dolthub/dolt/go/libraries/doltcore/schema" "github.com/dolthub/dolt/go/libraries/utils/argparser" ) // resetHardTables resolves a new HEAD commit from a refSpec and updates working set roots by // resetting the table contexts for tracked tables. New tables are ignored. Returns new HEAD // Commit and Roots. func resetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) { ddb := dbData.Ddb rsr := dbData.Rsr var newHead *doltdb.Commit if cSpecStr != "" { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil
headRef, err := rsr.CWBHeadRef() if err != nil { return nil, doltdb.Roots{}, err } newHead, err = ddb.Resolve(ctx, cs, headRef) if err != nil { return nil, doltdb.Roots{}, err } roots.Head, err = newHead.GetRootValue(ctx) if err != nil { return nil, doltdb.Roots{}, err } } // mirroring Git behavior, untracked tables are ignored on 'reset --hard', // save the state of these tables and apply them to |newHead|'s root. // // as a special case, if an untracked table has a tag collision with any // tables in |newHead| we silently drop it from the new working set. // this tag collision is typically caused by table renames (bug #751). untracked, err := roots.Working.GetAllSchemas(ctx) if err != nil { return nil, doltdb.Roots{}, err } // untracked tables exist in |working| but not in |staged| staged, err := roots.Staged.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, name := range staged { delete(untracked, name) } newWkRoot := roots.Head ws, err := newWkRoot.GetAllSchemas(ctx) if err != nil { return nil, doltdb.Roots{}, err } tags := mapColumnTags(ws) for name, sch := range untracked { for _, pk := range sch.GetAllCols().GetColumns() { if _, ok := tags[pk.Tag]; ok { // |pk.Tag| collides with a schema in |newWkRoot| delete(untracked, name) } } } for name := range untracked { tbl, _, err := roots.Working.GetTable(ctx, name) if err != nil { return nil, doltdb.Roots{}, err } newWkRoot, err = newWkRoot.PutTable(ctx, name, tbl) if err != nil { return nil, doltdb.Roots{}, fmt.Errorf("failed to write table back to database: %s", err) } } // need to save the state of files that aren't tracked untrackedTables := make(map[string]*doltdb.Table) wTblNames, err := roots.Working.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range wTblNames { untrackedTables[tblName], _, err = roots.Working.GetTable(ctx, tblName) if err != nil { return nil, doltdb.Roots{}, err } } headTblNames, err := roots.Staged.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range headTblNames { delete(untrackedTables, tblName) } roots.Working = newWkRoot roots.Staged = roots.Head return newHead, roots, nil } // ResetHardTables resets the tables in working, staged, and head based on the given parameters. Returns the new // head commit and resulting roots func ResetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) { return resetHardTables(ctx, dbData, cSpecStr, roots) } // ResetHard resets the working, staged, and head to the ones in the provided roots and head ref. // The reset can be performed on a non-current branch and working set. // Returns an error if the reset fails.
func ResetHard( ctx context.Context, dbData env.DbData, doltDb *doltdb.DoltDB, username, email string, cSpecStr string, roots doltdb.Roots, headRef ref.DoltRef, ws *doltdb.WorkingSet, ) error { newHead, roots, err := resetHardTables(ctx, dbData, cSpecStr, roots) if err != nil { return err } currentWs, err := doltDb.ResolveWorkingSet(ctx, ws.Ref()) if err != nil { return err } h, err := currentWs.HashOf() if err != nil { return err } err = doltDb.UpdateWorkingSet(ctx, ws.Ref(), ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged).ClearMerge(), h, &datas.WorkingSetMeta{ Name: username, Email: email, Timestamp: uint64(time.Now().Unix()), Description: "reset hard", }, nil) if err != nil { return err } if newHead != nil { err = doltDb.SetHeadToCommit(ctx, headRef, newHead) if err != nil { return err } } return nil } func ResetSoftTables(ctx context.Context, dbData env.DbData, apr *argparser.ArgParseResults, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, apr.Args, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } roots.Staged, err = MoveTablesBetweenRoots(ctx, tables, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } return roots, nil } // ResetSoft resets the staged value from HEAD for the tables given and returns the updated roots. func ResetSoft(ctx context.Context, dbData env.DbData, tables []string, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } return resetStaged(ctx, roots, tables) } // ResetSoftToRef matches the `git reset --soft <REF>` pattern. It returns a new Roots with the Staged and Head values // set to the commit specified by the spec string. 
The Working root is not set. func ResetSoftToRef(ctx context.Context, dbData env.DbData, cSpecStr string) (doltdb.Roots, error) { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return doltdb.Roots{}, err } headRef, err := dbData.Rsr.CWBHeadRef() if err != nil { return doltdb.Roots{}, err } newHead, err := dbData.Ddb.Resolve(ctx, cs, headRef) if err != nil { return doltdb.Roots{}, err } foundRoot, err := newHead.GetRootValue(ctx) if err != nil { return doltdb.Roots{}, err } // Update the head to this commit if err = dbData.Ddb.SetHeadToCommit(ctx, headRef, newHead); err != nil { return doltdb.Roots{}, err } return doltdb.Roots{ Head: foundRoot, Staged: foundRoot, }, err } func getUnionedTables(ctx context.Context, tables []string, stagedRoot, headRoot *doltdb.RootValue) ([]string, error) { if len(tables) == 0 || (len(tables) == 1 && tables[0] == ".") { var err error tables, err = doltdb.UnionTableNames(ctx, stagedRoot, headRoot) if err != nil { return nil, err } } return tables, nil } func resetStaged(ctx context.Context, roots doltdb.Roots, tbls []string) (doltdb.Roots, error) { newStaged, err := MoveTablesBetweenRoots(ctx, tbls, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } roots.Staged = newStaged return roots, nil } // IsValidRef validates whether the input parameter is a valid commit spec string // TODO: this doesn't belong in this package func IsValidRef(ctx context.Context, cSpecStr string, ddb *doltdb.DoltDB, rsr env.RepoStateReader) (bool, error) { // The error return value is only for propagating unhandled errors from rsr.CWBHeadRef() // All other errors merely indicate an invalid ref spec. // TODO: It's much better to enumerate the expected errors, to make sure we don't suppress any unexpected ones. cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return false, nil } headRef, err := rsr.CWBHeadRef() if err == doltdb.ErrOperationNotSupportedInDetachedHead { // This is safe because ddb.Resolve checks if headRef is nil, but only when the value is actually needed. // Basically, this guarantees that resolving "HEAD" or similar will return an error but other resolves will work. headRef = nil } else if err != nil { return false, err } _, err = ddb.Resolve(ctx, cs, headRef) if err != nil { return false, nil } return true, nil } // CleanUntracked deletes untracked tables from the working root. // Evaluates untracked tables as: all working tables - all staged tables. func CleanUntracked(ctx context.Context, roots doltdb.Roots, tables []string, dryrun bool, force bool) (doltdb.Roots, error) { untrackedTables := make(map[string]struct{}) var err error if len(tables) == 0 { tables, err = roots.Working.GetTableNames(ctx) if err != nil { return doltdb.Roots{}, err } } for i := range tables { name := tables[i] _, _, err = roots.Working.GetTable(ctx, name) if err != nil { return doltdb.Roots{}, err } untrackedTables[name] = struct{}{} } // untracked tables = working tables - staged tables headTblNames, err := roots.Staged.GetTableNames(ctx) if err != nil { return doltdb.Roots{}, err } for _, name := range headTblNames { delete(untrackedTables, name) } newRoot := roots.Working var toDelete []string for t := range untrackedTables { toDelete = append(toDelete, t) } newRoot, err = newRoot.RemoveTables(ctx, force, force, toDelete...)
if err != nil { return doltdb.Roots{}, fmt.Errorf("failed to remove tables; %w", err) } if dryrun { return roots, nil } roots.Working = newRoot return roots, nil } // mapColumnTags takes a map from table name to schema.Schema and generates // a map from column tags to table names (see RootValue.GetAllSchemas). func mapColumnTags(tables map[string]schema.Schema) (m map[uint64]string) { m = make(map[uint64]string, len(tables)) for tbl, sch := range tables { for _, tag := range sch.GetAllCols().Tags { m[tag] = tbl } } return }
{ return nil, doltdb.Roots{}, err }
conditional_block
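Note: the extracted block above is Go's standard guard clause, returning zero values for every result plus the error. A standalone sketch of the same shape; roots and resolveSpec are illustrative stand-ins, not the doltdb types.

package main

import (
	"errors"
	"fmt"
)

type roots struct{ head, staged, working string }

// resolveSpec returns zero values plus the error on the failure path,
// exactly the shape of the extracted conditional block.
func resolveSpec(spec string) (*string, roots, error) {
	if spec == "" {
		return nil, roots{}, errors.New("empty commit spec")
	}
	head := "resolved:" + spec
	return &head, roots{head: spec, staged: spec, working: spec}, nil
}

func main() {
	if _, _, err := resolveSpec(""); err != nil {
		fmt.Println("guard clause:", err)
	}
}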
reset.go
// Copyright 2020 Dolthub, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package actions import ( "context" "fmt" "time" "github.com/dolthub/dolt/go/store/datas" "github.com/dolthub/dolt/go/libraries/doltcore/doltdb" "github.com/dolthub/dolt/go/libraries/doltcore/env" "github.com/dolthub/dolt/go/libraries/doltcore/ref" "github.com/dolthub/dolt/go/libraries/doltcore/schema" "github.com/dolthub/dolt/go/libraries/utils/argparser" ) // resetHardTables resolves a new HEAD commit from a refSpec and updates working set roots by // resetting the table contents for tracked tables. New tables are ignored. Returns new HEAD // Commit and Roots. func resetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) { ddb := dbData.Ddb rsr := dbData.Rsr var newHead *doltdb.Commit if cSpecStr != "" { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return nil, doltdb.Roots{}, err } headRef, err := rsr.CWBHeadRef() if err != nil { return nil, doltdb.Roots{}, err } newHead, err = ddb.Resolve(ctx, cs, headRef) if err != nil { return nil, doltdb.Roots{}, err } roots.Head, err = newHead.GetRootValue(ctx) if err != nil { return nil, doltdb.Roots{}, err } } // mirroring Git behavior, untracked tables are ignored on 'reset --hard', // save the state of these tables and apply them to |newHead|'s root. // // as a special case, if an untracked table has a tag collision with any // tables in |newHead| we silently drop it from the new working set. // this tag collision is typically caused by table renames (bug #751).
untracked, err := roots.Working.GetAllSchemas(ctx) if err != nil { return nil, doltdb.Roots{}, err } // untracked tables exist in |working| but not in |staged| staged, err := roots.Staged.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, name := range staged { delete(untracked, name) } newWkRoot := roots.Head ws, err := newWkRoot.GetAllSchemas(ctx) if err != nil { return nil, doltdb.Roots{}, err } tags := mapColumnTags(ws) for name, sch := range untracked { for _, pk := range sch.GetAllCols().GetColumns() { if _, ok := tags[pk.Tag]; ok { // |pk.Tag| collides with a schema in |newWkRoot| delete(untracked, name) } } } for name := range untracked { tbl, _, err := roots.Working.GetTable(ctx, name) if err != nil { return nil, doltdb.Roots{}, err } newWkRoot, err = newWkRoot.PutTable(ctx, name, tbl) if err != nil { return nil, doltdb.Roots{}, fmt.Errorf("failed to write table back to database: %s", err) } } // need to save the state of files that aren't tracked untrackedTables := make(map[string]*doltdb.Table) wTblNames, err := roots.Working.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range wTblNames { untrackedTables[tblName], _, err = roots.Working.GetTable(ctx, tblName) if err != nil { return nil, doltdb.Roots{}, err } } headTblNames, err := roots.Staged.GetTableNames(ctx) if err != nil { return nil, doltdb.Roots{}, err } for _, tblName := range headTblNames { delete(untrackedTables, tblName) } roots.Working = newWkRoot roots.Staged = roots.Head return newHead, roots, nil } // ResetHardTables resets the tables in working, staged, and head based on the given parameters. Returns the new // head commit and resulting roots func ResetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) { return resetHardTables(ctx, dbData, cSpecStr, roots) } // ResetHard resets the working, staged, and head to the ones in the provided roots and head ref. // The reset can be performed on a non-current branch and working set. // Returns an error if the reset fails. 
func ResetHard( ctx context.Context, dbData env.DbData, doltDb *doltdb.DoltDB, username, email string, cSpecStr string, roots doltdb.Roots, headRef ref.DoltRef, ws *doltdb.WorkingSet, ) error { newHead, roots, err := resetHardTables(ctx, dbData, cSpecStr, roots) if err != nil { return err } currentWs, err := doltDb.ResolveWorkingSet(ctx, ws.Ref()) if err != nil { return err } h, err := currentWs.HashOf() if err != nil { return err } err = doltDb.UpdateWorkingSet(ctx, ws.Ref(), ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged).ClearMerge(), h, &datas.WorkingSetMeta{ Name: username, Email: email, Timestamp: uint64(time.Now().Unix()), Description: "reset hard", }, nil) if err != nil { return err } if newHead != nil { err = doltDb.SetHeadToCommit(ctx, headRef, newHead) if err != nil { return err } } return nil } func ResetSoftTables(ctx context.Context, dbData env.DbData, apr *argparser.ArgParseResults, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, apr.Args, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } roots.Staged, err = MoveTablesBetweenRoots(ctx, tables, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } return roots, nil } // ResetSoft resets the staged value from HEAD for the tables given and returns the updated roots. func ResetSoft(ctx context.Context, dbData env.DbData, tables []string, roots doltdb.Roots) (doltdb.Roots, error) { tables, err := getUnionedTables(ctx, tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head) if err != nil { return doltdb.Roots{}, err } return resetStaged(ctx, roots, tables) } // ResetSoftToRef matches the `git reset --soft <REF>` pattern. It returns a new Roots with the Staged and Head values // set to the commit specified by the spec string. 
The Working root is not set. func ResetSoftToRef(ctx context.Context, dbData env.DbData, cSpecStr string) (doltdb.Roots, error) { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return doltdb.Roots{}, err } headRef, err := dbData.Rsr.CWBHeadRef() if err != nil { return doltdb.Roots{}, err } newHead, err := dbData.Ddb.Resolve(ctx, cs, headRef) if err != nil { return doltdb.Roots{}, err } foundRoot, err := newHead.GetRootValue(ctx) if err != nil { return doltdb.Roots{}, err } // Update the head to this commit if err = dbData.Ddb.SetHeadToCommit(ctx, headRef, newHead); err != nil { return doltdb.Roots{}, err } return doltdb.Roots{ Head: foundRoot, Staged: foundRoot, }, err } func getUnionedTables(ctx context.Context, tables []string, stagedRoot, headRoot *doltdb.RootValue) ([]string, error) { if len(tables) == 0 || (len(tables) == 1 && tables[0] == ".") { var err error tables, err = doltdb.UnionTableNames(ctx, stagedRoot, headRoot) if err != nil { return nil, err } } return tables, nil } func resetStaged(ctx context.Context, roots doltdb.Roots, tbls []string) (doltdb.Roots, error) { newStaged, err := MoveTablesBetweenRoots(ctx, tbls, roots.Head, roots.Staged) if err != nil { return doltdb.Roots{}, err } roots.Staged = newStaged return roots, nil } // IsValidRef validates whether the input parameter is a valid commit spec string // TODO: this doesn't belong in this package func IsValidRef(ctx context.Context, cSpecStr string, ddb *doltdb.DoltDB, rsr env.RepoStateReader) (bool, error) { // The error return value is only for propagating unhandled errors from rsr.CWBHeadRef() // All other errors merely indicate an invalid ref spec. // TODO: It's much better to enumerate the expected errors, to make sure we don't suppress any unexpected ones. cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { return false, nil } headRef, err := rsr.CWBHeadRef() if err == doltdb.ErrOperationNotSupportedInDetachedHead { // This is safe because ddb.Resolve checks if headRef is nil, but only when the value is actually needed. // Basically, this guarantees that resolving "HEAD" or similar will return an error but other resolves will work. headRef = nil } else if err != nil { return false, err } _, err = ddb.Resolve(ctx, cs, headRef) if err != nil { return false, nil } return true, nil } // CleanUntracked deletes untracked tables from the working root. // Evaluates untracked tables as: all working tables - all staged tables. func CleanUntracked(ctx context.Context, roots doltdb.Roots, tables []string, dryrun bool, force bool) (doltdb.Roots, error) { untrackedTables := make(map[string]struct{}) var err error if len(tables) == 0 { tables, err = roots.Working.GetTableNames(ctx) if err != nil { return doltdb.Roots{}, err } } for i := range tables { name := tables[i] _, _, err = roots.Working.GetTable(ctx, name) if err != nil { return doltdb.Roots{}, err } untrackedTables[name] = struct{}{} } // untracked tables = working tables - staged tables headTblNames, err := roots.Staged.GetTableNames(ctx) if err != nil { return doltdb.Roots{}, err } for _, name := range headTblNames { delete(untrackedTables, name) } newRoot := roots.Working var toDelete []string for t := range untrackedTables { toDelete = append(toDelete, t) }
} if dryrun { return roots, nil } roots.Working = newRoot return roots, nil } // mapColumnTags takes a map from table name to schema.Schema and generates // a map from column tags to table names (see RootValue.GetAllSchemas). func mapColumnTags(tables map[string]schema.Schema) (m map[uint64]string) { m = make(map[uint64]string, len(tables)) for tbl, sch := range tables { for _, tag := range sch.GetAllCols().Tags { m[tag] = tbl } } return }
newRoot, err = newRoot.RemoveTables(ctx, force, force, toDelete...) if err != nil { return doltdb.Roots{}, fmt.Errorf("failed to remove tables; %w", err)
random_line_split
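Note: the split lines above wrap the storage error with the %w verb, which keeps the original error reachable through errors.Is and errors.As. A minimal demonstration of why that verb matters; errTableNotFound is an illustrative sentinel, not a Dolt error.

package main

import (
	"errors"
	"fmt"
)

var errTableNotFound = errors.New("table not found")

func main() {
	wrapped := fmt.Errorf("failed to remove tables: %w", errTableNotFound)
	fmt.Println(errors.Is(wrapped, errTableNotFound)) // true: %w preserves the error chain
	flat := fmt.Errorf("failed to remove tables: %v", errTableNotFound)
	fmt.Println(errors.Is(flat, errTableNotFound)) // false: %v flattens to a plain string
}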
clickhouse_output.go
package output import ( "database/sql" "encoding/json" "fmt" "math/rand" "strconv" "strings" "sync" "sync/atomic" "time" clickhouse "github.com/ClickHouse/clickhouse-go" "github.com/childe/gohangout/topology" "github.com/golang/glog" "github.com/spf13/cast" ) const ( CLICKHOUSE_DEFAULT_BULK_ACTIONS = 1000 CLICKHOUSE_DEFAULT_FLUSH_INTERVAL = 30 ) type ClickhouseOutput struct { config map[interface{}]interface{} bulk_actions int hosts []string fields []string table string username string password string fieldsLength int query string desc map[string]*rowDesc defaultValue map[string]interface{} // columnName -> defaultValue bulkChan chan []map[string]interface{} concurrent int events []map[string]interface{} execution_id uint64 dbSelector HostSelector mux sync.Mutex wg sync.WaitGroup closeChan chan bool autoConvert bool transIntColumn []string transFloatColumn []string transIntArrayColumn []string } type rowDesc struct { Name string `json:"name"` Type string `json:"type"` DefaultType string `json:"default_type"` DefaultExpression string `json:"default_expression"` } func (c *ClickhouseOutput) setTableDesc() { c.desc = make(map[string]*rowDesc) query := fmt.Sprintf("desc table %s", c.table) glog.V(5).Info(query) for i := 0; i < c.dbSelector.Size(); i++
string, number and ip DEFAULT expression is supported for now func (c *ClickhouseOutput) setColumnDefault() { c.setTableDesc() c.defaultValue = make(map[string]interface{}) var defaultValue *string for columnName, d := range c.desc { switch d.DefaultType { case "DEFAULT": defaultValue = &(d.DefaultExpression) case "MATERIALIZED": glog.Fatal("parse default value: MATERIALIZED expression not supported") case "ALIAS": glog.Fatal("parse default value: ALIAS expression not supported") case "": defaultValue = nil default: glog.Fatal("parse default value: only DEFAULT expression supported") } switch d.Type { case "String", "LowCardinality(String)": if defaultValue == nil { c.defaultValue[columnName] = "" } else { c.defaultValue[columnName] = *defaultValue } case "Date", "DateTime", "DateTime64": c.defaultValue[columnName] = time.Unix(0, 0) case "Nullable(Int64)", "Nullable(Int32)", "Nullable(Int16)", "Nullable(Int8)", "Nullable(Float32)", "Nullable(Float64)": c.defaultValue[columnName] = nil case "UInt8", "UInt16", "UInt32", "UInt64", "Int8", "Int16", "Int32", "Int64": if defaultValue == nil { c.defaultValue[columnName] = 0 } else { i, e := strconv.ParseInt(*defaultValue, 10, 64) if e == nil { c.defaultValue[columnName] = i } else { glog.Fatalf("parse default value `%v` error: %v", defaultValue, e) } } case "Float32", "Float64": if defaultValue == nil { c.defaultValue[columnName] = 0.0 } else { i, e := strconv.ParseFloat(*defaultValue, 64) if e == nil { c.defaultValue[columnName] = i } else { glog.Fatalf("parse default value `%v` error: %v", defaultValue, e) } } case "IPv4": c.defaultValue[columnName] = "0.0.0.0" case "IPv6": c.defaultValue[columnName] = "::" case "Array(String)", "Array(IPv4)", "Array(IPv6)", "Array(Date)", "Array(DateTime)": c.defaultValue[columnName] = clickhouse.Array([]string{}) case "Array(UInt8)": c.defaultValue[columnName] = clickhouse.Array([]uint8{}) case "Array(UInt16)": c.defaultValue[columnName] = clickhouse.Array([]uint16{}) case "Array(UInt32)": c.defaultValue[columnName] = clickhouse.Array([]uint32{}) case "Array(UInt64)": c.defaultValue[columnName] = clickhouse.Array([]uint64{}) case "Array(Int8)": c.defaultValue[columnName] = clickhouse.Array([]int8{}) case "Array(Int16)": c.defaultValue[columnName] = clickhouse.Array([]int16{}) case "Array(Int32)": c.defaultValue[columnName] = clickhouse.Array([]int32{}) case "Array(Int64)": c.defaultValue[columnName] = clickhouse.Array([]int64{}) case "Array(Float32)": c.defaultValue[columnName] = clickhouse.Array([]float32{}) case "Array(Float64)": c.defaultValue[columnName] = clickhouse.Array([]float64{}) case "Enum16": // 需要要求列声明的最小枚举值为 '' c.defaultValue[columnName] = "" case "Enum8": // 需要要求列声明的最小枚举值为 '' c.defaultValue[columnName] = "" default: glog.Errorf("column: %s, type: %s. 
unsupported column type, ignore.", columnName, d.Type) continue } } } func (c *ClickhouseOutput) getDatabase() string { dbAndTable := strings.Split(c.table, ".") dbName := "default" if len(dbAndTable) == 2 { dbName = dbAndTable[0] } return dbName } func init() { Register("Clickhouse", newClickhouseOutput) } func newClickhouseOutput(config map[interface{}]interface{}) topology.Output { rand.Seed(time.Now().UnixNano()) p := &ClickhouseOutput{ config: config, } if v, ok := config["fields"]; ok { for _, f := range v.([]interface{}) { p.fields = append(p.fields, f.(string)) } } if v, ok := config["auto_convert"]; ok { p.autoConvert = v.(bool) } else { p.autoConvert = true } if v, ok := config["table"]; ok { p.table = v.(string) } else { glog.Fatalf("table must be set in clickhouse output") } if v, ok := config["hosts"]; ok { for _, h := range v.([]interface{}) { p.hosts = append(p.hosts, h.(string)) } } else { glog.Fatalf("hosts must be set in clickhouse output") } if v, ok := config["username"]; ok { p.username = v.(string) } if v, ok := config["password"]; ok { p.password = v.(string) } debug := false if v, ok := config["debug"]; ok { debug = v.(bool) } connMaxLifetime := 0 if v, ok := config["conn_max_life_time"]; ok { connMaxLifetime = v.(int) } dbs := make([]*sql.DB, 0) for _, host := range p.hosts { dataSourceName := fmt.Sprintf("%s?database=%s&username=%s&password=%s&debug=%v", host, p.getDatabase(), p.username, p.password, debug) if db, err := sql.Open("clickhouse", dataSourceName); err == nil { if err := db.Ping(); err != nil { if exception, ok := err.(*clickhouse.Exception); ok { glog.Errorf("[%d] %s \n%s\n", exception.Code, exception.Message, exception.StackTrace) } else { glog.Errorf("clickhouse ping error: %s", err) } } else { db.SetConnMaxLifetime(time.Second * time.Duration(connMaxLifetime)) dbs = append(dbs, db) } } else { glog.Errorf("open %s error: %s", host, err) } } glog.V(5).Infof("%d available clickhouse hosts", len(dbs)) if len(dbs) == 0 { glog.Fatal("no available host") } dbsI := make([]interface{}, len(dbs)) for i, h := range dbs { dbsI[i] = h } p.dbSelector = NewRRHostSelector(dbsI, 3) p.setColumnDefault() if len(p.fields) <= 0 { glog.Fatalf("fields not set in clickhouse output and could get fields from clickhouse table") } p.fieldsLength = len(p.fields) fields := make([]string, p.fieldsLength) for i := range fields { fields[i] = fmt.Sprintf(`"%s"`, p.fields[i]) } questionMarks := make([]string, p.fieldsLength) for i := 0; i < p.fieldsLength; i++ { questionMarks[i] = "?" 
} p.query = fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", p.table, strings.Join(fields, ","), strings.Join(questionMarks, ",")) glog.V(5).Infof("query: %s", p.query) concurrent := 1 if v, ok := config["concurrent"]; ok { concurrent = v.(int) } p.concurrent = concurrent p.closeChan = make(chan bool, concurrent) p.bulkChan = make(chan []map[string]interface{}, concurrent) for i := 0; i < concurrent; i++ { p.wg.Add(1) go func() { for { select { case events := <-p.bulkChan: p.innerFlush(events) case <-p.closeChan: p.wg.Done() return } } }() } if v, ok := config["bulk_actions"]; ok { p.bulk_actions = v.(int) } else { p.bulk_actions = CLICKHOUSE_DEFAULT_BULK_ACTIONS } var flush_interval int if v, ok := config["flush_interval"]; ok { flush_interval = v.(int) } else { flush_interval = CLICKHOUSE_DEFAULT_FLUSH_INTERVAL } go func() { for range time.NewTicker(time.Second * time.Duration(flush_interval)).C { p.flush() } }() return p } // convert int and float fields to corresponding type func (c *ClickhouseOutput) convert(event map[string]interface{}) { for _, key := range c.transIntColumn { if keyIntValue, ok := event[key]; ok { if intConverterValue, err := cast.ToInt64E(keyIntValue); err == nil { event[key] = intConverterValue } else { glog.V(10).Infof("ch_output convert intType error: %s", err) event[key] = nil } } } for _, key := range c.transIntArrayColumn { if keyArrayValue, ok := event[key]; ok { arrayIntValue := keyArrayValue.([]interface{}) ints := make([]int64, len(arrayIntValue)) for i, v := range arrayIntValue { if v, err := cast.ToInt64E(v); err == nil { ints[i] = v } else { glog.V(10).Infof("ch_output convert arrayIntType error: %s", err) ints[i] = 0 } } event[key] = ints } } for _, key := range c.transFloatColumn { if keyFloatValue, ok := event[key]; ok { floatConverterValue, err := cast.ToFloat64E(keyFloatValue) if err == nil { event[key] = floatConverterValue } else { glog.V(10).Infof("ch_output convert floatType error: %s", err) event[key] = nil } } } } func (c *ClickhouseOutput) innerFlush(events []map[string]interface{}) { execution_id := atomic.AddUint64(&c.execution_id, 1) glog.Infof("write %d docs to clickhouse with execution_id %d", len(events), execution_id) for { nextdb := c.dbSelector.Next() /*** not ReduceWeight for now , so this should not happen if nextdb == nil { glog.Info("no available db, wait for 30s") time.Sleep(30 * time.Second) continue } ****/ tx, err := nextdb.(*sql.DB).Begin() if err != nil { glog.Errorf("db begin to create transaction error: %s", err) continue } defer tx.Rollback() stmt, err := tx.Prepare(c.query) if err != nil { glog.Errorf("transaction prepare statement error: %s", err) return } defer stmt.Close() for _, event := range events { if c.autoConvert { c.convert(event) } args := make([]interface{}, c.fieldsLength) for i, field := range c.fields { if v, ok := event[field]; ok && v != nil { args[i] = v } else { if vv, ok := c.defaultValue[field]; ok { args[i] = vv } else { // this should not happen args[i] = "" } } } if _, err := stmt.Exec(args...); err != nil { glog.Errorf("exec clickhouse insert %v error: %s", event, err) return } } if err := tx.Commit(); err != nil { glog.Errorf("exec clickhouse commit error: %s", err) return } glog.Infof("%d docs have been committed to clickhouse", len(events)) return } } func (c *ClickhouseOutput) flush() { c.mux.Lock() if len(c.events) > 0 { events := c.events c.events = make([]map[string]interface{}, 0, c.bulk_actions) c.bulkChan <- events } c.mux.Unlock() } // Emit appends event to c.events, and pushes to
bulkChan if needed func (c *ClickhouseOutput) Emit(event map[string]interface{}) { c.mux.Lock() c.events = append(c.events, event) if len(c.events) < c.bulk_actions { c.mux.Unlock() return } events := c.events c.events = make([]map[string]interface{}, 0, c.bulk_actions) c.mux.Unlock() c.bulkChan <- events } func (c *ClickhouseOutput) awaitclose(timeout time.Duration) { exit := make(chan bool) defer func() { select { case <-exit: glog.Info("all clickhouse flush jobs done. return") return case <-time.After(timeout): glog.Info("clickhouse await timeout. return") return } }() defer func() { go func() { c.wg.Wait() exit <- true }() }() glog.Info("try to write remaining docs to clickhouse") c.mux.Lock() if len(c.events) <= 0 { glog.Info("no docs remain, return") c.mux.Unlock() } else { events := c.events c.events = make([]map[string]interface{}, 0, c.bulk_actions) c.mux.Unlock() glog.Infof("%d docs remain, write them to clickhouse", len(events)) c.wg.Add(1) go func() { c.innerFlush(events) c.wg.Done() }() } glog.Info("check if there are events blocking in bulk channel") for { select { case events := <-c.bulkChan: c.wg.Add(1) go func() { c.innerFlush(events) c.wg.Done() }() default: return } } } // Shutdown stops receiving messages and emitting func (c *ClickhouseOutput) Shutdown() { for i := 0; i < c.concurrent; i++ { c.closeChan <- true } c.awaitclose(30 * time.Second) }
{ nextdb := c.dbSelector.Next() db := nextdb.(*sql.DB) rows, err := db.Query(query) if err != nil { glog.Errorf("query %q error: %s", query, err) continue } defer rows.Close() columns, err := rows.Columns() if err != nil { glog.Fatalf("could not get columns from query `%s`: %s", query, err) } glog.V(10).Infof("desc table columns: %v", columns) for rows.Next() { values := make([]interface{}, 0) for range columns { var a string values = append(values, &a) } if err := rows.Scan(values...); err != nil { glog.Fatalf("scan rows error: %s", err) } descMap := make(map[string]string) for i, c := range columns { value := *values[i].(*string) if c == "type" { // special handling for enum types if strings.HasPrefix(value, "Enum16") { value = "Enum16" } else if strings.HasPrefix(value, "Enum8") { value = "Enum8" } } descMap[c] = value } b, err := json.Marshal(descMap) if err != nil { glog.Fatalf("marshal desc error: %s", err) } rowDesc := rowDesc{} err = json.Unmarshal(b, &rowDesc) if err != nil { glog.Fatalf("unmarshal desc error: %s", err) } glog.V(5).Infof("row desc: %#v", rowDesc) c.desc[rowDesc.Name] = &rowDesc } for key1, value1 := range c.desc { switch value1.Type { case "Int64", "UInt64", "Int32", "UInt32", "Int16", "UInt16", "Int8", "UInt8", "Nullable(Int64)", "Nullable(Int32)", "Nullable(Int16)", "Nullable(Int8)": c.transIntColumn = append(c.transIntColumn, key1) case "Array(Int64)", "Array(Int32)", "Array(Int16)", "Array(Int8)": c.transIntArrayColumn = append(c.transIntArrayColumn, key1) case "Float64", "Float32", "Nullable(Float32)", "Nullable(Float64)": c.transFloatColumn = append(c.transFloatColumn, key1) } } if len(c.fields) == 0 { for key1 := range c.desc { c.fields = append(c.fields, key1) } } return } } // TODO only
conditional_block
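Note: setTableDesc in the record above scans every column of each `desc table` row into a *string destination and keys the result by column name. A compact, driver-agnostic sketch of that scan idiom, assuming the caller supplies an open *sql.DB; describeTable is an illustrative helper, not part of gohangout.

package clickhouseexample

import "database/sql"

// describeTable returns one name->value map per DESCRIBE row, using the
// same trick as setTableDesc: a fresh *string destination per column.
func describeTable(db *sql.DB, table string) ([]map[string]string, error) {
	rows, err := db.Query("desc table " + table)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	cols, err := rows.Columns()
	if err != nil {
		return nil, err
	}
	var out []map[string]string
	for rows.Next() {
		vals := make([]interface{}, len(cols))
		for i := range vals {
			vals[i] = new(string) // every DESCRIBE column scans cleanly into a string
		}
		if err := rows.Scan(vals...); err != nil {
			return nil, err
		}
		row := make(map[string]string, len(cols))
		for i, c := range cols {
			row[c] = *vals[i].(*string)
		}
		out = append(out, row)
	}
	return out, rows.Err()
}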
clickhouse_output.go
package output import ( "database/sql" "encoding/json" "fmt" "math/rand" "strconv" "strings" "sync" "sync/atomic" "time" clickhouse "github.com/ClickHouse/clickhouse-go" "github.com/childe/gohangout/topology" "github.com/golang/glog" "github.com/spf13/cast" ) const ( CLICKHOUSE_DEFAULT_BULK_ACTIONS = 1000 CLICKHOUSE_DEFAULT_FLUSH_INTERVAL = 30 ) type ClickhouseOutput struct { config map[interface{}]interface{} bulk_actions int hosts []string fields []string table string username string password string fieldsLength int query string desc map[string]*rowDesc defaultValue map[string]interface{} // columnName -> defaultValue bulkChan chan []map[string]interface{} concurrent int events []map[string]interface{} execution_id uint64 dbSelector HostSelector mux sync.Mutex wg sync.WaitGroup closeChan chan bool autoConvert bool transIntColumn []string transFloatColumn []string transIntArrayColumn []string } type rowDesc struct { Name string `json:"name"` Type string `json:"type"` DefaultType string `json:"default_type"` DefaultExpression string `json:"default_expression"` } func (c *ClickhouseOutput) setTableDesc() { c.desc = make(map[string]*rowDesc) query := fmt.Sprintf("desc table %s", c.table) glog.V(5).Info(query) for i := 0; i < c.dbSelector.Size(); i++ { nextdb := c.dbSelector.Next() db := nextdb.(*sql.DB) rows, err := db.Query(query) if err != nil { glog.Errorf("query %q error: %s", query, err) continue } defer rows.Close() columns, err := rows.Columns() if err != nil { glog.Fatalf("could not get columns from query `%s`: %s", query, err) } glog.V(10).Infof("desc table columns: %v", columns) descMap := make(map[string]string) for _, c := range columns { descMap[c] = "" } for rows.Next() { values := make([]interface{}, 0) for range columns { var a string values = append(values, &a) } if err := rows.Scan(values...); err != nil { glog.Fatalf("scan rows error: %s", err) } descMap := make(map[string]string) for i, c := range columns { value := *values[i].(*string) if c == "type" { // 特殊处理枚举类型 if strings.HasPrefix(value, "Enum16") { value = "Enum16" } else if strings.HasPrefix(value, "Enum8") { value = "Enum8" } } descMap[c] = value } b, err := json.Marshal(descMap) if err != nil { glog.Fatalf("marshal desc error: %s", err) } rowDesc := rowDesc{} err = json.Unmarshal(b, &rowDesc) if err != nil { glog.Fatalf("marshal desc error: %s", err) } glog.V(5).Infof("row desc: %#v", rowDesc) c.desc[rowDesc.Name] = &rowDesc } for key1, value1 := range c.desc { switch value1.Type { case "Int64", "UInt64", "Int32", "UInt32", "Int16", "UInt16", "Int8", "UInt8", "Nullable(Int64)", "Nullable(Int32)", "Nullable(Int16)", "Nullable(Int8)": c.transIntColumn = append(c.transIntColumn, key1) case "Array(Int64)", "Array(Int32)", "Array(Int16)", "Array(Int8)": c.transIntArrayColumn = append(c.transIntArrayColumn, key1) case "Float64", "Float32", "Nullable(Float32)", "Nullable(Float64)": c.transFloatColumn = append(c.transFloatColumn, key1) } } if len(c.fields) == 0 { for key1 := range c.desc { c.fields = append(c.fields, key1) } } return } } // TODO only string, number and ip DEFAULT expression is supported for now func (c *ClickhouseOutput) setColumnDefault() { c.setTableDesc() c.defaultValue = make(map[string]interface{}) var defaultValue *string for columnName, d := range c.desc { switch d.DefaultType { case "DEFAULT": defaultValue = &(d.DefaultExpression) case "MATERIALIZED": glog.Fatal("parse default value: MATERIALIZED expression not supported") case "ALIAS": glog.Fatal("parse default value: ALIAS expression not 
supported") case "": defaultValue = nil default: glog.Fatal("parse default value: only DEFAULT expression supported") } switch d.Type { case "String", "LowCardinality(String)": if defaultValue == nil { c.defaultValue[columnName] = "" } else { c.defaultValue[columnName] = *defaultValue } case "Date", "DateTime", "DateTime64": c.defaultValue[columnName] = time.Unix(0, 0) case "Nullable(Int64)", "Nullable(Int32)", "Nullable(Int16)", "Nullable(Int8)", "Nullable(Float32)", "Nullable(Float64)": c.defaultValue[columnName] = nil case "UInt8", "UInt16", "UInt32", "UInt64", "Int8", "Int16", "Int32", "Int64": if defaultValue == nil { c.defaultValue[columnName] = 0 } else { i, e := strconv.ParseInt(*defaultValue, 10, 64) if e == nil { c.defaultValue[columnName] = i } else { glog.Fatalf("parse default value `%v` error: %v", defaultValue, e) } } case "Float32", "Float64": if defaultValue == nil { c.defaultValue[columnName] = 0.0 } else { i, e := strconv.ParseFloat(*defaultValue, 64) if e == nil { c.defaultValue[columnName] = i } else { glog.Fatalf("parse default value `%v` error: %v", defaultValue, e) } } case "IPv4": c.defaultValue[columnName] = "0.0.0.0" case "IPv6": c.defaultValue[columnName] = "::" case "Array(String)", "Array(IPv4)", "Array(IPv6)", "Array(Date)", "Array(DateTime)": c.defaultValue[columnName] = clickhouse.Array([]string{}) case "Array(UInt8)": c.defaultValue[columnName] = clickhouse.Array([]uint8{}) case "Array(UInt16)": c.defaultValue[columnName] = clickhouse.Array([]uint16{}) case "Array(UInt32)": c.defaultValue[columnName] = clickhouse.Array([]uint32{}) case "Array(UInt64)": c.defaultValue[columnName] = clickhouse.Array([]uint64{}) case "Array(Int8)": c.defaultValue[columnName] = clickhouse.Array([]int8{}) case "Array(Int16)": c.defaultValue[columnName] = clickhouse.Array([]int16{}) case "Array(Int32)": c.defaultValue[columnName] = clickhouse.Array([]int32{}) case "Array(Int64)": c.defaultValue[columnName] = clickhouse.Array([]int64{}) case "Array(Float32)": c.defaultValue[columnName] = clickhouse.Array([]float32{}) case "Array(Float64)": c.defaultValue[columnName] = clickhouse.Array([]float64{}) case "Enum16": // 需要要求列声明的最小枚举值为 '' c.defaultValue[columnName] = "" case "Enum8": // 需要要求列声明的最小枚举值为 '' c.defaultValue[columnName] = "" default: glog.Errorf("column: %s, type: %s. unsupported column type, ignore.", columnName, d.Type) continue } } } func (c *ClickhouseOutput) getDatabase() string { dbAndTable := strings.Split(c.table, ".") dbNam
lt" if len(dbAndTable) == 2 { dbName = dbAndTable[0] } return dbName } func init() { Register("Clickhouse", newClickhouseOutput) } func newClickhouseOutput(config map[interface{}]interface{}) topology.Output { rand.Seed(time.Now().UnixNano()) p := &ClickhouseOutput{ config: config, } if v, ok := config["fields"]; ok { for _, f := range v.([]interface{}) { p.fields = append(p.fields, f.(string)) } } if v, ok := config["auto_convert"]; ok { p.autoConvert = v.(bool) } else { p.autoConvert = true } if v, ok := config["table"]; ok { p.table = v.(string) } else { glog.Fatalf("table must be set in clickhouse output") } if v, ok := config["hosts"]; ok { for _, h := range v.([]interface{}) { p.hosts = append(p.hosts, h.(string)) } } else { glog.Fatalf("hosts must be set in clickhouse output") } if v, ok := config["username"]; ok { p.username = v.(string) } if v, ok := config["password"]; ok { p.password = v.(string) } debug := false if v, ok := config["debug"]; ok { debug = v.(bool) } connMaxLifetime := 0 if v, ok := config["conn_max_life_time"]; ok { connMaxLifetime = v.(int) } dbs := make([]*sql.DB, 0) for _, host := range p.hosts { dataSourceName := fmt.Sprintf("%s?database=%s&username=%s&password=%s&debug=%v", host, p.getDatabase(), p.username, p.password, debug) if db, err := sql.Open("clickhouse", dataSourceName); err == nil { if err := db.Ping(); err != nil { if exception, ok := err.(*clickhouse.Exception); ok { glog.Errorf("[%d] %s \n%s\n", exception.Code, exception.Message, exception.StackTrace) } else { glog.Errorf("clickhouse ping error: %s", err) } } else { db.SetConnMaxLifetime(time.Second * time.Duration(connMaxLifetime)) dbs = append(dbs, db) } } else { glog.Errorf("open %s error: %s", host, err) } } glog.V(5).Infof("%d available clickhouse hosts", len(dbs)) if len(dbs) == 0 { glog.Fatal("no available host") } dbsI := make([]interface{}, len(dbs)) for i, h := range dbs { dbsI[i] = h } p.dbSelector = NewRRHostSelector(dbsI, 3) p.setColumnDefault() if len(p.fields) <= 0 { glog.Fatalf("fields not set in clickhouse output and could get fields from clickhouse table") } p.fieldsLength = len(p.fields) fields := make([]string, p.fieldsLength) for i := range fields { fields[i] = fmt.Sprintf(`"%s"`, p.fields[i]) } questionMarks := make([]string, p.fieldsLength) for i := 0; i < p.fieldsLength; i++ { questionMarks[i] = "?" 
} p.query = fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", p.table, strings.Join(fields, ","), strings.Join(questionMarks, ",")) glog.V(5).Infof("query: %s", p.query) concurrent := 1 if v, ok := config["concurrent"]; ok { concurrent = v.(int) } p.concurrent = concurrent p.closeChan = make(chan bool, concurrent) p.bulkChan = make(chan []map[string]interface{}, concurrent) for i := 0; i < concurrent; i++ { p.wg.Add(1) go func() { for { select { case events := <-p.bulkChan: p.innerFlush(events) case <-p.closeChan: p.wg.Done() return } } }() } if v, ok := config["bulk_actions"]; ok { p.bulk_actions = v.(int) } else { p.bulk_actions = CLICKHOUSE_DEFAULT_BULK_ACTIONS } var flush_interval int if v, ok := config["flush_interval"]; ok { flush_interval = v.(int) } else { flush_interval = CLICKHOUSE_DEFAULT_FLUSH_INTERVAL } go func() { for range time.NewTicker(time.Second * time.Duration(flush_interval)).C { p.flush() } }() return p } // convert int and float fields to corresponding type func (c *ClickhouseOutput) convert(event map[string]interface{}) { for _, key := range c.transIntColumn { if keyIntValue, ok := event[key]; ok { if intConverterValue, err := cast.ToInt64E(keyIntValue); err == nil { event[key] = intConverterValue } else { glog.V(10).Infof("ch_output convert intType error: %s", err) event[key] = nil } } } for _, key := range c.transIntArrayColumn { if keyArrayValue, ok := event[key]; ok { arrayIntValue := keyArrayValue.([]interface{}) ints := make([]int64, len(arrayIntValue)) for i, v := range arrayIntValue { if v, err := cast.ToInt64E(v); err == nil { ints[i] = v } else { glog.V(10).Infof("ch_output convert arrayIntType error: %s", err) ints[i] = 0 } } event[key] = ints } } for _, key := range c.transFloatColumn { if keyFloatValue, ok := event[key]; ok { floatConverterValue, err := cast.ToFloat64E(keyFloatValue) if err == nil { event[key] = floatConverterValue } else { glog.V(10).Infof("ch_output convert floatType error: %s", err) event[key] = nil } } } } func (c *ClickhouseOutput) innerFlush(events []map[string]interface{}) { execution_id := atomic.AddUint64(&c.execution_id, 1) glog.Infof("write %d docs to clickhouse with execution_id %d", len(events), execution_id) for { nextdb := c.dbSelector.Next() /*** not ReduceWeight for now , so this should not happen if nextdb == nil { glog.Info("no available db, wait for 30s") time.Sleep(30 * time.Second) continue } ****/ tx, err := nextdb.(*sql.DB).Begin() if err != nil { glog.Errorf("db begin to create transaction error: %s", err) continue } defer tx.Rollback() stmt, err := tx.Prepare(c.query) if err != nil { glog.Errorf("transaction prepare statement error: %s", err) return } defer stmt.Close() for _, event := range events { if c.autoConvert { c.convert(event) } args := make([]interface{}, c.fieldsLength) for i, field := range c.fields { if v, ok := event[field]; ok && v != nil { args[i] = v } else { if vv, ok := c.defaultValue[field]; ok { args[i] = vv } else { // this should not happen args[i] = "" } } } if _, err := stmt.Exec(args...); err != nil { glog.Errorf("exec clickhouse insert %v error: %s", event, err) return } } if err := tx.Commit(); err != nil { glog.Errorf("exec clickhouse commit error: %s", err) return } glog.Infof("%d docs have been committed to clickhouse", len(events)) return } } func (c *ClickhouseOutput) flush() { c.mux.Lock() if len(c.events) > 0 { events := c.events c.events = make([]map[string]interface{}, 0, c.bulk_actions) c.bulkChan <- events } c.mux.Unlock() } // Emit appends event to c.events, and pushes to
bulkChan if needed func (c *ClickhouseOutput) Emit(event map[string]interface{}) { c.mux.Lock() c.events = append(c.events, event) if len(c.events) < c.bulk_actions { c.mux.Unlock() return } events := c.events c.events = make([]map[string]interface{}, 0, c.bulk_actions) c.mux.Unlock() c.bulkChan <- events } func (c *ClickhouseOutput) awaitclose(timeout time.Duration) { exit := make(chan bool) defer func() { select { case <-exit: glog.Info("all clickhouse flush jobs done. return") return case <-time.After(timeout): glog.Info("clickhouse await timeout. return") return } }() defer func() { go func() { c.wg.Wait() exit <- true }() }() glog.Info("try to write remaining docs to clickhouse") c.mux.Lock() if len(c.events) <= 0 { glog.Info("no docs remain, return") c.mux.Unlock() } else { events := c.events c.events = make([]map[string]interface{}, 0, c.bulk_actions) c.mux.Unlock() glog.Infof("%d docs remain, write them to clickhouse", len(events)) c.wg.Add(1) go func() { c.innerFlush(events) c.wg.Done() }() } glog.Info("check if there are events blocking in bulk channel") for { select { case events := <-c.bulkChan: c.wg.Add(1) go func() { c.innerFlush(events) c.wg.Done() }() default: return } } } // Shutdown stops receiving messages and emitting func (c *ClickhouseOutput) Shutdown() { for i := 0; i < c.concurrent; i++ { c.closeChan <- true } c.awaitclose(30 * time.Second) }
e := "defau
identifier_name
clickhouse_output.go
package output

import (
	"database/sql"
	"encoding/json"
	"fmt"
	"math/rand"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	clickhouse "github.com/ClickHouse/clickhouse-go"
	"github.com/childe/gohangout/topology"
	"github.com/golang/glog"
	"github.com/spf13/cast"
)

const (
	CLICKHOUSE_DEFAULT_BULK_ACTIONS   = 1000
	CLICKHOUSE_DEFAULT_FLUSH_INTERVAL = 30
)

type ClickhouseOutput struct {
	config map[interface{}]interface{}

	bulk_actions int
	hosts        []string
	fields       []string
	table        string
	username     string
	password     string

	fieldsLength int
	query        string
	desc         map[string]*rowDesc
	defaultValue map[string]interface{} // columnName -> defaultValue

	bulkChan   chan []map[string]interface{}
	concurrent int

	events       []map[string]interface{}
	execution_id uint64

	dbSelector HostSelector

	mux       sync.Mutex
	wg        sync.WaitGroup
	closeChan chan bool

	autoConvert         bool
	transIntColumn      []string
	transFloatColumn    []string
	transIntArrayColumn []string
}

type rowDesc struct {
	Name              string `json:"name"`
	Type              string `json:"type"`
	DefaultType       string `json:"default_type"`
	DefaultExpression string `json:"default_expression"`
}

func (c *ClickhouseOutput) setTableDesc() {
	c.desc = make(map[string]*rowDesc)

	query := fmt.Sprintf("desc table %s", c.table)
	glog.V(5).Info(query)

	for i := 0; i < c.dbSelector.Size(); i++ {
		nextdb := c.dbSelector.Next()
		db := nextdb.(*sql.DB)

		rows, err := db.Query(query)
		if err != nil {
			glog.Errorf("query %q error: %s", query, err)
			continue
		}
		defer rows.Close()

		columns, err := rows.Columns()
		if err != nil {
			glog.Fatalf("could not get columns from query `%s`: %s", query, err)
		}
		glog.V(10).Infof("desc table columns: %v", columns)

		for rows.Next() {
			values := make([]interface{}, 0)
			for range columns {
				var a string
				values = append(values, &a)
			}

			if err := rows.Scan(values...); err != nil {
				glog.Fatalf("scan rows error: %s", err)
			}

			descMap := make(map[string]string)
			for i, c := range columns {
				value := *values[i].(*string)
				if c == "type" {
					// special-case enum types: strip the value list so they
					// can be matched by their base type name below
					if strings.HasPrefix(value, "Enum16") {
						value = "Enum16"
					} else if strings.HasPrefix(value, "Enum8") {
						value = "Enum8"
					}
				}
				descMap[c] = value
			}

			b, err := json.Marshal(descMap)
			if err != nil {
				glog.Fatalf("marshal desc error: %s", err)
			}

			rowDesc := rowDesc{}
			err = json.Unmarshal(b, &rowDesc)
			if err != nil {
				glog.Fatalf("unmarshal desc error: %s", err)
			}

			glog.V(5).Infof("row desc: %#v", rowDesc)
			c.desc[rowDesc.Name] = &rowDesc
		}

		for key1, value1 := range c.desc {
			switch value1.Type {
			case "Int64", "UInt64", "Int32", "UInt32", "Int16", "UInt16", "Int8", "UInt8",
				"Nullable(Int64)", "Nullable(Int32)", "Nullable(Int16)", "Nullable(Int8)":
				c.transIntColumn = append(c.transIntColumn, key1)
			case "Array(Int64)", "Array(Int32)", "Array(Int16)", "Array(Int8)":
				c.transIntArrayColumn = append(c.transIntArrayColumn, key1)
			case "Float64", "Float32", "Nullable(Float32)", "Nullable(Float64)":
				c.transFloatColumn = append(c.transFloatColumn, key1)
			}
		}

		if len(c.fields) == 0 {
			for key1 := range c.desc {
				c.fields = append(c.fields, key1)
			}
		}

		return
	}
}
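setTableDesc round-trips each `desc table` row through JSON to populate a typed rowDesc. A minimal standalone sketch of that conversion (the column values here are hypothetical, for illustration only):

package main

import (
	"encoding/json"
	"fmt"
)

type rowDesc struct {
	Name              string `json:"name"`
	Type              string `json:"type"`
	DefaultType       string `json:"default_type"`
	DefaultExpression string `json:"default_expression"`
}

func main() {
	// One row of `desc table` output, as column-name -> value
	// (hypothetical values).
	descMap := map[string]string{
		"name":               "status_code",
		"type":               "Int32",
		"default_type":       "DEFAULT",
		"default_expression": "0",
	}

	// Marshal the generic map, then unmarshal into the typed struct,
	// which is exactly the trick setTableDesc uses.
	b, _ := json.Marshal(descMap)
	var d rowDesc
	_ = json.Unmarshal(b, &d)

	fmt.Printf("%+v\n", d) // {Name:status_code Type:Int32 DefaultType:DEFAULT DefaultExpression:0}
}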
supported") case "": defaultValue = nil default: glog.Fatal("parse default value: only DEFAULT expression supported") } switch d.Type { case "String", "LowCardinality(String)": if defaultValue == nil { c.defaultValue[columnName] = "" } else { c.defaultValue[columnName] = *defaultValue } case "Date", "DateTime", "DateTime64": c.defaultValue[columnName] = time.Unix(0, 0) case "Nullable(Int64)", "Nullable(Int32)", "Nullable(Int16)", "Nullable(Int8)", "Nullable(Float32)", "Nullable(Float64)": c.defaultValue[columnName] = nil case "UInt8", "UInt16", "UInt32", "UInt64", "Int8", "Int16", "Int32", "Int64": if defaultValue == nil { c.defaultValue[columnName] = 0 } else { i, e := strconv.ParseInt(*defaultValue, 10, 64) if e == nil { c.defaultValue[columnName] = i } else { glog.Fatalf("parse default value `%v` error: %v", defaultValue, e) } } case "Float32", "Float64": if defaultValue == nil { c.defaultValue[columnName] = 0.0 } else { i, e := strconv.ParseFloat(*defaultValue, 64) if e == nil { c.defaultValue[columnName] = i } else { glog.Fatalf("parse default value `%v` error: %v", defaultValue, e) } } case "IPv4": c.defaultValue[columnName] = "0.0.0.0" case "IPv6": c.defaultValue[columnName] = "::" case "Array(String)", "Array(IPv4)", "Array(IPv6)", "Array(Date)", "Array(DateTime)": c.defaultValue[columnName] = clickhouse.Array([]string{}) case "Array(UInt8)": c.defaultValue[columnName] = clickhouse.Array([]uint8{}) case "Array(UInt16)": c.defaultValue[columnName] = clickhouse.Array([]uint16{}) case "Array(UInt32)": c.defaultValue[columnName] = clickhouse.Array([]uint32{}) case "Array(UInt64)": c.defaultValue[columnName] = clickhouse.Array([]uint64{}) case "Array(Int8)": c.defaultValue[columnName] = clickhouse.Array([]int8{}) case "Array(Int16)": c.defaultValue[columnName] = clickhouse.Array([]int16{}) case "Array(Int32)": c.defaultValue[columnName] = clickhouse.Array([]int32{}) case "Array(Int64)": c.defaultValue[columnName] = clickhouse.Array([]int64{}) case "Array(Float32)": c.defaultValue[columnName] = clickhouse.Array([]float32{}) case "Array(Float64)": c.defaultValue[columnName] = clickhouse.Array([]float64{}) case "Enum16": // 需要要求列声明的最小枚举值为 '' c.defaultValue[columnName] = "" case "Enum8": // 需要要求列声明的最小枚举值为 '' c.defaultValue[columnName] = "" default: glog.Errorf("column: %s, type: %s. unsupported column type, ignore.", columnName, d.Type) continue } } } func (c *ClickhouseOutput) getDatabase() string { dbAndTable := strings.Split(c.table, ".") dbName := "default" if len(dbAndTable) == 2 { dbName = dbAndTable[0] } return dbName } func init() { Register("Clickhouse", newClickhouseOutput) } func newClickhouseOutput(config map[interface{}]interface{}) topology.Output { rand.Seed(time.Now().UnixNano()) p := &ClickhouseOutput{ config: c
func newClickhouseOutput(config map[interface{}]interface{}) topology.Output {
	rand.Seed(time.Now().UnixNano())
	p := &ClickhouseOutput{
		config: config,
	}

	if v, ok := config["fields"]; ok {
		for _, f := range v.([]interface{}) {
			p.fields = append(p.fields, f.(string))
		}
	}

	if v, ok := config["auto_convert"]; ok {
		p.autoConvert = v.(bool)
	} else {
		p.autoConvert = true
	}

	if v, ok := config["table"]; ok {
		p.table = v.(string)
	} else {
		glog.Fatal("table must be set in clickhouse output")
	}

	if v, ok := config["hosts"]; ok {
		for _, h := range v.([]interface{}) {
			p.hosts = append(p.hosts, h.(string))
		}
	} else {
		glog.Fatal("hosts must be set in clickhouse output")
	}

	if v, ok := config["username"]; ok {
		p.username = v.(string)
	}
	if v, ok := config["password"]; ok {
		p.password = v.(string)
	}

	debug := false
	if v, ok := config["debug"]; ok {
		debug = v.(bool)
	}

	connMaxLifetime := 0
	if v, ok := config["conn_max_life_time"]; ok {
		connMaxLifetime = v.(int)
	}

	dbs := make([]*sql.DB, 0)
	for _, host := range p.hosts {
		dataSourceName := fmt.Sprintf("%s?database=%s&username=%s&password=%s&debug=%v",
			host, p.getDatabase(), p.username, p.password, debug)
		if db, err := sql.Open("clickhouse", dataSourceName); err == nil {
			if err := db.Ping(); err != nil {
				if exception, ok := err.(*clickhouse.Exception); ok {
					glog.Errorf("[%d] %s\n%s\n", exception.Code, exception.Message, exception.StackTrace)
				} else {
					glog.Errorf("clickhouse ping error: %s", err)
				}
			} else {
				db.SetConnMaxLifetime(time.Second * time.Duration(connMaxLifetime))
				dbs = append(dbs, db)
			}
		} else {
			glog.Errorf("open %s error: %s", host, err)
		}
	}

	glog.V(5).Infof("%d available clickhouse hosts", len(dbs))
	if len(dbs) == 0 {
		glog.Fatal("no available host")
	}

	dbsI := make([]interface{}, len(dbs))
	for i, h := range dbs {
		dbsI[i] = h
	}
	p.dbSelector = NewRRHostSelector(dbsI, 3)

	p.setColumnDefault()
	if len(p.fields) <= 0 {
		glog.Fatal("fields not set in clickhouse output and could not get fields from clickhouse table")
	}
	p.fieldsLength = len(p.fields)

	fields := make([]string, p.fieldsLength)
	for i := range fields {
		fields[i] = fmt.Sprintf(`"%s"`, p.fields[i])
	}
	questionMarks := make([]string, p.fieldsLength)
	for i := 0; i < p.fieldsLength; i++ {
		questionMarks[i] = "?"
	}
	p.query = fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", p.table, strings.Join(fields, ","), strings.Join(questionMarks, ","))
	glog.V(5).Infof("query: %s", p.query)

	concurrent := 1
	if v, ok := config["concurrent"]; ok {
		concurrent = v.(int)
	}
	p.concurrent = concurrent
	p.closeChan = make(chan bool, concurrent)
	p.bulkChan = make(chan []map[string]interface{}, concurrent)
	for i := 0; i < concurrent; i++ {
		p.wg.Add(1)
		go func() {
			for {
				select {
				case events := <-p.bulkChan:
					p.innerFlush(events)
				case <-p.closeChan:
					p.wg.Done()
					return
				}
			}
		}()
	}

	if v, ok := config["bulk_actions"]; ok {
		p.bulk_actions = v.(int)
	} else {
		p.bulk_actions = CLICKHOUSE_DEFAULT_BULK_ACTIONS
	}

	var flush_interval int
	if v, ok := config["flush_interval"]; ok {
		flush_interval = v.(int)
	} else {
		flush_interval = CLICKHOUSE_DEFAULT_FLUSH_INTERVAL
	}
	go func() {
		for range time.NewTicker(time.Second * time.Duration(flush_interval)).C {
			p.flush()
		}
	}()

	return p
}
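For illustration, this is how the constructor's query text comes together; the table and field names below are made up, but the quoting and placeholder logic mirrors newClickhouseOutput:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical config values; the key names match what
	// newClickhouseOutput reads from its config map.
	table := "logs.access"                         // config["table"]
	fieldNames := []string{"ts", "host", "status"} // config["fields"]

	// Quote each column name and build one "?" placeholder per column.
	fields := make([]string, len(fieldNames))
	for i, f := range fieldNames {
		fields[i] = fmt.Sprintf(`"%s"`, f)
	}
	questionMarks := make([]string, len(fieldNames))
	for i := range questionMarks {
		questionMarks[i] = "?"
	}

	query := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)",
		table, strings.Join(fields, ","), strings.Join(questionMarks, ","))
	fmt.Println(query) // INSERT INTO logs.access ("ts","host","status") VALUES (?,?,?)
}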
return") return } }() defer func() { go func() { c.wg.Wait() exit <- true }() }() glog.Info("try to write remaining docs to clickhouse") c.mux.Lock() if len(c.events) <= 0 { glog.Info("no docs remain, return") c.mux.Unlock() } else { events := c.events c.events = make([]map[string]interface{}, 0, c.bulk_actions) c.mux.Unlock() glog.Infof("ramain %d docs, write them to clickhouse", len(events)) c.wg.Add(1) go func() { c.innerFlush(events) c.wg.Done() }() } glog.Info("check if there are events blocking in bulk channel") for { select { case events := <-c.bulkChan: c.wg.Add(1) go func() { c.innerFlush(events) c.wg.Done() }() default: return } } } // Shutdown would stop receiving message and emiting func (c *ClickhouseOutput) Shutdown() { for i := 0; i < c.concurrent; i++ { c.closeChan <- true } c.awaitclose(30 * time.Second) }
func (c *ClickhouseOutput) awaitclose(timeout time.Duration) {
	exit := make(chan bool)
	defer func() {
		select {
		case <-exit:
			glog.Info("all clickhouse flush jobs done. return")
			return
		case <-time.After(timeout):
			glog.Info("clickhouse await timeout. return")
			return
		}
	}()

	defer func() {
		go func() {
			c.wg.Wait()
			exit <- true
		}()
	}()

	glog.Info("try to write remaining docs to clickhouse")
	c.mux.Lock()
	if len(c.events) <= 0 {
		glog.Info("no docs remain, return")
		c.mux.Unlock()
	} else {
		events := c.events
		c.events = make([]map[string]interface{}, 0, c.bulk_actions)
		c.mux.Unlock()

		glog.Infof("%d docs remain, write them to clickhouse", len(events))
		c.wg.Add(1)
		go func() {
			c.innerFlush(events)
			c.wg.Done()
		}()
	}

	glog.Info("check if there are events blocking in bulk channel")
	for {
		select {
		case events := <-c.bulkChan:
			c.wg.Add(1)
			go func() {
				c.innerFlush(events)
				c.wg.Done()
			}()
		default:
			return
		}
	}
}

// Shutdown would stop receiving messages and emitting
func (c *ClickhouseOutput) Shutdown() {
	for i := 0; i < c.concurrent; i++ {
		c.closeChan <- true
	}
	c.awaitclose(30 * time.Second)
}
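awaitclose bounds its wait on the WaitGroup with a timeout by racing a completion channel against time.After. The core of that idiom in isolation, as a minimal sketch:

package main

import (
	"fmt"
	"sync"
	"time"
)

// waitWithTimeout mirrors the awaitclose idea: wait for outstanding
// flush goroutines, but give up after a deadline.
func waitWithTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
		return true // all jobs finished
	case <-time.After(timeout):
		return false // gave up waiting
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		time.Sleep(50 * time.Millisecond) // a pretend flush
		wg.Done()
	}()
	fmt.Println("clean shutdown:", waitWithTimeout(&wg, time.Second))
}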
privacy-statement.js
import Layout from "../components/layout" import Metadata from "../components/metadata" import React from "react" const PrivacyStatement = () => ( <Layout> <Metadata title="Privacy Statement" /> <section> <div className="row"> <div className="col-xs-12 col-sm-9 col-md-9 col-lg-9"> <h1 className="title">Privacy Statement</h1> <p> This privacy policy applies between you, the User of this Website and Jugendstil Ltd, the owner and provider of this Website. Jugendstil Ltd takes the privacy of your information very seriously. This privacy policy applies to our use of any and all Data collected by us or provided by you in relation to your use of the Website. </p> <p>Please read this privacy policy carefully.</p> <h2>Definitions and interpretation</h2> <p>In this privacy policy, the following definitions are used:</p> <table> <tbody> <tr> <th scope="row">Data</th> <td> collectively all information that you submit to Jugendstil Ltd via the Website. This definition incorporates, where applicable, the definitions provided in the Data Protection Laws; </td> </tr> <tr> <th scope="row">Cookies</th> <td> a small text file placed on your computer by this Website when you visit certain parts of the Website and/or when you use certain features of the Website. Details of the cookies used
<td> any applicable law relating to the processing of personal Data, including but not limited to the Directive 96/46/EC (Data Protection Directive) or the GDPR, and any national implementing laws, regulations and secondary legislation, for as long as the GDPR is effective in the UK; </td> </tr> <tr> <th scope="row">GDPR</th> <td>the General Data Protection Regulation (EU) 2016/679;</td> </tr> <tr> <th scope="row">Jugendstil Ltd, we or us</th> <td> Jugendstil Ltd, a company incorporated in England and Wales with registered number 11226642 whose registered office is at 20-22 Wenlock Road, N1 7GU London; </td> </tr> <tr> <th scope="row">UK and EU Cookie Law</th> <td> the Privacy and Electronic Communications (EC Directive) Regulations 2003 as amended by the Privacy and Electronic Communications (EC Directive) (Amendment) Regulations 2011; </td> </tr> <tr> <th scope="row">User or you</th> <td> any third party that accesses the Website and is not either (i) employed by Jugendstil Ltd and acting in the course of their employment or (ii) engaged as a consultant or otherwise providing services to Jugendstil Ltd and accessing the Website in connection with the provision of such services; and </td> </tr> <tr> <th scope="row">Website</th> <td> the website that you are currently using, https://jugendstil.io, and any sub-domains of this site unless expressly excluded by their own terms and conditions. </td> </tr> </tbody> </table> <p> In this privacy policy, unless the context requires a different interpretation: </p> <ol> <li>the singular includes the plural and vice versa;</li> <li> references to sub-clauses, clauses, schedules or appendices are to sub-clauses, clauses, schedules or appendices of this privacy policy; </li> <li> a reference to a person includes firms, companies, government entities, trusts and partnerships; </li> <li> "including" is understood to mean "including without limitation"; </li> <li> reference to any statutory provision includes any modification or amendment of it; </li> <li> the headings and sub-headings do not form part of this privacy policy. </li> </ol> <h2>Scope of this privacy policy</h2> <p> This privacy policy applies only to the actions of Jugendstil Ltd and Users with respect to this Website. It does not extend to any websites that can be accessed from this Website including, but not limited to, any links we may provide to social media websites. </p> <p> For purposes of the applicable Data Protection Laws, Jugendstil Ltd is the "data controller". This means that Jugendstil Ltd determines the purposes for which, and the manner in which, your Data is processed. 
</p> <h2>Data collected</h2> <p> We may collect the following Data, which includes personal Data, from you: </p> <ol> <li>name;</li> <li>date of birth;</li> <li>gender;</li> <li>job title;</li> <li>profession;</li> <li> contact Information such as email addresses and telephone numbers; </li> <li> demographic information such as postcode, preferences and interests; </li> <li>financial information such as credit / debit card numbers;</li> <li>IP address (automatically collected);</li> <li>web browser type and version (automatically collected);</li> <li>operating system (automatically collected);</li> <li> a list of URLs starting with a referring site, your activity on this Website, and the site you exit to (automatically collected); </li> </ol> <p>in each case, in accordance with this privacy policy.</p> <h2>How we collect Data</h2> <p>We collect Data in the following ways:</p> <ol> <li>data is given to us by you; and</li> <li>data is collected automatically.</li> </ol> <h2>Data that is given to us by you</h2> <p> Jugendstil Ltd will collect your Data in a number of ways, for example: </p> <ol> <li> when you contact us through the Website, by telephone, post, e-mail or through any other means; </li> <li> when you register with us and set up an account to receive our products/services; </li> <li> when you complete surveys that we use for research purposes (although you are not obliged to respond to them); </li> <li> when you enter a competition or promotion through a social media channel; </li> <li> when you make payments to us, through this Website or otherwise; </li> <li>when you elect to receive marketing communications from us;</li> <li>when you use our services;</li> </ol> <p>in each case, in accordance with this privacy policy.</p> <h2>Data that is collected automatically</h2> <p> To the extent that you access the Website, we will collect your Data automatically, for example: </p> <ol> <li> we automatically collect some information about your visit to the Website. This information helps us to make improvements to Website content and navigation, and includes your IP address, the date, times and frequency with which you access the Website and the way you use and interact with its content. </li> <li> we will collect your Data automatically via cookies, in line with the cookie settings on your browser. For more information about cookies, and how we use them on the Website, see the section below, headed "Cookies". </li> </ol> <h2>Our use of Data</h2> <p> Any or all of the above Data may be required by us from time to time in order to provide you with the best possible service and experience when using our Website. Specifically, Data may be used by us for the following reasons: </p> <ol> <li>internal record keeping;</li> <li>improvement of our products / services;</li> <li> transmission by email of marketing materials that may be of interest to you; </li> <li> contact for market research purposes which may be done using email, telephone, fax or mail. Such information may be used to customise or update the Website; </li> </ol> <p>in each case, in accordance with this privacy policy.</p> <p> We may use your Data for the above purposes if we deem it necessary to do so for our legitimate interests. If you are not satisfied with this, you have the right to object in certain circumstances (see the section headed "Your rights" below). 
</p> p> <p> For the delivery of direct marketing to you via e-mail, we'll need your consent, whether via an opt-in or soft-opt-in: </p> <ol> <li> soft opt-in consent is a specific type of consent which applies when you have previously engaged with us (for example, you contact us to ask us for more details about a particular product/service, and we are marketing similar products/services). Under "soft opt-in" consent, we will take your consent as given unless you opt-out. </li> <li> for other types of e-marketing, we are required to obtain your explicit consent; that is, you need to take positive and affirmative action when consenting by, for example, checking a tick box that we'll provide. </li> <li> if you are not satisfied about our approach to marketing, you have the right to withdraw consent at any time. To find out how to withdraw your consent, see the section headed "Your rights" below. </li> </ol> <p> When you register with us and set up an account to receive our services, the legal basis for this processing is the performance of a contract between you and us and/or taking steps, at your request, to enter into such a contract. </p> <h2>Keeping Data secure</h2> <p> We will use technical and organisational measures to safeguard your Data, for example: </p> <ol> <li> access to your account is controlled by a password and a user name that is unique to you. </li> <li>we store your Data on secure servers.</li> <li> payment details are encrypted using SSL technology (typically you will see a lock icon or green address bar (or both) in your browser when we use this technology. </li> </ol> <p> Technical and organisational measures include measures to deal with any suspected data breach. If you suspect any misuse or loss or unauthorised access to your Data, please let us know immediately by contacting us via this e-mail address: [email protected]. </p> <p> If you want detailed information from Get Safe Online on how to protect your information and your computers and devices against fraud, identity theft, viruses and many other online problems, please visit www.getsafeonline.org. Get Safe Online is supported by HM Government and leading businesses. </p> <h2>Data retention</h2> <p> Unless a longer retention period is required or permitted by law, we will only hold your Data on our systems for the period necessary to fulfil the purposes outlined in this privacy policy or until you request that the Data be deleted. </p> <p> Even if we delete your Data, it may persist on backup or archival media for legal, tax or regulatory purposes. </p> <h2>Your rights</h2> <p>You have the following rights in relation to your Data:</p> <ol> <li> <strong>Right to access</strong> - the right to request (i) copies of the information we hold about you at any time, or (ii) that we modify, update or delete such information. If we provide you with access to the information we hold about you, we will not charge you for this, unless your request is "manifestly unfounded or excessive." Where we are legally permitted to do so, we may refuse your request. If we refuse your request, we will tell you the reasons why. </li> <li> <strong>Right to correct</strong> - the right to have your Data rectified if it is inaccurate or incomplete. </li> <li> <strong>Right to erase</strong> - the right to request that we delete or remove your Data from our systems. </li> <li> <strong>Right to restrict our use of your Data</strong> - the right to "block" us from using your Data or limit the way in which we can use it. 
</li> <li> <strong>Right to data portability</strong> - the right to request that we move, copy or transfer your Data. </li> <li> <strong>Right to object</strong>- the right to object to our use of your Data including where we use it for our legitimate interests. </li> </ol> <p> To make enquiries, exercise any of your rights set out above, or withdraw your consent to the processing of your Data (where consent is our legal basis for processing your Data), please contact us via this e-mail address: [email protected]. </p> <p> If you are not satisfied with the way a complaint you make in relation to your Data is handled by us, you may be able to refer your complaint to the relevant data protection authority. For the UK, this is the Information Commissioner's Office (ICO). The ICO's contact details can be found on their website at https://ico.org.uk/. </p> <p> It is important that the Data we hold about you is accurate and current. Please keep us informed if your Data changes during the period for which we hold it. </p> <h2>Transfers outside the European Economic Area</h2> <p> Data which we collect from you may be stored and processed in and transferred to countries outside of the European Economic Area (EEA). For example, this could occur if our servers are located in a country outside the EEA or one of our service providers is situated in a country outside the EEA. We also share information with our group companies, some of which are located outside the EEA. </p> <p> We will only transfer Data outside the EEA where it is compliant with data protection legislation and the means of transfer provides adequate safeguards in relation to your data, eg by way of data transfer agreement, incorporating the current standard contractual clauses adopted by the European Commission, or by signing up to the EU-US Privacy Shield Framework, in the event that the organisation in receipt of the Data is based in the United States of America. </p> <p> To ensure that your Data receives an adequate level of protection, we have put in place appropriate safeguards and procedures with the third parties we share your Data with. This ensures your Data is treated by those third parties in a way that is consistent with the Data Protection Laws. </p> <h2>Links to other websites</h2> <p> This Website may, from time to time, provide links to other websites. We have no control over such websites and are not responsible for the content of these websites. This privacy policy does not extend to your use of such websites. You are advised to read the privacy policy or statement of other websites prior to using them. </p> <h2>Changes of business ownership and control</h2> <p> Jugendstil Ltd may, from time to time, expand or reduce our business and this may involve the sale and/or the transfer of control of all or part of Jugendstil Ltd. Data provided by Users will, where it is relevant to any part of our business so transferred, be transferred along with that part and the new owner or newly controlling party will, under the terms of this privacy policy, be permitted to use the Data for the purposes for which it was originally supplied to us. </p> <p> We may also disclose Data to a prospective purchaser of our business or any part of it. </p> <p> In the above instances, we will take steps with the aim of ensuring your privacy is protected. </p> <h2>Cookies</h2> <p> This Website may place and access certain Cookies on your computer. 
Jugendstil Ltd uses Cookies to improve your experience of using the Website and to improve our range of products and services. Jugendstil Ltd has carefully chosen these Cookies and has taken steps to ensure that your privacy is protected and respected at all times. </p> <p> All Cookies used by this Website are used in accordance with current UK and EU Cookie Law. </p> <p> Before the Website places Cookies on your computer, you will be presented with a message bar requesting your consent to set those Cookies. By giving your consent to the placing of Cookies, you are enabling Jugendstil Ltd to provide a better experience and service to you. You may, if you wish, deny consent to the placing of Cookies; however certain features of the Website may not function fully or as intended. </p> <p>This Website may place the following Cookies:</p> <table> <thead> <tr> <th>Type of Cookie</th> <th>Purpose</th> </tr> </thead> <tbody> <tr> <td>Strictly necessary cookies</td> <td> These are cookies that are required for the operation of our website. They include, for example, cookies that enable you to log into secure areas of our website, use a shopping cart or make use of e-billing services. </td> </tr> <tr> <td>Analytical/ performance cookies</td> <td> They allow us to recognise and count the number of visitors and to see how visitors move around our website when they are using it. This helps us to improve the way our website works, for example, by ensuring that users are finding what they are looking for easily. </td> </tr> </tbody> </table> <p> You can find a list of Cookies that we use in the Cookies Schedule. </p> <p> You can choose to enable or disable Cookies in your internet browser. By default, most internet browsers accept Cookies but this can be changed. For further details, please consult the help menu in your internet browser. </p> <p> You can choose to delete Cookies at any time; however you may lose any information that enables you to access the Website more quickly and efficiently including, but not limited to, personalisation settings. </p> <p> It is recommended that you ensure that your internet browser is up-to-date and that you consult the help and guidance provided by the developer of your internet browser if you are unsure about adjusting your privacy settings. </p> <p> For more information generally on cookies, including how to disable them, please refer to aboutcookies.org. You will also find details on how to delete cookies from your computer. </p> <h2>General</h2> <p> You may not transfer any of your rights under this privacy policy to any other person. We may transfer our rights under this privacy policy where we reasonably believe your rights will not be affected. </p> <p> If any court or competent authority finds that any provision of this privacy policy (or part of any provision) is invalid, illegal or unenforceable, that provision or part-provision will, to the extent required, be deemed to be deleted, and the validity and enforceability of the other provisions of this privacy policy will not be affected. </p> <p> Unless otherwise agreed, no delay, act or omission by a party in exercising any right or remedy will be deemed a waiver of that, or any other, right or remedy. </p> <p> This Agreement will be governed by and interpreted according to the law of England and Wales. All disputes arising under the Agreement will be subject to the exclusive jurisdiction of the English and Welsh courts. 
</p> <h2>Changes to this privacy policy</h2> <p> Jugendstil Ltd reserves the right to change this privacy policy as we may deem necessary from time to time or as may be required by law. Any changes will be immediately posted on the Website and you are deemed to have accepted the terms of the privacy policy on your first use of the Website following the alterations. You may contact Jugendstil Ltd by email at [email protected]. </p> <h2>Cookies Schedule</h2> <p> Below is a list of the cookies that we use. We have tried to ensure this is complete and up to date, but if you think that we have missed a cookie or there is any discrepancy, please let us know. </p> <p>We use the following strictly necessary cookies:</p> <table> <thead> <tr> <th>Cookie Name</th> <th>Purpose</th> </tr> </thead> <tbody> <tr> <td>Essential cookies</td> <td> For improving the user’s experience and navigation of the website. </td> </tr> </tbody> </table> <p>We use the following analytical/performance cookies:</p> <table> <thead> <tr> <th>Cookie Name</th> <th>Purpose</th> </tr> </thead> <tbody> <tr> <td>Google Analytics</td> <td> For monitoring traffic to our website. Knowing how many people have visited and from where can help us build a better site. </td> </tr> <tr> <td>Google Tag Manager</td> <td> For capturing whether a visitor has come through to the website via an advertisement on a social media platform. </td> </tr> <tr> <td>LinkedIn</td> <td> For campaign reporting, tracking conversions and monitoring traffic to the website. </td> </tr> </tbody> </table> <p> This privacy policy was created using a document from Rocket Lawyer (https://www.rocketlawyer.com/gb/en). </p> <strong>30 January, 2020</strong> </div> </div> </section> </Layout> ) export default PrivacyStatement
lib.rs
// Copyright 2021-2023 Vector 35 Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use binaryninja::{
    binaryview::{BinaryView, BinaryViewExt},
    command::{register, Command},
    disassembly::{DisassemblyTextLine, InstructionTextToken, InstructionTextTokenContents},
    flowgraph::{BranchType, EdgeStyle, FlowGraph, FlowGraphNode, FlowGraphOption},
    string::BnString,
};
use dwarfreader::is_valid;
use gimli::{
    AttributeValue::{Encoding, Flag, UnitRef},
    // BigEndian,
    DebuggingInformationEntry,
    Dwarf,
    EntriesTreeNode,
    Reader,
    ReaderOffset,
    SectionId,
    Unit,
    UnitSectionOffset,
};

static PADDING: [&'static str; 23] = [
    "",
    " ",
    "  ",
    "   ",
    "    ",
    "     ",
    "      ",
    "       ",
    "        ",
    "         ",
    "          ",
    "           ",
    "            ",
    "             ",
    "              ",
    "               ",
    "                ",
    "                 ",
    "                  ",
    "                   ",
    "                    ",
    "                     ",
    "                      ",
];

// TODO : This is very much not comprehensive: see https://github.com/gimli-rs/gimli/blob/master/examples/dwarfdump.rs
fn get_info_string<R: Reader>(
    view: &BinaryView,
    dwarf: &Dwarf<R>,
    unit: &Unit<R>,
    die_node: &DebuggingInformationEntry<R>,
) -> Vec<DisassemblyTextLine> {
    // This is an estimate so "most" things won't need to resize
    let mut disassembly_lines: Vec<DisassemblyTextLine> = Vec::with_capacity(10);

    let label_value = match die_node.offset().to_unit_section_offset(unit) {
        UnitSectionOffset::DebugInfoOffset(o) => o.0,
        UnitSectionOffset::DebugTypesOffset(o) => o.0,
    }
    .into_u64();
    let label_string = format!("#0x{:08x}", label_value);
    disassembly_lines.push(DisassemblyTextLine::from(vec![
        InstructionTextToken::new(
            BnString::new(label_string),
            InstructionTextTokenContents::GotoLabel(label_value),
        ),
        InstructionTextToken::new(BnString::new(":"), InstructionTextTokenContents::Text),
    ]));

    disassembly_lines.push(DisassemblyTextLine::from(vec![InstructionTextToken::new(
        BnString::new(die_node.tag().static_string().unwrap()),
        InstructionTextTokenContents::TypeName, // TODO : KeywordToken?
    )]));

    let mut attrs = die_node.attrs();
    while let Some(attr) = attrs.next().unwrap() {
        let mut attr_line: Vec<InstructionTextToken> = Vec::with_capacity(5);
        attr_line.push(InstructionTextToken::new(
            BnString::new("  "),
            InstructionTextTokenContents::Indentation,
        ));

        let len;
        if let Some(n) = attr.name().static_string() {
            len = n.len();
            attr_line.push(InstructionTextToken::new(
                BnString::new(n),
                InstructionTextTokenContents::FieldName,
            ));
        } else {
            // This is rather unlikely, I think
            len = 1;
            attr_line.push(InstructionTextToken::new(
                BnString::new("?"),
                InstructionTextTokenContents::FieldName,
            ));
        }

        // On command line the magic number that looks good is 22, but that's too much whitespace
        // in a basic block, so I chose 18 (22 is the max with the current padding provided)
        if len < 18 {
            attr_line.push(InstructionTextToken::new(
                BnString::new(PADDING[18 - len]),
                InstructionTextTokenContents::Text,
            ));
        }
        attr_line.push(InstructionTextToken::new(
            BnString::new(" = "),
            InstructionTextTokenContents::Text,
        ));

        if let Ok(Some(addr)) = dwarf.attr_address(unit, attr.value()) {
            let addr_string = format!("0x{:08x}", addr);
            attr_line.push(InstructionTextToken::new(
                BnString::new(addr_string),
                InstructionTextTokenContents::Integer(addr),
            ));
        } else if let Ok(attr_reader) = dwarf.attr_string(unit, attr.value()) {
            if let Ok(attr_string) = attr_reader.to_string() {
                attr_line.push(InstructionTextToken::new(
                    BnString::new(attr_string.as_ref()),
                    InstructionTextTokenContents::String({
                        let (_, id, offset) =
                            dwarf.lookup_offset_id(attr_reader.offset_id()).unwrap();
                        offset.into_u64() + view.section_by_name(id.name()).unwrap().start()
                    }),
                ));
            } else {
                attr_line.push(InstructionTextToken::new(
                    BnString::new("??"),
                    InstructionTextTokenContents::Text,
                ));
            }
        } else if let Encoding(type_class) = attr.value() {
            attr_line.push(InstructionTextToken::new(
                BnString::new(type_class.static_string().unwrap()),
                InstructionTextTokenContents::TypeName,
            ));
        } else if let UnitRef(offset) = attr.value() {
            let addr = match offset.to_unit_section_offset(unit) {
                UnitSectionOffset::DebugInfoOffset(o) => o.0,
                UnitSectionOffset::DebugTypesOffset(o) => o.0,
            }
            .into_u64();
            let addr_string = format!("#0x{:08x}", addr);
            attr_line.push(InstructionTextToken::new(
                BnString::new(addr_string),
                InstructionTextTokenContents::GotoLabel(addr),
            ));
        } else if let Flag(true) = attr.value() {
            attr_line.push(InstructionTextToken::new(
                BnString::new("true"),
                InstructionTextTokenContents::Integer(1),
            ));
        } else if let Flag(false) = attr.value() {
            attr_line.push(InstructionTextToken::new(
                BnString::new("false"),
                InstructionTextTokenContents::Integer(0),
            ));
        // Fall-back cases
        } else if let Some(value) = attr.u8_value() {
            let value_string = format!("{}", value);
            attr_line.push(InstructionTextToken::new(
                BnString::new(value_string),
                InstructionTextTokenContents::Integer(value.into()),
            ));
        } else if let Some(value) = attr.u16_value() {
            let value_string = format!("{}", value);
            attr_line.push(InstructionTextToken::new(
                BnString::new(value_string),
                InstructionTextTokenContents::Integer(value.into()),
            ));
        } else if let Some(value) = attr.udata_value() {
            let value_string = format!("{}", value);
            attr_line.push(InstructionTextToken::new(
                BnString::new(value_string),
                InstructionTextTokenContents::Integer(value.into()),
            ));
        } else if let Some(value) = attr.sdata_value() {
            let value_string = format!("{}", value);
            attr_line.push(InstructionTextToken::new(
                BnString::new(value_string),
                InstructionTextTokenContents::Integer(value as u64),
            ));
        } else {
            let attr_string = format!("{:?}", attr.value());
            attr_line.push(InstructionTextToken::new(
                BnString::new(attr_string),
                InstructionTextTokenContents::Text,
            ));
        }
        disassembly_lines.push(DisassemblyTextLine::from(attr_line));
    }
    disassembly_lines
}
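The PADDING table pads each attribute name out to column 18 before the " = " separator. Std format width specifiers express the same alignment; a tiny sketch (not the plugin's code, since it must push the name and the padding as separate tokens):

fn main() {
    // Left-align each name in an 18-character field, as the
    // name-token-plus-PADDING[18 - len] pair does above.
    for name in ["DW_AT_name", "DW_AT_decl_file", "?"] {
        let padded = format!("{:<18}", name);
        println!("{}= ...", padded);
    }
}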
format!("{:?}", attr.value()); attr_line.push(InstructionTextToken::new( BnString::new(attr_string), InstructionTextTokenContents::Text, )); } disassembly_lines.push(DisassemblyTextLine::from(attr_line)); } disassembly_lines } fn process_tree<R: Reader>( view: &BinaryView, dwarf: &Dwarf<R>, unit: &Unit<R>, graph: &FlowGraph, graph_parent: &FlowGraphNode, die_node: EntriesTreeNode<R>, ) { // Namespaces only - really interesting to look at! // if (die_node.entry().tag() == constants::DW_TAG_namespace) // || (die_node.entry().tag() == constants::DW_TAG_class_type) // || (die_node.entry().tag() == constants::DW_TAG_compile_unit) // || (die_node.entry().tag() == constants::DW_TAG_subprogram) // { let new_node = FlowGraphNode::new(graph); let attr_string = get_info_string(view, dwarf, unit, die_node.entry()); new_node.set_disassembly_lines(&attr_string); graph.append(&new_node); graph_parent.add_outgoing_edge( BranchType::UnconditionalBranch, &new_node, &EdgeStyle::default(), ); let mut children = die_node.children(); while let Some(child) = children.next().unwrap() { process_tree(view, dwarf, unit, graph, &new_node, child); } // } } fn dump_dwarf(bv: &BinaryView) { let view = if bv.section_by_name(".debug_info").is_ok() { bv.to_owned() } else { bv.parent_view().unwrap() }; let graph = FlowGraph::new(); graph.set_option(FlowGraphOption::FlowGraphUsesBlockHighlights, true); graph.set_option(FlowGraphOption::FlowGraphUsesInstructionHighlights, true); let graph_root = FlowGraphNode::new(&graph); graph_root.set_lines(vec!["Graph Root"]); graph.append(&graph_root); let endian = dwarfreader::get_endian(bv); let section_reader = |section_id: SectionId| -> _ { dwarfreader::create_section_reader(section_id, bv, endian, false) }; let dwarf = Dwarf::load(&section_reader).unwrap(); let mut iter = dwarf.units(); while let Some(header) = iter.next().unwrap() { let unit = dwarf.unit(header).unwrap(); let mut entries = unit.entries(); let mut depth = 0; if let Some((delta_depth, entry)) = entries.next_dfs().unwrap() { depth += delta_depth; assert!(depth >= 0); let mut tree = unit.entries_tree(Some(entry.offset())).unwrap(); let root = tree.root().unwrap(); process_tree(&view, &dwarf, &unit, &graph, &graph_root, root); } } view.show_graph_report("DWARF", graph); } struct DWARFDump; impl Command for DWARFDump { fn action(&self, view: &BinaryView) { dump_dwarf(view); } fn valid(&self, view: &BinaryView) -> bool { is_valid(view) } } #[no_mangle] pub extern "C" fn UIPluginInit() -> bool
{ register( "DWARF Dump", "Show embedded DWARF info as a tree structure for you to navigate", DWARFDump {}, ); true }
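process_tree is a straightforward depth-first walk: one graph node per DIE, one edge from the parent. The shape of that traversal, reduced to plain std types (the Die struct here is a hypothetical stand-in for gimli's EntriesTreeNode and the Binary Ninja graph):

// A stand-in for the DIE tree: each node carries a label and children.
struct Die {
    label: String,
    children: Vec<Die>,
}

// Mirrors process_tree: visit a node, record an edge from its parent,
// then recurse into every child.
fn process_tree(parent: &str, die: &Die, edges: &mut Vec<(String, String)>) {
    edges.push((parent.to_string(), die.label.clone()));
    for child in &die.children {
        process_tree(&die.label, child, edges);
    }
}

fn main() {
    let tree = Die {
        label: "compile_unit".into(),
        children: vec![
            Die { label: "subprogram".into(), children: vec![] },
            Die { label: "class_type".into(), children: vec![] },
        ],
    };
    let mut edges = Vec::new();
    process_tree("root", &tree, &mut edges);
    println!("{:?}", edges);
}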
{
    let value_string = format!("{}", value);
    attr_line.push(InstructionTextToken::new(
        BnString::new(value_string),
        InstructionTextTokenContents::Integer(value as u64),
    ));
}
conditional_block
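In the conditional_block record above, the sdata_value arm converts a signed value with `value as u64` before storing it as the token's integer payload, so negative DWARF constants wrap around to large unsigned values. A small Python sketch of that two's-complement reinterpretation; the 64-bit mask is the Python equivalent of Rust's `as u64` cast, and nothing here is part of the gimli API.

# Python equivalent of Rust's `value as u64` on an i64:
# two's-complement reinterpretation via a 64-bit mask.
def i64_as_u64(value: int) -> int:
    return value & 0xFFFFFFFFFFFFFFFF

if __name__ == "__main__":
    print(i64_as_u64(-1))  # 18446744073709551615
    print(i64_as_u64(42))  # 42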
lib.rs
// Copyright 2021-2023 Vector 35 Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software
// See the License for the specific language governing permissions and // limitations under the License. use binaryninja::{ binaryview::{BinaryView, BinaryViewExt}, command::{register, Command}, disassembly::{DisassemblyTextLine, InstructionTextToken, InstructionTextTokenContents}, flowgraph::{BranchType, EdgeStyle, FlowGraph, FlowGraphNode, FlowGraphOption}, string::BnString, }; use dwarfreader::is_valid; use gimli::{ AttributeValue::{Encoding, Flag, UnitRef}, // BigEndian, DebuggingInformationEntry, Dwarf, EntriesTreeNode, Reader, ReaderOffset, SectionId, Unit, UnitSectionOffset, }; static PADDING: [&'static str; 23] = [ "", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", ]; // TODO : This is very much not comprehensive: see https://github.com/gimli-rs/gimli/blob/master/examples/dwarfdump.rs fn get_info_string<R: Reader>( view: &BinaryView, dwarf: &Dwarf<R>, unit: &Unit<R>, die_node: &DebuggingInformationEntry<R>, ) -> Vec<DisassemblyTextLine> { let mut disassembly_lines: Vec<DisassemblyTextLine> = Vec::with_capacity(10); // This is an estimate so "most" things won't need to resize let label_value = match die_node.offset().to_unit_section_offset(unit) { UnitSectionOffset::DebugInfoOffset(o) => o.0, UnitSectionOffset::DebugTypesOffset(o) => o.0, } .into_u64(); let label_string = format!("#0x{:08x}", label_value); disassembly_lines.push(DisassemblyTextLine::from(vec![ InstructionTextToken::new( BnString::new(label_string), InstructionTextTokenContents::GotoLabel(label_value), ), InstructionTextToken::new(BnString::new(":"), InstructionTextTokenContents::Text), ])); disassembly_lines.push(DisassemblyTextLine::from(vec![InstructionTextToken::new( BnString::new(die_node.tag().static_string().unwrap()), InstructionTextTokenContents::TypeName, // TODO : KeywordToken? 
)])); let mut attrs = die_node.attrs(); while let Some(attr) = attrs.next().unwrap() { let mut attr_line: Vec<InstructionTextToken> = Vec::with_capacity(5); attr_line.push(InstructionTextToken::new( BnString::new(" "), InstructionTextTokenContents::Indentation, )); let len; if let Some(n) = attr.name().static_string() { len = n.len(); attr_line.push(InstructionTextToken::new( BnString::new(n), InstructionTextTokenContents::FieldName, )); } else { // This is rather unlikely, I think len = 1; attr_line.push(InstructionTextToken::new( BnString::new("?"), InstructionTextTokenContents::FieldName, )); } // On command line the magic number that looks good is 22, but that's too much whitespace in a basic block, so I chose 18 (22 is the max with the current padding provided) if len < 18 { attr_line.push(InstructionTextToken::new( BnString::new(PADDING[18 - len]), InstructionTextTokenContents::Text, )); } attr_line.push(InstructionTextToken::new( BnString::new(" = "), InstructionTextTokenContents::Text, )); if let Ok(Some(addr)) = dwarf.attr_address(unit, attr.value()) { let addr_string = format!("0x{:08x}", addr); attr_line.push(InstructionTextToken::new( BnString::new(addr_string), InstructionTextTokenContents::Integer(addr), )); } else if let Ok(attr_reader) = dwarf.attr_string(unit, attr.value()) { if let Ok(attr_string) = attr_reader.to_string() { attr_line.push(InstructionTextToken::new( BnString::new(attr_string.as_ref()), InstructionTextTokenContents::String({ let (_, id, offset) = dwarf.lookup_offset_id(attr_reader.offset_id()).unwrap(); offset.into_u64() + view.section_by_name(id.name()).unwrap().start() }), )); } else { attr_line.push(InstructionTextToken::new( BnString::new("??"), InstructionTextTokenContents::Text, )); } } else if let Encoding(type_class) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new(type_class.static_string().unwrap()), InstructionTextTokenContents::TypeName, )); } else if let UnitRef(offset) = attr.value() { let addr = match offset.to_unit_section_offset(unit) { UnitSectionOffset::DebugInfoOffset(o) => o.0, UnitSectionOffset::DebugTypesOffset(o) => o.0, } .into_u64(); let addr_string = format!("#0x{:08x}", addr); attr_line.push(InstructionTextToken::new( BnString::new(addr_string), InstructionTextTokenContents::GotoLabel(addr), )); } else if let Flag(true) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new("true"), InstructionTextTokenContents::Integer(1), )); } else if let Flag(false) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new("false"), InstructionTextTokenContents::Integer(1), )); // Fall-back cases } else if let Some(value) = attr.u8_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.u16_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.udata_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.sdata_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value as u64), )); } else { let attr_string = 
format!("{:?}", attr.value()); attr_line.push(InstructionTextToken::new( BnString::new(attr_string), InstructionTextTokenContents::Text, )); } disassembly_lines.push(DisassemblyTextLine::from(attr_line)); } disassembly_lines } fn process_tree<R: Reader>( view: &BinaryView, dwarf: &Dwarf<R>, unit: &Unit<R>, graph: &FlowGraph, graph_parent: &FlowGraphNode, die_node: EntriesTreeNode<R>, ) { // Namespaces only - really interesting to look at! // if (die_node.entry().tag() == constants::DW_TAG_namespace) // || (die_node.entry().tag() == constants::DW_TAG_class_type) // || (die_node.entry().tag() == constants::DW_TAG_compile_unit) // || (die_node.entry().tag() == constants::DW_TAG_subprogram) // { let new_node = FlowGraphNode::new(graph); let attr_string = get_info_string(view, dwarf, unit, die_node.entry()); new_node.set_disassembly_lines(&attr_string); graph.append(&new_node); graph_parent.add_outgoing_edge( BranchType::UnconditionalBranch, &new_node, &EdgeStyle::default(), ); let mut children = die_node.children(); while let Some(child) = children.next().unwrap() { process_tree(view, dwarf, unit, graph, &new_node, child); } // } } fn dump_dwarf(bv: &BinaryView) { let view = if bv.section_by_name(".debug_info").is_ok() { bv.to_owned() } else { bv.parent_view().unwrap() }; let graph = FlowGraph::new(); graph.set_option(FlowGraphOption::FlowGraphUsesBlockHighlights, true); graph.set_option(FlowGraphOption::FlowGraphUsesInstructionHighlights, true); let graph_root = FlowGraphNode::new(&graph); graph_root.set_lines(vec!["Graph Root"]); graph.append(&graph_root); let endian = dwarfreader::get_endian(bv); let section_reader = |section_id: SectionId| -> _ { dwarfreader::create_section_reader(section_id, bv, endian, false) }; let dwarf = Dwarf::load(&section_reader).unwrap(); let mut iter = dwarf.units(); while let Some(header) = iter.next().unwrap() { let unit = dwarf.unit(header).unwrap(); let mut entries = unit.entries(); let mut depth = 0; if let Some((delta_depth, entry)) = entries.next_dfs().unwrap() { depth += delta_depth; assert!(depth >= 0); let mut tree = unit.entries_tree(Some(entry.offset())).unwrap(); let root = tree.root().unwrap(); process_tree(&view, &dwarf, &unit, &graph, &graph_root, root); } } view.show_graph_report("DWARF", graph); } struct DWARFDump; impl Command for DWARFDump { fn action(&self, view: &BinaryView) { dump_dwarf(view); } fn valid(&self, view: &BinaryView) -> bool { is_valid(view) } } #[no_mangle] pub extern "C" fn UIPluginInit() -> bool { register( "DWARF Dump", "Show embedded DWARF info as a tree structure for you to navigate", DWARFDump {}, ); true }
// distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
random_line_split
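The process_tree function that recurs throughout these lib.rs records is, stripped of the Binary Ninja and gimli types, an ordinary pre-order tree walk: one graph node per DIE, one unconditional edge from parent to child. A sketch of that shape in plain Python follows; the Die class and the nodes/edges lists are hypothetical stand-ins for the FlowGraph types, not part of any real API.

# Pre-order walk mirroring process_tree: one graph node per DIE,
# one parent -> child edge. Die, nodes, and edges are invented
# stand-ins for the Binary Ninja/gimli types.
from dataclasses import dataclass, field
from typing import List, Tuple

@dataclass
class Die:
    tag: str
    children: List["Die"] = field(default_factory=list)

def process_tree(die: Die, parent_id: int, nodes: List[str],
                 edges: List[Tuple[int, int]]) -> None:
    node_id = len(nodes)
    nodes.append(die.tag)               # graph.append(&new_node)
    edges.append((parent_id, node_id))  # add_outgoing_edge(...)
    for child in die.children:
        process_tree(child, node_id, nodes, edges)

if __name__ == "__main__":
    root = Die("DW_TAG_compile_unit",
               [Die("DW_TAG_subprogram"), Die("DW_TAG_base_type")])
    nodes, edges = ["Graph Root"], []
    process_tree(root, 0, nodes, edges)
    print(nodes)  # ['Graph Root', 'DW_TAG_compile_unit', ...]
    print(edges)  # [(0, 1), (1, 2), (1, 3)]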
lib.rs
// Copyright 2021-2023 Vector 35 Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use binaryninja::{ binaryview::{BinaryView, BinaryViewExt}, command::{register, Command}, disassembly::{DisassemblyTextLine, InstructionTextToken, InstructionTextTokenContents}, flowgraph::{BranchType, EdgeStyle, FlowGraph, FlowGraphNode, FlowGraphOption}, string::BnString, }; use dwarfreader::is_valid; use gimli::{ AttributeValue::{Encoding, Flag, UnitRef}, // BigEndian, DebuggingInformationEntry, Dwarf, EntriesTreeNode, Reader, ReaderOffset, SectionId, Unit, UnitSectionOffset, }; static PADDING: [&'static str; 23] = [ "", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", ]; // TODO : This is very much not comprehensive: see https://github.com/gimli-rs/gimli/blob/master/examples/dwarfdump.rs fn
<R: Reader>( view: &BinaryView, dwarf: &Dwarf<R>, unit: &Unit<R>, die_node: &DebuggingInformationEntry<R>, ) -> Vec<DisassemblyTextLine> { let mut disassembly_lines: Vec<DisassemblyTextLine> = Vec::with_capacity(10); // This is an estimate so "most" things won't need to resize let label_value = match die_node.offset().to_unit_section_offset(unit) { UnitSectionOffset::DebugInfoOffset(o) => o.0, UnitSectionOffset::DebugTypesOffset(o) => o.0, } .into_u64(); let label_string = format!("#0x{:08x}", label_value); disassembly_lines.push(DisassemblyTextLine::from(vec![ InstructionTextToken::new( BnString::new(label_string), InstructionTextTokenContents::GotoLabel(label_value), ), InstructionTextToken::new(BnString::new(":"), InstructionTextTokenContents::Text), ])); disassembly_lines.push(DisassemblyTextLine::from(vec![InstructionTextToken::new( BnString::new(die_node.tag().static_string().unwrap()), InstructionTextTokenContents::TypeName, // TODO : KeywordToken? )])); let mut attrs = die_node.attrs(); while let Some(attr) = attrs.next().unwrap() { let mut attr_line: Vec<InstructionTextToken> = Vec::with_capacity(5); attr_line.push(InstructionTextToken::new( BnString::new(" "), InstructionTextTokenContents::Indentation, )); let len; if let Some(n) = attr.name().static_string() { len = n.len(); attr_line.push(InstructionTextToken::new( BnString::new(n), InstructionTextTokenContents::FieldName, )); } else { // This is rather unlikely, I think len = 1; attr_line.push(InstructionTextToken::new( BnString::new("?"), InstructionTextTokenContents::FieldName, )); } // On command line the magic number that looks good is 22, but that's too much whitespace in a basic block, so I chose 18 (22 is the max with the current padding provided) if len < 18 { attr_line.push(InstructionTextToken::new( BnString::new(PADDING[18 - len]), InstructionTextTokenContents::Text, )); } attr_line.push(InstructionTextToken::new( BnString::new(" = "), InstructionTextTokenContents::Text, )); if let Ok(Some(addr)) = dwarf.attr_address(unit, attr.value()) { let addr_string = format!("0x{:08x}", addr); attr_line.push(InstructionTextToken::new( BnString::new(addr_string), InstructionTextTokenContents::Integer(addr), )); } else if let Ok(attr_reader) = dwarf.attr_string(unit, attr.value()) { if let Ok(attr_string) = attr_reader.to_string() { attr_line.push(InstructionTextToken::new( BnString::new(attr_string.as_ref()), InstructionTextTokenContents::String({ let (_, id, offset) = dwarf.lookup_offset_id(attr_reader.offset_id()).unwrap(); offset.into_u64() + view.section_by_name(id.name()).unwrap().start() }), )); } else { attr_line.push(InstructionTextToken::new( BnString::new("??"), InstructionTextTokenContents::Text, )); } } else if let Encoding(type_class) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new(type_class.static_string().unwrap()), InstructionTextTokenContents::TypeName, )); } else if let UnitRef(offset) = attr.value() { let addr = match offset.to_unit_section_offset(unit) { UnitSectionOffset::DebugInfoOffset(o) => o.0, UnitSectionOffset::DebugTypesOffset(o) => o.0, } .into_u64(); let addr_string = format!("#0x{:08x}", addr); attr_line.push(InstructionTextToken::new( BnString::new(addr_string), InstructionTextTokenContents::GotoLabel(addr), )); } else if let Flag(true) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new("true"), InstructionTextTokenContents::Integer(1), )); } else if let Flag(false) = attr.value() { attr_line.push(InstructionTextToken::new( BnString::new("false"), 
InstructionTextTokenContents::Integer(1), )); // Fall-back cases } else if let Some(value) = attr.u8_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.u16_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.udata_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value.into()), )); } else if let Some(value) = attr.sdata_value() { let value_string = format!("{}", value); attr_line.push(InstructionTextToken::new( BnString::new(value_string), InstructionTextTokenContents::Integer(value as u64), )); } else { let attr_string = format!("{:?}", attr.value()); attr_line.push(InstructionTextToken::new( BnString::new(attr_string), InstructionTextTokenContents::Text, )); } disassembly_lines.push(DisassemblyTextLine::from(attr_line)); } disassembly_lines } fn process_tree<R: Reader>( view: &BinaryView, dwarf: &Dwarf<R>, unit: &Unit<R>, graph: &FlowGraph, graph_parent: &FlowGraphNode, die_node: EntriesTreeNode<R>, ) { // Namespaces only - really interesting to look at! // if (die_node.entry().tag() == constants::DW_TAG_namespace) // || (die_node.entry().tag() == constants::DW_TAG_class_type) // || (die_node.entry().tag() == constants::DW_TAG_compile_unit) // || (die_node.entry().tag() == constants::DW_TAG_subprogram) // { let new_node = FlowGraphNode::new(graph); let attr_string = get_info_string(view, dwarf, unit, die_node.entry()); new_node.set_disassembly_lines(&attr_string); graph.append(&new_node); graph_parent.add_outgoing_edge( BranchType::UnconditionalBranch, &new_node, &EdgeStyle::default(), ); let mut children = die_node.children(); while let Some(child) = children.next().unwrap() { process_tree(view, dwarf, unit, graph, &new_node, child); } // } } fn dump_dwarf(bv: &BinaryView) { let view = if bv.section_by_name(".debug_info").is_ok() { bv.to_owned() } else { bv.parent_view().unwrap() }; let graph = FlowGraph::new(); graph.set_option(FlowGraphOption::FlowGraphUsesBlockHighlights, true); graph.set_option(FlowGraphOption::FlowGraphUsesInstructionHighlights, true); let graph_root = FlowGraphNode::new(&graph); graph_root.set_lines(vec!["Graph Root"]); graph.append(&graph_root); let endian = dwarfreader::get_endian(bv); let section_reader = |section_id: SectionId| -> _ { dwarfreader::create_section_reader(section_id, bv, endian, false) }; let dwarf = Dwarf::load(&section_reader).unwrap(); let mut iter = dwarf.units(); while let Some(header) = iter.next().unwrap() { let unit = dwarf.unit(header).unwrap(); let mut entries = unit.entries(); let mut depth = 0; if let Some((delta_depth, entry)) = entries.next_dfs().unwrap() { depth += delta_depth; assert!(depth >= 0); let mut tree = unit.entries_tree(Some(entry.offset())).unwrap(); let root = tree.root().unwrap(); process_tree(&view, &dwarf, &unit, &graph, &graph_root, root); } } view.show_graph_report("DWARF", graph); } struct DWARFDump; impl Command for DWARFDump { fn action(&self, view: &BinaryView) { dump_dwarf(view); } fn valid(&self, view: &BinaryView) -> bool { is_valid(view) } } #[no_mangle] pub extern "C" fn UIPluginInit() -> bool { register( "DWARF Dump", "Show embedded DWARF info as a tree structure for you to 
navigate", DWARFDump {}, ); true }
get_info_string
identifier_name
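get_info_string formats each attribute by trying interpretations in a fixed order — address, string, encoding, unit reference, flag, then the integer widths — and falls back to a Debug-style print. One detail worth flagging: the Flag(false) arm passes Integer(1), the same token value as the true arm, which looks like a copy-paste slip (0 would be expected). A Python sketch of the same try-in-order shape; the value shapes are invented and only the ordering idea comes from the source.

# Sketch of the try-each-interpretation-in-order shape used by
# get_info_string, with a repr() fallback. Note the boolean arm
# carries 0 for false -- the Rust source passes Integer(1) in both
# Flag arms, which looks like a slip.
def format_attr(value) -> str:
    if isinstance(value, bool):
        return "true" if value else "false"   # token value 1 vs 0
    if isinstance(value, int) and value >= 0x1000:
        return f"0x{value:08x}"               # address-like
    if isinstance(value, str):
        return value                          # string table hit
    if isinstance(value, int):
        return str(value)                     # u8/u16/udata/sdata
    return repr(value)                        # Debug-style fallback

if __name__ == "__main__":
    for v in [0x1040, "main", True, False, 42, (1, 2)]:
        print(format_attr(v))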
tree.py
#!/usr/bin/python
import copy
import sys
import random

last_state_list = list()
current_state_list = list()

class tictactoe(object):
    def __init__(self, gameState=['.','.','.','.','.','.','.','.','.']):
        self.gameState = gameState

    #Check if 3 in a row in any way on the board
    def three_in_a_row(self, player):
#If there are 3 x's in a row, x's win def win_state_x(self): if self.three_in_a_row('x') and not(self.three_in_a_row('o')): return True return False #If there are 3 o's in a row, o's win def win_state_o(self): if self.three_in_a_row('o') and not(self.three_in_a_row('x')): return True return False #Returns the game children of the gameState def get_game_children(self, player): empty = [] for pos in range(len(self.gameState)): if self.gameState[pos] == '.': newState = list(self.gameState) newState[pos] = player empty.append(newState) return empty #Checks if 2 in a row in any way on the board def two_in_a_row(self, player): #Horizontal for it in range(0,3): if self.gameState[it*3] == player and self.gameState[3*it+1] == player and self.gameState[3*it+2] == '.': return 3*it+2 elif self.gameState[it*3] == player and self.gameState[3*it+1] == '.' and self.gameState[3*it+2] == player: return 3*it+1 elif self.gameState[it*3] == '.' and self.gameState[3*it+1] == player and self.gameState[3*it+2] == player: return 3*it #Vertical for it in range(0,3): if self.gameState[it] == player and self.gameState[it+3] == player and self.gameState[it+6] == '.': return it+6 elif self.gameState[it] == player and self.gameState[it+3] == '.' and self.gameState[it+6] == player: return it+3 elif self.gameState[it] == '.' and self.gameState[it+3] == player and self.gameState[it+6] == player: return it #Diagonal if self.gameState[0] == player and self.gameState[4] == player and self.gameState[8] == '.': return 8 elif self.gameState[0] == player and self.gameState[4] == '.' and self.gameState[8] == player: return 4 elif self.gameState[0] == '.' and self.gameState[4] == player and self.gameState[8] == player: return 0 elif self.gameState[6] == player and self.gameState[4] == player and self.gameState[2] == '.': return 2 elif self.gameState[6] == player and self.gameState[4] == '.' and self.gameState[2] == player: return 4 elif self.gameState[6] == '.' 
and self.gameState[4] == player and self.gameState[2] == player: return 6 return None def trump(self, player, player2): if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == '.': return 4 if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == player2: pick = [1,3,5,7] while True: check = random.choice(pick) if check != player or check != player2: return check #If they play in the middle, play in 2 corners if (self.gameState[4] == player): pick = [0,2,6,8] while True: check = random.choice(pick) if check != player or check != player2: return check return None #Prints the game board def print_state(self): string = str() for i in range(0,9): if i%3 == 0 and i != 0: print string string = str() string += self.gameState[i] + " " print string + "\n" #Gets last gamestate in a list (to compare for AI move) def get_AI_lastmove(self): global last_state_list last_state_list = self.gameState[:] #Gets current gamestate in a list (to compare for AI move) def get_AI_currmove(self): global current_state_list current_state_list = self.gameState[:] class Node(object): def __init__(self, tictactoe): self.tictactoe = tictactoe self.gameState = self.tictactoe.gameState self.parent = None self.children = [] self.depth = None def get_leaves(self): returnList = [] if len(self.children) == 0: returnList.append(self) else: for node in self.children: for leaf in node.get_leaves(): returnList.append(leaf) return returnList def get_children(self): return self.children def insert_child(self, node): node.parent = self node.depth = self.depth+1 self.children.append(node) def print_state(self): self.tictactoe.print_state() def get_gameState(self): return self.gameState def get_AI_lastmove(self): self.tictactoe.get_AI_lastmove() def get_AI_currmove(self): self.tictactoe.get_AI_currmove() def trump(self, player): self.tictactoe.trump(player, player2) class Tree(object): def __init__(self, root): root.depth = 0 self.root = root self.currentNode = self.root self.lastNode = self.currentNode def fill_game_tree(self, first_player, node): if (not(node.tictactoe.win_state_o()) and not(node.tictactoe.win_state_x())): gameStates = node.tictactoe.get_game_children(first_player) for s in gameStates: n = Node(tictactoe(s)) node.insert_child(n) if first_player == 'x': self.fill_game_tree('o',n) else: self.fill_game_tree('x',n) def set_currentNode(self, node): self.currentNode = node def end_state(self): if self.currentNode.tictactoe.win_state_x(): print "Player 1 has won" return True elif self.currentNode.tictactoe.win_state_o(): print "Player 2 has won" return True elif len(self.currentNode.get_children()) <= 0: print "The game is a tie." 
return True return False def valid_position(currentNode, position): # Converts position 1-9 and returns position x,y or None,None if invalid position = int(position) if currentNode.get_gameState()[position-1] != '.': return None return position-1 def minimax(turn, currentNode, children): best_node = 0 most_wins = 0 # Guarantees to make the winning move for i in range(0,len(children)): if turn%2 == 0 and children[i].tictactoe.win_state_x() or turn%2 == 1 and children[i].tictactoe.win_state_o(): return i # Blocks other player from winning two_in_a_row = None if turn%2 == 0: two_in_a_row = currentNode.tictactoe.two_in_a_row('o') elif turn%2 == 1: two_in_a_row = currentNode.tictactoe.two_in_a_row('x') if two_in_a_row != None: pos = two_in_a_row for n in range(0, len(children)): if (turn%2 == 0 and children[n].get_gameState()[pos] == 'x') or (turn%2 == 1 and children[n].get_gameState()[pos] == 'o'): return n #Trump move trump = None if turn%2 == 0: trump = currentNode.tictactoe.trump('o', 'x') elif turn%2 == 1: trump = currentNode.tictactoe.trump('x', 'o') if trump != None: pos = trump for n in range(0, len(children)): if (turn%2 == 0 and children[n].get_gameState()[pos] == 'x') or (turn%2 == 1 and children[n].get_gameState()[pos] == 'o'): return n #print "Didn't need to make a trump move" # Checks for move with highest chance of winning using minimax algorithm for i in range(0,len(children)): #score = [0] score = 0 for n in children[i].get_leaves(): if (n.tictactoe.win_state_x() and turn%2 == 1) or (n.tictactoe.win_state_o() and turn%2 == 0): score += 10 #score.append(10-(n.depth-currentNode.depth)) elif n.tictactoe.win_state_o(): score -= 10 #score.append((n.depth-currentNode.depth)-10) if turn%2 == 0: # highest score is the optimal move for x #if max(score) > most_wins: #most_wins = max(score) if score > most_wins: most_wins = score best_node = i else: # lowest score is optimal move for o #if max(score) < most_wins: #most_wins = max(score) if score < most_wins: most_wins = score best_node = i return best_node def print_move(turn): sys.stdout.write("Move #" + str(turn+1) + ": enter choice for player " + str(turn%2+1) + " : ") def print_AImove(turn, position): sys.stdout.write("Move #" + str(turn+1) + ": enter choice for player " + str(turn%2+1) + " : " + str(position) + "\n\n") def compare_states(last_state, current_state): last_state.get_AI_lastmove() current_state.get_AI_currmove() for i in range(0,9): if current_state_list[i] != last_state_list[i]: return i+1 if __name__ == "__main__": ###Example usage #initialize a tree with a root node with an empty game state #print "Setting up game tree" t = Tree(Node(tictactoe())) #fill up the game tree. This sets each nodes children to be the game states of the next possible moves t.fill_game_tree('x', t.root) print "Game tree set up. Ready to play" turn = 0 # keeps track of player's turn; x == 0, o == 1 # User input for single or dual agent COMPUTER = input('Enter choice (1 for single agent, 2 for dual agents): ') while COMPUTER != 1 and COMPUTER != 2: COMPUTER = input('Enter choice (1 for single agent, 2 for dual agents): ') sys.stdout.write("\n") t.currentNode.print_state() # Game loop while(not(t.end_state())): children = t.currentNode.get_children() # User input if COMPUTER == 1 and turn%2 == 0: print_move(turn) position = input('') sys.stdout.write("\n") pos = valid_position(t.currentNode, position) while pos == None: print "Invalid input. 
Please enter a number 1-9 that corresponds to an empty position" print_move(turn) pos = valid_position(t.currentNode, input('')) sys.stdout.write("\n") for child in children: if child.get_gameState()[pos] == 'x': t.currentNode = child #print_move(turn, position) # AI else: t.lastNode = copy.copy(t.currentNode) #Keeps track of last gamestate best_node = minimax(turn, t.currentNode, children) t.currentNode = children[best_node] #pos variable is the move the AI takes pos = compare_states(t.lastNode, t.currentNode) #Compares last and curr state print_AImove(turn, pos) t.currentNode.print_state() turn+=1
        for it in range(0,3):
            if self.gameState[it*3] == player and self.gameState[3*it+1] == player and self.gameState[3*it+2] == player: #horizontally
                return True
            if self.gameState[it] == player and self.gameState[it+3] == player and self.gameState[it+6] == player: #vertically
                return True
        if self.gameState[0] == player and self.gameState[4] == player and self.gameState[8] == player: #diagonally
            return True
        if self.gameState[6] == player and self.gameState[4] == player and self.gameState[2] == player: #diagonally
            return True
        return False
identifier_body
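three_in_a_row, shown as the middle of the record above, spells out every row, column, and diagonal as a separate comparison. The standard compaction is a table of winning index triples; here is a sketch using the same '.', 'x', 'o' board encoding as tree.py. The function name mirrors the source, but this table-driven form is my rewrite, not the original.

# Compact equivalent of three_in_a_row: one table of winning index
# triples instead of hand-written comparisons. Board encoding
# ('.', 'x', 'o') matches tree.py.
WIN_LINES = [
    (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
    (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
    (0, 4, 8), (2, 4, 6),              # diagonals
]

def three_in_a_row(board, player):
    return any(all(board[i] == player for i in line) for line in WIN_LINES)

if __name__ == "__main__":
    board = ['x', 'x', 'x', 'o', 'o', '.', '.', '.', '.']
    print(three_in_a_row(board, 'x'))  # True
    print(three_in_a_row(board, 'o'))  # False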
tree.py
#!/usr/bin/python import copy import sys import random last_state_list = list() current_state_list = list() class tictactoe(object): def __init__(self, gameState=['.','.','.','.','.','.','.','.','.']): self.gameState = gameState #Check if 3 in a row in any way on the board def three_in_a_row(self, player): for it in range(0,3): if self.gameState[it*3] == player and self.gameState[3*it+1] == player and self.gameState[3*it+2] == player: #horizontally return True if self.gameState[it] == player and self.gameState[it+3] == player and self.gameState[it+6] == player: #vertically return True if self.gameState[0] == player and self.gameState[4] == player and self.gameState[8] == player: #diagonally return True if self.gameState[6] == player and self.gameState[4] == player and self.gameState[2] == player: #diagonally return True return False #If there are 3 x's in a row, x's win def win_state_x(self): if self.three_in_a_row('x') and not(self.three_in_a_row('o')): return True return False #If there are 3 o's in a row, o's win def win_state_o(self): if self.three_in_a_row('o') and not(self.three_in_a_row('x')): return True return False #Returns the game children of the gameState def get_game_children(self, player): empty = [] for pos in range(len(self.gameState)): if self.gameState[pos] == '.': newState = list(self.gameState) newState[pos] = player empty.append(newState) return empty #Checks if 2 in a row in any way on the board def two_in_a_row(self, player): #Horizontal for it in range(0,3): if self.gameState[it*3] == player and self.gameState[3*it+1] == player and self.gameState[3*it+2] == '.': return 3*it+2 elif self.gameState[it*3] == player and self.gameState[3*it+1] == '.' and self.gameState[3*it+2] == player: return 3*it+1 elif self.gameState[it*3] == '.' and self.gameState[3*it+1] == player and self.gameState[3*it+2] == player: return 3*it #Vertical for it in range(0,3): if self.gameState[it] == player and self.gameState[it+3] == player and self.gameState[it+6] == '.': return it+6 elif self.gameState[it] == player and self.gameState[it+3] == '.' and self.gameState[it+6] == player: return it+3 elif self.gameState[it] == '.' and self.gameState[it+3] == player and self.gameState[it+6] == player: return it #Diagonal if self.gameState[0] == player and self.gameState[4] == player and self.gameState[8] == '.': return 8 elif self.gameState[0] == player and self.gameState[4] == '.' and self.gameState[8] == player: return 4 elif self.gameState[0] == '.' and self.gameState[4] == player and self.gameState[8] == player: return 0 elif self.gameState[6] == player and self.gameState[4] == player and self.gameState[2] == '.': return 2 elif self.gameState[6] == player and self.gameState[4] == '.' and self.gameState[2] == player: return 4 elif self.gameState[6] == '.' 
and self.gameState[4] == player and self.gameState[2] == player: return 6 return None def trump(self, player, player2): if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == '.': return 4 if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == player2: pick = [1,3,5,7] while True: check = random.choice(pick) if check != player or check != player2: return check #If they play in the middle, play in 2 corners if (self.gameState[4] == player): pick = [0,2,6,8] while True: check = random.choice(pick) if check != player or check != player2: return check return None #Prints the game board def print_state(self): string = str() for i in range(0,9): if i%3 == 0 and i != 0: print string string = str() string += self.gameState[i] + " " print string + "\n" #Gets last gamestate in a list (to compare for AI move) def get_AI_lastmove(self): global last_state_list last_state_list = self.gameState[:] #Gets current gamestate in a list (to compare for AI move) def get_AI_currmove(self): global current_state_list current_state_list = self.gameState[:] class Node(object): def __init__(self, tictactoe): self.tictactoe = tictactoe self.gameState = self.tictactoe.gameState self.parent = None self.children = [] self.depth = None def get_leaves(self): returnList = [] if len(self.children) == 0: returnList.append(self) else: for node in self.children: for leaf in node.get_leaves(): returnList.append(leaf) return returnList def get_children(self): return self.children def insert_child(self, node): node.parent = self node.depth = self.depth+1 self.children.append(node) def print_state(self): self.tictactoe.print_state() def get_gameState(self): return self.gameState def get_AI_lastmove(self): self.tictactoe.get_AI_lastmove() def get_AI_currmove(self): self.tictactoe.get_AI_currmove() def trump(self, player): self.tictactoe.trump(player, player2) class Tree(object): def __init__(self, root): root.depth = 0 self.root = root self.currentNode = self.root self.lastNode = self.currentNode def fill_game_tree(self, first_player, node): if (not(node.tictactoe.win_state_o()) and not(node.tictactoe.win_state_x())): gameStates = node.tictactoe.get_game_children(first_player) for s in gameStates: n = Node(tictactoe(s)) node.insert_child(n) if first_player == 'x': self.fill_game_tree('o',n) else: self.fill_game_tree('x',n) def set_currentNode(self, node): self.currentNode = node def end_state(self): if self.currentNode.tictactoe.win_state_x(): print "Player 1 has won" return True elif self.currentNode.tictactoe.win_state_o(): print "Player 2 has won" return True elif len(self.currentNode.get_children()) <= 0: print "The game is a tie." return True return False def
(currentNode, position): # Converts position 1-9 and returns position x,y or None,None if invalid position = int(position) if currentNode.get_gameState()[position-1] != '.': return None return position-1 def minimax(turn, currentNode, children): best_node = 0 most_wins = 0 # Guarantees to make the winning move for i in range(0,len(children)): if turn%2 == 0 and children[i].tictactoe.win_state_x() or turn%2 == 1 and children[i].tictactoe.win_state_o(): return i # Blocks other player from winning two_in_a_row = None if turn%2 == 0: two_in_a_row = currentNode.tictactoe.two_in_a_row('o') elif turn%2 == 1: two_in_a_row = currentNode.tictactoe.two_in_a_row('x') if two_in_a_row != None: pos = two_in_a_row for n in range(0, len(children)): if (turn%2 == 0 and children[n].get_gameState()[pos] == 'x') or (turn%2 == 1 and children[n].get_gameState()[pos] == 'o'): return n #Trump move trump = None if turn%2 == 0: trump = currentNode.tictactoe.trump('o', 'x') elif turn%2 == 1: trump = currentNode.tictactoe.trump('x', 'o') if trump != None: pos = trump for n in range(0, len(children)): if (turn%2 == 0 and children[n].get_gameState()[pos] == 'x') or (turn%2 == 1 and children[n].get_gameState()[pos] == 'o'): return n #print "Didn't need to make a trump move" # Checks for move with highest chance of winning using minimax algorithm for i in range(0,len(children)): #score = [0] score = 0 for n in children[i].get_leaves(): if (n.tictactoe.win_state_x() and turn%2 == 1) or (n.tictactoe.win_state_o() and turn%2 == 0): score += 10 #score.append(10-(n.depth-currentNode.depth)) elif n.tictactoe.win_state_o(): score -= 10 #score.append((n.depth-currentNode.depth)-10) if turn%2 == 0: # highest score is the optimal move for x #if max(score) > most_wins: #most_wins = max(score) if score > most_wins: most_wins = score best_node = i else: # lowest score is optimal move for o #if max(score) < most_wins: #most_wins = max(score) if score < most_wins: most_wins = score best_node = i return best_node def print_move(turn): sys.stdout.write("Move #" + str(turn+1) + ": enter choice for player " + str(turn%2+1) + " : ") def print_AImove(turn, position): sys.stdout.write("Move #" + str(turn+1) + ": enter choice for player " + str(turn%2+1) + " : " + str(position) + "\n\n") def compare_states(last_state, current_state): last_state.get_AI_lastmove() current_state.get_AI_currmove() for i in range(0,9): if current_state_list[i] != last_state_list[i]: return i+1 if __name__ == "__main__": ###Example usage #initialize a tree with a root node with an empty game state #print "Setting up game tree" t = Tree(Node(tictactoe())) #fill up the game tree. This sets each nodes children to be the game states of the next possible moves t.fill_game_tree('x', t.root) print "Game tree set up. Ready to play" turn = 0 # keeps track of player's turn; x == 0, o == 1 # User input for single or dual agent COMPUTER = input('Enter choice (1 for single agent, 2 for dual agents): ') while COMPUTER != 1 and COMPUTER != 2: COMPUTER = input('Enter choice (1 for single agent, 2 for dual agents): ') sys.stdout.write("\n") t.currentNode.print_state() # Game loop while(not(t.end_state())): children = t.currentNode.get_children() # User input if COMPUTER == 1 and turn%2 == 0: print_move(turn) position = input('') sys.stdout.write("\n") pos = valid_position(t.currentNode, position) while pos == None: print "Invalid input. 
Please enter a number 1-9 that corresponds to an empty position" print_move(turn) pos = valid_position(t.currentNode, input('')) sys.stdout.write("\n") for child in children: if child.get_gameState()[pos] == 'x': t.currentNode = child #print_move(turn, position) # AI else: t.lastNode = copy.copy(t.currentNode) #Keeps track of last gamestate best_node = minimax(turn, t.currentNode, children) t.currentNode = children[best_node] #pos variable is the move the AI takes pos = compare_states(t.lastNode, t.currentNode) #Compares last and curr state print_AImove(turn, pos) t.currentNode.print_state() turn+=1
valid_position
identifier_name
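valid_position, named as the middle of the record above, only rejects occupied squares; its comment also claims it "returns position x,y or None,None" when it actually returns a single index or None. A non-numeric entry raises ValueError at int(), and a number outside 1-9 either indexes the wrong square or raises IndexError before the emptiness check runs. A defensive sketch of the same helper, keeping the 1-9 convention; the added range and type checks are my assumption about the intended behavior.

# Defensive version of valid_position: the tree.py original indexes
# the board before checking that the input is numeric and in 1-9,
# so bad input raises instead of returning None.
def valid_position(board, position):
    try:
        pos = int(position)
    except (TypeError, ValueError):
        return None
    if not 1 <= pos <= 9:
        return None
    if board[pos - 1] != '.':
        return None
    return pos - 1

if __name__ == "__main__":
    board = ['x', '.', '.', '.', '.', '.', '.', '.', '.']
    print(valid_position(board, "1"))    # None (occupied)
    print(valid_position(board, "2"))    # 1
    print(valid_position(board, "ten"))  # None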
tree.py
#!/usr/bin/python import copy import sys import random last_state_list = list() current_state_list = list() class tictactoe(object): def __init__(self, gameState=['.','.','.','.','.','.','.','.','.']): self.gameState = gameState #Check if 3 in a row in any way on the board def three_in_a_row(self, player): for it in range(0,3): if self.gameState[it*3] == player and self.gameState[3*it+1] == player and self.gameState[3*it+2] == player: #horizontally return True if self.gameState[it] == player and self.gameState[it+3] == player and self.gameState[it+6] == player: #vertically return True if self.gameState[0] == player and self.gameState[4] == player and self.gameState[8] == player: #diagonally return True if self.gameState[6] == player and self.gameState[4] == player and self.gameState[2] == player: #diagonally return True return False #If there are 3 x's in a row, x's win def win_state_x(self): if self.three_in_a_row('x') and not(self.three_in_a_row('o')): return True return False #If there are 3 o's in a row, o's win def win_state_o(self): if self.three_in_a_row('o') and not(self.three_in_a_row('x')): return True return False #Returns the game children of the gameState def get_game_children(self, player): empty = [] for pos in range(len(self.gameState)): if self.gameState[pos] == '.': newState = list(self.gameState) newState[pos] = player empty.append(newState) return empty #Checks if 2 in a row in any way on the board def two_in_a_row(self, player): #Horizontal for it in range(0,3): if self.gameState[it*3] == player and self.gameState[3*it+1] == player and self.gameState[3*it+2] == '.': return 3*it+2 elif self.gameState[it*3] == player and self.gameState[3*it+1] == '.' and self.gameState[3*it+2] == player: return 3*it+1 elif self.gameState[it*3] == '.' and self.gameState[3*it+1] == player and self.gameState[3*it+2] == player: return 3*it #Vertical for it in range(0,3): if self.gameState[it] == player and self.gameState[it+3] == player and self.gameState[it+6] == '.': return it+6 elif self.gameState[it] == player and self.gameState[it+3] == '.' and self.gameState[it+6] == player: return it+3 elif self.gameState[it] == '.' and self.gameState[it+3] == player and self.gameState[it+6] == player: return it
elif self.gameState[0] == player and self.gameState[4] == '.' and self.gameState[8] == player: return 4 elif self.gameState[0] == '.' and self.gameState[4] == player and self.gameState[8] == player: return 0 elif self.gameState[6] == player and self.gameState[4] == player and self.gameState[2] == '.': return 2 elif self.gameState[6] == player and self.gameState[4] == '.' and self.gameState[2] == player: return 4 elif self.gameState[6] == '.' and self.gameState[4] == player and self.gameState[2] == player: return 6 return None def trump(self, player, player2): if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == '.': return 4 if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == player2: pick = [1,3,5,7] while True: check = random.choice(pick) if check != player or check != player2: return check #If they play in the middle, play in 2 corners if (self.gameState[4] == player): pick = [0,2,6,8] while True: check = random.choice(pick) if check != player or check != player2: return check return None #Prints the game board def print_state(self): string = str() for i in range(0,9): if i%3 == 0 and i != 0: print string string = str() string += self.gameState[i] + " " print string + "\n" #Gets last gamestate in a list (to compare for AI move) def get_AI_lastmove(self): global last_state_list last_state_list = self.gameState[:] #Gets current gamestate in a list (to compare for AI move) def get_AI_currmove(self): global current_state_list current_state_list = self.gameState[:] class Node(object): def __init__(self, tictactoe): self.tictactoe = tictactoe self.gameState = self.tictactoe.gameState self.parent = None self.children = [] self.depth = None def get_leaves(self): returnList = [] if len(self.children) == 0: returnList.append(self) else: for node in self.children: for leaf in node.get_leaves(): returnList.append(leaf) return returnList def get_children(self): return self.children def insert_child(self, node): node.parent = self node.depth = self.depth+1 self.children.append(node) def print_state(self): self.tictactoe.print_state() def get_gameState(self): return self.gameState def get_AI_lastmove(self): self.tictactoe.get_AI_lastmove() def get_AI_currmove(self): self.tictactoe.get_AI_currmove() def trump(self, player): self.tictactoe.trump(player, player2) class Tree(object): def __init__(self, root): root.depth = 0 self.root = root self.currentNode = self.root self.lastNode = self.currentNode def fill_game_tree(self, first_player, node): if (not(node.tictactoe.win_state_o()) and not(node.tictactoe.win_state_x())): gameStates = node.tictactoe.get_game_children(first_player) for s in gameStates: n = Node(tictactoe(s)) node.insert_child(n) if first_player == 'x': self.fill_game_tree('o',n) else: self.fill_game_tree('x',n) def set_currentNode(self, node): self.currentNode = node def end_state(self): if self.currentNode.tictactoe.win_state_x(): print "Player 1 has won" return True elif self.currentNode.tictactoe.win_state_o(): print "Player 2 has won" return True elif len(self.currentNode.get_children()) <= 0: print "The game is a tie." 
return True return False def valid_position(currentNode, position): # Converts position 1-9 and returns position x,y or None,None if invalid position = int(position) if currentNode.get_gameState()[position-1] != '.': return None return position-1 def minimax(turn, currentNode, children): best_node = 0 most_wins = 0 # Guarantees to make the winning move for i in range(0,len(children)): if turn%2 == 0 and children[i].tictactoe.win_state_x() or turn%2 == 1 and children[i].tictactoe.win_state_o(): return i # Blocks other player from winning two_in_a_row = None if turn%2 == 0: two_in_a_row = currentNode.tictactoe.two_in_a_row('o') elif turn%2 == 1: two_in_a_row = currentNode.tictactoe.two_in_a_row('x') if two_in_a_row != None: pos = two_in_a_row for n in range(0, len(children)): if (turn%2 == 0 and children[n].get_gameState()[pos] == 'x') or (turn%2 == 1 and children[n].get_gameState()[pos] == 'o'): return n #Trump move trump = None if turn%2 == 0: trump = currentNode.tictactoe.trump('o', 'x') elif turn%2 == 1: trump = currentNode.tictactoe.trump('x', 'o') if trump != None: pos = trump for n in range(0, len(children)): if (turn%2 == 0 and children[n].get_gameState()[pos] == 'x') or (turn%2 == 1 and children[n].get_gameState()[pos] == 'o'): return n #print "Didn't need to make a trump move" # Checks for move with highest chance of winning using minimax algorithm for i in range(0,len(children)): #score = [0] score = 0 for n in children[i].get_leaves(): if (n.tictactoe.win_state_x() and turn%2 == 1) or (n.tictactoe.win_state_o() and turn%2 == 0): score += 10 #score.append(10-(n.depth-currentNode.depth)) elif n.tictactoe.win_state_o(): score -= 10 #score.append((n.depth-currentNode.depth)-10) if turn%2 == 0: # highest score is the optimal move for x #if max(score) > most_wins: #most_wins = max(score) if score > most_wins: most_wins = score best_node = i else: # lowest score is optimal move for o #if max(score) < most_wins: #most_wins = max(score) if score < most_wins: most_wins = score best_node = i return best_node def print_move(turn): sys.stdout.write("Move #" + str(turn+1) + ": enter choice for player " + str(turn%2+1) + " : ") def print_AImove(turn, position): sys.stdout.write("Move #" + str(turn+1) + ": enter choice for player " + str(turn%2+1) + " : " + str(position) + "\n\n") def compare_states(last_state, current_state): last_state.get_AI_lastmove() current_state.get_AI_currmove() for i in range(0,9): if current_state_list[i] != last_state_list[i]: return i+1 if __name__ == "__main__": ###Example usage #initialize a tree with a root node with an empty game state #print "Setting up game tree" t = Tree(Node(tictactoe())) #fill up the game tree. This sets each nodes children to be the game states of the next possible moves t.fill_game_tree('x', t.root) print "Game tree set up. Ready to play" turn = 0 # keeps track of player's turn; x == 0, o == 1 # User input for single or dual agent COMPUTER = input('Enter choice (1 for single agent, 2 for dual agents): ') while COMPUTER != 1 and COMPUTER != 2: COMPUTER = input('Enter choice (1 for single agent, 2 for dual agents): ') sys.stdout.write("\n") t.currentNode.print_state() # Game loop while(not(t.end_state())): children = t.currentNode.get_children() # User input if COMPUTER == 1 and turn%2 == 0: print_move(turn) position = input('') sys.stdout.write("\n") pos = valid_position(t.currentNode, position) while pos == None: print "Invalid input. 
Please enter a number 1-9 that corresponds to an empty position" print_move(turn) pos = valid_position(t.currentNode, input('')) sys.stdout.write("\n") for child in children: if child.get_gameState()[pos] == 'x': t.currentNode = child #print_move(turn, position) # AI else: t.lastNode = copy.copy(t.currentNode) #Keeps track of last gamestate best_node = minimax(turn, t.currentNode, children) t.currentNode = children[best_node] #pos variable is the move the AI takes pos = compare_states(t.lastNode, t.currentNode) #Compares last and curr state print_AImove(turn, pos) t.currentNode.print_state() turn+=1
        #Diagonal
        if self.gameState[0] == player and self.gameState[4] == player and self.gameState[8] == '.':
            return 8
random_line_split
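Despite its name, the minimax function in these tree.py records mostly plays by hand-coded rules (take a win, block a two-in-a-row, the trump opening) and then scores each child by counting winning leaves across the whole subtree against a most_wins accumulator, which conflates whose turn each leaf belongs to. For comparison, here is a standalone textbook minimax over the same board encoding; the +10/-10 constants follow the source, and the depth tie-break the source comments out is omitted.

# Textbook minimax over raw boards, standalone, using tree.py's
# '.', 'x', 'o' encoding. Replaces the leaf-counting heuristic in
# the source; +10/-10 scoring constants come from the source.
WIN_LINES = [(0,1,2), (3,4,5), (6,7,8), (0,3,6), (1,4,7), (2,5,8), (0,4,8), (2,4,6)]

def winner(board):
    for a, b, c in WIN_LINES:
        if board[a] != '.' and board[a] == board[b] == board[c]:
            return board[a]
    return None

def minimax(board, player):
    w = winner(board)
    if w == 'x':
        return 10
    if w == 'o':
        return -10
    moves = [i for i, s in enumerate(board) if s == '.']
    if not moves:
        return 0  # draw
    scores = []
    for i in moves:
        child = list(board)
        child[i] = player
        scores.append(minimax(child, 'o' if player == 'x' else 'x'))
    return max(scores) if player == 'x' else min(scores)

def best_move(board, player):
    best = None
    for i in [i for i, s in enumerate(board) if s == '.']:
        child = list(board)
        child[i] = player
        s = minimax(child, 'o' if player == 'x' else 'x')
        if best is None or (player == 'x' and s > best[0]) or (player == 'o' and s < best[0]):
            best = (s, i)
    return best[1]

if __name__ == "__main__":
    board = ['x', 'x', '.', 'o', 'o', '.', '.', '.', '.']
    print(best_move(board, 'x'))  # 2: take the win
    print(best_move(board, 'o'))  # 5: take the win for o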
tree.py
#!/usr/bin/python import copy import sys import random last_state_list = list() current_state_list = list() class tictactoe(object): def __init__(self, gameState=['.','.','.','.','.','.','.','.','.']): self.gameState = gameState #Check if 3 in a row in any way on the board def three_in_a_row(self, player): for it in range(0,3): if self.gameState[it*3] == player and self.gameState[3*it+1] == player and self.gameState[3*it+2] == player: #horizontally return True if self.gameState[it] == player and self.gameState[it+3] == player and self.gameState[it+6] == player: #vertically return True if self.gameState[0] == player and self.gameState[4] == player and self.gameState[8] == player: #diagonally return True if self.gameState[6] == player and self.gameState[4] == player and self.gameState[2] == player: #diagonally return True return False #If there are 3 x's in a row, x's win def win_state_x(self): if self.three_in_a_row('x') and not(self.three_in_a_row('o')): return True return False #If there are 3 o's in a row, o's win def win_state_o(self): if self.three_in_a_row('o') and not(self.three_in_a_row('x')): return True return False #Returns the game children of the gameState def get_game_children(self, player): empty = [] for pos in range(len(self.gameState)): if self.gameState[pos] == '.': newState = list(self.gameState) newState[pos] = player empty.append(newState) return empty #Checks if 2 in a row in any way on the board def two_in_a_row(self, player): #Horizontal for it in range(0,3): if self.gameState[it*3] == player and self.gameState[3*it+1] == player and self.gameState[3*it+2] == '.': return 3*it+2 elif self.gameState[it*3] == player and self.gameState[3*it+1] == '.' and self.gameState[3*it+2] == player: return 3*it+1 elif self.gameState[it*3] == '.' and self.gameState[3*it+1] == player and self.gameState[3*it+2] == player: return 3*it #Vertical for it in range(0,3): if self.gameState[it] == player and self.gameState[it+3] == player and self.gameState[it+6] == '.': return it+6 elif self.gameState[it] == player and self.gameState[it+3] == '.' and self.gameState[it+6] == player: return it+3 elif self.gameState[it] == '.' and self.gameState[it+3] == player and self.gameState[it+6] == player: return it #Diagonal if self.gameState[0] == player and self.gameState[4] == player and self.gameState[8] == '.': return 8 elif self.gameState[0] == player and self.gameState[4] == '.' and self.gameState[8] == player: return 4 elif self.gameState[0] == '.' and self.gameState[4] == player and self.gameState[8] == player: return 0 elif self.gameState[6] == player and self.gameState[4] == player and self.gameState[2] == '.': return 2 elif self.gameState[6] == player and self.gameState[4] == '.' and self.gameState[2] == player: return 4 elif self.gameState[6] == '.' 
and self.gameState[4] == player and self.gameState[2] == player: return 6 return None def trump(self, player, player2): if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == '.': return 4 if (self.gameState[0] == player or self.gameState[2] == player or self.gameState[6] == player or self.gameState[8] == player) and self.gameState[4] == player2: pick = [1,3,5,7] while True: check = random.choice(pick) if check != player or check != player2: return check #If they play in the middle, play in 2 corners if (self.gameState[4] == player): pick = [0,2,6,8] while True: check = random.choice(pick) if check != player or check != player2: return check return None #Prints the game board def print_state(self): string = str() for i in range(0,9): if i%3 == 0 and i != 0: print string string = str() string += self.gameState[i] + " " print string + "\n" #Gets last gamestate in a list (to compare for AI move) def get_AI_lastmove(self): global last_state_list last_state_list = self.gameState[:] #Gets current gamestate in a list (to compare for AI move) def get_AI_currmove(self): global current_state_list current_state_list = self.gameState[:] class Node(object): def __init__(self, tictactoe): self.tictactoe = tictactoe self.gameState = self.tictactoe.gameState self.parent = None self.children = [] self.depth = None def get_leaves(self): returnList = [] if len(self.children) == 0: returnList.append(self) else: for node in self.children: for leaf in node.get_leaves(): returnList.append(leaf) return returnList def get_children(self): return self.children def insert_child(self, node): node.parent = self node.depth = self.depth+1 self.children.append(node) def print_state(self): self.tictactoe.print_state() def get_gameState(self): return self.gameState def get_AI_lastmove(self): self.tictactoe.get_AI_lastmove() def get_AI_currmove(self): self.tictactoe.get_AI_currmove() def trump(self, player): self.tictactoe.trump(player, player2) class Tree(object): def __init__(self, root): root.depth = 0 self.root = root self.currentNode = self.root self.lastNode = self.currentNode def fill_game_tree(self, first_player, node): if (not(node.tictactoe.win_state_o()) and not(node.tictactoe.win_state_x())): gameStates = node.tictactoe.get_game_children(first_player) for s in gameStates: n = Node(tictactoe(s)) node.insert_child(n) if first_player == 'x': self.fill_game_tree('o',n) else: self.fill_game_tree('x',n) def set_currentNode(self, node): self.currentNode = node def end_state(self): if self.currentNode.tictactoe.win_state_x(): print "Player 1 has won" return True elif self.currentNode.tictactoe.win_state_o(): print "Player 2 has won" return True elif len(self.currentNode.get_children()) <= 0: print "The game is a tie." return True return False def valid_position(currentNode, position): # Converts position 1-9 and returns position x,y or None,None if invalid position = int(position) if currentNode.get_gameState()[position-1] != '.': return None return position-1 def minimax(turn, currentNode, children): best_node = 0 most_wins = 0 # Guarantees to make the winning move for i in range(0,len(children)): if turn%2 == 0 and children[i].tictactoe.win_state_x() or turn%2 == 1 and children[i].tictactoe.win_state_o(): return i # Blocks other player from winning two_in_a_row = None if turn%2 == 0:
    elif turn % 2 == 1:
        two_in_a_row = currentNode.tictactoe.two_in_a_row('x')
    if two_in_a_row is not None:
        pos = two_in_a_row
        for n in range(0, len(children)):
            if (turn % 2 == 0 and children[n].get_gameState()[pos] == 'x') or \
               (turn % 2 == 1 and children[n].get_gameState()[pos] == 'o'):
                return n

    # Trump move
    trump = None
    if turn % 2 == 0:
        trump = currentNode.tictactoe.trump('o', 'x')
    elif turn % 2 == 1:
        trump = currentNode.tictactoe.trump('x', 'o')
    if trump is not None:
        pos = trump
        for n in range(0, len(children)):
            if (turn % 2 == 0 and children[n].get_gameState()[pos] == 'x') or \
               (turn % 2 == 1 and children[n].get_gameState()[pos] == 'o'):
                return n

    # Otherwise pick the move whose subtree scores best; leaves are scored
    # from x's point of view, so x maximises and o minimises
    for i in range(0, len(children)):
        score = 0
        for n in children[i].get_leaves():
            if n.tictactoe.win_state_x():
                score += 10
            elif n.tictactoe.win_state_o():
                score -= 10
        if turn % 2 == 0:  # highest score is the optimal move for x
            if score > most_wins:
                most_wins = score
                best_node = i
        else:              # lowest score is the optimal move for o
            if score < most_wins:
                most_wins = score
                best_node = i
    return best_node


def print_move(turn):
    sys.stdout.write("Move #" + str(turn + 1) + ": enter choice for player " + str(turn % 2 + 1) + " : ")


def print_AImove(turn, position):
    sys.stdout.write("Move #" + str(turn + 1) + ": enter choice for player " + str(turn % 2 + 1) + " : " + str(position) + "\n\n")


def compare_states(last_state, current_state):
    # Returns the 1-9 position that changed between the two states
    last_state.get_AI_lastmove()
    current_state.get_AI_currmove()
    for i in range(0, 9):
        if current_state_list[i] != last_state_list[i]:
            return i + 1


if __name__ == "__main__":
    # Example usage: initialise a tree whose root node holds an empty game state,
    # then fill it so each node's children are the states after the next possible moves
    t = Tree(Node(tictactoe()))
    t.fill_game_tree('x', t.root)
    print("Game tree set up. Ready to play")

    turn = 0  # keeps track of the player's turn; x == 0, o == 1

    # User input for single or dual agent
    COMPUTER = int(input('Enter choice (1 for single agent, 2 for dual agents): '))
    while COMPUTER != 1 and COMPUTER != 2:
        COMPUTER = int(input('Enter choice (1 for single agent, 2 for dual agents): '))
    sys.stdout.write("\n")
    t.currentNode.print_state()

    # Game loop
    while not t.end_state():
        children = t.currentNode.get_children()
        # User input
        if COMPUTER == 1 and turn % 2 == 0:
            print_move(turn)
            position = input('')
            sys.stdout.write("\n")
            pos = valid_position(t.currentNode, position)
            while pos is None:
                print("Invalid input. Please enter a number 1-9 that corresponds to an empty position")
                print_move(turn)
                pos = valid_position(t.currentNode, input(''))
                sys.stdout.write("\n")
            for child in children:
                if child.get_gameState()[pos] == 'x':
                    t.currentNode = child
        # AI
        else:
            t.lastNode = copy.copy(t.currentNode)  # keeps track of the last game state
            best_node = minimax(turn, t.currentNode, children)
            t.currentNode = children[best_node]
            # pos is the move the AI took, found by diffing the last and current states
            pos = compare_states(t.lastNode, t.currentNode)
            print_AImove(turn, pos)
        t.currentNode.print_state()
        turn += 1
two_in_a_row = currentNode.tictactoe.two_in_a_row('o')
conditional_block
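The minimax in this record leans on hand-written win/block/trump heuristics before falling back to a leaf count. For comparison, here is a minimal exhaustive minimax for the same 3x3 board; this is a self-contained sketch, not part of the record, and it uses a 9-character string for the board instead of the record's list representation:

def winner(state):
    # Rows, columns and the two diagonals of the 3x3 board
    lines = [(0, 1, 2), (3, 4, 5), (6, 7, 8),
             (0, 3, 6), (1, 4, 7), (2, 5, 8),
             (0, 4, 8), (2, 4, 6)]
    for a, b, c in lines:
        if state[a] != '.' and state[a] == state[b] == state[c]:
            return state[a]
    return None

def minimax_sketch(state, player):
    # Returns (score, move) from x's point of view; player is to move
    w = winner(state)
    if w is not None:
        return (1, None) if w == 'x' else (-1, None)
    moves = [i for i, cell in enumerate(state) if cell == '.']
    if not moves:
        return 0, None  # draw
    best_move = None
    best_score = -2 if player == 'x' else 2  # outside the [-1, 1] score range
    for move in moves:
        child = state[:move] + player + state[move + 1:]
        score, _ = minimax_sketch(child, 'o' if player == 'x' else 'x')
        if (player == 'x' and score > best_score) or (player == 'o' and score < best_score):
            best_score, best_move = score, move
    return best_score, best_move

From the empty board, minimax_sketch('.' * 9, 'x') returns a score of 0, reflecting that perfect play by both sides is a draw.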
uploadSourcemap.ts
import {djaty} from '@djaty/djaty-nodejs'; import * as glob from 'glob'; import * as fs from 'fs'; import * as path from 'path'; import * as ora from 'ora'; import * as tar from 'tar'; import * as os from 'os'; import * as requestPromise from 'request-promise'; // tslint:disable-next-line no-require-imports const urlRegex = require('url-regex'); import {CommandParams} from '../interfaces/commandParams'; import {config} from '../config/config'; import {logger} from '..'; import {ValidationError} from '../utils/validationError'; import {dealWithCommandActionAsPromise} from '../utils/utils'; interface UploadSourcemapCMDParams { apiKey: string; apiSecret: string; release: string; minifiedDir: string; projectRoot: string; endPoint?: string; } export class UploadSourcemap { static sourcemapApi = 'sourcemap'; static abortSourcemapApi = 'abortUploadingSourcemap'; static djatyPathPrefix = 'djaty'; static sourcemapFileSuffix = '_sourcemap_files.tgz'; initializationDetails: CommandParams; constructor()
async commandAction(cmd: UploadSourcemapCMDParams) { UploadSourcemap.validateOnCLIInput(cmd); const baseURL = cmd.endPoint || config.baseURL; const absolutePath = path.resolve(cmd.minifiedDir); const spinner = ora('Compressing sourcemap files').start(); let minifiedFileListPaths: string[] = []; let isUploadReqFired = false; let isAbortReqFired = false; const osTmpDir = os.tmpdir(); const djatyTmpPathDir = path.resolve(osTmpDir, UploadSourcemap.djatyPathPrefix); if (!fs.existsSync(djatyTmpPathDir)) { fs.mkdirSync(djatyTmpPathDir); } const uniquePrefix = (Math.random() * Date.now()).toString(); const compressedFileName = path.resolve(djatyTmpPathDir, `${Date.now()}${UploadSourcemap.sourcemapFileSuffix}`); process.on('SIGINT', dealWithCommandActionAsPromise(async() => { UploadSourcemap.saveRemove(compressedFileName); if (!isUploadReqFired || isAbortReqFired) { spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); } isAbortReqFired = true; await requestPromise.put({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.abortSourcemapApi}`, body: { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, uniquePrefix, }, headers: { 'djaty-escape-html-entities': true, }, // Automatically stringifies the body to JSON, json: true, timeout: config.requestTimeout, }).catch(err => { spinner.stop(); let errMsg = ''; if (err.statusCode === 400) { // Handle AJV validation errors. const error = JSON.parse(err.error.replace(')]}\',\n', '')); if (error.code === 'NOT_RELEASE_TO_ABORT') { // noinspection JSIgnoredPromiseFromCall djaty.trackBug(err); return; } errMsg = 'Validation error: \n\t'; const ignoredError = 'Unable to abort release. Release doesn\'t found.'; errMsg += error.errors ? error.errors .map((errItem: {message: string}) => errItem.message) .join('\n\t') : ''; throw new ValidationError(`Unable to stop uploading sourcemap files: ${errMsg}`); } else { errMsg += 'Something went wrong and a bug has been reported and will be resolved soon.'; } logger.error(`Unable to stop uploading sourcemap files: ${errMsg}`); throw err; }); spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); })); try { // `*` Matches 0 or more characters in a single path portion // `@(*.js|*.js.map)` Matches exactly one of the patterns provided // `**` If a "globstar" is alone in a path portion, then it matches zero // or more directories and subdirectories searching for matches. minifiedFileListPaths = glob.sync('**/@(*.js|*.js.map)', {cwd: absolutePath}); } catch (err) { spinner.stop(); if (err.code === 'EACCES') { throw new ValidationError(err); } throw err; } try { await tar.c({ gzip: true, file: compressedFileName, cwd: absolutePath, }, minifiedFileListPaths, ); } catch (err) { spinner.stop(); logger.error('Unable to compress sourcemap files.', err); UploadSourcemap.saveRemove(compressedFileName); throw err; } spinner.stopAndPersist({text: 'The sourcemap files compressed successfully.', symbol: '#'}); spinner.start('Uploading sourcemap files to Djaty...'); const uploadFormData = { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, projectRoot: cmd.projectRoot, // I submit this data as a `formData` so it always been converted to `string` and I reuse the // same upcoming fields in normal request to abort current upload so, // I convert use them a string in the first place. 
maxFiles: minifiedFileListPaths.length.toString(), uniquePrefix, sourcemapFiles: fs.createReadStream(compressedFileName), }; isUploadReqFired = true; await requestPromise.post({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.sourcemapApi}`, formData: uploadFormData, timeout: config.requestTimeout, headers: { 'djaty-escape-html-entities': true, }, }).catch(async err => { let errMsg = ''; UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); // Handle request errors const requestErrorsCodes: {[p: string]: string} = { UNABLE_TO_VERIFY_LEAF_SIGNATURE: 'Current connection to Djaty is secured ' + 'with a self-signed certificate but the current config has not passed the `server`' + 'object with a `ca` (Certification Authority) property!', CERT_HAS_EXPIRED: 'The certificate of the HTTPS connection to djaty has been expired!', ECONNREFUSED: 'Make sure `server` config is correct', ENOTFOUND: 'Make sure `server` config is correct', ECONNRESET: 'Client network socket disconnected before secure TLS connection' + ' was established', }; if (err.error && err.error.code && requestErrorsCodes[err.error.code]) { errMsg += requestErrorsCodes[err.error.code]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle nginx errors const nginxErrors: {[p: number]: string} = { 301: 'Redirection are not supported', 404: '404 Not Found', 413: 'Uploaded sourcemap file is too large, It should be less than 15MB', }; if (err.statusCode && nginxErrors[err.statusCode]) { errMsg += nginxErrors[err.statusCode]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle server errors const serverErrors: {[p: number]: Function} = { 400: (error: any) => 'Validation error: \n\t' + error.errors .map((errItem: {message: string}) => errItem.message).join('\n\t'), 428: (error: any) => error.message, }; if (err.statusCode && serverErrors[err.statusCode]) { const error = JSON.parse(err.error.replace(')]}\',\n', '')); errMsg += serverErrors[err.statusCode](error); throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } errMsg += 'Something went wrong and a bug has been reported and will be resolved soon'; logger.error(`Unable to upload sourcemap files: ${errMsg}.`); throw err; }); UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); logger.info('Uploading finished successfully. Please wait few minutes for' + ' the uploaded files to be processed.'); }; private static validateOnCLIInput(cmd: UploadSourcemapCMDParams | string) { if (typeof cmd === 'string') { throw new ValidationError(`Invalid args params: '${cmd}'`); } if (!cmd.apiKey || !cmd.apiSecret || !cmd.release || !cmd.minifiedDir || !cmd.projectRoot) { throw new ValidationError('Command params (apiKey, apiSecret,' + ' release, projectRoot and minifiedDir) are required'); } if (!fs.existsSync(cmd.minifiedDir)) { throw new ValidationError('Command param `minifiedDir` is not exists'); } if (!fs.lstatSync(cmd.minifiedDir).isDirectory()) { throw new ValidationError('Command param `minifiedDir` should be directory path'); } const urlRegexValidator = urlRegex({exact: true, strict: false}); if (!urlRegexValidator.test(cmd.projectRoot)) { throw new ValidationError('Invalid `project-root`.' + ' You should add valid url like `http://your-domain.com`'); } if (cmd.endPoint && !urlRegexValidator.test(cmd.endPoint)) { throw new ValidationError('Invalid `end-point`.' 
+ ' You should add a valid URL like `http://your-domain.com`'); } }; private static saveRemove(path: string) { return fs.existsSync(path) && fs.unlinkSync(path); } }
{
    this.initializationDetails = {
      command: 'uploadSourcemap',
      description: 'Upload project sourcemap files.',
      version: '1.0.0',
      optionList: [
        ['--api-key <key>', 'An API key for the project'],
        ['--api-secret <secret>', 'An API secret for the project'],
        ['--release <v>', 'When requesting to resolve a stack trace, we check the bug release' +
          ' against the currently uploaded releases and, if a release is matched, the stack trace' +
          ' will be resolved. So, if a bug is not configured with a release, it won\'t be able to' +
          ' have its stack trace resolved. And because multiple devices may run different releases' +
          ' concurrently, we let the user upload up to 5 releases per project.'],
        ['--minified-dir <path>', 'Path to the directory that contains the minified and' +
          ' sourcemap files (i.e., `dist`). Only `.js` and `.map` files will be uploaded.'],
        ['--project-root <domain>', 'The path of the project root. It helps us locate the' +
          ' original files from the stack frame, e.g., http://example.com.'],
        ['--end-point [server]', 'The server URL. The default is `djaty.com`' +
          ' (override it for on-premises installations).'],
      ],
      action: this.commandAction.bind(this),
    };
  }
identifier_body
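This record's fim_type is identifier_body: the middle is the constructor body that was cut out between the prefix and the suffix. A minimal sketch of working with such rows, assuming the column names shown in this dump (the sentinel token names in to_fim_prompt are illustrative, not part of the dataset):

def reassemble(record):
    # The original file is prefix + middle + suffix; the dump prints the
    # columns in prefix, suffix, middle order
    return record["prefix"] + record["middle"] + record["suffix"]

def to_fim_prompt(record, pre="<fim_prefix>", suf="<fim_suffix>", mid="<fim_middle>"):
    # PSM-style formatting: the model sees prefix and suffix, then fills the middle
    return pre + record["prefix"] + suf + record["suffix"] + mid + record["middle"]

record = {
    "file_name": "uploadSourcemap.ts",
    "prefix": "constructor()",
    "suffix": " async commandAction(",
    "middle": " { /* body */ }",
    "fim_type": "identifier_body",
}
assert reassemble(record) == "constructor() { /* body */ } async commandAction("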
uploadSourcemap.ts
import {djaty} from '@djaty/djaty-nodejs'; import * as glob from 'glob'; import * as fs from 'fs'; import * as path from 'path'; import * as ora from 'ora'; import * as tar from 'tar'; import * as os from 'os'; import * as requestPromise from 'request-promise'; // tslint:disable-next-line no-require-imports const urlRegex = require('url-regex'); import {CommandParams} from '../interfaces/commandParams'; import {config} from '../config/config'; import {logger} from '..'; import {ValidationError} from '../utils/validationError'; import {dealWithCommandActionAsPromise} from '../utils/utils'; interface UploadSourcemapCMDParams { apiKey: string; apiSecret: string; release: string; minifiedDir: string; projectRoot: string; endPoint?: string; } export class UploadSourcemap { static sourcemapApi = 'sourcemap'; static abortSourcemapApi = 'abortUploadingSourcemap'; static djatyPathPrefix = 'djaty'; static sourcemapFileSuffix = '_sourcemap_files.tgz'; initializationDetails: CommandParams; constructor() { this.initializationDetails = { command: 'uploadSourcemap', description: 'Upload project sourcemap files.', version: '1.0.0', optionList: [ ['--api-key <key>', 'An API key for project'], ['--api-secret <secret>', 'An API secret for project'], ['--release <v>', 'when requesting to resolve a stack trace, we check the bug release' + 'against current uploaded releases and if a release is matched, the stack trace will be' + ' resolved. So, if a bug is not configured with a release, it\'ll not be able to have ' + 'its stack trace resolved. And due to the probability of having multiple devices running' + ' different releases concurrently, we let the user upload up to 5 releases per project.'], ['--minified-dir <path>', 'Path to the directory that contains the minified and ' + ' sourcemap files (I.e, `dist`). Only `.js` and `.map` files will be uploaded.'], ['--project-root <domain>', 'The path of the project root. 
It helps us locate the' + ' original files from the stack frame, e.g., http://example.com.'], ['--end-point [server]', 'The server URL The default is `djaty.com`' + ' (on-premises installations).'], ], action: this.commandAction.bind(this), }; } async commandAction(cmd: UploadSourcemapCMDParams) { UploadSourcemap.validateOnCLIInput(cmd); const baseURL = cmd.endPoint || config.baseURL; const absolutePath = path.resolve(cmd.minifiedDir); const spinner = ora('Compressing sourcemap files').start(); let minifiedFileListPaths: string[] = []; let isUploadReqFired = false; let isAbortReqFired = false; const osTmpDir = os.tmpdir(); const djatyTmpPathDir = path.resolve(osTmpDir, UploadSourcemap.djatyPathPrefix); if (!fs.existsSync(djatyTmpPathDir)) { fs.mkdirSync(djatyTmpPathDir); } const uniquePrefix = (Math.random() * Date.now()).toString(); const compressedFileName = path.resolve(djatyTmpPathDir, `${Date.now()}${UploadSourcemap.sourcemapFileSuffix}`); process.on('SIGINT', dealWithCommandActionAsPromise(async() => { UploadSourcemap.saveRemove(compressedFileName); if (!isUploadReqFired || isAbortReqFired) { spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); } isAbortReqFired = true; await requestPromise.put({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.abortSourcemapApi}`, body: { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, uniquePrefix, }, headers: { 'djaty-escape-html-entities': true, }, // Automatically stringifies the body to JSON, json: true, timeout: config.requestTimeout, }).catch(err => { spinner.stop(); let errMsg = ''; if (err.statusCode === 400) { // Handle AJV validation errors. const error = JSON.parse(err.error.replace(')]}\',\n', '')); if (error.code === 'NOT_RELEASE_TO_ABORT') { // noinspection JSIgnoredPromiseFromCall djaty.trackBug(err); return; } errMsg = 'Validation error: \n\t'; const ignoredError = 'Unable to abort release. Release doesn\'t found.'; errMsg += error.errors ? error.errors .map((errItem: {message: string}) => errItem.message) .join('\n\t') : ''; throw new ValidationError(`Unable to stop uploading sourcemap files: ${errMsg}`); } else { errMsg += 'Something went wrong and a bug has been reported and will be resolved soon.'; } logger.error(`Unable to stop uploading sourcemap files: ${errMsg}`); throw err; }); spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); })); try { // `*` Matches 0 or more characters in a single path portion // `@(*.js|*.js.map)` Matches exactly one of the patterns provided // `**` If a "globstar" is alone in a path portion, then it matches zero // or more directories and subdirectories searching for matches. 
minifiedFileListPaths = glob.sync('**/@(*.js|*.js.map)', {cwd: absolutePath}); } catch (err) { spinner.stop(); if (err.code === 'EACCES') { throw new ValidationError(err); } throw err; } try { await tar.c({ gzip: true, file: compressedFileName, cwd: absolutePath, }, minifiedFileListPaths, ); } catch (err) { spinner.stop(); logger.error('Unable to compress sourcemap files.', err); UploadSourcemap.saveRemove(compressedFileName); throw err; } spinner.stopAndPersist({text: 'The sourcemap files compressed successfully.', symbol: '#'}); spinner.start('Uploading sourcemap files to Djaty...'); const uploadFormData = { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, projectRoot: cmd.projectRoot, // I submit this data as a `formData` so it always been converted to `string` and I reuse the // same upcoming fields in normal request to abort current upload so, // I convert use them a string in the first place. maxFiles: minifiedFileListPaths.length.toString(), uniquePrefix, sourcemapFiles: fs.createReadStream(compressedFileName), }; isUploadReqFired = true; await requestPromise.post({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.sourcemapApi}`, formData: uploadFormData, timeout: config.requestTimeout, headers: { 'djaty-escape-html-entities': true, }, }).catch(async err => { let errMsg = ''; UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); // Handle request errors const requestErrorsCodes: {[p: string]: string} = { UNABLE_TO_VERIFY_LEAF_SIGNATURE: 'Current connection to Djaty is secured ' + 'with a self-signed certificate but the current config has not passed the `server`' + 'object with a `ca` (Certification Authority) property!', CERT_HAS_EXPIRED: 'The certificate of the HTTPS connection to djaty has been expired!', ECONNREFUSED: 'Make sure `server` config is correct', ENOTFOUND: 'Make sure `server` config is correct', ECONNRESET: 'Client network socket disconnected before secure TLS connection' + ' was established', }; if (err.error && err.error.code && requestErrorsCodes[err.error.code]) { errMsg += requestErrorsCodes[err.error.code]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle nginx errors const nginxErrors: {[p: number]: string} = { 301: 'Redirection are not supported', 404: '404 Not Found', 413: 'Uploaded sourcemap file is too large, It should be less than 15MB', }; if (err.statusCode && nginxErrors[err.statusCode]) { errMsg += nginxErrors[err.statusCode]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle server errors const serverErrors: {[p: number]: Function} = { 400: (error: any) => 'Validation error: \n\t' + error.errors .map((errItem: {message: string}) => errItem.message).join('\n\t'), 428: (error: any) => error.message, }; if (err.statusCode && serverErrors[err.statusCode]) { const error = JSON.parse(err.error.replace(')]}\',\n', '')); errMsg += serverErrors[err.statusCode](error); throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } errMsg += 'Something went wrong and a bug has been reported and will be resolved soon'; logger.error(`Unable to upload sourcemap files: ${errMsg}.`); throw err; }); UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); logger.info('Uploading finished successfully. Please wait few minutes for' + ' the uploaded files to be processed.'); }; private static
(cmd: UploadSourcemapCMDParams | string) {
    if (typeof cmd === 'string') {
      throw new ValidationError(`Invalid args params: '${cmd}'`);
    }

    if (!cmd.apiKey || !cmd.apiSecret || !cmd.release || !cmd.minifiedDir || !cmd.projectRoot) {
      throw new ValidationError('Command params (apiKey, apiSecret,' +
        ' release, projectRoot and minifiedDir) are required');
    }

    if (!fs.existsSync(cmd.minifiedDir)) {
      throw new ValidationError('Command param `minifiedDir` does not exist');
    }

    if (!fs.lstatSync(cmd.minifiedDir).isDirectory()) {
      throw new ValidationError('Command param `minifiedDir` should be a directory path');
    }

    const urlRegexValidator = urlRegex({exact: true, strict: false});
    if (!urlRegexValidator.test(cmd.projectRoot)) {
      throw new ValidationError('Invalid `project-root`.' +
        ' You should add a valid URL like `http://your-domain.com`');
    }

    if (cmd.endPoint && !urlRegexValidator.test(cmd.endPoint)) {
      throw new ValidationError('Invalid `end-point`.' +
        ' You should add a valid URL like `http://your-domain.com`');
    }
  };

  private static saveRemove(path: string) {
    return fs.existsSync(path) && fs.unlinkSync(path);
  }
}
validateOnCLIInput
identifier_name
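The commandAction in these records packs the matched files with tar.c({gzip: true, file: compressedFileName, cwd: absolutePath}, minifiedFileListPaths) before uploading. A rough Python analogue of that compression step; the function and path names here are assumptions for illustration, not part of the CLI:

import os
import tarfile

def compress_files(base_dir, relative_paths, out_path):
    # Like tar.c with a cwd option: store each file under its path
    # relative to base_dir, and gzip the resulting archive
    with tarfile.open(out_path, "w:gz") as archive:
        for rel in relative_paths:
            archive.add(os.path.join(base_dir, rel), arcname=rel)
    return out_path

# compress_files("dist", ["app.js", "app.js.map"], "/tmp/djaty/sourcemap_files.tgz")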
uploadSourcemap.ts
import {djaty} from '@djaty/djaty-nodejs'; import * as glob from 'glob'; import * as fs from 'fs'; import * as path from 'path'; import * as ora from 'ora'; import * as tar from 'tar'; import * as os from 'os'; import * as requestPromise from 'request-promise'; // tslint:disable-next-line no-require-imports const urlRegex = require('url-regex'); import {CommandParams} from '../interfaces/commandParams'; import {config} from '../config/config'; import {logger} from '..'; import {ValidationError} from '../utils/validationError'; import {dealWithCommandActionAsPromise} from '../utils/utils'; interface UploadSourcemapCMDParams { apiKey: string; apiSecret: string; release: string; minifiedDir: string; projectRoot: string; endPoint?: string; } export class UploadSourcemap { static sourcemapApi = 'sourcemap'; static abortSourcemapApi = 'abortUploadingSourcemap'; static djatyPathPrefix = 'djaty'; static sourcemapFileSuffix = '_sourcemap_files.tgz'; initializationDetails: CommandParams; constructor() { this.initializationDetails = { command: 'uploadSourcemap', description: 'Upload project sourcemap files.', version: '1.0.0', optionList: [ ['--api-key <key>', 'An API key for project'], ['--api-secret <secret>', 'An API secret for project'], ['--release <v>', 'when requesting to resolve a stack trace, we check the bug release' + 'against current uploaded releases and if a release is matched, the stack trace will be' + ' resolved. So, if a bug is not configured with a release, it\'ll not be able to have ' + 'its stack trace resolved. And due to the probability of having multiple devices running' + ' different releases concurrently, we let the user upload up to 5 releases per project.'], ['--minified-dir <path>', 'Path to the directory that contains the minified and ' + ' sourcemap files (I.e, `dist`). Only `.js` and `.map` files will be uploaded.'], ['--project-root <domain>', 'The path of the project root. It helps us locate the' + ' original files from the stack frame, e.g., http://example.com.'], ['--end-point [server]', 'The server URL The default is `djaty.com`' + ' (on-premises installations).'], ], action: this.commandAction.bind(this), }; } async commandAction(cmd: UploadSourcemapCMDParams) { UploadSourcemap.validateOnCLIInput(cmd); const baseURL = cmd.endPoint || config.baseURL; const absolutePath = path.resolve(cmd.minifiedDir); const spinner = ora('Compressing sourcemap files').start(); let minifiedFileListPaths: string[] = []; let isUploadReqFired = false; let isAbortReqFired = false; const osTmpDir = os.tmpdir(); const djatyTmpPathDir = path.resolve(osTmpDir, UploadSourcemap.djatyPathPrefix); if (!fs.existsSync(djatyTmpPathDir)) { fs.mkdirSync(djatyTmpPathDir); } const uniquePrefix = (Math.random() * Date.now()).toString(); const compressedFileName = path.resolve(djatyTmpPathDir, `${Date.now()}${UploadSourcemap.sourcemapFileSuffix}`); process.on('SIGINT', dealWithCommandActionAsPromise(async() => { UploadSourcemap.saveRemove(compressedFileName); if (!isUploadReqFired || isAbortReqFired) { spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); } isAbortReqFired = true; await requestPromise.put({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.abortSourcemapApi}`, body: { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, uniquePrefix, }, headers: { 'djaty-escape-html-entities': true, }, // Automatically stringifies the body to JSON, json: true, timeout: config.requestTimeout, }).catch(err => { spinner.stop(); let errMsg = '';
if (err.statusCode === 400) { // Handle AJV validation errors. const error = JSON.parse(err.error.replace(')]}\',\n', '')); if (error.code === 'NOT_RELEASE_TO_ABORT') { // noinspection JSIgnoredPromiseFromCall djaty.trackBug(err); return; } errMsg = 'Validation error: \n\t'; const ignoredError = 'Unable to abort release. Release doesn\'t found.'; errMsg += error.errors ? error.errors .map((errItem: {message: string}) => errItem.message) .join('\n\t') : ''; throw new ValidationError(`Unable to stop uploading sourcemap files: ${errMsg}`); } else { errMsg += 'Something went wrong and a bug has been reported and will be resolved soon.'; } logger.error(`Unable to stop uploading sourcemap files: ${errMsg}`); throw err; }); spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); })); try { // `*` Matches 0 or more characters in a single path portion // `@(*.js|*.js.map)` Matches exactly one of the patterns provided // `**` If a "globstar" is alone in a path portion, then it matches zero // or more directories and subdirectories searching for matches. minifiedFileListPaths = glob.sync('**/@(*.js|*.js.map)', {cwd: absolutePath}); } catch (err) { spinner.stop(); if (err.code === 'EACCES') { throw new ValidationError(err); } throw err; } try { await tar.c({ gzip: true, file: compressedFileName, cwd: absolutePath, }, minifiedFileListPaths, ); } catch (err) { spinner.stop(); logger.error('Unable to compress sourcemap files.', err); UploadSourcemap.saveRemove(compressedFileName); throw err; } spinner.stopAndPersist({text: 'The sourcemap files compressed successfully.', symbol: '#'}); spinner.start('Uploading sourcemap files to Djaty...'); const uploadFormData = { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, projectRoot: cmd.projectRoot, // I submit this data as a `formData` so it always been converted to `string` and I reuse the // same upcoming fields in normal request to abort current upload so, // I convert use them a string in the first place. 
maxFiles: minifiedFileListPaths.length.toString(), uniquePrefix, sourcemapFiles: fs.createReadStream(compressedFileName), }; isUploadReqFired = true; await requestPromise.post({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.sourcemapApi}`, formData: uploadFormData, timeout: config.requestTimeout, headers: { 'djaty-escape-html-entities': true, }, }).catch(async err => { let errMsg = ''; UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); // Handle request errors const requestErrorsCodes: {[p: string]: string} = { UNABLE_TO_VERIFY_LEAF_SIGNATURE: 'Current connection to Djaty is secured ' + 'with a self-signed certificate but the current config has not passed the `server`' + 'object with a `ca` (Certification Authority) property!', CERT_HAS_EXPIRED: 'The certificate of the HTTPS connection to djaty has been expired!', ECONNREFUSED: 'Make sure `server` config is correct', ENOTFOUND: 'Make sure `server` config is correct', ECONNRESET: 'Client network socket disconnected before secure TLS connection' + ' was established', }; if (err.error && err.error.code && requestErrorsCodes[err.error.code]) { errMsg += requestErrorsCodes[err.error.code]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle nginx errors const nginxErrors: {[p: number]: string} = { 301: 'Redirection are not supported', 404: '404 Not Found', 413: 'Uploaded sourcemap file is too large, It should be less than 15MB', }; if (err.statusCode && nginxErrors[err.statusCode]) { errMsg += nginxErrors[err.statusCode]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle server errors const serverErrors: {[p: number]: Function} = { 400: (error: any) => 'Validation error: \n\t' + error.errors .map((errItem: {message: string}) => errItem.message).join('\n\t'), 428: (error: any) => error.message, }; if (err.statusCode && serverErrors[err.statusCode]) { const error = JSON.parse(err.error.replace(')]}\',\n', '')); errMsg += serverErrors[err.statusCode](error); throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } errMsg += 'Something went wrong and a bug has been reported and will be resolved soon'; logger.error(`Unable to upload sourcemap files: ${errMsg}.`); throw err; }); UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); logger.info('Uploading finished successfully. Please wait few minutes for' + ' the uploaded files to be processed.'); }; private static validateOnCLIInput(cmd: UploadSourcemapCMDParams | string) { if (typeof cmd === 'string') { throw new ValidationError(`Invalid args params: '${cmd}'`); } if (!cmd.apiKey || !cmd.apiSecret || !cmd.release || !cmd.minifiedDir || !cmd.projectRoot) { throw new ValidationError('Command params (apiKey, apiSecret,' + ' release, projectRoot and minifiedDir) are required'); } if (!fs.existsSync(cmd.minifiedDir)) { throw new ValidationError('Command param `minifiedDir` is not exists'); } if (!fs.lstatSync(cmd.minifiedDir).isDirectory()) { throw new ValidationError('Command param `minifiedDir` should be directory path'); } const urlRegexValidator = urlRegex({exact: true, strict: false}); if (!urlRegexValidator.test(cmd.projectRoot)) { throw new ValidationError('Invalid `project-root`.' + ' You should add valid url like `http://your-domain.com`'); } if (cmd.endPoint && !urlRegexValidator.test(cmd.endPoint)) { throw new ValidationError('Invalid `end-point`.' 
+ ' You should add a valid URL like `http://your-domain.com`'); } }; private static saveRemove(path: string) { return fs.existsSync(path) && fs.unlinkSync(path); } }
random_line_split
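The record's comments explain the node-glob pattern '**/@(*.js|*.js.map)': '**' recurses into subdirectories and '@(a|b)' matches exactly one of the alternatives. Python's glob module has no @(a|b) alternation, so a rough equivalent (a sketch only; glob.glob's root_dir parameter needs Python 3.10+) runs two recursive passes and returns paths relative to the root, matching the {cwd: absolutePath} option:

import glob

def find_minified_files(root):
    # Two passes stand in for the @(*.js|*.js.map) alternation; the patterns
    # cannot overlap because one requires a .js suffix and the other .js.map
    matches = []
    for pattern in ("**/*.js", "**/*.js.map"):
        matches.extend(glob.glob(pattern, root_dir=root, recursive=True))
    return sorted(matches)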
uploadSourcemap.ts
import {djaty} from '@djaty/djaty-nodejs'; import * as glob from 'glob'; import * as fs from 'fs'; import * as path from 'path'; import * as ora from 'ora'; import * as tar from 'tar'; import * as os from 'os'; import * as requestPromise from 'request-promise'; // tslint:disable-next-line no-require-imports const urlRegex = require('url-regex'); import {CommandParams} from '../interfaces/commandParams'; import {config} from '../config/config'; import {logger} from '..'; import {ValidationError} from '../utils/validationError'; import {dealWithCommandActionAsPromise} from '../utils/utils'; interface UploadSourcemapCMDParams { apiKey: string; apiSecret: string; release: string; minifiedDir: string; projectRoot: string; endPoint?: string; } export class UploadSourcemap { static sourcemapApi = 'sourcemap'; static abortSourcemapApi = 'abortUploadingSourcemap'; static djatyPathPrefix = 'djaty'; static sourcemapFileSuffix = '_sourcemap_files.tgz'; initializationDetails: CommandParams; constructor() { this.initializationDetails = { command: 'uploadSourcemap', description: 'Upload project sourcemap files.', version: '1.0.0', optionList: [ ['--api-key <key>', 'An API key for project'], ['--api-secret <secret>', 'An API secret for project'], ['--release <v>', 'when requesting to resolve a stack trace, we check the bug release' + 'against current uploaded releases and if a release is matched, the stack trace will be' + ' resolved. So, if a bug is not configured with a release, it\'ll not be able to have ' + 'its stack trace resolved. And due to the probability of having multiple devices running' + ' different releases concurrently, we let the user upload up to 5 releases per project.'], ['--minified-dir <path>', 'Path to the directory that contains the minified and ' + ' sourcemap files (I.e, `dist`). Only `.js` and `.map` files will be uploaded.'], ['--project-root <domain>', 'The path of the project root. 
It helps us locate the' + ' original files from the stack frame, e.g., http://example.com.'], ['--end-point [server]', 'The server URL The default is `djaty.com`' + ' (on-premises installations).'], ], action: this.commandAction.bind(this), }; } async commandAction(cmd: UploadSourcemapCMDParams) { UploadSourcemap.validateOnCLIInput(cmd); const baseURL = cmd.endPoint || config.baseURL; const absolutePath = path.resolve(cmd.minifiedDir); const spinner = ora('Compressing sourcemap files').start(); let minifiedFileListPaths: string[] = []; let isUploadReqFired = false; let isAbortReqFired = false; const osTmpDir = os.tmpdir(); const djatyTmpPathDir = path.resolve(osTmpDir, UploadSourcemap.djatyPathPrefix); if (!fs.existsSync(djatyTmpPathDir)) { fs.mkdirSync(djatyTmpPathDir); } const uniquePrefix = (Math.random() * Date.now()).toString(); const compressedFileName = path.resolve(djatyTmpPathDir, `${Date.now()}${UploadSourcemap.sourcemapFileSuffix}`); process.on('SIGINT', dealWithCommandActionAsPromise(async() => { UploadSourcemap.saveRemove(compressedFileName); if (!isUploadReqFired || isAbortReqFired) { spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); } isAbortReqFired = true; await requestPromise.put({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.abortSourcemapApi}`, body: { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, uniquePrefix, }, headers: { 'djaty-escape-html-entities': true, }, // Automatically stringifies the body to JSON, json: true, timeout: config.requestTimeout, }).catch(err => { spinner.stop(); let errMsg = ''; if (err.statusCode === 400) { // Handle AJV validation errors. const error = JSON.parse(err.error.replace(')]}\',\n', '')); if (error.code === 'NOT_RELEASE_TO_ABORT') { // noinspection JSIgnoredPromiseFromCall djaty.trackBug(err); return; } errMsg = 'Validation error: \n\t'; const ignoredError = 'Unable to abort release. Release doesn\'t found.'; errMsg += error.errors ? error.errors .map((errItem: {message: string}) => errItem.message) .join('\n\t') : ''; throw new ValidationError(`Unable to stop uploading sourcemap files: ${errMsg}`); } else { errMsg += 'Something went wrong and a bug has been reported and will be resolved soon.'; } logger.error(`Unable to stop uploading sourcemap files: ${errMsg}`); throw err; }); spinner.stop(); logger.info('Uploading stopped successfully.'); process.exit(); })); try { // `*` Matches 0 or more characters in a single path portion // `@(*.js|*.js.map)` Matches exactly one of the patterns provided // `**` If a "globstar" is alone in a path portion, then it matches zero // or more directories and subdirectories searching for matches. 
minifiedFileListPaths = glob.sync('**/@(*.js|*.js.map)', {cwd: absolutePath}); } catch (err) { spinner.stop(); if (err.code === 'EACCES') { throw new ValidationError(err); } throw err; } try { await tar.c({ gzip: true, file: compressedFileName, cwd: absolutePath, }, minifiedFileListPaths, ); } catch (err) { spinner.stop(); logger.error('Unable to compress sourcemap files.', err); UploadSourcemap.saveRemove(compressedFileName); throw err; } spinner.stopAndPersist({text: 'The sourcemap files compressed successfully.', symbol: '#'}); spinner.start('Uploading sourcemap files to Djaty...'); const uploadFormData = { apiKey: cmd.apiKey, apiSecret: cmd.apiSecret, release: cmd.release, projectRoot: cmd.projectRoot, // I submit this data as a `formData` so it always been converted to `string` and I reuse the // same upcoming fields in normal request to abort current upload so, // I convert use them a string in the first place. maxFiles: minifiedFileListPaths.length.toString(), uniquePrefix, sourcemapFiles: fs.createReadStream(compressedFileName), }; isUploadReqFired = true; await requestPromise.post({ url: `${baseURL}/${config.baseAPI}/${UploadSourcemap.sourcemapApi}`, formData: uploadFormData, timeout: config.requestTimeout, headers: { 'djaty-escape-html-entities': true, }, }).catch(async err => { let errMsg = ''; UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); // Handle request errors const requestErrorsCodes: {[p: string]: string} = { UNABLE_TO_VERIFY_LEAF_SIGNATURE: 'Current connection to Djaty is secured ' + 'with a self-signed certificate but the current config has not passed the `server`' + 'object with a `ca` (Certification Authority) property!', CERT_HAS_EXPIRED: 'The certificate of the HTTPS connection to djaty has been expired!', ECONNREFUSED: 'Make sure `server` config is correct', ENOTFOUND: 'Make sure `server` config is correct', ECONNRESET: 'Client network socket disconnected before secure TLS connection' + ' was established', }; if (err.error && err.error.code && requestErrorsCodes[err.error.code]) { errMsg += requestErrorsCodes[err.error.code]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle nginx errors const nginxErrors: {[p: number]: string} = { 301: 'Redirection are not supported', 404: '404 Not Found', 413: 'Uploaded sourcemap file is too large, It should be less than 15MB', }; if (err.statusCode && nginxErrors[err.statusCode]) { errMsg += nginxErrors[err.statusCode]; throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } // Handle server errors const serverErrors: {[p: number]: Function} = { 400: (error: any) => 'Validation error: \n\t' + error.errors .map((errItem: {message: string}) => errItem.message).join('\n\t'), 428: (error: any) => error.message, }; if (err.statusCode && serverErrors[err.statusCode]) { const error = JSON.parse(err.error.replace(')]}\',\n', '')); errMsg += serverErrors[err.statusCode](error); throw new ValidationError(`Unable to upload sourcemap files: ${errMsg}.`); } errMsg += 'Something went wrong and a bug has been reported and will be resolved soon'; logger.error(`Unable to upload sourcemap files: ${errMsg}.`); throw err; }); UploadSourcemap.saveRemove(compressedFileName); spinner.stop(); logger.info('Uploading finished successfully. 
Please wait few minutes for' + ' the uploaded files to be processed.'); }; private static validateOnCLIInput(cmd: UploadSourcemapCMDParams | string) { if (typeof cmd === 'string') { throw new ValidationError(`Invalid args params: '${cmd}'`); } if (!cmd.apiKey || !cmd.apiSecret || !cmd.release || !cmd.minifiedDir || !cmd.projectRoot) { throw new ValidationError('Command params (apiKey, apiSecret,' + ' release, projectRoot and minifiedDir) are required'); } if (!fs.existsSync(cmd.minifiedDir)) { throw new ValidationError('Command param `minifiedDir` is not exists'); } if (!fs.lstatSync(cmd.minifiedDir).isDirectory()) { throw new ValidationError('Command param `minifiedDir` should be directory path'); } const urlRegexValidator = urlRegex({exact: true, strict: false}); if (!urlRegexValidator.test(cmd.projectRoot)) { throw new ValidationError('Invalid `project-root`.' + ' You should add valid url like `http://your-domain.com`'); } if (cmd.endPoint && !urlRegexValidator.test(cmd.endPoint))
}; private static saveRemove(path: string) { return fs.existsSync(path) && fs.unlinkSync(path); } }
{ throw new ValidationError('Invalid `end-point`.' + ' You should add a valid URL like `http://your-domain.com`'); }
conditional_block
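The SIGINT handler in these records removes the temporary archive, exits immediately if no upload has started (or an abort was already requested), and otherwise sends a single abort request before exiting. A minimal sketch of the same guard pattern in Python; send_abort_request is a stand-in for the record's PUT to the abortUploadingSourcemap endpoint:

import signal
import sys

upload_in_flight = False
abort_sent = False

def send_abort_request():
    # Stand-in for the HTTP PUT that tells the server to drop the partial release
    pass

def handle_sigint(signum, frame):
    global abort_sent
    # Only abort once, and only if an upload was actually started
    if not upload_in_flight or abort_sent:
        print("Uploading stopped successfully.")
        sys.exit(0)
    abort_sent = True
    send_abort_request()
    print("Uploading stopped successfully.")
    sys.exit(0)

signal.signal(signal.SIGINT, handle_sigint)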
decl.go
// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package noder import ( "go/constant" "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/syntax" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/compile/internal/types2" ) // TODO(mdempsky): Skip blank declarations? Probably only safe // for declarations without pragmas. func (g *irgen) decls(res *ir.Nodes, decls []syntax.Decl) { for _, decl := range decls { switch decl := decl.(type) { case *syntax.ConstDecl: g.constDecl(res, decl) case *syntax.FuncDecl: g.funcDecl(res, decl) case *syntax.TypeDecl: if ir.CurFunc == nil { continue // already handled in irgen.generate } g.typeDecl(res, decl) case *syntax.VarDecl: g.varDecl(res, decl) default: g.unhandled("declaration", decl) } } } func (g *irgen) importDecl(p *noder, decl *syntax.ImportDecl) { g.pragmaFlags(decl.Pragma, 0) // Get the imported package's path, as resolved already by types2 // and gcimporter. This is the same path as would be computed by // parseImportPath. switch pkgNameOf(g.info, decl).Imported().Path() { case "unsafe": p.importedUnsafe = true case "embed": p.importedEmbed = true } } // pkgNameOf returns the PkgName associated with the given ImportDecl. func pkgNameOf(info *types2.Info, decl *syntax.ImportDecl) *types2.PkgName { if name := decl.LocalPkgName; name != nil { return info.Defs[name].(*types2.PkgName) } return info.Implicits[decl].(*types2.PkgName) } func (g *irgen) constDecl(out *ir.Nodes, decl *syntax.ConstDecl) { g.pragmaFlags(decl.Pragma, 0) for _, name := range decl.NameList { name, obj := g.def(name) // For untyped numeric constants, make sure the value // representation matches what the rest of the // compiler (really just iexport) expects. // TODO(mdempsky): Revisit after #43891 is resolved. val := obj.(*types2.Const).Val() switch name.Type() { case types.UntypedInt, types.UntypedRune: val = constant.ToInt(val) case types.UntypedFloat: val = constant.ToFloat(val) case types.UntypedComplex: val = constant.ToComplex(val) } name.SetVal(val) out.Append(ir.NewDecl(g.pos(decl), ir.ODCLCONST, name)) } } func (g *irgen) funcDecl(out *ir.Nodes, decl *syntax.FuncDecl) { assert(g.curDecl == "") // Set g.curDecl to the function name, as context for the type params declared // during types2-to-types1 translation if this is a generic function. g.curDecl = decl.Name.Value obj2 := g.info.Defs[decl.Name] recv := types2.AsSignature(obj2.Type()).Recv() if recv != nil { t2 := deref2(recv.Type()) // This is a method, so set g.curDecl to recvTypeName.methName instead. g.curDecl = t2.(*types2.Named).Obj().Name() + "." + g.curDecl } fn := ir.NewFunc(g.pos(decl)) fn.Nname, _ = g.def(decl.Name) fn.Nname.Func = fn fn.Nname.Defn = fn fn.Pragma = g.pragmaFlags(decl.Pragma, funcPragmas) if fn.Pragma&ir.Systemstack != 0 && fn.Pragma&ir.Nosplit != 0 { base.ErrorfAt(fn.Pos(), "go:nosplit and go:systemstack cannot be combined") } if fn.Pragma&ir.Nointerface != 0 { // Propagate //go:nointerface from Func.Pragma to Field.Nointerface. // This is a bit roundabout, but this is the earliest point where we've // processed the function's pragma flags, and we've also already created // the Fields to represent the receiver's method set. 
if recv := fn.Type().Recv(); recv != nil { typ := types.ReceiverBaseType(recv.Type) if orig := typ.OrigType(); orig != nil { // For a generic method, we mark the methods on the // base generic type, since those are the methods // that will be stenciled. typ = orig } meth := typecheck.Lookdot1(fn, typecheck.Lookup(decl.Name.Value), typ, typ.Methods(), 0) meth.SetNointerface(true) } } if decl.Body != nil && fn.Pragma&ir.Noescape != 0 { base.ErrorfAt(fn.Pos(), "can only use //go:noescape with external func implementations") } if decl.Name.Value == "init" && decl.Recv == nil { g.target.Inits = append(g.target.Inits, fn) } saveHaveEmbed := g.haveEmbed saveCurDecl := g.curDecl g.curDecl = "" g.later(func() { defer func(b bool, s string) { // Revert haveEmbed and curDecl back to what they were before // the "later" function. g.haveEmbed = b g.curDecl = s }(g.haveEmbed, g.curDecl) // Set haveEmbed and curDecl to what they were for this funcDecl. g.haveEmbed = saveHaveEmbed g.curDecl = saveCurDecl if fn.Type().HasTParam() { g.topFuncIsGeneric = true } g.funcBody(fn, decl.Recv, decl.Type, decl.Body) g.topFuncIsGeneric = false if fn.Type().HasTParam() && fn.Body != nil { // Set pointers to the dcls/body of a generic function/method in // the Inl struct, so it is marked for export, is available for // stenciling, and works with Inline_Flood(). fn.Inl = &ir.Inline{ Cost: 1, Dcl: fn.Dcl, Body: fn.Body, } } out.Append(fn) }) } func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) { // Set the position for any error messages we might print (e.g. too large types). base.Pos = g.pos(decl) assert(ir.CurFunc != nil || g.curDecl == "") // Set g.curDecl to the type name, as context for the type params declared // during types2-to-types1 translation if this is a generic type. saveCurDecl := g.curDecl g.curDecl = decl.Name.Value if decl.Alias { name, _ := g.def(decl.Name) g.pragmaFlags(decl.Pragma, 0) assert(name.Alias()) // should be set by irgen.obj out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name)) g.curDecl = "" return } // Prevent size calculations until we set the underlying type. types.DeferCheckSize() name, obj := g.def(decl.Name) ntyp, otyp := name.Type(), obj.Type() if ir.CurFunc != nil { ntyp.SetVargen() } pragmas := g.pragmaFlags(decl.Pragma, typePragmas) name.SetPragma(pragmas) // TODO(mdempsky): Is this still needed? if pragmas&ir.NotInHeap != 0 { ntyp.SetNotInHeap(true) } // We need to use g.typeExpr(decl.Type) here to ensure that for // chained, defined-type declarations like: // // type T U // // //go:notinheap // type U struct { … } // // we mark both T and U as NotInHeap. If we instead used just // g.typ(otyp.Underlying()), then we'd instead set T's underlying // type directly to the struct type (which is not marked NotInHeap) // and fail to mark T as NotInHeap. // // Also, we rely here on Type.SetUnderlying allowing passing a // defined type and handling forward references like from T to U // above. Contrast with go/types's Named.SetUnderlying, which // disallows this. // // [mdempsky: Subtleties like these are why I always vehemently // object to new type pragmas.] ntyp.SetUnderlying(g.typeExpr(decl.Type)) tparams := otyp.(*types2.Named).TypeParams() if n := tparams.Len(); n > 0 { rparams := make([]*types.Type, n) for i := range rparams { rparams[i] = g.typ(tparams.At(i)) } // This will set hasTParam flag if any rparams are not concrete types. 
ntyp.SetRParams(rparams) } types.ResumeCheckSize() g.curDecl = saveCurDecl if otyp, ok := otyp.(*types2.Named); ok && otyp.NumMethods() != 0 { methods := make([]*types.Field, otyp.NumMethods()) for i := range methods { m := otyp.Method(i) // Set g.curDecl to recvTypeName.methName, as context for the // method-specific type params in the receiver. g.curDecl = decl.Name.Value + "." + m.Name() meth := g.obj(m) methods[i] = types.NewField(meth.Pos(), g.selector(m), meth.Type()) methods[i].Nname = meth g.curDecl = "" } ntyp.Methods().Set(methods) } out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name)) } func (g *irgen) varDecl(out *ir.Nodes, decl *syntax.VarDecl) { pos := g.pos(decl) // Set the position for any error messages we might print (e.g. too large types). base.Pos = pos names := make([]*ir.Name, len(decl.NameList)) for i, name := range decl.NameList { names[i], _ = g.def(name) } if decl.Pragma != nil { pragma := decl.Pragma.(*pragmas) varEmbed(g.makeXPos, names[0], decl, pragma, g.haveEmbed) g.reportUnused(pragma) } haveEmbed := g.haveEmbed do := func() { defer func(b bool) { g.haveEmbed = b }(g.haveEmbed) g.haveEmbed = haveEmbed values := g.exprList(decl.Values) var as2 *ir.AssignListStmt if len(values) != 0 && len(names) != len(values) {
for i, name := range names { if ir.CurFunc != nil { out.Append(ir.NewDecl(pos, ir.ODCL, name)) } if as2 != nil { as2.Lhs[i] = name name.Defn = as2 } else { as := ir.NewAssignStmt(pos, name, nil) if len(values) != 0 { as.Y = values[i] name.Defn = as } else if ir.CurFunc == nil { name.Defn = as } if !g.delayTransform() { lhs := []ir.Node{as.X} rhs := []ir.Node{} if as.Y != nil { rhs = []ir.Node{as.Y} } transformAssign(as, lhs, rhs) as.X = lhs[0] if as.Y != nil { as.Y = rhs[0] } } as.SetTypecheck(1) out.Append(as) } } if as2 != nil { if !g.delayTransform() { transformAssign(as2, as2.Lhs, as2.Rhs) } as2.SetTypecheck(1) out.Append(as2) } } // If we're within a function, we need to process the assignment // part of the variable declaration right away. Otherwise, we leave // it to be handled after all top-level declarations are processed. if ir.CurFunc != nil { do() } else { g.later(do) } } // pragmaFlags returns any specified pragma flags included in allowed, // and reports errors about any other, unexpected pragmas. func (g *irgen) pragmaFlags(pragma syntax.Pragma, allowed ir.PragmaFlag) ir.PragmaFlag { if pragma == nil { return 0 } p := pragma.(*pragmas) present := p.Flag & allowed p.Flag &^= allowed g.reportUnused(p) return present } // reportUnused reports errors about any unused pragmas. func (g *irgen) reportUnused(pragma *pragmas) { for _, pos := range pragma.Pos { if pos.Flag&pragma.Flag != 0 { base.ErrorfAt(g.makeXPos(pos.Pos), "misplaced compiler directive") } } if len(pragma.Embeds) > 0 { for _, e := range pragma.Embeds { base.ErrorfAt(g.makeXPos(e.Pos), "misplaced go:embed directive") } } }
as2 = ir.NewAssignListStmt(pos, ir.OAS2, make([]ir.Node, len(names)), values) }
conditional_block