Dataset columns (each record below carries these five fields):

| Column    | Type         | Stats              |
|-----------|--------------|--------------------|
| file_name | large_string | lengths 4 to 140   |
| prefix    | large_string | lengths 0 to 12.1k |
| suffix    | large_string | lengths 0 to 12k   |
| middle    | large_string | lengths 0 to 7.51k |
| fim_type  | large_string | 4 classes          |
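Each record below is one flattened fill-in-the-middle (FIM) sample: the original file always reassembles as prefix + middle + suffix, and fim_type labels how the middle was cut out. A minimal Python sketch, not part of the dataset itself; the row literal is abridged from the second http.go record below:

```python
# Reassemble a FIM sample; field names match the columns above.
row = {
    "file_name": "http.go",
    "prefix": "func (a *application) ",
    "middle": "log",
    "suffix": "(msg string, params ...interface{}) string { ... }",
    "fim_type": "identifier_name",
}

def reassemble(row: dict) -> str:
    # The original source text is always prefix + middle + suffix.
    return row["prefix"] + row["middle"] + row["suffix"]

print(reassemble(row))  # func (a *application) log(msg string, ...
```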
http.go
== nil { keyVals[k] = int(i) } else if f, err := t.Float64(); err == nil { keyVals[k] = f } else { keyVals[k] = t.String() } } } return keyVals, err } // Key-val format: key=val;key2=val2;... for _, kv := range strings.Split(headers.Get(headerName), ";") { if kv != "" { kvSlice := strings.Split(kv, "=") if len(kvSlice) != 2 { return nil, fmt.Errorf("invalid key=value pair in %s: %s", headerName, kv) } keyVals[trim(kvSlice[0])] = trim(kvSlice[1]) } } for k, v := range keyVals { s := v.(string) if i, err := strconv.Atoi(s); err == nil { keyVals[k] = i } else if f, err := strconv.ParseFloat(s, 64); err == nil { keyVals[k] = f } // else: no need to do anything, the value stays a string } return keyVals, nil } func strIfToStrStr(m map[string]interface{}, err error) (map[string]string, error) { if err != nil { return nil, err } result := make(map[string]string, len(m)) for k, v := range m { s, ok := v.(string) if !ok { return nil, fmt.Errorf("%v is not a valid string", v) } result[k] = s } return result, nil } func readEnumSpec(headers http.Header) (map[string][]string, error) { enumSpecJson := headers.Get("X-QCache-enum-specs") if enumSpecJson == "" { return nil, nil } result := map[string][]string{} if err := json.Unmarshal([]byte(enumSpecJson), &result); err != nil { return nil, fmt.Errorf("could not decode JSON content in X-QCache-enum-specs: %s", err.Error()) } return result, nil } func headersToCsvConfig(headers http.Header) ([]csv.ConfigFunc, error) { typs, err := strIfToStrStr(headerToKeyValues(headers, "X-QCache-types")) if err != nil { return nil, err } enumVals, err := readEnumSpec(headers) if err != nil { return nil, err } rowCountHint := 0 if rowCountHintStr := headers.Get("X-QCache-row-count-hint"); rowCountHintStr != "" { rowCountHint, err = strconv.Atoi(rowCountHintStr) if err != nil { return nil, err } } return []csv.ConfigFunc{csv.Types(typs), csv.EnumValues(enumVals), csv.EmptyNull(true), csv.RowCountHint(rowCountHint), csv.IgnoreEmptyLines(true)}, nil } func headersToJsonConfig(headers http.Header) ([]newqf.ConfigFunc, error) { enumVals, err := readEnumSpec(headers) if err != nil { return nil, err } return []newqf.ConfigFunc{newqf.Enums(enumVals)}, nil } func firstErr(errs ...error) error { for _, err := range errs { if err != nil { return err } } return nil } func parseContentType(h string) (string, string) { result := strings.Split(h, ";") if len(result) == 1 { return strings.TrimSpace(h), "" } if len(result) >= 2 { contentType, charset := strings.TrimSpace(result[0]), "" match := charsetRegex.FindStringSubmatch(result[1]) if len(match) > 1 { charset = match[1] } return contentType, charset } return "", "" } func (a *application) log(msg string, params ...interface{}) string { result := fmt.Sprintf(msg, params...) a.logger.Printf("%s", result) return result } func (a *application) logError(source string, err error) { if err != nil { a.logger.Printf("Error %s: %v", source, err) } } func (a *application) badRequest(w http.ResponseWriter, msg string, params ...interface{}) { http.Error(w, a.log(msg, params...), http.StatusBadRequest) } func (a *application) newDataset(w http.ResponseWriter, r *http.Request)
frame = qf.ReadCSV(r.Body, configFns...) case contentTypeJson: configFns, err := headersToJsonConfig(r.Header) if err != nil { a.badRequest(w, err.Error()) return } frame = qf.ReadJSON(r.Body, configFns...) default: a.badRequest(w, "Unknown content type: %s", contentType) return } if frame.Err != nil { a.badRequest(w, "Could not decode data: %v", frame.Err) return } frame, _, err := addStandInColumns(frame, r.Header) if err = firstErr(err, frame.Err); err != nil { a.badRequest(w, err.Error()) return } err = a.cache.Put(key, frame, frame.ByteSize()) a.logError("Put new dataset in cache", err) w.WriteHeader(http.StatusCreated) statsProbe.Success(frame.Len()) } func addStandInColumns(frame qf.QFrame, headers http.Header) (qf.QFrame, bool, error) { standIns, err := headerToKeyValues(headers, "X-QCache-stand-in-columns") if err != nil { return frame, false, err } columnAdded := false for col, standIn := range standIns { if !frame.Contains(col) { if s, ok := standIn.(string); ok { if qostrings.IsQuoted(s) { // String constant standIn = qostrings.TrimQuotes(s) } else { // Column reference standIn = types.ColumnName(s) } } frame = frame.Eval(col, qf.Val(standIn)) columnAdded = true } } return frame, columnAdded, nil } func formatContentType(ct string) string { return fmt.Sprintf("%s; charset=utf-8", ct) } func (a *application) queryDatasetGet(w http.ResponseWriter, r *http.Request) { // The query is located in the URL a.queryDataset(w, r, func(r *http.Request) (string, error) { err := r.ParseForm() a.logError("Form parse query", err) return r.Form.Get("q"), nil }) } func (a *application) queryDatasetPost(w http.ResponseWriter, r *http.Request) { // The query is located in the body a.queryDataset(w, r, func(r *http.Request) (string, error) { defer r.Body.Close() b, err := ioutil.ReadAll(r.Body) if err != nil { return "", err } return string(b), nil }) } func (a *application) queryDataset(w http.ResponseWriter, r *http.Request, qFn func(r *http.Request) (string, error)) { statsProbe := statistics.NewQueryProbe(r.Context()) vars := mux.Vars(r) key := vars["key"] item, ok := a.cache.Get(key) if !ok { w.WriteHeader(http.StatusNotFound) _, err := w.Write([]byte(fmt.Sprintf("Dataset '%s' not found", key))) a.logError("Query dataset write not found", err) statsProbe.Missing() return } frame := item.(qf.QFrame) qstring, err := qFn(r) if err != nil { a.badRequest(w, "Error reading query: %s", err.Error()) return } frame, columnAdded, err := addStandInColumns(frame, r.Header) if err != nil { a.badRequest(w, "Error adding stand-in columns: %s", err.Error()) return } if columnAdded { // Need to replace existing frame in cache since the new one contains // additional columns. err := a.cache.Put(key, frame, frame.ByteSize()) a.logError("Column added put dataset in cache", err) } if qstring != "" { result := query.Query(frame, qstring) if result.Err != nil { a.badRequest(w, "Error executing query: %s", result.Err.Error()) return } frame = result.Qframe w.Header().Set("X-QCache-unsliced-length", fmt.Sprintf("%d", result.UnslicedLen)) } // This is a bit simplistic since we assume that only one content type // is listed and not a prioritized list. Good enough for now. accept :=
{ statsProbe := statistics.NewStoreProbe(r.Context()) defer r.Body.Close() vars := mux.Vars(r) key := vars["key"] var frame qf.QFrame contentType, charset := parseContentType(r.Header.Get("Content-Type")) if charset != "" && charset != "utf-8" { a.badRequest(w, "Unsupported charset: %s", charset) return } switch contentType { case contentTypeCsv: configFns, err := headersToCsvConfig(r.Header) if err != nil { a.badRequest(w, err.Error()) return }
identifier_body
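The prefix of this record implements headerToKeyValues, which parses a `key=val;key2=val2` header and coerces each value to int, then float, then falls back to the raw string (the truncated top of the record also shows a JSON branch using json.Number). A Python sketch of just the key=val branch, mirroring the Go coercion order:

```python
def header_to_key_values(header_value: str) -> dict:
    """Parse 'k=v;k2=v2' pairs; coerce values int -> float -> str."""
    key_vals = {}
    for kv in header_value.split(";"):
        if not kv:
            continue
        parts = kv.split("=")
        if len(parts) != 2:
            raise ValueError(f"invalid key=value pair: {kv}")
        key_vals[parts[0].strip()] = parts[1].strip()
    for k, s in list(key_vals.items()):
        try:
            key_vals[k] = int(s)
        except ValueError:
            try:
                key_vals[k] = float(s)
            except ValueError:
                pass  # leave as string, like the Go version
    return key_vals

assert header_to_key_values("a=1;b=2.5;c=x") == {"a": 1, "b": 2.5, "c": "x"}
```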
http.go
err == nil { keyVals[k] = int(i) } else if f, err := t.Float64(); err == nil { keyVals[k] = f } else { keyVals[k] = t.String() } } } return keyVals, err } // Key-val format: key=val,key2=val2, ... for _, kv := range strings.Split(headers.Get(headerName), ";") { if kv != "" { kvSlice := strings.Split(kv, "=") if len(kvSlice) != 2 { return nil, fmt.Errorf("invalid key=value pair in %s: %s", headerName, kv) } keyVals[trim(kvSlice[0])] = trim(kvSlice[1]) } } for k, v := range keyVals { s := v.(string) if i, err := strconv.Atoi(s); err == nil { keyVals[k] = i } else if f, err := strconv.ParseFloat(s, 64); err == nil { keyVals[k] = f } // else: No need to do anything } return keyVals, nil } func strIfToStrStr(m map[string]interface{}, err error) (map[string]string, error) { if err != nil { return nil, err } result := make(map[string]string, len(m)) for k, v := range m { s, ok := v.(string) if !ok { return nil, fmt.Errorf("%v is not a valid string", v) } result[k] = s } return result, nil } func readEnumSpec(headers http.Header) (map[string][]string, error) { enumSpecJson := headers.Get("X-QCache-enum-specs") if enumSpecJson == "" { return nil, nil } result := map[string][]string{} if err := json.Unmarshal([]byte(enumSpecJson), &result); err != nil { return nil, fmt.Errorf("could not decode JSON content in X-QCache-enum-specs: %s", err.Error()) } return result, nil } func headersToCsvConfig(headers http.Header) ([]csv.ConfigFunc, error) { typs, err := strIfToStrStr(headerToKeyValues(headers, "X-QCache-types")) if err != nil { return nil, err } enumVals, err := readEnumSpec(headers) if err != nil { return nil, err } rowCountHint := 0 if rowCountHintStr := headers.Get("X-QCache-row-count-hint"); rowCountHintStr != "" { rowCountHint, err = strconv.Atoi(rowCountHintStr) if err != nil { return nil, err } } return []csv.ConfigFunc{csv.Types(typs), csv.EnumValues(enumVals), csv.EmptyNull(true), csv.RowCountHint(rowCountHint), csv.IgnoreEmptyLines(true)}, nil } func headersToJsonConfig(headers http.Header) ([]newqf.ConfigFunc, error) { enumVals, err := readEnumSpec(headers) if err != nil { return nil, err } return []newqf.ConfigFunc{newqf.Enums(enumVals)}, nil } func firstErr(errs ...error) error { for _, err := range errs { if err != nil { return err } } return nil } func parseContentType(h string) (string, string) { result := strings.Split(h, ";") if len(result) == 1 { return strings.TrimSpace(h), "" } if len(result) >= 2 { contentType, charset := strings.TrimSpace(result[0]), "" match := charsetRegex.FindStringSubmatch(result[1]) if len(match) > 1 { charset = match[1] } return contentType, charset } return "", "" } func (a *application)
(msg string, params ...interface{}) string { result := fmt.Sprintf(msg, params...) a.logger.Printf(result) return result } func (a *application) logError(source string, err error) { if err != nil { a.logger.Printf("Error %s: %v", source, err) } } func (a *application) badRequest(w http.ResponseWriter, msg string, params ...interface{}) { http.Error(w, a.log(msg, params...), http.StatusBadRequest) } func (a *application) newDataset(w http.ResponseWriter, r *http.Request) { statsProbe := statistics.NewStoreProbe(r.Context()) defer r.Body.Close() vars := mux.Vars(r) key := vars["key"] var frame qf.QFrame contentType, charset := parseContentType(r.Header.Get("Content-Type")) if charset != "" && charset != "utf-8" { a.badRequest(w, "Unsupported charset: %s", charset) return } switch contentType { case contentTypeCsv: configFns, err := headersToCsvConfig(r.Header) if err != nil { a.badRequest(w, err.Error()) return } frame = qf.ReadCSV(r.Body, configFns...) case contentTypeJson: configFns, err := headersToJsonConfig(r.Header) if err != nil { a.badRequest(w, err.Error()) return } frame = qf.ReadJSON(r.Body, configFns...) default: a.badRequest(w, "Unknown content type: %s", contentType) return } if frame.Err != nil { a.badRequest(w, "Could not decode data: %v", frame.Err) return } frame, _, err := addStandInColumns(frame, r.Header) if err = firstErr(err, frame.Err); err != nil { a.badRequest(w, err.Error()) return } err = a.cache.Put(key, frame, frame.ByteSize()) a.logError("Put new dataset in cache", err) w.WriteHeader(http.StatusCreated) statsProbe.Success(frame.Len()) } func addStandInColumns(frame qf.QFrame, headers http.Header) (qf.QFrame, bool, error) { standIns, err := headerToKeyValues(headers, "X-QCache-stand-in-columns") if err != nil { return frame, false, err } columnAdded := false for col, standIn := range standIns { if !frame.Contains(col) { if s, ok := standIn.(string); ok { if qostrings.IsQuoted(s) { // String constant standIn = qostrings.TrimQuotes(s) } else { // Column reference standIn = types.ColumnName(s) } } frame = frame.Eval(col, qf.Val(standIn)) columnAdded = true } } return frame, columnAdded, nil } func formatContentType(ct string) string { return fmt.Sprintf("%s; charset=utf-8", ct) } func (a *application) queryDatasetGet(w http.ResponseWriter, r *http.Request) { // The query is located in the URL a.queryDataset(w, r, func(r *http.Request) (string, error) { err := r.ParseForm() a.logError("Form parse query", err) return r.Form.Get("q"), nil }) } func (a *application) queryDatasetPost(w http.ResponseWriter, r *http.Request) { // The query is located in the body a.queryDataset(w, r, func(r *http.Request) (string, error) { defer r.Body.Close() b, err := ioutil.ReadAll(r.Body) if err != nil { return "", err } return string(b), nil }) } func (a *application) queryDataset(w http.ResponseWriter, r *http.Request, qFn func(r *http.Request) (string, error)) { statsProbe := statistics.NewQueryProbe(r.Context()) vars := mux.Vars(r) key := vars["key"] item, ok := a.cache.Get(key) if !ok { w.WriteHeader(http.StatusNotFound) _, err := w.Write([]byte(fmt.Sprintf("Dataset '%s' not found", key))) a.logError("Query dataset write not found", err) statsProbe.Missing() return } frame := item.(qf.QFrame) qstring, err := qFn(r) if err != nil { a.badRequest(w, "Error reading query: %s", err.Error()) return } frame, columnAdded, err := addStandInColumns(frame, r.Header) if err != nil { a.badRequest(w, "Error adding standin columns: %s", err.Error()) return } if columnAdded { // Need to replace 
existing frame in cache since the new one contains // additional columns. err := a.cache.Put(key, frame, frame.ByteSize()) a.logError("Column added put dataset in cache", err) } if qstring != "" { result := query.Query(frame, qstring) if result.Err != nil { a.badRequest(w, "Error executing query: %s", result.Err.Error()) return } frame = result.Qframe w.Header().Set("X-QCache-unsliced-length", fmt.Sprintf("%d", result.UnslicedLen)) } // This is a bit simplistic since we assume that only one content type // is listed and not a prioritized list. Good enough for now. accept :=
log
identifier_name
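This record's middle is just the identifier `log`; its context also shows parseContentType, which splits a Content-Type header into media type and charset. A Python sketch of the same split; the Go charsetRegex is defined outside the excerpt, so the pattern used here is an assumption:

```python
import re

CHARSET_RE = re.compile(r"charset=([\w-]+)")  # assumed; charsetRegex is not shown

def parse_content_type(h: str):
    parts = h.split(";")
    if len(parts) == 1:
        return h.strip(), ""
    m = CHARSET_RE.search(parts[1])
    return parts[0].strip(), m.group(1) if m else ""

assert parse_content_type("text/csv; charset=utf-8") == ("text/csv", "utf-8")
assert parse_content_type("application/json") == ("application/json", "")
```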
http.go
string) { result := strings.Split(h, ";") if len(result) == 1 { return strings.TrimSpace(h), "" } if len(result) >= 2 { contentType, charset := strings.TrimSpace(result[0]), "" match := charsetRegex.FindStringSubmatch(result[1]) if len(match) > 1 { charset = match[1] } return contentType, charset } return "", "" } func (a *application) log(msg string, params ...interface{}) string { result := fmt.Sprintf(msg, params...) a.logger.Printf(result) return result } func (a *application) logError(source string, err error) { if err != nil { a.logger.Printf("Error %s: %v", source, err) } } func (a *application) badRequest(w http.ResponseWriter, msg string, params ...interface{}) { http.Error(w, a.log(msg, params...), http.StatusBadRequest) } func (a *application) newDataset(w http.ResponseWriter, r *http.Request) { statsProbe := statistics.NewStoreProbe(r.Context()) defer r.Body.Close() vars := mux.Vars(r) key := vars["key"] var frame qf.QFrame contentType, charset := parseContentType(r.Header.Get("Content-Type")) if charset != "" && charset != "utf-8" { a.badRequest(w, "Unsupported charset: %s", charset) return } switch contentType { case contentTypeCsv: configFns, err := headersToCsvConfig(r.Header) if err != nil { a.badRequest(w, err.Error()) return } frame = qf.ReadCSV(r.Body, configFns...) case contentTypeJson: configFns, err := headersToJsonConfig(r.Header) if err != nil { a.badRequest(w, err.Error()) return } frame = qf.ReadJSON(r.Body, configFns...) default: a.badRequest(w, "Unknown content type: %s", contentType) return } if frame.Err != nil { a.badRequest(w, "Could not decode data: %v", frame.Err) return } frame, _, err := addStandInColumns(frame, r.Header) if err = firstErr(err, frame.Err); err != nil { a.badRequest(w, err.Error()) return } err = a.cache.Put(key, frame, frame.ByteSize()) a.logError("Put new dataset in cache", err) w.WriteHeader(http.StatusCreated) statsProbe.Success(frame.Len()) } func addStandInColumns(frame qf.QFrame, headers http.Header) (qf.QFrame, bool, error) { standIns, err := headerToKeyValues(headers, "X-QCache-stand-in-columns") if err != nil { return frame, false, err } columnAdded := false for col, standIn := range standIns { if !frame.Contains(col) { if s, ok := standIn.(string); ok { if qostrings.IsQuoted(s) { // String constant standIn = qostrings.TrimQuotes(s) } else { // Column reference standIn = types.ColumnName(s) } } frame = frame.Eval(col, qf.Val(standIn)) columnAdded = true } } return frame, columnAdded, nil } func formatContentType(ct string) string { return fmt.Sprintf("%s; charset=utf-8", ct) } func (a *application) queryDatasetGet(w http.ResponseWriter, r *http.Request) { // The query is located in the URL a.queryDataset(w, r, func(r *http.Request) (string, error) { err := r.ParseForm() a.logError("Form parse query", err) return r.Form.Get("q"), nil }) } func (a *application) queryDatasetPost(w http.ResponseWriter, r *http.Request) { // The query is located in the body a.queryDataset(w, r, func(r *http.Request) (string, error) { defer r.Body.Close() b, err := ioutil.ReadAll(r.Body) if err != nil { return "", err } return string(b), nil }) } func (a *application) queryDataset(w http.ResponseWriter, r *http.Request, qFn func(r *http.Request) (string, error)) { statsProbe := statistics.NewQueryProbe(r.Context()) vars := mux.Vars(r) key := vars["key"] item, ok := a.cache.Get(key) if !ok { w.WriteHeader(http.StatusNotFound) _, err := w.Write([]byte(fmt.Sprintf("Dataset '%s' not found", key))) a.logError("Query dataset write not found", err) 
statsProbe.Missing() return } frame := item.(qf.QFrame) qstring, err := qFn(r) if err != nil { a.badRequest(w, "Error reading query: %s", err.Error()) return } frame, columnAdded, err := addStandInColumns(frame, r.Header) if err != nil { a.badRequest(w, "Error adding standin columns: %s", err.Error()) return } if columnAdded { // Need to replace existing frame in cache since the new one contains // additional columns. err := a.cache.Put(key, frame, frame.ByteSize()) a.logError("Column added put dataset in cache", err) } if qstring != "" { result := query.Query(frame, qstring) if result.Err != nil { a.badRequest(w, "Error executing query: %s", result.Err.Error()) return } frame = result.Qframe w.Header().Set("X-QCache-unsliced-length", fmt.Sprintf("%d", result.UnslicedLen)) } // This is a bit simplistic since we assume that only one content type // is listed and not a prioritized . Good enough for now. accept := r.Header.Get("Accept") w.Header().Set("Content-Type", formatContentType(accept)) switch accept { case contentTypeCsv: err = frame.ToCSV(w) case contentTypeJson: err = frame.ToJSON(w) default: a.badRequest(w, "Unknown accept type: %s", accept) return } if err != nil { // Panic for now, will be picked up by recover middleware panic(fmt.Sprintf("Failed writing query response: %v", err)) } statsProbe.Success() } func (a *application) statistics(w http.ResponseWriter, r *http.Request) { accept := r.Header.Get("Accept") if accept == "" || accept == "*/*" { accept = contentTypeJson } if accept != contentTypeJson { a.badRequest(w, "Unknown accept type: %s, statistics only available in JSON format", accept) return } w.Header().Set("Content-Type", formatContentType(accept)) stats := a.stats.Stats() enc := json.NewEncoder(w) err := enc.Encode(stats) a.logError("Encoding stats", err) } func (a *application) status(w http.ResponseWriter, r *http.Request) { _, err := w.Write([]byte("OK")) a.logError("Write status", err) } func attachProfiler(router *mux.Router) { router.HandleFunc("/debug/pprof/", pprof.Index) router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) router.HandleFunc("/debug/pprof/profile", pprof.Profile) router.HandleFunc("/debug/pprof/symbol", pprof.Symbol) router.Handle("/debug/pprof/goroutine", pprof.Handler("goroutine")) router.Handle("/debug/pprof/heap", pprof.Handler("heap")) router.Handle("/debug/pprof/threadcreate", pprof.Handler("threadcreate")) router.Handle("/debug/pprof/block", pprof.Handler("block")) } func Application(conf config.Config, logger qlog.Logger) (*mux.Router, error) { c := cache.New(conf.Size, time.Duration(conf.Age)*time.Second) s := statistics.New(c, conf.StatisticsBufferSize) app := &application{cache: c, stats: s, logger: logger} r := mux.NewRouter() middleWares := make([]middleware, 0) middleWares = append(middleWares, withRecover(logger)) middleWares = append(middleWares, withStatistics(s)) if conf.RequestLog { middleWares = append(middleWares, withRequestLog(app)) } if conf.BasicAuth != "" { user, password, err := parseBasicAuth(conf.BasicAuth) if err != nil { return nil, err } middleWares = append(middleWares, withBasicAuth(app.logger, user, password)) } middleWares = append(middleWares, withLz4(app)) mw := chainMiddleware(middleWares...) // Mount on both qcache and qocache for compatibility with qcache for _, root := range []string{"/qcache", "/qocache"}
{ r.HandleFunc(root+"/dataset/{key}", mw(app.newDataset)).Methods("POST") r.HandleFunc(root+"/dataset/{key}/q", mw(app.queryDatasetPost)).Methods("POST") r.HandleFunc(root+"/dataset/{key}", mw(app.queryDatasetGet)).Methods("GET") r.HandleFunc(root+"/statistics", mw(app.statistics)).Methods("GET") r.HandleFunc(root+"/status", mw(app.status)).Methods("GET") }
conditional_block
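The middle of this record is the route-mounting block from Application(), which wraps every handler in a middleware chain built by chainMiddleware. That function is defined outside the excerpt; the sketch below shows one conventional way such chaining works, so the wrap order is an assumption:

```python
def chain_middleware(*middlewares):
    """Compose handler wrappers; the first middleware ends up outermost."""
    def chain(handler):
        for mw in reversed(middlewares):
            handler = mw(handler)
        return handler
    return chain

def with_request_log(handler):  # hypothetical middleware
    def wrapped(request):
        print("request:", request)
        return handler(request)
    return wrapped

status = chain_middleware(with_request_log)(lambda request: "OK")
print(status("/qcache/status"))  # logs the request, then returns "OK"
```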
http.go
err == nil { keyVals[k] = int(i) } else if f, err := t.Float64(); err == nil { keyVals[k] = f } else { keyVals[k] = t.String() } } } return keyVals, err } // Key-val format: key=val,key2=val2, ... for _, kv := range strings.Split(headers.Get(headerName), ";") { if kv != "" { kvSlice := strings.Split(kv, "=") if len(kvSlice) != 2 { return nil, fmt.Errorf("invalid key=value pair in %s: %s", headerName, kv) } keyVals[trim(kvSlice[0])] = trim(kvSlice[1]) } } for k, v := range keyVals { s := v.(string) if i, err := strconv.Atoi(s); err == nil { keyVals[k] = i } else if f, err := strconv.ParseFloat(s, 64); err == nil { keyVals[k] = f } // else: No need to do anything } return keyVals, nil } func strIfToStrStr(m map[string]interface{}, err error) (map[string]string, error) { if err != nil { return nil, err } result := make(map[string]string, len(m)) for k, v := range m { s, ok := v.(string) if !ok { return nil, fmt.Errorf("%v is not a valid string", v) } result[k] = s } return result, nil } func readEnumSpec(headers http.Header) (map[string][]string, error) { enumSpecJson := headers.Get("X-QCache-enum-specs") if enumSpecJson == "" { return nil, nil } result := map[string][]string{} if err := json.Unmarshal([]byte(enumSpecJson), &result); err != nil { return nil, fmt.Errorf("could not decode JSON content in X-QCache-enum-specs: %s", err.Error()) } return result, nil } func headersToCsvConfig(headers http.Header) ([]csv.ConfigFunc, error) { typs, err := strIfToStrStr(headerToKeyValues(headers, "X-QCache-types")) if err != nil { return nil, err } enumVals, err := readEnumSpec(headers) if err != nil { return nil, err } rowCountHint := 0 if rowCountHintStr := headers.Get("X-QCache-row-count-hint"); rowCountHintStr != "" { rowCountHint, err = strconv.Atoi(rowCountHintStr) if err != nil { return nil, err } } return []csv.ConfigFunc{csv.Types(typs), csv.EnumValues(enumVals), csv.EmptyNull(true), csv.RowCountHint(rowCountHint), csv.IgnoreEmptyLines(true)}, nil } func headersToJsonConfig(headers http.Header) ([]newqf.ConfigFunc, error) { enumVals, err := readEnumSpec(headers) if err != nil { return nil, err } return []newqf.ConfigFunc{newqf.Enums(enumVals)}, nil } func firstErr(errs ...error) error { for _, err := range errs { if err != nil { return err } } return nil } func parseContentType(h string) (string, string) { result := strings.Split(h, ";") if len(result) == 1 { return strings.TrimSpace(h), "" } if len(result) >= 2 { contentType, charset := strings.TrimSpace(result[0]), "" match := charsetRegex.FindStringSubmatch(result[1]) if len(match) > 1 { charset = match[1] } return contentType, charset } return "", ""
func (a *application) log(msg string, params ...interface{}) string { result := fmt.Sprintf(msg, params...) a.logger.Printf(result) return result } func (a *application) logError(source string, err error) { if err != nil { a.logger.Printf("Error %s: %v", source, err) } } func (a *application) badRequest(w http.ResponseWriter, msg string, params ...interface{}) { http.Error(w, a.log(msg, params...), http.StatusBadRequest) } func (a *application) newDataset(w http.ResponseWriter, r *http.Request) { statsProbe := statistics.NewStoreProbe(r.Context()) defer r.Body.Close() vars := mux.Vars(r) key := vars["key"] var frame qf.QFrame contentType, charset := parseContentType(r.Header.Get("Content-Type")) if charset != "" && charset != "utf-8" { a.badRequest(w, "Unsupported charset: %s", charset) return } switch contentType { case contentTypeCsv: configFns, err := headersToCsvConfig(r.Header) if err != nil { a.badRequest(w, err.Error()) return } frame = qf.ReadCSV(r.Body, configFns...) case contentTypeJson: configFns, err := headersToJsonConfig(r.Header) if err != nil { a.badRequest(w, err.Error()) return } frame = qf.ReadJSON(r.Body, configFns...) default: a.badRequest(w, "Unknown content type: %s", contentType) return } if frame.Err != nil { a.badRequest(w, "Could not decode data: %v", frame.Err) return } frame, _, err := addStandInColumns(frame, r.Header) if err = firstErr(err, frame.Err); err != nil { a.badRequest(w, err.Error()) return } err = a.cache.Put(key, frame, frame.ByteSize()) a.logError("Put new dataset in cache", err) w.WriteHeader(http.StatusCreated) statsProbe.Success(frame.Len()) } func addStandInColumns(frame qf.QFrame, headers http.Header) (qf.QFrame, bool, error) { standIns, err := headerToKeyValues(headers, "X-QCache-stand-in-columns") if err != nil { return frame, false, err } columnAdded := false for col, standIn := range standIns { if !frame.Contains(col) { if s, ok := standIn.(string); ok { if qostrings.IsQuoted(s) { // String constant standIn = qostrings.TrimQuotes(s) } else { // Column reference standIn = types.ColumnName(s) } } frame = frame.Eval(col, qf.Val(standIn)) columnAdded = true } } return frame, columnAdded, nil } func formatContentType(ct string) string { return fmt.Sprintf("%s; charset=utf-8", ct) } func (a *application) queryDatasetGet(w http.ResponseWriter, r *http.Request) { // The query is located in the URL a.queryDataset(w, r, func(r *http.Request) (string, error) { err := r.ParseForm() a.logError("Form parse query", err) return r.Form.Get("q"), nil }) } func (a *application) queryDatasetPost(w http.ResponseWriter, r *http.Request) { // The query is located in the body a.queryDataset(w, r, func(r *http.Request) (string, error) { defer r.Body.Close() b, err := ioutil.ReadAll(r.Body) if err != nil { return "", err } return string(b), nil }) } func (a *application) queryDataset(w http.ResponseWriter, r *http.Request, qFn func(r *http.Request) (string, error)) { statsProbe := statistics.NewQueryProbe(r.Context()) vars := mux.Vars(r) key := vars["key"] item, ok := a.cache.Get(key) if !ok { w.WriteHeader(http.StatusNotFound) _, err := w.Write([]byte(fmt.Sprintf("Dataset '%s' not found", key))) a.logError("Query dataset write not found", err) statsProbe.Missing() return } frame := item.(qf.QFrame) qstring, err := qFn(r) if err != nil { a.badRequest(w, "Error reading query: %s", err.Error()) return } frame, columnAdded, err := addStandInColumns(frame, r.Header) if err != nil { a.badRequest(w, "Error adding standin columns: %s", err.Error()) return } if 
columnAdded { // Need to replace existing frame in cache since the new one contains // additional columns. err := a.cache.Put(key, frame, frame.ByteSize()) a.logError("Column added put dataset in cache", err) } if qstring != "" { result := query.Query(frame, qstring) if result.Err != nil { a.badRequest(w, "Error executing query: %s", result.Err.Error()) return } frame = result.Qframe w.Header().Set("X-QCache-unsliced-length", fmt.Sprintf("%d", result.UnslicedLen)) } // This is a bit simplistic since we assume that only one content type // is listed and not a prioritized list. Good enough for now. accept := r
}
random_line_split
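The suffix of this record ends at the Accept handling that the comment itself calls simplistic: the header is treated as a single media type, not a prioritized list. A Python sketch of that dispatch; the concrete values of contentTypeCsv and contentTypeJson are not shown in the excerpt, so the MIME strings below are assumptions:

```python
def pick_writer(accept: str) -> str:
    # Assumed MIME values; the Go constants are defined outside the excerpt.
    writers = {"text/csv": "ToCSV", "application/json": "ToJSON"}
    if accept not in writers:
        raise ValueError(f"Unknown accept type: {accept}")
    return writers[accept]

print(pick_writer("application/json"))  # ToJSON
```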
HeaderCell.ts
from './HeaderRow'; import Cell, {IOptions as ICellOptions} from './Cell'; export interface IOptions<T> extends ICellOptions<T> { shadowVisibility?: string; backgroundStyle?: string; sorting?: string; cellPadding?: IItemPadding; } const DEFAULT_CELL_TEMPLATE = 'Controls/gridNew:HeaderContent'; const FIXED_HEADER_Z_INDEX = 4; const STICKY_HEADER_Z_INDEX = 3; export default class HeaderCell<T> extends Cell<T, HeaderRow<T>> { protected _$owner: HeaderRow<T>; protected _$column: IHeaderCell; protected _$cellPadding: IItemPadding; protected _$align?: string; protected _$valign?: string; protected _$shadowVisibility?: string; protected _$backgroundStyle?: string; protected _$sorting?: string; get shadowVisibility(): string { return this._$shadowVisibility; } get backgroundStyle(): string { return this._$backgroundStyle; } constructor(options?: IOptions<T>) { super(options); if (!this.isCheckBoxCell()) { const {align, valign} = this.getContentOrientation(); this._$align = align; this._$valign = valign; } } getContentOrientation(): {align?: string; valign?: string} { /* * Alignment is applied with the following priority: * 1) Alignment set on the header cell itself * 2) If the column is stretched, the content is centered by default * 3) The content is aligned the same way as the content of the data column * 4) Top left corner * */ const hasAlign = 'align' in this._$column; const hasValign = 'valign' in this._$column; let align = hasAlign ? this._$column.align : undefined; let valign = hasValign ? this._$column.valign : undefined; const get = (prop: 'align' | 'valign'): string | undefined => { const gridUnit = prop === 'align' ? 'Column' : 'Row'; if (typeof this._$column[`start${gridUnit}`] !== 'undefined' && typeof this._$column[`end${gridUnit}`] !== 'undefined' && ( (this._$column[`end${gridUnit}`] - this._$column[`start${gridUnit}`]) > 1) ) { return 'center'; } else if (typeof this._$column[`start${gridUnit}`] !== 'undefined') { return this._$owner.getColumnsConfig()[this._$column[`start${gridUnit}`] - 1][prop]; } else { return this._$owner.getColumnsConfig()[this._$owner.getHeaderConfig().indexOf(this._$column)][prop]; } }; if (!hasAlign) { align = get('align'); } if (!hasValign) { valign = get('valign'); } return { align, valign }; } isCheckBoxCell(): boolean { return this._$owner.hasMultiSelectColumn() && this._$owner.getHeaderConfig().indexOf(this._$column) === -1; } // region Column merging aspect _getColspanParams(): IColspanParams { if (this._$column.startColumn && this._$column.endColumn) { const multiSelectOffset = this.isCheckBoxCell() ? 0 : +this._$owner.hasMultiSelectColumn(); return { startColumn: this._$column.startColumn + multiSelectOffset, endColumn: this._$column.endColumn + multiSelectOffset }; } return super._getColspanParams(); } // endregion // region Row merging aspect _getRowspanParams(): Required<IRowspanParams> { const startRow = typeof this._$column.startRow === 'number' ? 
this._$column.startRow : (this._$owner.getIndex() + 1); let endRow; if (typeof this._$column.endRow === 'number') { endRow = this._$column.endRow; } else if (typeof this._$column.rowspan === 'number') { endRow = startRow + this._$column.rowspan; } else { endRow = startRow + 1; } return { startRow, endRow, rowspan: endRow - startRow }; } getRowspan(): string { if (!this._$owner.isFullGridSupport()) { return this._getRowspanParams().rowspan; } const {startRow, endRow} = this._getRowspanParams(); return `grid-row: ${startRow} / ${endRow};`; } // endregion getWrapperStyles(): string { let zIndex; if (this._$owner.hasColumnScroll()) { zIndex = this._$isFixed ? FIXED_HEADER_Z_INDEX : STICKY_HEADER_Z_INDEX; } else { zIndex = FIXED_HEADER_Z_INDEX; } let styles = super.getWrapperStyles(); if (this._$owner.isFullGridSupport()) { styles += this.getRowspan(); } styles += ` z-index: ${zIndex};`; return styles; } getWrapperClasses(theme: string, backgroundColorStyle: string, style: string): string { let wrapperClasses = `controls-Grid__header-cell controls-Grid__cell_${style}` + ` controls-Grid__header-cell_theme-${theme}` + ` ${this._getWrapperPaddingClasses(theme)}` + ` ${this._getColumnSeparatorClasses(theme)}` + ` controls-background-${backgroundColorStyle || style}_theme-${theme}`; const isMultilineHeader = this._$owner.isMultiline(); const isStickySupport = this._$owner.isStickyHeader(); if (isMultilineHeader) { wrapperClasses += ` controls-Grid__multi-header-cell_min-height_theme-${theme}`; } else { wrapperClasses += ` controls-Grid__header-cell_min-height_theme-${theme}`; } if (!isStickySupport) { wrapperClasses += ' controls-Grid__header-cell_static'; } if (!this.isMultiSelectColumn()) { wrapperClasses += ' controls-Grid__header-cell_min-width'; } if (this._$valign) { wrapperClasses += ` controls-Grid__header-cell__content_valign-${this._$valign}`; } if (this._$owner.hasColumnScroll()) { wrapperClasses += ` ${this._getColumnScrollWrapperClasses(theme)}`; } // _private.getBackgroundStyle(this._options, true); return wrapperClasses; } getContentClasses(theme: string): string { const isMultiLineHeader = this._$owner.isMultiline(); let contentClasses = 'controls-Grid__header-cell__content'; contentClasses += ` controls-Grid__header-cell__content_theme-${theme}`; contentClasses += this._getContentSeparatorClasses(theme); if (isMultiLineHeader) { contentClasses += ` controls-Grid__row-multi-header__content_baseline_theme-${theme}`; } else { contentClasses += ` controls-Grid__row-header__content_baseline_theme-${theme}`;
} return contentClasses; } protected _getContentSeparatorClasses(theme: string): string { let headerEndRow = this._$owner.getBounds().row.end; const isMultiLineHeader = this._$owner.isMultiline(); let classes = ''; if (isMultiLineHeader) { if (this._$column.endRow !== headerEndRow && this._$column.endRow - this._$column.startRow === 1) { classes += ` controls-Grid__cell_header-content_border-bottom_theme-${theme}`; } } return classes; } getTemplate(): TemplateFunction|string { return this._$column.template || DEFAULT_CELL_TEMPLATE; } getCaption(): string { // todo "title" - is deprecated property, use "caption" return this._$column.caption || this._$column.title; } getSortingProperty(): string { return this._$column.sortingProperty; } setSorting(sorting: string): void { this._$sorting = sorting; this._nextVersion(); } getSorting(): string { return this._$sorting; } getAlign(): string { return this._$align; } getVAlign(): string { return this._$valign; } getTextOverflow(): string { return this._$column.textOverflow; } // todo <<< START >>> compatible with old gridHeaderModel get column(): IHeaderCell { return this._$column; } // todo <<< END >>> isLastColumn(): boolean { const isMultilineHeader = this._$owner.isMultiline(); if (isMultilineHeader) { let headerEndColumn = this._$owner.getBounds().column.end; const currentEndColumn = this._getColspanParams().endColumn; if (this._$
} if (this._$align) { contentClasses += ` controls-Grid__header-cell_justify_content_${this._$align}`;
random_line_split
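The prefix of this record contains _getRowspanParams, whose fallback order is: explicit endRow, else startRow + rowspan, else a span of one row. A Python rendering of that logic, with plain dicts standing in for the TS column config:

```python
def get_rowspan_params(column: dict, owner_index: int) -> dict:
    """Mirror of _getRowspanParams: explicit endRow wins, then rowspan,
    then a single row starting at the cell's own header row."""
    start_row = column.get("startRow", owner_index + 1)
    if "endRow" in column:
        end_row = column["endRow"]
    elif "rowspan" in column:
        end_row = start_row + column["rowspan"]
    else:
        end_row = start_row + 1
    return {"startRow": start_row, "endRow": end_row,
            "rowspan": end_row - start_row}

# getRowspan() renders this as CSS under full grid support:
p = get_rowspan_params({"rowspan": 2}, owner_index=0)
print(f"grid-row: {p['startRow']} / {p['endRow']};")  # grid-row: 1 / 3;
```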
HeaderCell.ts
from './HeaderRow'; import Cell, {IOptions as ICellOptions} from './Cell'; export interface IOptions<T> extends ICellOptions<T> { shadowVisibility?: string; backgroundStyle?: string; sorting?: string; cellPadding?: IItemPadding; } const DEFAULT_CELL_TEMPLATE = 'Controls/gridNew:HeaderContent'; const FIXED_HEADER_Z_INDEX = 4; const STICKY_HEADER_Z_INDEX = 3; export default class HeaderCell<T> extends Cell<T, HeaderRow<T>> { protected _$owner: HeaderRow<T>; protected _$column: IHeaderCell; protected _$cellPadding: IItemPadding; protected _$align?: string; protected _$valign?: string; protected _$shadowVisibility?: string; protected _$backgroundStyle?: string; protected _$sorting?: string; get shadowVisibility(): string { return this._$shadowVisibility; } get backgroundStyle(): string { return this._$backgroundStyle; } constructor(options?: IOptions<T>) { super(options); if (!this.isCheckBoxCell()) { const {align, valign} = this.getContentOrientation(); this._$align = align; this._$valign = valign; } } getContentOrientation(): {align?: string; valign?: string} { /* * Alignment is applied with the following priority: * 1) Alignment set on the header cell itself * 2) If the column is stretched, the content is centered by default * 3) The content is aligned the same way as the content of the data column * 4) Top left corner * */ const hasAlign = 'align' in this._$column; const hasValign = 'valign' in this._$column; let align = hasAlign ? this._$column.align : undefined; let valign = hasValign ? this._$column.valign : undefined; const get = (prop: 'align' | 'valign'): string | undefined => { const gridUnit = prop === 'align' ? 'Column' : 'Row'; if (typeof this._$column[`start${gridUnit}`] !== 'undefined' && typeof this._$column[`end${gridUnit}`] !== 'undefined' && ( (this._$column[`end${gridUnit}`] - this._$column[`start${gridUnit}`]) > 1) ) { return 'center'; } else if (typeof this._$column[`start${gridUnit}`] !== 'undefined') { return this._$owner.getColumnsConfig()[this._$column[`start${gridUnit}`] - 1][prop]; } else { return this._$owner.getColumnsConfig()[this._$owner.getHeaderConfig().indexOf(this._$column)][prop]; } }; if (!hasAlign) { align = get('align'); } if (!hasValign) { valign = get('valign'); } return { align, valign }; } isCheckBoxCell(): boolean { return this._$owner.hasMultiSelectColumn() && this._$owner.getHeaderConfig().indexOf(this._$column) === -1; } // region Column merging aspect _getColspanParams(): IColspanParams { if (this._$column.startColumn && this._$column.endColumn) { const multiSelectOffset = this.isCheckBoxCell() ? 0 : +this._$owner.hasMultiSelectColumn(); return { startColumn: this._$column.startColumn + multiSelectOffset, endColumn: this._$column.endColumn + multiSelectOffset }; } return super._getColspanParams(); } // endregion // region Row merging aspect _getRowspanParams(): Required<IRowspanParams> { const startRow = typeof this._$column.startRow === 'number' ? 
this._$column.startRow : (this._$owner.getIndex() + 1); let endRow; if (typeof this._$column.endRow === 'number') { endRow = this._$column.endRow; } else if (typeof this._$column.rowspan === 'number') { endRow = startRow + this._$column.rowspan; } else { endRow = startRow + 1; } return { startRow, endRow, rowspan: endRow - startRow }; } getRowspan(): string { if (!this._$owner.isFullGridSupport()) { return this._getRowspanParams().rowspan; } const {startRow, endRow} = this._getRowspanParams(); return `grid-row: ${startRow} / ${endRow};`; } // endregion getWrapperStyles(): string { let zIndex; if (this._$owner.hasColumnScroll()) { zIndex = this._$isFixed ? FIXED_HEADER_Z_INDEX : STICKY_HEADER_Z_INDEX; } else { zIndex = FIXED_HEADER_Z_INDEX; } let styles = super.getWrapperStyles(); if (this._$owner.isFullGridSupport()) { styles += this.getRowspan(); } styles += ` z-index: ${zIndex};`; return styles; } getWrapperClasses(theme: string, backgroundColorStyle: string, style: string): string { let wrapperClasses = `controls-Grid__header-cell controls-Grid__cell_${style}` + ` controls-Grid__header-cell_theme-${theme}` + ` ${this._getWrapperPaddingClasses(theme)}` + ` ${this._getColumnSeparatorClasses(theme)}` + ` controls-background-${backgroundColorStyle || style}_theme-${theme}`; const isMultilineHeader = this._$owner.isMultiline(); const isStickySupport = this._$owner.isStickyHeader(); if (isMultilineHeader) { wrapperClasses += ` controls-Grid__multi-header-cell_min-height_theme-${theme}`; } else { wrapperClasses += ` controls-Grid__header-cell_min-height_theme-${theme}`; } if (!isStickySupport) { wrapperClasses += ' controls-Grid__header-cell_static'; } if (!this.isMultiSelectColumn()) { wrapperClasses += ' controls-Grid__header-cell_min-width'; } if (this._$valign) { wrapperClasses += ` controls-Grid__header-cell__content_valign-${this._$valign}`; } if (this._$owner.hasColumnScroll()) { wrapperClasses += ` ${this._getColumnScrollWrapperClasses(theme)}`; } // _private.getBackgroundStyle(this._options, true); return wrapperClasses; } getContentClasses(theme: string): string { const isMultiLineHeader = this._$owner.isMultiline(); let contentClasses = 'controls-Grid__header-cell__content'; contentClasses += ` controls-Grid__header-cell__content_theme-${theme}`; contentClasses += this._getContentSeparatorClasses(theme); if (isMultiLineHeader) { contentClasses += ` controls-Grid__row-multi-header__content_baseline_theme-${theme}`; } else { contentClasses += ` controls-Grid__row-header__content_baseline_theme-${theme}`; } if (this._$align) { contentClasses += ` controls-Grid__header-cell_justify_content_${this._$align}`; } return contentClasses; } protected _getContentSeparatorClasses(theme: string): string { let headerEndRow = this._$owner.getBounds().row.end; const isMultiLineHeader = this._$owner.isMultiline(); let classes = ''; if (isMultiLineHeader) { if (this._$column.endRow !== headerEndRow && this._$column.endRow - this._$column.startRow === 1) { classes += ` controls-Grid__cell_header-content_border-bottom_theme-${theme}`; } } return classes; } getTemplate(): TemplateFunction|string { return this._$column.template || DEFAULT_CELL_TEMPLATE; } getCaption(): string { // todo "title" - is deprecated property, use "caption" return this._$column.caption || this._$column.title; } getSortingProperty(): string { return this._$column.sortingProperty; } setSorting(sorting: string): void { this._$sorting = sorting; this._nextVersion(); } getSorting(): string { return this._$sorting; } getAlign(): string 
{ return this._$align; } getVAlign(): string { return this._$valign; } getTextOverflow(): string { return this._$column.textOverflow;
// todo <<< START >>> compatible with old gridHeaderModel get column(): IHeaderCell { return this._$column; } // todo <<< END >>> isLastColumn(): boolean { const isMultilineHeader = this._$owner.isMultiline(); if (isMultilineHeader) { let headerEndColumn = this._$owner.getBounds().column.end; const currentEndColumn = this._getColspanParams().endColumn; if (this._$owner
}
identifier_name
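_getColspanParams in this record shifts explicit column bounds one step to the right when a multi-select (checkbox) column is prepended, except for the checkbox cell itself. The same arithmetic in Python:

```python
def get_colspan_params(column: dict, has_multi_select: bool,
                       is_checkbox_cell: bool) -> dict:
    """Mirror of _getColspanParams: +1 offset when a checkbox column
    is prepended, unless this cell is the checkbox cell itself."""
    offset = 0 if is_checkbox_cell else int(has_multi_select)
    return {"startColumn": column["startColumn"] + offset,
            "endColumn": column["endColumn"] + offset}

print(get_colspan_params({"startColumn": 1, "endColumn": 3},
                         has_multi_select=True, is_checkbox_cell=False))
# {'startColumn': 2, 'endColumn': 4}
```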
HeaderCell.ts
from './HeaderRow'; import Cell, {IOptions as ICellOptions} from './Cell'; export interface IOptions<T> extends ICellOptions<T> { shadowVisibility?: string; backgroundStyle?: string; sorting?: string; cellPadding?: IItemPadding; } const DEFAULT_CELL_TEMPLATE = 'Controls/gridNew:HeaderContent'; const FIXED_HEADER_Z_INDEX = 4; const STICKY_HEADER_Z_INDEX = 3; export default class HeaderCell<T> extends Cell<T, HeaderRow<T>> { protected _$owner: HeaderRow<T>; protected _$column: IHeaderCell; protected _$cellPadding: IItemPadding; protected _$align?: string; protected _$valign?: string; protected _$shadowVisibility?: string; protected _$backgroundStyle?: string; protected _$sorting?: string; get shadowVisibility(): string { return this._$shadowVisibility; } get backgroundStyle(): string { return this._$backgroundStyle; } constructor(options?: IOptions<T>) { super(options); if (!this.isCheckBoxCell()) { const {align, valign} = this.getContentOrientation(); this._$align = align; this._$valign = valign; } } getContentOrientation(): {align?: string; valign?: string} { /* * Alignment is applied with the following priority: * 1) Alignment set on the header cell itself * 2) If the column is stretched, the content is centered by default * 3) The content is aligned the same way as the content of the data column * 4) Top left corner * */ const hasAlign = 'align' in this._$column; const hasValign = 'valign' in this._$column; let align = hasAlign ? this._$column.align : undefined; let valign = hasValign ? this._$column.valign : undefined; const get = (prop: 'align' | 'valign'): string | undefined => { const gridUnit = prop === 'align' ? 'Column' : 'Row'; if (typeof this._$column[`start${gridUnit}`] !== 'undefined' && typeof this._$column[`end${gridUnit}`] !== 'undefined' && ( (this._$column[`end${gridUnit}`] - this._$column[`start${gridUnit}`]) > 1) ) { return 'center'; } else if (typeof this._$column[`start${gridUnit}`] !== 'undefined') { return this._$owner.getColumnsConfig()[this._$column[`start${gridUnit}`] - 1][prop]; } else { return this._$owner.getColumnsConfig()[this._$owner.getHeaderConfig().indexOf(this._$column)][prop]; } }; if (!hasAlign) { align = get('align'); } if (!hasValign) { valign = get('valign'); } return { align, valign }; } isCheckBoxCell(): boolean { return this._$owner.hasMultiSelectColumn() && this._$owner.getHeaderConfig().indexOf(this._$column) === -1; } // region Column merging aspect _getColspanParams(): IColspanParams { if (this._$column.startColumn && this._$column.endColumn) { const multiSelectOffset = this.isCheckBoxCell() ? 0 : +this._$owner.hasMultiSelectColumn(); return { startColumn: this._$column.startColumn + multiSelectOffset, endColumn: this._$column.endColumn + multiSelectOffset }; } return super._getColspanParams(); } // endregion // region Row merging aspect _getRowspanParams(): Required<IRowspanParams> { const startRow = typeof this._$column.startRow === 'number' ? 
this._$column.startRow : (this._$owner.getIndex() + 1); let endRow; if (typeof this._$column.endRow === 'number') { endRow = this._$column.endRow; } else if (typeof this._$column.rowspan === 'number') { endRow = startRow + this._$column.rowspan; } else { endRow = startRow + 1; } return { startRow, endRow, rowspan: endRow - startRow }; } getRowspan(): string { if (!this._$owner.isFullGridSupport()) { return this._getRowspanParams().rowspan; } const {startRow, endRow} = this._getRowspanParams(); return `grid-row: ${startRow} / ${endRow};`; } // endregion getWrapperStyles(): string { let zIndex; if (this._$owner.hasColumnScroll()) { zIndex = this._$isFixed ? FIXED_HEADER_Z_INDEX : STICKY_HEADER_Z_INDEX; } else { zIndex = FIXED_HEADER_Z_INDEX; } let styles = super.getWrapperStyles(); if (this._$owner.isFullGridSupport()) { styles += this.getRowspan(); } styles += ` z-index: ${zIndex};`; return styles; } getWrapperClasses(theme: string, backgroundColorStyle: string, style: string): string { let wrapperClasses = `controls-Grid__header-cell controls-Grid__cell_${style}` + ` controls-Grid__header-cell_theme-${theme}` + ` ${this._ge
ht_theme-${theme}`; } else { wrapperClasses += ` controls-Grid__header-cell_min-height_theme-${theme}`; } if (!isStickySupport) { wrapperClasses += ' controls-Grid__header-cell_static'; } if (!this.isMultiSelectColumn()) { wrapperClasses += ' controls-Grid__header-cell_min-width'; } if (this._$valign) { wrapperClasses += ` controls-Grid__header-cell__content_valign-${this._$valign}`; } if (this._$owner.hasColumnScroll()) { wrapperClasses += ` ${this._getColumnScrollWrapperClasses(theme)}`; } // _private.getBackgroundStyle(this._options, true); return wrapperClasses; } getContentClasses(theme: string): string { const isMultiLineHeader = this._$owner.isMultiline(); let contentClasses = 'controls-Grid__header-cell__content'; contentClasses += ` controls-Grid__header-cell__content_theme-${theme}`; contentClasses += this._getContentSeparatorClasses(theme); if (isMultiLineHeader) { contentClasses += ` controls-Grid__row-multi-header__content_baseline_theme-${theme}`; } else { contentClasses += ` controls-Grid__row-header__content_baseline_theme-${theme}`; } if (this._$align) { contentClasses += ` controls-Grid__header-cell_justify_content_${this._$align}`; } return contentClasses; } protected _getContentSeparatorClasses(theme: string): string { let headerEndRow = this._$owner.getBounds().row.end; const isMultiLineHeader = this._$owner.isMultiline(); let classes = ''; if (isMultiLineHeader) { if (this._$column.endRow !== headerEndRow && this._$column.endRow - this._$column.startRow === 1) { classes += ` controls-Grid__cell_header-content_border-bottom_theme-${theme}`; } } return classes; } getTemplate(): TemplateFunction|string { return this._$column.template || DEFAULT_CELL_TEMPLATE; } getCaption(): string { // todo "title" - is deprecated property, use "caption" return this._$column.caption || this._$column.title; } getSortingProperty(): string { return this._$column.sortingProperty; } setSorting(sorting: string): void { this._$sorting = sorting; this._nextVersion(); } getSorting(): string { return this._$sorting; } getAlign(): string { return this._$align; } getVAlign(): string { return this._$valign; } getTextOverflow(): string { return this._$column.textOverflow; } // todo <<< START >>> compatible with old gridHeaderModel get column(): IHeaderCell { return this._$column; } // todo <<< END >>> isLastColumn(): boolean { const isMultilineHeader = this._$owner.isMultiline(); if (isMultilineHeader) { let headerEndColumn = this._$owner.getBounds().column.end; const currentEndColumn = this._getColspanParams().endColumn; if (this._
tWrapperPaddingClasses(theme)}` + ` ${this._getColumnSeparatorClasses(theme)}` + ` controls-background-${backgroundColorStyle || style}_theme-${theme}`; const isMultilineHeader = this._$owner.isMultiline(); const isStickySupport = this._$owner.isStickyHeader(); if (isMultilineHeader) { wrapperClasses += ` controls-Grid__multi-header-cell_min-heig
identifier_body
HeaderCell.ts
Row from './HeaderRow'; import Cell, {IOptions as ICellOptions} from './Cell'; export interface IOptions<T> extends ICellOptions<T> { shadowVisibility?: string; backgroundStyle?: string; sorting?: string; cellPadding?: IItemPadding; } const DEFAULT_CELL_TEMPLATE = 'Controls/gridNew:HeaderContent'; const FIXED_HEADER_Z_INDEX = 4; const STICKY_HEADER_Z_INDEX = 3; export default class HeaderCell<T> extends Cell<T, HeaderRow<T>> { protected _$owner: HeaderRow<T>; protected _$column: IHeaderCell; protected _$cellPadding: IItemPadding; protected _$align?: string; protected _$valign?: string; protected _$shadowVisibility?: string; protected _$backgroundStyle?: string; protected _$sorting?: string; get shadowVisibility(): string { return this._$shadowVisibility; } get backgroundStyle(): string { return this._$backgroundStyle; } constructor(options?: IOptions<T>) { super(options); if (!this.isCheckBoxCell()) { const {align, valign} = this.getContentOrientation(); this._$align = align; this._$valign = valign; } } getContentOrientation(): {align?: string; valign?: string} { /* * Alignment is applied with the following priority: * 1) Alignment set on the header cell itself * 2) If the column is stretched, the content is centered by default * 3) The content is aligned the same way as the content of the data column * 4) Top left corner * */ const hasAlign = 'align' in this._$column; const hasValign = 'valign' in this._$column; let align = hasAlign ? this._$column.align : undefined; let valign = hasValign ? this._$column.valign : undefined; const get = (prop: 'align' | 'valign'): string | undefined => { const gridUnit = prop === 'align' ? 'Column' : 'Row'; if (typeof this._$column[`start${gridUnit}`] !== 'undefined' && typeof this._$column[`end${gridUnit}`] !== 'undefined' && ( (this._$column[`end${gridUnit}`] - this._$column[`start${gridUnit}`]) > 1) ) { return 'center'; } else if (typeof this._$column[`start${gridUnit}`] !== 'undefined') { return this._$owner.getColumnsConfig()[this._$column[`start${gridUnit}`] - 1][prop]; } else { return this._$owner.getColumnsConfig()[this._$owner.getHeaderConfig().indexOf(this._$column)][prop]; } }; if (!hasAlign) { align = get('align'); } if (!hasValign) { valign = get('valign'); } return { align, valign }; } isCheckBoxCell(): boolean { return this._$owner.hasMultiSelectColumn() && this._$owner.getHeaderConfig().indexOf(this._$column) === -1; } // region Column merging aspect _getColspanParams(): IColspanParams { if (this._$column.startColumn && this._$column.endColumn) { const multiSelectOffset = this.isCheckBoxCell() ? 0 : +this._$owner.hasMultiSelectColumn(); return { startColumn: this._$column.startColumn + multiSelectOffset, endColumn: this._$column.endColumn + multiSelectOffset }; } return super._getColspanParams(); } // endregion // region Row merging aspect _getRowspanParams(): Required<IRowspanParams> { const startRow = typeof this._$column.startRow === 'number' ? 
this._$column.startRow : (this._$owner.getIndex() + 1); let endRow; if (typeof this._$column.endRow === 'number') { endRow = this._$column.endRow; } else if (typeof this._$column.rowspan === 'number') { endRow = startRow + this._$column.rowspan; } else { endRow = startRow + 1; } return { startRow, endRow, rowspan: endRow - startRow }; } getRowspan(): string { if (!this._$owner.isFullGridSupport()) { return this._getRowspanParams().rowspan; } const {startRow, endRow} = this._getRowspanParams(); return `grid-row: ${startRow} / ${endRow};`; } // endregion getWrapperStyles(): string { let zIndex; if (this._$owner.hasColumnScroll()) { zIndex = this._$isFixed ? FIXED_HEADER_Z_INDEX : STICKY_HEADER_Z_INDEX; } else { zIndex = FIXED_HEADER_Z_INDEX; } let styles = super.getWrapperStyles(); if (this._$owner.isFullGridSupport()) { styles += this.getRowspan(); } styles += ` z-index: ${zIndex};`; return styles; } getWrapperClasses(theme: string, backgroundColorStyle: string, style: string): string { let wrapperClasses = `controls-Grid__header-cell controls-Grid__cell_${style}` + ` controls-Grid__header-cell_theme-${theme}` + ` ${this._getWrapperPaddingClasses(theme)}` + ` ${this._getColumnSeparatorClasses(theme)}` + ` controls-background-${backgroundColorStyle || style}_theme-${theme}`; const isMultilineHeader = this._$owner.isMultiline(); const isStickySupport = this._$owner.isStickyHeader(); if (isMultilineHeader) { wrapperClasses += ` controls-Grid__multi-header-cell_min-height_theme-${theme}`; } else { wrapperClasses += ` controls-Grid__header-cell_min-height_theme-${theme}`; } if (!isStickySupport) { wrapperClasses += ' controls-Grid__header-cell_static'; } if (!this.isMultiSelectColumn()) { wrapperClasses += ' controls-Grid__header-cell_min-width'; } if (this._$valign) { wrapperClasses += ` controls-Grid__header-cell__content_valign-${this._$valign}`; } if (this._$owner.hasColumnScroll()) { wrapperClasses += ` ${this._getColumnScrollWrapperClasses(theme)}`; } // _private.getBackgroundStyle(this._options, true); return wrapperClasses; } getContentClasses(theme: string): string { const isMultiLineHeader = this._$owner.isMultiline(); let contentClasses = 'controls-Grid__header-cell__content'; contentClasses += ` controls-Grid__header-cell__content_theme-${theme}`; contentClasses += this._getContentSeparatorClasses(theme); if (isMultiLineHeader) { contentClasses += ` controls-Grid__row-multi-header__content_baseline_theme-${theme}`; } else { contentClasses += ` controls-Grid__row-header__content_baseline_theme-${theme}`; } if (this._$align) { contentClasses += ` controls-Grid__header-cell_justify_content_${this._$align}`; } return contentClasses; } protected _getContentSeparatorClasses(theme: string): string { let headerEndRow = this._$owner.getBounds().row.end; const isMultiLineHeader = this._$owner.isMultiline(); let classes = ''; if (isMultiLineHeader) { if (this._$column.endRow !== headerEndRow && this._$column.endRow - this._$column
}`; } } return classes; } getTemplate(): TemplateFunction|string { return this._$column.template || DEFAULT_CELL_TEMPLATE; } getCaption(): string { // todo "title" - is deprecated property, use "caption" return this._$column.caption || this._$column.title; } getSortingProperty(): string { return this._$column.sortingProperty; } setSorting(sorting: string): void { this._$sorting = sorting; this._nextVersion(); } getSorting(): string { return this._$sorting; } getAlign(): string { return this._$align; } getVAlign(): string { return this._$valign; } getTextOverflow(): string { return this._$column.textOverflow; } // todo <<< START >>> compatible with old gridHeaderModel get column(): IHeaderCell { return this._$column; } // todo <<< END >>> isLastColumn(): boolean { const isMultilineHeader = this._$owner.isMultiline(); if (isMultilineHeader) { let headerEndColumn = this._$owner.getBounds().column.end; const currentEndColumn = this._getColspanParams().endColumn; if (this._$owner
.startRow === 1) { classes += ` controls-Grid__cell_header-content_border-bottom_theme-${theme
conditional_block
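getContentOrientation (see the translated comment in the prefixes above) resolves alignment with a four-level priority: the header cell's own setting, 'center' for stretched cells, the underlying data column's setting, and finally unset (top-left). A Python sketch of that resolution:

```python
def content_orientation(column: dict, columns_config: list,
                        header_index: int) -> tuple:
    """1) cell's own value, 2) 'center' if stretched, 3) data column's
    value, 4) None (top-left default)."""
    def resolve(prop, start_key, end_key):
        if prop in column:
            return column[prop]
        start, end = column.get(start_key), column.get(end_key)
        if start is not None and end is not None and end - start > 1:
            return "center"  # stretched cell: center by default
        idx = start - 1 if start is not None else header_index
        return columns_config[idx].get(prop)  # inherit from data column
    return (resolve("align", "startColumn", "endColumn"),
            resolve("valign", "startRow", "endRow"))

print(content_orientation({"startColumn": 1, "endColumn": 3},
                          [{"align": "left"}, {}, {}], 0))  # ('center', None)
```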
util.py
ils(shape):
    npts = shape.shape[0]
    if npts == 29:
        dist_pupils = np.linalg.norm(shape[7 - 1, :] - shape[16 - 1, :])
    elif npts == 68:
        left_eye_4 = [38 - 1, 39 - 1, 41 - 1, 42 - 1]
        right_eye_4 = [44 - 1, 45 - 1, 47 - 1, 48 - 1]
        left_center = np.mean(shape[left_eye_4, :], 0)
        right_center = np.mean(shape[right_eye_4, :], 0)
        dist_pupils = np.linalg.norm(left_center - right_center)
    return dist_pupils

def initialization(init_train_set, N_aug, stage='train'):
    number_samples = len(init_train_set)
    train_set = []
    # when training we use permuted truth as the initial state
    if stage == 'train':
        for sample_index in range(number_samples):
            random_index = np.random.permutation(number_samples)[:N_aug]
            for index in range(N_aug):
                # deep copy so each augmented sample gets its own guess
                # (assumes `import copy` above this excerpt)
                train_set.append(copy.deepcopy(init_train_set[sample_index]))
                train_set[-1].guess = init_train_set[random_index[index]].truth
                # align the guess shape with the box
                train_set[-1].guess = alignShapeToBox(train_set[-1].guess, init_train_set[random_index[index]].box, train_set[-1].box)
        print('Initialization done. Number of augmented samples: {} x {} = {}'.format(number_samples, N_aug, number_samples * N_aug))
    else:
        # when testing, we take a representative shape from the train set
        pass
    return train_set

def alignShapeToBox(shape0, box0, box):
    npts = shape0.shape[0]  # number of landmarks
    scale = box[1, 0] / box0[1, 0]
    # align the center of the shape to the center of the box
    xc, yc = np.mean(box, 0)
    shape = shape0 - np.tile(np.mean(shape0, 0), (npts, 1))
    shape = shape * scale
    shape = shape + np.tile([xc, yc], (npts, 1))
    return shape

def computeMeanShape(train_set):
    # compute in an iterative fashion:
    # 1) use the truth shape (dataset.guess) of the first image as the mean shape
    # 2) align all other truth shapes to the mean shape
    # 3) take the average of all shapes as the mean shape
    # 4) repeat 2)-3) until the condition is met
    refshape = train_set[0].guess.reshape(1, -1)
    npts = refshape.size // 2
    # align all other shapes to this shape
    nshapes = len(train_set)
    alignedShapes = np.zeros((nshapes, npts * 2))
    for i in range(nshapes):
        alignedShapes[i, :] = train_set[i].guess.reshape(-1)
    refshape = alignedShapes[0, :]
    iters = 0
    diff = float("inf")
    maxIters = 4
    while diff > 1e-2 and iters < maxIters:
        iters = iters + 1
        for i in range(nshapes):
        refshape_new = np.mean(alignedShapes, 0)
        diff = np.max(np.abs(refshape - refshape_new))
        refshape = refshape_new
    print('MeanShape finished in {} iterations.'.format(iters))
    return refshape.reshape(-1, 2)

def alignShape(s1, s0):
    npts = len(s1) // 2
    s1 = s1.reshape(npts, 2)
    s0 = s0.reshape(npts, 2)
    s, R, t = estimateTransform(s1, s0)
    s1 = s * R.dot(s1.T) + np.tile(t.reshape(-1, 1), (1, npts))
    s1 = s1.T
    s1 = s1.reshape(1, npts * 2)
    return s1

def estimateTransform(source_shape, target_shape):
    # least-squares similarity transform (Umeyama): target ~ s * R * source + t
    n, m = source_shape.shape
    mu_source = np.mean(source_shape, 0)
    mu_target = np.mean(target_shape, 0)
    d_source = source_shape - np.tile(mu_source, (n, 1))
    sig_source2 = np.sum(d_source * d_source) / n
    d_target = target_shape - np.tile(mu_target, (n, 1))
    sig_target2 = np.sum(d_target * d_target) / n
    sig_source_target = d_target.T.dot(d_source) / n
    det_sig_source_target = np.linalg.det(sig_source_target)
    S = np.eye(m)
    if det_sig_source_target < 0:
        S[m-1, m-1] = -1
    u, d, vh = np.linalg.svd(sig_source_target, full_matrices=True)
    R = u.dot(S).dot(vh)
    s = np.trace(np.diag(d).dot(S)) / sig_source2
    t = mu_target - s * R.dot(mu_source)
    return s, R, t

def normalizedShapeTargets(train_set, mean_shape):
    nsamples = len(train_set)
    npts = mean_shape.shape[0]
    M_norm = []  # the similarity transform matrix for each sample
    Y = np.zeros((nsamples, npts * 2))
    for i in range(nsamples):
        s, R, _ = estimateTransform(train_set[i].guess, mean_shape)
        M_norm.append(s * R)
        diff = train_set[i].truth - train_set[i].guess
        tdiff = M_norm[i].dot(diff.T)
        Y[i, :] = tdiff.T.reshape(1, -1)
    return Y, M_norm

def learnStageRegressor(train_set, Y, M_norm, params):
    npts = train_set[0].truth.shape[0]
    P = params['P']
    T = params['T']
    F = params['F']
    K = params['K']
    beta = params['beta']
    kappa = params['kappa']
    # generate local coordinates
    print('Generating local coordinates...')
    localCoords = np.zeros((P, 3))  # fpidx, x, y
    for i in range(P):
        localCoords[i, 0] = np.random.randint(0, npts)  # randomly choose a landmark
        localCoords[i, 1:] = (np.random.uniform(size=(1, 2)) - 0.5) * kappa  # fluctuate around the landmark
    # extract shape-indexed pixels
    print('Extracting shape indexed pixels...')
    nsamples = len(train_set)
    M_rho = np.zeros((nsamples, P))
    for i in range(nsamples):
        M_norm_inv = np.linalg.inv(M_norm[i])
        dp = M_norm_inv.dot(localCoords[:, 1:].T).T
        pixPos = train_set[i].guess[localCoords[:, 0].astype(int), :] + dp
        rows, cols = train_set[i].image.shape
        pixPos = np.round(pixPos).astype(int)
        # clamp in case a pixel position falls outside the image
        pixPos[:, 0] = np.minimum(np.maximum(pixPos[:, 0], 0), cols - 1)
        pixPos[:, 1] = np.minimum(np.maximum(pixPos[:, 1], 0), rows - 1)
        M_rho[i, :] = train_set[i].image[pixPos[:, 1].T, pixPos[:, 0].T]
    # compute the pixel-pixel covariance
    cov_Rho = np.cov(M_rho, rowvar=False)
    M_rho_centered = M_rho - np.tile(np.mean(M_rho, 0), (M_rho.shape[0], 1))
    diagCovRho = np.diag(cov_Rho)
    varRhoDRho = -2.0 * cov_Rho + np.tile(diagCovRho[:, np.newaxis], (1, P)) + np.tile(diagCovRho, (P, 1))
    inv_varRhoDRho = 1.0 / varRhoDRho  # element-wise inverse
    # compute all ferns
    print('Constructing ferns...')
    ferns = []
    features = []
    for k in range(K):
        features.append(correlationBasedFeatureSelection(Y, M_rho, M_rho_centered, inv_varRhoDRho, F))
        ferns.append(trainFern(features[-1], Y, M_rho, beta))
        # update the normalized targets
        M_diff_rho = np.zeros((nsamples, F))
        for f in range(F):
            M_diff_rho[:, f] = features[k][f].rho_m - features[k][f].rho_n
        updateMat = evaluateFern_batch(M_diff_rho, ferns[k])
        print('fern %d/%d\tmax(Y) = %.6g, min(Y) = %.6g' % (k, K, np.max(Y), np.min(Y)))
            alignedShapes[i, :] = alignShape(alignedShapes[i, :], refshape)
conditional_block
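The `estimateTransform` routine above solves the least-squares similarity transform (Umeyama's method) with an SVD. For 2-D landmarks the same fit has a closed form, sketched below in Go; `similarity2D`, the `pt` type, and the toy point sets are illustrative, and the formula assumes at least two non-coincident points:

```go
// Sketch: closed-form 2-D least-squares similarity fit (scale s, rotation
// angle theta, translation t) between two point sets -- the same quantity
// estimateTransform computes with an SVD, specialized to the planar case.
package main

import (
	"fmt"
	"math"
)

type pt struct{ X, Y float64 }

func similarity2D(src, dst []pt) (s, theta float64, t pt) {
	n := float64(len(src))
	var ms, md pt // centroids
	for i := range src {
		ms.X += src[i].X / n
		ms.Y += src[i].Y / n
		md.X += dst[i].X / n
		md.Y += dst[i].Y / n
	}
	var a, b, norm float64 // a = <p,q>, b = cross(p,q), norm = |p|^2 over centered points
	for i := range src {
		px, py := src[i].X-ms.X, src[i].Y-ms.Y
		qx, qy := dst[i].X-md.X, dst[i].Y-md.Y
		a += px*qx + py*qy
		b += px*qy - py*qx
		norm += px*px + py*py
	}
	theta = math.Atan2(b, a)
	s = math.Hypot(a, b) / norm
	// t = mu_dst - s * R * mu_src
	c, si := math.Cos(theta), math.Sin(theta)
	t = pt{md.X - s*(c*ms.X-si*ms.Y), md.Y - s*(si*ms.X+c*ms.Y)}
	return
}

func main() {
	src := []pt{{0, 0}, {1, 0}, {0, 1}}
	dst := []pt{{2, 2}, {2, 4}, {0, 2}} // src scaled by 2, rotated 90 degrees, shifted
	s, th, t := similarity2D(src, dst)
	fmt.Printf("s=%.2f theta=%.2f t=(%.2f,%.2f)\n", s, th*180/math.Pi, t.X, t.Y)
}
```

On the toy points this recovers s = 2, theta = 90, t = (2, 2), i.e. exactly the transform used to build `dst`.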
util.py
(data_path):
    number_files = len(os.listdir(data_path)) // 4  # each image has 4 related files
    train_set = []
    for i in range(number_files):
        if i % 50 == 0:
            print('Samples loaded {} / {} ...'.format(i, number_files))
        img = cv2.imread(data_path + 'image_%04d.png' % (i+1), cv2.IMREAD_GRAYSCALE)
        file = open('lfpw-test/image_%04d_original.ljson' % (i+1), 'r')
        content = json.load(file)
        pts = content['groups'][0]['landmarks']
        for j in range(len(pts)):
            pts[j] = pts[j]['point']
            pts[j][0] = int(float(pts[j][0]))
            pts[j][1] = int(float(pts[j][1]))
            pts[j] = pts[j][::-1]
        file = open('lfpw-test/image_%04d_original.ljson' % (i+1), 'r')
        content = json.load(file)
        rect = content['landmarks']['points']
        rect[0][0] = int(float(rect[0][0]))
        rect[0][1] = int(float(rect[0][1]))
        rect[2][0] = int(float(rect[2][0]))
        rect[2][1] = int(float(rect[2][1]))
        rect = [rect[0][::-1], rect[2][::-1]]
        train_set.append(sample(img, np.array(pts), np.array(rect)))
    return train_set

def getDistPupils(shape):
    npts = shape.shape[0]
    if npts == 29:
        dist_pupils = np.linalg.norm(shape[7-1, :] - shape[16-1, :])
    elif npts == 68:
        left_eye_4 = [38-1, 39-1, 41-1, 42-1]
        right_eye_4 = [44-1, 45-1, 47-1, 48-1]
        left_center = np.mean(shape[left_eye_4, :], 0)
        right_center = np.mean(shape[right_eye_4, :], 0)
        dist_pupils = np.linalg.norm(left_center - right_center)
    return dist_pupils

def initialization(init_train_set, N_aug, stage='train'):
    number_samples = len(init_train_set)
    train_set = []
    # when training we use permuted truth shapes as the initial state
    if stage == 'train':
        for sample_index in range(number_samples):
            random_index = np.random.permutation(number_samples)[:N_aug]
            for index in range(N_aug):
                # deep copy so every augmented sample gets its own guess
                # (assumes `import copy` at the top of the file)
                train_set.append(copy.deepcopy(init_train_set[sample_index]))
                train_set[-1].guess = init_train_set[random_index[index]].truth
                # align the guess shape with the box
                train_set[-1].guess = alignShapeToBox(train_set[-1].guess, init_train_set[random_index[index]].box, train_set[-1].box)
        print('Initialization done. Number of augmented samples: {} x {} = {}'.format(number_samples, N_aug, number_samples * N_aug))
        return train_set
    else:
        # when testing, take a representative shape from the train set
        pass

def alignShapeToBox(shape0, box0, box):
    npts = shape0.shape[0]  # number of landmarks
    scale = box[1, 0] / box0[1, 0]
    # align the center of the shape to the center of the box
    box_c_x, box_c_y = np.mean(box, 0)
    shape = shape0 - np.tile(np.mean(shape0, 0), (npts, 1))
    shape = shape * scale
    shape = shape + np.tile([box_c_x, box_c_y], (npts, 1))
    return shape

def computeMeanShape(train_set):
    # computed iteratively:
    # 1) use the truth shape (dataset.guess) of the first image as the mean shape
    # 2) align all other truth shapes to the mean shape
    # 3) take the average of all shapes as the new mean shape
    # 4) repeat 2)-3) until the change is small enough
    refshape = train_set[0].guess.reshape(1, -1)
    npts = refshape.size // 2
    # align all other shapes to this shape
    nshapes = len(train_set)
    alignedShapes = np.zeros((nshapes, npts * 2))
    for i in range(nshapes):
        alignedShapes[i, :] = train_set[i].guess.reshape(1, -1)
    refshape = alignedShapes[0, :]
    iters = 0
    diff = float('inf')
    maxIters = 4
    while diff > 1e-2 and iters < maxIters:
        iters = iters + 1
        for i in range(nshapes):
            alignedShapes[i, :] = alignShape(alignedShapes[i, :], refshape)
        refshape_new = np.mean(alignedShapes, 0)
        diff = np.max(np.abs(refshape - refshape_new))
        refshape = refshape_new
    print('MeanShape finished in {} iterations.'.format(iters))
    return refshape.reshape(-1, 2)

def alignShape(s1, s0):
    npts = len(s1) // 2
    s1 = s1.reshape(npts, 2)
    s0 = s0.reshape(npts, 2)
    s, R, t = estimateTransform(s1, s0)
    s1 = s * R.dot(s1.T) + np.tile(t.reshape(-1, 1), (1, npts))
    s1 = s1.T
    s1 = s1.reshape(1, npts * 2)
    return s1

def estimateTransform(source_shape, target_shape):
    # least-squares similarity transform (Umeyama): target ~ s * R * source + t
    n, m = source_shape.shape
    mu_source = np.mean(source_shape, 0)
    mu_target = np.mean(target_shape, 0)
    d_source = source_shape - np.tile(mu_source, (n, 1))
    sig_source2 = np.sum(d_source * d_source) / n
    d_target = target_shape - np.tile(mu_target, (n, 1))
    sig_target2 = np.sum(d_target * d_target) / n
    sig_source_target = d_target.T.dot(d_source) / n
    det_sig_source_target = np.linalg.det(sig_source_target)
    S = np.eye(m)
    if det_sig_source_target < 0:
        S[m-1, m-1] = -1
    u, d, vh = np.linalg.svd(sig_source_target, full_matrices=True)
    R = u.dot(S).dot(vh)
    s = np.trace(np.diag(d).dot(S)) / sig_source2
    t = mu_target - s * R.dot(mu_source)
    return s, R, t

def normalizedShapeTargets(train_set, mean_shape):
    nsamples = len(train_set)
    npts = mean_shape.shape[0]
    M_norm = []  # the similarity transform matrix for each sample
    Y = np.zeros((nsamples, npts * 2))
    for i in range(nsamples):
        s, R, _ = estimateTransform(train_set[i].guess, mean_shape)
        M_norm.append(s * R)
        diff = train_set[i].truth - train_set[i].guess
        tdiff = M_norm[i].dot(diff.T)
        Y[i, :] = tdiff.T.reshape(1, -1)
    return Y, M_norm

def learnStageRegressor(train_set, Y, M_norm, params):
    npts = train_set[0].truth.shape[0]
    P = params['P']
    T = params['T']
    F = params['F']
    K = params['K']
    beta = params['beta']
    kappa = params['kappa']
    # generate local coordinates
    print('Generating local coordinates...')
    localCoords = np.zeros((P, 3))  # fpidx, x, y
    for i in range(P):
        localCoords[i, 0] = np.random.randint(0, npts)  # randomly choose a landmark
        localCoords[i, 1:] = (np.random.uniform(size=(1, 2)) - 0.5) * kappa  # fluctuate around the landmark
    # extract shape-indexed pixels
    print('Extracting shape indexed pixels...')
    nsamples = len(train_set)
    M_rho = np.zeros((nsamples, P))
    for i in range(nsamples):
        M_norm_inv = np.linalg.inv(M_norm[i])
        dp = M_norm_inv.dot(localCoords[:, 1:].T).T
        pixPos = train_set[i].guess[localCoords[:, 0].astype(int), :] + dp
        rows, cols = train_set[i].image.shape
        pixPos = np.round(pixPos).astype(int)
        # clamp in case a pixel position falls outside the image
        pixPos[:, 0] = np.minimum(np.maximum(pixPos[:, 0], 0), cols - 1)
        pixPos[:, 1] = np.minimum(np.maximum(pixPos[:, 1], 0), rows - 1)
loadTrainData
identifier_name
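The loader above indexes the landmark file as `content['groups'][0]['landmarks'][j]['point']` and then reverses each coordinate pair, which suggests the file stores points in the opposite axis order from what the trainer uses. A small Go sketch of decoding that layout; the struct shape is inferred from the indexing in the Python loader, not from a published ".ljson" spec:

```go
// Sketch: decoding the landmark layout the loader indexes
// (content['groups'][0]['landmarks'][j]['point']).
package main

import (
	"encoding/json"
	"fmt"
)

type ljson struct {
	Groups []struct {
		Landmarks []struct {
			Point [2]float64 `json:"point"` // stored pair, reversed by the loader
		} `json:"landmarks"`
	} `json:"groups"`
}

func main() {
	raw := []byte(`{"groups":[{"landmarks":[{"point":[12.0,34.0]},{"point":[56.0,78.0]}]}]}`)
	var doc ljson
	if err := json.Unmarshal(raw, &doc); err != nil {
		panic(err)
	}
	for _, lm := range doc.Groups[0].Landmarks {
		// the Python loader flips [a, b] -> [b, a] before use
		fmt.Printf("landmark: (%v, %v)\n", lm.Point[1], lm.Point[0])
	}
}
```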
util.py
def getDistPupils(shape):
    npts = shape.shape[0]
    if npts == 29:
        dist_pupils = np.linalg.norm(shape[7-1, :] - shape[16-1, :])
    elif npts == 68:
        left_eye_4 = [38-1, 39-1, 41-1, 42-1]
        right_eye_4 = [44-1, 45-1, 47-1, 48-1]
        left_center = np.mean(shape[left_eye_4, :], 0)
        right_center = np.mean(shape[right_eye_4, :], 0)
        dist_pupils = np.linalg.norm(left_center - right_center)
    return dist_pupils

def initialization(init_train_set, N_aug, stage='train'):
    number_samples = len(init_train_set)
    train_set = []
    # when training we use permuted truth shapes as the initial state
    if stage == 'train':
        for sample_index in range(number_samples):
            random_index = np.random.permutation(number_samples)[:N_aug]
            for index in range(N_aug):
                # deep copy so every augmented sample gets its own guess
                # (assumes `import copy` at the top of the file)
                train_set.append(copy.deepcopy(init_train_set[sample_index]))
                train_set[-1].guess = init_train_set[random_index[index]].truth
                # align the guess shape with the box
                train_set[-1].guess = alignShapeToBox(train_set[-1].guess, init_train_set[random_index[index]].box, train_set[-1].box)
        print('Initialization done. Number of augmented samples: {} x {} = {}'.format(number_samples, N_aug, number_samples * N_aug))
        return train_set
    else:
        # when testing, take a representative shape from the train set
        pass

def alignShapeToBox(shape0, box0, box):
    npts = shape0.shape[0]  # number of landmarks
    scale = box[1, 0] / box0[1, 0]
    # align the center of the shape to the center of the box
    box_c_x, box_c_y = np.mean(box, 0)
    shape = shape0 - np.tile(np.mean(shape0, 0), (npts, 1))
    shape = shape * scale
    shape = shape + np.tile([box_c_x, box_c_y], (npts, 1))
    return shape

def computeMeanShape(train_set):
    # computed iteratively:
    # 1) use the truth shape (dataset.guess) of the first image as the mean shape
    # 2) align all other truth shapes to the mean shape
    # 3) take the average of all shapes as the new mean shape
    # 4) repeat 2)-3) until the change is small enough
    refshape = train_set[0].guess.reshape(1, -1)
    npts = refshape.size // 2
    # align all other shapes to this shape
    nshapes = len(train_set)
    alignedShapes = np.zeros((nshapes, npts * 2))
    for i in range(nshapes):
        alignedShapes[i, :] = train_set[i].guess.reshape(1, -1)
    refshape = alignedShapes[0, :]
    iters = 0
    diff = float('inf')
    maxIters = 4
    while diff > 1e-2 and iters < maxIters:
        iters = iters + 1
        for i in range(nshapes):
            alignedShapes[i, :] = alignShape(alignedShapes[i, :], refshape)
        refshape_new = np.mean(alignedShapes, 0)
        diff = np.max(np.abs(refshape - refshape_new))
        refshape = refshape_new
    print('MeanShape finished in {} iterations.'.format(iters))
    return refshape.reshape(-1, 2)

def alignShape(s1, s0):
    npts = len(s1) // 2
    s1 = s1.reshape(npts, 2)
    s0 = s0.reshape(npts, 2)
    s, R, t = estimateTransform(s1, s0)
    s1 = s * R.dot(s1.T) + np.tile(t.reshape(-1, 1), (1, npts))
    s1 = s1.T
    s1 = s1.reshape(1, npts * 2)
    return s1

def estimateTransform(source_shape, target_shape):
    # least-squares similarity transform (Umeyama): target ~ s * R * source + t
    n, m = source_shape.shape
    mu_source = np.mean(source_shape, 0)
    mu_target = np.mean(target_shape, 0)
    d_source = source_shape - np.tile(mu_source, (n, 1))
    sig_source2 = np.sum(d_source * d_source) / n
    d_target = target_shape - np.tile(mu_target, (n, 1))
    sig_target2 = np.sum(d_target * d_target) / n
    sig_source_target = d_target.T.dot(d_source) / n
    det_sig_source_target = np.linalg.det(sig_source_target)
    S = np.eye(m)
    if det_sig_source_target < 0:
        S[m-1, m-1] = -1
    u, d, vh = np.linalg.svd(sig_source_target, full_matrices=True)
    R = u.dot(S).dot(vh)
    s = np.trace(np.diag(d).dot(S)) / sig_source2
    t = mu_target - s * R.dot(mu_source)
    return s, R, t

def normalizedShapeTargets(train_set, mean_shape):
    nsamples = len(train_set)
    npts = mean_shape.shape[0]
    M_norm = []  # the similarity transform matrix for each sample
    Y = np.zeros((nsamples, npts * 2))
    for i in range(nsamples):
        s, R, _ = estimateTransform(train_set[i].guess, mean_shape)
        M_norm.append(s * R)
        diff = train_set[i].truth - train_set[i].guess
        tdiff = M_norm[i].dot(diff.T)
        Y[i, :] = tdiff.T.reshape(1, -1)
    return Y, M_norm

def learnStageRegressor(train_set, Y, M_norm, params):
    npts = train_set[0].truth.shape[0]
    P = params['P']
    T = params['T']
    F = params['F']
    K = params['K']
    beta = params['beta']
    kappa = params['kappa']
    # generate local coordinates
    print('Generating local coordinates...')
    localCoords = np.zeros((P, 3))  # fpidx, x, y
    for i in range(P):
        localCoords[i, 0] = np.random.randint(0, npts)  # randomly choose a landmark
        localCoords[i, 1:] = (np.random.uniform(size=(1, 2)) - 0.5) * kappa  # fluctuate around the landmark
    # extract shape-indexed pixels
    print('Extracting shape indexed pixels...')
    nsamples = len(train_set)
    M_rho = np.zeros((nsamples, P))
    for i in range(nsamples):
        M_norm_inv = np.linalg.inv(M_norm[i])
        dp = M_norm_inv.dot(localCoords[:, 1:].T).T
        pixPos = train_set[i].guess[localCoords[:, 0].astype(int), :] + dp
        rows, cols = train_set[i].image.shape
        pixPos = np.round(pixPos).astype(int)
        # clamp in case a pixel position falls outside the image
        pixPos[:, 0] = np.minimum(np.maximum(pixPos[:, 0], 0), cols - 1)
        pixPos[:, 1] = np.minimum(np.maximum(pixPos[:, 1], 0), rows - 1)
        M_rho[i, :] = train_set[i].image[pixPos[:, 1].T, pixPos[:, 0].T]
    # compute the pixel-pixel covariance
    cov_Rho = np.cov(M_rho, rowvar=False)
    M_rho_centered = M_rho - np.tile(np.mean(M_rho, 0), (M_rho.shape[0], 1))
    diagCovRho = np.diag(cov_Rho)
    varRhoDRho = -2.0 * cov_Rho + np.tile(diagCovRho[:, np.newaxis], (1, P)) + np.tile(diagCovRho, (P, 1))
    inv_varRhoDRho = 1.0 / varRhoDRho  # element-wise inverse
    # compute all ferns
    print('Constructing ferns...')
    ferns = []
    features = []
    for k in range(K):
        features.append(correlationBasedFeatureSelection(Y, M_rho, M_rho_centered, inv_varRhoDRho, F))
            M_diff_rho[:, f] = features[k][f].rho_m - features[k][f].rho_n
        updateMat = evaluateFern_batch(M_diff_rho, ferns[k])
        print('fern %d/%d\tmax(Y) = %.6g, min(Y) = %.6g' % (k, K, np.max(Y), np.min(Y)))
        ferns.append(trainFern(features[-1], Y, M_rho, beta))
        # update the normalized targets
        M_diff_rho = np.zeros((nsamples, F))
        for f in range(F):
random_line_split
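The loop in this record is a boosting-style cascade: each of the K rounds fits a fern to the current normalized residual Y and then subtracts the fern's predictions, so later ferns correct what earlier ones missed. A minimal Go sketch of that residual update, with a toy weak learner standing in for a trained fern (all names here are illustrative):

```go
// Sketch: the residual update driving the fern cascade. Each round a weak
// learner is fit to the current residual y and its prediction subtracted.
package main

import "fmt"

// weakLearner stands in for a trained fern: a prediction per sample index.
type weakLearner func(sample int) float64

func boost(y []float64, rounds int, fit func(residual []float64) weakLearner) []weakLearner {
	learners := make([]weakLearner, 0, rounds)
	for k := 0; k < rounds; k++ {
		f := fit(y) // train on the current residual
		for i := range y {
			y[i] -= f(i) // shrink the residual
		}
		learners = append(learners, f)
	}
	return learners
}

func main() {
	y := []float64{4, 8, 12}
	// toy "fern": always predicts half the current residual
	fit := func(res []float64) weakLearner {
		pred := append([]float64(nil), res...)
		return func(i int) float64 { return pred[i] / 2 }
	}
	boost(y, 3, fit)
	fmt.Println(y) // residual halves each round: [0.5 1 1.5]
}
```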
util.py
def estimateTransform(source_shape, target_shape):
    # least-squares similarity transform (Umeyama): target ~ s * R * source + t
    n, m = source_shape.shape
    mu_source = np.mean(source_shape, 0)
    mu_target = np.mean(target_shape, 0)
    d_source = source_shape - np.tile(mu_source, (n, 1))
    sig_source2 = np.sum(d_source * d_source) / n
    d_target = target_shape - np.tile(mu_target, (n, 1))
    sig_target2 = np.sum(d_target * d_target) / n
    sig_source_target = d_target.T.dot(d_source) / n
    det_sig_source_target = np.linalg.det(sig_source_target)
    S = np.eye(m)
    if det_sig_source_target < 0:
        S[m-1, m-1] = -1
    u, d, vh = np.linalg.svd(sig_source_target, full_matrices=True)
    R = u.dot(S).dot(vh)
    s = np.trace(np.diag(d).dot(S)) / sig_source2
    t = mu_target - s * R.dot(mu_source)
    return s, R, t

def normalizedShapeTargets(train_set, mean_shape):
    nsamples = len(train_set)
    npts = mean_shape.shape[0]
    M_norm = []  # the similarity transform matrix for each sample
    Y = np.zeros((nsamples, npts * 2))
    for i in range(nsamples):
        s, R, _ = estimateTransform(train_set[i].guess, mean_shape)
        M_norm.append(s * R)
        diff = train_set[i].truth - train_set[i].guess
        tdiff = M_norm[i].dot(diff.T)
        Y[i, :] = tdiff.T.reshape(1, -1)
    return Y, M_norm

def learnStageRegressor(train_set, Y, M_norm, params):
    npts = train_set[0].truth.shape[0]
    P = params['P']
    T = params['T']
    F = params['F']
    K = params['K']
    beta = params['beta']
    kappa = params['kappa']
    # generate local coordinates
    print('Generating local coordinates...')
    localCoords = np.zeros((P, 3))  # fpidx, x, y
    for i in range(P):
        localCoords[i, 0] = np.random.randint(0, npts)  # randomly choose a landmark
        localCoords[i, 1:] = (np.random.uniform(size=(1, 2)) - 0.5) * kappa  # fluctuate around the landmark
    # extract shape-indexed pixels
    print('Extracting shape indexed pixels...')
    nsamples = len(train_set)
    M_rho = np.zeros((nsamples, P))
    for i in range(nsamples):
        M_norm_inv = np.linalg.inv(M_norm[i])
        dp = M_norm_inv.dot(localCoords[:, 1:].T).T
        pixPos = train_set[i].guess[localCoords[:, 0].astype(int), :] + dp
        rows, cols = train_set[i].image.shape
        pixPos = np.round(pixPos).astype(int)
        # clamp in case a pixel position falls outside the image
        pixPos[:, 0] = np.minimum(np.maximum(pixPos[:, 0], 0), cols - 1)
        pixPos[:, 1] = np.minimum(np.maximum(pixPos[:, 1], 0), rows - 1)
        M_rho[i, :] = train_set[i].image[pixPos[:, 1].T, pixPos[:, 0].T]
    # compute the pixel-pixel covariance
    cov_Rho = np.cov(M_rho, rowvar=False)
    M_rho_centered = M_rho - np.tile(np.mean(M_rho, 0), (M_rho.shape[0], 1))
    diagCovRho = np.diag(cov_Rho)
    varRhoDRho = -2.0 * cov_Rho + np.tile(diagCovRho[:, np.newaxis], (1, P)) + np.tile(diagCovRho, (P, 1))
    inv_varRhoDRho = 1.0 / varRhoDRho  # element-wise inverse
    # compute all ferns
    print('Constructing ferns...')
    ferns = []
    features = []
    for k in range(K):
        features.append(correlationBasedFeatureSelection(Y, M_rho, M_rho_centered, inv_varRhoDRho, F))
        ferns.append(trainFern(features[-1], Y, M_rho, beta))
        # update the normalized targets
        M_diff_rho = np.zeros((nsamples, F))
        for f in range(F):
            M_diff_rho[:, f] = features[k][f].rho_m - features[k][f].rho_n
        updateMat = evaluateFern_batch(M_diff_rho, ferns[k])
        print('fern %d/%d\tmax(Y) = %.6g, min(Y) = %.6g' % (k, K, np.max(Y), np.min(Y)))
        Y = Y - updateMat
    stage_regressor = regressor(localCoords, ferns, features)  # renamed so the local does not shadow the class
    return stage_regressor

def correlationBasedFeatureSelection(Y, M_rho, M_rho_centered, inv_varRhoDRho, F):
    Lfp = Y.shape[1]
    Nfp = Lfp // 2
    n, P = M_rho.shape
    features = []
    for i in range(F):
        nu = np.random.randn(Lfp, 1)
        Yprob = Y.dot(nu)
        covYprob_rho = np.sum((Yprob - np.mean(Yprob)) * M_rho_centered, 0) / (n - 1)  # R^{1xP}
        covRhoMcovRho = np.tile(covYprob_rho[:, np.newaxis], (1, P)) - np.tile(covYprob_rho, (P, 1))
        corrYprob_rhoDrho = covRhoMcovRho * np.sqrt(inv_varRhoDRho)
        # exclude the diagonal (a pixel paired with itself)
        for j in range(P):
            corrYprob_rhoDrho[j, j] = -10000.0
        maxCorr = np.max(corrYprob_rhoDrho)
        maxLoc_row, maxLoc_col = np.unravel_index(np.argmax(corrYprob_rhoDrho, axis=None), corrYprob_rhoDrho.shape)
        features.append(feature(maxLoc_row, maxLoc_col, M_rho[:, maxLoc_row], M_rho[:, maxLoc_col], maxCorr))
    return features

# def covVM(v, M_centered):
#     [n, ~] = size(M_centered)
#     mu_v = mean(v)
#     res = sum( bsxfun(@times, v-mu_v, M_centered) ) / (n-1)
#     res = res'
#     return res

# fern training
def trainFern(features, Y, Mrho, beta):
    F = len(features)
    # compute thresholds for the fern
    thresholds = np.random.uniform(size=(F, 1))
    for f in range(F):
        fdiff = features[f].rho_m - features[f].rho_n
        maxval = np.max(fdiff)
        minval = np.min(fdiff)
        meanval = np.mean(fdiff)
        vrange = min(maxval - meanval, meanval - minval)  # renamed: `range` would shadow the builtin
        thresholds[f] = (thresholds[f] - 0.5) * 0.2 * vrange + meanval
    # partition the samples into 2^F bins
    bins = partitionSamples(Mrho, features, thresholds)
    # compute the output of each bin
    outputs = computeBinOutputs(bins, Y, beta)
    return fern(thresholds, outputs)

def partitionSamples(Mrho, features, thresholds):
    F = len(features)
    bins = []
    nsamples = Mrho.shape[0]
    diffvecs = np.zeros((nsamples, F))
    for i in range(F):
        diffvecs[:, i] = Mrho[:, features[i].m] - Mrho[:, features[i].n]
    for i in range(F):
        di = diffvecs[:, i]
        lset = np.where(di < thresholds[i])[0]
        rset = np.setdiff1d(np.arange(nsamples), lset)
        diffvecs[lset, i] = 0
        diffvecs[rset, i] = 1
    wvec = 2 ** np.arange(F)  # weight feature i by 2^i to get the bin index
    idxvec = diffvecs.dot(wvec)
    for i in range(2 ** F):
        bins.append(np.where(idxvec == i)[0])
    return bins

def computeBinOutputs(bins, Y, beta):
    Lfp = Y.shape[1]
    nbins = len(bins)
    outputs = np.zeros((nbins, Lfp))
    for i in range(nbins):
        if bins[i].size == 0:  # empty bin
            continue
        outputs[i, :] = np.sum(Y[bins[i], :], 0)
        ni = len(bins[i])
        factor = 1.0 / ((1 + beta / ni) * ni)  # shrinkage: beta regularizes sparse bins
        outputs[i, :] = outputs[i, :] * factor
    return outputs
identifier_body
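`partitionSamples` above routes each sample into one of 2^F bins by thresholding F feature differences and packing the results as an F-bit code (the power-of-two weight vector). A minimal Go sketch of that bin-index computation for a single sample; `fernBin` is an illustrative name:

```go
// Sketch: the bin index a fern assigns to one sample -- F thresholded
// feature differences packed into an F-bit code, matching the
// power-of-two weighting in partitionSamples.
package main

import "fmt"

func fernBin(diffs, thresholds []float64) int {
	bin := 0
	for i := range diffs {
		if diffs[i] >= thresholds[i] { // bit i set when the diff clears threshold i
			bin |= 1 << i
		}
	}
	return bin
}

func main() {
	diffs := []float64{0.7, -0.2, 0.9}
	thresholds := []float64{0.5, 0.0, 1.0}
	fmt.Println(fernBin(diffs, thresholds)) // bits: 1,0,0 -> bin 1
}
```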
request_error.pb.go
RESOURCE_NOT_FOUND", 7: "INVALID_PAGE_TOKEN", 8: "EXPIRED_PAGE_TOKEN", 22: "INVALID_PAGE_SIZE", 9: "REQUIRED_FIELD_MISSING", 11: "IMMUTABLE_FIELD", 13: "TOO_MANY_MUTATE_OPERATIONS", 14: "CANNOT_BE_EXECUTED_BY_MANAGER_ACCOUNT", 15: "CANNOT_MODIFY_FOREIGN_FIELD", 18: "INVALID_ENUM_VALUE", 19: "DEVELOPER_TOKEN_PARAMETER_MISSING", 20: "LOGIN_CUSTOMER_ID_PARAMETER_MISSING", 21: "VALIDATE_ONLY_REQUEST_HAS_PAGE_TOKEN", } var RequestErrorEnum_RequestError_value = map[string]int32{ "UNSPECIFIED": 0, "UNKNOWN": 1, "RESOURCE_NAME_MISSING": 3, "RESOURCE_NAME_MALFORMED": 4, "BAD_RESOURCE_ID": 17, "INVALID_CUSTOMER_ID": 16, "OPERATION_REQUIRED": 5, "RESOURCE_NOT_FOUND": 6, "INVALID_PAGE_TOKEN": 7, "EXPIRED_PAGE_TOKEN": 8, "INVALID_PAGE_SIZE": 22, "REQUIRED_FIELD_MISSING": 9, "IMMUTABLE_FIELD": 11, "TOO_MANY_MUTATE_OPERATIONS": 13, "CANNOT_BE_EXECUTED_BY_MANAGER_ACCOUNT": 14, "CANNOT_MODIFY_FOREIGN_FIELD": 15, "INVALID_ENUM_VALUE": 18, "DEVELOPER_TOKEN_PARAMETER_MISSING": 19, "LOGIN_CUSTOMER_ID_PARAMETER_MISSING": 20, "VALIDATE_ONLY_REQUEST_HAS_PAGE_TOKEN": 21, } func (x RequestErrorEnum_RequestError) String() string { return proto.EnumName(RequestErrorEnum_RequestError_name, int32(x)) } func (RequestErrorEnum_RequestError) EnumDescriptor() ([]byte, []int) { return fileDescriptor_request_error_8d0326a66c39a8b8, []int{0, 0} } // Container for enum describing possible request errors. type RequestErrorEnum struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RequestErrorEnum) Reset() { *m = RequestErrorEnum{} } func (m *RequestErrorEnum) String() string { return proto.CompactTextString(m) } func (*RequestErrorEnum) ProtoMessage() {} func (*RequestErrorEnum) Descriptor() ([]byte, []int) { return fileDescriptor_request_error_8d0326a66c39a8b8, []int{0} } func (m *RequestErrorEnum) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RequestErrorEnum.Unmarshal(m, b) } func (m *RequestErrorEnum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RequestErrorEnum.Marshal(b, m, deterministic) } func (dst *RequestErrorEnum) XXX_Merge(src proto.Message) { xxx_messageInfo_RequestErrorEnum.Merge(dst, src) } func (m *RequestErrorEnum) XXX_Size() int
func (m *RequestErrorEnum) XXX_DiscardUnknown() { xxx_messageInfo_RequestErrorEnum.DiscardUnknown(m) } var xxx_messageInfo_RequestErrorEnum proto.InternalMessageInfo func init() { proto.RegisterType((*RequestErrorEnum)(nil), "google.ads.googleads.v1.errors.RequestErrorEnum") proto.RegisterEnum("google.ads.googleads.v1.errors.RequestErrorEnum_RequestError", RequestErrorEnum_RequestError_name, RequestErrorEnum_RequestError_value) } func init() { proto.RegisterFile("google/ads/googleads/v1/errors/request_error.proto", fileDescriptor_request_error_8d0326a66c39a8b8) } var fileDescriptor_request_error_8d0326a66c39a8b8 = []byte{ // 573 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0xdd, 0x6e, 0xd3, 0x30, 0x14, 0x66, 0x3f, 0x6c, 0xe0, 0x01, 0xf3, 0x3c, 0xb6, 0x89, 0x0d, 0x0d, 0x51, 0x98, 0x80, 0x9b, 0x54, 0x85, 0xbb, 0x70, 0xe5, 0xc6, 0xa7, 0xc1, 0x5a, 0x62, 0x07, 0x27, 0x29, 0xeb, 0x54, 0xc9, 0x2a, 0xb4, 0xaa, 0x26, 0x6d, 0xc9, 0x48, 0xba, 0x3d, 0x10, 0x97, 0x3c, 0x01, 0xcf, 0x80, 0xc4, 0x8b, 0x20, 0x1e, 0x02, 0x39, 0x6e, 0x43, 0x40, 0xc0, 0x55, 0x4e, 0xbe, 0xf3, 0x7d, 0xe7, 0x3b, 0xe7, 0xe8, 0x18, 0xbd, 0x9c, 0xe6, 0xf9, 0xf4, 0x7c, 0xd2, 0x1e, 0x8d, 0xcb, 0xb6, 0x0d, 0x4d, 0x74, 0xdd, 0x69, 0x4f, 0x8a, 0x22, 0x2f, 0xca, 0x76, 0x31, 0xf9, 0x78, 0x35, 0x29, 0x67, 0xba, 0xfa, 0x75, 0x2e, 0x8b, 0x7c, 0x96, 0x93, 0x43, 0x4b, 0x74, 0x46, 0xe3, 0xd2, 0xa9, 0x35, 0xce, 0x75, 0xc7, 0xb1, 0x9a, 0xfd, 0x87, 0x8b, 0x9a, 0x97, 0x67, 0xed, 0x51, 0x96, 0xe5, 0xb3, 0xd1, 0xec, 0x2c, 0xcf, 0x4a, 0xab, 0x6e, 0x7d, 0x5b, 0x45, 0x58, 0xd9, 0xaa, 0x60, 0xf8, 0x90, 0x5d, 0x5d, 0xb4, 0xbe, 0xac, 0xa2, 0x3b, 0x4d, 0x90, 0x6c, 0xa2, 0x8d, 0x54, 0xc4, 0x11, 0x78, 0xbc, 0xc7, 0x81, 0xe1, 0x1b, 0x64, 0x03, 0xad, 0xa7, 0xe2, 0x58, 0xc8, 0x77, 0x02, 0x2f, 0x91, 0x07, 0x68, 0x47, 0x41, 0x2c, 0x53, 0xe5, 0x81, 0x16, 0x34, 0x04, 0x1
{ return xxx_messageInfo_RequestErrorEnum.Size(m) }
identifier_body
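The generated code above exposes paired lookup tables (`RequestErrorEnum_RequestError_name` and `_value`) for moving between wire values and symbolic names. A small self-contained Go sketch of that usage pattern; the local `name`/`value` maps stand in for the generated tables, which live in the real package:

```go
// Sketch: the name/value map pattern generated protobuf enums expose.
package main

import "fmt"

var name = map[int32]string{0: "UNSPECIFIED", 1: "UNKNOWN", 7: "INVALID_PAGE_TOKEN"}
var value = map[string]int32{"UNSPECIFIED": 0, "UNKNOWN": 1, "INVALID_PAGE_TOKEN": 7}

func main() {
	code := value["INVALID_PAGE_TOKEN"]
	fmt.Println(code, name[code]) // 7 INVALID_PAGE_TOKEN

	// Unknown names come back as the zero value, so check membership first.
	if v, ok := value["NO_SUCH_ERROR"]; !ok {
		fmt.Println("unrecognized error name, got zero value", v)
	}
}
```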
request_error.pb.go
22: "INVALID_PAGE_SIZE", 9: "REQUIRED_FIELD_MISSING", 11: "IMMUTABLE_FIELD", 13: "TOO_MANY_MUTATE_OPERATIONS", 14: "CANNOT_BE_EXECUTED_BY_MANAGER_ACCOUNT", 15: "CANNOT_MODIFY_FOREIGN_FIELD", 18: "INVALID_ENUM_VALUE", 19: "DEVELOPER_TOKEN_PARAMETER_MISSING", 20: "LOGIN_CUSTOMER_ID_PARAMETER_MISSING", 21: "VALIDATE_ONLY_REQUEST_HAS_PAGE_TOKEN", } var RequestErrorEnum_RequestError_value = map[string]int32{ "UNSPECIFIED": 0, "UNKNOWN": 1, "RESOURCE_NAME_MISSING": 3, "RESOURCE_NAME_MALFORMED": 4, "BAD_RESOURCE_ID": 17, "INVALID_CUSTOMER_ID": 16, "OPERATION_REQUIRED": 5, "RESOURCE_NOT_FOUND": 6, "INVALID_PAGE_TOKEN": 7, "EXPIRED_PAGE_TOKEN": 8, "INVALID_PAGE_SIZE": 22, "REQUIRED_FIELD_MISSING": 9, "IMMUTABLE_FIELD": 11, "TOO_MANY_MUTATE_OPERATIONS": 13, "CANNOT_BE_EXECUTED_BY_MANAGER_ACCOUNT": 14, "CANNOT_MODIFY_FOREIGN_FIELD": 15, "INVALID_ENUM_VALUE": 18, "DEVELOPER_TOKEN_PARAMETER_MISSING": 19, "LOGIN_CUSTOMER_ID_PARAMETER_MISSING": 20, "VALIDATE_ONLY_REQUEST_HAS_PAGE_TOKEN": 21, } func (x RequestErrorEnum_RequestError) String() string { return proto.EnumName(RequestErrorEnum_RequestError_name, int32(x)) } func (RequestErrorEnum_RequestError) EnumDescriptor() ([]byte, []int) { return fileDescriptor_request_error_8d0326a66c39a8b8, []int{0, 0} } // Container for enum describing possible request errors. type RequestErrorEnum struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RequestErrorEnum) Reset() { *m = RequestErrorEnum{} } func (m *RequestErrorEnum) String() string { return proto.CompactTextString(m) } func (*RequestErrorEnum) ProtoMessage() {} func (*RequestErrorEnum) Descriptor() ([]byte, []int) { return fileDescriptor_request_error_8d0326a66c39a8b8, []int{0} } func (m *RequestErrorEnum) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RequestErrorEnum.Unmarshal(m, b) } func (m *RequestErrorEnum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RequestErrorEnum.Marshal(b, m, deterministic) } func (dst *RequestErrorEnum) XXX_Merge(src proto.Message) { xxx_messageInfo_RequestErrorEnum.Merge(dst, src) } func (m *RequestErrorEnum) XXX_Size() int { return xxx_messageInfo_RequestErrorEnum.Size(m) } func (m *RequestErrorEnum) XXX_DiscardUnknown() { xxx_messageInfo_RequestErrorEnum.DiscardUnknown(m) } var xxx_messageInfo_RequestErrorEnum proto.InternalMessageInfo func init() { proto.RegisterType((*RequestErrorEnum)(nil), "google.ads.googleads.v1.errors.RequestErrorEnum") proto.RegisterEnum("google.ads.googleads.v1.errors.RequestErrorEnum_RequestError", RequestErrorEnum_RequestError_name, RequestErrorEnum_RequestError_value) } func init() { proto.RegisterFile("google/ads/googleads/v1/errors/request_error.proto", fileDescriptor_request_error_8d0326a66c39a8b8) } var fileDescriptor_request_error_8d0326a66c39a8b8 = []byte{ // 573 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0xdd, 0x6e, 0xd3, 0x30, 0x14, 0x66, 0x3f, 0x6c, 0xe0, 0x01, 0xf3, 0x3c, 0xb6, 0x89, 0x0d, 0x0d, 0x51, 0x98, 0x80, 0x9b, 0x54, 0x85, 0xbb, 0x70, 0xe5, 0xc6, 0xa7, 0xc1, 0x5a, 0x62, 0x07, 0x27, 0x29, 0xeb, 0x54, 0xc9, 0x2a, 0xb4, 0xaa, 0x26, 0x6d, 0xc9, 0x48, 0xba, 0x3d, 0x10, 0x97, 0x3c, 0x01, 0xcf, 0x80, 0xc4, 0x8b, 0x20, 0x1e, 0x02, 0x39, 0x6e, 0x43, 0x40, 0xc0, 0x55, 0x4e, 0xbe, 0xf3, 0x7d, 0xe7, 0x3b, 0xe7, 0xe8, 0x18, 0xbd, 0x9c, 0xe6, 0xf9, 0xf4, 0x7c, 0xd2, 0x1e, 0x8d, 0xcb, 0xb6, 0x0d, 0x4d, 0x74, 0xdd, 0x69, 0x4f, 0x8a, 0x22, 0x2f, 0xca, 0x76, 
0x31, 0xf9, 0x78, 0x35, 0x29, 0x67, 0xba, 0xfa, 0x75, 0x2e, 0x8b, 0x7c, 0x96, 0x93, 0x43, 0x4b, 0x74, 0x46, 0xe3, 0xd2, 0xa9, 0x35, 0xce, 0x75, 0xc7, 0xb1, 0x9a, 0xfd, 0x87, 0x8b, 0x9a, 0x97, 0x67, 0xed, 0x51, 0x96, 0xe5, 0xb3, 0xd1, 0xec, 0x2c, 0xcf, 0x4a, 0xab, 0x6e, 0x7d, 0x5b, 0x45, 0x58, 0xd9, 0xaa, 0x60, 0xf8, 0x90, 0x5d, 0x5d, 0xb4, 0xbe, 0xac, 0xa2, 0x3b, 0x4d, 0x90, 0x6c, 0xa2, 0x8d, 0x54, 0xc4, 0x11, 0x78, 0xbc, 0xc7, 0x81, 0xe1, 0x1b, 0x64, 0x03, 0xad, 0xa7, 0xe2, 0x58, 0xc8, 0x77, 0x02, 0x2f, 0x91, 0x07, 0x68, 0x47, 0x41, 0x2c, 0x53, 0xe5, 0x81, 0x16, 0x34, 0x04,
6: "RESOURCE_NOT_FOUND", 7: "INVALID_PAGE_TOKEN", 8: "EXPIRED_PAGE_TOKEN",
random_line_split
request_error.pb.go
RESOURCE_NOT_FOUND", 7: "INVALID_PAGE_TOKEN", 8: "EXPIRED_PAGE_TOKEN", 22: "INVALID_PAGE_SIZE", 9: "REQUIRED_FIELD_MISSING", 11: "IMMUTABLE_FIELD", 13: "TOO_MANY_MUTATE_OPERATIONS", 14: "CANNOT_BE_EXECUTED_BY_MANAGER_ACCOUNT", 15: "CANNOT_MODIFY_FOREIGN_FIELD", 18: "INVALID_ENUM_VALUE", 19: "DEVELOPER_TOKEN_PARAMETER_MISSING", 20: "LOGIN_CUSTOMER_ID_PARAMETER_MISSING", 21: "VALIDATE_ONLY_REQUEST_HAS_PAGE_TOKEN", } var RequestErrorEnum_RequestError_value = map[string]int32{ "UNSPECIFIED": 0, "UNKNOWN": 1, "RESOURCE_NAME_MISSING": 3, "RESOURCE_NAME_MALFORMED": 4, "BAD_RESOURCE_ID": 17, "INVALID_CUSTOMER_ID": 16, "OPERATION_REQUIRED": 5, "RESOURCE_NOT_FOUND": 6, "INVALID_PAGE_TOKEN": 7, "EXPIRED_PAGE_TOKEN": 8, "INVALID_PAGE_SIZE": 22, "REQUIRED_FIELD_MISSING": 9, "IMMUTABLE_FIELD": 11, "TOO_MANY_MUTATE_OPERATIONS": 13, "CANNOT_BE_EXECUTED_BY_MANAGER_ACCOUNT": 14, "CANNOT_MODIFY_FOREIGN_FIELD": 15, "INVALID_ENUM_VALUE": 18, "DEVELOPER_TOKEN_PARAMETER_MISSING": 19, "LOGIN_CUSTOMER_ID_PARAMETER_MISSING": 20, "VALIDATE_ONLY_REQUEST_HAS_PAGE_TOKEN": 21, } func (x RequestErrorEnum_RequestError) String() string { return proto.EnumName(RequestErrorEnum_RequestError_name, int32(x)) } func (RequestErrorEnum_RequestError) EnumDescriptor() ([]byte, []int) { return fileDescriptor_request_error_8d0326a66c39a8b8, []int{0, 0} } // Container for enum describing possible request errors. type RequestErrorEnum struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RequestErrorEnum) Reset() { *m = RequestErrorEnum{} } func (m *RequestErrorEnum) String() string { return proto.CompactTextString(m) } func (*RequestErrorEnum) ProtoMessage() {} func (*RequestErrorEnum) Descriptor() ([]byte, []int) { return fileDescriptor_request_error_8d0326a66c39a8b8, []int{0} } func (m *RequestErrorEnum) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RequestErrorEnum.Unmarshal(m, b) } func (m *RequestErrorEnum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RequestErrorEnum.Marshal(b, m, deterministic) } func (dst *RequestErrorEnum)
(src proto.Message) { xxx_messageInfo_RequestErrorEnum.Merge(dst, src) } func (m *RequestErrorEnum) XXX_Size() int { return xxx_messageInfo_RequestErrorEnum.Size(m) } func (m *RequestErrorEnum) XXX_DiscardUnknown() { xxx_messageInfo_RequestErrorEnum.DiscardUnknown(m) } var xxx_messageInfo_RequestErrorEnum proto.InternalMessageInfo func init() { proto.RegisterType((*RequestErrorEnum)(nil), "google.ads.googleads.v1.errors.RequestErrorEnum") proto.RegisterEnum("google.ads.googleads.v1.errors.RequestErrorEnum_RequestError", RequestErrorEnum_RequestError_name, RequestErrorEnum_RequestError_value) } func init() { proto.RegisterFile("google/ads/googleads/v1/errors/request_error.proto", fileDescriptor_request_error_8d0326a66c39a8b8) } var fileDescriptor_request_error_8d0326a66c39a8b8 = []byte{ // 573 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0xdd, 0x6e, 0xd3, 0x30, 0x14, 0x66, 0x3f, 0x6c, 0xe0, 0x01, 0xf3, 0x3c, 0xb6, 0x89, 0x0d, 0x0d, 0x51, 0x98, 0x80, 0x9b, 0x54, 0x85, 0xbb, 0x70, 0xe5, 0xc6, 0xa7, 0xc1, 0x5a, 0x62, 0x07, 0x27, 0x29, 0xeb, 0x54, 0xc9, 0x2a, 0xb4, 0xaa, 0x26, 0x6d, 0xc9, 0x48, 0xba, 0x3d, 0x10, 0x97, 0x3c, 0x01, 0xcf, 0x80, 0xc4, 0x8b, 0x20, 0x1e, 0x02, 0x39, 0x6e, 0x43, 0x40, 0xc0, 0x55, 0x4e, 0xbe, 0xf3, 0x7d, 0xe7, 0x3b, 0xe7, 0xe8, 0x18, 0xbd, 0x9c, 0xe6, 0xf9, 0xf4, 0x7c, 0xd2, 0x1e, 0x8d, 0xcb, 0xb6, 0x0d, 0x4d, 0x74, 0xdd, 0x69, 0x4f, 0x8a, 0x22, 0x2f, 0xca, 0x76, 0x31, 0xf9, 0x78, 0x35, 0x29, 0x67, 0xba, 0xfa, 0x75, 0x2e, 0x8b, 0x7c, 0x96, 0x93, 0x43, 0x4b, 0x74, 0x46, 0xe3, 0xd2, 0xa9, 0x35, 0xce, 0x75, 0xc7, 0xb1, 0x9a, 0xfd, 0x87, 0x8b, 0x9a, 0x97, 0x67, 0xed, 0x51, 0x96, 0xe5, 0xb3, 0xd1, 0xec, 0x2c, 0xcf, 0x4a, 0xab, 0x6e, 0x7d, 0x5b, 0x45, 0x58, 0xd9, 0xaa, 0x60, 0xf8, 0x90, 0x5d, 0x5d, 0xb4, 0xbe, 0xac, 0xa2, 0x3b, 0x4d, 0x90, 0x6c, 0xa2, 0x8d, 0x54, 0xc4, 0x11, 0x78, 0xbc, 0xc7, 0x81, 0xe1, 0x1b, 0x64, 0x03, 0xad, 0xa7, 0xe2, 0x58, 0xc8, 0x77, 0x02, 0x2f, 0x91, 0x07, 0x68, 0x47, 0x41, 0x2c, 0x53, 0xe5, 0x81, 0x16, 0x34, 0x04, 0x1d
XXX_Merge
identifier_name
http_server_utils.go
Params map[string]*HttpParam ErrorCodes []string Errors []error postJson map[string]interface{} body []byte } type HttpParam struct { Name string InvalidErrorCode string DataType HttpParamDataType Type HttpParamType Required bool MinLength int MaxLength int Post bool Value interface{} Raw string Valid bool Present bool // If value is present and parsed properly } // Make sure your params are present and valid before trying to access. func (self *HttpParam) Int() int { if self.Value != nil { return self.Value.(int) } else { return int(0) } } func (self *HttpParam) Float() float64 { if self.Value != nil { return self.Value.(float64) } else { return float64(0) } } func (self *HttpParam) String() string { if self.Value != nil { return self.Value.(string) } else { return nadaStr } } func (self *HttpParam) Bool() bool { if self.Value != nil { return self.Value.(bool) } else { return false } } func (self *HttpParam) ObjectId() *bson.ObjectId { if self.Value != nil { return self.Value.(*bson.ObjectId) } else { return nil } } func (self *HttpParam) Json() map[string]interface{} { if self.Value == nil { return nil } else { return self.Value.(map[string]interface{}) } } func (self *HttpParam) JsonArray() []interface{} { if self.Value == nil { return nil } else { return self.Value.([]interface{}) } } // Set a valid value for a param. Missing can be valid, but not present. func (self *HttpParam) setPresentValue(value interface{}) { self.Present = true self.Value = value } // Validate the params. If any of the params are invalid, false is returned. You must call // this first before reading ErrorCodes. If no params are defined, this always // returns "true". If there are raw data extraction errors, this is always false (e.g., body missing or incorrect). func (self *HttpContext) ParamsAreValid() bool { if len(self.Errors) != 0 { return false } if len(self.Params) == 0 { return true } for _, param := range self.Params { switch param.DataType { case HttpIntParam: validateIntParam(self, param) case HttpStringParam: validateStringParam(self, param) case HttpFloatParam: validateFloatParam(self, param) case HttpBoolParam: validateBoolParam(self, param) case HttpObjectIdParam: validateObjectIdParam(self, param) case HttpJsonParam: validateJsonParam(self, param) case HttpJsonArrayParam: validateJsonParam(self, param) } } return len(self.ErrorCodes) == 0 } func (self *HttpContext) ParamInt(name string) int { return self.Params[name].Int() } func (self *HttpContext) ParamFloat(name string) float64 { return self.Params[name].Float() } func (self *HttpContext) ParamString(name string) string { return self.Params[name].String() } func (self *HttpContext) HasParam(name string) bool { if val, found := self.Params[name]; !found || val == nil || val.Value == nil { return false } return true } func (self *HttpContext) ParamBool(name string) bool { return self.Params[name].Bool() } func (self *HttpContext) ParamObjectId(name string) *bson.ObjectId { return self.Params[name].ObjectId() } func (self *HttpContext) ParamJson(name string) map[string]interface{} { return self.Params[name].Json() } func (self *HttpContext) ParamJsonArray(name string) []interface{} { return self.Params[name].JsonArray() } func (self *HttpContext) HasRawErrors() bool { return len(self.Errors) > 0 } // This returns the param value as a string. If the param is missing or empty, // the string will be len == 0. 
func retrieveParamValue(ctx *HttpContext, param *HttpParam) interface{} { switch param.Type { case HttpParamPost: return strings.TrimSpace(ctx.Request.PostFormValue(param.Name)) case HttpParamJsonPost: return retrieveJsonParamValue(ctx, param) case HttpParamQuery: return strings.TrimSpace(ctx.Request.FormValue(param.Name)) case HttpParamHeader: return strings.TrimSpace(ctx.Request.Header.Get(param.Name)) case HttpParamPath: return strings.TrimSpace(mux.Vars(ctx.Request)[param.Name]) } return nadaStr } func retrieveJsonParamValue(ctx *HttpContext, param *HttpParam) interface{} { var noData interface{} if param.DataType != HttpJsonParam && param.DataType != HttpJsonArrayParam { noData = nadaStr } if len(ctx.Errors) > 0 { return noData } // If this is the first access, read the body if len(ctx.body) == 0 { var err error ctx.body, err = ioutil.ReadAll(ctx.Request.Body) if err != nil { ctx.Errors = append(ctx.Errors, NewStackError("Error in raw data extraction - error: %v", err)) return noData } } if ctx.postJson == nil { var genJson interface{} err := json.Unmarshal(ctx.body, &genJson) if err != nil { ctx.Errors = append(ctx.Errors, NewStackError("Error in raw json data extraction - error: %v", err)) return noData } ctx.postJson = genJson.(map[string]interface{}) } // Look for the value in the json. The json may hold the data in a variety // of formats. Convert back to a string to deal with the other data types :-( val, found := ctx.postJson[param.Name] if !found { return noData } // If this is json, return the value. if param.DataType == HttpJsonParam || param.DataType == HttpJsonArrayParam { return val } valType := reflect.TypeOf(val) if valType == nil { return noData } switch valType.Kind() { case reflect.Invalid: return noData case reflect.Bool: return fmt.Sprintf("%t", val.(bool)) case reflect.Float64: return fmt.Sprintf("%g", val.(float64)) case reflect.String: return val.(string) default: return noData } return noData } func appendInvalidErrorCode(ctx *HttpContext, param *HttpParam) { // Do not duplicate error codes. 
for i := range ctx.ErrorCodes { if ctx.ErrorCodes[i] == param.InvalidErrorCode { return } } if len(param.InvalidErrorCode) == 0 { panic(fmt.Sprintf("We do not have an error code defined for param: %s - required: %t", param.Name, param.Required)) } ctx.ErrorCodes = append(ctx.ErrorCodes, param.InvalidErrorCode) param.Valid = false } func validateIntParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param); return } if len(param.Raw) == 0 { return } if val, err := strconv.Atoi(param.Raw); err != nil { appendInvalidErrorCode(ctx, param) } else { param.setPresentValue(val) } } func validateStringParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param) return } if param.Required && param.MinLength > 0 && len(param.Raw) < param.MinLength { appendInvalidErrorCode(ctx, param) return } if param.Required && param.MaxLength > 0 && len(param.Raw) > param.MaxLength { appendInvalidErrorCode(ctx, param) return } param.setPresentValue(param.Raw) } func validateFloatParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param); return } if len(param.Raw) == 0 { return } if val, err := strconv.ParseFloat(param.Raw, 64); err != nil { appendInvalidErrorCode(ctx, param) } else { param.setPresentValue(val) } } func validateObjectIdParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param); return } if len(param.Raw) == 0 { return } if !bson.IsObjectIdHex(param.Raw) { appendInvalidErrorCode(ctx, param); return } value := bson.ObjectIdHex(param.Raw) param.setPresentValue(&value) } func validateJsonParam(ctx *HttpContext, param *HttpParam) { val := retrieveParamValue(ctx, param) if val == nil && param.Required { appendInvalidErrorCode(ctx, param); return } if val == nil { return } param.setPresentValue(val) } // Boolean types include: 1, t, T, TRUE, true, True, 0, f, F, FALSE, false func validateBoolParam(ctx *HttpContext, param *HttpParam)
{ param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param) return } if len(param.Raw) == 0 { return } if val, err := strconv.ParseBool(param.Raw); err != nil { appendInvalidErrorCode(ctx, param) } else { param.setPresentValue(val) } }
identifier_body
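The helpers above implement a validate-then-read flow: error codes accumulate per param instead of failing fast, and values are only safe to read after validation. A self-contained Go sketch of that flow reduced to the stdlib; `intParam`, `validate`, and the error-code strings are illustrative, not from the source:

```go
// Sketch: accumulate error codes while validating, mirroring
// ParamsAreValid/appendInvalidErrorCode, then read typed values.
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

type intParam struct {
	name, errorCode string
	required        bool
	value           int
	present         bool
}

func validate(form url.Values, params []*intParam) []string {
	var codes []string
	for _, p := range params {
		raw := form.Get(p.name)
		if raw == "" {
			if p.required {
				codes = append(codes, p.errorCode)
			}
			continue // missing but optional: valid, just not present
		}
		v, err := strconv.Atoi(raw)
		if err != nil {
			codes = append(codes, p.errorCode)
			continue
		}
		p.value, p.present = v, true
	}
	return codes
}

func main() {
	form, _ := url.ParseQuery("limit=25&offset=oops")
	limit := &intParam{name: "limit", errorCode: "INVALID_LIMIT", required: true}
	offset := &intParam{name: "offset", errorCode: "INVALID_OFFSET"}
	fmt.Println(validate(form, []*intParam{limit, offset})) // [INVALID_OFFSET]
	fmt.Println(limit.value, limit.present)                 // 25 true
}
```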
http_server_utils.go
HttpContext struct { Response http.ResponseWriter Request *http.Request Params map[string]*HttpParam ErrorCodes []string Errors []error postJson map[string]interface{} body []byte } type HttpParam struct { Name string InvalidErrorCode string DataType HttpParamDataType Type HttpParamType Required bool MinLength int MaxLength int Post bool Value interface{} Raw string Valid bool Present bool // If value is present and parsed properly } // Make sure your params are present and valid before trying to access. func (self *HttpParam) Int() int { if self.Value != nil { return self.Value.(int) } else { return int(0) } } func (self *HttpParam) Float() float64 { if self.Value != nil { return self.Value.(float64) } else { return float64(0) } } func (self *HttpParam) String() string { if self.Value != nil { return self.Value.(string) } else { return nadaStr } } func (self *HttpParam) Bool() bool { if self.Value != nil { return self.Value.(bool) } else { return false } } func (self *HttpParam) ObjectId() *bson.ObjectId { if self.Value != nil { return self.Value.(*bson.ObjectId) } else { return nil } } func (self *HttpParam) Json() map[string]interface{} { if self.Value == nil { return nil } else { return self.Value.(map[string]interface{}) } } func (self *HttpParam) JsonArray() []interface{} { if self.Value == nil { return nil } else { return self.Value.([]interface{}) } } // Set a valid value for a param. Missing can be valid, but not present. func (self *HttpParam) setPresentValue(value interface{}) { self.Present = true self.Value = value } // Validate the params. If any of the params are invalid, false is returned. You must call // this first before reading ErrorCodes. If no params are defined, this always // returns "true". If there are raw data extraction errors, this is always false (e.g., body missing or incorrect). func (self *HttpContext) ParamsAreValid() bool { if len(self.Errors) != 0 { return false } if len(self.Params) == 0
for _, param := range self.Params { switch param.DataType { case HttpIntParam: validateIntParam(self, param) case HttpStringParam: validateStringParam(self, param) case HttpFloatParam: validateFloatParam(self, param) case HttpBoolParam: validateBoolParam(self, param) case HttpObjectIdParam: validateObjectIdParam(self, param) case HttpJsonParam: validateJsonParam(self, param) case HttpJsonArrayParam: validateJsonParam(self, param) } } return len(self.ErrorCodes) == 0 } func (self *HttpContext) ParamInt(name string) int { return self.Params[name].Int() } func (self *HttpContext) ParamFloat(name string) float64 { return self.Params[name].Float() } func (self *HttpContext) ParamString(name string) string { return self.Params[name].String() } func (self *HttpContext) HasParam(name string) bool { if val, found := self.Params[name]; !found || val == nil || val.Value == nil { return false } return true } func (self *HttpContext) ParamBool(name string) bool { return self.Params[name].Bool() } func (self *HttpContext) ParamObjectId(name string) *bson.ObjectId { return self.Params[name].ObjectId() } func (self *HttpContext) ParamJson(name string) map[string]interface{} { return self.Params[name].Json() } func (self *HttpContext) ParamJsonArray(name string) []interface{} { return self.Params[name].JsonArray() } func (self *HttpContext) HasRawErrors() bool { return len(self.Errors) > 0 } // This returns the param value as a string. If the param is missing or empty, // the string will be len == 0. func retrieveParamValue(ctx *HttpContext, param *HttpParam) interface{} { switch param.Type { case HttpParamPost: return strings.TrimSpace(ctx.Request.PostFormValue(param.Name)) case HttpParamJsonPost: return retrieveJsonParamValue(ctx, param) case HttpParamQuery: return strings.TrimSpace(ctx.Request.FormValue(param.Name)) case HttpParamHeader: return strings.TrimSpace(ctx.Request.Header.Get(param.Name)) case HttpParamPath: return strings.TrimSpace(mux.Vars(ctx.Request)[param.Name]) } return nadaStr } func retrieveJsonParamValue(ctx *HttpContext, param *HttpParam) interface{} { var noData interface{} if param.DataType != HttpJsonParam && param.DataType != HttpJsonArrayParam { noData = nadaStr } if len(ctx.Errors) > 0 { return noData } // If this is the first access, read the body if len(ctx.body) == 0 { var err error ctx.body, err = ioutil.ReadAll(ctx.Request.Body) if err != nil { ctx.Errors = append(ctx.Errors, NewStackError("Error in raw data extraction - error: %v", err)) return noData } } if ctx.postJson == nil { var genJson interface{} err := json.Unmarshal(ctx.body, &genJson) if err != nil { ctx.Errors = append(ctx.Errors, NewStackError("Error in raw json data extraction - error: %v", err)) return noData } ctx.postJson = genJson.(map[string]interface{}) } // Look for the value in the json. The json may hold the data in a variety // of formats. Convert back to a string to deal with the other data types :-( val, found := ctx.postJson[param.Name] if !found { return noData } // If this is json, return the value. if param.DataType == HttpJsonParam || param.DataType == HttpJsonArrayParam { return val } valType := reflect.TypeOf(val) if valType == nil { return noData } switch valType.Kind() { case reflect.Invalid: return noData case reflect.Bool: return fmt.Sprintf("%t", val.(bool)) case reflect.Float64: return fmt.Sprintf("%g", val.(float64)) case reflect.String: return val.(string) default: return noData } return noData } func appendInvalidErrorCode(ctx *HttpContext, param *HttpParam) { // Do not duplicate error codes. 
for i := range ctx.ErrorCodes { if ctx.ErrorCodes[i] == param.InvalidErrorCode { return } } if len(param.InvalidErrorCode) == 0 { panic(fmt.Sprintf("We do not have an error code defined for param: %s - required: %t", param.Name, param.Required)) } ctx.ErrorCodes = append(ctx.ErrorCodes, param.InvalidErrorCode) param.Valid = false } func validateIntParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param); return } if len(param.Raw) == 0 { return } if val, err := strconv.Atoi(param.Raw); err != nil { appendInvalidErrorCode(ctx, param) } else { param.setPresentValue(val) } } func validateStringParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param) return } if param.Required && param.MinLength > 0 && len(param.Raw) < param.MinLength { appendInvalidErrorCode(ctx, param) return } if param.Required && param.MaxLength > 0 && len(param.Raw) > param.MaxLength { appendInvalidErrorCode(ctx, param) return } param.setPresentValue(param.Raw) } func validateFloatParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param); return } if len(param.Raw) == 0 { return } if val, err := strconv.ParseFloat(param.Raw, 64); err != nil { appendInvalidErrorCode(ctx, param) } else { param.setPresentValue(val) } } func validateObjectIdParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param); return } if len(param.Raw) == 0 { return } if !bson.IsObjectIdHex(param.Raw) { appendInvalidErrorCode(ctx, param); return } value := bson.ObjectIdHex(param.Raw) param.setPresentValue(&value) } func validateJsonParam(ctx *HttpContext, param *HttpParam) { val := retrieveParamValue(ctx, param) if val == nil && param.Required { appendInvalidErrorCode(ctx, param); return } if val == nil { return } param.setPresentValue(val) } // Boolean types include: 1, t, T, TRUE, true, True, 0, f, F, FALSE, false func validateBoolParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param) return } if len(param.Raw) == 0 { return } if val, err := strconv.ParseBool(param.Raw); err != nil { appendInvalidErrorCode(ctx, param)
{ return true }
conditional_block
http_server_utils.go
HttpContext struct { Response http.ResponseWriter Request *http.Request Params map[string]*HttpParam ErrorCodes []string Errors []error postJson map[string]interface{} body []byte } type HttpParam struct { Name string InvalidErrorCode string DataType HttpParamDataType Type HttpParamType Required bool MinLength int MaxLength int Post bool Value interface{} Raw string Valid bool Present bool // If value is present and parsed properly } // Make sure your params are present and valid before trying to access. func (self *HttpParam) Int() int { if self.Value != nil { return self.Value.(int) } else { return int(0) } } func (self *HttpParam) Float() float64 { if self.Value != nil { return self.Value.(float64) } else { return float64(0) } } func (self *HttpParam) String() string { if self.Value != nil { return self.Value.(string) } else { return nadaStr } } func (self *HttpParam) Bool() bool { if self.Value != nil { return self.Value.(bool) } else { return false } } func (self *HttpParam) ObjectId() *bson.ObjectId { if self.Value != nil { return self.Value.(*bson.ObjectId) } else { return nil } } func (self *HttpParam) Json() map[string]interface{} { if self.Value == nil { return nil } else { return self.Value.(map[string]interface{}) } } func (self *HttpParam) JsonArray() []interface{} { if self.Value == nil { return nil } else { return self.Value.([]interface{}) } } // Set a valid value for a param. Missing can be valid, but not present. func (self *HttpParam) setPresentValue(value interface{}) { self.Present = true self.Value = value } // Validate the params. If any of the params are invalid, false is returned. You must call // this first before reading ErrorCodes. If no params are defined, this always // returns "true". If there are raw data extraction errors, this is always false (e.g., body missing or incorrect). func (self *HttpContext) ParamsAreValid() bool { if len(self.Errors) != 0 { return false } if len(self.Params) == 0 { return true } for _, param := range self.Params { switch param.DataType { case HttpIntParam: validateIntParam(self, param) case HttpStringParam: validateStringParam(self, param) case HttpFloatParam: validateFloatParam(self, param) case HttpBoolParam: validateBoolParam(self, param) case HttpObjectIdParam: validateObjectIdParam(self, param) case HttpJsonParam: validateJsonParam(self, param) case HttpJsonArrayParam: validateJsonParam(self, param) } } return len(self.ErrorCodes) == 0 } func (self *HttpContext) ParamInt(name string) int { return self.Params[name].Int() } func (self *HttpContext) ParamFloat(name string) float64 { return self.Params[name].Float() } func (self *HttpContext) ParamString(name string) string { return self.Params[name].String() } func (self *HttpContext) HasParam(name string) bool { if val, found := self.Params[name]; !found || val == nil || val.Value == nil { return false } return true }
func (self *HttpContext) ParamObjectId(name string) *bson.ObjectId { return self.Params[name].ObjectId() } func (self *HttpContext) ParamJson(name string) map[string]interface{} { return self.Params[name].Json() } func (self *HttpContext) ParamJsonArray(name string) []interface{} { return self.Params[name].JsonArray() } func (self *HttpContext) HasRawErrors() bool { return len(self.Errors) > 0 } // This returns the param value as a string. If the param is missing or empty, // the string will be len == 0. func retrieveParamValue(ctx *HttpContext, param *HttpParam) interface{} { switch param.Type { case HttpParamPost: return strings.TrimSpace(ctx.Request.PostFormValue(param.Name)) case HttpParamJsonPost: return retrieveJsonParamValue(ctx, param) case HttpParamQuery: return strings.TrimSpace(ctx.Request.FormValue(param.Name)) case HttpParamHeader: return strings.TrimSpace(ctx.Request.Header.Get(param.Name)) case HttpParamPath: return strings.TrimSpace(mux.Vars(ctx.Request)[param.Name]) } return nadaStr } func retrieveJsonParamValue(ctx *HttpContext, param *HttpParam) interface{} { var noData interface{} if param.DataType != HttpJsonParam && param.DataType != HttpJsonArrayParam { noData = nadaStr } if len(ctx.Errors) > 0 { return noData } // If this is the first access, read the body if len(ctx.body) == 0 { var err error ctx.body, err = ioutil.ReadAll(ctx.Request.Body) if err != nil { ctx.Errors = append(ctx.Errors, NewStackError("Error in raw data extraction - error: %v", err)) return noData } } if ctx.postJson == nil { var genJson interface{} err := json.Unmarshal(ctx.body, &genJson) if err != nil { ctx.Errors = append(ctx.Errors, NewStackError("Error in raw json data extraction - error: %v", err)) return noData } ctx.postJson = genJson.(map[string]interface{}) } // Look for the value in the json. The json may hold the data in a variety // of formats. Convert back to a string to deal with the other data types :-( val, found := ctx.postJson[param.Name] if !found { return noData } // If this is json, return the value. if param.DataType == HttpJsonParam || param.DataType == HttpJsonArrayParam { return val } valType := reflect.TypeOf(val) if valType == nil { return noData } switch valType.Kind() { case reflect.Invalid: return noData case reflect.Bool: return fmt.Sprintf("%t", val.(bool)) case reflect.Float64: return fmt.Sprintf("%g", val.(float64)) case reflect.String: return val.(string) default: return noData } return noData } func appendInvalidErrorCode(ctx *HttpContext, param *HttpParam) { // Do not duplicate error codes. 
for i := range ctx.ErrorCodes { if ctx.ErrorCodes[i] == param.InvalidErrorCode { return } } if len(param.InvalidErrorCode) == 0 { panic(fmt.Sprintf("We do not have an error code defined for param: %s - required: %t", param.Name, param.Required)) } ctx.ErrorCodes = append(ctx.ErrorCodes, param.InvalidErrorCode) param.Valid = false } func validateIntParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param); return } if len(param.Raw) == 0 { return } if val, err := strconv.Atoi(param.Raw); err != nil { appendInvalidErrorCode(ctx, param) } else { param.setPresentValue(val) } } func validateStringParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param) return } if param.Required && param.MinLength > 0 && len(param.Raw) < param.MinLength { appendInvalidErrorCode(ctx, param) return } if param.Required && param.MaxLength > 0 && len(param.Raw) > param.MaxLength { appendInvalidErrorCode(ctx, param) return } param.setPresentValue(param.Raw) } func validateFloatParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param); return } if len(param.Raw) == 0 { return } if val, err := strconv.ParseFloat(param.Raw, 64); err != nil { appendInvalidErrorCode(ctx, param) } else { param.setPresentValue(val) } } func validateObjectIdParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param); return } if len(param.Raw) == 0 { return } if !bson.IsObjectIdHex(param.Raw) { appendInvalidErrorCode(ctx, param); return } value := bson.ObjectIdHex(param.Raw) param.setPresentValue(&value) } func validateJsonParam(ctx *HttpContext, param *HttpParam) { val := retrieveParamValue(ctx, param) if val == nil && param.Required { appendInvalidErrorCode(ctx, param); return } if val == nil { return } param.setPresentValue(val) } // Boolean types include: 1, t, T, TRUE, true, True, 0, f, F, FALSE, false func validateBoolParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param) return } if len(param.Raw) == 0 { return } if val, err := strconv.ParseBool(param.Raw); err != nil { appendInvalidErrorCode(ctx, param) }
func (self *HttpContext) ParamBool(name string) bool { return self.Params[name].Bool() }
random_line_split
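The boolean validation above leans entirely on Go's strconv.ParseBool, which accepts exactly the literals listed in the comment (1, t, T, TRUE, true, True, 0, f, F, FALSE, false). A minimal, self-contained sketch of that behavior (this demo is illustrative and independent of the handler types above):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Accepted literals: 1, t, T, TRUE, true, True, 0, f, F, FALSE, false.
	// Anything else (e.g. "yes") yields an error, which the validator above
	// turns into an invalid-param error code.
	for _, raw := range []string{"1", "T", "true", "0", "FALSE", "yes"} {
		v, err := strconv.ParseBool(raw)
		fmt.Printf("%-8q -> %-5v err=%v\n", raw, v, err)
	}
}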
http_server_utils.go
HttpContext struct { Response http.ResponseWriter Request *http.Request Params map[string]*HttpParam ErrorCodes []string Errors []error postJson map[string]interface{} body []byte } type HttpParam struct { Name string InvalidErrorCode string DataType HttpParamDataType Type HttpParamType Required bool MinLength int MaxLength int Post bool Value interface{} Raw string Valid bool Present bool // If value is present and parsed properly } // Make sure your params are present and valid before trying to access. func (self *HttpParam) Int() int { if self.Value != nil { return self.Value.(int) } else { return int(0) } } func (self *HttpParam) Float() float64 { if self.Value != nil { return self.Value.(float64) } else { return float64(0) } } func (self *HttpParam) String() string { if self.Value != nil { return self.Value.(string) } else { return nadaStr } } func (self *HttpParam) Bool() bool { if self.Value != nil { return self.Value.(bool) } else { return false } } func (self *HttpParam)
() *bson.ObjectId { if self.Value != nil { return self.Value.(*bson.ObjectId) } else { return nil } } func (self *HttpParam) Json() map[string]interface{} { if self.Value == nil { return nil } else { return self.Value.(map[string]interface{}) } } func (self *HttpParam) JsonArray() []interface{} { if self.Value == nil { return nil } else { return self.Value.([]interface{}) } } // Set a valid value for a param. Missing can be valid, but not present. func (self *HttpParam) setPresentValue(value interface{}) { self.Present = true self.Value = value } // Validate the params. If any of the params are invalid, false is returned. You must call // this before reading ErrorCodes. If no params are defined, this always // returns "true". If there are raw data extraction errors, this is always false (e.g., body missing or incorrect). func (self *HttpContext) ParamsAreValid() bool { if len(self.Errors) != 0 { return false } if len(self.Params) == 0 { return true } for _, param := range self.Params { switch param.DataType { case HttpIntParam: validateIntParam(self, param) case HttpStringParam: validateStringParam(self, param) case HttpFloatParam: validateFloatParam(self, param) case HttpBoolParam: validateBoolParam(self, param) case HttpObjectIdParam: validateObjectIdParam(self, param) case HttpJsonParam: validateJsonParam(self, param) case HttpJsonArrayParam: validateJsonParam(self, param) } } return len(self.ErrorCodes) == 0 } func (self *HttpContext) ParamInt(name string) int { return self.Params[name].Int() } func (self *HttpContext) ParamFloat(name string) float64 { return self.Params[name].Float() } func (self *HttpContext) ParamString(name string) string { return self.Params[name].String() } func (self *HttpContext) HasParam(name string) bool { if val, found := self.Params[name]; !found || val == nil || val.Value == nil { return false } return true } func (self *HttpContext) ParamBool(name string) bool { return self.Params[name].Bool() } func (self *HttpContext) ParamObjectId(name string) *bson.ObjectId { return self.Params[name].ObjectId() } func (self *HttpContext) ParamJson(name string) map[string]interface{} { return self.Params[name].Json() } func (self *HttpContext) ParamJsonArray(name string) []interface{} { return self.Params[name].JsonArray() } func (self *HttpContext) HasRawErrors() bool { return len(self.Errors) > 0 } // This returns the param value as a string. If the param is missing or empty, // the string will be len == 0. 
func retrieveParamValue(ctx *HttpContext, param *HttpParam) interface{} { switch param.Type { case HttpParamPost: return strings.TrimSpace(ctx.Request.PostFormValue(param.Name)) case HttpParamJsonPost: return retrieveJsonParamValue(ctx, param) case HttpParamQuery: return strings.TrimSpace(ctx.Request.FormValue(param.Name)) case HttpParamHeader: return strings.TrimSpace(ctx.Request.Header.Get(param.Name)) case HttpParamPath: return strings.TrimSpace(mux.Vars(ctx.Request)[param.Name]) } return nadaStr } func retrieveJsonParamValue(ctx *HttpContext, param *HttpParam) interface{} { var noData interface{} if param.DataType != HttpJsonParam { noData = nadaStr } if len(ctx.Errors) > 0 { return noData } // If this is the first access, read the body if len(ctx.body) == 0 { var err error ctx.body, err = ioutil.ReadAll(ctx.Request.Body) if err != nil { ctx.Errors = append(ctx.Errors, NewStackError("Error in raw data extraction - error: %v", err)) return noData } } if ctx.postJson == nil { var genJson interface{} err := json.Unmarshal(ctx.body, &genJson) if err != nil { ctx.Errors = append(ctx.Errors, NewStackError("Error in raw json data extraction - error: %v", err)) return noData } ctx.postJson = genJson.(map[string]interface{}) } // Look for the value in the json. The json may hold the data in a variety // of formats. Convert back to a string to deal with the other data types :-( val, found := ctx.postJson[param.Name] if !found { return noData } // If this is json, return the value. if param.DataType == HttpJsonParam || param.DataType == HttpJsonArrayParam { return val } valType := reflect.TypeOf(val) if valType == nil { return noData } switch valType.Kind() { case reflect.Invalid: return noData case reflect.Bool: return fmt.Sprintf("%t", val.(bool)) case reflect.Float64: return fmt.Sprintf("%g", val.(float64)) case reflect.String: return val.(string) default: return noData } return noData } func appendInvalidErrorCode(ctx *HttpContext, param *HttpParam) { // Do not duplicate error codes. 
for i := range ctx.ErrorCodes { if ctx.ErrorCodes[i] == param.InvalidErrorCode { return } } if len(param.InvalidErrorCode) == 0 { panic(fmt.Sprintf("We do not have an error code defined for param: %s - required: %t", param.Name, param.Required)) } ctx.ErrorCodes = append(ctx.ErrorCodes, param.InvalidErrorCode) param.Valid = false } func validateIntParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param); return } if len(param.Raw) == 0 { return } if val, err := strconv.Atoi(param.Raw); err != nil { appendInvalidErrorCode(ctx, param) } else { param.setPresentValue(val) } } func validateStringParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param) return } if param.Required && param.MinLength > 0 && len(param.Raw) < param.MinLength { appendInvalidErrorCode(ctx, param) return } if param.Required && param.MaxLength > 0 && len(param.Raw) > param.MaxLength { appendInvalidErrorCode(ctx, param) return } param.setPresentValue(param.Raw) } func validateFloatParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param); return } if len(param.Raw) == 0 { return } if val, err := strconv.ParseFloat(param.Raw, 64); err != nil { appendInvalidErrorCode(ctx, param) } else { param.setPresentValue(val) } } func validateObjectIdParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param); return } if len(param.Raw) == 0 { return } if !bson.IsObjectIdHex(param.Raw) { appendInvalidErrorCode(ctx, param); return } value := bson.ObjectIdHex(param.Raw) param.setPresentValue(&value) } func validateJsonParam(ctx *HttpContext, param *HttpParam) { val := retrieveParamValue(ctx, param) if val == nil && param.Required { appendInvalidErrorCode(ctx, param); return } if val == nil { return } param.setPresentValue(val) } // Boolean types include: 1, t, T, TRUE, true, True, 0, f, F, FALSE, false func validateBoolParam(ctx *HttpContext, param *HttpParam) { param.Raw = retrieveParamValue(ctx, param).(string) if len(param.Raw) == 0 && param.Required { appendInvalidErrorCode(ctx, param) return } if len(param.Raw) == 0 { return } if val, err := strconv.ParseBool(param.Raw); err != nil { appendInvalidErrorCode(ctx, param) }
ObjectId
identifier_name
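validateObjectIdParam above guards bson.ObjectIdHex with bson.IsObjectIdHex, since ObjectIdHex panics on malformed input. A minimal sketch of that guard, assuming the same gopkg.in/mgo.v2/bson package the file imports (the sample id string is made up):

package main

import (
	"fmt"

	"gopkg.in/mgo.v2/bson"
)

func main() {
	for _, raw := range []string{"5a0d1195b8f6d50001a0e1c7", "not-an-id"} {
		// IsObjectIdHex reports whether raw is a valid 24-character hex id;
		// calling ObjectIdHex without this check panics on bad input.
		if !bson.IsObjectIdHex(raw) {
			fmt.Printf("%q: invalid object id\n", raw)
			continue
		}
		id := bson.ObjectIdHex(raw)
		fmt.Printf("%q: ok (%s)\n", raw, id.Hex())
	}
}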
mod.rs
Handle raft ready to process the side effects and send IO tasks to //! background threads //! - Receive IO task completions and update the raft state machine //! //! These two steps can be processed concurrently. mod async_writer; use engine_traits::{KvEngine, RaftEngine}; use error_code::ErrorCodeExt; use kvproto::raft_serverpb::RaftMessage; use protobuf::Message as _; use raft::{eraftpb, Ready}; use raftstore::store::{FetchedLogs, Transport, WriteTask}; use slog::{debug, error, trace, warn}; pub use self::async_writer::AsyncWriter; use crate::{ batch::StoreContext, fsm::{PeerFsm, PeerFsmDelegate}, raft::{Peer, Storage}, router::PeerTick, }; impl<'a, EK: KvEngine, ER: RaftEngine, T: Transport> PeerFsmDelegate<'a, EK, ER, T> { /// Raft relies on periodic ticks to keep the state machine in sync with other /// peers. pub fn on_raft_tick(&mut self) { if self.fsm.peer_mut().tick() { self.fsm.peer_mut().set_has_ready(); } self.schedule_tick(PeerTick::Raft); } } impl<EK: KvEngine, ER: RaftEngine> Peer<EK, ER> { #[inline] fn tick(&mut self) -> bool
/// Callback for fetching logs asynchronously. pub fn on_fetched_logs(&mut self, fetched_logs: FetchedLogs) { let FetchedLogs { context, logs } = fetched_logs; let low = logs.low; if !self.is_leader() { self.entry_storage_mut().clean_async_fetch_res(low); return; } if self.term() != logs.term { self.entry_storage_mut().clean_async_fetch_res(low); } else { self.entry_storage_mut() .update_async_fetch_res(low, Some(logs)); } self.raft_group_mut().on_entries_fetched(context); // clean the async fetch result immediately if not used to free memory self.entry_storage_mut().update_async_fetch_res(low, None); self.set_has_ready(); } /// Partially fill a raft message that will be sent to another peer. fn prepare_raft_message(&mut self) -> RaftMessage { let mut raft_msg = RaftMessage::new(); raft_msg.set_region_id(self.region().id); raft_msg.set_from_peer(self.peer().clone()); // set current epoch let epoch = self.storage().region().get_region_epoch(); let msg_epoch = raft_msg.mut_region_epoch(); msg_epoch.set_version(epoch.get_version()); msg_epoch.set_conf_ver(epoch.get_conf_ver()); raft_msg } /// Transform a message from the raft lib to a message that can be sent to other /// peers. /// /// If the recipient can't be found, `None` is returned. #[inline] fn build_raft_message<T>( &mut self, ctx: &mut StoreContext<EK, ER, T>, msg: eraftpb::Message, ) -> Option<RaftMessage> { let to_peer = match self.peer_from_cache(msg.to) { Some(p) => p, None => { warn!(self.logger, "failed to look up recipient peer"; "to_peer" => msg.to); return None; } }; let mut raft_msg = self.prepare_raft_message(); raft_msg.set_to_peer(to_peer); if msg.from != self.peer().id { debug!( self.logger, "redirecting message"; "msg_type" => ?msg.get_msg_type(), "from" => msg.get_from(), "to" => msg.get_to(), ); } raft_msg.set_message(msg); Some(raft_msg) } /// Send a message. /// /// The message is pushed into the send buffer; it may not be sent out until /// the transport is flushed explicitly. fn send_raft_message<T: Transport>( &mut self, ctx: &mut StoreContext<EK, ER, T>, msg: RaftMessage, ) { let msg_type = msg.get_message().get_msg_type(); let to_peer_id = msg.get_to_peer().get_id(); let to_store_id = msg.get_to_peer().get_store_id(); trace!( self.logger, "send raft msg"; "msg_type" => ?msg_type, "msg_size" => msg.get_message().compute_size(), "to" => to_peer_id, ); match ctx.trans.send(msg) { Ok(()) => ctx.raft_metrics.send_message.add(msg_type, true), Err(e) => { // We use metrics to observe failure on production. debug!( self.logger, "failed to send msg to other peer"; "target_peer_id" => to_peer_id, "target_store_id" => to_store_id, "err" => ?e, "error_code" => %e.error_code(), ); // unreachable store self.raft_group_mut().report_unreachable(to_peer_id); ctx.raft_metrics.send_message.add(msg_type, false); } } } fn handle_raft_committed_entries<T>( &self, _ctx: &mut crate::batch::StoreContext<EK, ER, T>, _take_committed_entries: Vec<raft::prelude::Entry>, ) { unimplemented!() } /// Processing the ready of raft. A detailed description of how it's handled /// can be found at https://docs.rs/raft/latest/raft/#processing-the-ready-state. /// /// It should be called at the end of every round of processing. Any /// writes will be handled asynchronously, and the peer will be notified once the /// writes are persisted. 
#[inline] pub fn handle_raft_ready<T: Transport>(&mut self, ctx: &mut StoreContext<EK, ER, T>) { let has_ready = self.reset_has_ready(); if !has_ready { return; } ctx.has_ready = true; if !self.raft_group().has_ready() { return; } debug!(self.logger, "handle raft ready"); let mut ready = self.raft_group_mut().ready(); // Update it after unstable entries pagination is introduced. debug_assert!(ready.entries().last().map_or_else( || true, |entry| entry.index == self.raft_group().raft.raft_log.last_index() )); if !ready.messages().is_empty() { debug_assert!(self.is_leader()); for msg in ready.take_messages() { if let Some(msg) = self.build_raft_message(ctx, msg) { self.send_raft_message(ctx, msg); } } } if !ready.committed_entries().is_empty() { self.handle_raft_committed_entries(ctx, ready.take_committed_entries()); } let ready_number = ready.number(); let mut write_task = WriteTask::new(self.region_id(), self.peer_id(), ready_number); self.storage_mut() .handle_raft_ready(&mut ready, &mut write_task); if !ready.persisted_messages().is_empty() { write_task.messages = ready .take_persisted_messages() .into_iter() .flat_map(|m| self.build_raft_message(ctx, m)) .collect(); } // Ready number should increase monotonically. assert!(self.async_writer.known_largest_number() < ready.number()); if let Some(task) = self.async_writer.write(ctx, write_task) { // The task doesn't need to be processed asynchronously, so advance directly. let mut light_rd = self.raft_group_mut().advance_append(ready); if !task.messages.is_empty() { for m in task.messages { self.send_raft_message(ctx, m); } } if !light_rd.messages().is_empty() || light_rd.commit_index().is_some() { panic!( "{:?} unexpected messages [{}] commit index [{:?}]", self.logger.list(), light_rd.messages().len(), light_rd.commit_index() ); } if !light_rd.committed_entries().is_empty() { self.handle_raft_committed_entries(ctx, light_rd.take_committed_entries()); } } else { // The task will be written asynchronously. Once it's persisted, it will be // notified by `on_persisted`. self.raft_group_mut().advance_append_async(ready); } ctx.raft_metrics.ready.has_ready_region.inc(); } /// Called when an asynchronous write finishes. pub fn on_persisted<T: Transport>( &mut self, ctx: &mut StoreContext<EK, ER, T>, peer_id: u64, ready_number: u64, ) { if peer_id != self.peer_id() { error!(self.logger, "peer id not matched"; "persisted_peer_id" => peer_id, "persisted_number" => ready_number); return; } let persisted_message = self .async_writer .on_persisted(ctx, ready_number, &self.logger); for msgs in persisted_message { for msg in msgs { self.send_raft_message(ctx, msg); } } let persisted_number = self.async
{ self.raft_group_mut().tick() }
identifier_body
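The doc comments above describe a two-phase ready flow: messages may be sent before persistence, the write task goes to an async writer, and the state machine advances either immediately (when the write completed synchronously) or later via on_persisted. Below is a deliberately toy Go sketch of that control flow only; every type and function name in it is hypothetical and merely stands in for the Rust types above:

package main

import "fmt"

// ready and asyncWriter are hypothetical stand-ins for raft::Ready and the
// AsyncWriter above; only the control flow is mirrored, not the real APIs.
type ready struct {
	number   uint64
	messages []string
}

type asyncWriter struct{ largestSeen uint64 }

// write hands the task to background threads; it returns the task only when
// it could be completed synchronously.
func (w *asyncWriter) write(rd ready, completedSync bool) *ready {
	// Ready numbers must increase monotonically, as asserted above.
	if rd.number <= w.largestSeen {
		panic("ready number did not increase")
	}
	w.largestSeen = rd.number
	if completedSync {
		return &rd
	}
	return nil
}

func handleReady(w *asyncWriter, rd ready, completedSync bool) {
	// Leader messages can be sent before the write is persisted.
	for _, m := range rd.messages {
		fmt.Println("send:", m)
	}
	if task := w.write(rd, completedSync); task != nil {
		// Synchronous completion: advance the state machine directly
		// (advance_append in the Rust code above).
		fmt.Println("advance ready", task.number)
	} else {
		// Asynchronous completion: advance later, when the persisted
		// notification arrives (advance_append_async / on_persisted).
		fmt.Println("await on_persisted for ready", rd.number)
	}
}

func main() {
	w := &asyncWriter{}
	handleReady(w, ready{number: 1, messages: []string{"MsgAppend"}}, true)
	handleReady(w, ready{number: 2}, false)
}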
mod.rs
- Handle raft ready to process the side effects and send IO tasks to //! background threads //! - Receive IO task completions and update the raft state machine //! //! These two steps can be processed concurrently. mod async_writer; use engine_traits::{KvEngine, RaftEngine}; use error_code::ErrorCodeExt; use kvproto::raft_serverpb::RaftMessage; use protobuf::Message as _; use raft::{eraftpb, Ready}; use raftstore::store::{FetchedLogs, Transport, WriteTask}; use slog::{debug, error, trace, warn}; pub use self::async_writer::AsyncWriter; use crate::{ batch::StoreContext, fsm::{PeerFsm, PeerFsmDelegate}, raft::{Peer, Storage}, router::PeerTick, }; impl<'a, EK: KvEngine, ER: RaftEngine, T: Transport> PeerFsmDelegate<'a, EK, ER, T> { /// Raft relies on periodic ticks to keep the state machine in sync with other /// peers. pub fn on_raft_tick(&mut self) { if self.fsm.peer_mut().tick() { self.fsm.peer_mut().set_has_ready(); } self.schedule_tick(PeerTick::Raft); } } impl<EK: KvEngine, ER: RaftEngine> Peer<EK, ER> { #[inline] fn tick(&mut self) -> bool { self.raft_group_mut().tick() } /// Callback for fetching logs asynchronously. pub fn on_fetched_logs(&mut self, fetched_logs: FetchedLogs) { let FetchedLogs { context, logs } = fetched_logs; let low = logs.low; if !self.is_leader() { self.entry_storage_mut().clean_async_fetch_res(low); return; } if self.term() != logs.term { self.entry_storage_mut().clean_async_fetch_res(low); } else { self.entry_storage_mut() .update_async_fetch_res(low, Some(logs)); } self.raft_group_mut().on_entries_fetched(context); // clean the async fetch result immediately if not used to free memory self.entry_storage_mut().update_async_fetch_res(low, None); self.set_has_ready(); } /// Partially fill a raft message that will be sent to another peer. fn prepare_raft_message(&mut self) -> RaftMessage { let mut raft_msg = RaftMessage::new(); raft_msg.set_region_id(self.region().id); raft_msg.set_from_peer(self.peer().clone()); // set current epoch let epoch = self.storage().region().get_region_epoch(); let msg_epoch = raft_msg.mut_region_epoch(); msg_epoch.set_version(epoch.get_version()); msg_epoch.set_conf_ver(epoch.get_conf_ver()); raft_msg } /// Transform a message from the raft lib to a message that can be sent to other /// peers. /// /// If the recipient can't be found, `None` is returned. #[inline] fn build_raft_message<T>( &mut self, ctx: &mut StoreContext<EK, ER, T>, msg: eraftpb::Message, ) -> Option<RaftMessage> { let to_peer = match self.peer_from_cache(msg.to) { Some(p) => p, None => { warn!(self.logger, "failed to look up recipient peer"; "to_peer" => msg.to); return None; } }; let mut raft_msg = self.prepare_raft_message(); raft_msg.set_to_peer(to_peer); if msg.from != self.peer().id { debug!( self.logger, "redirecting message"; "msg_type" => ?msg.get_msg_type(), "from" => msg.get_from(), "to" => msg.get_to(), ); } raft_msg.set_message(msg); Some(raft_msg) } /// Send a message. /// /// The message is pushed into the send buffer; it may not be sent out until /// the transport is flushed explicitly. 
fn send_raft_message<T: Transport>( &mut self, ctx: &mut StoreContext<EK, ER, T>, msg: RaftMessage, ) { let msg_type = msg.get_message().get_msg_type(); let to_peer_id = msg.get_to_peer().get_id(); let to_store_id = msg.get_to_peer().get_store_id(); trace!( self.logger, "send raft msg"; "msg_type" => ?msg_type, "msg_size" => msg.get_message().compute_size(), "to" => to_peer_id, ); match ctx.trans.send(msg) { Ok(()) => ctx.raft_metrics.send_message.add(msg_type, true), Err(e) => { // We use metrics to observe failure on production. debug!( self.logger, "failed to send msg to other peer"; "target_peer_id" => to_peer_id, "target_store_id" => to_store_id, "err" => ?e, "error_code" => %e.error_code(), ); // unreachable store self.raft_group_mut().report_unreachable(to_peer_id); ctx.raft_metrics.send_message.add(msg_type, false); } } } fn handle_raft_committed_entries<T>( &self, _ctx: &mut crate::batch::StoreContext<EK, ER, T>,
_take_committed_entries: Vec<raft::prelude::Entry>, ) { unimplemented!() } /// Processing the ready of raft. A detailed description of how it's handled /// can be found at https://docs.rs/raft/latest/raft/#processing-the-ready-state. /// /// It should be called at the end of every round of processing. Any /// writes will be handled asynchronously, and the peer will be notified once the /// writes are persisted. #[inline] pub fn handle_raft_ready<T: Transport>(&mut self, ctx: &mut StoreContext<EK, ER, T>) { let has_ready = self.reset_has_ready(); if !has_ready { return; } ctx.has_ready = true; if !self.raft_group().has_ready() { return; } debug!(self.logger, "handle raft ready"); let mut ready = self.raft_group_mut().ready(); // Update it after unstable entries pagination is introduced. debug_assert!(ready.entries().last().map_or_else( || true, |entry| entry.index == self.raft_group().raft.raft_log.last_index() )); if !ready.messages().is_empty() { debug_assert!(self.is_leader()); for msg in ready.take_messages() { if let Some(msg) = self.build_raft_message(ctx, msg) { self.send_raft_message(ctx, msg); } } } if !ready.committed_entries().is_empty() { self.handle_raft_committed_entries(ctx, ready.take_committed_entries()); } let ready_number = ready.number(); let mut write_task = WriteTask::new(self.region_id(), self.peer_id(), ready_number); self.storage_mut() .handle_raft_ready(&mut ready, &mut write_task); if !ready.persisted_messages().is_empty() { write_task.messages = ready .take_persisted_messages() .into_iter() .flat_map(|m| self.build_raft_message(ctx, m)) .collect(); } // Ready number should increase monotonically. assert!(self.async_writer.known_largest_number() < ready.number()); if let Some(task) = self.async_writer.write(ctx, write_task) { // The task doesn't need to be processed asynchronously, so advance directly. let mut light_rd = self.raft_group_mut().advance_append(ready); if !task.messages.is_empty() { for m in task.messages { self.send_raft_message(ctx, m); } } if !light_rd.messages().is_empty() || light_rd.commit_index().is_some() { panic!( "{:?} unexpected messages [{}] commit index [{:?}]", self.logger.list(), light_rd.messages().len(), light_rd.commit_index() ); } if !light_rd.committed_entries().is_empty() { self.handle_raft_committed_entries(ctx, light_rd.take_committed_entries()); } } else { // The task will be written asynchronously. Once it's persisted, it will be // notified by `on_persisted`. self.raft_group_mut().advance_append_async(ready); } ctx.raft_metrics.ready.has_ready_region.inc(); } /// Called when an asynchronous write finishes. pub fn on_persisted<T: Transport>( &mut self, ctx: &mut StoreContext<EK, ER, T>, peer_id: u64, ready_number: u64, ) { if peer_id != self.peer_id() { error!(self.logger, "peer id not matched"; "persisted_peer_id" => peer_id, "persisted_number" => ready_number); return; } let persisted_message = self .async_writer .on_persisted(ctx, ready_number, &self.logger); for msgs in persisted_message { for msg in msgs { self.send_raft_message(ctx, msg); } } let persisted_number = self.async_writer
random_line_split
mod.rs
- Handle raft ready to process the side effects and send IO tasks to //! background threads //! - Receive IO task completions and update the raft state machine //! //! These two steps can be processed concurrently. mod async_writer; use engine_traits::{KvEngine, RaftEngine}; use error_code::ErrorCodeExt; use kvproto::raft_serverpb::RaftMessage; use protobuf::Message as _; use raft::{eraftpb, Ready}; use raftstore::store::{FetchedLogs, Transport, WriteTask}; use slog::{debug, error, trace, warn}; pub use self::async_writer::AsyncWriter; use crate::{ batch::StoreContext, fsm::{PeerFsm, PeerFsmDelegate}, raft::{Peer, Storage}, router::PeerTick, }; impl<'a, EK: KvEngine, ER: RaftEngine, T: Transport> PeerFsmDelegate<'a, EK, ER, T> { /// Raft relies on periodic ticks to keep the state machine in sync with other /// peers. pub fn on_raft_tick(&mut self) { if self.fsm.peer_mut().tick() { self.fsm.peer_mut().set_has_ready(); } self.schedule_tick(PeerTick::Raft); } } impl<EK: KvEngine, ER: RaftEngine> Peer<EK, ER> { #[inline] fn
(&mut self) -> bool { self.raft_group_mut().tick() } /// Callback for fetching logs asynchronously. pub fn on_fetched_logs(&mut self, fetched_logs: FetchedLogs) { let FetchedLogs { context, logs } = fetched_logs; let low = logs.low; if !self.is_leader() { self.entry_storage_mut().clean_async_fetch_res(low); return; } if self.term() != logs.term { self.entry_storage_mut().clean_async_fetch_res(low); } else { self.entry_storage_mut() .update_async_fetch_res(low, Some(logs)); } self.raft_group_mut().on_entries_fetched(context); // clean the async fetch result immediately if not used to free memory self.entry_storage_mut().update_async_fetch_res(low, None); self.set_has_ready(); } /// Partially fill a raft message that will be sent to another peer. fn prepare_raft_message(&mut self) -> RaftMessage { let mut raft_msg = RaftMessage::new(); raft_msg.set_region_id(self.region().id); raft_msg.set_from_peer(self.peer().clone()); // set current epoch let epoch = self.storage().region().get_region_epoch(); let msg_epoch = raft_msg.mut_region_epoch(); msg_epoch.set_version(epoch.get_version()); msg_epoch.set_conf_ver(epoch.get_conf_ver()); raft_msg } /// Transform a message from the raft lib to a message that can be sent to other /// peers. /// /// If the recipient can't be found, `None` is returned. #[inline] fn build_raft_message<T>( &mut self, ctx: &mut StoreContext<EK, ER, T>, msg: eraftpb::Message, ) -> Option<RaftMessage> { let to_peer = match self.peer_from_cache(msg.to) { Some(p) => p, None => { warn!(self.logger, "failed to look up recipient peer"; "to_peer" => msg.to); return None; } }; let mut raft_msg = self.prepare_raft_message(); raft_msg.set_to_peer(to_peer); if msg.from != self.peer().id { debug!( self.logger, "redirecting message"; "msg_type" => ?msg.get_msg_type(), "from" => msg.get_from(), "to" => msg.get_to(), ); } raft_msg.set_message(msg); Some(raft_msg) } /// Send a message. /// /// The message is pushed into the send buffer; it may not be sent out until /// the transport is flushed explicitly. fn send_raft_message<T: Transport>( &mut self, ctx: &mut StoreContext<EK, ER, T>, msg: RaftMessage, ) { let msg_type = msg.get_message().get_msg_type(); let to_peer_id = msg.get_to_peer().get_id(); let to_store_id = msg.get_to_peer().get_store_id(); trace!( self.logger, "send raft msg"; "msg_type" => ?msg_type, "msg_size" => msg.get_message().compute_size(), "to" => to_peer_id, ); match ctx.trans.send(msg) { Ok(()) => ctx.raft_metrics.send_message.add(msg_type, true), Err(e) => { // We use metrics to observe failure on production. debug!( self.logger, "failed to send msg to other peer"; "target_peer_id" => to_peer_id, "target_store_id" => to_store_id, "err" => ?e, "error_code" => %e.error_code(), ); // unreachable store self.raft_group_mut().report_unreachable(to_peer_id); ctx.raft_metrics.send_message.add(msg_type, false); } } } fn handle_raft_committed_entries<T>( &self, _ctx: &mut crate::batch::StoreContext<EK, ER, T>, _take_committed_entries: Vec<raft::prelude::Entry>, ) { unimplemented!() } /// Processing the ready of raft. A detailed description of how it's handled /// can be found at https://docs.rs/raft/latest/raft/#processing-the-ready-state. /// /// It should be called at the end of every round of processing. Any /// writes will be handled asynchronously, and the peer will be notified once the /// writes are persisted. 
#[inline] pub fn handle_raft_ready<T: Transport>(&mut self, ctx: &mut StoreContext<EK, ER, T>) { let has_ready = self.reset_has_ready(); if !has_ready { return; } ctx.has_ready = true; if !self.raft_group().has_ready() { return; } debug!(self.logger, "handle raft ready"); let mut ready = self.raft_group_mut().ready(); // Update it after unstable entries pagination is introduced. debug_assert!(ready.entries().last().map_or_else( || true, |entry| entry.index == self.raft_group().raft.raft_log.last_index() )); if !ready.messages().is_empty() { debug_assert!(self.is_leader()); for msg in ready.take_messages() { if let Some(msg) = self.build_raft_message(ctx, msg) { self.send_raft_message(ctx, msg); } } } if !ready.committed_entries().is_empty() { self.handle_raft_committed_entries(ctx, ready.take_committed_entries()); } let ready_number = ready.number(); let mut write_task = WriteTask::new(self.region_id(), self.peer_id(), ready_number); self.storage_mut() .handle_raft_ready(&mut ready, &mut write_task); if !ready.persisted_messages().is_empty() { write_task.messages = ready .take_persisted_messages() .into_iter() .flat_map(|m| self.build_raft_message(ctx, m)) .collect(); } // Ready number should increase monotonically. assert!(self.async_writer.known_largest_number() < ready.number()); if let Some(task) = self.async_writer.write(ctx, write_task) { // The task doesn't need to be processed asynchronously, so advance directly. let mut light_rd = self.raft_group_mut().advance_append(ready); if !task.messages.is_empty() { for m in task.messages { self.send_raft_message(ctx, m); } } if !light_rd.messages().is_empty() || light_rd.commit_index().is_some() { panic!( "{:?} unexpected messages [{}] commit index [{:?}]", self.logger.list(), light_rd.messages().len(), light_rd.commit_index() ); } if !light_rd.committed_entries().is_empty() { self.handle_raft_committed_entries(ctx, light_rd.take_committed_entries()); } } else { // The task will be written asynchronously. Once it's persisted, it will be // notified by `on_persisted`. self.raft_group_mut().advance_append_async(ready); } ctx.raft_metrics.ready.has_ready_region.inc(); } /// Called when an asynchronous write finishes. pub fn on_persisted<T: Transport>( &mut self, ctx: &mut StoreContext<EK, ER, T>, peer_id: u64, ready_number: u64, ) { if peer_id != self.peer_id() { error!(self.logger, "peer id not matched"; "persisted_peer_id" => peer_id, "persisted_number" => ready_number); return; } let persisted_message = self .async_writer .on_persisted(ctx, ready_number, &self.logger); for msgs in persisted_message { for msg in msgs { self.send_raft_message(ctx, msg); } } let persisted_number = self.async
tick
identifier_name
gl_scene.py
8: (0.4, 0.4, 0.4), 9: (0.0, 0.0, 0.0), } def make_plane(): glNewList(G_OBJ_PLANE, GL_COMPILE) glBegin(GL_LINES) glColor3f(0, 0, 0) for i in range(41): glVertex3f(-10.0 + 0.5 * i, 0, -10) glVertex3f(-10.0 + 0.5 * i, 0, 10) glVertex3f(-10.0, 0, -10 + 0.5 * i) glVertex3f(10.0, 0, -10 + 0.5 * i) # Axes glEnd() glLineWidth(5) glBegin(GL_LINES) glColor3f(0.5, 0.7, 0.5) glVertex3f(0.0, 0.0, 0.0) glVertex3f(5, 0.0, 0.0) glEnd() glBegin(GL_LINES) glColor3f(0.5, 0.7, 0.5) glVertex3f(0.0, 0.0, 0.0) glVertex3f(0.0, 5, 0.0) glEnd() glBegin(GL_LINES) glColor3f(0.5, 0.7, 0.5) glVertex3f(0.0, 0.0, 0.0) glVertex3f(0.0, 0.0, 5) glEnd() # Draw the Y. glBegin(GL_LINES) glColor3f(0.0, 0.0, 0.0) glVertex3f(0.0, 5.0, 0.0) glVertex3f(0.0, 5.5, 0.0) glVertex3f(0.0, 5.5, 0.0) glVertex3f(-0.5, 6.0, 0.0) glVertex3f(0.0, 5.5, 0.0) glVertex3f(0.5, 6.0, 0.0) # Draw the Z. glVertex3f(-0.5, 0.0, 5.0) glVertex3f(0.5, 0.0, 5.0) glVertex3f(0.5, 0.0, 5.0) glVertex3f(-0.5, 0.0, 6.0) glVertex3f(-0.5, 0.0, 6.0) glVertex3f(0.5, 0.0, 6.0) # Draw the X. glVertex3f(5.0, 0.0, 0.5) glVertex3f(6.0, 0.0, -0.5) glVertex3f(5.0, 0.0, -0.5) glVertex3f(6.0, 0.0, 0.5) glEnd() glLineWidth(1) glEndList() def make_sphere(): """ Create the display list for rendering the sphere """ glNewList(G_OBJ_SPHERE, GL_COMPILE) quad = gluNewQuadric() gluSphere(quad, 0.5, 30, 30) gluDeleteQuadric(quad) glEndList() def make_cube(): glNewList(G_OBJ_CUBE, GL_COMPILE) vertices = [((-0.5, -0.5, -0.5), (-0.5, -0.5, 0.5), (-0.5, 0.5, 0.5), (-0.5, 0.5, -0.5)), ((-0.5, -0.5, -0.5), (-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), (0.5, -0.5, -0.5)), ((0.5, -0.5, -0.5), (0.5, 0.5, -0.5), (0.5, 0.5, 0.5), (0.5, -0.5, 0.5)), ((-0.5, -0.5, 0.5), (0.5, -0.5, 0.5), (0.5, 0.5, 0.5), (-0.5, 0.5, 0.5)), ((-0.5, -0.5, 0.5), (-0.5, -0.5, -0.5), (0.5, -0.5, -0.5), (0.5, -0.5, 0.5)), ((-0.5, 0.5, -0.5), (-0.5, 0.5, 0.5), (0.5, 0.5, 0.5), (0.5, 0.5, -0.5))] normals = [(-1.0, 0.0, 0.0), (0.0, 0.0, -1.0), (1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (0.0, -1.0, 0.0), (0.0, 1.0, 0.0)] glBegin(GL_QUADS) for i in range(6): glNormal3f(normals[i][
List() def init_primitives(): """ Initialize the display lists for all primitives """ make_plane() make_sphere() make_cube() def translation(displacement): """ Build a translation matrix """ t = numpy.identity(4) t[0, 3] = displacement[0] t[1, 3] = displacement[1] t[2, 3] = displacement[2] return t def scaling(scale): """ Build a scaling matrix """ s = numpy.identity(4) s[0, 0] = scale[0] s[1, 1] = scale[1] s[2, 2] = scale[2] s[3, 3] = 1 return s class Scene(object): # Depth at which nodes are placed: 15 units away from the camera PLACE_DEPTH = 15.0 def __init__(self): # Queue of nodes in the scene self.node_list = list() def add_node(self, node): """ Add a new node to the scene """ self.node_list.append(node) def render(self): """ Traverse all nodes in the scene and render them """ for node in self.node_list: node.render() class Node(object): def __init__(self): # Color index of this node self.color_index = random.randint(MIN_COLOR, MAX_COLOR) # Translation matrix of this node; determines its position in the scene self.translation_matrix = numpy.identity(4) # Scaling matrix of this node; determines its size self.scaling_matrix = numpy.identity(4) def render(self): """ Render the node """ glPushMatrix() # Apply the translation glMultMatrixf(numpy.transpose(self.translation_matrix)) # Apply the scaling glMultMatrixf(self.scaling_matrix) cur_color = COLORS[self.color_index] # Set the color glColor3f(cur_color[0], cur_color[1], cur_color[2]) # Render the object's model self.render_self() glPopMatrix() def render_self(self): raise NotImplementedError( "The Abstract Node Class doesn't define 'render_self'") def translate(self, x, y, z): self.translation_matrix = numpy.dot(self.translation_matrix, translation([x, y, z])) def scale(self, s): self.scaling_matrix = numpy.dot(self.scaling_matrix, scaling([s, s, s])) class Primitive(Node): def __init__(self): super(Primitive, self).__init__() self.call_list = None def render_self(self): glCallList(self.call_list) class Sphere(Primitive): """ Sphere primitive """ def __init__(self): super(Sphere, self).__init__() self.call_list = G_OBJ_SPHERE class Cube(Primitive): """ Cube primitive """ def __init__(self): super(Cube, self).__init__() self.call_list = G_OBJ_CUBE class Plane(Primitive): def __init__(self): super(
0], normals[i][1], normals[i][2]) for j in range(4): glVertex3f(vertices[i][j][0], vertices[i][j][1], vertices[i][j][2]) glEnd() glEnd
conditional_block
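The translation() and scaling() helpers above build the standard homogeneous 4x4 transforms. In LaTeX notation:

\[ T(d) = \begin{pmatrix} 1 & 0 & 0 & d_x \\ 0 & 1 & 0 & d_y \\ 0 & 0 & 1 & d_z \\ 0 & 0 & 0 & 1 \end{pmatrix}, \qquad S(s) = \begin{pmatrix} s_x & 0 & 0 & 0 \\ 0 & s_y & 0 & 0 \\ 0 & 0 & s_z & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix} \]

so that \(T(d)\,v\) displaces a point \(v = (x, y, z, 1)^\top\) by \(d\), and \(S(s)\,v\) scales it about the origin.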
gl_scene.py
0.5, -0.5))] normals = [(-1.0, 0.0, 0.0), (0.0, 0.0, -1.0), (1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (0.0, -1.0, 0.0), (0.0, 1.0, 0.0)] glBegin(GL_QUADS) for i in range(6): glNormal3f(normals[i][0], normals[i][1], normals[i][2]) for j in range(4): glVertex3f(vertices[i][j][0], vertices[i][j][1], vertices[i][j][2]) glEnd() glEndList() def init_primitives(): """ Initialize the display lists for all primitives """ make_plane() make_sphere() make_cube() def translation(displacement): """ Build a translation matrix """ t = numpy.identity(4) t[0, 3] = displacement[0] t[1, 3] = displacement[1] t[2, 3] = displacement[2] return t def scaling(scale): """ Build a scaling matrix """ s = numpy.identity(4) s[0, 0] = scale[0] s[1, 1] = scale[1] s[2, 2] = scale[2] s[3, 3] = 1 return s class Scene(object): # Depth at which nodes are placed: 15 units away from the camera PLACE_DEPTH = 15.0 def __init__(self): # Queue of nodes in the scene self.node_list = list() def add_node(self, node): """ Add a new node to the scene """ self.node_list.append(node) def render(self): """ Traverse all nodes in the scene and render them """ for node in self.node_list: node.render() class Node(object): def __init__(self): # Color index of this node self.color_index = random.randint(MIN_COLOR, MAX_COLOR) # Translation matrix of this node; determines its position in the scene self.translation_matrix = numpy.identity(4) # Scaling matrix of this node; determines its size self.scaling_matrix = numpy.identity(4) def render(self): """ Render the node """ glPushMatrix() # Apply the translation glMultMatrixf(numpy.transpose(self.translation_matrix)) # Apply the scaling glMultMatrixf(self.scaling_matrix) cur_color = COLORS[self.color_index] # Set the color glColor3f(cur_color[0], cur_color[1], cur_color[2]) # Render the object's model self.render_self() glPopMatrix() def render_self(self): raise NotImplementedError( "The Abstract Node Class doesn't define 'render_self'") def translate(self, x, y, z): self.translation_matrix = numpy.dot(self.translation_matrix, translation([x, y, z])) def scale(self, s): self.scaling_matrix = numpy.dot(self.scaling_matrix, scaling([s, s, s])) class Primitive(Node): def __init__(self): super(Primitive, self).__init__() self.call_list = None def render_self(self): glCallList(self.call_list) class Sphere(Primitive): """ Sphere primitive """ def __init__(self): super(Sphere, self).__init__() self.call_list = G_OBJ_SPHERE class Cube(Primitive): """ Cube primitive """ def __init__(self): super(Cube, self).__init__() self.call_list = G_OBJ_CUBE class Plane(Primitive): def __init__(self): super(Plane, self).__init__() self.call_list = G_OBJ_PLANE class HierarchicalNode(Node): def __init__(self): super(HierarchicalNode, self).__init__() self.child_nodes = [] def render_self(self): for child in self.child_nodes: child.render() class SnowFigure(HierarchicalNode): def __init__(self): super(SnowFigure, self).__init__() self.child_nodes = [Sphere(), Sphere(), Sphere()] self.child_nodes[0].translate(0, -0.6, 0) self.child_nodes[1].translate(0, 0.1, 0) self.child_nodes[1].scale(0.8) self.child_nodes[2].translate(0, 0.75, 0) self.child_nodes[2].scale(0.7) for child_node in self.child_nodes: child_node.color_index = MIN_COLOR class Viewer(object): def __init__(self): """ Initialize the viewer. 
""" # 初始化接口,创建窗口并注册渲染函数 self.init_interface() # 初始化opengl的配置 self.init_opengl() # 初始化3d场景 self.init_scene() # 初始化交互操作相关的代码 self.init_interaction() init_primitives() def init_interface(self): """ 初始化窗口并注册渲染函数 """ glutInit() glutInitWindowSize(640, 480) glutCreateWindow("3D Modeller") glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB) # 注册窗口渲染函数 glutDisplayFunc(self.render) def init_opengl(self): """ 初始化opengl的配置 """ # 模型视图矩阵 self.inverseModelView = numpy.identity(4) # 模型视图矩阵的逆矩阵 self.modelView = numpy.identity(4) # 开启剔除操作效果 glEnable(GL_CULL_FACE) # 取消对多边形背面进行渲染的计算(看不到的部分不渲染) glCullFace(GL_BACK) # 开启深度测试 glEnable(GL_DEPTH_TEST) # 测试是否被遮挡,被遮挡的物体不予渲染 glDepthFunc(GL_LESS) # 启用0号光源 glEnable(GL_LIGHT0) # 设置光源的位置 glLightfv(GL_LIGHT0, GL_POSITION, GLfloat_4(0, 0, 1, 0)) # 设置光源的照射方向 glLightfv(GL_LIGHT0, GL_SPOT_DIRECTION, GLfloat_3(0, 0, -1)) # 设置材质颜色 glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE) glEnable(GL_COLOR_MATERIAL) # 设置清屏的颜色 glClearColor(0.4, 0.4, 0.4, 0.0) def init_scene(self): # 初始化场景,之后实现 # 创建一个场景实例 self.scene = Scene() # 初始化场景内的对象 self.create_sample_scene() def create_sample_scene(self): # 创建一个球体 sphere_node = Sphere() # 设置球体的颜色 sphere_node.color_index = 2 # 将球体放进场景中,默认在正中央 sphere_node.translate(2, 2, 0) sphere_node.scale(4) self.scene.add_node(sphere_node) # 添加小雪人 hierarchical_node = SnowFigure() hierarchical_node.translate(-2, 0, -2) hierarchical_node.scale(2) self.scene.add_node(hierarchical_node) # 添加立方体 cube_node = Cube() cube_node.color_index = 5 cube_node.translate(5, 5, 0) cube_node.scale(1.8) self.scene.add_node(cube_node) # 添加Plane plane_node = Plane() plane_node.color_index = 2 self.scene.add_node(plane_node) def init_interaction(self): # 初始化交互操作相关的代码,之后实现 pass def main_loop(self): # 程序主循环开始 glutMainLoop() def render(self): # 程序进入主循环后每一次循环调用的渲染函数 # 初始化投影矩阵 self.init_view() # 启动光照 glEnable(GL_LIGHTING) # 清空颜色缓存与深度缓存 glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) # 设置模型视图矩阵,目前为止用单位矩阵就行了。 glMatrixMode(GL_MODELVIEW) glPushMatrix() glLoadIdentity() # 渲染场景 self.scene.render() # 每次渲染后复位光照状态 glDisable(GL_LIGHTING) glPopMatrix() # 把数据刷新到显存上 glFlush() def init_view(self): """ 初始化投影矩阵 """ xSize, ySize = glutGet(GLUT_WINDOW_WIDTH), glutGet(GLUT_WINDOW_HEIGHT) # 得到屏幕宽高比 aspect_ratio = float(xSize) / float(ySize) # 设置投影矩阵 glMatrixMode(GL_PROJECTION) glLoadIdentity() # 设置视口,应与窗口重合 glViewport(0, 0, xSize, ySize) # 设置透视,摄像机上下视野幅度70度 #
the viewing range extends to
identifier_name
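Node.render() above pushes the translation matrix onto the OpenGL stack first and the scaling matrix second, so with column vectors each node is drawn as

\( v' = T \, S \, v \)

i.e. the node is scaled in its local frame and then placed in the world. The numpy.transpose on the translation matrix compensates for glMultMatrixf consuming column-major data while numpy stores matrices row-major; the scaling matrix is diagonal and equals its own transpose, so it needs no such adjustment.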
gl_scene.py
0, 0, -10 + 0.5 * i) # Axes glEnd() glLineWidth(5) glBegin(GL_LINES) glColor3f(0.5, 0.7, 0.5) glVertex3f(0.0, 0.0, 0.0) glVertex3f(5, 0.0, 0.0) glEnd() glBegin(GL_LINES) glColor3f(0.5, 0.7, 0.5) glVertex3f(0.0, 0.0, 0.0) glVertex3f(0.0, 5, 0.0) glEnd() glBegin(GL_LINES) glColor3f(0.5, 0.7, 0.5) glVertex3f(0.0, 0.0, 0.0) glVertex3f(0.0, 0.0, 5) glEnd() # Draw the Y. glBegin(GL_LINES) glColor3f(0.0, 0.0, 0.0) glVertex3f(0.0, 5.0, 0.0) glVertex3f(0.0, 5.5, 0.0) glVertex3f(0.0, 5.5, 0.0) glVertex3f(-0.5, 6.0, 0.0) glVertex3f(0.0, 5.5, 0.0) glVertex3f(0.5, 6.0, 0.0) # Draw the Z. glVertex3f(-0.5, 0.0, 5.0) glVertex3f(0.5, 0.0, 5.0) glVertex3f(0.5, 0.0, 5.0) glVertex3f(-0.5, 0.0, 6.0) glVertex3f(-0.5, 0.0, 6.0) glVertex3f(0.5, 0.0, 6.0) # Draw the X. glVertex3f(5.0, 0.0, 0.5) glVertex3f(6.0, 0.0, -0.5) glVertex3f(5.0, 0.0, -0.5) glVertex3f(6.0, 0.0, 0.5) glEnd() glLineWidth(1) glEndList() def make_sphere(): """ Create the display list for rendering the sphere """ glNewList(G_OBJ_SPHERE, GL_COMPILE) quad = gluNewQuadric() gluSphere(quad, 0.5, 30, 30) gluDeleteQuadric(quad) glEndList() def make_cube(): glNewList(G_OBJ_CUBE, GL_COMPILE) vertices = [((-0.5, -0.5, -0.5), (-0.5, -0.5, 0.5), (-0.5, 0.5, 0.5), (-0.5, 0.5, -0.5)), ((-0.5, -0.5, -0.5), (-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), (0.5, -0.5, -0.5)), ((0.5, -0.5, -0.5), (0.5, 0.5, -0.5), (0.5, 0.5, 0.5), (0.5, -0.5, 0.5)), ((-0.5, -0.5, 0.5), (0.5, -0.5, 0.5), (0.5, 0.5, 0.5), (-0.5, 0.5, 0.5)), ((-0.5, -0.5, 0.5), (-0.5, -0.5, -0.5), (0.5, -0.5, -0.5), (0.5, -0.5, 0.5)), ((-0.5, 0.5, -0.5), (-0.5, 0.5, 0.5), (0.5, 0.5, 0.5), (0.5, 0.5, -0.5))] normals = [(-1.0, 0.0, 0.0), (0.0, 0.0, -1.0), (1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (0.0, -1.0, 0.0), (0.0, 1.0, 0.0)] glBegin(GL_QUADS) for i in range(6): glNormal3f(normals[i][0], normals[i][1], normals[i][2]) for j in range(4): glVertex3f(vertices[i][j][0], vertices[i][j][1], vertices[i][j][2]) glEnd() glEndList() def init_primitives(): """ Initialize the display lists for all primitives """ make_plane() make_sphere() make_cube() def translation(displacement): """ Build a translation matrix """ t = numpy.identity(4) t[0, 3] = displacement[0] t[1, 3] = displacement[1] t[2, 3] = displacement[2] return t def scaling(scale): """ Build a scaling matrix """ s = numpy.identity(4) s[0, 0] = scale[0] s[1, 1] = scale[1] s[2, 2] = scale[2] s[3, 3] = 1 return s class Scene(object): # Depth at which nodes are placed: 15 units away from the camera PLACE_DEPTH = 15.0 def __init__(self): # Queue of nodes in the scene self.node_list = list() def add_node(self, node): """ Add a new node to the scene """ self.node_list.append(node) def render(self): """ Traverse all nodes in the scene and render them """ for node in self.node_list: node.render() class Node(object): def __init__(self): # Color index of this node self.color_index = random.randint(MIN_COLOR, MAX_COLOR) # Translation matrix of this node; determines its position in the scene self.translation_matrix = numpy.identity(4) # Scaling matrix of this node; determines its size self.scaling_matrix = numpy.identity(4) def render(self): """ Render the node """ glPushMatrix() # Apply the translation glMultMatrixf(numpy.transpose(self.translation_matrix)) # Apply the scaling glMultMatrixf(self.scaling_matrix) cur_color = COLORS[self.color_index] # Set the color glColor3f(cur_color[0], cur_color[1], cur_color[2]) # Render the object's model self.render_self() glPopMatrix() def render_self(self): raise NotImplementedError( "The Abstract Node Class doesn't define 'render_self'") def translate(self, x, y, z): self.translation_matrix = numpy.dot(self.translation_matrix, translation([x, y, z])) def scale(self, s): self.scaling_matrix = numpy.dot(self.scaling_matrix, scaling([s, s, s])) class Primitive(Node): def __init__(self): super(Primitive, self).__init__() self.call_list = None def render_self(self): glCallList(self.call_list) class 
Sphere(Primitive): """ 球形图元 """ def __init__(self): super(Sphere, self).__init__() self.call_list = G_OBJ_SPHERE class Cube(Primitive): """ 立方体图元 """ def __init__(self): super(Cube, self).__init__() self.call_list = G_OBJ_CUBE class Plane(Primitive): def __init__(self): super(Plane, self).__init__() self.call_list = G_OBJ_PLANE class HierarchicalNode(Node): def __init__(self): super(HierarchicalNode, self).__init__() self.child_nodes = [] def render_self(self): for child in self.child_nodes: child.render() class SnowFigure(HierarchicalNode): def __init__(self): super(SnowFigure, self).__init__() self.child_nodes = [Sphere(), Sphere(), Sphere()] self.child_nodes[0].translate(0, -0.6, 0) self.child_nodes[1].translate(0, 0.1, 0) self.chi
ld_nodes[1].scale(0.8) self.child_nodes[2].transl
identifier_body
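HierarchicalNode.render_self() runs each child's render() inside the parent's glPushMatrix/glPopMatrix scope, so a child's effective world transform is the parent's transform composed with its own:

\( M_{\text{child}}^{\text{world}} = (T_p S_p)(T_c S_c) \)

This is why SnowFigure's three spheres only specify offsets relative to the figure: translating or scaling the figure moves and resizes all of them together.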
gl_scene.py
MIN_COLOR = 0 COLORS = { # RGB Colors 0: (1.0, 1.0, 1.0), 1: (0.05, 0.05, 0.9), 2: (0.05, 0.9, 0.05), 3: (0.9, 0.05, 0.05), 4: (0.9, 0.9, 0.0), 5: (0.1, 0.8, 0.7), 6: (0.7, 0.2, 0.7), 7: (0.7, 0.7, 0.7), 8: (0.4, 0.4, 0.4), 9: (0.0, 0.0, 0.0), } def make_plane(): glNewList(G_OBJ_PLANE, GL_COMPILE) glBegin(GL_LINES) glColor3f(0, 0, 0) for i in range(41): glVertex3f(-10.0 + 0.5 * i, 0, -10) glVertex3f(-10.0 + 0.5 * i, 0, 10) glVertex3f(-10.0, 0, -10 + 0.5 * i) glVertex3f(10.0, 0, -10 + 0.5 * i) # Axes glEnd() glLineWidth(5) glBegin(GL_LINES) glColor3f(0.5, 0.7, 0.5) glVertex3f(0.0, 0.0, 0.0) glVertex3f(5, 0.0, 0.0) glEnd() glBegin(GL_LINES) glColor3f(0.5, 0.7, 0.5) glVertex3f(0.0, 0.0, 0.0) glVertex3f(0.0, 5, 0.0) glEnd() glBegin(GL_LINES) glColor3f(0.5, 0.7, 0.5) glVertex3f(0.0, 0.0, 0.0) glVertex3f(0.0, 0.0, 5) glEnd() # Draw the Y. glBegin(GL_LINES) glColor3f(0.0, 0.0, 0.0) glVertex3f(0.0, 5.0, 0.0) glVertex3f(0.0, 5.5, 0.0) glVertex3f(0.0, 5.5, 0.0) glVertex3f(-0.5, 6.0, 0.0) glVertex3f(0.0, 5.5, 0.0) glVertex3f(0.5, 6.0, 0.0) # Draw the Z. glVertex3f(-0.5, 0.0, 5.0) glVertex3f(0.5, 0.0, 5.0) glVertex3f(0.5, 0.0, 5.0) glVertex3f(-0.5, 0.0, 6.0) glVertex3f(-0.5, 0.0, 6.0) glVertex3f(0.5, 0.0, 6.0) # Draw the X. glVertex3f(5.0, 0.0, 0.5) glVertex3f(6.0, 0.0, -0.5) glVertex3f(5.0, 0.0, -0.5) glVertex3f(6.0, 0.0, 0.5) glEnd() glLineWidth(1) glEndList() def make_sphere(): """ Create the display list for rendering the sphere """ glNewList(G_OBJ_SPHERE, GL_COMPILE) quad = gluNewQuadric() gluSphere(quad, 0.5, 30, 30) gluDeleteQuadric(quad) glEndList() def make_cube(): glNewList(G_OBJ_CUBE, GL_COMPILE) vertices = [((-0.5, -0.5, -0.5), (-0.5, -0.5, 0.5), (-0.5, 0.5, 0.5), (-0.5, 0.5, -0.5)), ((-0.5, -0.5, -0.5), (-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), (0.5, -0.5, -0.5)), ((0.5, -0.5, -0.5), (0.5, 0.5, -0.5), (0.5, 0.5, 0.5), (0.5, -0.5, 0.5)), ((-0.5, -0.5, 0.5), (0.5, -0.5, 0.5), (0.5, 0.5, 0.5), (-0.5, 0.5, 0.5)), ((-0.5, -0.5, 0.5), (-0.5, -0.5, -0.5), (0.5, -0.5, -0.5), (0.5, -0.5, 0.5)), ((-0.5, 0.5, -0.5), (-0.5, 0.5, 0.5), (0.5, 0.5, 0.5), (0.5, 0.5, -0.5))] normals = [(-1.0, 0.0, 0.0), (0.0, 0.0, -1.0), (1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (0.0, -1.0, 0.0), (0.0, 1.0, 0.0)] glBegin(GL_QUADS) for i in range(6): glNormal3f(normals[i][0], normals[i][1], normals[i][2]) for j in range(4): glVertex3f(vertices[i][j][0], vertices[i][j][1], vertices[i][j][2]) glEnd() glEndList() def init_primitives(): """ Initialize the display lists for all primitives """ make_plane() make_sphere() make_cube() def translation(displacement): """ Build a translation matrix """ t = numpy.identity(4) t[0, 3] = displacement[0] t[1, 3] = displacement[1] t[2, 3] = displacement[2] return t def scaling(scale): """ Build a scaling matrix """ s = numpy.identity(4) s[0, 0] = scale[0] s[1, 1] = scale[1] s[2, 2] = scale[2] s[3, 3] = 1 return s class Scene(object): # Depth at which nodes are placed: 15 units away from the camera PLACE_DEPTH = 15.0 def __init__(self): # Queue of nodes in the scene self.node_list = list() def add_node(self, node): """ Add a new node to the scene """ self.node_list.append(node) def render(self): """ Traverse all nodes in the scene and render them """ for node in self.node_list: node.render() class Node(object): def __init__(self): # Color index of this node self.color_index = random.randint(MIN_COLOR, MAX_COLOR) # Translation matrix of this node; determines its position in the scene self.translation_matrix = numpy.identity(4) # Scaling matrix of this node; determines its size self.scaling_matrix = numpy.identity(4) def render(self): """ Render the node """ glPushMatrix() # Apply the translation glMultMatrixf(numpy.transpose(self.translation_matrix)) # Apply the scaling glMultMatrixf(self.scaling_matrix) cur_color = COLORS[self.color_index] # Set the color glColor3f(cur_color[0], cur_color[1], cur_color[2]) # Render the object's model self.render_self() glPopMatrix() def 
render_self(self): raise NotImplementedError( "The Abstract Node Class doesn't define 'render_self'") def translate(self, x, y,
G_OBJ_CUBE = 3 MAX_COLOR = 9
random_line_split
driver.rs
(previous) = &previous { (previous)(sess, lint_store); } let conf = clippy_lints::read_conf(&[], &sess); clippy_lints::register_plugins(&mut lint_store, &sess, &conf); clippy_lints::register_pre_expansion_lints(&mut lint_store, &conf); clippy_lints::register_renamed(&mut lint_store); })); // FIXME: #4825; This is required, because Clippy lints that are based on MIR have to be // run on the unoptimized MIR. On the other hand this results in some false negatives. If // MIR passes can be enabled / disabled separately, we should figure out, what passes to // use for Clippy. config.opts.debugging_opts.mir_opt_level = 0; } } #[allow(clippy::find_map, clippy::filter_map)] fn describe_lints() { use lintlist::{Level, Lint, ALL_LINTS, LINT_LEVELS}; use std::collections::HashSet; println!( " Available lint options: -W <foo> Warn about <foo> -A <foo> Allow <foo> -D <foo> Deny <foo> -F <foo> Forbid <foo> (deny <foo> and all attempts to override) " ); let lint_level = |lint: &Lint| { LINT_LEVELS .iter() .find(|level_mapping| level_mapping.0 == lint.group) .map(|(_, level)| match level { Level::Allow => "allow", Level::Warn => "warn", Level::Deny => "deny", }) .unwrap() }; let mut lints: Vec<_> = ALL_LINTS.iter().collect(); // The sort doesn't case-fold but it's doubtful we care. lints.sort_by_cached_key(|x: &&Lint| (lint_level(x), x.name)); let max_lint_name_len = lints .iter() .map(|lint| lint.name.len()) .map(|len| len + "clippy::".len()) .max() .unwrap_or(0); let padded = |x: &str| { let mut s = " ".repeat(max_lint_name_len - x.chars().count()); s.push_str(x); s }; let scoped = |x: &str| format!("clippy::{}", x); let lint_groups: HashSet<_> = lints.iter().map(|lint| lint.group).collect(); println!("Lint checks provided by clippy:\n"); println!(" {} {:7.7} meaning", padded("name"), "default"); println!(" {} {:7.7} -------", padded("----"), "-------"); let print_lints = |lints: &[&Lint]| { for lint in lints { let name = lint.name.replace("_", "-"); println!( " {} {:7.7} {}", padded(&scoped(&name)), lint_level(lint), lint.desc ); } println!("\n"); }; print_lints(&lints); let max_group_name_len = std::cmp::max( "clippy::all".len(), lint_groups .iter() .map(|group| group.len()) .map(|len| len + "clippy::".len()) .max() .unwrap_or(0), ); let padded_group = |x: &str| { let mut s = " ".repeat(max_group_name_len - x.chars().count()); s.push_str(x); s }; println!("Lint groups provided by clippy:\n"); println!(" {} sub-lints", padded_group("name")); println!(" {} ---------", padded_group("----")); println!(" {} the set of all clippy lints", padded_group("clippy::all")); let print_lint_groups = || { for group in lint_groups { let name = group.to_lowercase().replace("_", "-"); let desc = lints .iter() .filter(|&lint| lint.group == group) .map(|lint| lint.name) .map(|name| name.replace("_", "-")) .collect::<Vec<String>>() .join(", "); println!(" {} {}", padded_group(&scoped(&name)), desc); } println!("\n"); }; print_lint_groups(); } fn display_help() { println!( "\ Checks a package to catch common mistakes and improve your Rust code. Usage: cargo clippy [options] [--] [<opts>...] Common options: -h, --help Print this message -V, --version Print version info and exit Other options are the same as `cargo check`. 
To allow or deny a lint from the command line you can use `cargo clippy --` with: -W --warn OPT Set lint warnings -A --allow OPT Set lint allowed -D --deny OPT Set lint denied -F --forbid OPT Set lint forbidden You can use tool lints to allow or deny lints from your code, e.g.: #[allow(clippy::needless_lifetimes)] " ); } const BUG_REPORT_URL: &str = "https://github.com/rust-lang/rust-clippy/issues/new"; lazy_static! { static ref ICE_HOOK: Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send + 'static> = { let hook = panic::take_hook(); panic::set_hook(Box::new(|info| report_clippy_ice(info, BUG_REPORT_URL))); hook }; } fn report_clippy_ice(info: &panic::PanicInfo<'_>, bug_report_url: &str) { // Invoke our ICE handler, which prints the actual panic message and optionally a backtrace (*ICE_HOOK)(info); // Separate the output with an empty line eprintln!(); let emitter = Box::new(rustc_errors::emitter::EmitterWriter::stderr( rustc_errors::ColorConfig::Auto, None, false, false, None, false, )); let handler = rustc_errors::Handler::with_emitter(true, None, emitter); // a .span_bug or .bug call has already printed what // it wants to print. if !info.payload().is::<rustc_errors::ExplicitBug>() { let d = rustc_errors::Diagnostic::new(rustc_errors::Level::Bug, "unexpected panic"); handler.emit_diagnostic(&d); } let version_info = rustc_tools_util::get_version_info!(); let xs: Vec<Cow<'static, str>> = vec![ "the compiler unexpectedly panicked. this is a bug.".into(), format!("we would appreciate a bug report: {}", bug_report_url).into(), format!("Clippy version: {}", version_info).into(), ]; for note in &xs { handler.note_without_error(&note); } // If backtraces are enabled, also print the query stack let backtrace = env::var_os("RUST_BACKTRACE").map_or(false, |x| &x != "0"); if backtrace { TyCtxt::try_print_query_stack(&handler); } } fn toolchain_path(home: Option<String>, toolchain: Option<String>) -> Option<PathBuf> { home.and_then(|home| { toolchain.map(|toolchain| { let mut path = PathBuf::from(home); path.push("toolchains"); path.push(toolchain); path }) }) } pub fn main() { rustc_driver::init_rustc_env_logger(); lazy_static::initialize(&ICE_HOOK); exit( rustc_driver::catch_fatal_errors(move || { let mut orig_args: Vec<String> = env::args().collect(); if orig_args.iter().any(|a| a == "--version" || a == "-V") { let version_info = rustc_tools_util::get_version_info!(); println!("{}", version_info); exit(0); } // Get the sysroot, looking from most specific to this invocation to the least: // - command line // - runtime environment // - SYSROOT // - RUSTUP_HOME, MULTIRUST_HOME, RUSTUP_TOOLCHAIN, MULTIRUST_TOOLCHAIN // - sysroot from rustc in the path // - compile-time environment // - SYSROOT // - RUSTUP_HOME, MULTIRUST_HOME, RUSTUP_TOOLCHAIN, MULTIRUST_TOOLCHAIN let sys_root_arg = arg_value(&orig_args, "--sysroot", |_| true); let have_sys_root_arg = sys_root_arg.is_some(); let sys_root = sys_root_arg .map(PathBuf::from) .or_else(|| std::env::var("SYSROOT").ok().map(PathBuf::from)) .or_else(|| { let home = std::env::var("RUSTUP_HOME")
.or_else(|_| std::env::var("MULTIRUST_HOME")) .ok(); let toolchain = std::env::var("RUSTUP_TOOLCHAIN")
random_line_split
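The sysroot lookup above is a plain ordered fallback chain. Below is a rough Go transliteration of the runtime part of that order, for illustration only (the command-line flag and compile-time env vars are omitted and the MULTIRUST aliases are skipped; the function name is made up, the real logic is the Rust above):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

func sysroot() (string, error) {
	// 1. SYSROOT from the runtime environment.
	if s := os.Getenv("SYSROOT"); s != "" {
		return s, nil
	}
	// 2. RUSTUP_HOME + RUSTUP_TOOLCHAIN -> $RUSTUP_HOME/toolchains/<toolchain>.
	home, toolchain := os.Getenv("RUSTUP_HOME"), os.Getenv("RUSTUP_TOOLCHAIN")
	if home != "" && toolchain != "" {
		return filepath.Join(home, "toolchains", toolchain), nil
	}
	// 3. Ask a rustc found on the PATH.
	out, err := exec.Command("rustc", "--print", "sysroot").Output()
	if err != nil {
		return "", fmt.Errorf("need SYSROOT, rustup vars, or rustc on PATH: %w", err)
	}
	return strings.TrimSpace(string(out)), nil
}

func main() {
	s, err := sysroot()
	fmt.Println(s, err)
}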
driver.rs
.map(|group| group.len()) .map(|len| len + "clippy::".len()) .max() .unwrap_or(0), ); let padded_group = |x: &str| { let mut s = " ".repeat(max_group_name_len - x.chars().count()); s.push_str(x); s }; println!("Lint groups provided by clippy:\n"); println!(" {} sub-lints", padded_group("name")); println!(" {} ---------", padded_group("----")); println!(" {} the set of all clippy lints", padded_group("clippy::all")); let print_lint_groups = || { for group in lint_groups { let name = group.to_lowercase().replace("_", "-"); let desc = lints .iter() .filter(|&lint| lint.group == group) .map(|lint| lint.name) .map(|name| name.replace("_", "-")) .collect::<Vec<String>>() .join(", "); println!(" {} {}", padded_group(&scoped(&name)), desc); } println!("\n"); }; print_lint_groups(); } fn display_help() { println!( "\ Checks a package to catch common mistakes and improve your Rust code. Usage: cargo clippy [options] [--] [<opts>...] Common options: -h, --help Print this message -V, --version Print version info and exit Other options are the same as `cargo check`. To allow or deny a lint from the command line you can use `cargo clippy --` with: -W --warn OPT Set lint warnings -A --allow OPT Set lint allowed -D --deny OPT Set lint denied -F --forbid OPT Set lint forbidden You can use tool lints to allow or deny lints from your code, e.g.: #[allow(clippy::needless_lifetimes)] " ); } const BUG_REPORT_URL: &str = "https://github.com/rust-lang/rust-clippy/issues/new"; lazy_static! { static ref ICE_HOOK: Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send + 'static> = { let hook = panic::take_hook(); panic::set_hook(Box::new(|info| report_clippy_ice(info, BUG_REPORT_URL))); hook }; } fn report_clippy_ice(info: &panic::PanicInfo<'_>, bug_report_url: &str) { // Invoke our ICE handler, which prints the actual panic message and optionally a backtrace (*ICE_HOOK)(info); // Separate the output with an empty line eprintln!(); let emitter = Box::new(rustc_errors::emitter::EmitterWriter::stderr( rustc_errors::ColorConfig::Auto, None, false, false, None, false, )); let handler = rustc_errors::Handler::with_emitter(true, None, emitter); // a .span_bug or .bug call has already printed what // it wants to print. if !info.payload().is::<rustc_errors::ExplicitBug>() { let d = rustc_errors::Diagnostic::new(rustc_errors::Level::Bug, "unexpected panic"); handler.emit_diagnostic(&d); } let version_info = rustc_tools_util::get_version_info!(); let xs: Vec<Cow<'static, str>> = vec![ "the compiler unexpectedly panicked. 
this is a bug.".into(), format!("we would appreciate a bug report: {}", bug_report_url).into(), format!("Clippy version: {}", version_info).into(), ]; for note in &xs { handler.note_without_error(&note); } // If backtraces are enabled, also print the query stack let backtrace = env::var_os("RUST_BACKTRACE").map_or(false, |x| &x != "0"); if backtrace { TyCtxt::try_print_query_stack(&handler); } } fn toolchain_path(home: Option<String>, toolchain: Option<String>) -> Option<PathBuf> { home.and_then(|home| { toolchain.map(|toolchain| { let mut path = PathBuf::from(home); path.push("toolchains"); path.push(toolchain); path }) }) } pub fn main() { rustc_driver::init_rustc_env_logger(); lazy_static::initialize(&ICE_HOOK); exit( rustc_driver::catch_fatal_errors(move || { let mut orig_args: Vec<String> = env::args().collect(); if orig_args.iter().any(|a| a == "--version" || a == "-V") { let version_info = rustc_tools_util::get_version_info!(); println!("{}", version_info); exit(0); } // Get the sysroot, looking from most specific to this invocation to the least: // - command line // - runtime environment // - SYSROOT // - RUSTUP_HOME, MULTIRUST_HOME, RUSTUP_TOOLCHAIN, MULTIRUST_TOOLCHAIN // - sysroot from rustc in the path // - compile-time environment // - SYSROOT // - RUSTUP_HOME, MULTIRUST_HOME, RUSTUP_TOOLCHAIN, MULTIRUST_TOOLCHAIN let sys_root_arg = arg_value(&orig_args, "--sysroot", |_| true); let have_sys_root_arg = sys_root_arg.is_some(); let sys_root = sys_root_arg .map(PathBuf::from) .or_else(|| std::env::var("SYSROOT").ok().map(PathBuf::from)) .or_else(|| { let home = std::env::var("RUSTUP_HOME") .or_else(|_| std::env::var("MULTIRUST_HOME")) .ok(); let toolchain = std::env::var("RUSTUP_TOOLCHAIN") .or_else(|_| std::env::var("MULTIRUST_TOOLCHAIN")) .ok(); toolchain_path(home, toolchain) }) .or_else(|| { Command::new("rustc") .arg("--print") .arg("sysroot") .output() .ok() .and_then(|out| String::from_utf8(out.stdout).ok()) .map(|s| PathBuf::from(s.trim())) }) .or_else(|| option_env!("SYSROOT").map(PathBuf::from)) .or_else(|| { let home = option_env!("RUSTUP_HOME") .or(option_env!("MULTIRUST_HOME")) .map(ToString::to_string); let toolchain = option_env!("RUSTUP_TOOLCHAIN") .or(option_env!("MULTIRUST_TOOLCHAIN")) .map(ToString::to_string); toolchain_path(home, toolchain) }) .map(|pb| pb.to_string_lossy().to_string()) .expect("need to specify SYSROOT env var during clippy compilation, or use rustup or multirust"); // Setting RUSTC_WRAPPER causes Cargo to pass 'rustc' as the first argument. 
// We're invoking the compiler programmatically, so we ignore this. let wrapper_mode = orig_args.get(1).map(Path::new).and_then(Path::file_stem) == Some("rustc".as_ref()); if wrapper_mode { // we still want to be able to invoke it normally though orig_args.remove(1); } if !wrapper_mode && (orig_args.iter().any(|a| a == "--help" || a == "-h") || orig_args.len() == 1) { display_help(); exit(0); } let should_describe_lints = || { let args: Vec<_> = env::args().collect(); args.windows(2).any(|args| { args[1] == "help" && match args[0].as_str() { "-W" | "-A" | "-D" | "-F" => true, _ => false, } }) }; if !wrapper_mode && should_describe_lints() { describe_lints(); exit(0); } // this conditional check for the --sysroot flag is there so users can call // `clippy_driver` directly // without having to pass --sysroot or anything let mut args: Vec<String> = orig_args.clone(); if !have_sys_root_arg { args.extend(vec!["--sysroot".into(), sys_root]); }; // this check ensures that dependencies are built but not linted and the final // crate is linted but not built let clippy_enabled = env::var("CLIPPY_TESTS").map_or(false, |val| val == "true") || arg_value(&orig_args, "--cap-lints", |val| val == "allow").is_none(); if clippy_enabled { args.extend(vec!["--cfg".into(), r#"feature="cargo-clippy""#.into()]); if let Ok(extra_args) = env::var("CLIPPY_ARGS") { args.extend(extra_args.split("__CLIPPY_HACKERY__").filter_map(|s| { if s.is_empty() { None } else
{ Some(s.to_string()) }
conditional_block
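The record above ends inside the CLIPPY_ARGS handling, where extra lint flags travel through a single environment variable joined by the "__CLIPPY_HACKERY__" marker and are split back into argv entries. A minimal, self-contained round-trip sketch of that encoding (the env-var name and separator come from the prefix; the sample flags are invented):

fn main() {
    // Join extra flags the way a wrapper would before spawning the driver.
    let extra = ["-W", "clippy::pedantic"].join("__CLIPPY_HACKERY__");
    std::env::set_var("CLIPPY_ARGS", &extra);

    // Split them back out, dropping empty fragments, as the prefix does.
    let restored: Vec<String> = std::env::var("CLIPPY_ARGS")
        .unwrap_or_default()
        .split("__CLIPPY_HACKERY__")
        .filter(|s| !s.is_empty())
        .map(ToString::to_string)
        .collect();
    assert_eq!(restored, ["-W", "clippy::pedantic"]);
}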
driver.rs
() { let args = &["--bar=bar", "--foobar", "123", "--foo"]; assert_eq!(arg_value(&[] as &[&str], "--foobar", |_| true), None); assert_eq!(arg_value(args, "--bar", |_| false), None); assert_eq!(arg_value(args, "--bar", |_| true), Some("bar")); assert_eq!(arg_value(args, "--bar", |p| p == "bar"), Some("bar")); assert_eq!(arg_value(args, "--bar", |p| p == "foo"), None); assert_eq!(arg_value(args, "--foobar", |p| p == "foo"), None); assert_eq!(arg_value(args, "--foobar", |p| p == "123"), Some("123")); assert_eq!(arg_value(args, "--foo", |_| true), None); } struct DefaultCallbacks; impl rustc_driver::Callbacks for DefaultCallbacks {} struct ClippyCallbacks; impl rustc_driver::Callbacks for ClippyCallbacks { fn config(&mut self, config: &mut interface::Config) { let previous = config.register_lints.take(); config.register_lints = Some(Box::new(move |sess, mut lint_store| { // technically we're ~guaranteed that this is none but might as well call anything that // is there already. Certainly it can't hurt. if let Some(previous) = &previous { (previous)(sess, lint_store); } let conf = clippy_lints::read_conf(&[], &sess); clippy_lints::register_plugins(&mut lint_store, &sess, &conf); clippy_lints::register_pre_expansion_lints(&mut lint_store, &conf); clippy_lints::register_renamed(&mut lint_store); })); // FIXME: #4825; This is required, because Clippy lints that are based on MIR have to be // run on the unoptimized MIR. On the other hand this results in some false negatives. If // MIR passes can be enabled / disabled separately, we should figure out, what passes to // use for Clippy. config.opts.debugging_opts.mir_opt_level = 0; } } #[allow(clippy::find_map, clippy::filter_map)] fn describe_lints() { use lintlist::{Level, Lint, ALL_LINTS, LINT_LEVELS}; use std::collections::HashSet; println!( " Available lint options: -W <foo> Warn about <foo> -A <foo> Allow <foo> -D <foo> Deny <foo> -F <foo> Forbid <foo> (deny <foo> and all attempts to override) " ); let lint_level = |lint: &Lint| { LINT_LEVELS .iter() .find(|level_mapping| level_mapping.0 == lint.group) .map(|(_, level)| match level { Level::Allow => "allow", Level::Warn => "warn", Level::Deny => "deny", }) .unwrap() }; let mut lints: Vec<_> = ALL_LINTS.iter().collect(); // The sort doesn't case-fold but it's doubtful we care. 
lints.sort_by_cached_key(|x: &&Lint| (lint_level(x), x.name)); let max_lint_name_len = lints .iter() .map(|lint| lint.name.len()) .map(|len| len + "clippy::".len()) .max() .unwrap_or(0); let padded = |x: &str| { let mut s = " ".repeat(max_lint_name_len - x.chars().count()); s.push_str(x); s }; let scoped = |x: &str| format!("clippy::{}", x); let lint_groups: HashSet<_> = lints.iter().map(|lint| lint.group).collect(); println!("Lint checks provided by clippy:\n"); println!(" {} {:7.7} meaning", padded("name"), "default"); println!(" {} {:7.7} -------", padded("----"), "-------"); let print_lints = |lints: &[&Lint]| { for lint in lints { let name = lint.name.replace("_", "-"); println!( " {} {:7.7} {}", padded(&scoped(&name)), lint_level(lint), lint.desc ); } println!("\n"); }; print_lints(&lints); let max_group_name_len = std::cmp::max( "clippy::all".len(), lint_groups .iter() .map(|group| group.len()) .map(|len| len + "clippy::".len()) .max() .unwrap_or(0), ); let padded_group = |x: &str| { let mut s = " ".repeat(max_group_name_len - x.chars().count()); s.push_str(x); s }; println!("Lint groups provided by clippy:\n"); println!(" {} sub-lints", padded_group("name")); println!(" {} ---------", padded_group("----")); println!(" {} the set of all clippy lints", padded_group("clippy::all")); let print_lint_groups = || { for group in lint_groups { let name = group.to_lowercase().replace("_", "-"); let desc = lints .iter() .filter(|&lint| lint.group == group) .map(|lint| lint.name) .map(|name| name.replace("_", "-")) .collect::<Vec<String>>() .join(", "); println!(" {} {}", padded_group(&scoped(&name)), desc); } println!("\n"); }; print_lint_groups(); } fn display_help() { println!( "\ Checks a package to catch common mistakes and improve your Rust code. Usage: cargo clippy [options] [--] [<opts>...] Common options: -h, --help Print this message -V, --version Print version info and exit Other options are the same as `cargo check`. To allow or deny a lint from the command line you can use `cargo clippy --` with: -W --warn OPT Set lint warnings -A --allow OPT Set lint allowed -D --deny OPT Set lint denied -F --forbid OPT Set lint forbidden You can use tool lints to allow or deny lints from your code, eg.: #[allow(clippy::needless_lifetimes)] " ); } const BUG_REPORT_URL: &str = "https://github.com/rust-lang/rust-clippy/issues/new"; lazy_static! { static ref ICE_HOOK: Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send + 'static> = { let hook = panic::take_hook(); panic::set_hook(Box::new(|info| report_clippy_ice(info, BUG_REPORT_URL))); hook }; } fn report_clippy_ice(info: &panic::PanicInfo<'_>, bug_report_url: &str) { // Invoke our ICE handler, which prints the actual panic message and optionally a backtrace (*ICE_HOOK)(info); // Separate the output with an empty line eprintln!(); let emitter = Box::new(rustc_errors::emitter::EmitterWriter::stderr( rustc_errors::ColorConfig::Auto, None, false, false, None, false, )); let handler = rustc_errors::Handler::with_emitter(true, None, emitter); // a .span_bug or .bug call has already printed what // it wants to print. if !info.payload().is::<rustc_errors::ExplicitBug>() { let d = rustc_errors::Diagnostic::new(rustc_errors::Level::Bug, "unexpected panic"); handler.emit_diagnostic(&d); } let version_info = rustc_tools_util::get_version_info!(); let xs: Vec<Cow<'static, str>> = vec![ "the compiler unexpectedly panicked. 
this is a bug.".into(), format!("we would appreciate a bug report: {}", bug_report_url).into(), format!("Clippy version: {}", version_info).into(), ]; for note in &xs { handler.note_without_error(&note); } // If backtraces are enabled, also print the query stack let backtrace = env::var_os("RUST_BACKTRACE").map_or(false, |x| &x != "0"); if backtrace { TyCtxt::try_print_query_stack(&handler); } } fn toolchain_path(home: Option<String>, toolchain: Option<String>) -> Option<PathBuf> { home.and_then(|home| { toolchain.map(|toolchain| { let mut path = PathBuf::from(home); path.push("toolchains"); path.push(toolchain); path }) }) } pub fn main() { rustc_driver::init_rustc_env_logger(); lazy_static::initialize(&ICE_HOOK); exit( rustc_driver::catch_fatal_errors(move || { let mut orig_args: Vec<String
test_arg_value
identifier_name
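The tests in this record's prefix pin down arg_value without showing its body: it must accept both "--flag value" and "--flag=value" forms and apply a predicate to the value. One plausible implementation that satisfies every assertion above (an inferred sketch, not necessarily the actual clippy source):

/// Return the value of `find_arg` in `args`, given in either `--flag value`
/// or `--flag=value` form, but only if `pred` accepts that value.
fn arg_value<'a, T: AsRef<str>>(
    args: &'a [T],
    find_arg: &str,
    pred: impl Fn(&str) -> bool,
) -> Option<&'a str> {
    let mut args = args.iter().map(AsRef::as_ref);
    while let Some(arg) = args.next() {
        let mut parts = arg.splitn(2, '=');
        if parts.next() != Some(find_arg) {
            continue;
        }
        // The value sits either after `=` or in the following argument.
        match parts.next().or_else(|| args.next()) {
            Some(v) if pred(v) => return Some(v),
            _ => {}
        }
    }
    None
}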
matrix_invert.rs
= matrix_a[idx] - 3.0; // just do it!!! // make a unit matrix let mut matrix_i = vec![0f32; nxy]; for r in 0..nx { matrix_i[r * nx + r] = 1.0; } println!("original matrix_a: "); print_matrix(&matrix_a, nx, nx); println!("original matrix_i: "); print_matrix(&matrix_i, nx, nx); // Create a context associated to this device let _context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?; let mut d_matrix_a = DeviceBuffer::from_slice(&matrix_a)?; let mut d_matrix_i = DeviceBuffer::from_slice(&matrix_i)?; // println!("include_str!(env!(KERNEL_PTX_PATH)) = {}", include_str!(env!("KERNEL_PTX_PATH"))); // Load the module containing the function we want to call let module_data = CString::new(include_str!(env!("KERNEL_PTX_PATH")))?; let module = Module::load_from_string(&module_data)?; // Create a stream to submit work to let stream = Stream::new(StreamFlags::NON_BLOCKING, None)?; let blocksize = 1; let threads_per_block = (blocksize, blocksize, 1); let b = (blocksize, blocksize, 1); let block = (b.0 as u32, b.1 as u32, b.2 as u32); let g = ( (nx as i32 + blocksize as i32 - 1) / blocksize as i32, (nx as i32 + blocksize as i32 - 1) / blocksize as i32, 1 as i32, ); let grid = (g.0 as u32, g.1 as u32, 1 as u32); println!("block = {:?}, grid = {:?}", block, grid); let start = Instant::now(); for i in 0..nx { unsafe { // Launch the `nodiag_normalize` kernel on the stream. let res = launch!(module.nodiag_normalize<<<grid, block, 0, stream>>>( d_matrix_a.as_device_ptr(), d_matrix_i.as_device_ptr(), nx, i, block.0, block.1 )); match res { Ok(_o) => (), Err(e) => println!("an error occurred: {}", e), } } unsafe { // Launch the `diag_normalize` kernel on the stream. let res = launch!(module.diag_normalize<<<grid, block, 0, stream>>>( d_matrix_a.as_device_ptr(), d_matrix_i.as_device_ptr(), nx, i, block.0, block.1 )); match res { Ok(_o) => (), Err(e) => println!("an error occurred: {}", e), } } unsafe { // Launch the `gaussjordan` kernel on the stream. let res = launch!(module.gaussjordan<<<grid, block, 0, stream>>>( d_matrix_a.as_device_ptr(), d_matrix_i.as_device_ptr(), nx, i, block.0, block.1 )); match res { Ok(_o) => (), Err(e) => println!("an error occurred: {}", e), } } unsafe { // Launch the `set_zero` kernel on the stream. let res = launch!(module.set_zero<<<grid, block, 0, stream>>>( d_matrix_a.as_device_ptr(), d_matrix_i.as_device_ptr(), nx, i, block.0, block.1 )); match res { Ok(_o) => (), Err(e) => println!("an error occurred: {}", e), } } } stream.synchronize()?; let duration_cuda = start.elapsed(); d_matrix_a.copy_to(&mut matrix_a)?; d_matrix_i.copy_to(&mut matrix_i)?; println!("duration gpu invert_matrix_2D2D: {:?}", duration_cuda); println!("gpu result inverted matrix: \n\n"); print_matrix(&matrix_i, nx, nx); println!("former input matrix "); print_matrix(&matrix_a, nx, nx); // // let start_cpu = Instant::now(); // let res_cpu = invert_matrix_cpu(&matrix_a, nx, ny); // let duration_cpu = start_cpu.elapsed(); // // println!("duration cpu: {:?}", duration_cpu); // // for x in 0..res_cpu.len() { // // assert_eq!(res_cpu[x], out_host[x]); // } // The kernel launch is asynchronous, so we wait for the kernel to finish executing Ok(()) } impl fmt::Display for Matrix { // This trait requires `fmt` with this exact signature.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "\nrows: {}, cols: {}\n", self.rows, self.cols)?; for row in 0..self.rows { for col in 0..self.cols { write!(f, " {} ", self.get(row, col))?; } write!(f, "\n ")?; } write!(f, "\n ") } } pub fn test_matrix_invert_cpu() { let mut m = Matrix::zero(3, 3); m.set(0, 0, 1.0); m.set(0, 1, 2.0); m.set(0, 2, 3.0); m.set(1, 0, 0.0); m.set(1, 1, 1.0); m.set(1, 2, 4.0); m.set(2, 0, 5.0); m.set(2, 1, 6.0); m.set(2, 2, 0.0); let mut expected = Matrix::zero(3, 3); expected.set(0, 0, -24.0); expected.set(0, 1, 18.0); expected.set(0, 2, 5.0); expected.set(1, 0, 20.0); expected.set(1, 1, -15.0); expected.set(1, 2, -4.0); expected.set(2, 0, -5.0); expected.set(2, 1, 4.0);
// calculate the inverse and compare with expected result let inv = matrix_invert_cpu(&m).unwrap(); assert_eq!(expected, inv); println!("original: {}", m); println!("inverted: {}", inv); } #[derive(Debug, PartialEq, Clone)] pub struct Matrix { data: Vec<f32>, rows: usize, cols: usize, } impl Matrix { pub fn one(rows: usize, cols: usize) -> Matrix { Matrix { rows: rows, cols: cols, data: vec![1.0; cols * rows], } } pub fn zero(rows: usize, cols: usize) -> Matrix { Matrix { rows: rows, cols: cols, data: vec![0.0; cols * rows], } } pub fn identity(rows: usize) -> Matrix { let mut m = Matrix::zero(rows, rows); for i in 0..rows { m.set(i, i, 1.0); } m } pub fn get_rows(&self) -> usize { self.rows } pub fn get_cols(&self) -> usize { self.cols } pub fn set(&mut self, row: usize, col: usize, value: f32) -> &mut Matrix { self.data[row * self.cols + col] = value; self } pub fn get(&self, row: usize, col: usize) -> f32 { self.data[row * self.cols + col] } } pub fn matrix_invert_cpu(mat_a: &Matrix) -> Result<Matrix, MathError> { if mat_a.rows != mat_a.cols { return Err(MathError::MatrixNotInvertableNotSquare); } let rows = mat_a.rows; let mut cols = mat_a.cols; // helper matrix for inverting let mut dummy = Matrix::zero(rows, 2 * cols); // copy matrix a to dummy (left half of dummy) for row in 0..rows { for col in 0..cols { dummy.set(row, col, mat_a.get(row, col)); } } // set identity matrix elements for row in 0..rows { dummy.set(row, cols + row, 1.0); } // apply all transformations to the identity matrix as well cols = 2 * mat_a.cols; let mut tmp: f32 = 0.0; for
expected.set(2, 2, 1.0);
random_line_split
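The inverse hard-coded as expected in test_matrix_invert_cpu can be checked by hand: the test matrix has determinant 1, so its inverse is exactly its adjugate, which is why every entry is an integer. A stand-alone check that A * A_inv = I for those values:

fn main() {
    let a = [[1.0f32, 2.0, 3.0], [0.0, 1.0, 4.0], [5.0, 6.0, 0.0]];
    let inv = [[-24.0f32, 18.0, 5.0], [20.0, -15.0, -4.0], [-5.0, 4.0, 1.0]];
    for r in 0..3 {
        for c in 0..3 {
            let dot: f32 = (0..3).map(|k| a[r][k] * inv[k][c]).sum();
            let expect = if r == c { 1.0 } else { 0.0 };
            assert!((dot - expect).abs() < 1e-4, "mismatch at ({}, {})", r, c);
        }
    }
    println!("expected inverse verified: A * A_inv == I");
}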
matrix_invert.rs
= matrix_a[idx] - 3.0; // just do it!!! // make a unit matrix let mut matrix_i = vec![0f32; nxy]; for r in 0..nx { matrix_i[r * nx + r] = 1.0; } println!("original matrix_a: "); print_matrix(&matrix_a, nx, nx); println!("original matrix_i: "); print_matrix(&matrix_i, nx, nx); // Create a context associated to this device let _context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?; let mut d_matrix_a = DeviceBuffer::from_slice(&matrix_a)?; let mut d_matrix_i = DeviceBuffer::from_slice(&matrix_i)?; // println!("include_str!(env!(KERNEL_PTX_PATH)) = {}", include_str!(env!("KERNEL_PTX_PATH"))); // Load the module containing the function we want to call let module_data = CString::new(include_str!(env!("KERNEL_PTX_PATH")))?; let module = Module::load_from_string(&module_data)?; // Create a stream to submit work to let stream = Stream::new(StreamFlags::NON_BLOCKING, None)?; let blocksize = 1; let threads_per_block = (blocksize, blocksize, 1); let b = (blocksize, blocksize, 1); let block = (b.0 as u32, b.1 as u32, b.2 as u32); let g = ( (nx as i32 + blocksize as i32 - 1) / blocksize as i32, (nx as i32 + blocksize as i32 - 1) / blocksize as i32, 1 as i32, ); let grid = (g.0 as u32, g.1 as u32, 1 as u32); println!("block = {:?}, grid = {:?}", block, grid); let start = Instant::now(); for i in 0..nx { unsafe { // Launch the `nodiag_normalize` kernel on the stream. let res = launch!(module.nodiag_normalize<<<grid, block, 0, stream>>>( d_matrix_a.as_device_ptr(), d_matrix_i.as_device_ptr(), nx, i, block.0, block.1 )); match res { Ok(_o) => (), Err(e) => println!("an error occurred: {}", e), } } unsafe { // Launch the `diag_normalize` kernel on the stream. let res = launch!(module.diag_normalize<<<grid, block, 0, stream>>>( d_matrix_a.as_device_ptr(), d_matrix_i.as_device_ptr(), nx, i, block.0, block.1 )); match res { Ok(_o) => (), Err(e) => println!("an error occurred: {}", e), } } unsafe { // Launch the `gaussjordan` kernel on the stream. let res = launch!(module.gaussjordan<<<grid, block, 0, stream>>>( d_matrix_a.as_device_ptr(), d_matrix_i.as_device_ptr(), nx, i, block.0, block.1 )); match res { Ok(_o) => (), Err(e) => println!("an error occurred: {}", e), } } unsafe { // Launch the `set_zero` kernel on the stream. let res = launch!(module.set_zero<<<grid, block, 0, stream>>>( d_matrix_a.as_device_ptr(), d_matrix_i.as_device_ptr(), nx, i, block.0, block.1 )); match res { Ok(_o) => (), Err(e) => println!("an error occurred: {}", e), } } } stream.synchronize()?; let duration_cuda = start.elapsed(); d_matrix_a.copy_to(&mut matrix_a)?; d_matrix_i.copy_to(&mut matrix_i)?; println!("duration gpu invert_matrix_2D2D: {:?}", duration_cuda); println!("gpu result inverted matrix: \n\n"); print_matrix(&matrix_i, nx, nx); println!("former input matrix "); print_matrix(&matrix_a, nx, nx); // // let start_cpu = Instant::now(); // let res_cpu = invert_matrix_cpu(&matrix_a, nx, ny); // let duration_cpu = start_cpu.elapsed(); // // println!("duration cpu: {:?}", duration_cpu); // // for x in 0..res_cpu.len() { // // assert_eq!(res_cpu[x], out_host[x]); // } // The kernel launch is asynchronous, so we wait for the kernel to finish executing Ok(()) } impl fmt::Display for Matrix { // This trait requires `fmt` with this exact signature.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "\nrows: {}, cols: {}\n", self.rows, self.cols)?; for row in 0..self.rows { for col in 0..self.cols { write!(f, " {} ", self.get(row, col))?; } write!(f, "\n ")?; } write!(f, "\n ") } } pub fn test_matrix_invert_cpu() { let mut m = Matrix::zero(3, 3); m.set(0, 0, 1.0); m.set(0, 1, 2.0); m.set(0, 2, 3.0); m.set(1, 0, 0.0); m.set(1, 1, 1.0); m.set(1, 2, 4.0); m.set(2, 0, 5.0); m.set(2, 1, 6.0); m.set(2, 2, 0.0); let mut expected = Matrix::zero(3, 3); expected.set(0, 0, -24.0); expected.set(0, 1, 18.0); expected.set(0, 2, 5.0); expected.set(1, 0, 20.0); expected.set(1, 1, -15.0); expected.set(1, 2, -4.0); expected.set(2, 0, -5.0); expected.set(2, 1, 4.0); expected.set(2, 2, 1.0); // calculate the inverse and compare with expected result let inv = matrix_invert_cpu(&m).unwrap(); assert_eq!(expected, inv); println!("original: {}", m); println!("inverted: {}", inv); } #[derive(Debug, PartialEq, Clone)] pub struct Matrix { data: Vec<f32>, rows: usize, cols: usize, } impl Matrix { pub fn one(rows: usize, cols: usize) -> Matrix { Matrix { rows: rows, cols: cols, data: vec![1.0; cols * rows], } } pub fn
(rows: usize, cols: usize) -> Matrix { Matrix { rows: rows, cols: cols, data: vec![0.0; cols * rows], } } pub fn identity(rows: usize) -> Matrix { let mut m = Matrix::zero(rows, rows); for i in 0..rows { m.set(i, i, 1.0); } m } pub fn get_rows(&self) -> usize { self.rows } pub fn get_cols(&self) -> usize { self.cols } pub fn set(&mut self, row: usize, col: usize, value: f32) -> &mut Matrix { self.data[row * self.cols + col] = value; self } pub fn get(&self, row: usize, col: usize) -> f32 { self.data[row * self.cols + col] } } pub fn matrix_invert_cpu(mat_a: &Matrix) -> Result<Matrix, MathError> { if mat_a.rows != mat_a.cols { return Err(MathError::MatrixNotInvertableNotSquare); } let rows = mat_a.rows; let mut cols = mat_a.cols; // helper matrix for inverting let mut dummy = Matrix::zero(rows, 2 * cols); // copy matrix a to dummy (left half of dummy) for row in 0..rows { for col in 0..cols { dummy.set(row, col, mat_a.get(row, col)); } } // set identity matrix elements for row in 0..rows { dummy.set(row, cols + row, 1.0); } // apply all transformations to the identity matrix as well cols = 2 * mat_a.cols; let mut tmp: f32 = 0.0;
zero
identifier_name
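The launch geometry in this record's prefix, (nx + blocksize - 1) / blocksize per grid axis, is integer ceiling division: it guarantees at least one thread per matrix element even when nx is not a multiple of blocksize. Isolated as a sketch:

// Number of blocks needed so that grid_dim * blocksize >= nx.
fn grid_dim(nx: u32, blocksize: u32) -> u32 {
    (nx + blocksize - 1) / blocksize
}

fn main() {
    assert_eq!(grid_dim(7, 2), 4); // 4 blocks of 2 threads cover 7 elements
    assert_eq!(grid_dim(8, 2), 4);
    assert_eq!(grid_dim(1, 1), 1);
}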
matrix_invert.rs
= matrix_a[idx] - 3.0; // just do it!!! // make a unit matrix let mut matrix_i = vec![0f32; nxy]; for r in 0..nx { matrix_i[r * nx + r] = 1.0; } println!("original matrix_a: "); print_matrix(&matrix_a, nx, nx); println!("original matrix_i: "); print_matrix(&matrix_i, nx, nx); // Create a context associated to this device let _context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?; let mut d_matrix_a = DeviceBuffer::from_slice(&matrix_a)?; let mut d_matrix_i = DeviceBuffer::from_slice(&matrix_i)?; // println!("include_str!(env!(KERNEL_PTX_PATH)) = {}", include_str!(env!("KERNEL_PTX_PATH"))); // Load the module containing the function we want to call let module_data = CString::new(include_str!(env!("KERNEL_PTX_PATH")))?; let module = Module::load_from_string(&module_data)?; // Create a stream to submit work to let stream = Stream::new(StreamFlags::NON_BLOCKING, None)?; let blocksize = 1; let threads_per_block = (blocksize, blocksize, 1); let b = (blocksize, blocksize, 1); let block = (b.0 as u32, b.1 as u32, b.2 as u32); let g = ( (nx as i32 + blocksize as i32 - 1) / blocksize as i32, (nx as i32 + blocksize as i32 - 1) / blocksize as i32, 1 as i32, ); let grid = (g.0 as u32, g.1 as u32, 1 as u32); println!("block = {:?}, grid = {:?}", block, grid); let start = Instant::now(); for i in 0..nx { unsafe { // Launch the `nodiag_normalize` kernel on the stream. let res = launch!(module.nodiag_normalize<<<grid, block, 0, stream>>>( d_matrix_a.as_device_ptr(), d_matrix_i.as_device_ptr(), nx, i, block.0, block.1 )); match res { Ok(_o) => (), Err(e) => println!("an error occurred: {}", e), } } unsafe { // Launch the `diag_normalize` kernel on the stream. let res = launch!(module.diag_normalize<<<grid, block, 0, stream>>>( d_matrix_a.as_device_ptr(), d_matrix_i.as_device_ptr(), nx, i, block.0, block.1 )); match res { Ok(_o) => (), Err(e) => println!("an error occurred: {}", e), } } unsafe { // Launch the `gaussjordan` kernel on the stream. let res = launch!(module.gaussjordan<<<grid, block, 0, stream>>>( d_matrix_a.as_device_ptr(), d_matrix_i.as_device_ptr(), nx, i, block.0, block.1 )); match res { Ok(_o) => (), Err(e) => println!("an error occurred: {}", e), } } unsafe { // Launch the `set_zero` kernel on the stream. let res = launch!(module.set_zero<<<grid, block, 0, stream>>>( d_matrix_a.as_device_ptr(), d_matrix_i.as_device_ptr(), nx, i, block.0, block.1 )); match res { Ok(_o) => (), Err(e) => println!("an error occurred: {}", e), } } } stream.synchronize()?; let duration_cuda = start.elapsed(); d_matrix_a.copy_to(&mut matrix_a)?; d_matrix_i.copy_to(&mut matrix_i)?; println!("duration gpu invert_matrix_2D2D: {:?}", duration_cuda); println!("gpu result inverted matrix: \n\n"); print_matrix(&matrix_i, nx, nx); println!("former input matrix "); print_matrix(&matrix_a, nx, nx); // // let start_cpu = Instant::now(); // let res_cpu = invert_matrix_cpu(&matrix_a, nx, ny); // let duration_cpu = start_cpu.elapsed(); // // println!("duration cpu: {:?}", duration_cpu); // // for x in 0..res_cpu.len() { // // assert_eq!(res_cpu[x], out_host[x]); // } // The kernel launch is asynchronous, so we wait for the kernel to finish executing Ok(()) } impl fmt::Display for Matrix { // This trait requires `fmt` with this exact signature.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "\nrows: {}, cols: {}\n", self.rows, self.cols)?; for row in 0..self.rows { for col in 0..self.cols { write!(f, " {} ", self.get(row, col))?; } write!(f, "\n ")?; } write!(f, "\n ") } } pub fn test_matrix_invert_cpu() { let mut m = Matrix::zero(3, 3); m.set(0, 0, 1.0); m.set(0, 1, 2.0); m.set(0, 2, 3.0); m.set(1, 0, 0.0); m.set(1, 1, 1.0); m.set(1, 2, 4.0); m.set(2, 0, 5.0); m.set(2, 1, 6.0); m.set(2, 2, 0.0); let mut expected = Matrix::zero(3, 3); expected.set(0, 0, -24.0); expected.set(0, 1, 18.0); expected.set(0, 2, 5.0); expected.set(1, 0, 20.0); expected.set(1, 1, -15.0); expected.set(1, 2, -4.0); expected.set(2, 0, -5.0); expected.set(2, 1, 4.0); expected.set(2, 2, 1.0); // calculate the inverse and compare with expected result let inv = matrix_invert_cpu(&m).unwrap(); assert_eq!(expected, inv); println!("original: {}", m); println!("inverted: {}", inv); } #[derive(Debug, PartialEq, Clone)] pub struct Matrix { data: Vec<f32>, rows: usize, cols: usize, } impl Matrix { pub fn one(rows: usize, cols: usize) -> Matrix { Matrix { rows: rows, cols: cols, data: vec![1.0; cols * rows], } } pub fn zero(rows: usize, cols: usize) -> Matrix { Matrix { rows: rows, cols: cols, data: vec![0.0; cols * rows], } } pub fn identity(rows: usize) -> Matrix { let mut m = Matrix::zero(rows, rows); for i in 0..rows { m.set(i, i, 1.0); } m } pub fn get_rows(&self) -> usize { self.rows } pub fn get_cols(&self) -> usize { self.cols } pub fn set(&mut self, row: usize, col: usize, value: f32) -> &mut Matrix { self.data[row * self.cols + col] = value; self } pub fn get(&self, row: usize, col: usize) -> f32 { self.data[row * self.cols + col] } } pub fn matrix_invert_cpu(mat_a: &Matrix) -> Result<Matrix, MathError>
// apply all transformations to the identity matrix as well cols = 2 * mat_a.cols; let mut tmp: f32 = 0.0;
{ if mat_a.rows != mat_a.cols { return Err(MathError::MatrixNotInvertableNotSquare); } let rows = mat_a.rows; let mut cols = mat_a.cols; // helper matrix for inverting let mut dummy = Matrix::zero(rows, 2 * cols); // copy matrix a to dummy (left half of dummy) for row in 0..rows { for col in 0..cols { dummy.set(row, col, mat_a.get(row, col)); } } // set identity matrix elements for row in 0..rows { dummy.set(row, cols + row, 1.0); }
identifier_body
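matrix_invert_cpu breaks off exactly where the elimination loop would begin. One way the elided loop could proceed over the [A | I] augmented dummy matrix, written here against a flat row-major slice rather than the Matrix type (an assumed completion: no pivoting or row swaps, so a zero pivot is reported as an error):

fn gauss_jordan(aug: &mut [f32], rows: usize, cols: usize) -> Result<(), &'static str> {
    for pivot in 0..rows {
        let p = aug[pivot * cols + pivot];
        if p.abs() < f32::EPSILON {
            return Err("zero pivot; a robust version would swap rows here");
        }
        // Normalize the pivot row so the pivot becomes 1.
        for col in 0..cols {
            aug[pivot * cols + col] /= p;
        }
        // Clear the pivot column in every other row.
        for row in 0..rows {
            if row == pivot {
                continue;
            }
            let factor = aug[row * cols + pivot];
            for col in 0..cols {
                aug[row * cols + col] -= factor * aug[pivot * cols + col];
            }
        }
    }
    // On success the right half of `aug` now holds the inverse.
    Ok(())
}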
classifier.py
h_pool2 = max_pool_2x2(h_conv2) # Fully connected layer 1 h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64]) W_fc1 = weight_variable([7 * 7 * 64, 1024]) b_fc1 = bias_variable([1024]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # Dropout h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) # Fully connected layer 2 (Output layer) W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2, name='y') return y def main(): from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) # Input layer x = tf.placeholder(tf.float32, [None, 784], name='x') y_ = tf.placeholder(tf.float32, [None, 10], name='y_') keep_prob = tf.placeholder(tf.float32) x_image = tf.reshape(x, [-1, 28, 28, 1]) y = models_mnist.cnn_classifier_2(x=x_image, name='classifier',keep_prob=keep_prob, reuse=False)#create model # Evaluation functions cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy') # Training algorithm c_var = tf.trainable_variables('classifier') train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy, var_list=c_var) # Saver c_saver = tf.train.Saver(var_list=c_var) saver = tf.train.Saver() # Training steps with tf.Session() as sess: sess.run(tf.initialize_all_variables()) # c_saver.restore(sess, "results/cnn_classifier/checkpoint/model.ckpt") max_steps = 120 for step in range(max_steps):
print(max_steps, sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) save_path = saver.save(sess, "results/cnn_classifier-med-train/checkpoint/model.ckpt") # print('Test Acc', sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) print("Model saved in path: %s" % save_path) print(" [*] Close main session!") sess.close() main() # import utils # import traceback # import numpy as np # import tensorflow as tf # import models_mnist as models # import datetime # import my_utils # # from classifier import cnn_classifier # # # """ param """ # epoch = 200 # batch_size = 128 # batch_size2 = 64 # lr = 0.0002 # z_dim = 100 # beta = 1 #diversity hyper param # # clip = 0.01 # n_critic = 1 # # n_generator = 1 # gan_type="experiment" # dir="results/"+gan_type+"-"+datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # # np.random.seed(0) # tf.set_random_seed(1234) # # # restore = False # # ckpt_dir = # # ''' data ''' # data_pool = my_utils.getMNISTDatapool(batch_size, keep=[0, 1,8]) # # """ graphs """ # generator = models.ss_generator_2 # discriminator = models.ss_discriminator # optimizer = tf.train.AdamOptimizer # # # # inputs # real = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) # z = tf.placeholder(tf.float32, shape=[None, z_dim]) # # # # generator # fake = generator(z, reuse=False, name="g1") # fake2 = generator(z, reuse=False, name="g2") # # # discriminator # r_logit = discriminator(real, reuse=False, name="d1") # f1_logit = discriminator(fake, name="d1") # f2_logit = discriminator(fake2, name="d1") # # #supplement discriminator # f1_c = cnn_classifier(x_image=fake,keep_prob=1., reuse=False)#create model # f2_c = cnn_classifier(x_image=fake2, keep_prob=1.)#create model # # f1_c = discriminator(fake, reuse=False, name="d2") # # f2_c = discriminator(fake2, name="d2") # # #discriminator loss # D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=r_logit, labels=tf.ones_like(r_logit))) # D_loss_fake1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f1_logit, labels=tf.zeros_like(f1_logit))) # D_loss_fake2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f2_logit, labels=tf.zeros_like(f2_logit))) # d_loss = D_loss_real + D_loss_fake1 + D_loss_fake2 # # d_loss = D_loss_real + D_loss_fake1 # # #supplement discriminator loss # onehot_labels_zero = tf.one_hot(indices=tf.zeros(batch_size, tf.int32), depth=10) # onehot_labels_one = tf.one_hot(indices=tf.ones(batch_size, tf.int32), depth=10) # D2_loss_f1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=f1_c, labels=onehot_labels_zero)) # D2_loss_f2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=f2_c, labels=onehot_labels_one)) # # d2_loss = D2_loss_f1 + D2_loss_f2 # # #generator loss # g1_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f1_logit, labels=tf.ones_like(f1_logit))) # g2_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f2_logit, labels=tf.ones_like(f2_logit))) # g1_loss += beta*D2_loss_f1 # g2_loss += beta*D2_loss_f2 # g_loss = g1_loss + g2_loss # # # trainable variables for each network # T_vars = tf.trainable_variables() # # G_vars = tf.global_variables() # d_var = [var for var in T_vars if var.name.startswith('d1')] # g1_var = [var for var in T_vars if var.name.startswith('g1')] # g2_var = [var for var in T_vars if var.name.startswith('g2')] # c_var = [var for var in T_vars if var.name.startswith('classifier')] # # # optims # global_step = 
tf.Variable(0, name='global_step',trainable=False) # d_step = optimizer(learning_rate=lr, beta1=0.5).minimize(d_loss, var_list=d_var, global_step=global_step) # # d2_step = optimizer(learning_rate=lr, beta1=0.5).minimize(d2_loss, var_list=d2_var) # # g_step = optimizer(learning_rate=lr).minimize(g1_loss, var_list=g1_var) # # g2_step = optimizer(learning_rate=lr).minimize(g2_loss, var_list=g2_var) # G_step = optimizer(learning_rate=lr, beta1=0.5).minimize(g_loss, var_list=g1_var + g2_var) # """ train """ # ''' init ''' # # session # sess = tf.InteractiveSession() # # # saver # saver = tf.train.Saver(max_to_keep=5) # c_saver = tf.train.Saver(var_list=c_var) # # summary writer # # Send summary statistics to TensorBoard # tf.summary.scalar('G1_loss', g1_loss) # tf.summary.scalar('G2_loss', g2_loss) # tf.summary.scalar('G_loss', g_loss) # tf.summary.scalar('Discriminator_loss', d_loss) # # tf.summary.scalar('Supplement_Discriminator_loss', d2_loss) # images_form_g1 = generator(z, name="g1", training= False) # images_form_g2 = generator(z, name="
batch_xs, batch_ys = mnist.train.next_batch(50) # 0 ~ 1 # batch_xs = batch_xs*2-1 # -1 ~ 1, bad results if (step % 10) == 0: print(step, sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})
conditional_block
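The 7 * 7 * 64 flatten size in this record's prefix follows from two stride-2 max-pools over 28x28 MNIST images: 28 -> 14 -> 7 per side, times the 64 channels of the second conv layer. The bookkeeping as a Rust sketch (SAME padding assumed, matching the conv/pool setup):

// Side length after `pools` rounds of 2x2/stride-2 pooling with SAME padding.
fn pooled(dim: u32, pools: u32) -> u32 {
    (0..pools).fold(dim, |d, _| (d + 1) / 2) // ceil(d / 2) each round
}

fn main() {
    let side = pooled(28, 2); // 28 -> 14 -> 7
    assert_eq!(side, 7);
    assert_eq!(side * side * 64, 3136); // the 7*7*64 flatten width
}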
classifier.py
def cnn_classifier(x_image, keep_prob, name="classifier", reuse=True): with tf.variable_scope(name, reuse=reuse): # Convolutional layer 1 W_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) h_pool1 = max_pool_2x2(h_conv1) # Convolutional layer 2 W_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) h_pool2 = max_pool_2x2(h_conv2) # Fully connected layer 1 h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64]) W_fc1 = weight_variable([7 * 7 * 64, 1024]) b_fc1 = bias_variable([1024]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # Dropout h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) # Fully connected layer 2 (Output layer) W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2, name='y') return y def main(): from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) # Input layer x = tf.placeholder(tf.float32, [None, 784], name='x') y_ = tf.placeholder(tf.float32, [None, 10], name='y_') keep_prob = tf.placeholder(tf.float32) x_image = tf.reshape(x, [-1, 28, 28, 1]) y = models_mnist.cnn_classifier_2(x=x_image, name='classifier',keep_prob=keep_prob, reuse=False)#create model # Evaluation functions cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy') # Training algorithm c_var = tf.trainable_variables('classifier') train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy, var_list=c_var) # Saver c_saver = tf.train.Saver(var_list=c_var) saver = tf.train.Saver() # Training steps with tf.Session() as sess: sess.run(tf.initialize_all_variables()) # c_saver.restore(sess, "results/cnn_classifier/checkpoint/model.ckpt") max_steps = 120 for step in range(max_steps): batch_xs, batch_ys = mnist.train.next_batch(50) # 0 ~ 1 # batch_xs = batch_xs*2-1 # -1 ~ 1, bad results if (step % 10) == 0: print(step, sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5}) print(max_steps, sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) save_path = saver.save(sess, "results/cnn_classifier-med-train/checkpoint/model.ckpt") # print('Test Acc', sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) print("Model saved in path: %s" % save_path) print(" [*] Close main session!") sess.close() main() # import utils # import traceback # import numpy as np # import tensorflow as tf # import models_mnist as models # import datetime # import my_utils # # from classifier import cnn_classifier # # # """ param """ # epoch = 200 # batch_size = 128 # batch_size2 = 64 # lr = 0.0002 # z_dim = 100 # beta = 1 #diversity hyper param # # clip = 0.01 # n_critic = 1 # # n_generator = 1 # gan_type="experiment" # dir="results/"+gan_type+"-"+datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # # np.random.seed(0) # tf.set_random_seed(1234) # # # restore = False # # ckpt_dir = # # ''' data ''' # data_pool = my_utils.getMNISTDatapool(batch_size, keep=[0, 1,8]) # # """ graphs """ # generator = models.ss_generator_2 # discriminator = models.ss_discriminator # optimizer = tf.train.AdamOptimizer 
# # # # inputs # real = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) # z = tf.placeholder(tf.float32, shape=[None, z_dim]) # # # # generator # fake = generator(z, reuse=False, name="g1") # fake2 = generator(z, reuse=False, name="g2") # # # discriminator # r_logit = discriminator(real, reuse=False, name="d1") # f1_logit = discriminator(fake, name="d1") # f2_logit = discriminator(fake2, name="d1") # # #supplement discriminator # f1_c = cnn_classifier(x_image=fake,keep_prob=1., reuse=False)#create model # f2_c = cnn_classifier(x_image=fake2, keep_prob=1.)#create model # # f1_c = discriminator(fake, reuse=False, name="d2") # # f2_c = discriminator(fake2, name="d2") # # #discriminator loss # D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=r_logit, labels=tf.ones_like(r_logit))) # D_loss_fake1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f1_logit, labels=tf.zeros_like(f1_logit))) # D_loss_fake2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f2_logit, labels=tf.zeros_like(f2_logit))) # d_loss = D_loss_real + D_loss_fake1 + D_loss_fake2 # # d_loss = D_loss_real + D_loss_fake1 # # #supplement discriminator loss # onehot_labels_zero = tf.one_hot(indices=tf.zeros(batch_size, tf.int32), depth=10) # onehot_labels_one = tf.one_hot(indices=tf.ones(batch_size, tf.int32), depth=10) # D2_loss_f1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=f1_c, labels=onehot_labels_zero)) # D2_loss_f2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=f2_c, labels=onehot_labels_one)) # # d2_loss = D2_loss_f1 + D2_loss_f2 # # #generator loss # g1_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f1_logit, labels=tf.ones_like(f1_logit))) # g2_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f2_logit, labels=tf.ones_like(f2_logit))) # g1_loss += beta*D2_loss_f1 # g2_loss += beta*D2_loss_f2 # g_loss = g1_loss + g2_loss # # # trainable variables for each network # T_vars = tf.trainable_variables() # # G_vars = tf.global_variables() # d_var = [var for var in T_vars if var.name.startswith('d1')] # g1_var = [var for var in T_vars if var.name.startswith('g1')] # g2_var = [var for var in T_vars if var.name.startswith('g2')] # c_var = [var for var in T_vars if var.name.startswith('classifier')] # # # optims # global_step = tf.Variable(0, name='global_step',trainable=False) # d_step = optimizer(learning_rate=lr, beta1=0.5).minimize(d_loss, var_list=d_var, global_step=global_step) # # d2_step = optimizer(learning_rate=lr, beta1=0.5).minimize(d2_loss, var_list=d2_var) # # g_step = optimizer(learning_rate=lr).minimize(g1_loss, var_list=g1_var) # # g2_step = optimizer(learning_rate
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
identifier_body
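The middle of this record is the one-line body of max_pool_2x2, a wrapper over tf.nn.max_pool with a 2x2 window and stride 2. The same operation spelled out for a single channel (a sketch; even input dimensions assumed for brevity):

fn max_pool_2x2(input: &[f32], h: usize, w: usize) -> Vec<f32> {
    let (oh, ow) = (h / 2, w / 2);
    let mut out = vec![f32::NEG_INFINITY; oh * ow];
    for r in 0..oh {
        for c in 0..ow {
            // Take the max over the 2x2 window anchored at (2r, 2c).
            for dr in 0..2 {
                for dc in 0..2 {
                    let v = input[(2 * r + dr) * w + (2 * c + dc)];
                    if v > out[r * ow + c] {
                        out[r * ow + c] = v;
                    }
                }
            }
        }
    }
    out
}

fn main() {
    let x = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; // a 2x4 image
    assert_eq!(max_pool_2x2(&x, 2, 4), vec![6.0, 8.0]);
}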
classifier.py
(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) def conv2d(x, W): return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') def max_pool_2x2(x): return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') def cnn_classifier(x_image, keep_prob, name="classifier", reuse=True): with tf.variable_scope(name, reuse=reuse): # Convolutional layer 1 W_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) h_pool1 = max_pool_2x2(h_conv1) # Convolutional layer 2 W_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) h_pool2 = max_pool_2x2(h_conv2) # Fully connected layer 1 h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64]) W_fc1 = weight_variable([7 * 7 * 64, 1024]) b_fc1 = bias_variable([1024]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # Dropout h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) # Fully connected layer 2 (Output layer) W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2, name='y') return y def main(): from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) # Input layer x = tf.placeholder(tf.float32, [None, 784], name='x') y_ = tf.placeholder(tf.float32, [None, 10], name='y_') keep_prob = tf.placeholder(tf.float32) x_image = tf.reshape(x, [-1, 28, 28, 1]) y = models_mnist.cnn_classifier_2(x=x_image, name='classifier',keep_prob=keep_prob, reuse=False)#create model # Evaluation functions cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy') # Training algorithm c_var = tf.trainable_variables('classifier') train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy, var_list=c_var) # Saver c_saver = tf.train.Saver(var_list=c_var) saver = tf.train.Saver() # Training steps with tf.Session() as sess: sess.run(tf.initialize_all_variables()) # c_saver.restore(sess, "results/cnn_classifier/checkpoint/model.ckpt") max_steps = 120 for step in range(max_steps): batch_xs, batch_ys = mnist.train.next_batch(50) # 0 ~ 1 # batch_xs = batch_xs*2-1 # -1 ~ 1, bad results if (step % 10) == 0: print(step, sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5}) print(max_steps, sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) save_path = saver.save(sess, "results/cnn_classifier-med-train/checkpoint/model.ckpt") # print('Test Acc', sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) print("Model saved in path: %s" % save_path) print(" [*] Close main session!") sess.close() main() # import utils # import traceback # import numpy as np # import tensorflow as tf # import models_mnist as models # import datetime # import my_utils # # from classifier import cnn_classifier # # # """ param """ # epoch = 200 # batch_size = 128 # batch_size2 = 64 # lr = 0.0002 # z_dim = 100 # beta = 1 #diversity hyper param # # clip = 0.01 # n_critic = 1 # # n_generator = 1 # gan_type="experiment" # 
dir="results/"+gan_type+"-"+datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # # np.random.seed(0) # tf.set_random_seed(1234) # # # restore = False # # ckpt_dir = # # ''' data ''' # data_pool = my_utils.getMNISTDatapool(batch_size, keep=[0, 1,8]) # # """ graphs """ # generator = models.ss_generator_2 # discriminator = models.ss_discriminator # optimizer = tf.train.AdamOptimizer # # # # inputs # real = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) # z = tf.placeholder(tf.float32, shape=[None, z_dim]) # # # # generator # fake = generator(z, reuse=False, name="g1") # fake2 = generator(z, reuse=False, name="g2") # # # discriminator # r_logit = discriminator(real, reuse=False, name="d1") # f1_logit = discriminator(fake, name="d1") # f2_logit = discriminator(fake2, name="d1") # # #supplement discriminator # f1_c = cnn_classifier(x_image=fake,keep_prob=1., reuse=False)#create model # f2_c = cnn_classifier(x_image=fake2, keep_prob=1.)#create model # # f1_c = discriminator(fake, reuse=False, name="d2") # # f2_c = discriminator(fake2, name="d2") # # #discriminator loss # D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=r_logit, labels=tf.ones_like(r_logit))) # D_loss_fake1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f1_logit, labels=tf.zeros_like(f1_logit))) # D_loss_fake2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f2_logit, labels=tf.zeros_like(f2_logit))) # d_loss = D_loss_real + D_loss_fake1 + D_loss_fake2 # # d_loss = D_loss_real + D_loss_fake1 # # #supplement discriminator loss # onehot_labels_zero = tf.one_hot(indices=tf.zeros(batch_size, tf.int32), depth=10) # onehot_labels_one = tf.one_hot(indices=tf.ones(batch_size, tf.int32), depth=10) # D2_loss_f1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=f1_c, labels=onehot_labels_zero)) # D2_loss_f2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=f2_c, labels=onehot_labels_one)) # # d2_loss = D2_loss_f1 + D2_loss_f2 # # #generator loss # g1_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f1_logit, labels=tf.ones_like(f1_logit))) # g2_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f2_logit, labels=tf.ones_like(f2_logit))) # g1_loss += beta*D2_loss_f1 # g2_loss += beta*D2_loss_f2 # g_loss = g1_loss + g2_loss # # # trainable variables for each network # T_vars = tf.trainable_variables() # # G_vars = tf.global_variables() # d_var = [var for var in T_vars if var.name.startswith('d1')] # g1_var = [var for var in T_vars if var.name.startswith('g1')] # g2_var = [var for var in T_vars if var.name.startswith('g2')] # c_var = [var for var in T_vars if var.name.startswith('classifier')] # # # optims # global_step = tf.Variable(0, name='global_step',trainable=False) # d_step = optimizer
weight_variable
identifier_name
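The accuracy node built in main() is simply "argmax of the prediction equals argmax of the one-hot label, averaged over the batch". Restated outside the TensorFlow graph as plain functions (illustrative names, not any TensorFlow API):

fn argmax(v: &[f32]) -> usize {
    v.iter()
        .enumerate()
        .max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
        .map(|(i, _)| i)
        .unwrap()
}

fn accuracy(preds: &[Vec<f32>], labels: &[Vec<f32>]) -> f32 {
    let correct = preds
        .iter()
        .zip(labels)
        .filter(|(p, l)| argmax(p) == argmax(l))
        .count();
    correct as f32 / preds.len() as f32
}

fn main() {
    let preds = vec![vec![0.1, 0.9], vec![0.8, 0.2]];
    let labels = vec![vec![0.0, 1.0], vec![0.0, 1.0]]; // both truly class 1
    assert!((accuracy(&preds, &labels) - 0.5).abs() < 1e-6);
}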
classifier.py
h_pool2 = max_pool_2x2(h_conv2) # Fully connected layer 1 h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64]) W_fc1 = weight_variable([7 * 7 * 64, 1024]) b_fc1 = bias_variable([1024]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # Dropout h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Fully connected layer 2 (Output layer) W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2, name='y') return y def main(): from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) # Input layer x = tf.placeholder(tf.float32, [None, 784], name='x') y_ = tf.placeholder(tf.float32, [None, 10], name='y_') keep_prob = tf.placeholder(tf.float32) x_image = tf.reshape(x, [-1, 28, 28, 1]) y = models_mnist.cnn_classifier_2(x=x_image, name='classifier',keep_prob=keep_prob, reuse=False)#create model # Evaluation functions cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy') # Training algorithm c_var = tf.trainable_variables('classifier') train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy, var_list=c_var) # Saver c_saver = tf.train.Saver(var_list=c_var) saver = tf.train.Saver() # Training steps with tf.Session() as sess: sess.run(tf.initialize_all_variables()) # c_saver.restore(sess, "results/cnn_classifier/checkpoint/model.ckpt") max_steps = 120 for step in range(max_steps): batch_xs, batch_ys = mnist.train.next_batch(50) # 0 ~ 1 # batch_xs = batch_xs*2-1 # -1 ~ 1, bad results if (step % 10) == 0: print(step, sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5}) print(max_steps, sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) save_path = saver.save(sess, "results/cnn_classifier-med-train/checkpoint/model.ckpt") # print('Test Acc', sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) print("Model saved in path: %s" % save_path) print(" [*] Close main session!") sess.close() main() # import utils # import traceback # import numpy as np # import tensorflow as tf # import models_mnist as models # import datetime # import my_utils # # from classifier import cnn_classifier # # # """ param """ # epoch = 200 # batch_size = 128 # batch_size2 = 64 # lr = 0.0002 # z_dim = 100 # beta = 1 #diversity hyper param # # clip = 0.01 # n_critic = 1 # # n_generator = 1 # gan_type="experiment" # dir="results/"+gan_type+"-"+datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # # np.random.seed(0) # tf.set_random_seed(1234) # # # restore = False # # ckpt_dir = # # ''' data ''' # data_pool = my_utils.getMNISTDatapool(batch_size, keep=[0, 1,8]) # # """ graphs """ # generator = models.ss_generator_2 # discriminator = models.ss_discriminator # optimizer = tf.train.AdamOptimizer # # # # inputs # real = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) # z = tf.placeholder(tf.float32, shape=[None, z_dim]) # # # # generator # fake = generator(z, reuse=False, name="g1") # fake2 = generator(z, reuse=False, name="g2") # # # discriminator # r_logit = discriminator(real, reuse=False, name="d1") # f1_logit = discriminator(fake, name="d1") # f2_logit = discriminator(fake2, name="d1") # # #supplement discriminator # f1_c = cnn_classifier(x_image=fake,keep_prob=1., reuse=False)#create model # f2_c = cnn_classifier(x_image=fake2, keep_prob=1.)#create model # # f1_c = discriminator(fake, reuse=False, name="d2") # # f2_c = discriminator(fake2, name="d2") # # #discriminator loss # D_loss_real = 
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=r_logit, labels=tf.ones_like(r_logit))) # D_loss_fake1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f1_logit, labels=tf.zeros_like(f1_logit))) # D_loss_fake2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f2_logit, labels=tf.zeros_like(f2_logit))) # d_loss = D_loss_real + D_loss_fake1 + D_loss_fake2 # # d_loss = D_loss_real + D_loss_fake1 # # #supplement discriminator loss # onehot_labels_zero = tf.one_hot(indices=tf.zeros(batch_size, tf.int32), depth=10) # onehot_labels_one = tf.one_hot(indices=tf.ones(batch_size, tf.int32), depth=10) # D2_loss_f1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=f1_c, labels=onehot_labels_zero)) # D2_loss_f2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=f2_c, labels=onehot_labels_one)) # # d2_loss = D2_loss_f1 + D2_loss_f2 # # #generator loss # g1_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f1_logit, labels=tf.ones_like(f1_logit))) # g2_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f2_logit, labels=tf.ones_like(f2_logit))) # g1_loss += beta*D2_loss_f1 # g2_loss += beta*D2_loss_f2 # g_loss = g1_loss + g2_loss # # # trainable variables for each network # T_vars = tf.trainable_variables() # # G_vars = tf.global_variables() # d_var = [var for var in T_vars if var.name.startswith('d1')] # g1_var = [var for var in T_vars if var.name.startswith('g1')] # g2_var = [var for var in T_vars if var.name.startswith('g2')] # c_var = [var for var in T_vars if var.name.startswith('classifier')] # # # optims # global_step = tf.Variable(0, name='global_step',trainable=False) # d_step = optimizer(learning_rate=lr, beta1=0.5).minimize(d_loss, var_list=d_var, global_step=global_step) # # d2_step = optimizer(learning_rate=lr, beta1=0.5).minimize(d2_loss, var_list=d2_var) # # g_step = optimizer(learning_rate=lr).minimize(g1_loss, var_list=g1_var) # # g2_step = optimizer(learning_rate=lr).minimize(g2_loss, var_list=g2_var) # G_step = optimizer(learning_rate=lr, beta1=0.5).minimize(g_loss, var_list=g1_var + g2_var) # """ train """ # ''' init ''' # # session # sess = tf.InteractiveSession() # # # saver # saver = tf.train.Saver(max_to_keep=5) # c_saver = tf.train.Saver(var_list=c_var) # # summary writer # # Send summary statistics to TensorBoard # tf.summary.scalar('G1_loss', g1_loss) # tf.summary.scalar('G2_loss', g2_loss) # tf.summary.scalar('G_loss', g_loss) # tf.summary.scalar('Discriminator_loss', d_loss) # # tf.summary.scalar('Supplement_Discriminator_loss', d2_loss) # images_form_g1 = generator(z, name="g1", training= False) # images_form_g2 = generator(z, name="g
random_line_split
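tf.nn.dropout with keep_prob, applied to h_fc1 in the prefix, implements inverted dropout: each activation survives with probability keep_prob and survivors are scaled by 1/keep_prob so the expected activation is unchanged, which is why evaluation feeds keep_prob: 1.0. A dependency-free sketch of those semantics (the xorshift generator is only there to avoid external crates):

fn dropout(acts: &mut [f32], keep_prob: f32, seed: &mut u64) {
    for a in acts.iter_mut() {
        // xorshift64: a crude but dependency-free uniform source
        *seed ^= *seed << 13;
        *seed ^= *seed >> 7;
        *seed ^= *seed << 17;
        let u = (*seed % 1_000_000) as f32 / 1_000_000.0;
        *a = if u < keep_prob { *a / keep_prob } else { 0.0 };
    }
}

fn main() {
    let mut h = vec![1.0f32; 8];
    let mut seed = 0x9E37_79B9_7F4A_7C15u64;
    dropout(&mut h, 0.5, &mut seed);
    println!("{:?}", h); // roughly half zeros, survivors scaled to 2.0
}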
replica.go
create_and_lock(USERLIST_FILENAME) // lock userlist file for editing defer lock_for_files_map[USERLIST_FILENAME].Unlock() file, open_err := os.OpenFile(USERLIST_FILENAME, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600) defer file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } text := uname + " : " + psw + "\r\n" if _, write_err := file.WriteString(text); write_err != nil { return fmt.Sprintf("error: Server write file error%s\n", END_TAG), false } //create user data file u_file_name := uname + ".txt" create_and_lock(u_file_name) // lock user file for deleting and recreating defer lock_for_files_map[u_file_name].Unlock() os.Remove(u_file_name) // clear old junk created_file, create_err := os.Create(u_file_name) defer created_file.Close() if create_err != nil { return fmt.Sprintf("error: Server create error%s\n", END_TAG), false } else { //response return fmt.Sprintf("success: I added user %s.%s\n", uname, END_TAG), true } } else { //negative response return fmt.Sprintf("error: user, %s, already exists.%s\n", uname, END_TAG), false } } /*Adds a new message under the user with given uname, by writing to the database file containing the user's stored messages. Locks message file of user*/ func add_message(uname string, new_message string) (string, bool) { filename := uname + ".txt" create_and_lock(filename) // lock user message file for editing defer lock_for_files_map[filename].Unlock() message_file, open_err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600) defer message_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } //write new message to file newline := "\r\n" text_to_write := new_message + newline + USER_MESSAGE_SEPERATOR + newline if _, write_err := message_file.WriteString(text_to_write); write_err != nil { return fmt.Sprintf("error: server failed to write.%s\n", END_TAG), false } else { return fmt.Sprintf("success: added message for %s.%s\n", uname, END_TAG), true } } /*deletes user from userlist file and deletes the message file associated with that user. Locks usermap and message file of user that is being deleted*/ func delete_user(uname string) (string, bool) { //delete user from server memory user_map_lock.Lock() delete(user_map, uname) user_map_lock.Unlock() err := rewrite_userlist() //delete user from user list file if err != nil { return fmt.Sprintf("error: Server rewrite userlist error%s\n", END_TAG), false } //delete user message file filename := uname + ".txt" create_and_lock(filename) // lock the file we want to delete defer lock_for_files_map[filename].Unlock() os.Remove(filename) //respond success return fmt.Sprintf("success: Deleted user %s.%s\n", uname, END_TAG), true } /*reads messages from user file database locks message file of user*/ func read_messages(uname string, num_messages int) (string, bool) { filename := uname + ".txt" create_and_lock(filename) // lock user message file defer lock_for_files_map[filename].Unlock() message_file, open_err := os.OpenFile(filename, os.O_CREATE, 0600) //create file if not exist defer message_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } messages_in_byte, read_err := ioutil.ReadFile(filename) if read_err != nil { return fmt.Sprintf("error: Server read error%s\n", END_TAG), false } messages_in_string := string(messages_in_byte) message_array := strings.SplitAfter(messages_in_string, USER_MESSAGE_SEPERATOR) message_array = message_array[0 : len(message_array)-1] //last index
is empty because of how SplitAfter works recent_messages := message_array if num_messages < len(message_array) { //only show the most recent num_messages if more than that exist recent_messages = message_array[len(message_array)-num_messages:] } //fmt.Fprintf(os.Stderr, "printing message%s\n", recent_messages) //give back most recent num_messages of messages response := "" for _, message := range recent_messages { response += message + "\n" } return fmt.Sprintf("success: %s%s\n", response, END_TAG), false } func sub_feed(uname string, num_messages int) (string, bool) { sub_filename := uname + SUBSCRIPTION_FILE_TAG create_and_lock(sub_filename) defer lock_for_files_map[sub_filename].Unlock() //open sublist file and store subscribed names to sublist sublist_file, open_err := os.OpenFile(sub_filename, os.O_CREATE|os.O_RDONLY, 0600) defer sublist_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } sublist := []string{} scanner := bufio.NewScanner(sublist_file) for scanner.Scan() { scanned_name := scanner.Text() sublist = append(sublist, scanned_name) } response := "" //append num_messages from each sub_uname for _, sub_uname := range sublist { message_filename := sub_uname + ".txt" create_and_lock(message_filename) // lock user message file defer lock_for_files_map[message_filename].Unlock() message_file, open_err := os.OpenFile(message_filename, os.O_CREATE, 0600) //create file if not exist defer message_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } messages_in_byte, read_err := ioutil.ReadFile(message_filename) if read_err != nil { return fmt.Sprintf("error: Server read error%s\n", END_TAG), false } messages_in_string := string(messages_in_byte) message_array := strings.SplitAfter(messages_in_string, USER_MESSAGE_SEPERATOR) message_array = message_array[0 : len(message_array)-1] //last index is empty because of how SplitAfter works recent_messages := message_array if num_messages < len(message_array) { //only show the most recent num_messages if more than that exist recent_messages = message_array[len(message_array)-num_messages:] } response += "<br /><b>Recent messages from - " + sub_uname + "</b><br />" for _, message := range recent_messages { response += message + "\n" } } return fmt.Sprintf("success: %s%s\n", response, END_TAG), false } /*subscribes user to another user by writing sub_uname, the subscribe target, in the main user's sublist.txt*/ func subscribe(uname string, sub_uname string) (string, bool) { filename := uname + SUBSCRIPTION_FILE_TAG create_and_lock(filename) defer lock_for_files_map[filename].Unlock() //open sublist file sublist_file, open_err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND, 0600) defer sublist_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } //scan file to see if subscription exists scanner := bufio.NewScanner(sublist_file) for scanner.Scan() { scanned_name := scanner.Text() //check if already subscribed if scanned_name == sub_uname { //already subscribed so do nothing return fmt.Sprintf("success: Already subscribed.%s\n", END_TAG), false } } //subscription doesn't exist, so add subscription text := sub_uname + "\n" if _, write_err := sublist_file.WriteString(text); write_err != nil { return fmt.Sprintf("error: Server write error%s\n", END_TAG), false } else { return fmt.Sprintf("success: Added subscription.%s\n", END_TAG), true } } /*un-subscribes user to another user by deleting sub_uname, the
subscribe target, from the main user's sublist.txt*/ func unsubscribe(uname string, sub_uname string) (string, bool) { filename := uname + SUBSCRIPTION_FILE_TAG create_and_lock(filename) defer lock_for_files_map[filename].Unlock() //open sublist file sublist_file, open_err := os.OpenFile(filename, os.O_CREATE|os.O_RDONLY, 0600) if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } sublist := []string{} removed := false //scan file to see if subscription exists scanner := bufio.NewScanner(sublist_file) for scanner.Scan() { scanned_name := scanner.Text() if scanned_name == sub_uname
{ removed = true //didn't add scanned_name to sublist continue }
conditional_block
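The row above centers on unsubscribe's scan-filter-rewrite pattern: read every line of the sublist file, drop the line matching the target, and rewrite the file only when something was actually removed. Below is a minimal, self-contained Go sketch of that pattern under stated assumptions: removeLine is a hypothetical name, and os.WriteFile stands in for the source's os.Remove-plus-OpenFile rewrite.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// removeLine keeps every line except the target and rewrites the file
// only if a line was actually dropped, mirroring unsubscribe above.
func removeLine(filename, target string) (bool, error) {
	f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDONLY, 0600)
	if err != nil {
		return false, err
	}
	kept := []string{}
	removed := false
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		if line == target {
			removed = true // skip the subscription being deleted
			continue
		}
		kept = append(kept, line)
	}
	f.Close()
	if !removed {
		return false, nil
	}
	// Rewrite with the surviving lines; the source recreates the file instead.
	data := ""
	if len(kept) > 0 {
		data = strings.Join(kept, "\n") + "\n"
	}
	return true, os.WriteFile(filename, []byte(data), 0600)
}

func main() {
	removed, err := removeLine("alice_sublist.txt", "bob")
	fmt.Println(removed, err)
}
```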
replica.go
delete user from server memory user_map_lock.Lock() delete(user_map, uname) user_map_lock.Unlock() err := rewrite_userlist() //delete user from user list file if err != nil { return fmt.Sprintf("error: Server rewrite userlist error%s\n", END_TAG), false } //delete user message file filename := uname + ".txt" create_and_lock(filename) // lock the file we want to delete defer lock_for_files_map[filename].Unlock() os.Remove(filename) //respond success return fmt.Sprintf("success: Deleted user %s.%s\n", uname, END_TAG), true }
/*reads messages from user file database. Locks message file of user*/ func read_messages(uname string, num_messages int) (string, bool) { filename := uname + ".txt" create_and_lock(filename) // lock user message file defer lock_for_files_map[filename].Unlock() message_file, open_err := os.OpenFile(filename, os.O_CREATE, 0600) //create file if not exist defer message_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } messages_in_byte, read_err := ioutil.ReadFile(filename) if read_err != nil { return fmt.Sprintf("error: Server read error%s\n", END_TAG), false } messages_in_string := string(messages_in_byte) message_array := strings.SplitAfter(messages_in_string, USER_MESSAGE_SEPERATOR) message_array = message_array[0 : len(message_array)-1] //last index is empty because of how SplitAfter works recent_messages := message_array if num_messages < len(message_array) { //only show the most recent num_messages if more than that exist recent_messages = message_array[len(message_array)-num_messages:] } //fmt.Fprintf(os.Stderr, "printing message%s\n", recent_messages) //give back the most recent num_messages messages response := "" for _, message := range recent_messages { response += message + "\n" } return fmt.Sprintf("success: %s%s\n", response, END_TAG), false }
func sub_feed(uname string, num_messages int) (string, bool) { sub_filename := uname + SUBSCRIPTION_FILE_TAG create_and_lock(sub_filename) defer lock_for_files_map[sub_filename].Unlock() //open sublist file and store subscribed names to sublist sublist_file, open_err := os.OpenFile(sub_filename, os.O_CREATE|os.O_RDONLY, 0600) defer sublist_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } sublist := []string{} scanner := bufio.NewScanner(sublist_file) for scanner.Scan() { scanned_name := scanner.Text() sublist = append(sublist, scanned_name) } response := "" //append num_messages from each sub_uname for _, sub_uname := range sublist { message_filename := sub_uname + ".txt" create_and_lock(message_filename) // lock user message file defer lock_for_files_map[message_filename].Unlock() message_file, open_err := os.OpenFile(message_filename, os.O_CREATE, 0600) //create file if not exist defer message_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } messages_in_byte, read_err := ioutil.ReadFile(message_filename) if read_err != nil { return fmt.Sprintf("error: Server read error%s\n", END_TAG), false } messages_in_string := string(messages_in_byte) message_array := strings.SplitAfter(messages_in_string, USER_MESSAGE_SEPERATOR) message_array = message_array[0 : len(message_array)-1] //last index is empty because of how SplitAfter works recent_messages := message_array if num_messages < len(message_array) { //only show the most recent num_messages if more than that exist recent_messages = message_array[len(message_array)-num_messages:] } response += "<br /><b>Recent messages from - " + sub_uname + "</b><br />" for _, message := range recent_messages { response += message + "\n" } } return fmt.Sprintf("success: %s%s\n", response, END_TAG), false }
/*subscribes a user to another user by writing sub_uname, the subscription target, into the main user's sublist.txt*/ func subscribe(uname string, sub_uname string) (string, bool) { filename := uname + SUBSCRIPTION_FILE_TAG create_and_lock(filename) defer lock_for_files_map[filename].Unlock() //open sublist file (O_RDWR: the file is both scanned and appended to) sublist_file, open_err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0600) defer sublist_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } //scan file to see if subscription exists scanner := bufio.NewScanner(sublist_file) for scanner.Scan() { scanned_name := scanner.Text() //check if already subscribed if scanned_name == sub_uname { //already subscribed so do nothing return fmt.Sprintf("success: Already subscribed.%s\n", END_TAG), false } } //subscription doesn't exist, so add it text := sub_uname + "\n" if _, write_err := sublist_file.WriteString(text); write_err != nil { return fmt.Sprintf("error: Server write error%s\n", END_TAG), false } else { return fmt.Sprintf("success: Added subscription.%s\n", END_TAG), true } }
/*unsubscribes a user from another user by deleting sub_uname, the subscription target, from the main user's sublist.txt*/ func unsubscribe(uname string, sub_uname string) (string, bool) { filename := uname + SUBSCRIPTION_FILE_TAG create_and_lock(filename) defer lock_for_files_map[filename].Unlock() //open sublist file sublist_file, open_err := os.OpenFile(filename, os.O_CREATE|os.O_RDONLY, 0600) if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } sublist := []string{} removed := false //scan file to see if subscription exists scanner := bufio.NewScanner(sublist_file) for scanner.Scan() { scanned_name := scanner.Text() if scanned_name == sub_uname { removed = true //didn't add scanned_name to sublist continue } else { sublist = append(sublist, scanned_name) } } sublist_file.Close() //rewrite file if we removed sub_uname if removed { os.Remove(filename) new_sublist_file, open_err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0600) defer new_sublist_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } for _, uname := range sublist { text := uname + "\n" if _, write_err := new_sublist_file.WriteString(text); write_err != nil { return fmt.Sprintf("error: Server write error%s\n", END_TAG), false } } } return fmt.Sprintf("success: removed subscription.%s\n", END_TAG), removed }
//-----------------------userlist operations-----------------------------------------------
/*loads the list of existing users from the file database into server memory, for faster checks that a user exists. Locks userlist file and usermap in memory*/ func load_user_list() { create_and_lock(USERLIST_FILENAME) // lock userlist file defer lock_for_files_map[USERLIST_FILENAME].Unlock() file, err := os.OpenFile(USERLIST_FILENAME, os.O_CREATE|os.O_RDONLY, 0600) defer file.Close() check_err(err) scanner := bufio.NewScanner(file) for scanner.Scan() { line := scanner.Text() splitted_string := strings.Split(line, ":") uname := strings.TrimSpace(splitted_string[0]) psw := strings.TrimSpace(splitted_string[1]) // store user in server memory user_map_lock.Lock() user_map[uname] = psw user_map_lock.Unlock() } }
/*rewrites the list of existing users from server memory to the userlist file. Needed after removing a user. Locks userlist file and usermap in memory*/ func rewrite_userlist() error { create_and_lock(USERLIST_FILENAME) // lock userlist file for editing defer lock_for_files_map[USERLIST_FILENAME].Unlock() os.Remove(USERLIST_FILENAME) //delete old file //rewrite new user list file file, err := os.OpenFile(USERLIST_FILENAME, os.O_CREATE|os.O_WRONLY, 0600) defer file.Close() if err != nil { return err } user_map_lock.Lock() //locks user map for reading defer user_map_lock.Unlock() for uname, psw := range user_map { text := uname + " : " + psw + "\r\n" if _, write_err := file.WriteString(text); write_err != nil { return write_err } } return nil //no errors = success }
//--------------common functions-------------------------------------------------- //basic check for err func check_err(err error)
{ if err != nil { panic(err) } }
identifier_body
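load_user_list and rewrite_userlist in the row above agree on a one-record-per-line format, "uname : psw" terminated by "\r\n". A quick round-trip sketch of that format (the literals here are illustrative, not from the source):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// What rewrite_userlist writes for one user.
	record := "alice" + " : " + "hunter2" + "\r\n"

	// What load_user_list does per scanned line (bufio strips the newline).
	line := strings.TrimSuffix(record, "\r\n")
	parts := strings.Split(line, ":")
	uname := strings.TrimSpace(parts[0])
	psw := strings.TrimSpace(parts[1])
	fmt.Printf("%q %q\n", uname, psw) // "alice" "hunter2"
}
```

Note that the format implicitly assumes neither field contains a colon; a password with ":" would split into extra fields and be truncated on load.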
replica.go
data, so it should not be using locks. This function dispatches to other functions that lock files for read/write. Pre-Condition: Request will be in the form of "queryname: arg1: arg2: ..." Post-Condition: completes the request and returns the response, along with a bool saying whether there was an update; returns (response, is_updated) response will be in the form of "success/error: response" */ func evaluate(query string) (string, bool) { delimiter := ":" parsed_query := strings.Split(query, delimiter) //trim white space at the ends for i := 0; i < len(parsed_query); i++ { parsed_query[i] = strings.TrimSpace(parsed_query[i]) } query_function := parsed_query[0] //check if query function is valid valid_queries := []string{DOES_USER_EXIST, CHECK_PASS, ADD_USER, DELETE_USER, ADD_MESSAGE, READ_MESSAGES, SUB, UNSUB, SUB_FEED} is_valid_query := false for _, query := range valid_queries { if query_function == query { is_valid_query = true } } if !is_valid_query { //not a valid query return fmt.Sprintf("error: %s is not a valid query.%s\n", parsed_query[0], END_TAG), false } //for all queries, args should start with query, username //all queries have >= 2 args if !check_args(parsed_query, 2) { //check args failed return WRONG_NUM_ARGS, false } uname := parsed_query[1] //handle the only query with 2 args, does_user_exist; otherwise check for >= 3 args if query_function == DOES_USER_EXIST { return does_user_exist(uname) } else { // check for more args if !check_args(parsed_query, 3) { //check args failed return WRONG_NUM_ARGS, false } } //------the following require >= 3 args; the 3-arg check passed above if query_function == ADD_USER { //doesn't need password authentication return add_user(uname, parsed_query[2]) } else if query_function == READ_MESSAGES { //args should be query, username, num_message if num_message, convert_err := strconv.Atoi(parsed_query[2]); convert_err != nil { return fmt.Sprintf("error: third arg must be integer.%s\n", END_TAG), false } else { return read_messages(uname, num_message) } } psw := parsed_query[2] //the following functions need password authentication if !authenticate(uname, psw) { //uname and psw don't match response := fmt.Sprintf("error: Username and Password combination not found. %s\n", END_TAG) return response, false } switch query_function { case CHECK_PASS: //reply that the username + password check passed //already passed when authenticate was called return fmt.Sprintf("success: correct username and password %s\n", END_TAG), false case DELETE_USER: return delete_user(uname) case ADD_MESSAGE: //args should be query, username, password, message if !check_args(parsed_query, 4) { //check args failed return WRONG_NUM_ARGS, false } message := parsed_query[3] return add_message(uname, message) case SUB: if !check_args(parsed_query, 4) { //check args failed return WRONG_NUM_ARGS, false } sub_uname := parsed_query[3] return subscribe(uname, sub_uname) case UNSUB: if !check_args(parsed_query, 4) { //check args failed return WRONG_NUM_ARGS, false } sub_uname := parsed_query[3] return unsubscribe(uname, sub_uname) case SUB_FEED: if num_messages, convert_err := strconv.Atoi(parsed_query[3]); convert_err != nil { return fmt.Sprintf("error: fourth arg must be integer.%s\n", END_TAG), false } else { return sub_feed(uname, num_messages) } } return fmt.Sprintf("error: unknown error.%s\n", END_TAG), false }
//==========================================================================================
//Functions that respond to queries, used by evaluate
//============================================================================================
/*checks that the number of args from the query is AT LEAST the number expected; returns false if the arg count is wrong, true otherwise*/ func check_args(parsed_query []string, num_expected int) bool { return (len(parsed_query) >= num_expected) }
/*checks password against username; returns false if they do not match. Locks user map for reading*/ func authenticate(uname string, psw string) bool { user_map_lock.Lock() defer user_map_lock.Unlock() if _, is_exist := user_map[uname]; is_exist && user_map[uname] == psw { return true } else { return false } }
/*simple check that the username from the query is an existing user. Does not check password. Responds success if the user exists, else responds error. Locks usermap*/ func does_user_exist(uname string) (string, bool) { user_map_lock.Lock() defer user_map_lock.Unlock() if _, is_exist := user_map[uname]; is_exist { return fmt.Sprintf("success: user exists %s\n", END_TAG), false } else { return fmt.Sprintf("error: no such user %s\n", END_TAG), false } }
/*Creates a new user and writes the new user info to the user list file; sends an error response if the user already exists. Locks user map; may lock userlist file and user message file*/ func add_user(uname string, psw string) (string, bool) { user_map_lock.Lock() defer user_map_lock.Unlock() _, is_exist := user_map[uname] if !is_exist { //create user if not exist user_map[uname] = psw //open user list file to write to end of it create_and_lock(USERLIST_FILENAME) // lock userlist file for editing defer lock_for_files_map[USERLIST_FILENAME].Unlock() file, open_err := os.OpenFile(USERLIST_FILENAME, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600) defer file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } text := uname + " : " + psw + "\r\n" if _, write_err := file.WriteString(text); write_err != nil { return fmt.Sprintf("error: Server write file error%s\n", END_TAG), false } //create user data file u_file_name := uname + ".txt" create_and_lock(u_file_name) // lock user file for deleting and recreating defer lock_for_files_map[u_file_name].Unlock() os.Remove(u_file_name) // clear old junk created_file, create_err := os.Create(u_file_name) defer created_file.Close() if create_err != nil { return fmt.Sprintf("error: Server create error%s\n", END_TAG), false } else { //response return fmt.Sprintf("success: I added user %s.%s\n", uname, END_TAG), true } } else { //negative response return fmt.Sprintf("error: user, %s, already exists.%s\n", uname, END_TAG), false } }
/*Adds a new message under the user with the given uname, by writing to the database file containing the stored messages of that user. Locks message file of user*/ func add_message(uname string, new_message string) (string, bool) { filename := uname + ".txt" create_and_lock(filename) // lock user message file for editing defer lock_for_files_map[filename].Unlock() message_file, open_err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600) defer message_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } //write new message to file newline := "\r\n" text_to_write := new_message + newline + USER_MESSAGE_SEPERATOR + newline if _, write_err := message_file.WriteString(text_to_write); write_err != nil { return fmt.Sprintf("error: server failed to write.%s\n", END_TAG), false } else { return fmt.Sprintf("success: added message for %s.%s\n", uname, END_TAG), true } }
/*deletes user from userlist file and deletes the message file associated with that user. Locks usermap and message file of the user that is being deleted*/ func delete_user(uname string) (string, bool) { //delete user from server memory user_map_lock.Lock() delete(user_map, uname) user_map_lock.Unlock() err := rewrite_userlist() //delete user from user list file if err != nil { return fmt.Sprintf("error: Server rewrite userlist error%s\n", END_TAG), false } //delete user message file filename := uname + ".txt" create_and_lock(filename) // lock the file we want to delete defer lock_for_files_map[filename].Unlock() os.Remove(filename) //respond success return fmt.Sprintf("success: Deleted user %s.%s\n", uname,
random_line_split
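evaluate, shown in the row above, treats a request as colon-delimited fields, "queryname: arg1: arg2: ...", trimmed and checked for a minimum count before dispatch. A minimal sketch of that front half (parseQuery and the sample query name are hypothetical; the real code compares against constants like ADD_USER and returns WRONG_NUM_ARGS):

```go
package main

import (
	"fmt"
	"strings"
)

// parseQuery splits a request the way evaluate does: on ":", with each
// field trimmed, requiring at least "queryname: uname".
func parseQuery(query string) (string, []string, error) {
	parts := strings.Split(query, ":")
	for i := range parts {
		parts[i] = strings.TrimSpace(parts[i])
	}
	if len(parts) < 2 {
		return "", nil, fmt.Errorf("wrong number of args")
	}
	return parts[0], parts[1:], nil
}

func main() {
	name, args, err := parseQuery("add_message : alice : secret : hello world")
	fmt.Println(name, args, err) // add_message [alice secret hello world] <nil>
}
```

The same colon convention means a message body containing ":" is split into separate fields, which is why add_message only ever reads parsed_query[3].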
replica.go
.Sprintf("error: Server rewrite uselist error%s\n", END_TAG), false } //delete user message file filename := uname + ".txt" create_and_lock(filename) // lock the file we want to delete defer lock_for_files_map[filename].Unlock() os.Remove(filename) //repond sucess return fmt.Sprintf("success: Deleted user %s.%s\n", uname, END_TAG), true } /*reads messages from user file database locks message file of user*/ func read_messages(uname string, num_messages int) (string, bool) { filename := uname + ".txt" create_and_lock(filename) // lock user message file defer lock_for_files_map[filename].Unlock() message_file, open_err := os.OpenFile(filename, os.O_CREATE, 0600) //create file if not exist defer message_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } messages_in_byte, read_err := ioutil.ReadFile(filename) if read_err != nil { return fmt.Sprintf("error: Server read error%s\n", END_TAG), false } messages_in_string := string(messages_in_byte) message_array := strings.SplitAfter(messages_in_string, USER_MESSAGE_SEPERATOR) message_array = message_array[0 : len(message_array)-1] //last index is empty cause of how splitafter works recent_messages := message_array if num_messages < len(message_array) { //only show recent num messages if there exist more than that recent_messages = message_array[len(message_array)-num_messages:] } //fmt.Fprintf(os.Stderr, "printing message%s\n", recent_messages) //give back most recent num_messages of messages response := "" for _, message := range recent_messages { response += message + "\n" } return fmt.Sprintf("success: %s%s\n", response, END_TAG), false } func sub_feed(uname string, num_messages int) (string, bool) { sub_filename := uname + SUBSCRIPTION_FILE_TAG create_and_lock(sub_filename) defer lock_for_files_map[sub_filename].Unlock() //open sublist file and store subscribed names to sublist sublist_file, open_err := os.OpenFile(sub_filename, os.O_CREATE|os.O_RDONLY, 0600) defer sublist_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } sublist := []string{} scanner := bufio.NewScanner(sublist_file) for scanner.Scan() { scanned_name := scanner.Text() sublist = append(sublist, scanned_name) } response := "" //append num_messages frome each sub_uname for _, sub_uname := range sublist { message_filename := sub_uname + ".txt" create_and_lock(message_filename) // lock user message file defer lock_for_files_map[message_filename].Unlock() message_file, open_err := os.OpenFile(message_filename, os.O_CREATE, 0600) //create file if not exist defer message_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } messages_in_byte, read_err := ioutil.ReadFile(message_filename) if read_err != nil { return fmt.Sprintf("error: Server read error%s\n", END_TAG), false } messages_in_string := string(messages_in_byte) message_array := strings.SplitAfter(messages_in_string, USER_MESSAGE_SEPERATOR) message_array = message_array[0 : len(message_array)-1] //last index is empty cause of how splitafter works recent_messages := message_array if num_messages < len(message_array) { //only show recent num messages if there exist more than that recent_messages = message_array[len(message_array)-num_messages:] } response += "<br /><b>Recent messages from - " + sub_uname + "</b><br />" for _, message := range recent_messages { response += message + "\n" } } return fmt.Sprintf("success: %s%s\n", response, END_TAG), false } /*subscribes user to 
another user by writing sub_uname, the subscribe target, in the main user's sublist.txt*/ func subscribe(uname string, sub_uname string) (string, bool) { filename := uname + SUBSCRIPTION_FILE_TAG create_and_lock(filename) defer lock_for_files_map[filename].Unlock() //open sublist file sublist_file, open_err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND, 0600) defer sublist_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } //scan file to see if subscription exists scanner := bufio.NewScanner(sublist_file) for scanner.Scan() { scanned_name := scanner.Text() //check if already subscribed if scanned_name == sub_uname { //already subscribed so do nothing return fmt.Sprintf("success: Already subscribed.%s\n", END_TAG), false } } //subscription don't exist, so add subscription text := sub_uname + "\n" if _, write_err := sublist_file.WriteString(text); write_err != nil { return fmt.Sprintf("error: Server write error%s\n", END_TAG), false } else { return fmt.Sprintf("success: Added subscription.%s\n", END_TAG), true } } /*un-subscribes user to another user by deleting sub_uname, the subscribe target, from the main user's sublist.txt*/ func unsubscribe(uname string, sub_uname string) (string, bool) { filename := uname + SUBSCRIPTION_FILE_TAG create_and_lock(filename) defer lock_for_files_map[filename].Unlock() //open sublist file sublist_file, open_err := os.OpenFile(filename, os.O_CREATE|os.O_RDONLY, 0600) if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } sublist := []string{} removed := false //scan file to see if subscription exists scanner := bufio.NewScanner(sublist_file) for scanner.Scan() { scanned_name := scanner.Text() if scanned_name == sub_uname { removed = true //didn't add scanned_name to sublist continue } else { sublist = append(sublist, scanned_name) } } sublist_file.Close() //rewrite file if removed sub_uname if removed { os.Remove(filename) new_sublist_file, open_err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0600) defer new_sublist_file.Close() if open_err != nil { return fmt.Sprintf("error: Server open error%s\n", END_TAG), false } for _, uname := range sublist { text := uname + "\n" if _, write_err := new_sublist_file.WriteString(text); write_err != nil { return fmt.Sprintf("error: Server write error%s\n", END_TAG), false } } } return fmt.Sprintf("success: removed subscription.%s\n", END_TAG), removed } //-----------------------userlist operations----------------------------------------------- /*loads list of existing users from file database into server memory for faster checks that user exist Locks userlist file and usermap in memory*/ func load_user_list() { create_and_lock(USERLIST_FILENAME) // lock userlist file defer lock_for_files_map[USERLIST_FILENAME].Unlock() file, err := os.OpenFile(USERLIST_FILENAME, os.O_CREATE|os.O_RDONLY, 0600) defer file.Close() check_err(err) scanner := bufio.NewScanner(file) for scanner.Scan() { line := scanner.Text() splitted_string := strings.Split(line, ":") uname := strings.TrimSpace(splitted_string[0]) psw := strings.TrimSpace(splitted_string[1]) // store user in server memory user_map_lock.Lock() user_map[uname] = psw user_map_lock.Unlock() } } /*rewrite list of existing user from server memory to userlist file. 
Needed after removing a user Locks userlist file and usermap in memory*/ func rewrite_userlist() error { create_and_lock(USERLIST_FILENAME) // lock userlist file for editing defer lock_for_files_map[USERLIST_FILENAME].Unlock() os.Remove(USERLIST_FILENAME) //delete old file //rewrite new user list file file, err := os.OpenFile(USERLIST_FILENAME, os.O_CREATE|os.O_WRONLY, 0600) defer file.Close() if err != nil { return err } // user_map_lock.Lock() //locks user map for reading defer user_map_lock.Unlock() for uname, psw := range user_map { text := uname + " : " + psw + "\r\n" if _, write_err := file.WriteString(text); write_err != nil { return write_err } } return nil //no errors = success } //--------------common functions-------------------------------------------------- //basic check for err func check_err(err error) { if err != nil { panic(err) } } /*locking function for files. Called when want to lock a file. creates lock and store into map under filename key if lock doesn't exist lastly, calls lock on lock associated with filename*/ func
create_and_lock
identifier_name
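The hole in the row above is create_and_lock, whose doc comment says it creates a lock in lock_for_files_map under the filename key if one doesn't exist, then locks it. Below is a minimal Go sketch consistent with that description; the mutex guarding the map itself (map_guard here) is an assumption, since its declaration falls outside this excerpt, and the snake_case names simply follow the source.

```go
package main

import "sync"

// Assumed declarations; the real ones live outside this excerpt.
var (
	lock_for_files_map = map[string]*sync.Mutex{}
	map_guard          sync.Mutex // hypothetical: protects the map itself
)

// create_and_lock makes a per-file mutex on first use, then takes it.
// Callers are expected to defer lock_for_files_map[filename].Unlock().
func create_and_lock(filename string) {
	map_guard.Lock()
	l, ok := lock_for_files_map[filename]
	if !ok {
		l = &sync.Mutex{}
		lock_for_files_map[filename] = l
	}
	map_guard.Unlock()
	l.Lock()
}

func main() {
	create_and_lock("alice.txt")
	defer lock_for_files_map["alice.txt"].Unlock()
	// ... file work would happen here, as in the handlers above ...
}
```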
upb.rs
NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //! UPB FFI wrapper code for use by Rust Protobuf. use crate::__internal::{Private, RawArena, RawMessage}; use std::alloc; use std::alloc::Layout; use std::cell::UnsafeCell; use std::fmt; use std::marker::PhantomData; use std::mem::MaybeUninit; use std::ops::Deref; use std::ptr::{self, NonNull}; use std::slice; /// See `upb/port/def.inc`. const UPB_MALLOC_ALIGN: usize = 8; /// A wrapper over a `upb_Arena`. /// /// This is not a safe wrapper per se, because the allocation functions still /// have sharp edges (see their safety docs for more info). /// /// This is an owning type and will automatically free the arena when /// dropped. /// /// Note that this type is neither `Sync` nor `Send`. #[derive(Debug)] pub struct Arena { // Safety invariant: this must always be a valid arena raw: RawArena, _not_sync: PhantomData<UnsafeCell<()>>, } extern "C" { // `Option<NonNull<T: Sized>>` is ABI-compatible with `*mut T` fn upb_Arena_New() -> Option<RawArena>; fn upb_Arena_Free(arena: RawArena); fn upb_Arena_Malloc(arena: RawArena, size: usize) -> *mut u8; fn upb_Arena_Realloc(arena: RawArena, ptr: *mut u8, old: usize, new: usize) -> *mut u8; } impl Arena { /// Allocates a fresh arena. #[inline] pub fn new() -> Self { #[inline(never)] #[cold] fn arena_new_failed() -> ! { panic!("Could not create a new UPB arena"); } // SAFETY: // - `upb_Arena_New` is assumed to be implemented correctly and always sound to // call; if it returned a non-null pointer, it is a valid arena. unsafe { let Some(raw) = upb_Arena_New() else { arena_new_failed() }; Self { raw, _not_sync: PhantomData } } } /// Returns the raw, UPB-managed pointer to the arena. #[inline] pub fn raw(&self) -> RawArena { self.raw } /// Allocates some memory on the arena. /// /// # Safety /// /// - `layout`'s alignment must be less than `UPB_MALLOC_ALIGN`. #[inline] pub unsafe fn alloc(&self, layout: Layout) -> &mut [MaybeUninit<u8>] { debug_assert!(layout.align() <= UPB_MALLOC_ALIGN); // SAFETY: `self.raw` is a valid UPB arena let ptr = unsafe { upb_Arena_Malloc(self.raw, layout.size()) }; if ptr.is_null() { alloc::handle_alloc_error(layout); } // SAFETY: // - `upb_Arena_Malloc` promises that if the return pointer is non-null, it is // dereferencable for `size` bytes and has an alignment of `UPB_MALLOC_ALIGN` // until the arena is destroyed. // - `[MaybeUninit<u8>]` has no alignment requirement, and `ptr` is aligned to a // `UPB_MALLOC_ALIGN` boundary. unsafe { slice::from_raw_parts_mut(ptr.cast(), layout.size()) } } /// Resizes some memory on the arena. /// /// # Safety /// /// - `ptr` must be the data pointer returned by a previous call to `alloc` /// or `resize` on `self`. /// - After calling this function, `ptr` is no longer dereferencable - it is /// zapped. /// - `old` must be the layout `ptr` was allocated with via `alloc` or /// `realloc`. /// - `new`'s alignment must be less than `UPB_MALLOC_ALIGN`. 
#[inline] pub unsafe fn resize(&self, ptr: *mut u8, old: Layout, new: Layout) -> &mut [MaybeUninit<u8>] { debug_assert!(new.align() <= UPB_MALLOC_ALIGN); // SAFETY: // - `self.raw` is a valid UPB arena // - `ptr` was allocated by a previous call to `alloc` or `realloc` as promised // by the caller. let ptr = unsafe { upb_Arena_Realloc(self.raw, ptr, old.size(), new.size()) }; if ptr.is_null()
// SAFETY: // - `upb_Arena_Realloc` promises that if the return pointer is non-null, it is // dereferencable for the new `size` in bytes until the arena is destroyed. // - `[MaybeUninit<u8>]` has no alignment requirement, and `ptr` is aligned to a // `UPB_MALLOC_ALIGN` boundary. unsafe { slice::from_raw_parts_mut(ptr.cast(), new.size()) } } } impl Drop for Arena { #[inline] fn drop(&mut self) { unsafe { upb_Arena_Free(self.raw); } } } /// Serialized Protobuf wire format data. /// /// It's typically produced by `<Message>::serialize()`. pub struct SerializedData { data: NonNull<u8>, len: usize, // The arena that owns `data`. _arena: Arena, } impl SerializedData { /// Construct `SerializedData` from raw pointers and its owning arena. /// /// # Safety /// - `arena` must have allocated `data` /// - `data` must be readable for `len` bytes and not mutate while this /// struct exists pub unsafe fn from_raw_parts(arena: Arena, data: NonNull<u8>, len: usize) -> Self { SerializedData { _arena: arena, data, len } } /// Gets a raw slice pointer. pub fn as_ptr(&self) -> *const [u8] { ptr::slice_from_raw_parts(self.data.as_ptr(), self.len) } } impl Deref for SerializedData { type Target = [u8]; fn deref(&self) -> &Self::Target { // SAFETY: `data` is valid for `len` bytes as promised by // the caller of `SerializedData::from_raw_parts`. unsafe { slice::from_raw_parts(self.data.as_ptr(), self.len) } } } impl fmt::Debug for SerializedData { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(self.deref(), f) } } // TODO(b/293919363): Investigate replacing this with direct access to UPB bits. pub type BytesPresentMutData<'msg> = crate::vtable::RawVTableOptionalMutatorData<'msg, [u8]>; pub type BytesAbsentMutData<'msg> = crate::vtable::RawVTableOptionalMutatorData<'msg, [u8]>; pub type InnerBytesMut<'msg> = crate::vtable::RawVTableMutator<'msg, [u8]>; /// The raw contents of every generated message. #[derive(Debug)] pub struct MessageInner { pub msg: RawMessage, pub arena: Arena, } /// Mutators that point to their original message use this to do so. /// /// Since UPB expects runtimes to manage their own arenas, this needs to have /// access to an `Arena`. /// /// This has two possible designs: /// - Store two pointers here, `RawMessage` and `&'msg Arena`. This doesn't /// place any restriction on the layout of generated messages and their /// mutators. This makes a vtable-based mutator three pointers, which can no /// longer be returned in registers on most platforms. /// - Store one pointer here, `&'msg MessageInner`, where `MessageInner` stores /// a `RawMessage` and an `Arena`. This would require all generated messages /// to store `MessageInner`, and since their mutators need to be able to /// generate `BytesMut`, would also require `BytesMut` to store a `&'msg /// MessageInner` since they can't store an owned `Arena`. /// /// Note: even though this type is `Copy`, it should only be copied by /// protobuf internals that can maintain mutation invariants: /// /// - No concurrent mutation for any two fields in a message: this means /// mutators cannot be `Send` but are `Sync`. /// - If there are multiple accessible `Mut` to a single message at a time, they /// must be different fields
{ alloc::handle_alloc_error(new); }
conditional_block
upb.rs
eref; use std::ptr::{self, NonNull}; use std::slice; /// See `upb/port/def.inc`. const UPB_MALLOC_ALIGN: usize = 8; /// A wrapper over a `upb_Arena`. /// /// This is not a safe wrapper per se, because the allocation functions still /// have sharp edges (see their safety docs for more info). /// /// This is an owning type and will automatically free the arena when /// dropped. /// /// Note that this type is neither `Sync` nor `Send`. #[derive(Debug)] pub struct Arena { // Safety invariant: this must always be a valid arena raw: RawArena, _not_sync: PhantomData<UnsafeCell<()>>, } extern "C" { // `Option<NonNull<T: Sized>>` is ABI-compatible with `*mut T` fn upb_Arena_New() -> Option<RawArena>; fn upb_Arena_Free(arena: RawArena); fn upb_Arena_Malloc(arena: RawArena, size: usize) -> *mut u8; fn upb_Arena_Realloc(arena: RawArena, ptr: *mut u8, old: usize, new: usize) -> *mut u8; } impl Arena { /// Allocates a fresh arena. #[inline] pub fn new() -> Self { #[inline(never)] #[cold] fn arena_new_failed() -> ! { panic!("Could not create a new UPB arena"); } // SAFETY: // - `upb_Arena_New` is assumed to be implemented correctly and always sound to // call; if it returned a non-null pointer, it is a valid arena. unsafe { let Some(raw) = upb_Arena_New() else { arena_new_failed() }; Self { raw, _not_sync: PhantomData } } } /// Returns the raw, UPB-managed pointer to the arena. #[inline] pub fn raw(&self) -> RawArena { self.raw } /// Allocates some memory on the arena. /// /// # Safety /// /// - `layout`'s alignment must be less than `UPB_MALLOC_ALIGN`. #[inline] pub unsafe fn alloc(&self, layout: Layout) -> &mut [MaybeUninit<u8>] { debug_assert!(layout.align() <= UPB_MALLOC_ALIGN); // SAFETY: `self.raw` is a valid UPB arena let ptr = unsafe { upb_Arena_Malloc(self.raw, layout.size()) }; if ptr.is_null() { alloc::handle_alloc_error(layout); } // SAFETY: // - `upb_Arena_Malloc` promises that if the return pointer is non-null, it is // dereferencable for `size` bytes and has an alignment of `UPB_MALLOC_ALIGN` // until the arena is destroyed. // - `[MaybeUninit<u8>]` has no alignment requirement, and `ptr` is aligned to a // `UPB_MALLOC_ALIGN` boundary. unsafe { slice::from_raw_parts_mut(ptr.cast(), layout.size()) } } /// Resizes some memory on the arena. /// /// # Safety /// /// - `ptr` must be the data pointer returned by a previous call to `alloc` /// or `resize` on `self`. /// - After calling this function, `ptr` is no longer dereferencable - it is /// zapped. /// - `old` must be the layout `ptr` was allocated with via `alloc` or /// `realloc`. /// - `new`'s alignment must be less than `UPB_MALLOC_ALIGN`. #[inline] pub unsafe fn resize(&self, ptr: *mut u8, old: Layout, new: Layout) -> &mut [MaybeUninit<u8>] { debug_assert!(new.align() <= UPB_MALLOC_ALIGN); // SAFETY: // - `self.raw` is a valid UPB arena // - `ptr` was allocated by a previous call to `alloc` or `realloc` as promised // by the caller. let ptr = unsafe { upb_Arena_Realloc(self.raw, ptr, old.size(), new.size()) }; if ptr.is_null() { alloc::handle_alloc_error(new); } // SAFETY: // - `upb_Arena_Realloc` promises that if the return pointer is non-null, it is // dereferencable for the new `size` in bytes until the arena is destroyed. // - `[MaybeUninit<u8>]` has no alignment requirement, and `ptr` is aligned to a // `UPB_MALLOC_ALIGN` boundary. unsafe { slice::from_raw_parts_mut(ptr.cast(), new.size()) } } } impl Drop for Arena { #[inline] fn drop(&mut self) { unsafe { upb_Arena_Free(self.raw); } } } /// Serialized Protobuf wire format data. 
/// /// It's typically produced by `<Message>::serialize()`. pub struct SerializedData { data: NonNull<u8>, len: usize, // The arena that owns `data`. _arena: Arena, } impl SerializedData { /// Construct `SerializedData` from raw pointers and its owning arena. /// /// # Safety /// - `arena` must have allocated `data` /// - `data` must be readable for `len` bytes and not mutate while this /// struct exists pub unsafe fn from_raw_parts(arena: Arena, data: NonNull<u8>, len: usize) -> Self { SerializedData { _arena: arena, data, len } } /// Gets a raw slice pointer. pub fn as_ptr(&self) -> *const [u8] { ptr::slice_from_raw_parts(self.data.as_ptr(), self.len) } } impl Deref for SerializedData { type Target = [u8]; fn deref(&self) -> &Self::Target { // SAFETY: `data` is valid for `len` bytes as promised by // the caller of `SerializedData::from_raw_parts`. unsafe { slice::from_raw_parts(self.data.as_ptr(), self.len) } } } impl fmt::Debug for SerializedData { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(self.deref(), f) } } // TODO(b/293919363): Investigate replacing this with direct access to UPB bits. pub type BytesPresentMutData<'msg> = crate::vtable::RawVTableOptionalMutatorData<'msg, [u8]>; pub type BytesAbsentMutData<'msg> = crate::vtable::RawVTableOptionalMutatorData<'msg, [u8]>; pub type InnerBytesMut<'msg> = crate::vtable::RawVTableMutator<'msg, [u8]>; /// The raw contents of every generated message. #[derive(Debug)] pub struct MessageInner { pub msg: RawMessage, pub arena: Arena, } /// Mutators that point to their original message use this to do so. /// /// Since UPB expects runtimes to manage their own arenas, this needs to have /// access to an `Arena`. /// /// This has two possible designs: /// - Store two pointers here, `RawMessage` and `&'msg Arena`. This doesn't /// place any restriction on the layout of generated messages and their /// mutators. This makes a vtable-based mutator three pointers, which can no /// longer be returned in registers on most platforms. /// - Store one pointer here, `&'msg MessageInner`, where `MessageInner` stores /// a `RawMessage` and an `Arena`. This would require all generated messages /// to store `MessageInner`, and since their mutators need to be able to /// generate `BytesMut`, would also require `BytesMut` to store a `&'msg /// MessageInner` since they can't store an owned `Arena`. /// /// Note: even though this type is `Copy`, it should only be copied by /// protobuf internals that can maintain mutation invariants: /// /// - No concurrent mutation for any two fields in a message: this means /// mutators cannot be `Send` but are `Sync`. /// - If there are multiple accessible `Mut` to a single message at a time, they /// must be different fields, and not be in the same oneof. As such, a `Mut` /// cannot be `Clone` but *can* reborrow itself with `.as_mut()`, which /// converts `&'b mut Mut<'a, T>` to `Mut<'b, T>`. #[derive(Clone, Copy, Debug)] pub struct MutatorMessageRef<'msg> { msg: RawMessage, arena: &'msg Arena, } impl<'msg> MutatorMessageRef<'msg> { #[doc(hidden)] #[allow(clippy::needless_pass_by_ref_mut)] // Sound construction requires mutable access. pub fn new(_private: Private, msg: &'msg mut MessageInner) -> Self { MutatorMessageRef { msg: msg.msg, arena: &msg.arena } } pub fn msg(&self) -> RawMessage {
self.msg } }
random_line_split
upb.rs
, RawMessage}; use std::alloc; use std::alloc::Layout; use std::cell::UnsafeCell; use std::fmt; use std::marker::PhantomData; use std::mem::MaybeUninit; use std::ops::Deref; use std::ptr::{self, NonNull}; use std::slice; /// See `upb/port/def.inc`. const UPB_MALLOC_ALIGN: usize = 8; /// A wrapper over a `upb_Arena`. /// /// This is not a safe wrapper per se, because the allocation functions still /// have sharp edges (see their safety docs for more info). /// /// This is an owning type and will automatically free the arena when /// dropped. /// /// Note that this type is neither `Sync` nor `Send`. #[derive(Debug)] pub struct Arena { // Safety invariant: this must always be a valid arena raw: RawArena, _not_sync: PhantomData<UnsafeCell<()>>, } extern "C" { // `Option<NonNull<T: Sized>>` is ABI-compatible with `*mut T` fn upb_Arena_New() -> Option<RawArena>; fn upb_Arena_Free(arena: RawArena); fn upb_Arena_Malloc(arena: RawArena, size: usize) -> *mut u8; fn upb_Arena_Realloc(arena: RawArena, ptr: *mut u8, old: usize, new: usize) -> *mut u8; } impl Arena { /// Allocates a fresh arena. #[inline] pub fn new() -> Self { #[inline(never)] #[cold] fn arena_new_failed() -> ! { panic!("Could not create a new UPB arena"); } // SAFETY: // - `upb_Arena_New` is assumed to be implemented correctly and always sound to // call; if it returned a non-null pointer, it is a valid arena. unsafe { let Some(raw) = upb_Arena_New() else { arena_new_failed() }; Self { raw, _not_sync: PhantomData } } } /// Returns the raw, UPB-managed pointer to the arena. #[inline] pub fn raw(&self) -> RawArena { self.raw } /// Allocates some memory on the arena. /// /// # Safety /// /// - `layout`'s alignment must be less than `UPB_MALLOC_ALIGN`. #[inline] pub unsafe fn alloc(&self, layout: Layout) -> &mut [MaybeUninit<u8>] { debug_assert!(layout.align() <= UPB_MALLOC_ALIGN); // SAFETY: `self.raw` is a valid UPB arena let ptr = unsafe { upb_Arena_Malloc(self.raw, layout.size()) }; if ptr.is_null() { alloc::handle_alloc_error(layout); } // SAFETY: // - `upb_Arena_Malloc` promises that if the return pointer is non-null, it is // dereferencable for `size` bytes and has an alignment of `UPB_MALLOC_ALIGN` // until the arena is destroyed. // - `[MaybeUninit<u8>]` has no alignment requirement, and `ptr` is aligned to a // `UPB_MALLOC_ALIGN` boundary. unsafe { slice::from_raw_parts_mut(ptr.cast(), layout.size()) } } /// Resizes some memory on the arena. /// /// # Safety /// /// - `ptr` must be the data pointer returned by a previous call to `alloc` /// or `resize` on `self`. /// - After calling this function, `ptr` is no longer dereferencable - it is /// zapped. /// - `old` must be the layout `ptr` was allocated with via `alloc` or /// `realloc`. /// - `new`'s alignment must be less than `UPB_MALLOC_ALIGN`. #[inline] pub unsafe fn resize(&self, ptr: *mut u8, old: Layout, new: Layout) -> &mut [MaybeUninit<u8>] { debug_assert!(new.align() <= UPB_MALLOC_ALIGN); // SAFETY: // - `self.raw` is a valid UPB arena // - `ptr` was allocated by a previous call to `alloc` or `realloc` as promised // by the caller. let ptr = unsafe { upb_Arena_Realloc(self.raw, ptr, old.size(), new.size()) }; if ptr.is_null() { alloc::handle_alloc_error(new); } // SAFETY: // - `upb_Arena_Realloc` promises that if the return pointer is non-null, it is // dereferencable for the new `size` in bytes until the arena is destroyed. // - `[MaybeUninit<u8>]` has no alignment requirement, and `ptr` is aligned to a // `UPB_MALLOC_ALIGN` boundary. 
unsafe { slice::from_raw_parts_mut(ptr.cast(), new.size()) } } } impl Drop for Arena { #[inline] fn drop(&mut self) { unsafe { upb_Arena_Free(self.raw); } } } /// Serialized Protobuf wire format data. /// /// It's typically produced by `<Message>::serialize()`. pub struct SerializedData { data: NonNull<u8>, len: usize, // The arena that owns `data`. _arena: Arena, } impl SerializedData { /// Construct `SerializedData` from raw pointers and its owning arena. /// /// # Safety /// - `arena` must have allocated `data` /// - `data` must be readable for `len` bytes and not mutate while this /// struct exists pub unsafe fn from_raw_parts(arena: Arena, data: NonNull<u8>, len: usize) -> Self { SerializedData { _arena: arena, data, len } } /// Gets a raw slice pointer. pub fn as_ptr(&self) -> *const [u8] { ptr::slice_from_raw_parts(self.data.as_ptr(), self.len) } } impl Deref for SerializedData { type Target = [u8]; fn deref(&self) -> &Self::Target { // SAFETY: `data` is valid for `len` bytes as promised by // the caller of `SerializedData::from_raw_parts`. unsafe { slice::from_raw_parts(self.data.as_ptr(), self.len) } } } impl fmt::Debug for SerializedData { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(self.deref(), f) } } // TODO(b/293919363): Investigate replacing this with direct access to UPB bits. pub type BytesPresentMutData<'msg> = crate::vtable::RawVTableOptionalMutatorData<'msg, [u8]>; pub type BytesAbsentMutData<'msg> = crate::vtable::RawVTableOptionalMutatorData<'msg, [u8]>; pub type InnerBytesMut<'msg> = crate::vtable::RawVTableMutator<'msg, [u8]>; /// The raw contents of every generated message. #[derive(Debug)] pub struct MessageInner { pub msg: RawMessage, pub arena: Arena, } /// Mutators that point to their original message use this to do so. /// /// Since UPB expects runtimes to manage their own arenas, this needs to have /// access to an `Arena`. /// /// This has two possible designs: /// - Store two pointers here, `RawMessage` and `&'msg Arena`. This doesn't /// place any restriction on the layout of generated messages and their /// mutators. This makes a vtable-based mutator three pointers, which can no /// longer be returned in registers on most platforms. /// - Store one pointer here, `&'msg MessageInner`, where `MessageInner` stores /// a `RawMessage` and an `Arena`. This would require all generated messages /// to store `MessageInner`, and since their mutators need to be able to /// generate `BytesMut`, would also require `BytesMut` to store a `&'msg /// MessageInner` since they can't store an owned `Arena`. /// /// Note: even though this type is `Copy`, it should only be copied by /// protobuf internals that can maintain mutation invariants: /// /// - No concurrent mutation for any two fields in a message: this means /// mutators cannot be `Send` but are `Sync`. /// - If there are multiple accessible `Mut` to a single message at a time, they /// must be different fields, and not be in the same oneof. As such, a `Mut` /// cannot be `Clone` but *can* reborrow itself with `.as_mut()`, which /// converts `&'b mut Mut<'a, T>` to `Mut<'b, T>`. #[derive(Clone, Copy, Debug)] pub struct MutatorMessageRef<'msg> { msg: RawMessage, arena: &'msg Arena, } impl<'msg> MutatorMessageRef<'msg> { #[doc(hidden)] #[allow(clippy::needless_pass_by_ref_mut)] // Sound construction requires mutable access. pub fn
new
identifier_name
upb.rs
NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //! UPB FFI wrapper code for use by Rust Protobuf. use crate::__internal::{Private, RawArena, RawMessage}; use std::alloc; use std::alloc::Layout; use std::cell::UnsafeCell; use std::fmt; use std::marker::PhantomData; use std::mem::MaybeUninit; use std::ops::Deref; use std::ptr::{self, NonNull}; use std::slice; /// See `upb/port/def.inc`. const UPB_MALLOC_ALIGN: usize = 8; /// A wrapper over a `upb_Arena`. /// /// This is not a safe wrapper per se, because the allocation functions still /// have sharp edges (see their safety docs for more info). /// /// This is an owning type and will automatically free the arena when /// dropped. /// /// Note that this type is neither `Sync` nor `Send`. #[derive(Debug)] pub struct Arena { // Safety invariant: this must always be a valid arena raw: RawArena, _not_sync: PhantomData<UnsafeCell<()>>, } extern "C" { // `Option<NonNull<T: Sized>>` is ABI-compatible with `*mut T` fn upb_Arena_New() -> Option<RawArena>; fn upb_Arena_Free(arena: RawArena); fn upb_Arena_Malloc(arena: RawArena, size: usize) -> *mut u8; fn upb_Arena_Realloc(arena: RawArena, ptr: *mut u8, old: usize, new: usize) -> *mut u8; } impl Arena { /// Allocates a fresh arena. #[inline] pub fn new() -> Self { #[inline(never)] #[cold] fn arena_new_failed() -> ! { panic!("Could not create a new UPB arena"); } // SAFETY: // - `upb_Arena_New` is assumed to be implemented correctly and always sound to // call; if it returned a non-null pointer, it is a valid arena. unsafe { let Some(raw) = upb_Arena_New() else { arena_new_failed() }; Self { raw, _not_sync: PhantomData } } } /// Returns the raw, UPB-managed pointer to the arena. #[inline] pub fn raw(&self) -> RawArena { self.raw } /// Allocates some memory on the arena. /// /// # Safety /// /// - `layout`'s alignment must be less than `UPB_MALLOC_ALIGN`. #[inline] pub unsafe fn alloc(&self, layout: Layout) -> &mut [MaybeUninit<u8>] { debug_assert!(layout.align() <= UPB_MALLOC_ALIGN); // SAFETY: `self.raw` is a valid UPB arena let ptr = unsafe { upb_Arena_Malloc(self.raw, layout.size()) }; if ptr.is_null() { alloc::handle_alloc_error(layout); } // SAFETY: // - `upb_Arena_Malloc` promises that if the return pointer is non-null, it is // dereferencable for `size` bytes and has an alignment of `UPB_MALLOC_ALIGN` // until the arena is destroyed. // - `[MaybeUninit<u8>]` has no alignment requirement, and `ptr` is aligned to a // `UPB_MALLOC_ALIGN` boundary. unsafe { slice::from_raw_parts_mut(ptr.cast(), layout.size()) } } /// Resizes some memory on the arena. /// /// # Safety /// /// - `ptr` must be the data pointer returned by a previous call to `alloc` /// or `resize` on `self`. /// - After calling this function, `ptr` is no longer dereferencable - it is /// zapped. /// - `old` must be the layout `ptr` was allocated with via `alloc` or /// `realloc`. /// - `new`'s alignment must be less than `UPB_MALLOC_ALIGN`. #[inline] pub unsafe fn resize(&self, ptr: *mut u8, old: Layout, new: Layout) -> &mut [MaybeUninit<u8>]
} impl Drop for Arena { #[inline] fn drop(&mut self) { unsafe { upb_Arena_Free(self.raw); } } } /// Serialized Protobuf wire format data. /// /// It's typically produced by `<Message>::serialize()`. pub struct SerializedData { data: NonNull<u8>, len: usize, // The arena that owns `data`. _arena: Arena, } impl SerializedData { /// Construct `SerializedData` from raw pointers and its owning arena. /// /// # Safety /// - `arena` must have allocated `data` /// - `data` must be readable for `len` bytes and not mutate while this /// struct exists pub unsafe fn from_raw_parts(arena: Arena, data: NonNull<u8>, len: usize) -> Self { SerializedData { _arena: arena, data, len } } /// Gets a raw slice pointer. pub fn as_ptr(&self) -> *const [u8] { ptr::slice_from_raw_parts(self.data.as_ptr(), self.len) } } impl Deref for SerializedData { type Target = [u8]; fn deref(&self) -> &Self::Target { // SAFETY: `data` is valid for `len` bytes as promised by // the caller of `SerializedData::from_raw_parts`. unsafe { slice::from_raw_parts(self.data.as_ptr(), self.len) } } } impl fmt::Debug for SerializedData { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(self.deref(), f) } } // TODO(b/293919363): Investigate replacing this with direct access to UPB bits. pub type BytesPresentMutData<'msg> = crate::vtable::RawVTableOptionalMutatorData<'msg, [u8]>; pub type BytesAbsentMutData<'msg> = crate::vtable::RawVTableOptionalMutatorData<'msg, [u8]>; pub type InnerBytesMut<'msg> = crate::vtable::RawVTableMutator<'msg, [u8]>; /// The raw contents of every generated message. #[derive(Debug)] pub struct MessageInner { pub msg: RawMessage, pub arena: Arena, } /// Mutators that point to their original message use this to do so. /// /// Since UPB expects runtimes to manage their own arenas, this needs to have /// access to an `Arena`. /// /// This has two possible designs: /// - Store two pointers here, `RawMessage` and `&'msg Arena`. This doesn't /// place any restriction on the layout of generated messages and their /// mutators. This makes a vtable-based mutator three pointers, which can no /// longer be returned in registers on most platforms. /// - Store one pointer here, `&'msg MessageInner`, where `MessageInner` stores /// a `RawMessage` and an `Arena`. This would require all generated messages /// to store `MessageInner`, and since their mutators need to be able to /// generate `BytesMut`, would also require `BytesMut` to store a `&'msg /// MessageInner` since they can't store an owned `Arena`. /// /// Note: even though this type is `Copy`, it should only be copied by /// protobuf internals that can maintain mutation invariants: /// /// - No concurrent mutation for any two fields in a message: this means /// mutators cannot be `Send` but are `Sync`. /// - If there are multiple accessible `Mut` to a single message at a time, they /// must be different fields
{ debug_assert!(new.align() <= UPB_MALLOC_ALIGN); // SAFETY: // - `self.raw` is a valid UPB arena // - `ptr` was allocated by a previous call to `alloc` or `realloc` as promised // by the caller. let ptr = unsafe { upb_Arena_Realloc(self.raw, ptr, old.size(), new.size()) }; if ptr.is_null() { alloc::handle_alloc_error(new); } // SAFETY: // - `upb_Arena_Realloc` promises that if the return pointer is non-null, it is // dereferencable for the new `size` in bytes until the arena is destroyed. // - `[MaybeUninit<u8>]` has no alignment requirement, and `ptr` is aligned to a // `UPB_MALLOC_ALIGN` boundary. unsafe { slice::from_raw_parts_mut(ptr.cast(), new.size()) } }
identifier_body
Content.js
_createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; } function
() { if (typeof Reflect === "undefined" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === "function") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } } /** @jsx jsx */ import React from 'react'; import { jsx } from '@emotion/core'; import rafSchedule from 'raf-schd'; import ScrollLock, { TouchScrollable } from 'react-scrolllock'; import { bodyStyles, Body as DefaultBody, keylineHeight, wrapperStyles } from '../styled/Content'; import Footer from './Footer'; import Header from './Header'; function getInitialState() { return { showFooterKeyline: false, showHeaderKeyline: false, showContentFocus: false, tabbableElements: [] }; } function mergeRefs(refs) { return function (value) { refs.forEach(function (ref) { if (typeof ref === 'function') { ref(value); } else if (ref != null) { ref.current = value; } }); }; } var Content = /*#__PURE__*/function (_React$Component) { _inherits(Content, _React$Component); var _super = _createSuper(Content); function Content() { var _this; _classCallCheck(this, Content); for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { args[_key] = arguments[_key]; } _this = _super.call.apply(_super, [this].concat(args)); _defineProperty(_assertThisInitialized(_this), "escapeIsHeldDown", false); _defineProperty(_assertThisInitialized(_this), "_isMounted", false); _defineProperty(_assertThisInitialized(_this), "scrollContainer", null); _defineProperty(_assertThisInitialized(_this), "state", getInitialState()); _defineProperty(_assertThisInitialized(_this), "determineKeylines", rafSchedule(function () { if (!_this.scrollContainer) { return; } var _this$scrollContainer = _this.scrollContainer, scrollTop = _this$scrollContainer.scrollTop, scrollHeight = _this$scrollContainer.scrollHeight, clientHeight = _this$scrollContainer.clientHeight; var scrollableDistance = scrollHeight - clientHeight; var showHeaderKeyline = scrollTop > keylineHeight; var showFooterKeyline = scrollTop <= scrollableDistance - keylineHeight; var showContentFocus = scrollHeight > clientHeight; _this.setState({ showHeaderKeyline: showHeaderKeyline, showFooterKeyline: showFooterKeyline, showContentFocus: showContentFocus }); })); _defineProperty(_assertThisInitialized(_this), "getScrollContainer", function (ref) { if (!ref) { return; } _this.scrollContainer = ref; }); _defineProperty(_assertThisInitialized(_this), "handleKeyUp", function () { _this.escapeIsHeldDown = false; }); _defineProperty(_assertThisInitialized(_this), "handleKeyDown", function (event) { var _this$props = _this.props, onClose = _this$props.onClose, shouldCloseOnEscapePress = _this$props.shouldCloseOnEscapePress, _this$props$stackInde = _this$props.stackIndex, stackIndex = _this$props$stackInde === void 0 ? 0 : _this$props$stackInde; var isEscapeKeyPressed = event.key === 'Escape'; // avoid consumers accidentally closing multiple modals if they hold escape. if (_this.escapeIsHeldDown) { return; } if (isEscapeKeyPressed) { _this.escapeIsHeldDown = true; } // only the foremost modal should be interactive. 
if (!_this._isMounted || stackIndex > 0) { return; } if (isEscapeKeyPressed && shouldCloseOnEscapePress) { onClose(event); } }); _defineProperty(_assertThisInitialized(_this), "handleStackChange", function (stackIndex) { var onStackChange = _this.props.onStackChange; if (onStackChange) { onStackChange(stackIndex); } }); return _this; } _createClass(Content, [{ key: "componentDidMount", value: function componentDidMount() { this._isMounted = true; document.addEventListener('keydown', this.handleKeyDown, false); document.addEventListener('keyup', this.handleKeyUp, false); if (this.scrollContainer) { var capturedScrollContainer = this.scrollContainer; window.addEventListener('resize', this.determineKeylines, false); capturedScrollContainer.addEventListener('scroll', this.determineKeylines, false); this.determineKeylines(); } /* eslint-disable no-console */ // Check for deprecated props if (this.props.header) { console.warn("@atlaskit/modal-dialog: Deprecation warning - Use of the header prop in ModalDialog is deprecated. Please compose your ModalDialog using the 'components' prop instead"); } if (this.props.footer) { console.warn("@atlaskit/modal-dialog: Deprecation warning - Use of the footer prop in ModalDialog is deprecated. Please compose your ModalDialog using the 'components' prop instead"); } if (this.props.body) { console.warn("@atlaskit/modal-dialog: Deprecation warning - Use of the body prop in ModalDialog is deprecated. Please compose your ModalDialog using the 'components' prop instead"); } // Check that custom body components have used ForwardRef to attach to a DOM element if (this.props.components.Body) { if (!(this.scrollContainer instanceof HTMLElement)) { console.warn('@atlaskit/modal-dialog: Warning - Ref must attach to a DOM element; check you are using forwardRef and attaching the ref to an appropriate element. Check the examples for more details.'); } } /* eslint-enable no-console */ } }, { key: "UNSAFE_componentWillReceiveProps", value: function UNSAFE_componentWillReceiveProps(nextProps) { var stackIndex = this.props.stackIndex; // update focus scope and let consumer know when stack index has changed if (nextProps.stackIndex && nextProps.stackIndex !== stackIndex) { this.handleStackChange(nextProps.stackIndex); } } }, { key: "componentWillUnmount", value: function componentWillUnmount() { this._isMounted = false; document.removeEventListener('keydown', this.handleKeyDown, false); document.removeEventListener('keyup', this.handleKeyUp, false); if (this.scrollContainer) { var capturedScrollContainer = this.scrollContainer; window.removeEventListener('resize', this.determineKeylines, false); capturedScrollContainer.removeEventListener('scroll', this.determineKeylines, false); } } }, { key: "render", value: function render() { var _this2 = this; var _this$props2 = this.props, actions = _this$props2.actions, appearance = _this$props2.appearance, DeprecatedBody = _this$props2.body, children = _this$props2.children, components = _this$props2.components, footer = _this$props2.footer, header = _this$props2.header, heading = _this$props2.heading, isChromeless = _this$props2.isChromeless, isHeadingMultiline = _this$props2.isHeadingMultiline, onClose = _this$props2.onClose, shouldScroll = _this$props2.shouldScroll, testId = _this$props2.testId, headingId = _this$props2.headingId; var _this$state = this.state, showFooterKeyline = _this$state.showFooterKeyline, showHeaderKeyline = _this$state.showHeaderKeyline, showContentFocus = _this$state.showContentFocus; var _components$Container = components.Container, Container = _components$Container === void 0 ? 'div' : _components$Container, CustomBody = components.Body; var Body = CustomBody || DeprecatedBody || DefaultBody; return jsx(Container, { css: wrapperStyles, "data-testid": testId }, isChromeless ? children : jsx(React.Fragment, null, jsx(Header, { id: headingId, appearance: appearance, component: components.Header ? components.Header : header, heading: heading, onClose: onClose, isHeadingMultiline: isHeadingMultiline, showKeyline: showHeaderKeyline, testId: testId }), this.scrollContainer instanceof HTMLElement ? jsx(TouchScrollable, null, function (touchRef) { return jsx(Body, _extends({ tabIndex: showContentFocus ? 0 : undefined, css: bodyStyles(should
_isNativeReflectConstruct
identifier_name
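The determineKeylines handler in the record above turns three scroll metrics into three booleans. A minimal Python sketch of the same arithmetic, for illustration only (keyline_height corresponds to the keylineHeight constant imported from '../styled/Content'; the value 2 used here is an assumption):

# Sketch of the determineKeylines arithmetic; not part of the original file.
def determine_keylines(scroll_top, scroll_height, client_height, keyline_height=2):
    scrollable_distance = scroll_height - client_height
    return {
        # header keyline shows once content has scrolled past the keyline
        "show_header_keyline": scroll_top > keyline_height,
        # footer keyline stays until the user is within keyline_height of the bottom
        "show_footer_keyline": scroll_top <= scrollable_distance - keyline_height,
        # body is made focusable only when it actually overflows
        "show_content_focus": scroll_height > client_height,
    }

print(determine_keylines(scroll_top=0, scroll_height=800, client_height=400))
# {'show_header_keyline': False, 'show_footer_keyline': True, 'show_content_focus': True}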
Content.js
'../styled/Content'; import Footer from './Footer'; import Header from './Header'; function getInitialState() { return { showFooterKeyline: false, showHeaderKeyline: false, showContentFocus: false, tabbableElements: [] }; } function mergeRefs(refs) { return function (value) { refs.forEach(function (ref) { if (typeof ref === 'function') { ref(value); } else if (ref != null) { ref.current = value; } }); }; } var Content = /*#__PURE__*/function (_React$Component) { _inherits(Content, _React$Component); var _super = _createSuper(Content); function Content() { var _this; _classCallCheck(this, Content); for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { args[_key] = arguments[_key]; } _this = _super.call.apply(_super, [this].concat(args)); _defineProperty(_assertThisInitialized(_this), "escapeIsHeldDown", false); _defineProperty(_assertThisInitialized(_this), "_isMounted", false); _defineProperty(_assertThisInitialized(_this), "scrollContainer", null); _defineProperty(_assertThisInitialized(_this), "state", getInitialState()); _defineProperty(_assertThisInitialized(_this), "determineKeylines", rafSchedule(function () { if (!_this.scrollContainer) { return; } var _this$scrollContainer = _this.scrollContainer, scrollTop = _this$scrollContainer.scrollTop, scrollHeight = _this$scrollContainer.scrollHeight, clientHeight = _this$scrollContainer.clientHeight; var scrollableDistance = scrollHeight - clientHeight; var showHeaderKeyline = scrollTop > keylineHeight; var showFooterKeyline = scrollTop <= scrollableDistance - keylineHeight; var showContentFocus = scrollHeight > clientHeight; _this.setState({ showHeaderKeyline: showHeaderKeyline, showFooterKeyline: showFooterKeyline, showContentFocus: showContentFocus }); })); _defineProperty(_assertThisInitialized(_this), "getScrollContainer", function (ref) { if (!ref) { return; } _this.scrollContainer = ref; }); _defineProperty(_assertThisInitialized(_this), "handleKeyUp", function () { _this.escapeIsHeldDown = false; }); _defineProperty(_assertThisInitialized(_this), "handleKeyDown", function (event) { var _this$props = _this.props, onClose = _this$props.onClose, shouldCloseOnEscapePress = _this$props.shouldCloseOnEscapePress, _this$props$stackInde = _this$props.stackIndex, stackIndex = _this$props$stackInde === void 0 ? 0 : _this$props$stackInde; var isEscapeKeyPressed = event.key === 'Escape'; // avoid consumers accidentally closing multiple modals if they hold escape. if (_this.escapeIsHeldDown) { return; } if (isEscapeKeyPressed) { _this.escapeIsHeldDown = true; } // only the foremost modal should be interactive. 
if (!_this._isMounted || stackIndex > 0) { return; } if (isEscapeKeyPressed && shouldCloseOnEscapePress) { onClose(event); } }); _defineProperty(_assertThisInitialized(_this), "handleStackChange", function (stackIndex) { var onStackChange = _this.props.onStackChange; if (onStackChange) { onStackChange(stackIndex); } }); return _this; } _createClass(Content, [{ key: "componentDidMount", value: function componentDidMount() { this._isMounted = true; document.addEventListener('keydown', this.handleKeyDown, false); document.addEventListener('keyup', this.handleKeyUp, false); if (this.scrollContainer) { var capturedScrollContainer = this.scrollContainer; window.addEventListener('resize', this.determineKeylines, false); capturedScrollContainer.addEventListener('scroll', this.determineKeylines, false); this.determineKeylines(); } /* eslint-disable no-console */ // Check for deprecated props if (this.props.header) { console.warn("@atlaskit/modal-dialog: Deprecation warning - Use of the header prop in ModalDialog is deprecated. Please compose your ModalDialog using the 'components' prop instead"); } if (this.props.footer) { console.warn("@atlaskit/modal-dialog: Deprecation warning - Use of the footer prop in ModalDialog is deprecated. Please compose your ModalDialog using the 'components' prop instead"); } if (this.props.body) { console.warn("@atlaskit/modal-dialog: Deprecation warning - Use of the body prop in ModalDialog is deprecated. Please compose your ModalDialog using the 'components' prop instead"); } // Check that custom body components have used ForwardRef to attach to a DOM element if (this.props.components.Body) { if (!(this.scrollContainer instanceof HTMLElement)) { console.warn('@atlaskit/modal-dialog: Warning - Ref must attach to a DOM element; check you are using forwardRef and attaching the ref to an appropriate element. Check the examples for more details.'); } } /* eslint-enable no-console */ } }, { key: "UNSAFE_componentWillReceiveProps", value: function UNSAFE_componentWillReceiveProps(nextProps) { var stackIndex = this.props.stackIndex; // update focus scope and let consumer know when stack index has changed if (nextProps.stackIndex && nextProps.stackIndex !== stackIndex) { this.handleStackChange(nextProps.stackIndex); } } }, { key: "componentWillUnmount", value: function componentWillUnmount() { this._isMounted = false; document.removeEventListener('keydown', this.handleKeyDown, false); document.removeEventListener('keyup', this.handleKeyUp, false); if (this.scrollContainer) { var capturedScrollContainer = this.scrollContainer; window.removeEventListener('resize', this.determineKeylines, false); capturedScrollContainer.removeEventListener('scroll', this.determineKeylines, false); } } }, { key: "render", value: function render() { var _this2 = this; var _this$props2 = this.props, actions = _this$props2.actions, appearance = _this$props2.appearance, DeprecatedBody = _this$props2.body, children = _this$props2.children, components = _this$props2.components, footer = _this$props2.footer, header = _this$props2.header, heading = _this$props2.heading, isChromeless = _this$props2.isChromeless, isHeadingMultiline = _this$props2.isHeadingMultiline, onClose = _this$props2.onClose, shouldScroll = _this$props2.shouldScroll, testId = _this$props2.testId, headingId = _this$props2.headingId; var _this$state = this.state, showFooterKeyline = _this$state.showFooterKeyline, showHeaderKeyline = _this$state.showHeaderKeyline, showContentFocus = _this$state.showContentFocus; var _components$Container = components.Container, Container = _components$Container === void 0 ? 'div' : _components$Container, CustomBody = components.Body; var Body = CustomBody || DeprecatedBody || DefaultBody; return jsx(Container, { css: wrapperStyles, "data-testid": testId }, isChromeless ? children : jsx(React.Fragment, null, jsx(Header, { id: headingId, appearance: appearance, component: components.Header ? components.Header : header, heading: heading, onClose: onClose, isHeadingMultiline: isHeadingMultiline, showKeyline: showHeaderKeyline, testId: testId }), this.scrollContainer instanceof HTMLElement ? jsx(TouchScrollable, null, function (touchRef) { return jsx(Body, _extends({ tabIndex: showContentFocus ? 0 : undefined, css: bodyStyles(shouldScroll) }, !Body.hasOwnProperty('styledComponentId') ? { ref: mergeRefs([touchRef, _this2.getScrollContainer]) } : { innerRef: mergeRefs([touchRef, _this2.getScrollContainer]) }), children); }) : jsx(Body, _extends({ tabIndex: showContentFocus ? 0 : undefined, css: bodyStyles(shouldScroll) }, !Body.hasOwnProperty('styledComponentId') ? { ref: this.getScrollContainer } : { innerRef: this.getScrollContainer }), children), jsx(Footer, { actions: actions, appearance: appearance, component: components.Footer ? components.Footer : footer, onClose: onClose, showKeyline: showFooterKeyline })), jsx(ScrollLock, null)); } }]); return Content; }(React.Component); _defineProperty(Content, "defaultProps", { autoFocus: false, components: {}, isChromeless: false, stackIndex: 0, isHeadingMultiline: true
}); export { Content as default };
random_line_split
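handleKeyDown and handleKeyUp together implement a hold-down guard: a held Escape key fires one close, not one per key repeat, and only the foremost modal (stack index 0) reacts. A rough Python analogue of that state machine (names are hypothetical, for illustration only):

# Sketch of the escape hold-down guard; not the component's actual API.
class EscapeGuard:
    def __init__(self, on_close, stack_index=0, close_on_escape=True):
        self.escape_is_held_down = False
        self.on_close = on_close
        self.stack_index = stack_index
        self.close_on_escape = close_on_escape

    def key_down(self, key):
        if self.escape_is_held_down:
            return  # key auto-repeat while held: ignore
        if key == "Escape":
            self.escape_is_held_down = True
        if self.stack_index > 0:
            return  # only the foremost modal is interactive
        if key == "Escape" and self.close_on_escape:
            self.on_close()

    def key_up(self, key):
        self.escape_is_held_down = False

guard = EscapeGuard(on_close=lambda: print("closed"))
guard.key_down("Escape")  # prints "closed"
guard.key_down("Escape")  # ignored until key_up resets the flag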
Content.js
createSuper(Derived)
function _isNativeReflectConstruct() { if (typeof Reflect === "undefined" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === "function") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } } /** @jsx jsx */ import React from 'react'; import { jsx } from '@emotion/core'; import rafSchedule from 'raf-schd'; import ScrollLock, { TouchScrollable } from 'react-scrolllock'; import { bodyStyles, Body as DefaultBody, keylineHeight, wrapperStyles } from '../styled/Content'; import Footer from './Footer'; import Header from './Header'; function getInitialState() { return { showFooterKeyline: false, showHeaderKeyline: false, showContentFocus: false, tabbableElements: [] }; } function mergeRefs(refs) { return function (value) { refs.forEach(function (ref) { if (typeof ref === 'function') { ref(value); } else if (ref != null) { ref.current = value; } }); }; } var Content = /*#__PURE__*/function (_React$Component) { _inherits(Content, _React$Component); var _super = _createSuper(Content); function Content() { var _this; _classCallCheck(this, Content); for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { args[_key] = arguments[_key]; } _this = _super.call.apply(_super, [this].concat(args)); _defineProperty(_assertThisInitialized(_this), "escapeIsHeldDown", false); _defineProperty(_assertThisInitialized(_this), "_isMounted", false); _defineProperty(_assertThisInitialized(_this), "scrollContainer", null); _defineProperty(_assertThisInitialized(_this), "state", getInitialState()); _defineProperty(_assertThisInitialized(_this), "determineKeylines", rafSchedule(function () { if (!_this.scrollContainer) { return; } var _this$scrollContainer = _this.scrollContainer, scrollTop = _this$scrollContainer.scrollTop, scrollHeight = _this$scrollContainer.scrollHeight, clientHeight = _this$scrollContainer.clientHeight; var scrollableDistance = scrollHeight - clientHeight; var showHeaderKeyline = scrollTop > keylineHeight; var showFooterKeyline = scrollTop <= scrollableDistance - keylineHeight; var showContentFocus = scrollHeight > clientHeight; _this.setState({ showHeaderKeyline: showHeaderKeyline, showFooterKeyline: showFooterKeyline, showContentFocus: showContentFocus }); })); _defineProperty(_assertThisInitialized(_this), "getScrollContainer", function (ref) { if (!ref) { return; } _this.scrollContainer = ref; }); _defineProperty(_assertThisInitialized(_this), "handleKeyUp", function () { _this.escapeIsHeldDown = false; }); _defineProperty(_assertThisInitialized(_this), "handleKeyDown", function (event) { var _this$props = _this.props, onClose = _this$props.onClose, shouldCloseOnEscapePress = _this$props.shouldCloseOnEscapePress, _this$props$stackInde = _this$props.stackIndex, stackIndex = _this$props$stackInde === void 0 ? 0 : _this$props$stackInde; var isEscapeKeyPressed = event.key === 'Escape'; // avoid consumers accidentally closing multiple modals if they hold escape. if (_this.escapeIsHeldDown) { return; } if (isEscapeKeyPressed) { _this.escapeIsHeldDown = true; } // only the foremost modal should be interactive. 
if (!_this._isMounted || stackIndex > 0) { return; } if (isEscapeKeyPressed && shouldCloseOnEscapePress) { onClose(event); } }); _defineProperty(_assertThisInitialized(_this), "handleStackChange", function (stackIndex) { var onStackChange = _this.props.onStackChange; if (onStackChange) { onStackChange(stackIndex); } }); return _this; } _createClass(Content, [{ key: "componentDidMount", value: function componentDidMount() { this._isMounted = true; document.addEventListener('keydown', this.handleKeyDown, false); document.addEventListener('keyup', this.handleKeyUp, false); if (this.scrollContainer) { var capturedScrollContainer = this.scrollContainer; window.addEventListener('resize', this.determineKeylines, false); capturedScrollContainer.addEventListener('scroll', this.determineKeylines, false); this.determineKeylines(); } /* eslint-disable no-console */ // Check for deprecated props if (this.props.header) { console.warn("@atlaskit/modal-dialog: Deprecation warning - Use of the header prop in ModalDialog is deprecated. Please compose your ModalDialog using the 'components' prop instead"); } if (this.props.footer) { console.warn("@atlaskit/modal-dialog: Deprecation warning - Use of the footer prop in ModalDialog is deprecated. Please compose your ModalDialog using the 'components' prop instead"); } if (this.props.body) { console.warn("@atlaskit/modal-dialog: Deprecation warning - Use of the body prop in ModalDialog is deprecated. Please compose your ModalDialog using the 'components' prop instead"); } // Check that custom body components have used ForwardRef to attach to a DOM element if (this.props.components.Body) { if (!(this.scrollContainer instanceof HTMLElement)) { console.warn('@atlaskit/modal-dialog: Warning - Ref must attach to a DOM element; check you are using forwardRef and attaching the ref to an appropriate element. Check the examples for more details.'); } } /* eslint-enable no-console */ } }, { key: "UNSAFE_componentWillReceiveProps", value: function UNSAFE_componentWillReceiveProps(nextProps) { var stackIndex = this.props.stackIndex; // update focus scope and let consumer know when stack index has changed if (nextProps.stackIndex && nextProps.stackIndex !== stackIndex) { this.handleStackChange(nextProps.stackIndex); } } }, { key: "componentWillUnmount", value: function componentWillUnmount() { this._isMounted = false; document.removeEventListener('keydown', this.handleKeyDown, false); document.removeEventListener('keyup', this.handleKeyUp, false); if (this.scrollContainer) { var capturedScrollContainer = this.scrollContainer; window.removeEventListener('resize', this.determineKeylines, false); capturedScrollContainer.removeEventListener('scroll', this.determineKeylines, false); } } }, { key: "render", value: function render() { var _this2 = this; var _this$props2 = this.props, actions = _this$props2.actions, appearance = _this$props2.appearance, DeprecatedBody = _this$props2.body, children = _this$props2.children, components = _this$props2.components, footer = _this$props2.footer, header = _this$props2.header, heading = _this$props2.heading, isChromeless = _this$props2.isChromeless, isHeadingMultiline = _this$props2.isHeadingMultiline, onClose = _this$props2.onClose, shouldScroll = _this$props2.shouldScroll, testId = _this$props2.testId, headingId = _this$props2.headingId; var _this$state = this.state, showFooterKeyline = _this$state.showFooterKeyline, showHeaderKeyline = _this$state.showHeaderKeyline, showContentFocus = _this$state.showContentFocus; var _components$Container = components.Container, Container = _components$Container === void 0 ? 'div' : _components$Container, CustomBody = components.Body; var Body = CustomBody || DeprecatedBody || DefaultBody; return jsx(Container, { css: wrapperStyles, "data-testid": testId }, isChromeless ? children : jsx(React.Fragment, null, jsx(Header, { id: headingId, appearance: appearance, component: components.Header ? components.Header : header, heading: heading, onClose: onClose, isHeadingMultiline: isHeadingMultiline, showKeyline: showHeaderKeyline, testId: testId }), this.scrollContainer instanceof HTMLElement ? jsx(TouchScrollable, null, function (touchRef) { return jsx(Body, _extends({ tabIndex: showContentFocus ? 0 : undefined, css: bodyStyles(
{ var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }
identifier_body
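mergeRefs, defined near the top of each record, fans a single DOM node out to several refs, each of which may be a callback or an object carrying a current attribute. The same idea in Python (an illustrative analogue, not React code):

# Sketch of mergeRefs: hand one value to every callback or object ref.
def merge_refs(refs):
    def apply(value):
        for ref in refs:
            if callable(ref):
                ref(value)            # callback ref
            elif ref is not None:
                ref.current = value   # object ref
    return apply

class Ref:
    current = None

touch_ref = Ref()
set_refs = merge_refs([touch_ref, lambda v: print("callback saw", v)])
set_refs("dom-node")
print(touch_ref.current)  # dom-node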
Content.js
createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; } function _isNativeReflectConstruct() { if (typeof Reflect === "undefined" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === "function") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } } /** @jsx jsx */ import React from 'react'; import { jsx } from '@emotion/core'; import rafSchedule from 'raf-schd'; import ScrollLock, { TouchScrollable } from 'react-scrolllock'; import { bodyStyles, Body as DefaultBody, keylineHeight, wrapperStyles } from '../styled/Content'; import Footer from './Footer'; import Header from './Header'; function getInitialState() { return { showFooterKeyline: false, showHeaderKeyline: false, showContentFocus: false, tabbableElements: [] }; } function mergeRefs(refs) { return function (value) { refs.forEach(function (ref) { if (typeof ref === 'function') { ref(value); } else if (ref != null) { ref.current = value; } }); }; } var Content = /*#__PURE__*/function (_React$Component) { _inherits(Content, _React$Component); var _super = _createSuper(Content); function Content() { var _this; _classCallCheck(this, Content); for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { args[_key] = arguments[_key]; } _this = _super.call.apply(_super, [this].concat(args)); _defineProperty(_assertThisInitialized(_this), "escapeIsHeldDown", false); _defineProperty(_assertThisInitialized(_this), "_isMounted", false); _defineProperty(_assertThisInitialized(_this), "scrollContainer", null); _defineProperty(_assertThisInitialized(_this), "state", getInitialState()); _defineProperty(_assertThisInitialized(_this), "determineKeylines", rafSchedule(function () { if (!_this.scrollContainer) { return; } var _this$scrollContainer = _this.scrollContainer, scrollTop = _this$scrollContainer.scrollTop, scrollHeight = _this$scrollContainer.scrollHeight, clientHeight = _this$scrollContainer.clientHeight; var scrollableDistance = scrollHeight - clientHeight; var showHeaderKeyline = scrollTop > keylineHeight; var showFooterKeyline = scrollTop <= scrollableDistance - keylineHeight; var showContentFocus = scrollHeight > clientHeight; _this.setState({ showHeaderKeyline: showHeaderKeyline, showFooterKeyline: showFooterKeyline, showContentFocus: showContentFocus }); })); _defineProperty(_assertThisInitialized(_this), "getScrollContainer", function (ref) { if (!ref) { return; } _this.scrollContainer = ref; }); _defineProperty(_assertThisInitialized(_this), "handleKeyUp", function () { _this.escapeIsHeldDown = false; }); _defineProperty(_assertThisInitialized(_this), "handleKeyDown", function (event) { var _this$props = _this.props, onClose = _this$props.onClose, shouldCloseOnEscapePress = _this$props.shouldCloseOnEscapePress, _this$props$stackInde = _this$props.stackIndex, stackIndex = _this$props$stackInde === void 0 ? 0 : _this$props$stackInde; var isEscapeKeyPressed = event.key === 'Escape'; // avoid consumers accidentally closing multiple modals if they hold escape. 
if (_this.escapeIsHeldDown) { return; } if (isEscapeKeyPressed) { _this.escapeIsHeldDown = true; } // only the foremost modal should be interactive. if (!_this._isMounted || stackIndex > 0) { return; } if (isEscapeKeyPressed && shouldCloseOnEscapePress) { onClose(event); } }); _defineProperty(_assertThisInitialized(_this), "handleStackChange", function (stackIndex) { var onStackChange = _this.props.onStackChange; if (onStackChange) { onStackChange(stackIndex); } }); return _this; } _createClass(Content, [{ key: "componentDidMount", value: function componentDidMount() { this._isMounted = true; document.addEventListener('keydown', this.handleKeyDown, false); document.addEventListener('keyup', this.handleKeyUp, false); if (this.scrollContainer) { var capturedScrollContainer = this.scrollContainer; window.addEventListener('resize', this.determineKeylines, false); capturedScrollContainer.addEventListener('scroll', this.determineKeylines, false); this.determineKeylines(); } /* eslint-disable no-console */ // Check for deprecated props if (this.props.header) { console.warn("@atlaskit/modal-dialog: Deprecation warning - Use of the header prop in ModalDialog is deprecated. Please compose your ModalDialog using the 'components' prop instead"); } if (this.props.footer) { console.warn("@atlaskit/modal-dialog: Deprecation warning - Use of the footer prop in ModalDialog is deprecated. Please compose your ModalDialog using the 'components' prop instead"); } if (this.props.body) { console.warn("@atlaskit/modal-dialog: Deprecation warning - Use of the body prop in ModalDialog is deprecated. Please compose your ModalDialog using the 'components' prop instead"); } // Check that custom body components have used ForwardRef to attach to a DOM element if (this.props.components.Body) { if (!(this.scrollContainer instanceof HTMLElement)) { console.warn('@atlaskit/modal-dialog: Warning - Ref must attach to a DOM element; check you are using forwardRef and attaching the ref to an appropriate element. Check the examples for more details.'); } } /* eslint-enable no-console */ } }, { key: "UNSAFE_componentWillReceiveProps", value: function UNSAFE_componentWillReceiveProps(nextProps) { var stackIndex = this.props.stackIndex; // update focus scope and let consumer know when stack index has changed if (nextProps.stackIndex && nextProps.stackIndex !== stackIndex)
} }, { key: "componentWillUnmount", value: function componentWillUnmount() { this._isMounted = false; document.removeEventListener('keydown', this.handleKeyDown, false); document.removeEventListener('keyup', this.handleKeyUp, false); if (this.scrollContainer) { var capturedScrollContainer = this.scrollContainer; window.removeEventListener('resize', this.determineKeylines, false); capturedScrollContainer.removeEventListener('scroll', this.determineKeylines, false); } } }, { key: "render", value: function render() { var _this2 = this; var _this$props2 = this.props, actions = _this$props2.actions, appearance = _this$props2.appearance, DeprecatedBody = _this$props2.body, children = _this$props2.children, components = _this$props2.components, footer = _this$props2.footer, header = _this$props2.header, heading = _this$props2.heading, isChromeless = _this$props2.isChromeless, isHeadingMultiline = _this$props2.isHeadingMultiline, onClose = _this$props2.onClose, shouldScroll = _this$props2.shouldScroll, testId = _this$props2.testId, headingId = _this$props2.headingId; var _this$state = this.state, showFooterKeyline = _this$state.showFooterKeyline, showHeaderKeyline = _this$state.showHeaderKeyline, showContentFocus = _this$state.showContentFocus; var _components$Container = components.Container, Container = _components$Container === void 0 ? 'div' : _components$Container, CustomBody = components.Body; var Body = CustomBody || DeprecatedBody || DefaultBody; return jsx(Container, { css: wrapperStyles, "data-testid": testId }, isChromeless ? children : jsx(React.Fragment, null, jsx(Header, { id: headingId, appearance: appearance, component: components.Header ? components.Header : header, heading: heading, onClose: onClose, isHeadingMultiline: isHeadingMultiline, showKeyline: showHeaderKeyline, testId: testId }), this.scrollContainer instanceof HTMLElement ? jsx(TouchScrollable, null, function (touchRef) { return jsx(Body, _extends({ tabIndex: showContentFocus ? 0 : undefined, css: bodyStyles(
{ this.handleStackChange(nextProps.stackIndex); }
conditional_block
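When rendering Body, the component probes hasOwnProperty('styledComponentId') to decide between the modern ref prop and the legacy styled-components innerRef prop. That dispatch, sketched in Python (hasattr stands in for the hasOwnProperty check; illustration only):

# Sketch of the ref/innerRef dispatch used in render().
def ref_prop(body_component, ref):
    if hasattr(body_component, "styledComponentId"):
        return {"innerRef": ref}  # legacy styled-components API
    return {"ref": ref}           # plain or forwardRef component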
datascraper_whoscored.py
.whoscored.com%s" % link) time.sleep(2) # let the link load soup = BeautifulSoup(driver.page_source, 'lxml') date = soup.find_all('dd')[4] # [3].find('dl').find_all('dd') score = soup.find_all('dd')[2] scores = [int(s) for s in score.text.split() if s.isdigit()] teams = [] teams_link = soup.find_all('a', class_='team-link') for team in teams_link: if team.text not in teams: teams.append(team.text) stats.append(date.text) stats.append(link) stats += teams stats += scores options_header = driver.find_element_by_xpath("//*[@id='live-chart-stats-options']") options = options_header.find_elements_by_tag_name('li') match_report_stat = [] match_report_stat = extract_report_data(match_report_stat, 'live-goals-info', "//div[@id='live-goals-content']//div[@class='stat']") # gather passing data options[1].click() wait = WebDriverWait(driver,10) wait.until(ec.visibility_of_all_elements_located((By.XPATH,"//div[@id='live-passes-content']//div[@class='stat']"))) # Get box for live score content to click on match_report_stat = extract_report_data(match_report_stat, 'live-passes-info', "//div[@id='live-passes-content']//div[@class='stat']") # gather aggression data options[2].click() # wait = WebDriverWait(driver, 10) # wait.until(ec.presence_of_all_elements_located((By.ID, "live-aggression"))) time.sleep(1) soup = BeautifulSoup(driver.page_source, 'lxml') # print(soup.prettify()) aggression_div = soup.find(id='live-aggression') for stat in aggression_div.find_all('div', class_='stat'): for span in stat.find_all('span', class_='stat-value'): match_report_stat.append(span.text) stats += match_report_stat # Match centre data collection match_centre_button = driver.find_element_by_xpath("//*[@id='sub-navigation']/ul/li[4]") match_centre_button.click() time.sleep(1) # wait = WebDriverWait(driver, 10) # wait.until(ec.invisibility_of_element_located((By.XPATH,"//*[@class='match-centre-stat has-stats selected']"))) # Total shots get data # total_shots_more_button = driver.find_element_by_xpath("//*[@id='match-centre-stats']/div[1]/ul[1]/li[2]/div[2]") # total_shots_more_button.click() soup = BeautifulSoup(driver.page_source, 'lxml') stat_box = soup.find_all('li', class_='match-centre-stat match-centre-sub-stat') match_centre_stat = [] for s in stat_box: for p in s.find_all('span', class_="match-centre-stat-value"): match_centre_stat.append(p.text) match_centre_stat = match_centre_stat[:62] if DEBUG: print("match centre stats: " + str(match_centre_stat)) stats += match_centre_stat return stats def extract_report_data(match_report_stat, info_panel_id, clickable_xpath): # Get the page and now scrap the data from the bottom panel
match_report_stat.append(span.text) return match_report_stat if len(sys.argv) == 3: # Parameters to write the data to links_csv = sys.argv[1] # "data/whoscored/match-links.csv" filepath = sys.argv[2] # leaguename = 'premierleague' # filepath = 'data/whoscored/%s-%d.csv' % (leaguename, 20182019) chromepath = "chromedriver.exe" driver = webdriver.Chrome(chromepath) driver.maximize_window() # accept cookies once driver.get("https://www.whoscored.com/") wait = WebDriverWait(driver, 10) wait.until(ec.element_to_be_clickable((By.XPATH, "//div[@class='qc-cmp2-summary-buttons']/button[2]"))) # accept cookies cookie_button = driver.find_element_by_xpath("//div[@class='qc-cmp2-summary-buttons']/button[2]") cookie_button.click() match_links = [] # Load match links with open(links_csv, 'r+', newline='') as myfile: csv_reader = csv.reader(myfile, delimiter=',') for row in csv_reader: match_links = row print(len(match_links)) matches = [] all_match_stats = [] columns = ["date", "link", "home team", "away team", "home score", "away score", "home total shots", "away total shots", "home total goals", "away total goals", "home total conversion rate", "away total conversion rate", "home open play shots", "away open play shots", "home open play goals", "away open play goals", "home open play conversion rate", "away open play conversion rate", "home set piece shots", "away set piece shots", "home set piece goals", "away set piece goals", "home set piece conversion", "away set piece conversion", "home counter attack shots", "away counter attack shots", "home counter attack goals", "away counter attack goals", "home counter attack conversion", "away counter attack conversion", "home penalty shots", "away penalty shots", "home penalty goals", "away penalty goals", "home penalty conversion", "away penalty conversion", "home own goals shots", "away own goals shots", "home own goals goals", "away own goals goals", "home own goals conversion", "away own goals conversion", "home total passes", "away total passes", "home total average pass streak", "away total average pass streak", "home crosses", "away crosses", "home crosses average pass streak", "away crosses average pass streak", "home through balls", "away through balls", "home through balls average streak", "away through balls average streak", "home long balls", "away long balls", "home long balls average streak", "away long balls average streak", "home short passes", "away short passes", "home short passes average streak", "away short passes average streak", "home cards", "away cards", "home fouls", "away fouls", "home unprofessional", "away unprofessional", "home dive", "away dive", "home other", "away other", "home red cards", "away red cards", "home yellow cards", "away yellow cards", "home cards per foul", "away cards per foul", "home fouls", "away fouls", "home total shots", "away total shots", "home woodwork", "away woodwork", "home shots on target", "away shots on target", "home shots off target", "away shots off target", "home shots blocked", "away shots blocked", "home possession", "away possession", "home touches", "away touches", "home passes success", "away passes success", "home total passes", "away total passes", "home accurate passes", "away accurate passes", "home key passes", "away key passes", "home dribbles won", "away dribbles won", "home dribbles attempted", "away dribbles attempted", "home dribbled past", "away dribbled past", "home dribble success", "away dribble success", "home aerials won", "away aerials won", "home aerials won%", "away 
aerials won%", "home offensive aerials", "away offensive aerials", "home defensive aerials", "away defensive aerials", "home successful tackles", "away successful tackles", "home tackles attempted", "away tackles attempted", "home was dribbled", "away was dribbled", "home tackles success %", "away tackles success %", "home clearances", "away clearances", "home interceptions", "away interceptions", "home corners", "away corners", "home corner accuracy", "away corner accuracy", "home dispossessed", "away dispossessed", "home errors", "away errors", "home fouls", "away fouls", "home offsides", "away
soup = BeautifulSoup(driver.page_source, 'lxml') info_div = soup.find('div', id=info_panel_id) for stat in info_div.find_all('div', class_='stat'): for span in stat.find_all('span', class_='stat-value'): match_report_stat.append(span.text) clickable_fields = driver.find_elements_by_xpath(clickable_xpath) for clickable_stat in clickable_fields: # click on every stat for more details clickable_stat.click() wait = WebDriverWait(driver, 10) wait.until(ec.visibility_of_all_elements_located((By.ID, info_panel_id))) # Get the page and now scrape the data from the bottom panel soup = BeautifulSoup(driver.page_source, 'lxml') info_div = soup.find('div', id=info_panel_id) for stat in info_div.find_all('div', class_='stat'): for span in stat.find_all('span', class_='stat-value'):
identifier_body
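extract_report_data scrapes the visible stat values from a panel, then clicks each clickable stat, waits for the panel to refresh, and scrapes again. The call pattern used for the goals and passes panels earlier in the file, repeated here as a sketch (driver is the module-level Selenium WebDriver, as in the source):

# Sketch: driving extract_report_data for two match-report panels.
match_report_stat = []
match_report_stat = extract_report_data(
    match_report_stat, 'live-goals-info',
    "//div[@id='live-goals-content']//div[@class='stat']")
match_report_stat = extract_report_data(
    match_report_stat, 'live-passes-info',
    "//div[@id='live-passes-content']//div[@class='stat']")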
datascraper_whoscored.py
.whoscored.com%s" % link) time.sleep(2) # let the link load soup = BeautifulSoup(driver.page_source, 'lxml') date = soup.find_all('dd')[4] # [3].find('dl').find_all('dd') score = soup.find_all('dd')[2] scores = [int(s) for s in score.text.split() if s.isdigit()] teams = [] teams_link = soup.find_all('a', class_='team-link') for team in teams_link: if team.text not in teams: teams.append(team.text) stats.append(date.text) stats.append(link) stats += teams stats += scores options_header = driver.find_element_by_xpath("//*[@id='live-chart-stats-options']") options = options_header.find_elements_by_tag_name('li') match_report_stat = [] match_report_stat = extract_report_data(match_report_stat, 'live-goals-info', "//div[@id='live-goals-content']//div[@class='stat']") # gather passing data options[1].click() wait = WebDriverWait(driver,10) wait.until(ec.visibility_of_all_elements_located((By.XPATH,"//div[@id='live-passes-content']//div[@class='stat']"))) # Get box for live score content to click on match_report_stat = extract_report_data(match_report_stat, 'live-passes-info', "//div[@id='live-passes-content']//div[@class='stat']") # gather aggression data options[2].click() # wait = WebDriverWait(driver, 10) # wait.until(ec.presence_of_all_elements_located((By.ID, "live-aggression"))) time.sleep(1) soup = BeautifulSoup(driver.page_source, 'lxml') # print(soup.prettify()) aggression_div = soup.find(id='live-aggression') for stat in aggression_div.find_all('div', class_='stat'): for span in stat.find_all('span', class_='stat-value'): match_report_stat.append(span.text) stats += match_report_stat # Match centre data collection match_centre_button = driver.find_element_by_xpath("//*[@id='sub-navigation']/ul/li[4]") match_centre_button.click() time.sleep(1) # wait = WebDriverWait(driver, 10) # wait.until(ec.invisibility_of_element_located((By.XPATH,"//*[@class='match-centre-stat has-stats selected']"))) # Total shots get data # total_shots_more_button = driver.find_element_by_xpath("//*[@id='match-centre-stats']/div[1]/ul[1]/li[2]/div[2]") # total_shots_more_button.click() soup = BeautifulSoup(driver.page_source, 'lxml') stat_box = soup.find_all('li', class_='match-centre-stat match-centre-sub-stat') match_centre_stat = [] for s in stat_box: for p in s.find_all('span', class_="match-centre-stat-value"): match_centre_stat.append(p.text) match_centre_stat = match_centre_stat[:62] if DEBUG: print("match centre stats: " + str(match_centre_stat)) stats += match_centre_stat return stats def extract_report_data(match_report_stat, info_panel_id, clickable_xpath): # Get the page and now scrap the data from the bottom panel soup = BeautifulSoup(driver.page_source, 'lxml') info_div = soup.find('div', id=info_panel_id) for stat in info_div.find_all('div', class_='stat'): for span in stat.find_all('span', class_='stat-value'): match_report_stat.append(span.text) clickable_fields = driver.find_elements_by_xpath(clickable_xpath) for clickable_stat in clickable_fields: # click on every stat for more details clickable_stat.click() wait = WebDriverWait(driver, 10) wait.until(ec.visibility_of_all_elements_located((By.ID, info_panel_id))) # Get the page and now scrap the data from the bottom panel soup = BeautifulSoup(driver.page_source, 'lxml') info_div = soup.find('div', id=info_panel_id) for stat in info_div.find_all('div', class_='stat'): for span in stat.find_all('span', class_='stat-value'):
return match_report_stat if len(sys.argv) == 3: # Parameters to write the data to links_csv = sys.argv[1] # "data/whoscored/match-links.csv" filepath = sys.argv[2] # leaguename = 'premierleague' # filepath = 'data/whoscored/%s-%d.csv' % (leaguename, 20182019) chromepath = "chromedriver.exe" driver = webdriver.Chrome(chromepath) driver.maximize_window() # accept cookies once driver.get("https://www.whoscored.com/") wait = WebDriverWait(driver, 10) wait.until(ec.element_to_be_clickable((By.XPATH, "//div[@class='qc-cmp2-summary-buttons']/button[2]"))) # accept cookies cookie_button = driver.find_element_by_xpath("//div[@class='qc-cmp2-summary-buttons']/button[2]") cookie_button.click() match_links = [] # Load match links with open(links_csv, 'r+', newline='') as myfile: csv_reader = csv.reader(myfile, delimiter=',') for row in csv_reader: match_links = row print(len(match_links)) matches = [] all_match_stats = [] columns = ["date", "link", "home team", "away team", "home score", "away score", "home total shots", "away total shots", "home total goals", "away total goals", "home total conversion rate", "away total conversion rate", "home open play shots", "away open play shots", "home open play goals", "away open play goals", "home open play conversion rate", "away open play conversion rate", "home set piece shots", "away set piece shots", "home set piece goals", "away set piece goals", "home set piece conversion", "away set piece conversion", "home counter attack shots", "away counter attack shots", "home counter attack goals", "away counter attack goals", "home counter attack conversion", "away counter attack conversion", "home penalty shots", "away penalty shots", "home penalty goals", "away penalty goals", "home penalty conversion", "away penalty conversion", "home own goals shots", "away own goals shots", "home own goals goals", "away own goals goals", "home own goals conversion", "away own goals conversion", "home total passes", "away total passes", "home total average pass streak", "away total average pass streak", "home crosses", "away crosses", "home crosses average pass streak", "away crosses average pass streak", "home through balls", "away through balls", "home through balls average streak", "away through balls average streak", "home long balls", "away long balls", "home long balls average streak", "away long balls average streak", "home short passes", "away short passes", "home short passes average streak", "away short passes average streak", "home cards", "away cards", "home fouls", "away fouls", "home unprofessional", "away unprofessional", "home dive", "away dive", "home other", "away other", "home red cards", "away red cards", "home yellow cards", "away yellow cards", "home cards per foul", "away cards per foul", "home fouls", "away fouls", "home total shots", "away total shots", "home woodwork", "away woodwork", "home shots on target", "away shots on target", "home shots off target", "away shots off target", "home shots blocked", "away shots blocked", "home possession", "away possession", "home touches", "away touches", "home passes success", "away passes success", "home total passes", "away total passes", "home accurate passes", "away accurate passes", "home key passes", "away key passes", "home dribbles won", "away dribbles won", "home dribbles attempted", "away dribbles attempted", "home dribbled past", "away dribbled past", "home dribble success", "away dribble success", "home aerials won", "away aerials won", "home aerials won%", "away aerials won%", "home offensive 
aerials", "away offensive aerials", "home defensive aerials", "away defensive aerials", "home successful tackles", "away successful tackles", "home tackles attempted", "away tackles attempted", "home was dribbled", "away was dribbled", "home tackles success %", "away tackles success %", "home clearances", "away clearances", "home interceptions", "away interceptions", "home corners", "away corners", "home corner accuracy", "away corner accuracy", "home dispossessed", "away dispossessed", "home errors", "away errors", "home fouls", "away fouls", "home offsides", "
match_report_stat.append(span.text)
conditional_block
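Each scraped match yields a flat stats list that is positionally aligned with the long columns list, so any off-by-one silently corrupts every later field. A small sanity-check helper, sketched (hypothetical; the source itself appends raw lists to all_match_stats):

# Sketch: pair one match's flat stats list with the column names.
def stats_to_row(columns, stats):
    if len(stats) != len(columns):
        raise ValueError("expected %d values, got %d" % (len(columns), len(stats)))
    return dict(zip(columns, stats))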
datascraper_whoscored.py
.whoscored.com%s" % link) time.sleep(2) # let the link load soup = BeautifulSoup(driver.page_source, 'lxml') date = soup.find_all('dd')[4] # [3].find('dl').find_all('dd') score = soup.find_all('dd')[2] scores = [int(s) for s in score.text.split() if s.isdigit()] teams = [] teams_link = soup.find_all('a', class_='team-link') for team in teams_link: if team.text not in teams: teams.append(team.text) stats.append(date.text) stats.append(link) stats += teams stats += scores options_header = driver.find_element_by_xpath("//*[@id='live-chart-stats-options']") options = options_header.find_elements_by_tag_name('li') match_report_stat = [] match_report_stat = extract_report_data(match_report_stat, 'live-goals-info', "//div[@id='live-goals-content']//div[@class='stat']") # gather passing data options[1].click() wait = WebDriverWait(driver,10) wait.until(ec.visibility_of_all_elements_located((By.XPATH,"//div[@id='live-passes-content']//div[@class='stat']"))) # Get box for live score content to click on match_report_stat = extract_report_data(match_report_stat, 'live-passes-info', "//div[@id='live-passes-content']//div[@class='stat']") # gather aggression data options[2].click() # wait = WebDriverWait(driver, 10) # wait.until(ec.presence_of_all_elements_located((By.ID, "live-aggression"))) time.sleep(1) soup = BeautifulSoup(driver.page_source, 'lxml') # print(soup.prettify()) aggression_div = soup.find(id='live-aggression') for stat in aggression_div.find_all('div', class_='stat'): for span in stat.find_all('span', class_='stat-value'): match_report_stat.append(span.text) stats += match_report_stat # Match centre data collection match_centre_button = driver.find_element_by_xpath("//*[@id='sub-navigation']/ul/li[4]") match_centre_button.click() time.sleep(1) # wait = WebDriverWait(driver, 10) # wait.until(ec.invisibility_of_element_located((By.XPATH,"//*[@class='match-centre-stat has-stats selected']"))) # Total shots get data # total_shots_more_button = driver.find_element_by_xpath("//*[@id='match-centre-stats']/div[1]/ul[1]/li[2]/div[2]") # total_shots_more_button.click() soup = BeautifulSoup(driver.page_source, 'lxml') stat_box = soup.find_all('li', class_='match-centre-stat match-centre-sub-stat') match_centre_stat = [] for s in stat_box: for p in s.find_all('span', class_="match-centre-stat-value"): match_centre_stat.append(p.text) match_centre_stat = match_centre_stat[:62] if DEBUG: print("match centre stats: " + str(match_centre_stat)) stats += match_centre_stat return stats def
(match_report_stat, info_panel_id, clickable_xpath): # Get the page and now scrap the data from the bottom panel soup = BeautifulSoup(driver.page_source, 'lxml') info_div = soup.find('div', id=info_panel_id) for stat in info_div.find_all('div', class_='stat'): for span in stat.find_all('span', class_='stat-value'): match_report_stat.append(span.text) clickable_fields = driver.find_elements_by_xpath(clickable_xpath) for clickable_stat in clickable_fields: # click on every stat for more details clickable_stat.click() wait = WebDriverWait(driver, 10) wait.until(ec.visibility_of_all_elements_located((By.ID, info_panel_id))) # Get the page and now scrap the data from the bottom panel soup = BeautifulSoup(driver.page_source, 'lxml') info_div = soup.find('div', id=info_panel_id) for stat in info_div.find_all('div', class_='stat'): for span in stat.find_all('span', class_='stat-value'): match_report_stat.append(span.text) return match_report_stat if len(sys.argv) == 3: # Parameters to write the data to links_csv = sys.argv[1] # "data/whoscored/match-links.csv" filepath = sys.argv[2] # leaguename = 'premierleague' # filepath = 'data/whoscored/%s-%d.csv' % (leaguename, 20182019) chromepath = "chromedriver.exe" driver = webdriver.Chrome(chromepath) driver.maximize_window() # accept cookies once driver.get("https://www.whoscored.com/") wait = WebDriverWait(driver, 10) wait.until(ec.element_to_be_clickable((By.XPATH, "//div[@class='qc-cmp2-summary-buttons']/button[2]"))) # accept cookies cookie_button = driver.find_element_by_xpath("//div[@class='qc-cmp2-summary-buttons']/button[2]") cookie_button.click() match_links = [] # Load match links with open(links_csv, 'r+', newline='') as myfile: csv_reader = csv.reader(myfile, delimiter=',') for row in csv_reader: match_links = row print(len(match_links)) matches = [] all_match_stats = [] columns = ["date", "link", "home team", "away team", "home score", "away score", "home total shots", "away total shots", "home total goals", "away total goals", "home total conversion rate", "away total conversion rate", "home open play shots", "away open play shots", "home open play goals", "away open play goals", "home open play conversion rate", "away open play conversion rate", "home set piece shots", "away set piece shots", "home set piece goals", "away set piece goals", "home set piece conversion", "away set piece conversion", "home counter attack shots", "away counter attack shots", "home counter attack goals", "away counter attack goals", "home counter attack conversion", "away counter attack conversion", "home penalty shots", "away penalty shots", "home penalty goals", "away penalty goals", "home penalty conversion", "away penalty conversion", "home own goals shots", "away own goals shots", "home own goals goals", "away own goals goals", "home own goals conversion", "away own goals conversion", "home total passes", "away total passes", "home total average pass streak", "away total average pass streak", "home crosses", "away crosses", "home crosses average pass streak", "away crosses average pass streak", "home through balls", "away through balls", "home through balls average streak", "away through balls average streak", "home long balls", "away long balls", "home long balls average streak", "away long balls average streak", "home short passes", "away short passes", "home short passes average streak", "away short passes average streak", "home cards", "away cards", "home fouls", "away fouls", "home unprofessional", "away unprofessional", "home dive", "away dive", 
"home other", "away other", "home red cards", "away red cards", "home yellow cards", "away yellow cards", "home cards per foul", "away cards per foul", "home fouls", "away fouls", "home total shots", "away total shots", "home woodwork", "away woodwork", "home shots on target", "away shots on target", "home shots off target", "away shots off target", "home shots blocked", "away shots blocked", "home possession", "away possession", "home touches", "away touches", "home passes success", "away passes success", "home total passes", "away total passes", "home accurate passes", "away accurate passes", "home key passes", "away key passes", "home dribbles won", "away dribbles won", "home dribbles attempted", "away dribbles attempted", "home dribbled past", "away dribbled past", "home dribble success", "away dribble success", "home aerials won", "away aerials won", "home aerials won%", "away aerials won%", "home offensive aerials", "away offensive aerials", "home defensive aerials", "away defensive aerials", "home successful tackles", "away successful tackles", "home tackles attempted", "away tackles attempted", "home was dribbled", "away was dribbled", "home tackles success %", "away tackles success %", "home clearances", "away clearances", "home interceptions", "away interceptions", "home corners", "away corners", "home corner accuracy", "away corner accuracy", "home dispossessed", "away dispossessed", "home errors", "away errors", "home fouls", "away fouls", "home offsides", "
extract_report_data
identifier_name
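The scraper mixes explicit WebDriverWait calls with bare time.sleep fallbacks (several waits are commented out above). One way to hedge between the two, assuming the same selenium imports (WebDriverWait, ec, By, time) already used in the file:

# Sketch: prefer an explicit wait, fall back to a short sleep on timeout.
from selenium.common.exceptions import TimeoutException

def wait_for_visible(driver, locator, timeout=10, fallback_sleep=1):
    try:
        WebDriverWait(driver, timeout).until(
            ec.visibility_of_all_elements_located(locator))
    except TimeoutException:
        time.sleep(fallback_sleep)  # last resort, as the original does

# e.g. wait_for_visible(driver, (By.ID, 'live-aggression'))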
datascraper_whoscored.py
.whoscored.com%s" % link) time.sleep(2) # let the link load soup = BeautifulSoup(driver.page_source, 'lxml') date = soup.find_all('dd')[4] # [3].find('dl').find_all('dd') score = soup.find_all('dd')[2] scores = [int(s) for s in score.text.split() if s.isdigit()] teams = [] teams_link = soup.find_all('a', class_='team-link') for team in teams_link: if team.text not in teams: teams.append(team.text) stats.append(date.text) stats.append(link) stats += teams stats += scores options_header = driver.find_element_by_xpath("//*[@id='live-chart-stats-options']") options = options_header.find_elements_by_tag_name('li') match_report_stat = [] match_report_stat = extract_report_data(match_report_stat, 'live-goals-info', "//div[@id='live-goals-content']//div[@class='stat']") # gather passing data options[1].click() wait = WebDriverWait(driver,10) wait.until(ec.visibility_of_all_elements_located((By.XPATH,"//div[@id='live-passes-content']//div[@class='stat']"))) # Get box for live score content to click on match_report_stat = extract_report_data(match_report_stat, 'live-passes-info', "//div[@id='live-passes-content']//div[@class='stat']") # gather aggression data options[2].click() # wait = WebDriverWait(driver, 10) # wait.until(ec.presence_of_all_elements_located((By.ID, "live-aggression"))) time.sleep(1) soup = BeautifulSoup(driver.page_source, 'lxml') # print(soup.prettify()) aggression_div = soup.find(id='live-aggression') for stat in aggression_div.find_all('div', class_='stat'): for span in stat.find_all('span', class_='stat-value'): match_report_stat.append(span.text) stats += match_report_stat # Match centre data collection match_centre_button = driver.find_element_by_xpath("//*[@id='sub-navigation']/ul/li[4]") match_centre_button.click() time.sleep(1) # wait = WebDriverWait(driver, 10) # wait.until(ec.invisibility_of_element_located((By.XPATH,"//*[@class='match-centre-stat has-stats selected']"))) # Total shots get data # total_shots_more_button = driver.find_element_by_xpath("//*[@id='match-centre-stats']/div[1]/ul[1]/li[2]/div[2]") # total_shots_more_button.click() soup = BeautifulSoup(driver.page_source, 'lxml') stat_box = soup.find_all('li', class_='match-centre-stat match-centre-sub-stat') match_centre_stat = [] for s in stat_box: for p in s.find_all('span', class_="match-centre-stat-value"): match_centre_stat.append(p.text) match_centre_stat = match_centre_stat[:62] if DEBUG: print("match centre stats: " + str(match_centre_stat)) stats += match_centre_stat return stats def extract_report_data(match_report_stat, info_panel_id, clickable_xpath): # Get the page and now scrap the data from the bottom panel soup = BeautifulSoup(driver.page_source, 'lxml') info_div = soup.find('div', id=info_panel_id) for stat in info_div.find_all('div', class_='stat'): for span in stat.find_all('span', class_='stat-value'): match_report_stat.append(span.text) clickable_fields = driver.find_elements_by_xpath(clickable_xpath) for clickable_stat in clickable_fields: # click on every stat for more details clickable_stat.click() wait = WebDriverWait(driver, 10) wait.until(ec.visibility_of_all_elements_located((By.ID, info_panel_id))) # Get the page and now scrap the data from the bottom panel soup = BeautifulSoup(driver.page_source, 'lxml') info_div = soup.find('div', id=info_panel_id) for stat in info_div.find_all('div', class_='stat'): for span in stat.find_all('span', class_='stat-value'): match_report_stat.append(span.text) return match_report_stat if len(sys.argv) == 3: # Parameters to write the data to 
links_csv = sys.argv[1] # "data/whoscored/match-links.csv" filepath = sys.argv[2] # leaguename = 'premierleague' # filepath = 'data/whoscored/%s-%d.csv' % (leaguename, 20182019) chromepath = "chromedriver.exe" driver = webdriver.Chrome(chromepath) driver.maximize_window() # accept cookies once driver.get("https://www.whoscored.com/") wait = WebDriverWait(driver, 10) wait.until(ec.element_to_be_clickable((By.XPATH, "//div[@class='qc-cmp2-summary-buttons']/button[2]"))) # accept cookies cookie_button = driver.find_element_by_xpath("//div[@class='qc-cmp2-summary-buttons']/button[2]") cookie_button.click() match_links = [] # Load match links with open(links_csv, 'r+', newline='') as myfile: csv_reader = csv.reader(myfile, delimiter=',') for row in csv_reader: match_links = row print(len(match_links)) matches = [] all_match_stats = [] columns = ["date", "link", "home team", "away team", "home score", "away score", "home total shots", "away total shots", "home total goals", "away total goals", "home total conversion rate", "away total conversion rate", "home open play shots", "away open play shots", "home open play goals", "away open play goals", "home open play conversion rate", "away open play conversion rate", "home set piece shots", "away set piece shots", "home set piece goals", "away set piece goals", "home set piece conversion", "away set piece conversion", "home counter attack shots", "away counter attack shots", "home counter attack goals", "away counter attack goals", "home counter attack conversion", "away counter attack conversion", "home penalty shots", "away penalty shots", "home penalty goals", "away penalty goals", "home penalty conversion", "away penalty conversion", "home own goals shots", "away own goals shots", "home own goals goals", "away own goals goals", "home own goals conversion", "away own goals conversion", "home total passes", "away total passes", "home total average pass streak", "away total average pass streak", "home crosses", "away crosses", "home crosses average pass streak", "away crosses average pass streak", "home through balls", "away through balls", "home through balls average streak", "away through balls average streak", "home long balls", "away long balls", "home long balls average streak", "away long balls average streak", "home short passes", "away short passes", "home short passes average streak", "away short passes average streak", "home cards", "away cards", "home fouls", "away fouls", "home unprofessional", "away unprofessional", "home dive", "away dive", "home other", "away other", "home red cards", "away red cards", "home yellow cards", "away yellow cards", "home cards per foul", "away cards per foul", "home fouls", "away fouls",
"home total shots", "away total shots", "home woodwork", "away woodwork", "home shots on target", "away shots on target", "home shots off target", "away shots off target", "home shots blocked", "away shots blocked", "home possession", "away possession", "home touches", "away touches", "home passes success", "away passes success", "home total passes", "away total passes", "home accurate passes", "away accurate passes", "home key passes", "away key passes", "home dribbles won", "away dribbles won", "home dribbles attempted", "away dribbles attempted", "home dribbled past", "away dribbled past", "home dribble success", "away dribble success", "home aerials won", "away aerials won", "home aerials won%", "away aerials won%", "home offensive aerials", "away offensive aerials", "home defensive aerials", "away defensive aerials", "home successful tackles", "away successful tackles", "home tackles attempted", "away tackles attempted", "home was dribbled", "away was dribbled", "home tackles success %", "away tackles success %", "home clearances", "away clearances", "home interceptions", "away interceptions", "home corners", "away corners", "home corner accuracy", "away corner accuracy", "home dispossessed", "away dispossessed", "home errors", "away errors", "home fouls", "away fouls", "home offsides", "away
random_line_split
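The main block above prepares filepath and the columns header, but the write-out happens past this excerpt. One plausible ending, using the csv module the scraper already imports (this is an assumption, not the source's own code; all_match_stats is taken to hold one stats list per match):

# Sketch: persist the scraped rows; assumes all_match_stats is filled elsewhere.
with open(filepath, 'w', newline='') as out:
    writer = csv.writer(out)
    writer.writerow(columns)
    writer.writerows(all_match_stats)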
function.go
// Use StaticReturnType if the function's return type does not vary // depending on its arguments. Type TypeFunc // Impl is the ImplFunc that implements the function's behavior. // // Functions are expected to behave as pure functions, and not create // any visible side-effects. // // If a TypeFunc is also provided, the value returned from Impl *must* // conform to the type it returns, or a call to the function will panic. Impl ImplFunc } // New creates a new function with the given specification. // // After passing a Spec to this function, the caller must no longer read from // or mutate it. func New(spec *Spec) Function { f := Function{ spec: spec, } return f } // TypeFunc is a callback type for determining the return type of a function // given its arguments. // // Any of the values passed to this function may be unknown, even if the // parameters are not configured to accept unknowns. // // If any of the given values are *not* unknown, the TypeFunc may use the // values for pre-validation and for choosing the return type. For example, // a hypothetical JSON-unmarshalling function could return // cty.DynamicPseudoType if the given JSON string is unknown, but return // a concrete type based on the JSON structure if the JSON string is already // known. type TypeFunc func(args []cty.Value) (cty.Type, error) // ImplFunc is a callback type for the main implementation of a function. // // "args" are the values for the arguments, and this slice will always be at // least as long as the argument definition slice for the function. // // "retType" is the type returned from the Type callback, included as a // convenience to avoid the need to re-compute the return type for generic // functions whose return type is a function of the arguments. type ImplFunc func(args []cty.Value, retType cty.Type) (cty.Value, error) // StaticReturnType returns a TypeFunc that always returns the given type. // // This is provided as a convenience for defining a function whose return // type does not depend on the argument types. func StaticReturnType(ty cty.Type) TypeFunc { return func([]cty.Value) (cty.Type, error) { return ty, nil } } // ReturnType returns the return type of a function given a set of candidate // argument types, or returns an error if the given types are unacceptable. // // If the caller already knows values for at least some of the arguments // it can be better to call ReturnTypeForValues, since certain functions may // determine their return types from their values and return DynamicVal if // the values are unknown. func (f Function) ReturnType(argTypes []cty.Type) (cty.Type, error) { vals := make([]cty.Value, len(argTypes)) for i, ty := range argTypes { vals[i] = cty.UnknownVal(ty) } return f.ReturnTypeForValues(vals) } // ReturnTypeForValues is similar to ReturnType but can be used if the caller // already knows the values of some or all of the arguments, in which case // the function may be able to determine a more definite result if its // return type depends on the argument *values*. // // For any arguments whose values are not known, pass an Unknown value of // the appropriate type. 
func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) { var posArgs []cty.Value var varArgs []cty.Value if f.spec.VarParam == nil { if len(args) != len(f.spec.Params) { return cty.Type{}, fmt.Errorf( "wrong number of arguments (%d required; %d given)", len(f.spec.Params), len(args), ) } posArgs = args varArgs = nil } else { if len(args) < len(f.spec.Params) { return cty.Type{}, fmt.Errorf( "wrong number of arguments (at least %d required; %d given)", len(f.spec.Params), len(args), ) } posArgs = args[0:len(f.spec.Params)] varArgs = args[len(f.spec.Params):] } for i, spec := range f.spec.Params { val := posArgs[i] if val.ContainsMarked() && !spec.AllowMarked
if val.IsNull() && !spec.AllowNull { return cty.Type{}, NewArgErrorf(i, "argument must not be null") } // AllowUnknown is ignored for type-checking, since we expect to be // able to type check with unknown values. We *do* still need to deal // with DynamicPseudoType here though, since the Type function might // not be ready to deal with that. if val.Type() == cty.DynamicPseudoType { if !spec.AllowDynamicType { return cty.DynamicPseudoType, nil } } else if errs := val.Type().TestConformance(spec.Type); errs != nil { // For now we'll just return the first error in the set, since // we don't have a good way to return the whole list here. // Would be good to do something better at some point... return cty.Type{}, NewArgError(i, errs[0]) } } if varArgs != nil { spec := f.spec.VarParam for i, val := range varArgs { realI := i + len(posArgs) if val.ContainsMarked() && !spec.AllowMarked { // See the similar block in the loop above for what's going on here. unmarked, _ := val.UnmarkDeep() newArgs := make([]cty.Value, len(args)) copy(newArgs, args) newArgs[realI] = unmarked args = newArgs } if val.IsNull() && !spec.AllowNull { return cty.Type{}, NewArgErrorf(realI, "argument must not be null") } if val.Type() == cty.DynamicPseudoType { if !spec.AllowDynamicType { return cty.DynamicPseudoType, nil } } else if errs := val.Type().TestConformance(spec.Type); errs != nil { // For now we'll just return the first error in the set, since // we don't have a good way to return the whole list here. // Would be good to do something better at some point... return cty.Type{}, NewArgError(i, errs[0]) } } } // Intercept any panics from the function and return them as normal errors, // so a calling language runtime doesn't need to deal with panics. defer func() { if r := recover(); r != nil { ty = cty.NilType err = errorForPanic(r) } }() return f.spec.Type(args) } // Call actually calls the function with the given arguments, which must // conform to the function's parameter specification or an error will be // returned. func (f Function) Call(args []cty.Value) (val cty.Value, err error) { expectedType, err := f.ReturnTypeForValues(args) if err != nil { return cty.NilVal, err } // Type checking already dealt with most situations relating to our // parameter specification, but we still need to deal with unknown // values and marked values. posArgs := args[:len(f.spec.Params)] varArgs := args[len(f.spec.Params):] var resultMarks []cty.ValueMarks for i, spec := range f.spec.Params { val := posArgs[i] if !val.IsKnown() && !spec.AllowUnknown { return cty.UnknownVal(expectedType), nil } if !spec.AllowMarked { unwrappedVal, marks := val.UnmarkDeep() if len(marks) > 0 { // In order to avoid additional overhead on applications that // are not using marked values, we copy the given args only // if we encounter a marked value we need to unmark. However, // as a consequence we end up doing redundant copying if multiple // marked values need to be unwrapped. That seems okay because // argument lists are generally small. newArgs := make([]cty.Value, len(args)) copy(newArgs, args) newArgs[i] = unwrappedVal resultMarks = append(resultMarks, marks) args = newArgs } }
{ // During type checking we just unmark values and discard their // marks, under the assumption that during actual execution of // the function we'll do similarly and then re-apply the marks // afterwards. Note that this does mean that a function that // inspects values (rather than just types) in its Type // implementation can potentially fail to take into account marks, // unless it specifically opts in to seeing them. unmarked, _ := val.UnmarkDeep() newArgs := make([]cty.Value, len(args)) copy(newArgs, args) newArgs[i] = unmarked args = newArgs }
conditional_block
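
The conditional block captured above is cty's convention for marked values: unmark before type checking, then re-apply the marks to the result afterwards. Below is a minimal, self-contained sketch of that round trip using go-cty's public Value API; it is illustrative only, not the library's internal implementation.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A string value carrying a "sensitive" mark.
	v := cty.StringVal("hunter2").Mark("sensitive")

	// Strip the marks before handing the value to code that does not
	// understand them, as ReturnTypeForValues does for parameters
	// without AllowMarked.
	unmarked, marks := v.UnmarkDeep()
	fmt.Println(unmarked.AsString()) // hunter2

	// ...operate on the unmarked value...
	result := cty.StringVal("HUNTER2")

	// Re-apply the collected marks to the result afterwards, as
	// Function.Call does with resultMarks.
	result = result.WithMarks(marks)
	fmt.Println(result.HasMark("sensitive")) // true
}
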
function.go
// Use StaticReturnType if the function's return type does not vary // depending on its arguments. Type TypeFunc // Impl is the ImplFunc that implements the function's behavior. // // Functions are expected to behave as pure functions, and not create // any visible side-effects. // // If a TypeFunc is also provided, the value returned from Impl *must* // conform to the type it returns, or a call to the function will panic. Impl ImplFunc } // New creates a new function with the given specification. // // After passing a Spec to this function, the caller must no longer read from // or mutate it. func New(spec *Spec) Function { f := Function{ spec: spec, } return f } // TypeFunc is a callback type for determining the return type of a function // given its arguments. // // Any of the values passed to this function may be unknown, even if the // parameters are not configured to accept unknowns. // // If any of the given values are *not* unknown, the TypeFunc may use the // values for pre-validation and for choosing the return type. For example, // a hypothetical JSON-unmarshalling function could return // cty.DynamicPseudoType if the given JSON string is unknown, but return // a concrete type based on the JSON structure if the JSON string is already // known. type TypeFunc func(args []cty.Value) (cty.Type, error) // ImplFunc is a callback type for the main implementation of a function. // // "args" are the values for the arguments, and this slice will always be at // least as long as the argument definition slice for the function. // // "retType" is the type returned from the Type callback, included as a // convenience to avoid the need to re-compute the return type for generic // functions whose return type is a function of the arguments. type ImplFunc func(args []cty.Value, retType cty.Type) (cty.Value, error) // StaticReturnType returns a TypeFunc that always returns the given type. // // This is provided as a convenience for defining a function whose return // type does not depend on the argument types. func StaticReturnType(ty cty.Type) TypeFunc { return func([]cty.Value) (cty.Type, error) { return ty, nil } } // ReturnType returns the return type of a function given a set of candidate // argument types, or returns an error if the given types are unacceptable. // // If the caller already knows values for at least some of the arguments // it can be better to call ReturnTypeForValues, since certain functions may // determine their return types from their values and return DynamicVal if // the values are unknown. func (f Function) ReturnType(argTypes []cty.Type) (cty.Type, error) { vals := make([]cty.Value, len(argTypes)) for i, ty := range argTypes { vals[i] = cty.UnknownVal(ty) } return f.ReturnTypeForValues(vals) } // ReturnTypeForValues is similar to ReturnType but can be used if the caller // already knows the values of some or all of the arguments, in which case // the function may be able to determine a more definite result if its // return type depends on the argument *values*. // // For any arguments whose values are not known, pass an Unknown value of // the appropriate type. 
func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) { var posArgs []cty.Value var varArgs []cty.Value if f.spec.VarParam == nil { if len(args) != len(f.spec.Params) { return cty.Type{}, fmt.Errorf( "wrong number of arguments (%d required; %d given)", len(f.spec.Params), len(args), ) } posArgs = args varArgs = nil } else { if len(args) < len(f.spec.Params) { return cty.Type{}, fmt.Errorf( "wrong number of arguments (at least %d required; %d given)", len(f.spec.Params), len(args), ) } posArgs = args[0:len(f.spec.Params)] varArgs = args[len(f.spec.Params):] } for i, spec := range f.spec.Params { val := posArgs[i] if val.ContainsMarked() && !spec.AllowMarked { // During type checking we just unmark values and discard their // marks, under the assumption that during actual execution of // the function we'll do similarly and then re-apply the marks // afterwards. Note that this does mean that a function that // inspects values (rather than just types) in its Type // implementation can potentially fail to take into account marks, // unless it specifically opts in to seeing them. unmarked, _ := val.UnmarkDeep() newArgs := make([]cty.Value, len(args)) copy(newArgs, args) newArgs[i] = unmarked args = newArgs } if val.IsNull() && !spec.AllowNull { return cty.Type{}, NewArgErrorf(i, "argument must not be null") }
if val.Type() == cty.DynamicPseudoType { if !spec.AllowDynamicType { return cty.DynamicPseudoType, nil } } else if errs := val.Type().TestConformance(spec.Type); errs != nil { // For now we'll just return the first error in the set, since // we don't have a good way to return the whole list here. // Would be good to do something better at some point... return cty.Type{}, NewArgError(i, errs[0]) } } if varArgs != nil { spec := f.spec.VarParam for i, val := range varArgs { realI := i + len(posArgs) if val.ContainsMarked() && !spec.AllowMarked { // See the similar block in the loop above for what's going on here. unmarked, _ := val.UnmarkDeep() newArgs := make([]cty.Value, len(args)) copy(newArgs, args) newArgs[realI] = unmarked args = newArgs } if val.IsNull() && !spec.AllowNull { return cty.Type{}, NewArgErrorf(realI, "argument must not be null") } if val.Type() == cty.DynamicPseudoType { if !spec.AllowDynamicType { return cty.DynamicPseudoType, nil } } else if errs := val.Type().TestConformance(spec.Type); errs != nil { // For now we'll just return the first error in the set, since // we don't have a good way to return the whole list here. // Would be good to do something better at some point... return cty.Type{}, NewArgError(i, errs[0]) } } } // Intercept any panics from the function and return them as normal errors, // so a calling language runtime doesn't need to deal with panics. defer func() { if r := recover(); r != nil { ty = cty.NilType err = errorForPanic(r) } }() return f.spec.Type(args) } // Call actually calls the function with the given arguments, which must // conform to the function's parameter specification or an error will be // returned. func (f Function) Call(args []cty.Value) (val cty.Value, err error) { expectedType, err := f.ReturnTypeForValues(args) if err != nil { return cty.NilVal, err } // Type checking already dealt with most situations relating to our // parameter specification, but we still need to deal with unknown // values and marked values. posArgs := args[:len(f.spec.Params)] varArgs := args[len(f.spec.Params):] var resultMarks []cty.ValueMarks for i, spec := range f.spec.Params { val := posArgs[i] if !val.IsKnown() && !spec.AllowUnknown { return cty.UnknownVal(expectedType), nil } if !spec.AllowMarked { unwrappedVal, marks := val.UnmarkDeep() if len(marks) > 0 { // In order to avoid additional overhead on applications that // are not using marked values, we copy the given args only // if we encounter a marked value we need to unmark. However, // as a consequence we end up doing redundant copying if multiple // marked values need to be unwrapped. That seems okay because // argument lists are generally small. newArgs := make([]cty.Value, len(args)) copy(newArgs, args) newArgs[i] = unwrappedVal resultMarks = append(resultMarks, marks) args = newArgs } } }
// AllowUnknown is ignored for type-checking, since we expect to be // able to type check with unknown values. We *do* still need to deal // with DynamicPseudoType here though, since the Type function might // not be ready to deal with that.
random_line_split
function.go
// Use StaticReturnType if the function's return type does not vary // depending on its arguments. Type TypeFunc // Impl is the ImplFunc that implements the function's behavior. // // Functions are expected to behave as pure functions, and not create // any visible side-effects. // // If a TypeFunc is also provided, the value returned from Impl *must* // conform to the type it returns, or a call to the function will panic. Impl ImplFunc } // New creates a new function with the given specification. // // After passing a Spec to this function, the caller must no longer read from // or mutate it. func New(spec *Spec) Function { f := Function{ spec: spec, } return f } // TypeFunc is a callback type for determining the return type of a function // given its arguments. // // Any of the values passed to this function may be unknown, even if the // parameters are not configured to accept unknowns. // // If any of the given values are *not* unknown, the TypeFunc may use the // values for pre-validation and for choosing the return type. For example, // a hypothetical JSON-unmarshalling function could return // cty.DynamicPseudoType if the given JSON string is unknown, but return // a concrete type based on the JSON structure if the JSON string is already // known. type TypeFunc func(args []cty.Value) (cty.Type, error) // ImplFunc is a callback type for the main implementation of a function. // // "args" are the values for the arguments, and this slice will always be at // least as long as the argument definition slice for the function. // // "retType" is the type returned from the Type callback, included as a // convenience to avoid the need to re-compute the return type for generic // functions whose return type is a function of the arguments. type ImplFunc func(args []cty.Value, retType cty.Type) (cty.Value, error) // StaticReturnType returns a TypeFunc that always returns the given type. // // This is provided as a convenience for defining a function whose return // type does not depend on the argument types. func StaticReturnType(ty cty.Type) TypeFunc
// ReturnType returns the return type of a function given a set of candidate // argument types, or returns an error if the given types are unacceptable. // // If the caller already knows values for at least some of the arguments // it can be better to call ReturnTypeForValues, since certain functions may // determine their return types from their values and return DynamicVal if // the values are unknown. func (f Function) ReturnType(argTypes []cty.Type) (cty.Type, error) { vals := make([]cty.Value, len(argTypes)) for i, ty := range argTypes { vals[i] = cty.UnknownVal(ty) } return f.ReturnTypeForValues(vals) } // ReturnTypeForValues is similar to ReturnType but can be used if the caller // already knows the values of some or all of the arguments, in which case // the function may be able to determine a more definite result if its // return type depends on the argument *values*. // // For any arguments whose values are not known, pass an Unknown value of // the appropriate type. func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) { var posArgs []cty.Value var varArgs []cty.Value if f.spec.VarParam == nil { if len(args) != len(f.spec.Params) { return cty.Type{}, fmt.Errorf( "wrong number of arguments (%d required; %d given)", len(f.spec.Params), len(args), ) } posArgs = args varArgs = nil } else { if len(args) < len(f.spec.Params) { return cty.Type{}, fmt.Errorf( "wrong number of arguments (at least %d required; %d given)", len(f.spec.Params), len(args), ) } posArgs = args[0:len(f.spec.Params)] varArgs = args[len(f.spec.Params):] } for i, spec := range f.spec.Params { val := posArgs[i] if val.ContainsMarked() && !spec.AllowMarked { // During type checking we just unmark values and discard their // marks, under the assumption that during actual execution of // the function we'll do similarly and then re-apply the marks // afterwards. Note that this does mean that a function that // inspects values (rather than just types) in its Type // implementation can potentially fail to take into account marks, // unless it specifically opts in to seeing them. unmarked, _ := val.UnmarkDeep() newArgs := make([]cty.Value, len(args)) copy(newArgs, args) newArgs[i] = unmarked args = newArgs } if val.IsNull() && !spec.AllowNull { return cty.Type{}, NewArgErrorf(i, "argument must not be null") } // AllowUnknown is ignored for type-checking, since we expect to be // able to type check with unknown values. We *do* still need to deal // with DynamicPseudoType here though, since the Type function might // not be ready to deal with that. if val.Type() == cty.DynamicPseudoType { if !spec.AllowDynamicType { return cty.DynamicPseudoType, nil } } else if errs := val.Type().TestConformance(spec.Type); errs != nil { // For now we'll just return the first error in the set, since // we don't have a good way to return the whole list here. // Would be good to do something better at some point... return cty.Type{}, NewArgError(i, errs[0]) } } if varArgs != nil { spec := f.spec.VarParam for i, val := range varArgs { realI := i + len(posArgs) if val.ContainsMarked() && !spec.AllowMarked { // See the similar block in the loop above for what's going on here. 
unmarked, _ := val.UnmarkDeep() newArgs := make([]cty.Value, len(args)) copy(newArgs, args) newArgs[realI] = unmarked args = newArgs } if val.IsNull() && !spec.AllowNull { return cty.Type{}, NewArgErrorf(realI, "argument must not be null") } if val.Type() == cty.DynamicPseudoType { if !spec.AllowDynamicType { return cty.DynamicPseudoType, nil } } else if errs := val.Type().TestConformance(spec.Type); errs != nil { // For now we'll just return the first error in the set, since // we don't have a good way to return the whole list here. // Would be good to do something better at some point... return cty.Type{}, NewArgError(i, errs[0]) } } } // Intercept any panics from the function and return them as normal errors, // so a calling language runtime doesn't need to deal with panics. defer func() { if r := recover(); r != nil { ty = cty.NilType err = errorForPanic(r) } }() return f.spec.Type(args) } // Call actually calls the function with the given arguments, which must // conform to the function's parameter specification or an error will be // returned. func (f Function) Call(args []cty.Value) (val cty.Value, err error) { expectedType, err := f.ReturnTypeForValues(args) if err != nil { return cty.NilVal, err } // Type checking already dealt with most situations relating to our // parameter specification, but we still need to deal with unknown // values and marked values. posArgs := args[:len(f.spec.Params)] varArgs := args[len(f.spec.Params):] var resultMarks []cty.ValueMarks for i, spec := range f.spec.Params { val := posArgs[i] if !val.IsKnown() && !spec.AllowUnknown { return cty.UnknownVal(expectedType), nil } if !spec.AllowMarked { unwrappedVal, marks := val.UnmarkDeep() if len(marks) > 0 { // In order to avoid additional overhead on applications that // are not using marked values, we copy the given args only // if we encounter a marked value we need to unmark. However, // as a consequence we end up doing redundant copying if multiple // marked values need to be unwrapped. That seems okay because // argument lists are generally small. newArgs := make([]cty.Value, len(args)) copy(newArgs, args) newArgs[i] = unwrappedVal resultMarks = append(resultMarks, marks) args = newArgs } }
{ return func([]cty.Value) (cty.Type, error) { return ty, nil } }
identifier_body
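
The identifier_body sample above is the body of StaticReturnType itself. The following self-contained sketch shows how it is meant to be used when defining a function with function.New, per the Spec/TypeFunc/ImplFunc docs in the record's prefix. The `upper` function is a made-up example, not part of the go-cty library.

package main

import (
	"fmt"
	"strings"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

// upper's return type never varies with its arguments, so it uses
// StaticReturnType instead of a custom TypeFunc.
var upper = function.New(&function.Spec{
	Params: []function.Parameter{
		{Name: "str", Type: cty.String},
	},
	Type: function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		return cty.StringVal(strings.ToUpper(args[0].AsString())), nil
	},
})

func main() {
	v, err := upper.Call([]cty.Value{cty.StringVal("hello")})
	fmt.Println(v.AsString(), err) // HELLO <nil>

	// With an unknown argument, Call short-circuits before Impl runs
	// (the parameter does not set AllowUnknown) and returns an unknown
	// value of the expected return type.
	v, err = upper.Call([]cty.Value{cty.UnknownVal(cty.String)})
	fmt.Println(v.IsKnown(), err) // false <nil>
}
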
function.go
// Use StaticReturnType if the function's return type does not vary // depending on its arguments. Type TypeFunc // Impl is the ImplFunc that implements the function's behavior. // // Functions are expected to behave as pure functions, and not create // any visible side-effects. // // If a TypeFunc is also provided, the value returned from Impl *must* // conform to the type it returns, or a call to the function will panic. Impl ImplFunc } // New creates a new function with the given specification. // // After passing a Spec to this function, the caller must no longer read from // or mutate it. func New(spec *Spec) Function { f := Function{ spec: spec, } return f } // TypeFunc is a callback type for determining the return type of a function // given its arguments. // // Any of the values passed to this function may be unknown, even if the // parameters are not configured to accept unknowns. // // If any of the given values are *not* unknown, the TypeFunc may use the // values for pre-validation and for choosing the return type. For example, // a hypothetical JSON-unmarshalling function could return // cty.DynamicPseudoType if the given JSON string is unknown, but return // a concrete type based on the JSON structure if the JSON string is already // known. type TypeFunc func(args []cty.Value) (cty.Type, error) // ImplFunc is a callback type for the main implementation of a function. // // "args" are the values for the arguments, and this slice will always be at // least as long as the argument definition slice for the function. // // "retType" is the type returned from the Type callback, included as a // convenience to avoid the need to re-compute the return type for generic // functions whose return type is a function of the arguments. type ImplFunc func(args []cty.Value, retType cty.Type) (cty.Value, error) // StaticReturnType returns a TypeFunc that always returns the given type. // // This is provided as a convenience for defining a function whose return // type does not depend on the argument types. func
(ty cty.Type) TypeFunc { return func([]cty.Value) (cty.Type, error) { return ty, nil } } // ReturnType returns the return type of a function given a set of candidate // argument types, or returns an error if the given types are unacceptable. // // If the caller already knows values for at least some of the arguments // it can be better to call ReturnTypeForValues, since certain functions may // determine their return types from their values and return DynamicVal if // the values are unknown. func (f Function) ReturnType(argTypes []cty.Type) (cty.Type, error) { vals := make([]cty.Value, len(argTypes)) for i, ty := range argTypes { vals[i] = cty.UnknownVal(ty) } return f.ReturnTypeForValues(vals) } // ReturnTypeForValues is similar to ReturnType but can be used if the caller // already knows the values of some or all of the arguments, in which case // the function may be able to determine a more definite result if its // return type depends on the argument *values*. // // For any arguments whose values are not known, pass an Unknown value of // the appropriate type. func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) { var posArgs []cty.Value var varArgs []cty.Value if f.spec.VarParam == nil { if len(args) != len(f.spec.Params) { return cty.Type{}, fmt.Errorf( "wrong number of arguments (%d required; %d given)", len(f.spec.Params), len(args), ) } posArgs = args varArgs = nil } else { if len(args) < len(f.spec.Params) { return cty.Type{}, fmt.Errorf( "wrong number of arguments (at least %d required; %d given)", len(f.spec.Params), len(args), ) } posArgs = args[0:len(f.spec.Params)] varArgs = args[len(f.spec.Params):] } for i, spec := range f.spec.Params { val := posArgs[i] if val.ContainsMarked() && !spec.AllowMarked { // During type checking we just unmark values and discard their // marks, under the assumption that during actual execution of // the function we'll do similarly and then re-apply the marks // afterwards. Note that this does mean that a function that // inspects values (rather than just types) in its Type // implementation can potentially fail to take into account marks, // unless it specifically opts in to seeing them. unmarked, _ := val.UnmarkDeep() newArgs := make([]cty.Value, len(args)) copy(newArgs, args) newArgs[i] = unmarked args = newArgs } if val.IsNull() && !spec.AllowNull { return cty.Type{}, NewArgErrorf(i, "argument must not be null") } // AllowUnknown is ignored for type-checking, since we expect to be // able to type check with unknown values. We *do* still need to deal // with DynamicPseudoType here though, since the Type function might // not be ready to deal with that. if val.Type() == cty.DynamicPseudoType { if !spec.AllowDynamicType { return cty.DynamicPseudoType, nil } } else if errs := val.Type().TestConformance(spec.Type); errs != nil { // For now we'll just return the first error in the set, since // we don't have a good way to return the whole list here. // Would be good to do something better at some point... return cty.Type{}, NewArgError(i, errs[0]) } } if varArgs != nil { spec := f.spec.VarParam for i, val := range varArgs { realI := i + len(posArgs) if val.ContainsMarked() && !spec.AllowMarked { // See the similar block in the loop above for what's going on here. 
unmarked, _ := val.UnmarkDeep() newArgs := make([]cty.Value, len(args)) copy(newArgs, args) newArgs[realI] = unmarked args = newArgs } if val.IsNull() && !spec.AllowNull { return cty.Type{}, NewArgErrorf(realI, "argument must not be null") } if val.Type() == cty.DynamicPseudoType { if !spec.AllowDynamicType { return cty.DynamicPseudoType, nil } } else if errs := val.Type().TestConformance(spec.Type); errs != nil { // For now we'll just return the first error in the set, since // we don't have a good way to return the whole list here. // Would be good to do something better at some point... return cty.Type{}, NewArgError(i, errs[0]) } } } // Intercept any panics from the function and return them as normal errors, // so a calling language runtime doesn't need to deal with panics. defer func() { if r := recover(); r != nil { ty = cty.NilType err = errorForPanic(r) } }() return f.spec.Type(args) } // Call actually calls the function with the given arguments, which must // conform to the function's parameter specification or an error will be // returned. func (f Function) Call(args []cty.Value) (val cty.Value, err error) { expectedType, err := f.ReturnTypeForValues(args) if err != nil { return cty.NilVal, err } // Type checking already dealt with most situations relating to our // parameter specification, but we still need to deal with unknown // values and marked values. posArgs := args[:len(f.spec.Params)] varArgs := args[len(f.spec.Params):] var resultMarks []cty.ValueMarks for i, spec := range f.spec.Params { val := posArgs[i] if !val.IsKnown() && !spec.AllowUnknown { return cty.UnknownVal(expectedType), nil } if !spec.AllowMarked { unwrappedVal, marks := val.UnmarkDeep() if len(marks) > 0 { // In order to avoid additional overhead on applications that // are not using marked values, we copy the given args only // if we encounter a marked value we need to unmark. However, // as a consequence we end up doing redundant copying if multiple // marked values need to be unwrapped. That seems okay because // argument lists are generally small. newArgs := make([]cty.Value, len(args)) copy(newArgs, args) newArgs[i] = unwrappedVal resultMarks = append(resultMarks, marks) args = newArgs } }
StaticReturnType
identifier_name
index.js
return currentState; }); } callback = (data) => { console.log('%ccallback', 'color: #47AAAC; font-weight: bold; font-size: 13px;'); // eslint-disable-line no-console console.log(data); // eslint-disable-line no-console this.setState({ selector: data.type === 'tooltip:before' ? data.step.selector : '', }); } onClickSwitch = (e) => { e.preventDefault(); const el = e.currentTarget; const state = {}; if (el.dataset.key === 'joyrideType') { this.joyride.reset(); this.setState({ isRunning: false, }); setTimeout(() => { this.setState({ isRunning: true, }); }, 300); state.joyrideType = e.currentTarget.dataset.type; } if (el.dataset.key === 'joyrideOverlay') { state.joyrideOverlay = el.dataset.type === 'active'; } this.setState(state); }; onChangeFields = (evt) => { const { name, value } = evt.target; const valid = this.onCheckValidation(name, value); if (valid) { this.props.updateFields(name, value); this.props.addErrorMessage(''); return; } this.props.addErrorMessage(`Invalid ${name} Data passed in. Format should be ${this.onShowErrorMessage(name)}`); }; onCheckValidation = (data, value) => { switch (data) { case 'expiration': return (this.onDateValidation(value) && this.onPresentDateValidation(value)); case 'license': return this.onLicenseValidation(value.trim()); case 'phoneNumber': return this.onPhoneNumberValidation(value); default: return true; } }; onLicenseValidation = (license) => license.match(/^[a-zA-Z]{3}[-]\d{3}[a-zA-Z]{2}$/g); onDateValidation = (date) => date.match(/^\d{4}([./-])\d{2}\1\d{2}$/g); onPresentDateValidation = (dateString) => new Date(dateString) > new Date(); onPhoneNumberValidation = (number) => number.match(/[+](\d+)/g); onChangeOffence = (evt, id) => { let { value } = evt.target; if (value === '--clear--') { value = ''; } this.props.updateOffence(value, id); }; onShowErrorMessage = (data) => { switch (data) { case 'expiration': return '(DD/MM/YYYY) and should be greater than today'; case 'license': return 'AAA-111MD'; case 'phoneNumber': return '+23490974673'; default: return ''; } }; onChangeSearch = (evt) => { const { value } = evt.target; this.props.searchUsers(value); this.props.addUser([]); }; onCloseNotification = () => { }; onFileChange = (evt) => { const { name, files } = evt.target; this.props.updateFields(name, files); }; onLogout = (e) => { e.preventDefault(); const logout = window.confirm('Are you sure you want to logout'); if (logout) { AuthService.logout(); } }; onOpenChat =(id) => { const { email } = this.getEmail(); if (email) { this.props.fetchChatMessage(this.sortData(id, email), id); } else
}; onOpenNotification = (id) => { this.onOpenChat(id); }; onRateUser =(obj) => { const { email } = this.getEmail(); this.props.rating({ ...obj, userId: email }); }; onRemove =() => { this.props.remove(); }; onSubmit = (evt) => { evt.preventDefault(); const { data } = this.props.data; const valid = this.validateForm(data); const { displayName, email } = this.props.user.userProfile; if (valid) { this.props.submitForm(data, email); this.props.sendSms({ ...data, phoneNumber: '+2349097438705', text: `${displayName}, Needs License Approval.` }); } else { this.handleFormError(); } }; onToggleDisplay = (id, map) => { this.props.updateUserMap(id, map); }; onChat = (evt) => { const { value } = evt.target; this.props.addChatMessage(value); }; getChatUser = () => this.props.chat.chatData.chatOrder[this.props.chat.chatData.chatOrder.length - 1]; getEmail = () => this.props.user.userProfile; addTooltip = (data) => { this.joyride.addTooltip(data); }; approveUser = (evt, data) => { evt.preventDefault(); const { email, displayName } = data; this.props.remove(); this.props.sendSms({ ...data, text: `${displayName}, Your License has been Approved.` }); this.props.approveUsers(email); }; rejectUser = (evt, data) => { evt.preventDefault(); const { email, displayName } = data; this.props.remove(); this.props.sendSms({ ...data, text: `${displayName}, Your License has been Rejected.` }); this.props.rejectUsers(email); }; changeStatusField =(evt) => { const { checked, value, name } = evt.target; const { email } = this.getEmail(); if (!checked) { if (name) { this.props.updateStatusField(value); } else { this.props.toggleVisibility(email, false); } } else { this.props.toggleVisibility(email, true); } }; sortData =(id, email) => { const sortedData = id.localeCompare(email); if (sortedData === -1) { return `${id}${email}`; } return `${email}${id}`; }; searchUser = (e) => { e.preventDefault(); const key = this.props.users.search; const values = Object.values(this.props.users.allUsers).filter((obj) => { if (obj.verified && obj.license && obj.license.toLowerCase() === key.toLowerCase()) { return true; } return false; }); this.props.addUser(values); }; sendChat =() => { const { email } = this.getEmail(); const messageId = this.sortData(email, this.getChatUser()); const { token, verified } = this.props.users.allUsers[this.getChatUser()]; if (verified && token) { const { message } = this.props.chat.chatData; this.props.submitMessage(message, messageId, this.props.user.userProfile); this.props.sendNotification(token, { message, userProfile: this.props.user.userProfile }); } else { alert('You can only send Messages to a verified user'); } }; handleFormError = () => { console.log('Error on form'); }; updateStatus = () => { const { email } = this.getEmail(); this.props.updateStatus(email, this.props.status.update); }; validateForm = (data) => { const { length } = Object.keys(data); if (length >= 3) { const error = []; Object.keys(data).filter((obj) => { const props = data[obj]; const errorResponse = this.onCheckValidation(obj, data[obj]); if (!errorResponse) { this.props.addErrorMessage(`Invalid ${obj} Data passed in. 
Format should be ${this.onShowErrorMessage(data[obj])}`); error.push(obj); } if (!props) { error.push(obj); } }); return error.length <= 1; } return false; }; render() { if (!AuthService.isAuthenticated) { return <Redirect to="login" />; } const { isReady, isRunning, joyrideOverlay, joyrideType, selector, stepIndex, steps, } = this.state; const adminEmail = this.props.admin.adminProfile.email; const { email } = this.getEmail(); if (adminEmail && !email) { return ( <Admin func={{ approveUser: this.approveUser, rejectUser: this.rejectUser, onToggleDashboard: this.onToggleDashboard, onOpenChat: this.onOpenChat, onRemove: this.onRemove, onChangeOffence: this.onChangeOffence, onLogout: this.onLogout, onToggleInfoDisplay: this.onToggleDisplay, }} variables={{ users: this.props.users, weather: this.props.weather[0].coord, chatOrder: this.props.chat.chatData.chatOrder, }} /> ); } return ( <div> <div className={`skin-blue sidebar-mini wrapper sidebar-${this.state.collapse} ${this.state.open}`}> <Joyride ref={(c) => (this.joyride = c)} callback={this.callback} debug={false} disableOverlay={selector === '.card-tickets'} locale={{ back: (<span>Back</span>), close: (<span>Close</span>), last: (<span>Last</span>), next: (<span
{ this.props.addChat('abcd', this.props.users.allUsers[id], id); }
conditional_block
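
The index.js record above validates licenses, expiration dates, and phone numbers with regular expressions. Below is a hypothetical Go translation for comparison, with two deliberate swaps: Go's RE2 engine has no backreferences, so the date rule /^\d{4}([./-])\d{2}\1\d{2}$/ is replaced by time.Parse (dash-separated dates only), and the phone pattern is a stricter, anchored variant of the original /[+](\d+)/g.

package main

import (
	"fmt"
	"regexp"
	"time"
)

// Hypothetical Go counterparts to the index.js validators.
var (
	licenseRe = regexp.MustCompile(`^[a-zA-Z]{3}-\d{3}[a-zA-Z]{2}$`) // e.g. AAA-111MD
	phoneRe   = regexp.MustCompile(`^\+\d+$`)                        // anchored form of /[+](\d+)/
)

// validExpiration replaces the backreference-based date regex with
// time.Parse, then applies the same "must be in the future" rule.
func validExpiration(s string) bool {
	t, err := time.Parse("2006-01-02", s)
	return err == nil && t.After(time.Now())
}

func main() {
	fmt.Println(licenseRe.MatchString("AAA-111MD"))  // true
	fmt.Println(phoneRe.MatchString("+23490974673")) // true
	fmt.Println(validExpiration("2099-01-02"))       // true
}
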
index.js
return currentState; }); } callback = (data) => { console.log('%ccallback', 'color: #47AAAC; font-weight: bold; font-size: 13px;'); // eslint-disable-line no-console console.log(data); // eslint-disable-line no-console this.setState({ selector: data.type === 'tooltip:before' ? data.step.selector : '', }); } onClickSwitch = (e) => { e.preventDefault(); const el = e.currentTarget; const state = {}; if (el.dataset.key === 'joyrideType') { this.joyride.reset(); this.setState({ isRunning: false, }); setTimeout(() => { this.setState({ isRunning: true, }); }, 300); state.joyrideType = e.currentTarget.dataset.type; } if (el.dataset.key === 'joyrideOverlay') { state.joyrideOverlay = el.dataset.type === 'active'; } this.setState(state); }; onChangeFields = (evt) => { const { name, value } = evt.target; const valid = this.onCheckValidation(name, value); if (valid) { this.props.updateFields(name, value); this.props.addErrorMessage(''); return; } this.props.addErrorMessage(`Invalid ${name} Data passed in. Format should be ${this.onShowErrorMessage(name)}`); }; onCheckValidation = (data, value) => { switch (data) { case 'expiration': return (this.onDateValidation(value) && this.onPresentDateValidation(value)); case 'license': return this.onLicenseValidation(value.trim()); case 'phoneNumber': return this.onPhoneNumberValidation(value); default: return true; } }; onLicenseValidation = (license) => license.match(/^[a-zA-Z]{3}[-]\d{3}[a-zA-Z]{2}$/g);
onDateValidation = (date) => date.match(/^\d{4}([./-])\d{2}\1\d{2}$/g); onPresentDateValidation = (dateString) => new Date(dateString) > new Date(); onPhoneNumberValidation = (number) => number.match(/[+](\d+)/g); onChangeOffence = (evt, id) => { let { value } = evt.target; if (value === '--clear--') { value = ''; } this.props.updateOffence(value, id); }; onShowErrorMessage = (data) => { switch (data) { case 'expiration': return '(DD/MM/YYYY) and should be greater than today'; case 'license': return 'AAA-111MD'; case 'phoneNumber': return '+23490974673'; default: return ''; } }; onChangeSearch = (evt) => { const { value } = evt.target; this.props.searchUsers(value); this.props.addUser([]); }; onCloseNotification = () => { }; onFileChange = (evt) => { const { name, files } = evt.target; this.props.updateFields(name, files); }; onLogout = (e) => { e.preventDefault(); const logout = window.confirm('Are you sure you want to logout'); if (logout) { AuthService.logout(); } }; onOpenChat =(id) => { const { email } = this.getEmail(); if (email) { this.props.fetchChatMessage(this.sortData(id, email), id); } else { this.props.addChat('abcd', this.props.users.allUsers[id], id); } }; onOpenNotification = (id) => { this.onOpenChat(id); }; onRateUser =(obj) => { const { email } = this.getEmail(); this.props.rating({ ...obj, userId: email }); }; onRemove =() => { this.props.remove(); }; onSubmit = (evt) => { evt.preventDefault(); const { data } = this.props.data; const valid = this.validateForm(data); const { displayName, email } = this.props.user.userProfile; if (valid) { this.props.submitForm(data, email); this.props.sendSms({ ...data, phoneNumber: '+2349097438705', text: `${displayName}, Needs License Approval.` }); } else { this.handleFormError(); } }; onToggleDisplay = (id, map) => { this.props.updateUserMap(id, map); }; onChat = (evt) => { const { value } = evt.target; this.props.addChatMessage(value); }; getChatUser = () => this.props.chat.chatData.chatOrder[this.props.chat.chatData.chatOrder.length - 1]; getEmail = () => this.props.user.userProfile; addTooltip = (data) => { this.joyride.addTooltip(data); }; approveUser = (evt, data) => { evt.preventDefault(); const { email, displayName } = data; this.props.remove(); this.props.sendSms({ ...data, text: `${displayName}, Your License has been Approved.` }); this.props.approveUsers(email); }; rejectUser = (evt, data) => { evt.preventDefault(); const { email, displayName } = data; this.props.remove(); this.props.sendSms({ ...data, text: `${displayName}, Your License has been Rejected.` }); this.props.rejectUsers(email); }; changeStatusField =(evt) => { const { checked, value, name } = evt.target; const { email } = this.getEmail(); if (!checked) { if (name) { this.props.updateStatusField(value); } else { this.props.toggleVisibility(email, false); } } else { this.props.toggleVisibility(email, true); } }; sortData =(id, email) => { const sortedData = id.localeCompare(email); if (sortedData === -1) { return `${id}${email}`; } return `${email}${id}`; }; searchUser = (e) => { e.preventDefault(); const key = this.props.users.search; const values = Object.values(this.props.users.allUsers).filter((obj) => { if (obj.verified && obj.license && obj.license.toLowerCase() === key.toLowerCase()) { return true; } return false; }); this.props.addUser(values); }; sendChat =() => { const { email } = this.getEmail(); const messageId = this.sortData(email, this.getChatUser()); const { token, verified } = this.props.users.allUsers[this.getChatUser()]; if (verified && 
token) { const { message } = this.props.chat.chatData; this.props.submitMessage(message, messageId, this.props.user.userProfile); this.props.sendNotification(token, { message, userProfile: this.props.user.userProfile }); } else { alert('You can only send Messages to a verified user'); } }; handleFormError = () => { console.log('Error on form'); }; updateStatus = () => { const { email } = this.getEmail(); this.props.updateStatus(email, this.props.status.update); }; validateForm = (data) => { const { length } = Object.keys(data); if (length >= 3) { const error = []; Object.keys(data).filter((obj) => { const props = data[obj]; const errorResponse = this.onCheckValidation(obj, data[obj]); if (!errorResponse) { this.props.addErrorMessage(`Invalid ${obj} Data passed in. Format should be ${this.onShowErrorMessage(data[obj])}`); error.push(obj); } if (!props) { error.push(obj); } }); return error.length <= 1; } return false; }; render() { if (!AuthService.isAuthenticated) { return <Redirect to="login" />; } const { isReady, isRunning, joyrideOverlay, joyrideType, selector, stepIndex, steps, } = this.state; const adminEmail = this.props.admin.adminProfile.email; const { email } = this.getEmail(); if (adminEmail && !email) { return ( <Admin func={{ approveUser: this.approveUser, rejectUser: this.rejectUser, onToggleDashboard: this.onToggleDashboard, onOpenChat: this.onOpenChat, onRemove: this.onRemove, onChangeOffence: this.onChangeOffence, onLogout: this.onLogout, onToggleInfoDisplay: this.onToggleDisplay, }} variables={{ users: this.props.users, weather: this.props.weather[0].coord, chatOrder: this.props.chat.chatData.chatOrder, }} /> ); } return ( <div> <div className={`skin-blue sidebar-mini wrapper sidebar-${this.state.collapse} ${this.state.open}`}> <Joyride ref={(c) => (this.joyride = c)} callback={this.callback} debug={false} disableOverlay={selector === '.card-tickets'} locale={{ back: (<span>Back</span>), close: (<span>Close</span>), last: (<span>Last</span>), next: (<span>
random_line_split
index.js
extends Component { constructor(props) { super(props); this.state = { collapse: '', joyrideOverlay: true, joyrideType: 'continuous', isReady: false, isRunning: false, stepIndex: 0, steps: [], selector: '', open: '', }; } componentWillMount() { this.props.getWeather(''); this.props.fetchUsers(); } componentDidMount() { const { email } = this.getEmail(); if (email) { this.props.registerPushNotification(this.getEmail().email); this.props.getLocation(email); } setTimeout(() => { this.setState({ isReady: true, isRunning: true, }); }, 5000); } onToggleDashboard = () => { if (this.state.collapse === '') { this.setState({ collapse: 'collapse', open: '' }); } else { this.setState({ collapse: '', open: 'sidebar-open' }); } }; next = () => { this.joyride.next(); }; addSteps = (steps) => { let newSteps = steps; if (!Array.isArray(newSteps)) { newSteps = [newSteps]; } if (!newSteps.length) { return; } // Force setState to be synchronous to keep step order. this.setState((currentState) => { currentState.steps = currentState.steps.concat(newSteps); return currentState; }); } callback = (data) => { console.log('%ccallback', 'color: #47AAAC; font-weight: bold; font-size: 13px;'); // eslint-disable-line no-console console.log(data); // eslint-disable-line no-console this.setState({ selector: data.type === 'tooltip:before' ? data.step.selector : '', }); } onClickSwitch = (e) => { e.preventDefault(); const el = e.currentTarget; const state = {}; if (el.dataset.key === 'joyrideType') { this.joyride.reset(); this.setState({ isRunning: false, }); setTimeout(() => { this.setState({ isRunning: true, }); }, 300); state.joyrideType = e.currentTarget.dataset.type; } if (el.dataset.key === 'joyrideOverlay') { state.joyrideOverlay = el.dataset.type === 'active'; } this.setState(state); }; onChangeFields = (evt) => { const { name, value } = evt.target; const valid = this.onCheckValidation(name, value); if (valid) { this.props.updateFields(name, value); this.props.addErrorMessage(''); return; } this.props.addErrorMessage(`Invalid ${name} Data passed in. 
Format should be ${this.onShowErrorMessage(name)}`); }; onCheckValidation = (data, value) => { switch (data) { case 'expiration': return (this.onDateValidation(value) && this.onPresentDateValidation(value)); case 'license': return this.onLicenseValidation(value.trim()); case 'phoneNumber': return this.onPhoneNumberValidation(value); default: return true; } }; onLicenseValidation = (license) => license.match(/^[a-zA-Z]{3}[-]\d{3}[a-zA-Z]{2}$/g); onDateValidation = (date) => date.match(/^\d{4}([./-])\d{2}\1\d{2}$/g); onPresentDateValidation = (dateString) => new Date(dateString) > new Date(); onPhoneNumberValidation = (number) => number.match(/[+](\d+)/g); onChangeOffence = (evt, id) => { let { value } = evt.target; if (value === '--clear--') { value = ''; } this.props.updateOffence(value, id); }; onShowErrorMessage = (data) => { switch (data) { case 'expiration': return '(DD/MM/YYYY) and should be greater than today'; case 'license': return 'AAA-111MD'; case 'phoneNumber': return '+23490974673'; default: return ''; } }; onChangeSearch = (evt) => { const { value } = evt.target; this.props.searchUsers(value); this.props.addUser([]); }; onCloseNotification = () => { }; onFileChange = (evt) => { const { name, files } = evt.target; this.props.updateFields(name, files); }; onLogout = (e) => { e.preventDefault(); const logout = window.confirm('Are you sure you want to logout'); if (logout) { AuthService.logout(); } }; onOpenChat =(id) => { const { email } = this.getEmail(); if (email) { this.props.fetchChatMessage(this.sortData(id, email), id); } else { this.props.addChat('abcd', this.props.users.allUsers[id], id); } }; onOpenNotification = (id) => { this.onOpenChat(id); }; onRateUser =(obj) => { const { email } = this.getEmail(); this.props.rating({ ...obj, userId: email }); }; onRemove =() => { this.props.remove(); }; onSubmit = (evt) => { evt.preventDefault(); const { data } = this.props.data; const valid = this.validateForm(data); const { displayName, email } = this.props.user.userProfile; if (valid) { this.props.submitForm(data, email); this.props.sendSms({ ...data, phoneNumber: '+2349097438705', text: `${displayName}, Needs License Approval.` }); } else { this.handleFormError(); } }; onToggleDisplay = (id, map) => { this.props.updateUserMap(id, map); }; onChat = (evt) => { const { value } = evt.target; this.props.addChatMessage(value); }; getChatUser = () => this.props.chat.chatData.chatOrder[this.props.chat.chatData.chatOrder.length - 1]; getEmail = () => this.props.user.userProfile; addTooltip = (data) => { this.joyride.addTooltip(data); }; approveUser = (evt, data) => { evt.preventDefault(); const { email, displayName } = data; this.props.remove(); this.props.sendSms({ ...data, text: `${displayName}, Your License has been Approved.` }); this.props.approveUsers(email); }; rejectUser = (evt, data) => { evt.preventDefault(); const { email, displayName } = data; this.props.remove(); this.props.sendSms({ ...data, text: `${displayName}, Your License has been Rejected.` }); this.props.rejectUsers(email); }; changeStatusField =(evt) => { const { checked, value, name } = evt.target; const { email } = this.getEmail(); if (!checked) { if (name) { this.props.updateStatusField(value); } else { this.props.toggleVisibility(email, false); } } else { this.props.toggleVisibility(email, true); } }; sortData =(id, email) => { const sortedData = id.localeCompare(email); if (sortedData === -1) { return `${id}${email}`; } return `${email}${id}`; }; searchUser = (e) => { e.preventDefault(); const key = 
this.props.users.search; const values = Object.values(this.props.users.allUsers).filter((obj) => { if (obj.verified && obj.license && obj.license.toLowerCase() === key.toLowerCase()) { return true; } return false; }); this.props.addUser(values); }; sendChat =() => { const { email } = this.getEmail(); const messageId = this.sortData(email, this.getChatUser()); const { token, verified } = this.props.users.allUsers[this.getChatUser()]; if (verified && token) { const { message } = this.props.chat.chatData; this.props.submitMessage(message, messageId, this.props.user.userProfile); this.props.sendNotification(token, { message, userProfile: this.props.user.userProfile }); } else { alert('You can only send Messages to a verified user'); } }; handleFormError = () => { console.log('Error on form'); }; updateStatus = () => { const { email } = this.getEmail(); this.props.updateStatus(email, this.props.status.update); }; validateForm = (data) => { const { length } = Object.keys(data); if (length >= 3) { const error = []; Object.keys(data).filter((obj) => { const props = data[obj]; const errorResponse = this.onCheckValidation(obj, data[obj]); if (!errorResponse) { this.props.addErrorMessage(`Invalid ${obj} Data passed in. Format should be ${this.onShowErrorMessage(data[obj])}`); error.push(obj); } if (!props) { error.push(obj); } }); return error.length <= 1; } return false; }; render() { if (!AuthService.isAuthenticated) { return <Redirect to="login" />; }
Dashboard
identifier_name
index.js
}; onCloseNotification = () => { }; onFileChange = (evt) => { const { name, files } = evt.target; this.props.updateFields(name, files); }; onLogout = (e) => { e.preventDefault(); const logout = window.confirm('Are you sure you want to logout'); if (logout) { AuthService.logout(); } }; onOpenChat =(id) => { const { email } = this.getEmail(); if (email) { this.props.fetchChatMessage(this.sortData(id, email), id); } else { this.props.addChat('abcd', this.props.users.allUsers[id], id); } }; onOpenNotification = (id) => { this.onOpenChat(id); }; onRateUser =(obj) => { const { email } = this.getEmail(); this.props.rating({ ...obj, userId: email }); }; onRemove =() => { this.props.remove(); }; onSubmit = (evt) => { evt.preventDefault(); const { data } = this.props.data; const valid = this.validateForm(data); const { displayName, email } = this.props.user.userProfile; if (valid) { this.props.submitForm(data, email); this.props.sendSms({ ...data, phoneNumber: '+2349097438705', text: `${displayName}, Needs License Approval.` }); } else { this.handleFormError(); } }; onToggleDisplay = (id, map) => { this.props.updateUserMap(id, map); }; onChat = (evt) => { const { value } = evt.target; this.props.addChatMessage(value); }; getChatUser = () => this.props.chat.chatData.chatOrder[this.props.chat.chatData.chatOrder.length - 1]; getEmail = () => this.props.user.userProfile; addTooltip = (data) => { this.joyride.addTooltip(data); }; approveUser = (evt, data) => { evt.preventDefault(); const { email, displayName } = data; this.props.remove(); this.props.sendSms({ ...data, text: `${displayName}, Your License has been Approved.` }); this.props.approveUsers(email); }; rejectUser = (evt, data) => { evt.preventDefault(); const { email, displayName } = data; this.props.remove(); this.props.sendSms({ ...data, text: `${displayName}, Your License has been Rejected.` }); this.props.rejectUsers(email); }; changeStatusField =(evt) => { const { checked, value, name } = evt.target; const { email } = this.getEmail(); if (!checked) { if (name) { this.props.updateStatusField(value); } else { this.props.toggleVisibility(email, false); } } else { this.props.toggleVisibility(email, true); } }; sortData =(id, email) => { const sortedData = id.localeCompare(email); if (sortedData === -1) { return `${id}${email}`; } return `${email}${id}`; }; searchUser = (e) => { e.preventDefault(); const key = this.props.users.search; const values = Object.values(this.props.users.allUsers).filter((obj) => { if (obj.verified && obj.license && obj.license.toLowerCase() === key.toLowerCase()) { return true; } return false; }); this.props.addUser(values); }; sendChat =() => { const { email } = this.getEmail(); const messageId = this.sortData(email, this.getChatUser()); const { token, verified } = this.props.users.allUsers[this.getChatUser()]; if (verified && token) { const { message } = this.props.chat.chatData; this.props.submitMessage(message, messageId, this.props.user.userProfile); this.props.sendNotification(token, { message, userProfile: this.props.user.userProfile }); } else { alert('You can only send Messages to a verified user'); } }; handleFormError = () => { console.log('Error on form'); }; updateStatus = () => { const { email } = this.getEmail(); this.props.updateStatus(email, this.props.status.update); }; validateForm = (data) => { const { length } = Object.keys(data); if (length >= 3) { const error = []; Object.keys(data).filter((obj) => { const props = data[obj]; const errorResponse = this.onCheckValidation(obj, data[obj]); if 
(!errorResponse) { this.props.addErrorMessage(`Invalid ${obj} Data passed in. Format should be ${this.onShowErrorMessage(data[obj])}`); error.push(obj); } if (!props) { error.push(obj); } }); return error.length <= 1; } return false; }; render() { if (!AuthService.isAuthenticated) { return <Redirect to="login" />; } const { isReady, isRunning, joyrideOverlay, joyrideType, selector, stepIndex, steps, } = this.state; const adminEmail = this.props.admin.adminProfile.email; const { email } = this.getEmail(); if (adminEmail && !email) { return ( <Admin func={{ approveUser: this.approveUser, rejectUser: this.rejectUser, onToggleDashboard: this.onToggleDashboard, onOpenChat: this.onOpenChat, onRemove: this.onRemove, onChangeOffence: this.onChangeOffence, onLogout: this.onLogout, onToggleInfoDisplay: this.onToggleDisplay, }} variables={{ users: this.props.users, weather: this.props.weather[0].coord, chatOrder: this.props.chat.chatData.chatOrder, }} /> ); } return ( <div> <div className={`skin-blue sidebar-mini wrapper sidebar-${this.state.collapse} ${this.state.open}`}> <Joyride ref={(c) => (this.joyride = c)} callback={this.callback} debug={false} disableOverlay={selector === '.card-tickets'} locale={{ back: (<span>Back</span>), close: (<span>Close</span>), last: (<span>Last</span>), next: (<span>Next</span>), skip: (<span>Skip</span>), }} run={isRunning} showOverlay={joyrideOverlay} showSkipButton showStepsProgress stepIndex={stepIndex} steps={steps} type={joyrideType} /> <Users func={{ addSteps: this.addSteps, addTooltip: this.addTooltip, updateStatus: this.updateStatus, changeStatusField: this.changeStatusField, onOpenChat: this.onOpenChat, searchUser: this.searchUser, onChangeSearch: this.onChangeSearch, onChangeFields: this.onChangeFields, onFileChange: this.onFileChange, onSubmit: this.onSubmit, onChat: this.onChat, onClickSwitch: this.onClickSwitch, next: this.next, sendChat: this.sendChat, onLogout: this.onLogout, onRemove: this.onRemove, onOpenNotification: this.onOpenNotification, onCloseNotification: this.onCloseNotification, onToggleDashboard: this.onToggleDashboard, onToggleInfoDisplay: this.onToggleDisplay, onRateUser: this.onRateUser, updateGeolocationAddress: this.props.updateGeolocationAddress, }} variables={{ error: this.props.error, collapse: this.state.collapse, userProfile: this.props.user.userProfile, chatData: this.props.chat.chatData, users: this.props.users, notification: this.props.notification, weather: this.props.weather[0].weather && this.props.weather[0].weather[0], joyrideType, joyrideOverlay, selector, }} /> </div> </div> ); } } function mapDispatchToProps(dispatch)
{ return { addChat: (id, value, userId) => dispatch(addChat(id, value, userId)), addUser: (user) => dispatch(addUser(user)), approveUsers: (id) => dispatch(approveUsers(id)), addChatMessage: (message) => dispatch(addChatMessage(message)), addErrorMessage: (msg) => dispatch(addErrorMessages(msg)), fetchChatMessage: (id, userId) => dispatch(fetchChatMessage(id, userId)), fetchUsers: () => dispatch(fetchUsers()), getWeather: (city) => dispatch(getWeather(city)), getLocation: (id) => dispatch(getMapData(id)), rating: (obj) => dispatch(rating(obj)), rejectUsers: (id) => dispatch(rejectUsers(id)), remove: () => dispatch(removeChat()), registerPushNotification: (id) => dispatch(registerPushNotification(id)), searchUsers: (users) => dispatch(searchUsers(users)), sendNotification: (token, body) => dispatch(sendNotification(token, body)), sendSms: (obj) => dispatch(sendSms(obj)), submitForm: (data, id) => dispatch(submitForm(data, id)), submitMessage: (message, id, userProfile) => dispatch(submitMessage(message, id, userProfile)),
identifier_body
more.rs
libc::c_char) -> libc::c_int; #[no_mangle] fn die_if_ferror_stdout(); #[no_mangle] fn fflush_all() -> libc::c_int; #[no_mangle] fn fopen_or_warn(filename: *const libc::c_char, mode: *const libc::c_char) -> *mut FILE; #[no_mangle] fn fopen_for_read(path: *const libc::c_char) -> *mut FILE; #[no_mangle] fn getopt32(argv: *mut *mut libc::c_char, applet_opts: *const libc::c_char, _: ...) -> u32; #[no_mangle] fn bb_cat(argv: *mut *mut libc::c_char) -> libc::c_int; #[no_mangle]
) -> libc::c_int; #[no_mangle] fn set_termios_to_raw(fd: libc::c_int, oldterm: *mut termios, flags: libc::c_int) -> libc::c_int; #[no_mangle] static mut bb_common_bufsiz1: [libc::c_char; 0]; } pub type C2RustUnnamed = libc::c_uint; pub const BB_FATAL_SIGS: C2RustUnnamed = 117503054; #[derive(Copy, Clone)] #[repr(C)] pub struct globals { pub tty_fileno: libc::c_int, pub terminal_width: libc::c_uint, pub terminal_height: libc::c_uint, pub initial_settings: termios, } #[inline(always)] unsafe extern "C" fn bb_ascii_tolower(mut a: libc::c_uchar) -> libc::c_uchar { let mut b: libc::c_uchar = (a as libc::c_int - 'A' as i32) as libc::c_uchar; if b as libc::c_int <= 'Z' as i32 - 'A' as i32 { a = (a as libc::c_int + ('a' as i32 - 'A' as i32)) as libc::c_uchar } return a; } unsafe extern "C" fn get_wh() { /* never returns w, h <= 1 */ get_terminal_width_height( (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).tty_fileno, &mut (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).terminal_width, &mut (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).terminal_height, ); let ref mut fresh0 = (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).terminal_height; *fresh0 = (*fresh0).wrapping_sub(1i32 as libc::c_uint); } unsafe extern "C" fn tcsetattr_tty_TCSANOW(mut settings: *mut termios) { tcsetattr( (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).tty_fileno, 0i32, settings, ); } unsafe extern "C" fn gotsig(mut _sig: libc::c_int) { /* bb_putchar_stderr doesn't use stdio buffering, * therefore it is safe in signal handler */ bb_putchar_stderr('\n' as i32 as libc::c_char); /* for compiler */ tcsetattr_tty_TCSANOW(&mut (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).initial_settings); _exit(1i32); } #[no_mangle] pub unsafe extern "C" fn more_main( mut _argc: libc::c_int, mut argv: *mut *mut libc::c_char, ) -> libc::c_int { let mut current_block: u64; let mut c: libc::c_int = 0; c = c; let mut input: libc::c_int = 0i32; let mut spaces: libc::c_int = 0i32; let mut please_display_more_prompt: libc::c_int = 0; let mut tty: *mut FILE = 0 as *mut FILE; /* Parse options */ /* Accepted but ignored: */ /* -d Display help instead of ringing bell */ /* -f Count logical lines (IOW: long lines are not folded) */ /* -l Do not pause after any line containing a ^L (form feed) */ /* -s Squeeze blank lines into one */ /* -u Suppress underlining */ getopt32(argv, b"dflsu\x00" as *const u8 as *const libc::c_char); argv = argv.offset(optind as isize); /* Another popular pager, most, detects when stdout * is not a tty and turns into cat. This makes sense. 
*/ if isatty(1i32) == 0 { return bb_cat(argv); } tty = fopen_for_read(b"/dev/tty\x00" as *const u8 as *const libc::c_char); if tty.is_null() { return bb_cat(argv); } (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).tty_fileno = fileno_unlocked(tty); /* Turn on unbuffered input; turn off echoing */ set_termios_to_raw( (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).tty_fileno, &mut (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).initial_settings, 0i32, ); bb_signals( BB_FATAL_SIGS as libc::c_int, Some(gotsig as unsafe extern "C" fn(_: libc::c_int) -> ()), ); 's_75: loop { let mut st: stat = std::mem::zeroed(); let mut file: *mut FILE = 0 as *mut FILE; let mut len: libc::c_int = 0; let mut lines: libc::c_int = 0; file = stdin; if !(*argv).is_null() { file = fopen_or_warn(*argv, b"r\x00" as *const u8 as *const libc::c_char); if file.is_null() { current_block = 12349973810996921269; } else { current_block = 15089075282327824602; } } else { current_block = 15089075282327824602; } match current_block { 15089075282327824602 => { st.st_size = 0i32 as off_t; fstat(fileno_unlocked(file), &mut st); get_wh(); please_display_more_prompt = 0i32; len = 0i32; lines = 0i32; loop { let mut wrap: libc::c_int = 0; if spaces != 0 { spaces -= 1 } else { c = getc_unlocked(file); if c == -1i32 { break; } } loop /* if tty was destroyed (closed xterm, etc) */ /* Then outputting this will also put a character on * the beginning of that new line. Thus we first want to * display the prompt (if any), so we skip the putchar() * and go back to the top of the loop, without reading * a new character. */ { if input != 'r' as i32 && please_display_more_prompt != 0 { len = printf(b"--More-- \x00" as *const u8 as *const libc::c_char); if st.st_size != 0 { let mut d: uoff_t = (st.st_size as uoff_t).wrapping_div(100i32 as libc::c_ulong); if d == 0i32 as libc::c_ulong { d = 1i32 as uoff_t } len += printf( b"(%u%% of %lu bytes)\x00" as *const u8 as *const libc::c_char, (ftello(file) as uoff_t).wrapping_div(d) as libc::c_int, st.st_size, ) } loop /* * We've just displayed the "--More--" prompt, so now we need * to get input from the user. */ { fflush_all(); input = getc_unlocked(tty); input = bb_ascii_tolower(input as libc::c_uchar) as libc::c_int; /* Erase the last message */ printf( b"\r%*s\r\x00" as *const u8 as *const libc::c_char, len, b"\x00" as *const u8 as *const
fn get_terminal_width_height( fd: libc::c_int, width: *mut libc::c_uint, height: *mut libc::c_uint,
random_line_split
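The more.rs fragments above center on switching the controlling tty into raw mode and querying its dimensions. A minimal standalone sketch of that sequence, assuming the libc crate on a Unix target (set_termios_to_raw and get_terminal_width_height are busybox helpers whose bodies are not shown here):

fn main() {
    unsafe {
        let fd = 0; // stdin
        // Save the original settings so they can be restored on exit or
        // on a fatal signal, as gotsig() does above.
        let mut initial: libc::termios = std::mem::zeroed();
        if libc::tcgetattr(fd, &mut initial) != 0 {
            eprintln!("not a tty");
            return;
        }
        // Raw mode: unbuffered input, no echo.
        let mut raw = initial;
        libc::cfmakeraw(&mut raw);
        libc::tcsetattr(fd, libc::TCSANOW, &raw);

        // Query the window size the way get_wh() does, reserving one row
        // for the --More-- prompt.
        let mut ws: libc::winsize = std::mem::zeroed();
        libc::ioctl(fd, libc::TIOCGWINSZ, &mut ws);
        let usable_rows = ws.ws_row.saturating_sub(1);

        // ... pager loop would run here ...

        // Restore the saved settings, as tcsetattr_tty_TCSANOW() does.
        libc::tcsetattr(fd, libc::TCSANOW, &initial);
        eprintln!("usable rows: {}", usable_rows);
    }
}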
more.rs
libc::c_char) -> libc::c_int; #[no_mangle] fn die_if_ferror_stdout(); #[no_mangle] fn fflush_all() -> libc::c_int; #[no_mangle] fn fopen_or_warn(filename: *const libc::c_char, mode: *const libc::c_char) -> *mut FILE; #[no_mangle] fn fopen_for_read(path: *const libc::c_char) -> *mut FILE; #[no_mangle] fn getopt32(argv: *mut *mut libc::c_char, applet_opts: *const libc::c_char, _: ...) -> u32; #[no_mangle] fn bb_cat(argv: *mut *mut libc::c_char) -> libc::c_int; #[no_mangle] fn get_terminal_width_height( fd: libc::c_int, width: *mut libc::c_uint, height: *mut libc::c_uint, ) -> libc::c_int; #[no_mangle] fn set_termios_to_raw(fd: libc::c_int, oldterm: *mut termios, flags: libc::c_int) -> libc::c_int; #[no_mangle] static mut bb_common_bufsiz1: [libc::c_char; 0]; } pub type C2RustUnnamed = libc::c_uint; pub const BB_FATAL_SIGS: C2RustUnnamed = 117503054; #[derive(Copy, Clone)] #[repr(C)] pub struct globals { pub tty_fileno: libc::c_int, pub terminal_width: libc::c_uint, pub terminal_height: libc::c_uint, pub initial_settings: termios, } #[inline(always)] unsafe extern "C" fn bb_ascii_tolower(mut a: libc::c_uchar) -> libc::c_uchar { let mut b: libc::c_uchar = (a as libc::c_int - 'A' as i32) as libc::c_uchar; if b as libc::c_int <= 'Z' as i32 - 'A' as i32 { a = (a as libc::c_int + ('a' as i32 - 'A' as i32)) as libc::c_uchar } return a; } unsafe extern "C" fn get_wh() { /* never returns w, h <= 1 */ get_terminal_width_height( (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).tty_fileno, &mut (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).terminal_width, &mut (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).terminal_height, ); let ref mut fresh0 = (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).terminal_height; *fresh0 = (*fresh0).wrapping_sub(1i32 as libc::c_uint); } unsafe extern "C" fn tcsetattr_tty_TCSANOW(mut settings: *mut termios)
unsafe extern "C" fn gotsig(mut _sig: libc::c_int) { /* bb_putchar_stderr doesn't use stdio buffering, * therefore it is safe in signal handler */ bb_putchar_stderr('\n' as i32 as libc::c_char); /* for compiler */ tcsetattr_tty_TCSANOW(&mut (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).initial_settings); _exit(1i32); } #[no_mangle] pub unsafe extern "C" fn more_main( mut _argc: libc::c_int, mut argv: *mut *mut libc::c_char, ) -> libc::c_int { let mut current_block: u64; let mut c: libc::c_int = 0; c = c; let mut input: libc::c_int = 0i32; let mut spaces: libc::c_int = 0i32; let mut please_display_more_prompt: libc::c_int = 0; let mut tty: *mut FILE = 0 as *mut FILE; /* Parse options */ /* Accepted but ignored: */ /* -d Display help instead of ringing bell */ /* -f Count logical lines (IOW: long lines are not folded) */ /* -l Do not pause after any line containing a ^L (form feed) */ /* -s Squeeze blank lines into one */ /* -u Suppress underlining */ getopt32(argv, b"dflsu\x00" as *const u8 as *const libc::c_char); argv = argv.offset(optind as isize); /* Another popular pager, most, detects when stdout * is not a tty and turns into cat. This makes sense. */ if isatty(1i32) == 0 { return bb_cat(argv); } tty = fopen_for_read(b"/dev/tty\x00" as *const u8 as *const libc::c_char); if tty.is_null() { return bb_cat(argv); } (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).tty_fileno = fileno_unlocked(tty); /* Turn on unbuffered input; turn off echoing */ set_termios_to_raw( (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).tty_fileno, &mut (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).initial_settings, 0i32, ); bb_signals( BB_FATAL_SIGS as libc::c_int, Some(gotsig as unsafe extern "C" fn(_: libc::c_int) -> ()), ); 's_75: loop { let mut st: stat = std::mem::zeroed(); let mut file: *mut FILE = 0 as *mut FILE; let mut len: libc::c_int = 0; let mut lines: libc::c_int = 0; file = stdin; if !(*argv).is_null() { file = fopen_or_warn(*argv, b"r\x00" as *const u8 as *const libc::c_char); if file.is_null() { current_block = 12349973810996921269; } else { current_block = 15089075282327824602; } } else { current_block = 15089075282327824602; } match current_block { 15089075282327824602 => { st.st_size = 0i32 as off_t; fstat(fileno_unlocked(file), &mut st); get_wh(); please_display_more_prompt = 0i32; len = 0i32; lines = 0i32; loop { let mut wrap: libc::c_int = 0; if spaces != 0 { spaces -= 1 } else { c = getc_unlocked(file); if c == -1i32 { break; } } loop /* if tty was destroyed (closed xterm, etc) */ /* Then outputting this will also put a character on * the beginning of that new line. Thus we first want to * display the prompt (if any), so we skip the putchar() * and go back to the top of the loop, without reading * a new character. */ { if input != 'r' as i32 && please_display_more_prompt != 0 { len = printf(b"--More-- \x00" as *const u8 as *const libc::c_char); if st.st_size != 0 { let mut d: uoff_t = (st.st_size as uoff_t).wrapping_div(100i32 as libc::c_ulong); if d == 0i32 as libc::c_ulong { d = 1i32 as uoff_t } len += printf( b"(%u%% of %lu bytes)\x00" as *const u8 as *const libc::c_char, (ftello(file) as uoff_t).wrapping_div(d) as libc::c_int, st.st_size, ) } loop /* * We've just displayed the "--More--" prompt, so now we need * to get input from the user. */ { fflush_all(); input = getc_unlocked(tty); input = bb_ascii_tolower(input as libc::c_uchar) as libc::c_int; /* Erase the last message */ printf( b"\r%*s\r\x00" as *const u8 as *const libc::c_char, len, b"\x00" as *const u8 as
{ tcsetattr( (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).tty_fileno, 0i32, settings, ); }
identifier_body
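bb_ascii_tolower above folds two range checks into one: subtracting 'A' first makes every byte below 'A' wrap around to a large unsigned value, so a single comparison rejects both sides of the A..Z range. The same trick in safe Rust:

fn ascii_tolower(a: u8) -> u8 {
    let b = a.wrapping_sub(b'A');
    if b <= b'Z' - b'A' {
        a + (b'a' - b'A') // inside A..=Z: shift into lowercase
    } else {
        a // everything else passes through unchanged
    }
}

fn main() {
    assert_eq!(ascii_tolower(b'Q'), b'q');
    assert_eq!(ascii_tolower(b'q'), b'q');
    assert_eq!(ascii_tolower(b'!'), b'!');
}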
more.rs
libc::c_char) -> libc::c_int; #[no_mangle] fn die_if_ferror_stdout(); #[no_mangle] fn fflush_all() -> libc::c_int; #[no_mangle] fn fopen_or_warn(filename: *const libc::c_char, mode: *const libc::c_char) -> *mut FILE; #[no_mangle] fn fopen_for_read(path: *const libc::c_char) -> *mut FILE; #[no_mangle] fn getopt32(argv: *mut *mut libc::c_char, applet_opts: *const libc::c_char, _: ...) -> u32; #[no_mangle] fn bb_cat(argv: *mut *mut libc::c_char) -> libc::c_int; #[no_mangle] fn get_terminal_width_height( fd: libc::c_int, width: *mut libc::c_uint, height: *mut libc::c_uint, ) -> libc::c_int; #[no_mangle] fn set_termios_to_raw(fd: libc::c_int, oldterm: *mut termios, flags: libc::c_int) -> libc::c_int; #[no_mangle] static mut bb_common_bufsiz1: [libc::c_char; 0]; } pub type C2RustUnnamed = libc::c_uint; pub const BB_FATAL_SIGS: C2RustUnnamed = 117503054; #[derive(Copy, Clone)] #[repr(C)] pub struct globals { pub tty_fileno: libc::c_int, pub terminal_width: libc::c_uint, pub terminal_height: libc::c_uint, pub initial_settings: termios, } #[inline(always)] unsafe extern "C" fn bb_ascii_tolower(mut a: libc::c_uchar) -> libc::c_uchar { let mut b: libc::c_uchar = (a as libc::c_int - 'A' as i32) as libc::c_uchar; if b as libc::c_int <= 'Z' as i32 - 'A' as i32 { a = (a as libc::c_int + ('a' as i32 - 'A' as i32)) as libc::c_uchar } return a; } unsafe extern "C" fn get_wh() { /* never returns w, h <= 1 */ get_terminal_width_height( (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).tty_fileno, &mut (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).terminal_width, &mut (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).terminal_height, ); let ref mut fresh0 = (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).terminal_height; *fresh0 = (*fresh0).wrapping_sub(1i32 as libc::c_uint); } unsafe extern "C" fn
(mut settings: *mut termios) { tcsetattr( (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).tty_fileno, 0i32, settings, ); } unsafe extern "C" fn gotsig(mut _sig: libc::c_int) { /* bb_putchar_stderr doesn't use stdio buffering, * therefore it is safe in signal handler */ bb_putchar_stderr('\n' as i32 as libc::c_char); /* for compiler */ tcsetattr_tty_TCSANOW(&mut (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).initial_settings); _exit(1i32); } #[no_mangle] pub unsafe extern "C" fn more_main( mut _argc: libc::c_int, mut argv: *mut *mut libc::c_char, ) -> libc::c_int { let mut current_block: u64; let mut c: libc::c_int = 0; c = c; let mut input: libc::c_int = 0i32; let mut spaces: libc::c_int = 0i32; let mut please_display_more_prompt: libc::c_int = 0; let mut tty: *mut FILE = 0 as *mut FILE; /* Parse options */ /* Accepted but ignored: */ /* -d Display help instead of ringing bell */ /* -f Count logical lines (IOW: long lines are not folded) */ /* -l Do not pause after any line containing a ^L (form feed) */ /* -s Squeeze blank lines into one */ /* -u Suppress underlining */ getopt32(argv, b"dflsu\x00" as *const u8 as *const libc::c_char); argv = argv.offset(optind as isize); /* Another popular pager, most, detects when stdout * is not a tty and turns into cat. This makes sense. */ if isatty(1i32) == 0 { return bb_cat(argv); } tty = fopen_for_read(b"/dev/tty\x00" as *const u8 as *const libc::c_char); if tty.is_null() { return bb_cat(argv); } (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).tty_fileno = fileno_unlocked(tty); /* Turn on unbuffered input; turn off echoing */ set_termios_to_raw( (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).tty_fileno, &mut (*(bb_common_bufsiz1.as_mut_ptr() as *mut globals)).initial_settings, 0i32, ); bb_signals( BB_FATAL_SIGS as libc::c_int, Some(gotsig as unsafe extern "C" fn(_: libc::c_int) -> ()), ); 's_75: loop { let mut st: stat = std::mem::zeroed(); let mut file: *mut FILE = 0 as *mut FILE; let mut len: libc::c_int = 0; let mut lines: libc::c_int = 0; file = stdin; if !(*argv).is_null() { file = fopen_or_warn(*argv, b"r\x00" as *const u8 as *const libc::c_char); if file.is_null() { current_block = 12349973810996921269; } else { current_block = 15089075282327824602; } } else { current_block = 15089075282327824602; } match current_block { 15089075282327824602 => { st.st_size = 0i32 as off_t; fstat(fileno_unlocked(file), &mut st); get_wh(); please_display_more_prompt = 0i32; len = 0i32; lines = 0i32; loop { let mut wrap: libc::c_int = 0; if spaces != 0 { spaces -= 1 } else { c = getc_unlocked(file); if c == -1i32 { break; } } loop /* if tty was destroyed (closed xterm, etc) */ /* Then outputting this will also put a character on * the beginning of that new line. Thus we first want to * display the prompt (if any), so we skip the putchar() * and go back to the top of the loop, without reading * a new character. */ { if input != 'r' as i32 && please_display_more_prompt != 0 { len = printf(b"--More-- \x00" as *const u8 as *const libc::c_char); if st.st_size != 0 { let mut d: uoff_t = (st.st_size as uoff_t).wrapping_div(100i32 as libc::c_ulong); if d == 0i32 as libc::c_ulong { d = 1i32 as uoff_t } len += printf( b"(%u%% of %lu bytes)\x00" as *const u8 as *const libc::c_char, (ftello(file) as uoff_t).wrapping_div(d) as libc::c_int, st.st_size, ) } loop /* * We've just displayed the "--More--" prompt, so now we need * to get input from the user. 
*/ { fflush_all(); input = getc_unlocked(tty); input = bb_ascii_tolower(input as libc::c_uchar) as libc::c_int; /* Erase the last message */ printf( b"\r%*s\r\x00" as *const u8 as *const libc::c_char, len, b"\x00" as *const u8 as *
tcsetattr_tty_TCSANOW
identifier_name
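The --More-- prompt in the suffix above reports progress as position / (size / 100), clamping the divisor to 1 so files smaller than 100 bytes don't divide by zero. That arithmetic in isolation:

fn percent_of(pos: u64, size: u64) -> u64 {
    // d is roughly one percent of the file; never let it reach zero.
    let d = (size / 100).max(1);
    pos / d
}

fn main() {
    assert_eq!(percent_of(500, 1000), 50);
    assert_eq!(percent_of(3, 10), 3); // tiny file: divisor clamped to 1
    assert_eq!(percent_of(0, 0), 0);
}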
point_cloud.rs
&path.as_ref())); let mut config = String::new(); config_file .read_to_string(&mut config) .expect(&format!("Unable to read config file {:?}", &path.as_ref())); let params_files = YamlLoader::load_from_str(&config).unwrap(); PointCloud::<M>::from_yaml(&params_files[0]) } /// Builds the point cloud from data in ram. This is for quick things with simple metadata pub fn simple_from_ram( data: Box<[f32]>, data_dim: usize, labels: Box<[f32]>, labels_dim: usize, ) -> PointCloudResult<PointCloud<M>> { assert!(data.len() / data_dim == labels.len() / labels_dim); let list = MetadataList::simple_vec(labels, labels_dim); PointCloud::<M>::from_ram(data, data_dim, list) } /// Total number of points in the point cloud pub fn len(&self) -> usize { self.data_sources.iter().fold(0, |acc, mm| acc + mm.len()) } /// Dimension of the data in the point cloud pub fn dim(&self) -> usize { self.data_dim } /// The names of the data are currently a shallow wrapper around a usize. pub fn reference_indexes(&self) -> Vec<PointIndex> { self.addresses.keys().cloned().collect() } /// Returns a arc that points to a AVX2 packed point. This also acts like a cache for these center /// points to ensure that we don't load multiple copies into memory. Used for heavily /// referenced points, like centers. pub fn get_center(&self, pn: PointIndex) -> PointCloudResult<Arc<Vec<f32>>> { let mut loaded_centers = self.loaded_centers.lock().unwrap(); Ok(Arc::clone( loaded_centers .entry(pn) .or_insert(Arc::new(Vec::from(self.get_point(pn)?))), )) } #[inline] fn get_address(&self,pn: PointIndex) -> PointCloudResult<(usize,usize)> { match self.addresses.get(&pn) { Some((i, j)) => Ok((*i,*j)), None => panic!("Index not found"), } } /// Returns a slice corresponding to the point in question. Used for rarely referenced points, /// like outliers or leaves. pub fn get_point(&self, pn: PointIndex) -> PointCloudResult<&[f32]> { let (i,j) = self.get_address(pn)?; self.data_sources[i].get(j) } /// Gets the name from an index pub fn get_name(&self, pi: &PointIndex) -> Option<&PointName> { self.indexes_to_names.get(pi) } /// Gets the index from the name pub fn get_index(&self, pn: &PointName) -> Option<&PointIndex> { self.names_to_indexes.get(pn) } /// Gets all names in the point cloud pub fn get_names(&self) -> Vec<PointName> { self.names_to_indexes.keys().cloned().collect() } /// Gets a schema to use pub fn schema_json(&self) -> String { self.labels_scheme.schema_json() } /// Returns the label of a point. /// /// This will be changed to return a label structure that can contain many different pieces of info. pub fn get_metadata(&self, pn: PointIndex) -> PointCloudResult<Metadata> { let (i,j) = self.get_address(pn)?; self.label_sources[i].get(j) } /// Returns a complex summary of a collection of metadatas associated to a point pub fn get_metasummary(&self, pns: &[PointIndex]) -> PointCloudResult<MetaSummary> { let mut disk_splits: Vec<Vec<usize>> = vec![Vec::new(); self.label_sources.len()]; for pn in pns.iter() { let (i,j) = self.get_address(*pn)?; disk_splits[i].push(j); } let disk_summaries: Vec<MetaSummary> = disk_splits .iter() .enumerate() .map(|(i, indexes)| self.label_sources[i].get_summary(indexes).unwrap()) .collect(); MetaSummary::combine(&disk_summaries) } /// The main distance function. This paralizes if there are more than 100 points. 
pub fn distances_to_point_indices( &self, is: &[PointIndex], js: &[PointIndex], ) -> PointCloudResult<Vec<f32>> { let mut dists: Vec<f32> = vec![0.0;is.len()*js.len()]; if is.len()*js.len() > self.chunk { let dist_iter = dists.par_chunks_mut(js.len()); let indexes_iter = is.par_iter().map(|i| (i,js)); let error: Mutex<Result<(), PointCloudError>> = Mutex::new(Ok(())); dist_iter.zip(indexes_iter).for_each(|(chunk_dists,(i,chunk_indexes))| { match self.get_point(*i) { Ok(x) => { for (d,j) in chunk_dists.iter_mut().zip(chunk_indexes) { match self.get_point(*j) { Ok(y) => *d = (M::dense)(x, y), Err(e) => { *error.lock().unwrap() = Err(e); } } } }, Err(e) => { *error.lock().unwrap() = Err(e); } }; }); (error.into_inner().unwrap())?; } else { for (k,i) in is.iter().enumerate() { let x = self.get_point(*i)?; for (l,j) in js.iter().enumerate() { let y = self.get_point(*j)?; dists[k*js.len() + l] = (M::dense)(x, y); } } } Ok(dists) } /// The main distance function. This paralizes if there are more than 100 points. pub fn distances_to_point_index( &self, i: PointIndex, indexes: &[PointIndex], ) -> PointCloudResult<Vec<f32>> { self.distances_to_point(self.get_point(i)?,indexes) } /// Create and adjacency matrix pub fn adj(&self, mut indexes: &[PointIndex], ) -> PointCloudResult<AdjMatrix> { let mut vals = HashMap::new(); while indexes.len() > 1 { let i = indexes[0]; indexes = &indexes[1..]; let distances = self.distances_to_point_index(i, &indexes)?; indexes.iter().zip(distances).for_each(|(j, d)| { if i < *j { vals.insert((i, *j), d); } else { vals.insert((*j, i), d); } }); } Ok(AdjMatrix { vals }) } /// The main distance function. This paralizes if there are more than 100 points. pub fn distances_to_point( &self, x: &[f32], indexes: &[PointIndex], ) -> PointCloudResult<Vec<f32>> { let len = indexes.len(); if len > self.chunk * 3 { let mut dists: Vec<f32> = vec![0.0;len]; let dist_iter = dists.par_chunks_mut(self.chunk); let indexes_iter = indexes.par_chunks(self.chunk); let error: Mutex<Result<(), PointCloudError>> = Mutex::new(Ok(())); dist_iter.zip(indexes_iter).for_each(|(chunk_dists,chunk_indexes)| { for (d,i) in chunk_dists.iter_mut().zip(chunk_indexes) { match self.get_point(*i) { Ok(y) => *d = (M::dense)(x, y), Err(e) => { *error.lock().unwrap() = Err(e); } } } }); (error.into_inner().unwrap())?; Ok(dists) } else { indexes .iter() .map(|i| { let y = self.get_point(*i)?; Ok((M::dense)(x, y)) }) .collect() } } } fn build_label_schema_yaml(label_scheme: &mut LabelScheme, schema_yaml: &Yaml)
{ if let Some(schema_map) = schema_yaml.as_hash() { for (k, v) in schema_map.iter() { let key = k.as_str().unwrap().to_string(); match v.as_str().unwrap() { "u32" => label_scheme.add_u32(key), "f32" => label_scheme.add_f32(key), "i32" => label_scheme.add_i32(key), "bool" => label_scheme.add_bool(key), "string" => label_scheme.add_string(key), "name" => label_scheme.add_name_column(&key), _ => panic!( "Unknown type in schema yaml; entries must be (NAME: TYPE): {:?}", (k, v) ), } } } else { panic!("Schema yaml must be a mapping of column names to types"); }
identifier_body
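build_label_schema_yaml, whose body is the middle of the record above, walks a YAML mapping of column names to type strings. A cut-down sketch of the same walk, assuming the yaml-rust crate and collecting (name, type) pairs instead of mutating a LabelScheme:

use yaml_rust::YamlLoader;

fn main() {
    let doc = "
natural: u32
real: f32
flag: bool
";
    let parsed = YamlLoader::load_from_str(doc).expect("bad yaml");
    let schema = parsed[0].as_hash().expect("schema must be a mapping");

    let mut columns: Vec<(String, String)> = Vec::new();
    for (k, v) in schema.iter() {
        // Both key and value are expected to be plain strings, as in the
        // original; anything else is a malformed schema.
        let key = k.as_str().expect("key must be a string").to_string();
        match v.as_str().expect("type must be a string") {
            t if ["u32", "f32", "i32", "bool", "string", "name"].contains(&t) => {
                columns.push((key, t.to_string()))
            }
            other => panic!("unknown schema type {:?}", other),
        }
    }
    assert_eq!(columns.len(), 3);
}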
point_cloud.rs
::new(); let mut indexes_to_names: IndexMap<PointIndex, PointName> = IndexMap::new(); let mut current_count: u64 = 0; let mut data_sources = Vec::new(); let mut label_sources = Vec::new(); for (i,(dp,lp)) in data_path.iter().zip(labels_path).enumerate() { let new_data: Box<dyn DataSource>; if ram { new_data = Box::new((DataMemmap::new(data_dim, &dp)?).convert_to_ram()); } else { new_data = Box::new(DataMemmap::new(data_dim, &dp)?); } let new_labels = labels_scheme.open(&lp)?; if new_data.len() != new_labels.len() { panic!("The data count {:?} differs from the label count {:?} for the {}th data and label files", new_data.len(), new_labels.len(), i); } for j in 0..new_data.len() { let x = (i, j); let name = new_labels .get_name(j) .unwrap_or_else(|| format!("{}", current_count)); if names_to_indexes.contains_key(&name) { println!( "Duplicate {:?} on line {} of file {:?}", &name, j, labels_path[i] ); } else { names_to_indexes.insert(name.clone(), current_count); indexes_to_names.insert(current_count, name.clone()); addresses.insert(current_count, x); } current_count += 1; } data_sources.push(new_data); label_sources.push(new_labels); } // This could possibly be improved to be architecture specific. It depends on the CPU cache size let chunk = min(15000/data_dim,20); Ok(PointCloud { data_sources: data_sources, label_sources: label_sources, names_to_indexes: names_to_indexes, indexes_to_names: indexes_to_names, addresses: addresses, data_dim, labels_scheme, loaded_centers: Mutex::new(IndexMap::new()), chunk, metric: PhantomData, }) } /// Builds the point cloud from data in ram. /// This is for complex metadata pub fn from_ram( data: Box<[f32]>, data_dim: usize, labels: MetadataList, ) -> PointCloudResult<PointCloud<M>> { let mut addresses = IndexMap::new(); let data_source = Box::new(DataRam::new(data_dim, data)?); let labels_scheme = labels.scheme()?; let label_source = labels; let mut names_to_indexes: IndexMap<PointName, PointIndex> = IndexMap::new(); let mut indexes_to_names: IndexMap<PointIndex, PointName> = IndexMap::new(); for j in 0..(data_source.len()) { let name = label_source.get_name(j).unwrap_or_else(|| format!("{}", j)); if names_to_indexes.contains_key(&name) { println!("Duplicate {:?} on line {} of file", &name, j); } else { names_to_indexes.insert(name.clone(), j as PointIndex); indexes_to_names.insert(j as PointIndex, name.clone()); addresses.insert(j as u64, (0,j)); } } let chunk = min(15000/data_dim,20); Ok(PointCloud { data_sources: vec![data_source], label_sources: vec![label_source], names_to_indexes: names_to_indexes, indexes_to_names: indexes_to_names, addresses: addresses, data_dim, loaded_centers: Mutex::new(IndexMap::new()), labels_scheme, chunk, metric: PhantomData, }) } /// Given a yaml file on disk, it builds a point cloud. Minimal example below. /// ```yaml /// --- /// data_path: DATAMEMMAP /// labels_path: LABELS_CSV_OR_MEMMAP /// count: NUMBER_OF_DATA_POINTS /// data_dim: 784 /// labels_dim: 10 /// in_ram: True /// ``` /// This assumes that your labels are either a CSV or a memmap file. 
/// If one specifies a schema then this is the minimal example /// ```yaml /// --- /// data_path: DATAMEMMAP /// labels_path: LABELS_CSV_OR_MEMMAP /// count: NUMBER_OF_DATA_POINTS /// data_dim: 784 /// schema: /// natural: u32 /// integer: i32 /// real: f32 /// string: String /// boolean: bool /// ``` pub fn from_yaml(params: &Yaml) -> PointCloudResult<PointCloud<M>> { let data_paths = &get_file_list( params["data_path"] .as_str() .expect("Unable to read the 'labels_path'"), ); let labels_paths = &get_file_list( params["labels_path"] .as_str() .expect("Unable to read the 'labels_path'"), ); let data_dim = params["data_dim"] .as_i64() .expect("Unable to read the 'data_dim'") as usize; let mut deser = LabelScheme::new(); if params["schema"].is_badvalue() { let labels_dim = params["labels_dim"] .as_i64() .expect("Unable to read the 'labels_dim' or the 'schema'") as usize; deser.add_vector("y".to_string(), labels_dim, "f32"); } else { build_label_schema_yaml(&mut deser, &params["schema"]); } let ram_bool = match params["in_ram"].as_bool() { Some(b) => b, None => true, }; PointCloud::<M>::from_memmap_files(data_dim, deser, data_paths, labels_paths, ram_bool) } /// Runs `from_yaml` on the file at a given path pub fn from_file<P: AsRef<Path>>(path: P) -> PointCloudResult<PointCloud<M>> { let mut config_file = File::open(&path).expect(&format!("Unable to read config file {:?}", &path.as_ref())); let mut config = String::new(); config_file .read_to_string(&mut config) .expect(&format!("Unable to read config file {:?}", &path.as_ref())); let params_files = YamlLoader::load_from_str(&config).unwrap(); PointCloud::<M>::from_yaml(&params_files[0]) } /// Builds the point cloud from data in ram. This is for quick things with simple metadata pub fn simple_from_ram( data: Box<[f32]>, data_dim: usize, labels: Box<[f32]>, labels_dim: usize, ) -> PointCloudResult<PointCloud<M>> { assert!(data.len() / data_dim == labels.len() / labels_dim); let list = MetadataList::simple_vec(labels, labels_dim); PointCloud::<M>::from_ram(data, data_dim, list) } /// Total number of points in the point cloud pub fn
(&self) -> usize { self.data_sources.iter().fold(0, |acc, mm| acc + mm.len()) } /// Dimension of the data in the point cloud pub fn dim(&self) -> usize { self.data_dim } /// The names of the data are currently a shallow wrapper around a usize. pub fn reference_indexes(&self) -> Vec<PointIndex> { self.addresses.keys().cloned().collect() } /// Returns a arc that points to a AVX2 packed point. This also acts like a cache for these center /// points to ensure that we don't load multiple copies into memory. Used for heavily /// referenced points, like centers. pub fn get_center(&self, pn: PointIndex) -> PointCloudResult<Arc<Vec<f32>>> { let mut loaded_centers = self.loaded_centers.lock().unwrap(); Ok(Arc::clone( loaded_centers .entry(pn) .or_insert(Arc::new(Vec::from(self.get_point(pn)?))), )) } #[inline] fn get_address(&self,pn: PointIndex) -> PointCloudResult<(usize,usize)> { match self.addresses.get(&pn) { Some((i, j)) => Ok((*i,*j)), None => panic!("Index not found"), } } /// Returns a slice corresponding to the point in question. Used for rarely referenced points, /// like outliers or leaves. pub fn get_point(&self, pn: PointIndex) -> PointCloudResult<&[f32]> { let (i,j) = self.get_address(pn)?; self.data_sources[i].get(j) } /// Gets the name from an index pub fn get_name(&self, pi: &PointIndex) -> Option<&PointName> { self.indexes_to_names.get(pi) } /// Gets the index from the name pub fn get_index(&self, pn: &PointName) -> Option<&PointIndex> { self.names_to_indexes.get(pn) } /// Gets all names in the point cloud pub fn get_names(&self) -> Vec<PointName> { self.names_to_indexes.keys().
len
identifier_name
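The from_memmap_files/from_ram constructors above keep two mirrored maps, names_to_indexes and indexes_to_names, and skip a duplicate name with a printed warning rather than failing. The bookkeeping on its own:

use std::collections::HashMap;

fn main() {
    let names = ["a", "b", "a", "c"];
    let mut names_to_indexes: HashMap<String, usize> = HashMap::new();
    let mut indexes_to_names: HashMap<usize, String> = HashMap::new();

    for (j, name) in names.iter().enumerate() {
        if names_to_indexes.contains_key(*name) {
            // The original prints the offending line and keeps going.
            println!("Duplicate {:?} on line {}", name, j);
        } else {
            names_to_indexes.insert(name.to_string(), j);
            indexes_to_names.insert(j, name.to_string());
        }
    }
    assert_eq!(names_to_indexes.len(), 3);
    assert_eq!(indexes_to_names.len(), 3);
}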
point_cloud.rs
-> PointCloudResult<PointCloud<M>> { assert!(data.len() / data_dim == labels.len() / labels_dim); let list = MetadataList::simple_vec(labels, labels_dim); PointCloud::<M>::from_ram(data, data_dim, list) } /// Total number of points in the point cloud pub fn len(&self) -> usize { self.data_sources.iter().fold(0, |acc, mm| acc + mm.len()) } /// Dimension of the data in the point cloud pub fn dim(&self) -> usize { self.data_dim } /// The names of the data are currently a shallow wrapper around a usize. pub fn reference_indexes(&self) -> Vec<PointIndex> { self.addresses.keys().cloned().collect() } /// Returns a arc that points to a AVX2 packed point. This also acts like a cache for these center /// points to ensure that we don't load multiple copies into memory. Used for heavily /// referenced points, like centers. pub fn get_center(&self, pn: PointIndex) -> PointCloudResult<Arc<Vec<f32>>> { let mut loaded_centers = self.loaded_centers.lock().unwrap(); Ok(Arc::clone( loaded_centers .entry(pn) .or_insert(Arc::new(Vec::from(self.get_point(pn)?))), )) } #[inline] fn get_address(&self,pn: PointIndex) -> PointCloudResult<(usize,usize)> { match self.addresses.get(&pn) { Some((i, j)) => Ok((*i,*j)), None => panic!("Index not found"), } } /// Returns a slice corresponding to the point in question. Used for rarely referenced points, /// like outliers or leaves. pub fn get_point(&self, pn: PointIndex) -> PointCloudResult<&[f32]> { let (i,j) = self.get_address(pn)?; self.data_sources[i].get(j) } /// Gets the name from an index pub fn get_name(&self, pi: &PointIndex) -> Option<&PointName> { self.indexes_to_names.get(pi) } /// Gets the index from the name pub fn get_index(&self, pn: &PointName) -> Option<&PointIndex> { self.names_to_indexes.get(pn) } /// Gets all names in the point cloud pub fn get_names(&self) -> Vec<PointName> { self.names_to_indexes.keys().cloned().collect() } /// Gets a schema to use pub fn schema_json(&self) -> String { self.labels_scheme.schema_json() } /// Returns the label of a point. /// /// This will be changed to return a label structure that can contain many different pieces of info. pub fn get_metadata(&self, pn: PointIndex) -> PointCloudResult<Metadata> { let (i,j) = self.get_address(pn)?; self.label_sources[i].get(j) } /// Returns a complex summary of a collection of metadatas associated to a point pub fn get_metasummary(&self, pns: &[PointIndex]) -> PointCloudResult<MetaSummary> { let mut disk_splits: Vec<Vec<usize>> = vec![Vec::new(); self.label_sources.len()]; for pn in pns.iter() { let (i,j) = self.get_address(*pn)?; disk_splits[i].push(j); } let disk_summaries: Vec<MetaSummary> = disk_splits .iter() .enumerate() .map(|(i, indexes)| self.label_sources[i].get_summary(indexes).unwrap()) .collect(); MetaSummary::combine(&disk_summaries) } /// The main distance function. This paralizes if there are more than 100 points. 
pub fn distances_to_point_indices( &self, is: &[PointIndex], js: &[PointIndex], ) -> PointCloudResult<Vec<f32>> { let mut dists: Vec<f32> = vec![0.0;is.len()*js.len()]; if is.len()*js.len() > self.chunk { let dist_iter = dists.par_chunks_mut(js.len()); let indexes_iter = is.par_iter().map(|i| (i,js)); let error: Mutex<Result<(), PointCloudError>> = Mutex::new(Ok(())); dist_iter.zip(indexes_iter).for_each(|(chunk_dists,(i,chunk_indexes))| { match self.get_point(*i) { Ok(x) => { for (d,j) in chunk_dists.iter_mut().zip(chunk_indexes) { match self.get_point(*j) { Ok(y) => *d = (M::dense)(x, y), Err(e) => { *error.lock().unwrap() = Err(e); } } } }, Err(e) => { *error.lock().unwrap() = Err(e); } }; }); (error.into_inner().unwrap())?; } else { for (k,i) in is.iter().enumerate() { let x = self.get_point(*i)?; for (l,j) in js.iter().enumerate() { let y = self.get_point(*j)?; dists[k*js.len() + l] = (M::dense)(x, y); } } } Ok(dists) } /// The main distance function. This paralizes if there are more than 100 points. pub fn distances_to_point_index( &self, i: PointIndex, indexes: &[PointIndex], ) -> PointCloudResult<Vec<f32>> { self.distances_to_point(self.get_point(i)?,indexes) } /// Create and adjacency matrix pub fn adj(&self, mut indexes: &[PointIndex], ) -> PointCloudResult<AdjMatrix> { let mut vals = HashMap::new(); while indexes.len() > 1 { let i = indexes[0]; indexes = &indexes[1..]; let distances = self.distances_to_point_index(i, &indexes)?; indexes.iter().zip(distances).for_each(|(j, d)| { if i < *j { vals.insert((i, *j), d); } else { vals.insert((*j, i), d); } }); } Ok(AdjMatrix { vals }) } /// The main distance function. This paralizes if there are more than 100 points. pub fn distances_to_point( &self, x: &[f32], indexes: &[PointIndex], ) -> PointCloudResult<Vec<f32>> { let len = indexes.len(); if len > self.chunk * 3 { let mut dists: Vec<f32> = vec![0.0;len]; let dist_iter = dists.par_chunks_mut(self.chunk); let indexes_iter = indexes.par_chunks(self.chunk); let error: Mutex<Result<(), PointCloudError>> = Mutex::new(Ok(())); dist_iter.zip(indexes_iter).for_each(|(chunk_dists,chunk_indexes)| { for (d,i) in chunk_dists.iter_mut().zip(chunk_indexes) { match self.get_point(*i) { Ok(y) => *d = (M::dense)(x, y), Err(e) => { *error.lock().unwrap() = Err(e); } } } }); (error.into_inner().unwrap())?; Ok(dists) } else { indexes .iter() .map(|i| { let y = self.get_point(*i)?; Ok((M::dense)(x, y)) }) .collect() } } } fn build_label_schema_yaml(label_scheme: &mut LabelScheme, schema_yaml: &Yaml) { if let Some(schema_map) = schema_yaml.as_hash() { for (k, v) in schema_map.iter() { let key = k.as_str().unwrap().to_string(); match v.as_str().unwrap() { "u32" => label_scheme.add_u32(key), "f32" => label_scheme.add_f32(key), "i32" => label_scheme.add_i32(key), "bool" => label_scheme.add_bool(key), "string" => label_scheme.add_string(key), "name" => label_scheme.add_name_column(&key), _ => panic!( "Unknown type in schema yaml, also it should be (VALUE: TYPE): {:?}", (k, v) ), } } } else { panic!("Need to correctly edit the yaml"); } } fn get_file_list(files_reg: &str) -> Vec<PathBuf> { let options = MatchOptions { case_sensitive: false, ..Default::default() }; let mut paths = Vec::new(); let glob_paths = match glob_with(files_reg, &options) { Ok(expr) => expr, Err(e) => panic!("Pattern reading error {:?}", e), }; for entry in glob_paths { let path = match entry { Ok(expr) => expr, Err(e) => panic!("Error reading path {:?}", e), }; paths.push(path) } paths } /* #[cfg(test)] mod tests {
random_line_split
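The distance methods above switch to rayon once the workload beats a cache-derived chunk size, zipping mutable chunks of the output buffer with chunks of the index list; errors are threaded out through a Mutex<Result>, which this sketch omits. A self-contained version of the chunked pattern, with an ordinary Euclidean metric standing in for M::dense:

use rayon::prelude::*;

fn dense(x: &[f32], y: &[f32]) -> f32 {
    x.iter().zip(y).map(|(a, b)| (a - b) * (a - b)).sum::<f32>().sqrt()
}

fn main() {
    let dim = 3;
    let data: Vec<f32> = (0..300).map(|i| i as f32).collect(); // 100 points
    let query = [0.0f32, 0.0, 0.0];
    let indexes: Vec<usize> = (0..100).collect();
    let chunk = 16;

    let mut dists = vec![0.0f32; indexes.len()];
    dists
        .par_chunks_mut(chunk)
        .zip(indexes.par_chunks(chunk))
        .for_each(|(chunk_dists, chunk_indexes)| {
            for (d, i) in chunk_dists.iter_mut().zip(chunk_indexes) {
                let y = &data[i * dim..(i + 1) * dim];
                *d = dense(&query, y);
            }
        });
    // Point 0 is [0, 1, 2], so its distance to the origin is sqrt(5).
    assert!((dists[0] - 5f32.sqrt()).abs() < 1e-6);
}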
machinedeployment_webhook.go
8s.io,resources=machinedeployments,versions=v1beta1,name=default.machinedeployment.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 var _ webhook.CustomDefaulter = &machineDeploymentDefaulter{} var _ webhook.Validator = &MachineDeployment{} // MachineDeploymentDefaulter creates a new CustomDefaulter for MachineDeployments. func MachineDeploymentDefaulter(scheme *runtime.Scheme) webhook.CustomDefaulter { return &machineDeploymentDefaulter{ decoder: admission.NewDecoder(scheme), } } // machineDeploymentDefaulter implements a defaulting webhook for MachineDeployment. type machineDeploymentDefaulter struct { decoder *admission.Decoder } // Default implements webhook.CustomDefaulter. func (webhook *machineDeploymentDefaulter) Default(ctx context.Context, obj runtime.Object) error { m, ok := obj.(*MachineDeployment) if !ok { return apierrors.NewBadRequest(fmt.Sprintf("expected a MachineDeployment but got a %T", obj)) } req, err := admission.RequestFromContext(ctx) if err != nil { return err } dryRun := false if req.DryRun != nil { dryRun = *req.DryRun } var oldMD *MachineDeployment if req.Operation == v1.Update { oldMD = &MachineDeployment{} if err := webhook.decoder.DecodeRaw(req.OldObject, oldMD); err != nil { return errors.Wrapf(err, "failed to decode oldObject to MachineDeployment") } } if m.Labels == nil { m.Labels = make(map[string]string) } m.Labels[ClusterNameLabel] = m.Spec.ClusterName replicas, err := calculateMachineDeploymentReplicas(ctx, oldMD, m, dryRun) if err != nil { return err } m.Spec.Replicas = pointer.Int32(replicas) if m.Spec.MinReadySeconds == nil { m.Spec.MinReadySeconds = pointer.Int32(0) } if m.Spec.RevisionHistoryLimit == nil { m.Spec.RevisionHistoryLimit = pointer.Int32(1) } if m.Spec.ProgressDeadlineSeconds == nil { m.Spec.ProgressDeadlineSeconds = pointer.Int32(600) } if m.Spec.Selector.MatchLabels == nil { m.Spec.Selector.MatchLabels = make(map[string]string) } if m.Spec.Strategy == nil { m.Spec.Strategy = &MachineDeploymentStrategy{} } if m.Spec.Strategy.Type == "" { m.Spec.Strategy.Type = RollingUpdateMachineDeploymentStrategyType } if m.Spec.Template.Labels == nil { m.Spec.Template.Labels = make(map[string]string) } // Default RollingUpdate strategy only if strategy type is RollingUpdate. if m.Spec.Strategy.Type == RollingUpdateMachineDeploymentStrategyType { if m.Spec.Strategy.RollingUpdate == nil { m.Spec.Strategy.RollingUpdate = &MachineRollingUpdateDeployment{} } if m.Spec.Strategy.RollingUpdate.MaxSurge == nil { ios1 := intstr.FromInt(1) m.Spec.Strategy.RollingUpdate.MaxSurge = &ios1 } if m.Spec.Strategy.RollingUpdate.MaxUnavailable == nil { ios0 := intstr.FromInt(0) m.Spec.Strategy.RollingUpdate.MaxUnavailable = &ios0 } } // If no selector has been provided, add label and selector for the // MachineDeployment's name as a default way of providing uniqueness. if len(m.Spec.Selector.MatchLabels) == 0 && len(m.Spec.Selector.MatchExpressions) == 0 { m.Spec.Selector.MatchLabels[MachineDeploymentNameLabel] = m.Name m.Spec.Template.Labels[MachineDeploymentNameLabel] = m.Name } // Make sure selector and template to be in the same cluster. 
m.Spec.Selector.MatchLabels[ClusterNameLabel] = m.Spec.ClusterName m.Spec.Template.Labels[ClusterNameLabel] = m.Spec.ClusterName // tolerate version strings without a "v" prefix: prepend it if it's not there if m.Spec.Template.Spec.Version != nil && !strings.HasPrefix(*m.Spec.Template.Spec.Version, "v") { normalizedVersion := "v" + *m.Spec.Template.Spec.Version m.Spec.Template.Spec.Version = &normalizedVersion } return nil } // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. func (m *MachineDeployment)
() (admission.Warnings, error) { return nil, m.validate(nil) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. func (m *MachineDeployment) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { oldMD, ok := old.(*MachineDeployment) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a MachineDeployment but got a %T", old)) } return nil, m.validate(oldMD) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. func (m *MachineDeployment) ValidateDelete() (admission.Warnings, error) { return nil, nil } func (m *MachineDeployment) validate(old *MachineDeployment) error { var allErrs field.ErrorList // The MachineDeployment name is used as a label value. This check ensures names which are not be valid label values are rejected. if errs := validation.IsValidLabelValue(m.Name); len(errs) != 0 { for _, err := range errs { allErrs = append( allErrs, field.Invalid( field.NewPath("metadata", "name"), m.Name, fmt.Sprintf("must be a valid label value: %s", err), ), ) } } specPath := field.NewPath("spec") selector, err := metav1.LabelSelectorAsSelector(&m.Spec.Selector) if err != nil { allErrs = append( allErrs, field.Invalid(specPath.Child("selector"), m.Spec.Selector, err.Error()), ) } else if !selector.Matches(labels.Set(m.Spec.Template.Labels)) { allErrs = append( allErrs, field.Forbidden( specPath.Child("template", "metadata", "labels"), fmt.Sprintf("must match spec.selector %q", selector.String()), ), ) } // MachineSet preflight checks that should be skipped could also be set as annotation on the MachineDeployment // since MachineDeployment annotations are synced to the MachineSet. if feature.Gates.Enabled(feature.MachineSetPreflightChecks) { if err := validateSkippedMachineSetPreflightChecks(m); err != nil { allErrs = append(allErrs, err) } } if old != nil && old.Spec.ClusterName != m.Spec.ClusterName { allErrs = append( allErrs, field.Forbidden( specPath.Child("clusterName"), "field is immutable", ), ) } if m.Spec.Strategy != nil && m.Spec.Strategy.RollingUpdate != nil { total := 1 if m.Spec.Replicas != nil { total = int(*m.Spec.Replicas) } if m.Spec.Strategy.RollingUpdate.MaxSurge != nil { if _, err := intstr.GetScaledValueFromIntOrPercent(m.Spec.Strategy.RollingUpdate.MaxSurge, total, true); err != nil { allErrs = append( allErrs, field.Invalid(specPath.Child("strategy", "rollingUpdate", "maxSurge"), m.Spec.Strategy.RollingUpdate.MaxSurge, fmt.Sprintf("must be either an int or a percentage: %v", err.Error())), ) } } if m.Spec.Strategy.RollingUpdate.MaxUnavailable != nil { if _, err := intstr.GetScaledValueFromIntOrPercent(m.Spec.Strategy.RollingUpdate.MaxUnavailable, total, true); err != nil { allErrs = append( allErrs, field.Invalid(specPath.Child("strategy", "rollingUpdate", "maxUnavailable"), m.Spec.Strategy.RollingUpdate.MaxUnavailable, fmt.Sprintf("must be either an int or a percentage: %v", err.Error())), ) } } } if m.Spec.Template.Spec.Version != nil { if !version.KubeSemver.MatchString(*m.Spec.Template.Spec.Version) { allErrs = append(allErrs, field.Invalid(specPath.Child("template", "spec", "version"), *m.Spec.Template.Spec.Version, "must be a valid semantic version")) } } if len(allErrs) == 0 { return nil } return apierrors.NewInvalid(GroupVersion.WithKind("MachineDeployment").GroupKind(), m.Name, allErrs) } // calculateMachineDeploymentReplicas calculates the default value of the replicas field. 
// The value will be calculated based on the following logic: // * if replicas is already set on newMD, keep the current value // * if the autoscaler min size and max size annotations are set: // - if it's a new MachineDeployment, use min size // - if the replicas field of the old MachineDeployment is < min size, use min size // - if the replicas field of the old MachineDeployment is > max size, use max size // - if the replicas field of the
ValidateCreate
identifier_name
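The Default webhook above fills every unset optional field and prepends a "v" to bare version strings. A hedged Rust rendering of those two steps; MachineSpec here is an illustrative stand-in, not the real Cluster API type:

#[derive(Debug, Default)]
struct MachineSpec {
    min_ready_seconds: Option<i32>,
    revision_history_limit: Option<i32>,
    progress_deadline_seconds: Option<i32>,
    version: Option<String>,
}

fn apply_defaults(spec: &mut MachineSpec) {
    spec.min_ready_seconds.get_or_insert(0);
    spec.revision_history_limit.get_or_insert(1);
    spec.progress_deadline_seconds.get_or_insert(600);
    // Tolerate version strings without a "v" prefix, as Default() does.
    if let Some(v) = spec.version.as_mut() {
        if !v.starts_with('v') {
            *v = format!("v{}", v);
        }
    }
}

fn main() {
    let mut spec = MachineSpec { version: Some("1.28.0".into()), ..Default::default() };
    apply_defaults(&mut spec);
    assert_eq!(spec.min_ready_seconds, Some(0));
    assert_eq!(spec.version.as_deref(), Some("v1.28.0"));
}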
machinedeployment_webhook.go
-k8s.io,resources=machinedeployments,versions=v1beta1,name=default.machinedeployment.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 var _ webhook.CustomDefaulter = &machineDeploymentDefaulter{} var _ webhook.Validator = &MachineDeployment{} // MachineDeploymentDefaulter creates a new CustomDefaulter for MachineDeployments. func MachineDeploymentDefaulter(scheme *runtime.Scheme) webhook.CustomDefaulter { return &machineDeploymentDefaulter{ decoder: admission.NewDecoder(scheme), } } // machineDeploymentDefaulter implements a defaulting webhook for MachineDeployment. type machineDeploymentDefaulter struct { decoder *admission.Decoder } // Default implements webhook.CustomDefaulter. func (webhook *machineDeploymentDefaulter) Default(ctx context.Context, obj runtime.Object) error { m, ok := obj.(*MachineDeployment) if !ok { return apierrors.NewBadRequest(fmt.Sprintf("expected a MachineDeployment but got a %T", obj)) } req, err := admission.RequestFromContext(ctx) if err != nil { return err } dryRun := false if req.DryRun != nil { dryRun = *req.DryRun } var oldMD *MachineDeployment if req.Operation == v1.Update { oldMD = &MachineDeployment{} if err := webhook.decoder.DecodeRaw(req.OldObject, oldMD); err != nil { return errors.Wrapf(err, "failed to decode oldObject to MachineDeployment") } } if m.Labels == nil { m.Labels = make(map[string]string) } m.Labels[ClusterNameLabel] = m.Spec.ClusterName replicas, err := calculateMachineDeploymentReplicas(ctx, oldMD, m, dryRun) if err != nil { return err } m.Spec.Replicas = pointer.Int32(replicas) if m.Spec.MinReadySeconds == nil { m.Spec.MinReadySeconds = pointer.Int32(0) } if m.Spec.RevisionHistoryLimit == nil { m.Spec.RevisionHistoryLimit = pointer.Int32(1) } if m.Spec.ProgressDeadlineSeconds == nil { m.Spec.ProgressDeadlineSeconds = pointer.Int32(600) } if m.Spec.Selector.MatchLabels == nil { m.Spec.Selector.MatchLabels = make(map[string]string) } if m.Spec.Strategy == nil { m.Spec.Strategy = &MachineDeploymentStrategy{} } if m.Spec.Strategy.Type == "" { m.Spec.Strategy.Type = RollingUpdateMachineDeploymentStrategyType } if m.Spec.Template.Labels == nil { m.Spec.Template.Labels = make(map[string]string) } // Default RollingUpdate strategy only if strategy type is RollingUpdate. if m.Spec.Strategy.Type == RollingUpdateMachineDeploymentStrategyType { if m.Spec.Strategy.RollingUpdate == nil { m.Spec.Strategy.RollingUpdate = &MachineRollingUpdateDeployment{} } if m.Spec.Strategy.RollingUpdate.MaxSurge == nil { ios1 := intstr.FromInt(1) m.Spec.Strategy.RollingUpdate.MaxSurge = &ios1 } if m.Spec.Strategy.RollingUpdate.MaxUnavailable == nil { ios0 := intstr.FromInt(0) m.Spec.Strategy.RollingUpdate.MaxUnavailable = &ios0 } } // If no selector has been provided, add label and selector for the // MachineDeployment's name as a default way of providing uniqueness. if len(m.Spec.Selector.MatchLabels) == 0 && len(m.Spec.Selector.MatchExpressions) == 0 { m.Spec.Selector.MatchLabels[MachineDeploymentNameLabel] = m.Name m.Spec.Template.Labels[MachineDeploymentNameLabel] = m.Name } // Make sure selector and template to be in the same cluster. 
m.Spec.Selector.MatchLabels[ClusterNameLabel] = m.Spec.ClusterName m.Spec.Template.Labels[ClusterNameLabel] = m.Spec.ClusterName // tolerate version strings without a "v" prefix: prepend it if it's not there if m.Spec.Template.Spec.Version != nil && !strings.HasPrefix(*m.Spec.Template.Spec.Version, "v") { normalizedVersion := "v" + *m.Spec.Template.Spec.Version m.Spec.Template.Spec.Version = &normalizedVersion } return nil } // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. func (m *MachineDeployment) ValidateCreate() (admission.Warnings, error) { return nil, m.validate(nil) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. func (m *MachineDeployment) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { oldMD, ok := old.(*MachineDeployment) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a MachineDeployment but got a %T", old)) } return nil, m.validate(oldMD) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. func (m *MachineDeployment) ValidateDelete() (admission.Warnings, error) { return nil, nil } func (m *MachineDeployment) validate(old *MachineDeployment) error { var allErrs field.ErrorList // The MachineDeployment name is used as a label value. This check ensures names which are not be valid label values are rejected. if errs := validation.IsValidLabelValue(m.Name); len(errs) != 0 { for _, err := range errs { allErrs = append( allErrs, field.Invalid( field.NewPath("metadata", "name"), m.Name, fmt.Sprintf("must be a valid label value: %s", err), ), ) } } specPath := field.NewPath("spec") selector, err := metav1.LabelSelectorAsSelector(&m.Spec.Selector) if err != nil { allErrs = append( allErrs, field.Invalid(specPath.Child("selector"), m.Spec.Selector, err.Error()), ) } else if !selector.Matches(labels.Set(m.Spec.Template.Labels)) { allErrs = append(
), ) } // MachineSet preflight checks that should be skipped could also be set as annotation on the MachineDeployment // since MachineDeployment annotations are synced to the MachineSet. if feature.Gates.Enabled(feature.MachineSetPreflightChecks) { if err := validateSkippedMachineSetPreflightChecks(m); err != nil { allErrs = append(allErrs, err) } } if old != nil && old.Spec.ClusterName != m.Spec.ClusterName { allErrs = append( allErrs, field.Forbidden( specPath.Child("clusterName"), "field is immutable", ), ) } if m.Spec.Strategy != nil && m.Spec.Strategy.RollingUpdate != nil { total := 1 if m.Spec.Replicas != nil { total = int(*m.Spec.Replicas) } if m.Spec.Strategy.RollingUpdate.MaxSurge != nil { if _, err := intstr.GetScaledValueFromIntOrPercent(m.Spec.Strategy.RollingUpdate.MaxSurge, total, true); err != nil { allErrs = append( allErrs, field.Invalid(specPath.Child("strategy", "rollingUpdate", "maxSurge"), m.Spec.Strategy.RollingUpdate.MaxSurge, fmt.Sprintf("must be either an int or a percentage: %v", err.Error())), ) } } if m.Spec.Strategy.RollingUpdate.MaxUnavailable != nil { if _, err := intstr.GetScaledValueFromIntOrPercent(m.Spec.Strategy.RollingUpdate.MaxUnavailable, total, true); err != nil { allErrs = append( allErrs, field.Invalid(specPath.Child("strategy", "rollingUpdate", "maxUnavailable"), m.Spec.Strategy.RollingUpdate.MaxUnavailable, fmt.Sprintf("must be either an int or a percentage: %v", err.Error())), ) } } } if m.Spec.Template.Spec.Version != nil { if !version.KubeSemver.MatchString(*m.Spec.Template.Spec.Version) { allErrs = append(allErrs, field.Invalid(specPath.Child("template", "spec", "version"), *m.Spec.Template.Spec.Version, "must be a valid semantic version")) } } if len(allErrs) == 0 { return nil } return apierrors.NewInvalid(GroupVersion.WithKind("MachineDeployment").GroupKind(), m.Name, allErrs) } // calculateMachineDeploymentReplicas calculates the default value of the replicas field. // The value will be calculated based on the following logic: // * if replicas is already set on newMD, keep the current value // * if the autoscaler min size and max size annotations are set: // - if it's a new MachineDeployment, use min size // - if the replicas field of the old MachineDeployment is < min size, use min size // - if the replicas field of the old MachineDeployment is > max size, use max size // - if the replicas field of the
allErrs, field.Forbidden( specPath.Child("template", "metadata", "labels"), fmt.Sprintf("must match spec.selector %q", selector.String()),
random_line_split
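validate() above rejects a MachineDeployment whose template labels don't satisfy spec.selector. For a pure match-labels selector that check reduces to a subset test, sketched here:

use std::collections::HashMap;

fn selector_matches(selector: &HashMap<String, String>, labels: &HashMap<String, String>) -> bool {
    // Every key/value pair demanded by the selector must be present.
    selector.iter().all(|(k, v)| labels.get(k) == Some(v))
}

fn main() {
    let selector: HashMap<_, _> =
        [("cluster.x-k8s.io/deployment-name".to_string(), "md-0".to_string())].into();
    let mut labels = HashMap::new();
    assert!(!selector_matches(&selector, &labels)); // would be rejected
    labels.insert("cluster.x-k8s.io/deployment-name".to_string(), "md-0".to_string());
    assert!(selector_matches(&selector, &labels));
}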
machinedeployment_webhook.go
8s.io,resources=machinedeployments,versions=v1beta1,name=default.machinedeployment.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 var _ webhook.CustomDefaulter = &machineDeploymentDefaulter{} var _ webhook.Validator = &MachineDeployment{} // MachineDeploymentDefaulter creates a new CustomDefaulter for MachineDeployments. func MachineDeploymentDefaulter(scheme *runtime.Scheme) webhook.CustomDefaulter { return &machineDeploymentDefaulter{ decoder: admission.NewDecoder(scheme), } } // machineDeploymentDefaulter implements a defaulting webhook for MachineDeployment. type machineDeploymentDefaulter struct { decoder *admission.Decoder } // Default implements webhook.CustomDefaulter. func (webhook *machineDeploymentDefaulter) Default(ctx context.Context, obj runtime.Object) error { m, ok := obj.(*MachineDeployment) if !ok { return apierrors.NewBadRequest(fmt.Sprintf("expected a MachineDeployment but got a %T", obj)) } req, err := admission.RequestFromContext(ctx) if err != nil { return err } dryRun := false if req.DryRun != nil { dryRun = *req.DryRun } var oldMD *MachineDeployment if req.Operation == v1.Update { oldMD = &MachineDeployment{} if err := webhook.decoder.DecodeRaw(req.OldObject, oldMD); err != nil { return errors.Wrapf(err, "failed to decode oldObject to MachineDeployment") } } if m.Labels == nil { m.Labels = make(map[string]string) } m.Labels[ClusterNameLabel] = m.Spec.ClusterName replicas, err := calculateMachineDeploymentReplicas(ctx, oldMD, m, dryRun) if err != nil { return err } m.Spec.Replicas = pointer.Int32(replicas) if m.Spec.MinReadySeconds == nil { m.Spec.MinReadySeconds = pointer.Int32(0) } if m.Spec.RevisionHistoryLimit == nil { m.Spec.RevisionHistoryLimit = pointer.Int32(1) } if m.Spec.ProgressDeadlineSeconds == nil { m.Spec.ProgressDeadlineSeconds = pointer.Int32(600) } if m.Spec.Selector.MatchLabels == nil { m.Spec.Selector.MatchLabels = make(map[string]string) } if m.Spec.Strategy == nil { m.Spec.Strategy = &MachineDeploymentStrategy{} } if m.Spec.Strategy.Type == "" { m.Spec.Strategy.Type = RollingUpdateMachineDeploymentStrategyType } if m.Spec.Template.Labels == nil { m.Spec.Template.Labels = make(map[string]string) } // Default RollingUpdate strategy only if strategy type is RollingUpdate. if m.Spec.Strategy.Type == RollingUpdateMachineDeploymentStrategyType { if m.Spec.Strategy.RollingUpdate == nil { m.Spec.Strategy.RollingUpdate = &MachineRollingUpdateDeployment{} } if m.Spec.Strategy.RollingUpdate.MaxSurge == nil { ios1 := intstr.FromInt(1) m.Spec.Strategy.RollingUpdate.MaxSurge = &ios1 } if m.Spec.Strategy.RollingUpdate.MaxUnavailable == nil { ios0 := intstr.FromInt(0) m.Spec.Strategy.RollingUpdate.MaxUnavailable = &ios0 } } // If no selector has been provided, add label and selector for the // MachineDeployment's name as a default way of providing uniqueness. if len(m.Spec.Selector.MatchLabels) == 0 && len(m.Spec.Selector.MatchExpressions) == 0 { m.Spec.Selector.MatchLabels[MachineDeploymentNameLabel] = m.Name m.Spec.Template.Labels[MachineDeploymentNameLabel] = m.Name } // Make sure selector and template to be in the same cluster. 
m.Spec.Selector.MatchLabels[ClusterNameLabel] = m.Spec.ClusterName m.Spec.Template.Labels[ClusterNameLabel] = m.Spec.ClusterName // tolerate version strings without a "v" prefix: prepend it if it's not there if m.Spec.Template.Spec.Version != nil && !strings.HasPrefix(*m.Spec.Template.Spec.Version, "v") { normalizedVersion := "v" + *m.Spec.Template.Spec.Version m.Spec.Template.Spec.Version = &normalizedVersion } return nil } // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. func (m *MachineDeployment) ValidateCreate() (admission.Warnings, error) { return nil, m.validate(nil) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. func (m *MachineDeployment) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { oldMD, ok := old.(*MachineDeployment) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a MachineDeployment but got a %T", old)) } return nil, m.validate(oldMD) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. func (m *MachineDeployment) ValidateDelete() (admission.Warnings, error) { return nil, nil } func (m *MachineDeployment) validate(old *MachineDeployment) error { var allErrs field.ErrorList // The MachineDeployment name is used as a label value. This check ensures names which are not be valid label values are rejected. if errs := validation.IsValidLabelValue(m.Name); len(errs) != 0 { for _, err := range errs { allErrs = append( allErrs, field.Invalid( field.NewPath("metadata", "name"), m.Name, fmt.Sprintf("must be a valid label value: %s", err), ), ) } } specPath := field.NewPath("spec") selector, err := metav1.LabelSelectorAsSelector(&m.Spec.Selector) if err != nil { allErrs = append( allErrs, field.Invalid(specPath.Child("selector"), m.Spec.Selector, err.Error()), ) } else if !selector.Matches(labels.Set(m.Spec.Template.Labels)) { allErrs = append( allErrs, field.Forbidden( specPath.Child("template", "metadata", "labels"), fmt.Sprintf("must match spec.selector %q", selector.String()), ), ) } // MachineSet preflight checks that should be skipped could also be set as annotation on the MachineDeployment // since MachineDeployment annotations are synced to the MachineSet. if feature.Gates.Enabled(feature.MachineSetPreflightChecks) { if err := validateSkippedMachineSetPreflightChecks(m); err != nil { allErrs = append(allErrs, err) } } if old != nil && old.Spec.ClusterName != m.Spec.ClusterName { allErrs = append( allErrs, field.Forbidden( specPath.Child("clusterName"), "field is immutable", ), ) } if m.Spec.Strategy != nil && m.Spec.Strategy.RollingUpdate != nil { total := 1 if m.Spec.Replicas != nil { total = int(*m.Spec.Replicas) } if m.Spec.Strategy.RollingUpdate.MaxSurge != nil { if _, err := intstr.GetScaledValueFromIntOrPercent(m.Spec.Strategy.RollingUpdate.MaxSurge, total, true); err != nil
} if m.Spec.Strategy.RollingUpdate.MaxUnavailable != nil { if _, err := intstr.GetScaledValueFromIntOrPercent(m.Spec.Strategy.RollingUpdate.MaxUnavailable, total, true); err != nil { allErrs = append( allErrs, field.Invalid(specPath.Child("strategy", "rollingUpdate", "maxUnavailable"), m.Spec.Strategy.RollingUpdate.MaxUnavailable, fmt.Sprintf("must be either an int or a percentage: %v", err.Error())), ) } } } if m.Spec.Template.Spec.Version != nil { if !version.KubeSemver.MatchString(*m.Spec.Template.Spec.Version) { allErrs = append(allErrs, field.Invalid(specPath.Child("template", "spec", "version"), *m.Spec.Template.Spec.Version, "must be a valid semantic version")) } } if len(allErrs) == 0 { return nil } return apierrors.NewInvalid(GroupVersion.WithKind("MachineDeployment").GroupKind(), m.Name, allErrs) } // calculateMachineDeploymentReplicas calculates the default value of the replicas field. // The value will be calculated based on the following logic: // * if replicas is already set on newMD, keep the current value // * if the autoscaler min size and max size annotations are set: // - if it's a new MachineDeployment, use min size // - if the replicas field of the old MachineDeployment is < min size, use min size // - if the replicas field of the old MachineDeployment is > max size, use max size // - if the replicas field of
{ allErrs = append( allErrs, field.Invalid(specPath.Child("strategy", "rollingUpdate", "maxSurge"), m.Spec.Strategy.RollingUpdate.MaxSurge, fmt.Sprintf("must be either an int or a percentage: %v", err.Error())), ) }
conditional_block
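intstr.GetScaledValueFromIntOrPercent, used in the conditional block above, accepts either a plain integer or an "N%" string and scales the percentage against the replica total, rounding up for maxSurge and down for maxUnavailable. A sketch of that scaling logic (not the k8s.io/apimachinery implementation itself):

fn scaled_value(int_or_percent: &str, total: i64, round_up: bool) -> Result<i64, String> {
    if let Some(p) = int_or_percent.strip_suffix('%') {
        let percent: i64 = p
            .parse()
            .map_err(|e| format!("must be either an int or a percentage: {}", e))?;
        let scaled = percent * total;
        Ok(if round_up { (scaled + 99) / 100 } else { scaled / 100 })
    } else {
        int_or_percent
            .parse()
            .map_err(|e| format!("must be either an int or a percentage: {}", e))
    }
}

fn main() {
    assert_eq!(scaled_value("25%", 10, true), Ok(3));  // maxSurge rounds up
    assert_eq!(scaled_value("25%", 10, false), Ok(2)); // maxUnavailable rounds down
    assert_eq!(scaled_value("3", 10, true), Ok(3));
}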
machinedeployment_webhook.go
8s.io,resources=machinedeployments,versions=v1beta1,name=default.machinedeployment.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 var _ webhook.CustomDefaulter = &machineDeploymentDefaulter{} var _ webhook.Validator = &MachineDeployment{} // MachineDeploymentDefaulter creates a new CustomDefaulter for MachineDeployments. func MachineDeploymentDefaulter(scheme *runtime.Scheme) webhook.CustomDefaulter { return &machineDeploymentDefaulter{ decoder: admission.NewDecoder(scheme), } } // machineDeploymentDefaulter implements a defaulting webhook for MachineDeployment. type machineDeploymentDefaulter struct { decoder *admission.Decoder } // Default implements webhook.CustomDefaulter. func (webhook *machineDeploymentDefaulter) Default(ctx context.Context, obj runtime.Object) error { m, ok := obj.(*MachineDeployment) if !ok { return apierrors.NewBadRequest(fmt.Sprintf("expected a MachineDeployment but got a %T", obj)) } req, err := admission.RequestFromContext(ctx) if err != nil { return err } dryRun := false if req.DryRun != nil { dryRun = *req.DryRun } var oldMD *MachineDeployment if req.Operation == v1.Update { oldMD = &MachineDeployment{} if err := webhook.decoder.DecodeRaw(req.OldObject, oldMD); err != nil { return errors.Wrapf(err, "failed to decode oldObject to MachineDeployment") } } if m.Labels == nil { m.Labels = make(map[string]string) } m.Labels[ClusterNameLabel] = m.Spec.ClusterName replicas, err := calculateMachineDeploymentReplicas(ctx, oldMD, m, dryRun) if err != nil { return err } m.Spec.Replicas = pointer.Int32(replicas) if m.Spec.MinReadySeconds == nil { m.Spec.MinReadySeconds = pointer.Int32(0) } if m.Spec.RevisionHistoryLimit == nil { m.Spec.RevisionHistoryLimit = pointer.Int32(1) } if m.Spec.ProgressDeadlineSeconds == nil { m.Spec.ProgressDeadlineSeconds = pointer.Int32(600) } if m.Spec.Selector.MatchLabels == nil { m.Spec.Selector.MatchLabels = make(map[string]string) } if m.Spec.Strategy == nil { m.Spec.Strategy = &MachineDeploymentStrategy{} } if m.Spec.Strategy.Type == "" { m.Spec.Strategy.Type = RollingUpdateMachineDeploymentStrategyType } if m.Spec.Template.Labels == nil { m.Spec.Template.Labels = make(map[string]string) } // Default RollingUpdate strategy only if strategy type is RollingUpdate. if m.Spec.Strategy.Type == RollingUpdateMachineDeploymentStrategyType { if m.Spec.Strategy.RollingUpdate == nil { m.Spec.Strategy.RollingUpdate = &MachineRollingUpdateDeployment{} } if m.Spec.Strategy.RollingUpdate.MaxSurge == nil { ios1 := intstr.FromInt(1) m.Spec.Strategy.RollingUpdate.MaxSurge = &ios1 } if m.Spec.Strategy.RollingUpdate.MaxUnavailable == nil { ios0 := intstr.FromInt(0) m.Spec.Strategy.RollingUpdate.MaxUnavailable = &ios0 } } // If no selector has been provided, add label and selector for the // MachineDeployment's name as a default way of providing uniqueness. if len(m.Spec.Selector.MatchLabels) == 0 && len(m.Spec.Selector.MatchExpressions) == 0 { m.Spec.Selector.MatchLabels[MachineDeploymentNameLabel] = m.Name m.Spec.Template.Labels[MachineDeploymentNameLabel] = m.Name } // Make sure selector and template to be in the same cluster. 
m.Spec.Selector.MatchLabels[ClusterNameLabel] = m.Spec.ClusterName m.Spec.Template.Labels[ClusterNameLabel] = m.Spec.ClusterName // tolerate version strings without a "v" prefix: prepend it if it's not there if m.Spec.Template.Spec.Version != nil && !strings.HasPrefix(*m.Spec.Template.Spec.Version, "v") { normalizedVersion := "v" + *m.Spec.Template.Spec.Version m.Spec.Template.Spec.Version = &normalizedVersion } return nil } // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. func (m *MachineDeployment) ValidateCreate() (admission.Warnings, error)
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. func (m *MachineDeployment) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { oldMD, ok := old.(*MachineDeployment) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a MachineDeployment but got a %T", old)) } return nil, m.validate(oldMD) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. func (m *MachineDeployment) ValidateDelete() (admission.Warnings, error) { return nil, nil } func (m *MachineDeployment) validate(old *MachineDeployment) error { var allErrs field.ErrorList // The MachineDeployment name is used as a label value. This check ensures names which are not valid label values are rejected. if errs := validation.IsValidLabelValue(m.Name); len(errs) != 0 { for _, err := range errs { allErrs = append( allErrs, field.Invalid( field.NewPath("metadata", "name"), m.Name, fmt.Sprintf("must be a valid label value: %s", err), ), ) } } specPath := field.NewPath("spec") selector, err := metav1.LabelSelectorAsSelector(&m.Spec.Selector) if err != nil { allErrs = append( allErrs, field.Invalid(specPath.Child("selector"), m.Spec.Selector, err.Error()), ) } else if !selector.Matches(labels.Set(m.Spec.Template.Labels)) { allErrs = append( allErrs, field.Forbidden( specPath.Child("template", "metadata", "labels"), fmt.Sprintf("must match spec.selector %q", selector.String()), ), ) } // MachineSet preflight checks that should be skipped could also be set as annotation on the MachineDeployment // since MachineDeployment annotations are synced to the MachineSet. if feature.Gates.Enabled(feature.MachineSetPreflightChecks) { if err := validateSkippedMachineSetPreflightChecks(m); err != nil { allErrs = append(allErrs, err) } } if old != nil && old.Spec.ClusterName != m.Spec.ClusterName { allErrs = append( allErrs, field.Forbidden( specPath.Child("clusterName"), "field is immutable", ), ) } if m.Spec.Strategy != nil && m.Spec.Strategy.RollingUpdate != nil { total := 1 if m.Spec.Replicas != nil { total = int(*m.Spec.Replicas) } if m.Spec.Strategy.RollingUpdate.MaxSurge != nil { if _, err := intstr.GetScaledValueFromIntOrPercent(m.Spec.Strategy.RollingUpdate.MaxSurge, total, true); err != nil { allErrs = append( allErrs, field.Invalid(specPath.Child("strategy", "rollingUpdate", "maxSurge"), m.Spec.Strategy.RollingUpdate.MaxSurge, fmt.Sprintf("must be either an int or a percentage: %v", err.Error())), ) } } if m.Spec.Strategy.RollingUpdate.MaxUnavailable != nil { if _, err := intstr.GetScaledValueFromIntOrPercent(m.Spec.Strategy.RollingUpdate.MaxUnavailable, total, true); err != nil { allErrs = append( allErrs, field.Invalid(specPath.Child("strategy", "rollingUpdate", "maxUnavailable"), m.Spec.Strategy.RollingUpdate.MaxUnavailable, fmt.Sprintf("must be either an int or a percentage: %v", err.Error())), ) } } } if m.Spec.Template.Spec.Version != nil { if !version.KubeSemver.MatchString(*m.Spec.Template.Spec.Version) { allErrs = append(allErrs, field.Invalid(specPath.Child("template", "spec", "version"), *m.Spec.Template.Spec.Version, "must be a valid semantic version")) } } if len(allErrs) == 0 { return nil } return apierrors.NewInvalid(GroupVersion.WithKind("MachineDeployment").GroupKind(), m.Name, allErrs) } // calculateMachineDeploymentReplicas calculates the default value of the replicas field. 
// The value will be calculated based on the following logic: // * if replicas is already set on newMD, keep the current value // * if the autoscaler min size and max size annotations are set: // - if it's a new MachineDeployment, use min size // - if the replicas field of the old MachineDeployment is < min size, use min size // - if the replicas field of the old MachineDeployment is > max size, use max size // - if the replicas field of
{ return nil, m.validate(nil) }
identifier_body
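The two machinedeployment_webhook.go records above hinge on intstr.GetScaledValueFromIntOrPercent to validate maxSurge and maxUnavailable. A minimal sketch of how that call behaves, with illustrative values not taken from the records:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// "25%" of 4 replicas, rounded up -> 1.
	maxSurge := intstr.FromString("25%")
	v, err := intstr.GetScaledValueFromIntOrPercent(&maxSurge, 4, true)
	fmt.Println(v, err) // 1 <nil>

	// A plain int passes through unchanged.
	maxUnavailable := intstr.FromInt(0)
	v, err = intstr.GetScaledValueFromIntOrPercent(&maxUnavailable, 4, false)
	fmt.Println(v, err) // 0 <nil>

	// A string that is neither an int nor a percentage yields the error
	// that the webhook wraps as "must be either an int or a percentage".
	bad := intstr.FromString("25x")
	_, err = intstr.GetScaledValueFromIntOrPercent(&bad, 4, true)
	fmt.Println(err != nil) // true
}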
day_15.rs
fn neighbors(origin: &Point) -> Neighbors { origin.neighbors_reading_order() } fn neighbor_dist() -> usize { 1 } fn point_order(a: &Point, b: &Point) -> Ordering { Point::cmp_reading_order(*a, *b) } } type CavernPathfinder = Pathfinder<CavernWorld>; #[derive(Copy, Clone, Eq, PartialEq)] enum Team { Elf, Goblin, } impl fmt::Display for Team { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", match self { Team::Goblin => "Goblin", Team::Elf => "Elf", }) } } #[derive(Clone)] struct Fighter { team: Team, pos: Point, hp: isize, } const BASE_ATTACK_POWER: isize = 3; impl Fighter { fn new(team: Team, pos: Point) -> Self { Self { team, pos, hp: 200, } } } impl fmt::Debug for Fighter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Fighter ( {} @ {} )", self.team, self.pos) } } #[derive(Debug, Copy, Clone, Eq, PartialEq)] enum Tile { Empty, Blocked, } impl fmt::Display for Tile { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Tile::Empty => write!(f, "."), Tile::Blocked => write!(f, "#"), } } } #[derive(Clone)] struct Cavern { tiles: Vec<Tile>, width: usize, height: usize, fighters: Vec<Fighter>, fighter_positions: HashMap<Point, usize>, elf_attack_power: isize, } impl Cavern { fn parse(s: &str) -> Self { let mut width = 0; let mut height = 0; let mut fighters = Vec::new(); let mut tiles = Vec::new(); for (y, line) in s.lines().enumerate() { height += 1; width = line.len(); // assume all lines are the same length for (x, char) in line.chars().enumerate() { let point = Point::new(x as isize, y as isize); match char { '#' => tiles.push(Tile::Blocked), 'E' => { tiles.push(Tile::Empty); fighters.push(Fighter::new(Team::Elf, point)); } 'G' => { tiles.push(Tile::Empty); fighters.push(Fighter::new(Team::Goblin, point)); } _ => tiles.push(Tile::Empty), } } } let mut cavern = Self { tiles, width, height, fighters, fighter_positions: HashMap::new(), elf_attack_power: BASE_ATTACK_POWER, }; cavern.refresh_fighter_positions(); cavern } fn refresh_fighter_positions(&mut self) { self.fighter_positions.clear(); for (i, f) in self.fighters.iter().enumerate() { self.fighter_positions.insert(f.pos, i); } } fn is_free_space(&self, point: Point) -> bool { match self.tile_at(point) { Tile::Empty => self.fighter_at(point).is_none(), Tile::Blocked => false, } } fn fighter_at(&self, point: Point) -> Option<usize> { self.fighter_positions.get(&point) .filter(|&&i| self.fighters[i].hp > 0) .cloned() } fn tile_at(&self, point: Point) -> Tile { let off = self.width as isize * point.y + point.x; if off >= 0 && off < self.tiles.len() as isize { self.tiles[off as usize] } else { Tile::Blocked } } fn find_targets(&self, i: usize, targets: &mut Vec<usize>) { targets.clear(); let fighter = &self.fighters[i]; targets.extend(self.fighters.iter().enumerate() .filter(|(_, other)| other.hp > 0) .filter_map(|(j, other)| if other.team != fighter.team { Some(j) } else { None })); } fn move_fighter(&mut self, i: usize, targets: &[usize], pathfinder: &mut CavernPathfinder) { let fighter = &self.fighters[i]; let dests: HashSet<_> = targets.iter() .flat_map(|j| { let target_pos = self.fighters[*j].pos; target_pos.neighbors_reading_order() }) .filter(|p| self.is_free_space(*p) || *p == fighter.pos) .collect(); if !dests.contains(&fighter.pos)
let b_dest = *b.last().unwrap(); // sort first by shortest paths... match a.len().cmp(&b.len()) { // then by origin pos in reading order Ordering::Equal => Point::cmp_reading_order(a_dest, b_dest), dest_order => dest_order, } }); if !paths.is_empty() { // move this fighter to the first step of the chosen path self.fighters[i].pos = paths[0][0]; self.refresh_fighter_positions(); } } } fn resolve_attacks(&mut self, i: usize) { let neighbors = self.fighters[i].pos.neighbors_reading_order(); let target_index = neighbors .filter_map(|neighbor| { self.fighters.iter().enumerate() .filter_map(|(j, f)| { if f.pos == neighbor && f.hp > 0 && f.team != self.fighters[i].team { Some(j) } else { None } }) .next() }) .min_by(|a, b| { let a = &self.fighters[*a]; let b = &self.fighters[*b]; match a.hp.cmp(&b.hp) { Ordering::Equal => Point::cmp_reading_order(a.pos, b.pos), hp_order => hp_order, } }); if let Some(j) = target_index { let attack_power = match self.fighters[i].team { Team::Elf => self.elf_attack_power, Team::Goblin => BASE_ATTACK_POWER, }; self.fighters[j].hp = isize::max(0, self.fighters[j].hp - attack_power); } } fn tick(&mut self, pathfinder: &mut CavernPathfinder) -> Option<Team> { let mut targets = Vec::new(); self.fighters.sort_by(|a, b| Point::cmp_reading_order(a.pos, b.pos)); self.refresh_fighter_positions(); for i in 0..self.fighters.len() { if self.fighters[i].hp > 0 { self.find_targets(i, &mut targets); if targets.is_empty() { let winner = self.fighters[i].team; // all enemies are dead, battle is over return Some(winner); } self.move_fighter(i, &targets, pathfinder); self.resolve_attacks(i); } } None } fn elves(&self) -> impl Iterator<Item=&Fighter> { self.fighters.iter().filter(|f| f.hp > 0 && f.team == Team::Elf) } } impl fmt::Display for Cavern { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { for y in 0..self.height as isize { for x in 0..self.width as isize { let pos = Point::new(x, y); match self.fighter_at(pos) { Some(fighter_pos) => match self.fighters[fighter_pos].team { Team::Elf => write!(f, "E")?, Team::Goblin => write!(f, "G")?, } None => write!(f, "{}", self.tile_at(pos))?, } } writeln!(f)?; } Ok(()) } } struct Outcome { elf_power: isize, elves_remaining: Vec<Fighter>, winner: Team, hp_sum: isize, time: isize, } impl Outcome { fn new(cavern: &Cavern, winner: Team, time: isize) -> Self { let hp_sum = cavern.fighters.iter().map(|f| f.hp).sum::<isize>(); Self { hp_sum, elf_power: cavern.elf_attack_power, elves_remaining: cavern.elves
{ let mut paths = Vec::new(); let origin_points = fighter.pos.neighbors_reading_order() .filter(|p| self.is_free_space(*p)); let mut path = Vec::new(); for origin in origin_points { for &dest in &dests { let free_tile_pred = |p: &Point| self.is_free_space(*p); if pathfinder.find_path(origin, dest, free_tile_pred, &mut path) { paths.push(path.clone()); path.clear(); } } } paths.sort_by(|a, b| { let a_dest = *a.last().unwrap();
conditional_block
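In move_fighter above, candidate paths are sorted by length first and then by reading order (note the Rust comment says "origin pos", but the comparison is actually on each path's destination). A small Go sketch of that ordering, using made-up points:

package main

import (
	"fmt"
	"sort"
)

type point struct{ x, y int }

// readingOrderLess is the tie-breaker used throughout day_15.rs:
// top-to-bottom first, then left-to-right within a row.
func readingOrderLess(a, b point) bool {
	if a.y != b.y {
		return a.y < b.y
	}
	return a.x < b.x
}

func main() {
	paths := [][]point{
		{{1, 0}, {1, 1}}, // len 2, dest (1,1)
		{{0, 1}},         // len 1, dest (0,1)
		{{1, 0}, {2, 0}}, // len 2, dest (2,0)
	}
	sort.Slice(paths, func(i, j int) bool {
		a, b := paths[i], paths[j]
		if len(a) != len(b) {
			return len(a) < len(b) // shortest path wins
		}
		return readingOrderLess(a[len(a)-1], b[len(b)-1])
	})
	fmt.Println(paths[0]) // [{0 1}]: the single-step path comes first
}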
day_15.rs
fn neighbors(origin: &Point) -> Neighbors { origin.neighbors_reading_order() } fn neighbor_dist() -> usize { 1 } fn point_order(a: &Point, b: &Point) -> Ordering { Point::cmp_reading_order(*a, *b) } } type CavernPathfinder = Pathfinder<CavernWorld>; #[derive(Copy, Clone, Eq, PartialEq)] enum Team { Elf, Goblin, } impl fmt::Display for Team { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", match self { Team::Goblin => "Goblin", Team::Elf => "Elf", }) } } #[derive(Clone)] struct Fighter { team: Team, pos: Point, hp: isize, } const BASE_ATTACK_POWER: isize = 3; impl Fighter { fn new(team: Team, pos: Point) -> Self { Self { team, pos, hp: 200, } } } impl fmt::Debug for Fighter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Fighter ( {} @ {} )", self.team, self.pos) } } #[derive(Debug, Copy, Clone, Eq, PartialEq)] enum Tile { Empty, Blocked, } impl fmt::Display for Tile { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Tile::Empty => write!(f, "."), Tile::Blocked => write!(f, "#"), } } } #[derive(Clone)] struct Cavern { tiles: Vec<Tile>, width: usize, height: usize, fighters: Vec<Fighter>, fighter_positions: HashMap<Point, usize>, elf_attack_power: isize, } impl Cavern { fn parse(s: &str) -> Self { let mut width = 0; let mut height = 0; let mut fighters = Vec::new(); let mut tiles = Vec::new(); for (y, line) in s.lines().enumerate() { height += 1; width = line.len(); // assume all lines are the same length for (x, char) in line.chars().enumerate() { let point = Point::new(x as isize, y as isize); match char { '#' => tiles.push(Tile::Blocked), 'E' => { tiles.push(Tile::Empty); fighters.push(Fighter::new(Team::Elf, point)); } 'G' => { tiles.push(Tile::Empty); fighters.push(Fighter::new(Team::Goblin, point)); } _ => tiles.push(Tile::Empty), } } } let mut cavern = Self { tiles, width, height, fighters, fighter_positions: HashMap::new(), elf_attack_power: BASE_ATTACK_POWER, }; cavern.refresh_fighter_positions(); cavern } fn refresh_fighter_positions(&mut self) { self.fighter_positions.clear(); for (i, f) in self.fighters.iter().enumerate() { self.fighter_positions.insert(f.pos, i); } } fn is_free_space(&self, point: Point) -> bool { match self.tile_at(point) { Tile::Empty => self.fighter_at(point).is_none(), Tile::Blocked => false, } } fn fighter_at(&self, point: Point) -> Option<usize> { self.fighter_positions.get(&point) .filter(|&&i| self.fighters[i].hp > 0) .cloned() } fn tile_at(&self, point: Point) -> Tile { let off = self.width as isize * point.y + point.x; if off >= 0 && off < self.tiles.len() as isize { self.tiles[off as usize] } else { Tile::Blocked } } fn find_targets(&self, i: usize, targets: &mut Vec<usize>) { targets.clear(); let fighter = &self.fighters[i]; targets.extend(self.fighters.iter().enumerate() .filter(|(_, other)| other.hp > 0) .filter_map(|(j, other)| if other.team != fighter.team { Some(j) } else { None })); } fn move_fighter(&mut self, i: usize, targets: &[usize], pathfinder: &mut CavernPathfinder) { let fighter = &self.fighters[i]; let dests: HashSet<_> = targets.iter() .flat_map(|j| { let target_pos = self.fighters[*j].pos; target_pos.neighbors_reading_order() }) .filter(|p| self.is_free_space(*p) || *p == fighter.pos) .collect(); if !dests.contains(&fighter.pos) { let mut paths = Vec::new(); let origin_points = fighter.pos.neighbors_reading_order() .filter(|p| self.is_free_space(*p)); let mut path = Vec::new(); for origin in origin_points { for &dest in &dests { let free_tile_pred = |p: &Point| 
self.is_free_space(*p); if pathfinder.find_path(origin, dest, free_tile_pred, &mut path) { paths.push(path.clone()); path.clear(); } } } paths.sort_by(|a, b| { let a_dest = *a.last().unwrap(); let b_dest = *b.last().unwrap(); // sort first by shortest paths... match a.len().cmp(&b.len()) { // then by origin pos in reading order Ordering::Equal => Point::cmp_reading_order(a_dest, b_dest), dest_order => dest_order, } }); if !paths.is_empty() { // move this fighter to the first step of the chosen path self.fighters[i].pos = paths[0][0]; self.refresh_fighter_positions(); } } } fn resolve_attacks(&mut self, i: usize)
match a.hp.cmp(&b.hp) { Ordering::Equal => Point::cmp_reading_order(a.pos, b.pos), hp_order => hp_order, } }); if let Some(j) = target_index { let attack_power = match self.fighters[i].team { Team::Elf => self.elf_attack_power, Team::Goblin => BASE_ATTACK_POWER, }; self.fighters[j].hp = isize::max(0, self.fighters[j].hp - attack_power); } } fn tick(&mut self, pathfinder: &mut CavernPathfinder) -> Option<Team> { let mut targets = Vec::new(); self.fighters.sort_by(|a, b| Point::cmp_reading_order(a.pos, b.pos)); self.refresh_fighter_positions(); for i in 0..self.fighters.len() { if self.fighters[i].hp > 0 { self.find_targets(i, &mut targets); if targets.is_empty() { let winner = self.fighters[i].team; // all enemies are dead, battle is over return Some(winner); } self.move_fighter(i, &targets, pathfinder); self.resolve_attacks(i); } } None } fn elves(&self) -> impl Iterator<Item=&Fighter> { self.fighters.iter().filter(|f| f.hp > 0 && f.team == Team::Elf) } } impl fmt::Display for Cavern { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { for y in 0..self.height as isize { for x in 0..self.width as isize { let pos = Point::new(x, y); match self.fighter_at(pos) { Some(fighter_pos) => match self.fighters[fighter_pos].team { Team::Elf => write!(f, "E")?, Team::Goblin => write!(f, "G")?, } None => write!(f, "{}", self.tile_at(pos))?, } } writeln!(f)?; } Ok(()) } } struct Outcome { elf_power: isize, elves_remaining: Vec<Fighter>, winner: Team, hp_sum: isize, time: isize, } impl Outcome { fn new(cavern: &Cavern, winner: Team, time: isize) -> Self { let hp_sum = cavern.fighters.iter().map(|f| f.hp).sum::<isize>(); Self { hp_sum, elf_power: cavern.elf_attack_power, elves_remaining: cavern.elves
{ let neighbors = self.fighters[i].pos.neighbors_reading_order(); let target_index = neighbors .filter_map(|neighbor| { self.fighters.iter().enumerate() .filter_map(|(j, f)| { if f.pos == neighbor && f.hp > 0 && f.team != self.fighters[i].team { Some(j) } else { None } }) .next() }) .min_by(|a, b| { let a = &self.fighters[*a]; let b = &self.fighters[*b];
identifier_body
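resolve_attacks above selects the adjacent enemy with the lowest HP, breaking ties in reading order. The same selection rule sketched in Go, with an illustrative fighter type:

package main

import "fmt"

type fighter struct {
	x, y, hp int
}

// chooseTarget picks the enemy with the lowest HP, breaking ties by
// reading order (y first, then x) -- the rule min_by implements above.
func chooseTarget(enemies []fighter) (fighter, bool) {
	if len(enemies) == 0 {
		return fighter{}, false
	}
	best := enemies[0]
	for _, e := range enemies[1:] {
		if e.hp < best.hp ||
			(e.hp == best.hp && (e.y < best.y || (e.y == best.y && e.x < best.x))) {
			best = e
		}
	}
	return best, true
}

func main() {
	enemies := []fighter{{2, 1, 20}, {1, 1, 20}, {0, 0, 50}}
	t, _ := chooseTarget(enemies)
	fmt.Println(t) // {1 1 20}: lowest HP, earliest in reading order
}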
day_15.rs
fn neighbors(origin: &Point) -> Neighbors { origin.neighbors_reading_order() } fn neighbor_dist() -> usize { 1 } fn point_order(a: &Point, b: &Point) -> Ordering { Point::cmp_reading_order(*a, *b) } } type CavernPathfinder = Pathfinder<CavernWorld>; #[derive(Copy, Clone, Eq, PartialEq)] enum Team { Elf, Goblin, } impl fmt::Display for Team { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", match self { Team::Goblin => "Goblin", Team::Elf => "Elf", }) } } #[derive(Clone)] struct Fighter { team: Team, pos: Point, hp: isize, } const BASE_ATTACK_POWER: isize = 3; impl Fighter { fn new(team: Team, pos: Point) -> Self { Self { team, pos, hp: 200, } } } impl fmt::Debug for Fighter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Fighter ( {} @ {} )", self.team, self.pos) } } #[derive(Debug, Copy, Clone, Eq, PartialEq)] enum Tile { Empty, Blocked, } impl fmt::Display for Tile { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Tile::Empty => write!(f, "."), Tile::Blocked => write!(f, "#"), } } } #[derive(Clone)] struct Cavern { tiles: Vec<Tile>, width: usize, height: usize, fighters: Vec<Fighter>, fighter_positions: HashMap<Point, usize>, elf_attack_power: isize, } impl Cavern { fn parse(s: &str) -> Self { let mut width = 0; let mut height = 0; let mut fighters = Vec::new(); let mut tiles = Vec::new(); for (y, line) in s.lines().enumerate() { height += 1; width = line.len(); // assume all lines are the same length for (x, char) in line.chars().enumerate() { let point = Point::new(x as isize, y as isize); match char { '#' => tiles.push(Tile::Blocked), 'E' => { tiles.push(Tile::Empty); fighters.push(Fighter::new(Team::Elf, point)); } 'G' => { tiles.push(Tile::Empty); fighters.push(Fighter::new(Team::Goblin, point)); } _ => tiles.push(Tile::Empty), } } } let mut cavern = Self { tiles, width, height, fighters, fighter_positions: HashMap::new(), elf_attack_power: BASE_ATTACK_POWER, }; cavern.refresh_fighter_positions(); cavern } fn refresh_fighter_positions(&mut self) { self.fighter_positions.clear(); for (i, f) in self.fighters.iter().enumerate() { self.fighter_positions.insert(f.pos, i); } } fn is_free_space(&self, point: Point) -> bool { match self.tile_at(point) { Tile::Empty => self.fighter_at(point).is_none(), Tile::Blocked => false, } } fn fighter_at(&self, point: Point) -> Option<usize> { self.fighter_positions.get(&point) .filter(|&&i| self.fighters[i].hp > 0) .cloned() } fn tile_at(&self, point: Point) -> Tile { let off = self.width as isize * point.y + point.x; if off >= 0 && off < self.tiles.len() as isize { self.tiles[off as usize] } else { Tile::Blocked } } fn
(&self, i: usize, targets: &mut Vec<usize>) { targets.clear(); let fighter = &self.fighters[i]; targets.extend(self.fighters.iter().enumerate() .filter(|(_, other)| other.hp > 0) .filter_map(|(j, other)| if other.team != fighter.team { Some(j) } else { None })); } fn move_fighter(&mut self, i: usize, targets: &[usize], pathfinder: &mut CavernPathfinder) { let fighter = &self.fighters[i]; let dests: HashSet<_> = targets.iter() .flat_map(|j| { let target_pos = self.fighters[*j].pos; target_pos.neighbors_reading_order() }) .filter(|p| self.is_free_space(*p) || *p == fighter.pos) .collect(); if !dests.contains(&fighter.pos) { let mut paths = Vec::new(); let origin_points = fighter.pos.neighbors_reading_order() .filter(|p| self.is_free_space(*p)); let mut path = Vec::new(); for origin in origin_points { for &dest in &dests { let free_tile_pred = |p: &Point| self.is_free_space(*p); if pathfinder.find_path(origin, dest, free_tile_pred, &mut path) { paths.push(path.clone()); path.clear(); } } } paths.sort_by(|a, b| { let a_dest = *a.last().unwrap(); let b_dest = *b.last().unwrap(); // sort first by shortest paths... match a.len().cmp(&b.len()) { // then by origin pos in reading order Ordering::Equal => Point::cmp_reading_order(a_dest, b_dest), dest_order => dest_order, } }); if !paths.is_empty() { // move this fighter to the first step of the chosen path self.fighters[i].pos = paths[0][0]; self.refresh_fighter_positions(); } } } fn resolve_attacks(&mut self, i: usize) { let neighbors = self.fighters[i].pos.neighbors_reading_order(); let target_index = neighbors .filter_map(|neighbor| { self.fighters.iter().enumerate() .filter_map(|(j, f)| { if f.pos == neighbor && f.hp > 0 && f.team != self.fighters[i].team { Some(j) } else { None } }) .next() }) .min_by(|a, b| { let a = &self.fighters[*a]; let b = &self.fighters[*b]; match a.hp.cmp(&b.hp) { Ordering::Equal => Point::cmp_reading_order(a.pos, b.pos), hp_order => hp_order, } }); if let Some(j) = target_index { let attack_power = match self.fighters[i].team { Team::Elf => self.elf_attack_power, Team::Goblin => BASE_ATTACK_POWER, }; self.fighters[j].hp = isize::max(0, self.fighters[j].hp - attack_power); } } fn tick(&mut self, pathfinder: &mut CavernPathfinder) -> Option<Team> { let mut targets = Vec::new(); self.fighters.sort_by(|a, b| Point::cmp_reading_order(a.pos, b.pos)); self.refresh_fighter_positions(); for i in 0..self.fighters.len() { if self.fighters[i].hp > 0 { self.find_targets(i, &mut targets); if targets.is_empty() { let winner = self.fighters[i].team; // all enemies are dead, battle is over return Some(winner); } self.move_fighter(i, &targets, pathfinder); self.resolve_attacks(i); } } None } fn elves(&self) -> impl Iterator<Item=&Fighter> { self.fighters.iter().filter(|f| f.hp > 0 && f.team == Team::Elf) } } impl fmt::Display for Cavern { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { for y in 0..self.height as isize { for x in 0..self.width as isize { let pos = Point::new(x, y); match self.fighter_at(pos) { Some(fighter_pos) => match self.fighters[fighter_pos].team { Team::Elf => write!(f, "E")?, Team::Goblin => write!(f, "G")?, } None => write!(f, "{}", self.tile_at(pos))?, } } writeln!(f)?; } Ok(()) } } struct Outcome { elf_power: isize, elves_remaining: Vec<Fighter>, winner: Team, hp_sum: isize, time: isize, } impl Outcome { fn new(cavern: &Cavern, winner: Team, time: isize) -> Self { let hp_sum = cavern.fighters.iter().map(|f| f.hp).sum::<isize>(); Self { hp_sum, elf_power: cavern.elf_attack_power, elves_remaining: 
cavern.elves
find_targets
identifier_name
day_15.rs
fn neighbors(origin: &Point) -> Neighbors { origin.neighbors_reading_order() } fn neighbor_dist() -> usize { 1 } fn point_order(a: &Point, b: &Point) -> Ordering { Point::cmp_reading_order(*a, *b) } } type CavernPathfinder = Pathfinder<CavernWorld>; #[derive(Copy, Clone, Eq, PartialEq)] enum Team { Elf, Goblin, } impl fmt::Display for Team { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", match self { Team::Goblin => "Goblin", Team::Elf => "Elf", }) } } #[derive(Clone)] struct Fighter { team: Team, pos: Point, hp: isize, } const BASE_ATTACK_POWER: isize = 3; impl Fighter { fn new(team: Team, pos: Point) -> Self { Self { team, pos, hp: 200, } } } impl fmt::Debug for Fighter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Fighter ( {} @ {} )", self.team, self.pos) } } #[derive(Debug, Copy, Clone, Eq, PartialEq)] enum Tile { Empty, Blocked, } impl fmt::Display for Tile { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Tile::Empty => write!(f, "."), Tile::Blocked => write!(f, "#"), } } } #[derive(Clone)] struct Cavern { tiles: Vec<Tile>, width: usize, height: usize, fighters: Vec<Fighter>, fighter_positions: HashMap<Point, usize>, elf_attack_power: isize, } impl Cavern { fn parse(s: &str) -> Self { let mut width = 0; let mut height = 0;
for (y, line) in s.lines().enumerate() { height += 1; width = line.len(); // assume all lines are the same length for (x, char) in line.chars().enumerate() { let point = Point::new(x as isize, y as isize); match char { '#' => tiles.push(Tile::Blocked), 'E' => { tiles.push(Tile::Empty); fighters.push(Fighter::new(Team::Elf, point)); } 'G' => { tiles.push(Tile::Empty); fighters.push(Fighter::new(Team::Goblin, point)); } _ => tiles.push(Tile::Empty), } } } let mut cavern = Self { tiles, width, height, fighters, fighter_positions: HashMap::new(), elf_attack_power: BASE_ATTACK_POWER, }; cavern.refresh_fighter_positions(); cavern } fn refresh_fighter_positions(&mut self) { self.fighter_positions.clear(); for (i, f) in self.fighters.iter().enumerate() { self.fighter_positions.insert(f.pos, i); } } fn is_free_space(&self, point: Point) -> bool { match self.tile_at(point) { Tile::Empty => self.fighter_at(point).is_none(), Tile::Blocked => false, } } fn fighter_at(&self, point: Point) -> Option<usize> { self.fighter_positions.get(&point) .filter(|&&i| self.fighters[i].hp > 0) .cloned() } fn tile_at(&self, point: Point) -> Tile { let off = self.width as isize * point.y + point.x; if off >= 0 && off < self.tiles.len() as isize { self.tiles[off as usize] } else { Tile::Blocked } } fn find_targets(&self, i: usize, targets: &mut Vec<usize>) { targets.clear(); let fighter = &self.fighters[i]; targets.extend(self.fighters.iter().enumerate() .filter(|(_, other)| other.hp > 0) .filter_map(|(j, other)| if other.team != fighter.team { Some(j) } else { None })); } fn move_fighter(&mut self, i: usize, targets: &[usize], pathfinder: &mut CavernPathfinder) { let fighter = &self.fighters[i]; let dests: HashSet<_> = targets.iter() .flat_map(|j| { let target_pos = self.fighters[*j].pos; target_pos.neighbors_reading_order() }) .filter(|p| self.is_free_space(*p) || *p == fighter.pos) .collect(); if !dests.contains(&fighter.pos) { let mut paths = Vec::new(); let origin_points = fighter.pos.neighbors_reading_order() .filter(|p| self.is_free_space(*p)); let mut path = Vec::new(); for origin in origin_points { for &dest in &dests { let free_tile_pred = |p: &Point| self.is_free_space(*p); if pathfinder.find_path(origin, dest, free_tile_pred, &mut path) { paths.push(path.clone()); path.clear(); } } } paths.sort_by(|a, b| { let a_dest = *a.last().unwrap(); let b_dest = *b.last().unwrap(); // sort first by shortest paths... 
match a.len().cmp(&b.len()) { // then by origin pos in reading order Ordering::Equal => Point::cmp_reading_order(a_dest, b_dest), dest_order => dest_order, } }); if !paths.is_empty() { // move this fighter to the first step of the chosen path self.fighters[i].pos = paths[0][0]; self.refresh_fighter_positions(); } } } fn resolve_attacks(&mut self, i: usize) { let neighbors = self.fighters[i].pos.neighbors_reading_order(); let target_index = neighbors .filter_map(|neighbor| { self.fighters.iter().enumerate() .filter_map(|(j, f)| { if f.pos == neighbor && f.hp > 0 && f.team != self.fighters[i].team { Some(j) } else { None } }) .next() }) .min_by(|a, b| { let a = &self.fighters[*a]; let b = &self.fighters[*b]; match a.hp.cmp(&b.hp) { Ordering::Equal => Point::cmp_reading_order(a.pos, b.pos), hp_order => hp_order, } }); if let Some(j) = target_index { let attack_power = match self.fighters[i].team { Team::Elf => self.elf_attack_power, Team::Goblin => BASE_ATTACK_POWER, }; self.fighters[j].hp = isize::max(0, self.fighters[j].hp - attack_power); } } fn tick(&mut self, pathfinder: &mut CavernPathfinder) -> Option<Team> { let mut targets = Vec::new(); self.fighters.sort_by(|a, b| Point::cmp_reading_order(a.pos, b.pos)); self.refresh_fighter_positions(); for i in 0..self.fighters.len() { if self.fighters[i].hp > 0 { self.find_targets(i, &mut targets); if targets.is_empty() { let winner = self.fighters[i].team; // all enemies are dead, battle is over return Some(winner); } self.move_fighter(i, &targets, pathfinder); self.resolve_attacks(i); } } None } fn elves(&self) -> impl Iterator<Item=&Fighter> { self.fighters.iter().filter(|f| f.hp > 0 && f.team == Team::Elf) } } impl fmt::Display for Cavern { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { for y in 0..self.height as isize { for x in 0..self.width as isize { let pos = Point::new(x, y); match self.fighter_at(pos) { Some(fighter_pos) => match self.fighters[fighter_pos].team { Team::Elf => write!(f, "E")?, Team::Goblin => write!(f, "G")?, } None => write!(f, "{}", self.tile_at(pos))?, } } writeln!(f)?; } Ok(()) } } struct Outcome { elf_power: isize, elves_remaining: Vec<Fighter>, winner: Team, hp_sum: isize, time: isize, } impl Outcome { fn new(cavern: &Cavern, winner: Team, time: isize) -> Self { let hp_sum = cavern.fighters.iter().map(|f| f.hp).sum::<isize>(); Self { hp_sum, elf_power: cavern.elf_attack_power, elves_remaining: cavern.elves
let mut fighters = Vec::new(); let mut tiles = Vec::new();
random_line_split
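The random_line_split record above cuts Cavern::parse at the two Vec allocations. For reference, the parsing idea condensed into Go (the types and the sample map are illustrative):

package main

import "fmt"

type point struct{ x, y int }

// parse walks the grid, records walls, and treats 'E'/'G' as fighters
// standing on an empty tile, mirroring the Rust parse loop above.
func parse(s string) (walls map[point]bool, elves, goblins []point) {
	walls = map[point]bool{}
	x, y := 0, 0
	for _, ch := range s {
		switch ch {
		case '\n':
			y++
			x = 0
			continue
		case '#':
			walls[point{x, y}] = true
		case 'E':
			elves = append(elves, point{x, y})
		case 'G':
			goblins = append(goblins, point{x, y})
		}
		x++
	}
	return
}

func main() {
	walls, elves, goblins := parse("#####\n#E.G#\n#####")
	fmt.Println(len(walls), elves, goblins) // 12 [{1 1}] [{3 1}]
}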
main.go
for { if commandLine, err := line.Prompt("BaiduPCS-Go > "); err == nil { line.AppendHistory(commandLine) cmdArgs := args.GetArgs(commandLine) if len(cmdArgs) == 0 { continue } s := []string{os.Args[0]} s = append(s, cmdArgs...) closeLiner(line) c.App.Run(s) line = newLiner() } else if err == liner.ErrPromptAborted || err == io.EOF { break } else { log.Print("Error reading line: ", err) continue } } } else { fmt.Printf("未找到命令: %s\n运行命令 %s help 获取帮助\n", c.Args().Get(0), app.Name) } } app.Commands = []cli.Command{ { Name: "login", Usage: "使用百度BDUSS登录百度账号", Description: fmt.Sprintf("\n 示例: \n\n %s\n\n %s\n\n %s\n\n %s\n\n %s\n", app.Name+" login --bduss=123456789", app.Name+" login", "百度BDUSS获取方法: ", "参考这篇 Wiki: https://github.com/iikira/BaiduPCS-Go/wiki/关于-获取百度-BDUSS", "或者百度搜索: 获取百度BDUSS", ), Category: "百度帐号操作", Before: reloadFn, After: reloadFn, Action: func(c *cli.Context) error { bduss := "" if c.IsSet("bduss") { bduss = c.String("bduss") } else if c.NArg() == 0 { cli.ShowCommandHelp(c, c.Command.Name) line := liner.NewLiner() line.SetCtrlCAborts(true) defer line.Close() bduss, _ = line.Prompt("请输入百度BDUSS值, 回车键提交 > ") } else { cli.ShowCommandHelp(c, c.Command.Name) return nil } username, err := pcsconfig.Config.SetBDUSS(bduss) if err != nil { fmt.Println(err) return nil } fmt.Println("百度帐号登录成功:", username) return nil }, Flags: []cli.Flag{ cli.StringFlag{ Name: "bduss", Usage: "百度BDUSS", }, }, },
Name: "chuser", Usage: "切换已登录的百度帐号", Description: fmt.Sprintf("%s\n 示例:\n\n %s\n %s\n", "如果运行该条命令没有提供参数, 程序将会列出所有的百度帐号, 供选择切换", app.Name+" chuser --uid=123456789", app.Name+" chuser", ), Category: "百度帐号操作", Before: reloadFn, After: reloadFn, Action: func(c *cli.Context) error { if len(pcsconfig.Config.BaiduUserList) == 0 { fmt.Println("未设置任何百度帐号, 不能切换") return nil } var uid uint64 if c.IsSet("uid") { if pcsconfig.Config.CheckUIDExist(c.Uint64("uid")) { uid = c.Uint64("uid") } else { fmt.Println("切换用户失败, uid 不存在") } } else if c.NArg() == 0 { cli.HandleAction(app.Command("loglist").Action, c) line := liner.NewLiner() line.SetCtrlCAborts(true) defer line.Close() nLine, _ := line.Prompt("请输入要切换帐号的 index 值 > ") if n, err := strconv.Atoi(nLine); err == nil && n >= 0 && n < len(pcsconfig.Config.BaiduUserList) { uid = pcsconfig.Config.BaiduUserList[n].UID } else { fmt.Println("切换用户失败, 请检查 index 值是否正确") } } else { cli.ShowCommandHelp(c, c.Command.Name) } if uid == 0 { return nil } pcsconfig.Config.BaiduActiveUID = uid if err := pcsconfig.Config.Save(); err != nil { fmt.Println(err) return nil } fmt.Printf("切换用户成功, %v\n", pcsconfig.ActiveBaiduUser.Name) return nil }, Flags: []cli.Flag{ cli.StringFlag{ Name: "uid", Usage: "百度帐号 uid 值", }, }, }, { Name: "logout", Usage: "退出已登录的百度帐号", Description: fmt.Sprintf("%s\n 示例:\n\n %s\n %s\n", "如果运行该条命令没有提供参数, 程序将会列出所有的百度帐号, 供选择退出", app.Name+" logout --uid=123456789", app.Name+" logout", ), Category: "百度帐号操作", Before: reloadFn, After: reloadFn, Action: func(c *cli.Context) error { if len(pcsconfig.Config.BaiduUserList) == 0 { fmt.Println("未设置任何百度帐号, 不能退出") return nil } var uid uint64 if c.IsSet("uid") { if pcsconfig.Config.CheckUIDExist(c.Uint64("uid")) { uid = c.Uint64("uid") } else { fmt.Println("退出用户失败, uid 不存在") } } else if c.NArg() == 0 { cli.HandleAction(app.Command("loglist").Action, c) line := liner.NewLiner() line.SetCtrlCAborts(true) defer line.Close() nLine, _ := line.Prompt("请输入要退出帐号的 index 值 > ") if n, err := strconv.Atoi(nLine); err == nil && n >= 0 && n < len(pcsconfig.Config.BaiduUserList) { uid = pcsconfig.Config.BaiduUserList[n].UID } else { fmt.Println("退出用户失败, 请检查 index 值是否正确") } } else { cli.ShowCommandHelp(c, c.Command.Name) } if uid == 0 { return nil } // 删除之前先获取被删除的数据, 用于下文输出日志 baidu, err := pcsconfig.Config.GetBaiduUserByUID(uid) if err != nil { fmt.Println(err) return nil } if !pcsconfig.Config.DeleteBaiduUserByUID(uid) { fmt.Printf("退出用户失败, %s\n", baidu.Name) } fmt.Printf("退出用户成功, %v\n", baidu.Name) return nil }, Flags: []cli.Flag{ cli.StringFlag{ Name: "uid", Usage: "百度帐号 uid 值", }, }, }, { Name: "loglist", Usage: "获取当前帐号, 和所有已登录的百度帐号", UsageText: fmt.Sprintf("%s loglist", app.Name), Category: "百度帐号操作", Before: reloadFn, Action: func(c *cli.Context) error { fmt.Printf("\n当前帐号 uid: %d, 用户名: %s\n", pcsconfig.ActiveBaiduUser.UID, pcsconfig.ActiveBaiduUser.Name) fmt.Println(pcsconfig.Config.GetAllBaiduUser()) return nil }, }, { Name: "quota", Usage: "获取配额, 即获取网盘总空间, 和已使用空间", UsageText: fmt.Sprintf("%s quota", app.Name), Category: "网盘操作", Before: reloadFn, Action: func(c *cli.Context) error { baidupcscmd.RunGetQuota() return nil }, }, { Name: "cd", Usage: "切换工作目录", UsageText: fmt.Sprintf("%s cd <目录>", app.Name), Category: "网盘操作", Before: reloadFn, After: reloadFn, Action: func(c *cli.Context) error { if c.NArg() == 0 { cli.ShowCommandHelp(c, c.Command.Name) return nil } baidupcscmd.RunChangeDirectory(c.Args().Get(0)) return nil }, }, { Name: "ls", Aliases: []string{"l", "ll"}, Usage:
{
random_line_split
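The main.go records above drive an interactive shell with github.com/peterh/liner. A stripped-down sketch of the same prompt/history/abort loop (the prompt text is illustrative):

package main

import (
	"fmt"
	"io"
	"log"

	"github.com/peterh/liner"
)

func main() {
	line := liner.NewLiner()
	line.SetCtrlCAborts(true) // make Ctrl-C return ErrPromptAborted
	defer line.Close()

	for {
		input, err := line.Prompt("demo > ")
		switch {
		case err == nil:
			if input == "" {
				continue
			}
			line.AppendHistory(input)
			fmt.Println("you typed:", input)
		case err == liner.ErrPromptAborted || err == io.EOF:
			return // Ctrl-C or Ctrl-D ends the loop
		default:
			log.Print("error reading line: ", err)
		}
	}
}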
main.go
() { // change work directory folderPath, err := osext.ExecutableFolder() if err != nil { folderPath, err = filepath.Abs(filepath.Dir(os.Args[0])) if err != nil { folderPath = filepath.Dir(os.Args[0]) } } os.Chdir(folderPath) } func main() { app := cli.NewApp() app.Name = "baidupcs_go" app.Author = "iikira/BaiduPCS-Go: https://github.com/iikira/BaiduPCS-Go" app.Usage = fmt.Sprintf("百度网盘工具箱 %s/%s GoVersion %s", runtime.GOOS, runtime.GOARCH, runtime.Version()) app.Description = `baidupcs_go 使用 Go语言编写, 为操作百度网盘, 提供实用功能. 具体功能, 参见 COMMANDS 列表 特色: 网盘内列出文件和目录, 支持通配符匹配路径; 下载网盘内文件, 支持高并发下载和断点续传. 程序目前处于测试版, 后续会添加更多的实用功能.` app.Version = "beta-v1" app.Action = func(c *cli.Context) { if c.NArg() == 0 { cli.ShowAppHelp(c) line := newLiner() defer closeLiner(line) for { if commandLine, err := line.Prompt("BaiduPCS-Go > "); err == nil { line.AppendHistory(commandLine) cmdArgs := args.GetArgs(commandLine) if len(cmdArgs) == 0 { continue } s := []string{os.Args[0]} s = append(s, cmdArgs...) closeLiner(line) c.App.Run(s) line = newLiner() } else if err == liner.ErrPromptAborted || err == io.EOF { break } else { log.Print("Error reading line: ", err) continue } } } else { fmt.Printf("未找到命令: %s\n运行命令 %s help 获取帮助\n", c.Args().Get(0), app.Name) } } app.Commands = []cli.Command{ { Name: "login", Usage: "使用百度BDUSS登录百度账号", Description: fmt.Sprintf("\n 示例: \n\n %s\n\n %s\n\n %s\n\n %s\n\n %s\n", app.Name+" login --bduss=123456789", app.Name+" login", "百度BDUSS获取方法: ", "参考这篇 Wiki: https://github.com/iikira/BaiduPCS-Go/wiki/关于-获取百度-BDUSS", "或者百度搜索: 获取百度BDUSS", ), Category: "百度帐号操作", Before: reloadFn, After: reloadFn, Action: func(c *cli.Context) error { bduss := "" if c.IsSet("bduss") { bduss = c.String("bduss") } else if c.NArg() == 0 { cli.ShowCommandHelp(c, c.Command.Name) line := liner.NewLiner() line.SetCtrlCAborts(true) defer line.Close() bduss, _ = line.Prompt("请输入百度BDUSS值, 回车键提交 > ") } else { cli.ShowCommandHelp(c, c.Command.Name) return nil } username, err := pcsconfig.Config.SetBDUSS(bduss) if err != nil { fmt.Println(err) return nil } fmt.Println("百度帐号登录成功:", username) return nil }, Flags: []cli.Flag{ cli.StringFlag{ Name: "bduss", Usage: "百度BDUSS", }, }, }, { Name: "chuser", Usage: "切换已登录的百度帐号", Description: fmt.Sprintf("%s\n 示例:\n\n %s\n %s\n", "如果运行该条命令没有提供参数, 程序将会列出所有的百度帐号, 供选择切换", app.Name+" chuser --uid=123456789", app.Name+" chuser", ), Category: "百度帐号操作", Before: reloadFn, After: reloadFn, Action: func(c *cli.Context) error { if len(pcsconfig.Config.BaiduUserList) == 0 { fmt.Println("未设置任何百度帐号, 不能切换") return nil } var uid uint64 if c.IsSet("uid") { if pcsconfig.Config.CheckUIDExist(c.Uint64("uid")) { uid = c.Uint64("uid") } else { fmt.Println("切换用户失败, uid 不存在") } } else if c.NArg() == 0 { cli.HandleAction(app.Command("loglist").Action, c) line := liner.NewLiner() line.SetCtrlCAborts(true) defer line.Close() nLine, _ := line.Prompt("请输入要切换帐号的 index 值 > ") if n, err := strconv.Atoi(nLine); err == nil && n >= 0 && n < len(pcsconfig.Config.BaiduUserList) { uid = pcsconfig.Config.BaiduUserList[n].UID } else { fmt.Println("切换用户失败, 请检查 index 值是否正确") } } else { cli.ShowCommandHelp(c, c.Command.Name) } if uid == 0 { return nil } pcsconfig.Config.BaiduActiveUID = uid if err := pcsconfig.Config.Save(); err != nil { fmt.Println(err) return nil } fmt.Printf("切换用户成功, %v\n", pcsconfig.ActiveBaiduUser.Name) return nil }, Flags: []cli.Flag{ cli.StringFlag{ Name: "uid", Usage: "百度帐号 uid 值", }, }, }, { Name: "logout", Usage: "退出已登录的百度帐号", Description: fmt.Sprintf("%s\n 示例:\n\n %s\n %s\n", "如果运行该条命令没有提供参数, 
程序将会列出所有的百度帐号, 供选择退出", app.Name+" logout --uid=123456789", app.Name+" logout", ), Category: "百度帐号操作", Before: reloadFn, After: reloadFn, Action: func(c *cli.Context) error { if len(pcsconfig.Config.BaiduUserList) == 0 { fmt.Println("未设置任何百度帐号, 不能退出") return nil } var uid uint64 if c.IsSet("uid") { if pcsconfig.Config.CheckUIDExist(c.Uint64("uid")) { uid = c.Uint64("uid") } else { fmt.Println("退出用户失败, uid 不存在") } } else if c.NArg() == 0 { cli.HandleAction(app.Command("loglist").Action, c) line := liner.NewLiner() line.SetCtrlCAborts(true) defer line.Close() nLine, _ := line.Prompt("请输入要退出帐号的 index 值 > ") if n, err := strconv.Atoi(nLine); err == nil && n >= 0 && n < len(pcsconfig.Config.BaiduUserList) { uid = pcsconfig.Config.BaiduUserList[n].UID } else { fmt.Println("退出用户失败, 请检查 index 值是否正确") } } else { cli.ShowCommandHelp(c, c.Command.Name) } if uid == 0 { return nil } // 删除之前先获取被删除的数据, 用于下文输出日志 baidu, err := pcsconfig.Config.GetBaiduUserByUID(uid) if err != nil { fmt.Println(err) return nil } if !pcsconfig.Config.DeleteBaiduUserByUID(uid) { fmt.Printf("退出用户失败, %s\n", baidu.Name) } fmt.Printf("退出用户成功, %v\n", baidu.Name) return nil }, Flags: []cli.Flag{ cli.StringFlag{ Name: "uid", Usage: "百度帐号 uid 值", }, }, }, { Name: "loglist", Usage: "获取当前帐号, 和所有已登录的百度帐号", UsageText: fmt.Sprintf("%s loglist", app.Name), Category: "百度帐号操作", Before: reloadFn, Action: func(c *cli.Context) error { fmt.Printf("\n当前帐号 uid: %d, 用户名
init
identifier_name
main.go
for { if commandLine, err := line.Prompt("BaiduPCS-Go > "); err == nil { line.AppendHistory(commandLine) cmdArgs := args.GetArgs(commandLine) if len(cmdArgs) == 0 { continue } s := []string{os.Args[0]} s = append(s, cmdArgs...) closeLiner(line) c.App.Run(s) line = newLiner() } else if err == liner.ErrPromptAborted || err == io.EOF { break } else { log.Print("Error reading line: ", err) continue } } } else { fmt.Printf("未找到命令: %s\n运行命令 %s help 获取帮助\n", c.Args().Get(0), app.Name) } } app.Commands = []cli.Command{ { Name: "login", Usage: "使用百度BDUSS登录百度账号", Description: fmt.Sprintf("\n 示例: \n\n %s\n\n %s\n\n %s\n\n %s\n\n %s\n", app.Name+" login --bduss=123456789", app.Name+" login", "百度BDUSS获取方法: ", "参考这篇 Wiki: https://github.com/iikira/BaiduPCS-Go/wiki/关于-获取百度-BDUSS", "或者百度搜索: 获取百度BDUSS", ), Category: "百度帐号操作", Before: reloadFn, After: reloadFn, Action: func(c *cli.Context) error { bduss := "" if c.IsSet("bduss") { bduss = c.String("bduss") } else if c.NArg() == 0 { cli.ShowCommandHelp(c, c.Command.Name) line := liner.NewLiner() line.SetCtrlCAborts(true) defer line.Close() bduss, _ = line.Prompt("请输入百度BDUSS值, 回车键提交 > ") } else { cli.ShowCommandHelp(c, c.Command.Name) return nil } username, err := pcsconfig.Config.SetBDUSS(bduss) if err != nil { fmt.Println(err) return nil } fmt.Println("百度帐号登录成功:", username) return nil }, Flags: []cli.Flag{ cli.StringFlag{ Name: "bduss", Usage: "百度BDUSS", }, }, }, { Name: "chuser", Usage: "切换已登录的百度帐号", Description: fmt.Sprintf("%s\n 示例:\n\n %s\n %s\n", "如果运行该条命令没有提供参数, 程序将会列出所有的百度帐号, 供选择切换", app.Name+" chuser --uid=123456789", app.Name+" chuser", ), Category: "百度帐号操作", Before: reloadFn, After: reloadFn, Action: func(c *cli.Context) error { if len(pcsconfig.Config.BaiduUserList) == 0 { fmt.Println("未设置任何百度帐号, 不能切换") return nil } var uid uint64 if c.IsSet("uid") { if pcsconfig.Config.CheckUIDExist(c.Uint64("uid")) { uid = c.Uint64("uid") } else { fmt.Println("切换用户失败, uid 不存在") } } else if c.NArg() == 0 { cli.HandleAction(app.Command("loglist").Action, c) line := liner.NewLiner() line.SetCtrlCAborts(true) defer line.Close() nLine, _ := line.Prompt("请输入要切换帐号的 index 值 > ") if n, err := strconv.Atoi(nLine); err == nil && n >= 0 && n < len(pcsconfig.Config.BaiduUserList) { uid = pcsconfig.Config.BaiduUserList[n].UID } else { fmt.Println("切换用户失败, 请检查 index 值是否正确") } } else { cli.ShowCommandHelp(c, c.Command.Name) } if uid == 0 { return nil } pcsconfig.Config.BaiduActiveUID = uid if err := pcsconfig.Config.Save(); err != nil { fmt.Println(err) return nil } fmt.Printf("切换用户成功, %v\n", pcsconfig.ActiveBaiduUser.Name) return nil }, Flags: []cli.Flag{ cli.StringFlag{ Name: "uid", Usage: "百度帐号 uid 值", }, }, }, { Name: "logout", Usage: "退出已登录的百度帐号", Description: fmt.Sprintf("%s\n 示例:\n\n %s\n %s\n", "如果运行该条命令没有提供参数, 程序将会列出所有的百度帐号, 供选择退出", app.Name+" logout --uid=123456789", app.Name+" logout", ), Category: "百度帐号操作", Before: reloadFn, After: reloadFn, Action: func(c *cli.Context) error { if len(pcsconfig.Config.BaiduUserList) == 0 { fmt.Println("未设置任何百度帐号, 不能退出") return nil } var uid uint64 if c.IsSet("uid") { if pcsconfig.Config.CheckUIDExist(c.Uint64("uid")) { uid = c.Uint64("uid") } else { fmt.Println("退出用户失败, uid 不存在") } } else if c.NArg() == 0 { cli.HandleAction(app.Command("loglist").Action, c) line := liner.NewLiner() line.SetCtrlCAborts(true) defer line.Close() nLine, _ := line.Prompt("请输入要退出帐号的 index 值 > ") if n, err := strconv.Atoi(nLine); err == nil && n >= 0 && n < len(pcsconfig.Config.BaiduUserList) { uid = pcsconfig.Config.BaiduUserList[n].UID } else { 
fmt.Println("退出用户失败, 请检查 index 值是否正确") } } else { cli.ShowCommandHelp(c, c.Command.Name) } if uid == 0 { return nil } // 删除之前先获取被删除的数据, 用于下文输出日志 baidu, err := pcsconfig.Config.GetBaiduUserByUID(uid) if err != nil { fmt.Println(err) return nil } if !pcsconfig.Config.DeleteBaiduUserByUID(uid) { fmt.Printf("退出用户失败, %s\n", baidu.Name) } fmt.Printf("退出用户成功, %v\n", baidu.Name) return nil }, Flags: []cli.Flag{ cli.StringFlag{ Name: "uid", Usage: "百度帐号 uid 值", }, }, }, { Name: "loglist", Usage: "获取当前帐号, 和所有已登录的百度帐号", UsageText: fmt.Sprintf("%s loglist", app.Name), Category: "百度帐号操作", Before: reloadFn, Action: func(c *cli.Context) error { fmt.Printf("\n当前帐号 uid: %d, 用户名: %s\n", pcsconfig.ActiveBaiduUser.UID, pcsconfig.ActiveBaiduUser.Name) fmt.Println(pcsconfig.Config.GetAllBaiduUser()) return nil }, }, { Name: "quota", Usage: "获取配额, 即获取网盘总空间, 和已使用
{ app := cli.NewApp() app.Name = "baidupcs_go" app.Author = "iikira/BaiduPCS-Go: https://github.com/iikira/BaiduPCS-Go" app.Usage = fmt.Sprintf("百度网盘工具箱 %s/%s GoVersion %s", runtime.GOOS, runtime.GOARCH, runtime.Version()) app.Description = `baidupcs_go 使用 Go语言编写, 为操作百度网盘, 提供实用功能. 具体功能, 参见 COMMANDS 列表 特色: 网盘内列出文件和目录, 支持通配符匹配路径; 下载网盘内文件, 支持高并发下载和断点续传. 程序目前处于测试版, 后续会添加更多的实用功能.` app.Version = "beta-v1" app.Action = func(c *cli.Context) { if c.NArg() == 0 { cli.ShowAppHelp(c) line := newLiner() defer closeLiner(line)
identifier_body
main.go
for { if commandLine, err := line.Prompt("BaiduPCS-Go > "); err == nil { line.AppendHistory(commandLine) cmdArgs := args.GetArgs(commandLine) if len(cmdArgs) == 0 { continue } s := []string{os.Args[0]} s = append(s, cmdArgs
命令 %s help 获取帮助\n", c.Args().Get(0), app.Name) } } app.Commands = []cli.Command{ { Name: "login", Usage: "使用百度BDUSS登录百度账号", Description: fmt.Sprintf("\n 示例: \n\n %s\n\n %s\n\n %s\n\n %s\n\n %s\n", app.Name+" login --bduss=123456789", app.Name+" login", "百度BDUSS获取方法: ", "参考这篇 Wiki: https://github.com/iikira/BaiduPCS-Go/wiki/关于-获取百度-BDUSS", "或者百度搜索: 获取百度BDUSS", ), Category: "百度帐号操作", Before: reloadFn, After: reloadFn, Action: func(c *cli.Context) error { bduss := "" if c.IsSet("bduss") { bduss = c.String("bduss") } else if c.NArg() == 0 { cli.ShowCommandHelp(c, c.Command.Name) line := liner.NewLiner() line.SetCtrlCAborts(true) defer line.Close() bduss, _ = line.Prompt("请输入百度BDUSS值, 回车键提交 > ") } else { cli.ShowCommandHelp(c, c.Command.Name) return nil } username, err := pcsconfig.Config.SetBDUSS(bduss) if err != nil { fmt.Println(err) return nil } fmt.Println("百度帐号登录成功:", username) return nil }, Flags: []cli.Flag{ cli.StringFlag{ Name: "bduss", Usage: "百度BDUSS", }, }, }, { Name: "chuser", Usage: "切换已登录的百度帐号", Description: fmt.Sprintf("%s\n 示例:\n\n %s\n %s\n", "如果运行该条命令没有提供参数, 程序将会列出所有的百度帐号, 供选择切换", app.Name+" chuser --uid=123456789", app.Name+" chuser", ), Category: "百度帐号操作", Before: reloadFn, After: reloadFn, Action: func(c *cli.Context) error { if len(pcsconfig.Config.BaiduUserList) == 0 { fmt.Println("未设置任何百度帐号, 不能切换") return nil } var uid uint64 if c.IsSet("uid") { if pcsconfig.Config.CheckUIDExist(c.Uint64("uid")) { uid = c.Uint64("uid") } else { fmt.Println("切换用户失败, uid 不存在") } } else if c.NArg() == 0 { cli.HandleAction(app.Command("loglist").Action, c) line := liner.NewLiner() line.SetCtrlCAborts(true) defer line.Close() nLine, _ := line.Prompt("请输入要切换帐号的 index 值 > ") if n, err := strconv.Atoi(nLine); err == nil && n >= 0 && n < len(pcsconfig.Config.BaiduUserList) { uid = pcsconfig.Config.BaiduUserList[n].UID } else { fmt.Println("切换用户失败, 请检查 index 值是否正确") } } else { cli.ShowCommandHelp(c, c.Command.Name) } if uid == 0 { return nil } pcsconfig.Config.BaiduActiveUID = uid if err := pcsconfig.Config.Save(); err != nil { fmt.Println(err) return nil } fmt.Printf("切换用户成功, %v\n", pcsconfig.ActiveBaiduUser.Name) return nil }, Flags: []cli.Flag{ cli.StringFlag{ Name: "uid", Usage: "百度帐号 uid 值", }, }, }, { Name: "logout", Usage: "退出已登录的百度帐号", Description: fmt.Sprintf("%s\n 示例:\n\n %s\n %s\n", "如果运行该条命令没有提供参数, 程序将会列出所有的百度帐号, 供选择退出", app.Name+" logout --uid=123456789", app.Name+" logout", ), Category: "百度帐号操作", Before: reloadFn, After: reloadFn, Action: func(c *cli.Context) error { if len(pcsconfig.Config.BaiduUserList) == 0 { fmt.Println("未设置任何百度帐号, 不能退出") return nil } var uid uint64 if c.IsSet("uid") { if pcsconfig.Config.CheckUIDExist(c.Uint64("uid")) { uid = c.Uint64("uid") } else { fmt.Println("退出用户失败, uid 不存在") } } else if c.NArg() == 0 { cli.HandleAction(app.Command("loglist").Action, c) line := liner.NewLiner() line.SetCtrlCAborts(true) defer line.Close() nLine, _ := line.Prompt("请输入要退出帐号的 index 值 > ") if n, err := strconv.Atoi(nLine); err == nil && n >= 0 && n < len(pcsconfig.Config.BaiduUserList) { uid = pcsconfig.Config.BaiduUserList[n].UID } else { fmt.Println("退出用户失败, 请检查 index 值是否正确") } } else { cli.ShowCommandHelp(c, c.Command.Name) } if uid == 0 { return nil } // 删除之前先获取被删除的数据, 用于下文输出日志 baidu, err := pcsconfig.Config.GetBaiduUserByUID(uid) if err != nil { fmt.Println(err) return nil } if !pcsconfig.Config.DeleteBaiduUserByUID(uid) { fmt.Printf("退出用户失败, %s\n", baidu.Name) } fmt.Printf("退出用户成功, %v\n", baidu.Name) return nil }, Flags: []cli.Flag{ cli.StringFlag{ Name: "uid", Usage: 
"百度帐号 uid 值", }, }, }, { Name: "loglist", Usage: "获取当前帐号, 和所有已登录的百度帐号", UsageText: fmt.Sprintf("%s loglist", app.Name), Category: "百度帐号操作", Before: reloadFn, Action: func(c *cli.Context) error { fmt.Printf("\n当前帐号 uid: %d, 用户名: %s\n", pcsconfig.ActiveBaiduUser.UID, pcsconfig.ActiveBaiduUser.Name) fmt.Println(pcsconfig.Config.GetAllBaiduUser()) return nil }, }, { Name: "quota", Usage: "获取配额, 即获取网盘总空间, 和已使用空间", UsageText: fmt.Sprintf("%s quota", app.Name), Category: "网盘操作", Before: reloadFn, Action: func(c *cli.Context) error { baidupcscmd.RunGetQuota() return nil }, }, { Name: "cd", Usage: "切换工作目录", UsageText: fmt.Sprintf("%s cd <目录>", app.Name), Category: "网盘操作", Before: reloadFn, After: reloadFn, Action: func(c *cli.Context) error { if c.NArg() == 0 { cli.ShowCommandHelp(c, c.Command.Name) return nil } baidupcscmd.RunChangeDirectory(c.Args().Get(0)) return nil }, }, { Name: "ls", Aliases: []string{"l", "ll"}, Usage:
...) closeLiner(line) c.App.Run(s) line = newLiner() } else if err == liner.ErrPromptAborted || err == io.EOF { break } else { log.Print("Error reading line: ", err) continue } } } else { fmt.Printf("未找到命令: %s\n运行
conditional_block
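Inside that loop, main.go re-dispatches each typed line through the urfave/cli app by prepending os.Args[0], so cli treats it like a fresh invocation. A compact sketch of the trick; strings.Fields stands in for the quote-aware args.GetArgs helper the records use:

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/urfave/cli" // v1 API, as in the records above
)

func main() {
	app := cli.NewApp()
	app.Name = "demo"
	app.Commands = []cli.Command{{
		Name: "greet",
		Action: func(c *cli.Context) error {
			fmt.Println("hello,", c.Args().Get(0))
			return nil
		},
	}}

	// Re-dispatch a REPL line: prepend the program name so the cli
	// package parses the rest as a normal command line.
	commandLine := "greet world"
	argv := append([]string{os.Args[0]}, strings.Fields(commandLine)...)
	if err := app.Run(argv); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}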
driver_suite_test.go
odes" "google.golang.org/grpc/status" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" fakedyn "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes" ) func TestDriver(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Driver Suite") } type fakeValidator struct { createdVolumes map[string]int64 } func (dv *fakeValidator) CheckCreateVolumeAllowed(req *csi.CreateVolumeRequest) error { if len(req.GetName()) == 0 { return status.Error(codes.InvalidArgument, "missing name") } if req.GetVolumeCapabilities() == nil { return status.Error(codes.InvalidArgument, "missing volume capabilities") } volumeID := strings.ToUpper(req.GetName()) val, ok := dv.getVolume(volumeID) if !ok { dv.addVolume(volumeID, req.CapacityRange.GetRequiredBytes()) return nil } if val != req.CapacityRange.GetRequiredBytes() { return status.Error(codes.AlreadyExists, "Volume exists") } dv.addVolume(volumeID, req.CapacityRange.GetRequiredBytes()) return nil } func (dv *fakeValidator) CheckDeleteVolumeAllowed(req *csi.DeleteVolumeRequest) error { if len(req.GetVolumeId()) == 0 { return status.Error(codes.InvalidArgument, "missing volume ID") } return nil } func (dv *fakeValidator) CheckValidateVolumeCapabilitiesAllowed(req *csi.ValidateVolumeCapabilitiesRequest) error { if len(req.GetVolumeId()) == 0 { return status.Error(codes.InvalidArgument, "missing volume id") } if req.GetVolumeCapabilities() == nil { return status.Error(codes.InvalidArgument, "missing volume capabilities") } volumeID := strings.ToUpper(req.GetVolumeId()) if len(dv.createdVolumes) > 0 { _, ok := dv.getVolume(volumeID) if !ok { return status.Error(codes.NotFound, "invalid volume id") } } return nil } func (dv *fakeValidator) addVolume(volumeID string, cap int64) { if len(dv.createdVolumes) == 0 { dv.createdVolumes = make(map[string]int64) } dv.createdVolumes[volumeID] = cap } func (dv *fakeValidator) getVolume(volumeID string) (int64, bool) { if len(dv.createdVolumes) == 0 { return 0, false } v, ok := dv.createdVolumes[volumeID] return v, ok } var origDriverHelper = helper.NewDriverHelper() type fakeDriverHelper struct { Clientset kubernetes.Interface Dynclient dynamic.Interface errorOnAction string validator helper.Validator command string } func (dv *fakeDriverHelper) GetClientSet() (kubernetes.Interface, dynamic.Interface, error) { if dv.errorOnAction == "getclientset" { return nil, nil, fmt.Errorf("fake error") } return dv.Clientset, dv.Dynclient, nil } func (dv *fakeDriverHelper) GetMountCommand() string { if dv.command != "" { return dv.command } return "echo" } func (dv *fakeDriverHelper) GetValidator() helper.Validator { return dv.validator } func (dv *fakeDriverHelper) CheckMount(path string) (bool, error) { if dv.errorOnAction == "checkmount" { return false, nil } if dv.errorOnAction == "checkmounterror" { return false, fmt.Errorf("fake error") } return true, nil } func (dv *fakeDriverHelper) WaitForMount(path string, timeout time.Duration) error { if dv.errorOnAction == "waitformount" { return fmt.Errorf("fake error") } return nil } func (dv *fakeDriverHelper) GetDatasetDirectoryNames(targetPath string) []string { return origDriverHelper.GetDatasetDirectoryNames(targetPath) } func (dv *fakeDriverHelper) CleanMountPoint(targetPath string) error { if dv.errorOnAction == "cleanmountpoint" { return fmt.Errorf("fake error") } return os.Remove(targetPath) } func (dv 
*fakeDriverHelper) MkdirAll(path string, perm os.FileMode) error { if dv.errorOnAction == "mkdirall" { return fmt.Errorf("fake error") } return origDriverHelper.MkdirAll(path, perm) } func (dv *fakeDriverHelper) WriteFile(name, content string, flag int, perm os.FileMode) error { if dv.errorOnAction == "Writefile" { return fmt.Errorf("fake error") } return origDriverHelper.WriteFile(name, content, flag, perm) } func (dv *fakeDriverHelper) FileStat(path string) (os.FileInfo, error) { if dv.errorOnAction == "filestat" { return nil, fmt.Errorf("fake error") } return origDriverHelper.FileStat(path) } func (dv *fakeDriverHelper) ReadDir(path string) ([]os.FileInfo, error) { if dv.errorOnAction == "readdir" { return nil, fmt.Errorf("fake error") } return origDriverHelper.ReadDir(path) } func (dv *fakeDriverHelper) RemoveFile(path string) error { if dv.errorOnAction == "removefileignore" { return nil } if dv.errorOnAction == "removefile" { return fmt.Errorf("fake error") } return origDriverHelper.RemoveFile(path) } func (dv *fakeDriverHelper) ParseJWTclaims(jwtString string) (jwt.MapClaims, error) { return origDriverHelper.ParseJWTclaims(jwtString) }
return origDriverHelper.GetClaimValue(claims, key) } func (dv *fakeDriverHelper) MarshaljSON(v interface{}) ([]byte, error) { if dv.errorOnAction == "marshaljson" { return nil, fmt.Errorf("fake error") } return origDriverHelper.MarshaljSON(v) } func (dv *fakeDriverHelper) UnMarshaljSON(data []byte, v interface{}) error { if dv.errorOnAction == "unmarshaljson" { return fmt.Errorf("fake error") } return origDriverHelper.UnMarshaljSON(data, v) } func getTestPodResource() *corev1.Pod { return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "testpod", }, Spec: corev1.PodSpec{ NodeName: "test-node", Containers: []corev1.Container{ { Name: "test", Image: "nginx", }, }, }, Status: corev1.PodStatus{ Phase: corev1.PodRunning, }, } } func getTestPullSecret() *corev1.Secret { data := make(map[string][]byte) fakePS := "eyJhbGciOiJIUzI1NiJ9.eyJpYW1fYXBpa2V5IjoiaWFta2V5IiwicmhtQWNjb3VudElkIjoiMTIzNDUiLCJlbnRpdGxlbWVudC5zdG9yYWdlLmhtYWMuYWNjZXNzX2tleV9pZCI6ImhtYWNrZXkiLCJlbnRpdGxlbWVudC5zdG9yYWdlLmhtYWMuc2VjcmV0X2FjY2Vzc19rZXkiOiJhY2Nlc3NrZXkiLCJpc3MiOiJJQk0gTWFya2V0cGxhY2UiLCJpYXQiOjEyMywianRpIjoiYWJjIn0.dJOBqdEpMzs4PCmnzTUT9ITGO2G_ZwiemjJbFtb3lmQ" data["PULL_SECRET"] = []byte(fakePS) return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: common.MarketplaceNamespace, Name: "redhat-marketplace-pull-secret", }, Type: corev1.SecretTypeOpaque, Data: data, } } func getTestCSIDriverResource() runtime.Object { return &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "marketplace.redhat.com/v1alpha1", "kind": "MarketplaceCSIDriver", "metadata": map[string]interface{}{ "namespace": common.MarketplaceNamespace, "name": common.ResourceNameS3Driver, }, "spec": map[string]interface{}{ "endpoint": "a.b.com", "mountRootPath": "/var/redhat-marketplace/datasets", "credential": map[string]interface{}{ "name": "redhat-marketplace-pull-secret", "type": "rhm-pull-secret", }, "mountOptions": []interface{}{"foo", "bar
func (dv *fakeDriverHelper) GetClaimValue(claims jwt.MapClaims, key string) (string, error) {
random_line_split
driver_suite_test.go
" "google.golang.org/grpc/status" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" fakedyn "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes" ) func TestDriver(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Driver Suite") } type fakeValidator struct { createdVolumes map[string]int64 } func (dv *fakeValidator) CheckCreateVolumeAllowed(req *csi.CreateVolumeRequest) error { if len(req.GetName()) == 0 { return status.Error(codes.InvalidArgument, "missing name") } if req.GetVolumeCapabilities() == nil { return status.Error(codes.InvalidArgument, "missing volume capabilities") } volumeID := strings.ToUpper(req.GetName()) val, ok := dv.getVolume(volumeID) if !ok { dv.addVolume(volumeID, req.CapacityRange.GetRequiredBytes()) return nil } if val != req.CapacityRange.GetRequiredBytes() { return status.Error(codes.AlreadyExists, "Volume exists") } dv.addVolume(volumeID, req.CapacityRange.GetRequiredBytes()) return nil } func (dv *fakeValidator) CheckDeleteVolumeAllowed(req *csi.DeleteVolumeRequest) error { if len(req.GetVolumeId()) == 0 { return status.Error(codes.InvalidArgument, "missing volume ID") } return nil } func (dv *fakeValidator) CheckValidateVolumeCapabilitiesAllowed(req *csi.ValidateVolumeCapabilitiesRequest) error { if len(req.GetVolumeId()) == 0 { return status.Error(codes.InvalidArgument, "missing volume id") } if req.GetVolumeCapabilities() == nil { return status.Error(codes.InvalidArgument, "missing volume capabilities") } volumeID := strings.ToUpper(req.GetVolumeId()) if len(dv.createdVolumes) > 0 { _, ok := dv.getVolume(volumeID) if !ok { return status.Error(codes.NotFound, "invalid volume id") } } return nil } func (dv *fakeValidator) addVolume(volumeID string, cap int64) { if len(dv.createdVolumes) == 0 { dv.createdVolumes = make(map[string]int64) } dv.createdVolumes[volumeID] = cap } func (dv *fakeValidator) getVolume(volumeID string) (int64, bool) { if len(dv.createdVolumes) == 0 { return 0, false } v, ok := dv.createdVolumes[volumeID] return v, ok } var origDriverHelper = helper.NewDriverHelper() type fakeDriverHelper struct { Clientset kubernetes.Interface Dynclient dynamic.Interface errorOnAction string validator helper.Validator command string } func (dv *fakeDriverHelper) GetClientSet() (kubernetes.Interface, dynamic.Interface, error) { if dv.errorOnAction == "getclientset" { return nil, nil, fmt.Errorf("fake error") } return dv.Clientset, dv.Dynclient, nil } func (dv *fakeDriverHelper) GetMountCommand() string { if dv.command != "" { return dv.command } return "echo" } func (dv *fakeDriverHelper) GetValidator() helper.Validator { return dv.validator } func (dv *fakeDriverHelper) CheckMount(path string) (bool, error) { if dv.errorOnAction == "checkmount" { return false, nil } if dv.errorOnAction == "checkmounterror" { return false, fmt.Errorf("fake error") } return true, nil } func (dv *fakeDriverHelper) WaitForMount(path string, timeout time.Duration) error { if dv.errorOnAction == "waitformount" { return fmt.Errorf("fake error") } return nil } func (dv *fakeDriverHelper) GetDatasetDirectoryNames(targetPath string) []string { return origDriverHelper.GetDatasetDirectoryNames(targetPath) } func (dv *fakeDriverHelper) CleanMountPoint(targetPath string) error { if dv.errorOnAction == "cleanmountpoint" { return fmt.Errorf("fake error") } return os.Remove(targetPath) } func (dv 
*fakeDriverHelper) MkdirAll(path string, perm os.FileMode) error { if dv.errorOnAction == "mkdirall"
return origDriverHelper.MkdirAll(path, perm) } func (dv *fakeDriverHelper) WriteFile(name, content string, flag int, perm os.FileMode) error { if dv.errorOnAction == "Writefile" { return fmt.Errorf("fake error") } return origDriverHelper.WriteFile(name, content, flag, perm) } func (dv *fakeDriverHelper) FileStat(path string) (os.FileInfo, error) { if dv.errorOnAction == "filestat" { return nil, fmt.Errorf("fake error") } return origDriverHelper.FileStat(path) } func (dv *fakeDriverHelper) ReadDir(path string) ([]os.FileInfo, error) { if dv.errorOnAction == "readdir" { return nil, fmt.Errorf("fake error") } return origDriverHelper.ReadDir(path) } func (dv *fakeDriverHelper) RemoveFile(path string) error { if dv.errorOnAction == "removefileignore" { return nil } if dv.errorOnAction == "removefile" { return fmt.Errorf("fake error") } return origDriverHelper.RemoveFile(path) } func (dv *fakeDriverHelper) ParseJWTclaims(jwtString string) (jwt.MapClaims, error) { return origDriverHelper.ParseJWTclaims(jwtString) } func (dv *fakeDriverHelper) GetClaimValue(claims jwt.MapClaims, key string) (string, error) { return origDriverHelper.GetClaimValue(claims, key) } func (dv *fakeDriverHelper) MarshaljSON(v interface{}) ([]byte, error) { if dv.errorOnAction == "marshaljson" { return nil, fmt.Errorf("fake error") } return origDriverHelper.MarshaljSON(v) } func (dv *fakeDriverHelper) UnMarshaljSON(data []byte, v interface{}) error { if dv.errorOnAction == "unmarshaljson" { return fmt.Errorf("fake error") } return origDriverHelper.UnMarshaljSON(data, v) } func getTestPodResource() *corev1.Pod { return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "testpod", }, Spec: corev1.PodSpec{ NodeName: "test-node", Containers: []corev1.Container{ { Name: "test", Image: "nginx", }, }, }, Status: corev1.PodStatus{ Phase: corev1.PodRunning, }, } } func getTestPullSecret() *corev1.Secret { data := make(map[string][]byte) fakePS := "eyJhbGciOiJIUzI1NiJ9.eyJpYW1fYXBpa2V5IjoiaWFta2V5IiwicmhtQWNjb3VudElkIjoiMTIzNDUiLCJlbnRpdGxlbWVudC5zdG9yYWdlLmhtYWMuYWNjZXNzX2tleV9pZCI6ImhtYWNrZXkiLCJlbnRpdGxlbWVudC5zdG9yYWdlLmhtYWMuc2VjcmV0X2FjY2Vzc19rZXkiOiJhY2Nlc3NrZXkiLCJpc3MiOiJJQk0gTWFya2V0cGxhY2UiLCJpYXQiOjEyMywianRpIjoiYWJjIn0.dJOBqdEpMzs4PCmnzTUT9ITGO2G_ZwiemjJbFtb3lmQ" data["PULL_SECRET"] = []byte(fakePS) return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: common.MarketplaceNamespace, Name: "redhat-marketplace-pull-secret", }, Type: corev1.SecretTypeOpaque, Data: data, } } func getTestCSIDriverResource() runtime.Object { return &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "marketplace.redhat.com/v1alpha1", "kind": "MarketplaceCSIDriver", "metadata": map[string]interface{}{ "namespace": common.MarketplaceNamespace, "name": common.ResourceNameS3Driver, }, "spec": map[string]interface{}{ "endpoint": "a.b.com", "mountRootPath": "/var/redhat-marketplace/datasets", "credential": map[string]interface{}{ "name": "redhat-marketplace-pull-secret", "type": "rhm-pull-secret", }, "mountOptions": []interface{}{"foo", "
{ return fmt.Errorf("fake error") }
conditional_block
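The fakeValidator above encodes an idempotency rule for volume creation: repeating a create with the same name and capacity succeeds, while a capacity mismatch is rejected with codes.AlreadyExists. A minimal sketch that exercises this rule, assuming only the csi and fmt imports already present in the suite (the volume name and sizes are illustrative):

```go
// Sketch only: exercises the fakeValidator idempotency rule shown above.
func exampleCreateVolumeIdempotency() error {
	v := &fakeValidator{}
	req := &csi.CreateVolumeRequest{
		Name:               "dataset-a",
		CapacityRange:      &csi.CapacityRange{RequiredBytes: 1024},
		VolumeCapabilities: []*csi.VolumeCapability{{}},
	}
	// First create registers the volume; an identical retry is allowed.
	if err := v.CheckCreateVolumeAllowed(req); err != nil {
		return err
	}
	if err := v.CheckCreateVolumeAllowed(req); err != nil {
		return err
	}
	// Same name with a different capacity must be rejected as AlreadyExists.
	req.CapacityRange.RequiredBytes = 2048
	if err := v.CheckCreateVolumeAllowed(req); err == nil {
		return fmt.Errorf("expected AlreadyExists, got nil")
	}
	return nil
}
```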
driver_suite_test.go
odes" "google.golang.org/grpc/status" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" fakedyn "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes" ) func TestDriver(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Driver Suite") } type fakeValidator struct { createdVolumes map[string]int64 } func (dv *fakeValidator) CheckCreateVolumeAllowed(req *csi.CreateVolumeRequest) error { if len(req.GetName()) == 0 { return status.Error(codes.InvalidArgument, "missing name") } if req.GetVolumeCapabilities() == nil { return status.Error(codes.InvalidArgument, "missing volume capabilities") } volumeID := strings.ToUpper(req.GetName()) val, ok := dv.getVolume(volumeID) if !ok { dv.addVolume(volumeID, req.CapacityRange.GetRequiredBytes()) return nil } if val != req.CapacityRange.GetRequiredBytes() { return status.Error(codes.AlreadyExists, "Volume exists") } dv.addVolume(volumeID, req.CapacityRange.GetRequiredBytes()) return nil } func (dv *fakeValidator) CheckDeleteVolumeAllowed(req *csi.DeleteVolumeRequest) error { if len(req.GetVolumeId()) == 0 { return status.Error(codes.InvalidArgument, "missing volume ID") } return nil } func (dv *fakeValidator) CheckValidateVolumeCapabilitiesAllowed(req *csi.ValidateVolumeCapabilitiesRequest) error { if len(req.GetVolumeId()) == 0 { return status.Error(codes.InvalidArgument, "missing volume id") } if req.GetVolumeCapabilities() == nil { return status.Error(codes.InvalidArgument, "missing volume capabilities") } volumeID := strings.ToUpper(req.GetVolumeId()) if len(dv.createdVolumes) > 0 { _, ok := dv.getVolume(volumeID) if !ok { return status.Error(codes.NotFound, "invalid volume id") } } return nil } func (dv *fakeValidator) addVolume(volumeID string, cap int64) { if len(dv.createdVolumes) == 0 { dv.createdVolumes = make(map[string]int64) } dv.createdVolumes[volumeID] = cap } func (dv *fakeValidator) getVolume(volumeID string) (int64, bool) { if len(dv.createdVolumes) == 0 { return 0, false } v, ok := dv.createdVolumes[volumeID] return v, ok } var origDriverHelper = helper.NewDriverHelper() type fakeDriverHelper struct { Clientset kubernetes.Interface Dynclient dynamic.Interface errorOnAction string validator helper.Validator command string } func (dv *fakeDriverHelper) GetClientSet() (kubernetes.Interface, dynamic.Interface, error) { if dv.errorOnAction == "getclientset" { return nil, nil, fmt.Errorf("fake error") } return dv.Clientset, dv.Dynclient, nil } func (dv *fakeDriverHelper) GetMountCommand() string { if dv.command != "" { return dv.command } return "echo" } func (dv *fakeDriverHelper) GetValidator() helper.Validator { return dv.validator } func (dv *fakeDriverHelper) CheckMount(path string) (bool, error) { if dv.errorOnAction == "checkmount" { return false, nil } if dv.errorOnAction == "checkmounterror" { return false, fmt.Errorf("fake error") } return true, nil } func (dv *fakeDriverHelper) WaitForMount(path string, timeout time.Duration) error { if dv.errorOnAction == "waitformount" { return fmt.Errorf("fake error") } return nil } func (dv *fakeDriverHelper)
(targetPath string) []string { return origDriverHelper.GetDatasetDirectoryNames(targetPath) } func (dv *fakeDriverHelper) CleanMountPoint(targetPath string) error { if dv.errorOnAction == "cleanmountpoint" { return fmt.Errorf("fake error") } return os.Remove(targetPath) } func (dv *fakeDriverHelper) MkdirAll(path string, perm os.FileMode) error { if dv.errorOnAction == "mkdirall" { return fmt.Errorf("fake error") } return origDriverHelper.MkdirAll(path, perm) } func (dv *fakeDriverHelper) WriteFile(name, content string, flag int, perm os.FileMode) error { if dv.errorOnAction == "Writefile" { return fmt.Errorf("fake error") } return origDriverHelper.WriteFile(name, content, flag, perm) } func (dv *fakeDriverHelper) FileStat(path string) (os.FileInfo, error) { if dv.errorOnAction == "filestat" { return nil, fmt.Errorf("fake error") } return origDriverHelper.FileStat(path) } func (dv *fakeDriverHelper) ReadDir(path string) ([]os.FileInfo, error) { if dv.errorOnAction == "readdir" { return nil, fmt.Errorf("fake error") } return origDriverHelper.ReadDir(path) } func (dv *fakeDriverHelper) RemoveFile(path string) error { if dv.errorOnAction == "removefileignore" { return nil } if dv.errorOnAction == "removefile" { return fmt.Errorf("fake error") } return origDriverHelper.RemoveFile(path) } func (dv *fakeDriverHelper) ParseJWTclaims(jwtString string) (jwt.MapClaims, error) { return origDriverHelper.ParseJWTclaims(jwtString) } func (dv *fakeDriverHelper) GetClaimValue(claims jwt.MapClaims, key string) (string, error) { return origDriverHelper.GetClaimValue(claims, key) } func (dv *fakeDriverHelper) MarshaljSON(v interface{}) ([]byte, error) { if dv.errorOnAction == "marshaljson" { return nil, fmt.Errorf("fake error") } return origDriverHelper.MarshaljSON(v) } func (dv *fakeDriverHelper) UnMarshaljSON(data []byte, v interface{}) error { if dv.errorOnAction == "unmarshaljson" { return fmt.Errorf("fake error") } return origDriverHelper.UnMarshaljSON(data, v) } func getTestPodResource() *corev1.Pod { return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "testpod", }, Spec: corev1.PodSpec{ NodeName: "test-node", Containers: []corev1.Container{ { Name: "test", Image: "nginx", }, }, }, Status: corev1.PodStatus{ Phase: corev1.PodRunning, }, } } func getTestPullSecret() *corev1.Secret { data := make(map[string][]byte) fakePS := "eyJhbGciOiJIUzI1NiJ9.eyJpYW1fYXBpa2V5IjoiaWFta2V5IiwicmhtQWNjb3VudElkIjoiMTIzNDUiLCJlbnRpdGxlbWVudC5zdG9yYWdlLmhtYWMuYWNjZXNzX2tleV9pZCI6ImhtYWNrZXkiLCJlbnRpdGxlbWVudC5zdG9yYWdlLmhtYWMuc2VjcmV0X2FjY2Vzc19rZXkiOiJhY2Nlc3NrZXkiLCJpc3MiOiJJQk0gTWFya2V0cGxhY2UiLCJpYXQiOjEyMywianRpIjoiYWJjIn0.dJOBqdEpMzs4PCmnzTUT9ITGO2G_ZwiemjJbFtb3lmQ" data["PULL_SECRET"] = []byte(fakePS) return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: common.MarketplaceNamespace, Name: "redhat-marketplace-pull-secret", }, Type: corev1.SecretTypeOpaque, Data: data, } } func getTestCSIDriverResource() runtime.Object { return &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "marketplace.redhat.com/v1alpha1", "kind": "MarketplaceCSIDriver", "metadata": map[string]interface{}{ "namespace": common.MarketplaceNamespace, "name": common.ResourceNameS3Driver, }, "spec": map[string]interface{}{ "endpoint": "a.b.com", "mountRootPath": "/var/redhat-marketplace/datasets", "credential": map[string]interface{}{ "name": "redhat-marketplace-pull-secret", "type": "rhm-pull-secret", }, "mountOptions": []interface{}{"foo", "
GetDatasetDirectoryNames
identifier_name
driver_suite_test.go
" "google.golang.org/grpc/status" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" fakedyn "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes" ) func TestDriver(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Driver Suite") } type fakeValidator struct { createdVolumes map[string]int64 } func (dv *fakeValidator) CheckCreateVolumeAllowed(req *csi.CreateVolumeRequest) error { if len(req.GetName()) == 0 { return status.Error(codes.InvalidArgument, "missing name") } if req.GetVolumeCapabilities() == nil { return status.Error(codes.InvalidArgument, "missing volume capabilities") } volumeID := strings.ToUpper(req.GetName()) val, ok := dv.getVolume(volumeID) if !ok { dv.addVolume(volumeID, req.CapacityRange.GetRequiredBytes()) return nil } if val != req.CapacityRange.GetRequiredBytes() { return status.Error(codes.AlreadyExists, "Volume exists") } dv.addVolume(volumeID, req.CapacityRange.GetRequiredBytes()) return nil } func (dv *fakeValidator) CheckDeleteVolumeAllowed(req *csi.DeleteVolumeRequest) error { if len(req.GetVolumeId()) == 0 { return status.Error(codes.InvalidArgument, "missing volume ID") } return nil } func (dv *fakeValidator) CheckValidateVolumeCapabilitiesAllowed(req *csi.ValidateVolumeCapabilitiesRequest) error { if len(req.GetVolumeId()) == 0 { return status.Error(codes.InvalidArgument, "missing volume id") } if req.GetVolumeCapabilities() == nil { return status.Error(codes.InvalidArgument, "missing volume capabilities") } volumeID := strings.ToUpper(req.GetVolumeId()) if len(dv.createdVolumes) > 0 { _, ok := dv.getVolume(volumeID) if !ok { return status.Error(codes.NotFound, "invalid volume id") } } return nil } func (dv *fakeValidator) addVolume(volumeID string, cap int64) { if len(dv.createdVolumes) == 0 { dv.createdVolumes = make(map[string]int64) } dv.createdVolumes[volumeID] = cap } func (dv *fakeValidator) getVolume(volumeID string) (int64, bool) { if len(dv.createdVolumes) == 0 { return 0, false } v, ok := dv.createdVolumes[volumeID] return v, ok } var origDriverHelper = helper.NewDriverHelper() type fakeDriverHelper struct { Clientset kubernetes.Interface Dynclient dynamic.Interface errorOnAction string validator helper.Validator command string } func (dv *fakeDriverHelper) GetClientSet() (kubernetes.Interface, dynamic.Interface, error) { if dv.errorOnAction == "getclientset" { return nil, nil, fmt.Errorf("fake error") } return dv.Clientset, dv.Dynclient, nil } func (dv *fakeDriverHelper) GetMountCommand() string { if dv.command != "" { return dv.command } return "echo" } func (dv *fakeDriverHelper) GetValidator() helper.Validator { return dv.validator } func (dv *fakeDriverHelper) CheckMount(path string) (bool, error) { if dv.errorOnAction == "checkmount" { return false, nil } if dv.errorOnAction == "checkmounterror" { return false, fmt.Errorf("fake error") } return true, nil } func (dv *fakeDriverHelper) WaitForMount(path string, timeout time.Duration) error { if dv.errorOnAction == "waitformount" { return fmt.Errorf("fake error") } return nil } func (dv *fakeDriverHelper) GetDatasetDirectoryNames(targetPath string) []string { return origDriverHelper.GetDatasetDirectoryNames(targetPath) } func (dv *fakeDriverHelper) CleanMountPoint(targetPath string) error { if dv.errorOnAction == "cleanmountpoint" { return fmt.Errorf("fake error") } return os.Remove(targetPath) } func (dv 
*fakeDriverHelper) MkdirAll(path string, perm os.FileMode) error
func (dv *fakeDriverHelper) WriteFile(name, content string, flag int, perm os.FileMode) error { if dv.errorOnAction == "Writefile" { return fmt.Errorf("fake error") } return origDriverHelper.WriteFile(name, content, flag, perm) } func (dv *fakeDriverHelper) FileStat(path string) (os.FileInfo, error) { if dv.errorOnAction == "filestat" { return nil, fmt.Errorf("fake error") } return origDriverHelper.FileStat(path) } func (dv *fakeDriverHelper) ReadDir(path string) ([]os.FileInfo, error) { if dv.errorOnAction == "readdir" { return nil, fmt.Errorf("fake error") } return origDriverHelper.ReadDir(path) } func (dv *fakeDriverHelper) RemoveFile(path string) error { if dv.errorOnAction == "removefileignore" { return nil } if dv.errorOnAction == "removefile" { return fmt.Errorf("fake error") } return origDriverHelper.RemoveFile(path) } func (dv *fakeDriverHelper) ParseJWTclaims(jwtString string) (jwt.MapClaims, error) { return origDriverHelper.ParseJWTclaims(jwtString) } func (dv *fakeDriverHelper) GetClaimValue(claims jwt.MapClaims, key string) (string, error) { return origDriverHelper.GetClaimValue(claims, key) } func (dv *fakeDriverHelper) MarshaljSON(v interface{}) ([]byte, error) { if dv.errorOnAction == "marshaljson" { return nil, fmt.Errorf("fake error") } return origDriverHelper.MarshaljSON(v) } func (dv *fakeDriverHelper) UnMarshaljSON(data []byte, v interface{}) error { if dv.errorOnAction == "unmarshaljson" { return fmt.Errorf("fake error") } return origDriverHelper.UnMarshaljSON(data, v) } func getTestPodResource() *corev1.Pod { return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "testpod", }, Spec: corev1.PodSpec{ NodeName: "test-node", Containers: []corev1.Container{ { Name: "test", Image: "nginx", }, }, }, Status: corev1.PodStatus{ Phase: corev1.PodRunning, }, } } func getTestPullSecret() *corev1.Secret { data := make(map[string][]byte) fakePS := "eyJhbGciOiJIUzI1NiJ9.eyJpYW1fYXBpa2V5IjoiaWFta2V5IiwicmhtQWNjb3VudElkIjoiMTIzNDUiLCJlbnRpdGxlbWVudC5zdG9yYWdlLmhtYWMuYWNjZXNzX2tleV9pZCI6ImhtYWNrZXkiLCJlbnRpdGxlbWVudC5zdG9yYWdlLmhtYWMuc2VjcmV0X2FjY2Vzc19rZXkiOiJhY2Nlc3NrZXkiLCJpc3MiOiJJQk0gTWFya2V0cGxhY2UiLCJpYXQiOjEyMywianRpIjoiYWJjIn0.dJOBqdEpMzs4PCmnzTUT9ITGO2G_ZwiemjJbFtb3lmQ" data["PULL_SECRET"] = []byte(fakePS) return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: common.MarketplaceNamespace, Name: "redhat-marketplace-pull-secret", }, Type: corev1.SecretTypeOpaque, Data: data, } } func getTestCSIDriverResource() runtime.Object { return &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "marketplace.redhat.com/v1alpha1", "kind": "MarketplaceCSIDriver", "metadata": map[string]interface{}{ "namespace": common.MarketplaceNamespace, "name": common.ResourceNameS3Driver, }, "spec": map[string]interface{}{ "endpoint": "a.b.com", "mountRootPath": "/var/redhat-marketplace/datasets", "credential": map[string]interface{}{ "name": "redhat-marketplace-pull-secret", "type": "rhm-pull-secret", }, "mountOptions": []interface{}{"foo", "
{ if dv.errorOnAction == "mkdirall" { return fmt.Errorf("fake error") } return origDriverHelper.MkdirAll(path, perm) }
identifier_body
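The errorOnAction string above is the suite's single fault-injection knob: each helper method compares it against its own key and returns a canned failure on a match, otherwise delegating to origDriverHelper. A minimal sketch of forcing one failure at a time (path and permissions are illustrative):

```go
// Sketch only: inject a single failure through the errorOnAction switch above.
h := &fakeDriverHelper{
	errorOnAction: "mkdirall", // matches the key checked inside MkdirAll
	validator:     &fakeValidator{},
}
if err := h.MkdirAll("/tmp/example", 0o755); err == nil {
	panic("expected the injected mkdirall failure")
}
// Clearing the knob restores pass-through to origDriverHelper.
h.errorOnAction = ""
```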
libvirt.go
DomainCreateFlagStartValidate ) // RebootFlags specifies domain reboot methods type RebootFlags uint32 const ( // RebootAcpiPowerBtn - send ACPI event RebootAcpiPowerBtn RebootFlags = 1 << iota // RebootGuestAgent - use guest agent RebootGuestAgent // RebootInitctl - use initctl RebootInitctl // RebootSignal - use signal RebootSignal // RebootParavirt - use paravirt guest control RebootParavirt ) // DomainMemoryStatTag specifies domain memory tags type DomainMemoryStatTag uint32 const ( // DomainMemoryStatTagSwapIn - The total amount of data read from swap space (in kB). DomainMemoryStatTagSwapIn DomainMemoryStatTag = iota // DomainMemoryStatTagSwapOut - The total amount of memory written out to swap space (in kB). DomainMemoryStatTagSwapOut // DomainMemoryStatTagMajorFault - Page faults occur when a process makes a valid access to virtual memory // that is not available. When servicing the page fault, if disk IO is // required, it is considered a major fault. // These are expressed as the number of faults that have occurred. DomainMemoryStatTagMajorFault // DomainMemoryStatTagMinorFault - If the page fault does not require disk IO, it is a minor fault. DomainMemoryStatTagMinorFault // DomainMemoryStatTagUnused - The amount of memory left completely unused by the system (in kB). DomainMemoryStatTagUnused // DomainMemoryStatTagAvailable - The total amount of usable memory as seen by the domain (in kB). DomainMemoryStatTagAvailable // DomainMemoryStatTagActualBalloon - Current balloon value (in KB). DomainMemoryStatTagActualBalloon // DomainMemoryStatTagRss - Resident Set Size of the process running the domain (in KB). DomainMemoryStatTagRss // DomainMemoryStatTagUsable - How much the balloon can be inflated without pushing the guest system // to swap, corresponds to 'Available' in /proc/meminfo DomainMemoryStatTagUsable // DomainMemoryStatTagLastUpdate - Timestamp of the last update of statistics, in seconds. DomainMemoryStatTagLastUpdate // DomainMemoryStatTagNr - The number of statistics supported by this version of the interface. DomainMemoryStatTagNr ) // Capabilities returns an XML document describing the host's capabilities. func (l *Libvirt) Capabilities() ([]byte, error) { caps, err := l.ConnectGetCapabilities() return []byte(caps), err } // Connect establishes communication with the libvirt server. // The underlying libvirt socket connection must be previously established. func (l *Libvirt) Connect() error { return l.connect() } // Disconnect shuts down communication with the libvirt server // and closes the underlying net.Conn. func (l *Libvirt) Disconnect() error { // close event streams for id := range l.events { if err := l.removeStream(id); err != nil { return err } } // inform libvirt we're done if err := l.disconnect(); err != nil { return err } return l.conn.Close() } // Domains returns a list of all domains managed by libvirt. func (l *Libvirt) Domains() ([]Domain, error) { // these are the flags as passed by `virsh`, defined in: // src/remote/remote_protocol.x # remote_connect_list_all_domains_args domains, _, err := l.ConnectListAllDomains(1, 3) return domains, err } // DomainState returns state of the domain managed by libvirt. func (l *Libvirt) DomainState(dom string) (DomainState, error) { d, err := l.lookup(dom) if err != nil { return DomainStateNoState, err } state, _, err := l.DomainGetState(d, 0) return DomainState(state), err } // Events streams domain events. // If a problem is encountered setting up the event monitor connection // an error will be returned.
Errors encountered during streaming will // cause the returned event channel to be closed. func (l *Libvirt) Events(dom string) (<-chan DomainEvent, error) { d, err := l.lookup(dom) if err != nil { return nil, err } payload := struct { Padding [4]byte Domain Domain Event [2]byte Flags [2]byte }{ Padding: [4]byte{0x0, 0x0, 0x1, 0x0}, Domain: d, Event: [2]byte{0x0, 0x0}, Flags: [2]byte{0x0, 0x0}, } buf, err := encode(&payload) if err != nil { return nil, err } resp, err := l.request(constants.QEMUConnectDomainMonitorEventRegister, constants.ProgramQEMU, &buf) if err != nil { return nil, err } res := <-resp if res.Status != StatusOK { err = decodeError(res.Payload) if err == ErrUnsupported { return nil, ErrEventsNotSupported } return nil, decodeError(res.Payload) } dec := xdr.NewDecoder(bytes.NewReader(res.Payload)) cbID, _, err := dec.DecodeUint() if err != nil { return nil, err } stream := make(chan *DomainEvent) l.addStream(cbID, stream) c := make(chan DomainEvent) go func() { // process events for e := range stream { c <- *e } }() return c, nil } // Migrate synchronously migrates the domain specified by dom, e.g., // 'prod-lb-01', to the destination hypervisor specified by dest, e.g., // 'qemu+tcp://example.com/system'. The flags argument determines the // type of migration and how it will be performed. For more information // on available migration flags and their meaning, see MigrateFlag*. func (l *Libvirt) Migrate(dom string, dest string, flags MigrateFlags) error { _, err := url.Parse(dest) if err != nil { return err } d, err := l.lookup(dom) if err != nil { return err } // Two unknowns remain here: Libvirt specifies RemoteParameters // and CookieIn. In testing both values are always set to 0 by virsh // and the source does not provide clear definitions of their purpose. // For now, using the same zeroed values as done by virsh will be Good Enough. destURI := []string{dest} remoteParams := []TypedParam{} cookieIn := []byte{} _, err = l.DomainMigratePerform3Params(d, destURI, remoteParams, cookieIn, uint32(flags)) return err } // MigrateSetMaxSpeed sets the maximum migration bandwidth (in MiB/s) for a // domain which is being migrated to another host. Specifying a negative value // results in an essentially unlimited value being provided to the hypervisor. func (l *Libvirt) MigrateSetMaxSpeed(dom string, speed int64) error { d, err := l.lookup(dom) if err != nil { return err } return l.DomainMigrateSetMaxSpeed(d, uint64(speed), 0) } // Run executes the given QAPI command against a domain's QEMU instance.
// For a list of available QAPI commands, see: // http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD func (l *Libvirt) Run(dom string, cmd []byte) ([]byte, error) { d, err := l.lookup(dom) if err != nil { return nil, err } payload := struct { Domain Domain Command []byte Flags uint32 }{ Domain: d, Command: cmd, Flags: 0, } buf, err := encode(&payload) if err != nil { return nil, err } resp, err := l.request(constants.QEMUDomainMonitor, constants.ProgramQEMU, &buf) if err != nil { return nil, err } res := <-resp // check for libvirt errors if res.Status != StatusOK { return nil, decodeError(res.Payload) } // check for QEMU process errors if err = getQEMUError(res); err != nil { return nil, err } r := bytes.NewReader(res.Payload) dec := xdr.NewDecoder(r) data, _, err := dec.DecodeFixedOpaque(int32(r.Len())) if err != nil { return nil, err } // drop QMP control characters from start of line, and drop // any trailing NULL characters from the end return bytes.TrimRight(data[4:], "\x00"), nil } // Secrets returns all secrets managed by the libvirt daemon. func (l *Libvirt) Secrets() ([]Secret, error) { secrets, _, err := l.ConnectListAllSecrets(1, 0) return secrets, err
}
random_line_split
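The exported helpers above compose into the client's usual lifecycle: establish the socket connection, Connect, issue calls such as Domains or Migrate, then Disconnect. A self-contained sketch, assuming the digitalocean/go-libvirt package layout where New wraps an established net.Conn; the import path, socket path, and domain handling are assumptions based on that layout:

```go
package main

import (
	"fmt"
	"log"
	"net"
	"time"

	libvirt "github.com/digitalocean/go-libvirt" // assumed import path
)

func main() {
	// Dial the local libvirt socket first; the client only speaks the RPC protocol.
	conn, err := net.DialTimeout("unix", "/var/run/libvirt/libvirt-sock", 2*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	l := libvirt.New(conn)
	if err := l.Connect(); err != nil {
		log.Fatal(err)
	}
	defer l.Disconnect()

	// Domains wraps ConnectListAllDomains with virsh's default flags, as shown above.
	domains, err := l.Domains()
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range domains {
		fmt.Println(d.Name)
	}
}
```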
libvirt.go
KB). DomainMemoryStatTagActualBalloon // DomainMemoryStatTagRss - Resident Set Size of the process running the domain (in KB). DomainMemoryStatTagRss // DomainMemoryStatTagUsable - How much the balloon can be inflated without pushing the guest system // to swap, corresponds to 'Available' in /proc/meminfo DomainMemoryStatTagUsable // DomainMemoryStatTagLastUpdate - Timestamp of the last update of statistics, in seconds. DomainMemoryStatTagLastUpdate // DomainMemoryStatTagNr - The number of statistics supported by this version of the interface. DomainMemoryStatTagNr ) // Capabilities returns an XML document describing the host's capabilties. func (l *Libvirt) Capabilities() ([]byte, error) { caps, err := l.ConnectGetCapabilities() return []byte(caps), err } // Connect establishes communication with the libvirt server. // The underlying libvirt socket connection must be previously established. func (l *Libvirt) Connect() error { return l.connect() } // Disconnect shuts down communication with the libvirt server // and closes the underlying net.Conn. func (l *Libvirt) Disconnect() error { // close event streams for id := range l.events { if err := l.removeStream(id); err != nil { return err } } // inform libvirt we're done if err := l.disconnect(); err != nil { return err } return l.conn.Close() } // Domains returns a list of all domains managed by libvirt. func (l *Libvirt) Domains() ([]Domain, error) { // these are the flags as passed by `virsh`, defined in: // src/remote/remote_protocol.x # remote_connect_list_all_domains_args domains, _, err := l.ConnectListAllDomains(1, 3) return domains, err } // DomainState returns state of the domain managed by libvirt. func (l *Libvirt) DomainState(dom string) (DomainState, error) { d, err := l.lookup(dom) if err != nil { return DomainStateNoState, err } state, _, err := l.DomainGetState(d, 0) return DomainState(state), err } // Events streams domain events. // If a problem is encountered setting up the event monitor connection // an error will be returned. Errors encountered during streaming will // cause the returned event channel to be closed. func (l *Libvirt) Events(dom string) (<-chan DomainEvent, error) { d, err := l.lookup(dom) if err != nil { return nil, err } payload := struct { Padding [4]byte Domain Domain Event [2]byte Flags [2]byte }{ Padding: [4]byte{0x0, 0x0, 0x1, 0x0}, Domain: d, Event: [2]byte{0x0, 0x0}, Flags: [2]byte{0x0, 0x0}, } buf, err := encode(&payload) if err != nil { return nil, err } resp, err := l.request(constants.QEMUConnectDomainMonitorEventRegister, constants.ProgramQEMU, &buf) if err != nil { return nil, err } res := <-resp if res.Status != StatusOK { err = decodeError(res.Payload) if err == ErrUnsupported { return nil, ErrEventsNotSupported } return nil, decodeError(res.Payload) } dec := xdr.NewDecoder(bytes.NewReader(res.Payload)) cbID, _, err := dec.DecodeUint() if err != nil { return nil, err } stream := make(chan *DomainEvent) l.addStream(cbID, stream) c := make(chan DomainEvent) go func() { // process events for e := range stream { c <- *e } }() return c, nil } // Migrate synchronously migrates the domain specified by dom, e.g., // 'prod-lb-01', to the destination hypervisor specified by dest, e.g., // 'qemu+tcp://example.com/system'. The flags argument determines the // type of migration and how it will be performed. For more information // on available migration flags and their meaning, see MigrateFlag*. 
func (l *Libvirt) Migrate(dom string, dest string, flags MigrateFlags) error { _, err := url.Parse(dest) if err != nil { return err } d, err := l.lookup(dom) if err != nil { return err } // Two unknowns remain here: Libvirt specifies RemoteParameters // and CookieIn. In testing both values are always set to 0 by virsh // and the source does not provide clear definitions of their purpose. // For now, using the same zeroed values as done by virsh will be Good Enough. destURI := []string{dest} remoteParams := []TypedParam{} cookieIn := []byte{} _, err = l.DomainMigratePerform3Params(d, destURI, remoteParams, cookieIn, uint32(flags)) return err } // MigrateSetMaxSpeed sets the maximum migration bandwidth (in MiB/s) for a // domain which is being migrated to another host. Specifying a negative value // results in an essentially unlimited value being provided to the hypervisor. func (l *Libvirt) MigrateSetMaxSpeed(dom string, speed int64) error { d, err := l.lookup(dom) if err != nil { return err } return l.DomainMigrateSetMaxSpeed(d, uint64(speed), 0) } // Run executes the given QAPI command against a domain's QEMU instance. // For a list of available QAPI commands, see: // http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD func (l *Libvirt) Run(dom string, cmd []byte) ([]byte, error) { d, err := l.lookup(dom) if err != nil { return nil, err } payload := struct { Domain Domain Command []byte Flags uint32 }{ Domain: d, Command: cmd, Flags: 0, } buf, err := encode(&payload) if err != nil { return nil, err } resp, err := l.request(constants.QEMUDomainMonitor, constants.ProgramQEMU, &buf) if err != nil { return nil, err } res := <-resp // check for libvirt errors if res.Status != StatusOK { return nil, decodeError(res.Payload) } // check for QEMU process errors if err = getQEMUError(res); err != nil { return nil, err } r := bytes.NewReader(res.Payload) dec := xdr.NewDecoder(r) data, _, err := dec.DecodeFixedOpaque(int32(r.Len())) if err != nil { return nil, err } // drop QMP control characters from start of line, and drop // any trailing NULL characters from the end return bytes.TrimRight(data[4:], "\x00"), nil } // Secrets returns all secrets managed by the libvirt daemon. func (l *Libvirt) Secrets() ([]Secret, error) { secrets, _, err := l.ConnectListAllSecrets(1, 0) return secrets, err } // StoragePool returns the storage pool associated with the provided name. // An error is returned if the requested storage pool is not found. func (l *Libvirt) StoragePool(name string) (StoragePool, error) { return l.StoragePoolLookupByName(name) } // StoragePools returns a list of defined storage pools. Pools are filtered by // the provided flags. See StoragePools*. func (l *Libvirt) StoragePools(flags StoragePoolsFlags) ([]StoragePool, error) { pools, _, err := l.ConnectListAllStoragePools(1, uint32(flags)) return pools, err } // Undefine undefines the domain specified by dom, e.g., 'prod-lb-01'. // The flags argument allows additional options to be specified such as // cleaning up snapshot metadata. For more information on available // flags, see UndefineFlag*. func (l *Libvirt) Undefine(dom string, flags UndefineFlags) error { d, err := l.lookup(dom) if err != nil { return err } return l.DomainUndefineFlags(d, uint32(flags)) } // Destroy destroys the domain specified by dom, e.g., 'prod-lb-01'. // The flags argument allows additional options to be specified such as // allowing a graceful shutdown with SIGTERM rather than SIGKILL. // For more information on available flags, see DestroyFlag*.
func (l *Libvirt) Destroy(dom string, flags DestroyFlags) error { d, err := l.lookup(dom) if err != nil { return err } return l.DomainDestroyFlags(d, uint32(flags)) } // XML returns a domain's raw XML definition, akin to `virsh dumpxml <domain>`. // See DomainXMLFlag* for optional flags. func (l *Libvirt)
XML
identifier_name
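The XML method completed above mirrors `virsh dumpxml`. Its full signature is truncated in this excerpt; assuming the conventional form XML(dom string, flags DomainXMLFlags) ([]byte, error), usage would look like this sketch, continuing from a connected client l:

```go
// Sketch only: signature inferred from the doc comment above, not shown in full here.
// A flags value of 0 is assumed to mirror `virsh dumpxml` defaults.
def, err := l.XML("prod-lb-01", 0)
if err != nil {
	log.Fatal(err)
}
fmt.Printf("%s\n", def)
```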
libvirt.go
NoAutostart // pools by type StoragePoolsFlagDir StoragePoolsFlagFS StoragePoolsFlagNETFS StoragePoolsFlagLogical StoragePoolsFlagDisk StoragePoolsFlagISCSI StoragePoolsFlagSCSI StoragePoolsFlagMPATH StoragePoolsFlagRBD StoragePoolsFlagSheepdog StoragePoolsFlagGluster StoragePoolsFlagZFS ) // DomainCreateFlags specify options for starting domains type DomainCreateFlags uint32 const ( // DomainCreateFlagPaused creates a paused domain. DomainCreateFlagPaused = 1 << iota // DomainCreateFlagAutoDestroy destroys the domain after the libvirt connection is closed. DomainCreateFlagAutoDestroy // DomainCreateFlagBypassCache avoids file system cache pollution. DomainCreateFlagBypassCache // DomainCreateFlagStartForceBoot boots, discarding any managed save DomainCreateFlagStartForceBoot // DomainCreateFlagStartValidate validates the XML document against the schema DomainCreateFlagStartValidate ) // RebootFlags specifies domain reboot methods type RebootFlags uint32 const ( // RebootAcpiPowerBtn - send ACPI event RebootAcpiPowerBtn RebootFlags = 1 << iota // RebootGuestAgent - use guest agent RebootGuestAgent // RebootInitctl - use initctl RebootInitctl // RebootSignal - use signal RebootSignal // RebootParavirt - use paravirt guest control RebootParavirt ) // DomainMemoryStatTag specifies domain memory tags type DomainMemoryStatTag uint32 const ( // DomainMemoryStatTagSwapIn - The total amount of data read from swap space (in kB). DomainMemoryStatTagSwapIn DomainMemoryStatTag = iota // DomainMemoryStatTagSwapOut - The total amount of memory written out to swap space (in kB). DomainMemoryStatTagSwapOut // DomainMemoryStatTagMajorFault - Page faults occur when a process makes a valid access to virtual memory // that is not available. When servicing the page fault, if disk IO is // required, it is considered a major fault. // These are expressed as the number of faults that have occurred. DomainMemoryStatTagMajorFault // DomainMemoryStatTagMinorFault - If the page fault does not require disk IO, it is a minor fault. DomainMemoryStatTagMinorFault // DomainMemoryStatTagUnused - The amount of memory left completely unused by the system (in kB). DomainMemoryStatTagUnused // DomainMemoryStatTagAvailable - The total amount of usable memory as seen by the domain (in kB). DomainMemoryStatTagAvailable // DomainMemoryStatTagActualBalloon - Current balloon value (in KB). DomainMemoryStatTagActualBalloon // DomainMemoryStatTagRss - Resident Set Size of the process running the domain (in KB). DomainMemoryStatTagRss // DomainMemoryStatTagUsable - How much the balloon can be inflated without pushing the guest system // to swap, corresponds to 'Available' in /proc/meminfo DomainMemoryStatTagUsable // DomainMemoryStatTagLastUpdate - Timestamp of the last update of statistics, in seconds. DomainMemoryStatTagLastUpdate // DomainMemoryStatTagNr - The number of statistics supported by this version of the interface. DomainMemoryStatTagNr ) // Capabilities returns an XML document describing the host's capabilities. func (l *Libvirt) Capabilities() ([]byte, error) { caps, err := l.ConnectGetCapabilities() return []byte(caps), err } // Connect establishes communication with the libvirt server. // The underlying libvirt socket connection must be previously established. func (l *Libvirt) Connect() error { return l.connect() } // Disconnect shuts down communication with the libvirt server // and closes the underlying net.Conn.
func (l *Libvirt) Disconnect() error { // close event streams for id := range l.events { if err := l.removeStream(id); err != nil { return err } } // inform libvirt we're done if err := l.disconnect(); err != nil { return err } return l.conn.Close() } // Domains returns a list of all domains managed by libvirt. func (l *Libvirt) Domains() ([]Domain, error) { // these are the flags as passed by `virsh`, defined in: // src/remote/remote_protocol.x # remote_connect_list_all_domains_args domains, _, err := l.ConnectListAllDomains(1, 3) return domains, err } // DomainState returns state of the domain managed by libvirt. func (l *Libvirt) DomainState(dom string) (DomainState, error) { d, err := l.lookup(dom) if err != nil { return DomainStateNoState, err } state, _, err := l.DomainGetState(d, 0) return DomainState(state), err } // Events streams domain events. // If a problem is encountered setting up the event monitor connection // an error will be returned. Errors encountered during streaming will // cause the returned event channel to be closed. func (l *Libvirt) Events(dom string) (<-chan DomainEvent, error) { d, err := l.lookup(dom) if err != nil { return nil, err } payload := struct { Padding [4]byte Domain Domain Event [2]byte Flags [2]byte }{ Padding: [4]byte{0x0, 0x0, 0x1, 0x0}, Domain: d, Event: [2]byte{0x0, 0x0}, Flags: [2]byte{0x0, 0x0}, } buf, err := encode(&payload) if err != nil { return nil, err } resp, err := l.request(constants.QEMUConnectDomainMonitorEventRegister, constants.ProgramQEMU, &buf) if err != nil { return nil, err } res := <-resp if res.Status != StatusOK { err = decodeError(res.Payload) if err == ErrUnsupported { return nil, ErrEventsNotSupported } return nil, decodeError(res.Payload) } dec := xdr.NewDecoder(bytes.NewReader(res.Payload)) cbID, _, err := dec.DecodeUint() if err != nil { return nil, err } stream := make(chan *DomainEvent) l.addStream(cbID, stream) c := make(chan DomainEvent) go func() { // process events for e := range stream { c <- *e } }() return c, nil } // Migrate synchronously migrates the domain specified by dom, e.g., // 'prod-lb-01', to the destination hypervisor specified by dest, e.g., // 'qemu+tcp://example.com/system'. The flags argument determines the // type of migration and how it will be performed. For more information // on available migration flags and their meaning, see MigrateFlag*. func (l *Libvirt) Migrate(dom string, dest string, flags MigrateFlags) error { _, err := url.Parse(dest) if err != nil { return err } d, err := l.lookup(dom) if err != nil { return err } // Two unknowns remain here , Libvirt specifies RemoteParameters // and CookieIn. In testing both values are always set to 0 by virsh // and the source does not provide clear definitions of their purpose. // For now, using the same zero'd values as done by virsh will be Good Enough. destURI := []string{dest} remoteParams := []TypedParam{} cookieIn := []byte{} _, err = l.DomainMigratePerform3Params(d, destURI, remoteParams, cookieIn, uint32(flags)) return err } // MigrateSetMaxSpeed set the maximum migration bandwidth (in MiB/s) for a // domain which is being migrated to another host. Specifying a negative value // results in an essentially unlimited value being provided to the hypervisor. func (l *Libvirt) MigrateSetMaxSpeed(dom string, speed int64) error { d, err := l.lookup(dom) if err != nil { return err } return l.DomainMigrateSetMaxSpeed(d, uint64(speed), 0) } // Run executes the given QAPI command against a domain's QEMU instance. 
// For a list of available QAPI commands, see: // http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD func (l *Libvirt) Run(dom string, cmd []byte) ([]byte, error)
{ d, err := l.lookup(dom) if err != nil { return nil, err } payload := struct { Domain Domain Command []byte Flags uint32 }{ Domain: d, Command: cmd, Flags: 0, } buf, err := encode(&payload) if err != nil { return nil, err }
identifier_body
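Run, whose body appears above, XDR-encodes the domain plus a raw QAPI command, sends it on the QEMU program channel, and strips the QMP framing bytes from the reply. A short sketch issuing a standard QMP query, continuing from a connected client l as in the earlier sketch (the domain name is illustrative):

```go
// Sketch only: cmd follows QMP syntax as accepted by the QEMU monitor.
out, err := l.Run("prod-lb-01", []byte(`{"execute":"query-status"}`))
if err != nil {
	log.Fatal(err)
}
fmt.Printf("%s\n", out) // raw JSON response from the QEMU monitor
```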
libvirt.go
PoolsFlagMPATH StoragePoolsFlagRBD StoragePoolsFlagSheepdog StoragePoolsFlagGluster StoragePoolsFlagZFS ) // DomainCreateFlags specify options for starting domains type DomainCreateFlags uint32 const ( // DomainCreateFlagPaused creates paused domain. DomainCreateFlagPaused = 1 << iota // DomainCreateFlagAutoDestroy destoy domain after libvirt connection closed. DomainCreateFlagAutoDestroy // DomainCreateFlagBypassCache avoid file system cache pollution. DomainCreateFlagBypassCache // DomainCreateFlagStartForceBoot boot, discarding any managed save DomainCreateFlagStartForceBoot // DomainCreateFlagStartValidate validate the XML document against schema DomainCreateFlagStartValidate ) // RebootFlags specifies domain reboot methods type RebootFlags uint32 const ( // RebootAcpiPowerBtn - send ACPI event RebootAcpiPowerBtn RebootFlags = 1 << iota // RebootGuestAgent - use guest agent RebootGuestAgent // RebootInitctl - use initctl RebootInitctl // RebootSignal - use signal RebootSignal // RebootParavirt - use paravirt guest control RebootParavirt ) // DomainMemoryStatTag specifies domain memory tags type DomainMemoryStatTag uint32 const ( // DomainMemoryStatTagSwapIn - The total amount of data read from swap space (in kB). DomainMemoryStatTagSwapIn DomainMemoryStatTag = iota // DomainMemoryStatTagSwapOut - The total amount of memory written out to swap space (in kB). DomainMemoryStatTagSwapOut // DomainMemoryStatTagMajorFault - Page faults occur when a process makes a valid access to virtual memory // that is not available. When servicing the page fault, if disk IO is // required, it is considered a major fault. // These are expressed as the number of faults that have occurred. DomainMemoryStatTagMajorFault // DomainMemoryStatTagMinorFault - If the page fault not require disk IO, it is a minor fault. DomainMemoryStatTagMinorFault // DomainMemoryStatTagUnused - The amount of memory left completely unused by the system (in kB). DomainMemoryStatTagUnused // DomainMemoryStatTagAvailable - The total amount of usable memory as seen by the domain (in kB). DomainMemoryStatTagAvailable // DomainMemoryStatTagActualBalloon - Current balloon value (in KB). DomainMemoryStatTagActualBalloon // DomainMemoryStatTagRss - Resident Set Size of the process running the domain (in KB). DomainMemoryStatTagRss // DomainMemoryStatTagUsable - How much the balloon can be inflated without pushing the guest system // to swap, corresponds to 'Available' in /proc/meminfo DomainMemoryStatTagUsable // DomainMemoryStatTagLastUpdate - Timestamp of the last update of statistics, in seconds. DomainMemoryStatTagLastUpdate // DomainMemoryStatTagNr - The number of statistics supported by this version of the interface. DomainMemoryStatTagNr ) // Capabilities returns an XML document describing the host's capabilties. func (l *Libvirt) Capabilities() ([]byte, error) { caps, err := l.ConnectGetCapabilities() return []byte(caps), err } // Connect establishes communication with the libvirt server. // The underlying libvirt socket connection must be previously established. func (l *Libvirt) Connect() error { return l.connect() } // Disconnect shuts down communication with the libvirt server // and closes the underlying net.Conn. func (l *Libvirt) Disconnect() error { // close event streams for id := range l.events { if err := l.removeStream(id); err != nil { return err } } // inform libvirt we're done if err := l.disconnect(); err != nil { return err } return l.conn.Close() } // Domains returns a list of all domains managed by libvirt. 
func (l *Libvirt) Domains() ([]Domain, error) { // these are the flags as passed by `virsh`, defined in: // src/remote/remote_protocol.x # remote_connect_list_all_domains_args domains, _, err := l.ConnectListAllDomains(1, 3) return domains, err } // DomainState returns state of the domain managed by libvirt. func (l *Libvirt) DomainState(dom string) (DomainState, error) { d, err := l.lookup(dom) if err != nil { return DomainStateNoState, err } state, _, err := l.DomainGetState(d, 0) return DomainState(state), err } // Events streams domain events. // If a problem is encountered setting up the event monitor connection // an error will be returned. Errors encountered during streaming will // cause the returned event channel to be closed. func (l *Libvirt) Events(dom string) (<-chan DomainEvent, error) { d, err := l.lookup(dom) if err != nil { return nil, err } payload := struct { Padding [4]byte Domain Domain Event [2]byte Flags [2]byte }{ Padding: [4]byte{0x0, 0x0, 0x1, 0x0}, Domain: d, Event: [2]byte{0x0, 0x0}, Flags: [2]byte{0x0, 0x0}, } buf, err := encode(&payload) if err != nil { return nil, err } resp, err := l.request(constants.QEMUConnectDomainMonitorEventRegister, constants.ProgramQEMU, &buf) if err != nil { return nil, err } res := <-resp if res.Status != StatusOK { err = decodeError(res.Payload) if err == ErrUnsupported { return nil, ErrEventsNotSupported } return nil, decodeError(res.Payload) } dec := xdr.NewDecoder(bytes.NewReader(res.Payload)) cbID, _, err := dec.DecodeUint() if err != nil { return nil, err } stream := make(chan *DomainEvent) l.addStream(cbID, stream) c := make(chan DomainEvent) go func() { // process events for e := range stream { c <- *e } }() return c, nil } // Migrate synchronously migrates the domain specified by dom, e.g., // 'prod-lb-01', to the destination hypervisor specified by dest, e.g., // 'qemu+tcp://example.com/system'. The flags argument determines the // type of migration and how it will be performed. For more information // on available migration flags and their meaning, see MigrateFlag*. func (l *Libvirt) Migrate(dom string, dest string, flags MigrateFlags) error { _, err := url.Parse(dest) if err != nil { return err } d, err := l.lookup(dom) if err != nil { return err } // Two unknowns remain here , Libvirt specifies RemoteParameters // and CookieIn. In testing both values are always set to 0 by virsh // and the source does not provide clear definitions of their purpose. // For now, using the same zero'd values as done by virsh will be Good Enough. destURI := []string{dest} remoteParams := []TypedParam{} cookieIn := []byte{} _, err = l.DomainMigratePerform3Params(d, destURI, remoteParams, cookieIn, uint32(flags)) return err } // MigrateSetMaxSpeed set the maximum migration bandwidth (in MiB/s) for a // domain which is being migrated to another host. Specifying a negative value // results in an essentially unlimited value being provided to the hypervisor. func (l *Libvirt) MigrateSetMaxSpeed(dom string, speed int64) error { d, err := l.lookup(dom) if err != nil { return err } return l.DomainMigrateSetMaxSpeed(d, uint64(speed), 0) } // Run executes the given QAPI command against a domain's QEMU instance. 
// For a list of available QAPI commands, see: // http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD func (l *Libvirt) Run(dom string, cmd []byte) ([]byte, error) { d, err := l.lookup(dom) if err != nil { return nil, err } payload := struct { Domain Domain Command []byte Flags uint32 }{ Domain: d, Command: cmd, Flags: 0, } buf, err := encode(&payload) if err != nil { return nil, err } resp, err := l.request(constants.QEMUDomainMonitor, constants.ProgramQEMU, &buf) if err != nil { return nil, err } res := <-resp // check for libvirt errors if res.Status != StatusOK
{ return nil, decodeError(res.Payload) }
conditional_block
global-network.js
.preventDefault(); $('.network-filter').hide(); //Hide all dropdowns }); //Live Search Global Network $("input#network-search").keyup(function(){ // Retrieve the input field text var filter = $(this).val(); // Loop through grid items $(".network-grid .network-grid-item").each(function(){ // If the list item does not contain the text phrase fade it out if ($(this).text().search(new RegExp(filter, "i")) < 0) { $(this).fadeOut(); // Show the list item if the phrase matches } else { $(this).fadeIn(); } }); }); //Show appropriate topic list on title click $('.topic-list li').click(function(event) { var section = $(this).attr('data-id'); $('form#Filters fieldset').hide(); $('form#Filters fieldset[name=' + section+']').show(); }); //Profile Popup $('.close-profile a').click(function(e) { e.preventDefault(); $('.profile-container').fadeOut('slow'); }); $('.network-grid-item').click(function(e) { var itemOffset = $(this).offset().top; $('.profile-container').css('top',itemOffset+'px'); var profile_id = $(this).parent().attr('data-id'); $('.profile-container').fadeIn('slow'); $('.profile-content').css('opacity',0); loadProfile(profile_id); }); //AJAX in profile data from json file function loadProfile(profile_id) { var is_loading = false; if (is_loading == false) { is_loading = true; $('#loader').show(); var data = { action: 'getSingleProfile', profile_id: profile_id }; jQuery.post(ajaxurl, data, function(response) { // append: add the new statements to the existing data if(response != 0){ $('.profile-content').empty(); console.log(response); $('.profile-content').append(response); $('.profile-content').css('opacity',1); is_loading = false; } else{ $('#loader').hide(); is_loading = false; } }); } } // ------------------------------------------------------ // ------------------ Filters---------------------------- // ------------------------------------------------------ //Update active filters and display function GetActiveString()
//========= PLUGIN ADD ON FOR MIXITUP ===================== // To keep our code clean and modular, all custom functionality will be contained inside a single object literal called "checkboxFilter". var checkboxFilter = { // Declare any variables we will need as properties of the object $filters: null, $reset: null, groups: [], outputArray: [], outputString: '', // The "init" method will run on document ready and cache any jQuery objects we will need. init: function(){ var self = this; // As a best practice, in each method we will asign "this" to the variable "self" so that it remains scope-agnostic. We will use it to refer to the parent "checkboxFilter" object so that we can share methods and properties between all parts of the object. self.$filters = $('#Filters'); self.$reset = $('.reset'); self.$container = $('#network-grid'); self.$filters.find('fieldset').each(function(){ self.groups.push({ $inputs: $(this).find('input'), active: [], tracker: false }); }); self.bindHandlers(); }, // The "bindHandlers" method will listen for whenever a form value changes. bindHandlers: function(){ var self = this; self.$filters.on('change', function(){ self.parseFilters(); }); self.$reset.on('click', function(e){ e.preventDefault(); self.$filters[0].reset(); self.parseFilters(); }); }, // The parseFilters method checks which filters are active in each group: parseFilters: function(){ var self = this; // loop through each filter group and add active filters to arrays for(var i = 0, group; group = self.groups[i]; i++){ group.active = []; // reset arrays group.$inputs.each(function(){ $(this).is(':checked') && group.active.push(this.value); }); group.active.length && (group.tracker = 0); } self.concatenate(); }, // The "concatenate" method will crawl through each group, concatenating filters as desired: concatenate: function(){ var self = this, cache = '', crawled = false, checkTrackers = function(){ var done = 0; for(var i = 0, group; group = self.groups[i]; i++){ (group.tracker === false) && done++; } return (done < self.groups.length); }, crawl = function(){ for(var i = 0, group; group = self.groups[i]; i++){ group.active[group.tracker] && (cache += group.active[group.tracker]); if(i === self.groups.length - 1){ self.outputArray.push(cache); cache = ''; updateTrackers(); } } }, updateTrackers = function(){ for(var i = self.groups.length - 1; i > -1; i--){ var group = self.groups[i]; if(group.active[group.tracker + 1]){ group.tracker++; break; } else if(i > 0){ group.tracker && (group.tracker = 0); } else { crawled = true; } } }; self.outputArray = []; // reset output array do{ crawl(); } while(!crawled && checkTrackers()); self.outputString = self.outputArray.join(); // If the output string is empty, show all rather than none: !self.outputString.length && (self.outputString = 'all'); //console.log(self.outputString); // ^ we can check the console here to take a look at the filter string that is produced // Send the output string to MixItUp via the 'filter' method: if(self.$container.mixItUp('isLoaded')){ self.$container.mixItUp('filter', self.outputString); } } }; //=======END FILTER BY CHECKBOX PLUGIN jQuery(document).ready(function($){ // Initialize checkboxFilter code checkboxFilter.init(); // Instantiate MixItUp $('#network-grid').mixItUp({ controls: { enable: false // we won't be needing these }, animation: { easing: 'cubic-bezier(0.86, 0, 0.07, 1)', duration: 600 }, callbacks: { onMixEnd: GetActiveString } }); //URL FILTERS var filter = getParameterByName('filter'); //check if url filter is 
present - initialize mixitup and prefilter if(filter !==''){ $("input[value='" + filter + "']").prop('checked', true); $('#network-grid').mixItUp('filter', filter, GetActiveString); $(".network-filter-status").slideDown(); } // //Show profile from url // var profile_id = getParameterByName('profile'); // if(profile_id !==''){ // var itemOffset = $('#22').offset().top; // console.log(itemOffset); // $('.profile-container').css('top',itemOffset+'px'); // $('.profile-container').fadeIn('slow'); // $('.profile-content').css('opacity',0); // loadProfile(profile_id); // $('html, body').animate({ // scrollTop: itemOffset // }, 1000); // } }); //Function to easily retrieve URL params function getParameterByName(name) { name = name.replace(/[\[]/, "\\[").replace(/[\]]/, "\\]"); var regex = new RegExp("[\\?&]" + name + "=([^&#]*)"), results = regex.exec(location.search); return results === null ? "" : decodeURIComponent(results[1].replace(/\+/g, " ")); } //Google Map Initialize function initialize() { var center = new google.maps.LatLng(40.00, -75.2); var options = { 'zoom': 3, 'center': center, 'mapTypeId': google.maps.MapTypeId.ROADMAP }; var map = new google.maps.Map(document.getElementById("map"), options); var markers = []; var profiles = {}; var infowindow = new google.maps.InfoWindow({ content: "..loading" }); var templateUrl = profile_json.templateUrl; $.getJSON(templateUrl+'/data/user-profiles.json', function(data){ for (var i = 0; i < data.length; i++) { var address = data[i]['City'] + ', ' + data[i]["State (USA only)"] + ' ' + data[i]["Country"]; var contentString = '<img src="'+data[i]["Profile Picture"] +'" style="width: 100px; display: inline; float: left; padding-right
{ var active = []; $('.checkbox input:checked').each(function() { active.push($(this).attr('data-filter')); }); var filtered = active.join(", "); if(filtered !== '') $('.filtered-list').html(filtered); else{ $('.filtered-list').html('All'); } }
identifier_body
global-network.js
(); $('.network-filter').hide(); //Hide all dropdowns }); //Live Search Global Network $("input#network-search").keyup(function(){ // Retrieve the input field text var filter = $(this).val(); // Loop through grid items $(".network-grid .network-grid-item").each(function(){ // If the list item does not contain the text phrase fade it out if ($(this).text().search(new RegExp(filter, "i")) < 0) { $(this).fadeOut(); // Show the list item if the phrase matches } else { $(this).fadeIn(); } }); }); //Show appropriate topic list on title click $('.topic-list li').click(function(event) { var section = $(this).attr('data-id'); $('form#Filters fieldset').hide(); $('form#Filters fieldset[name=' + section+']').show(); }); //Profile Popup $('.close-profile a').click(function(e) { e.preventDefault(); $('.profile-container').fadeOut('slow'); }); $('.network-grid-item').click(function(e) { var itemOffset = $(this).offset().top; $('.profile-container').css('top',itemOffset+'px'); var profile_id = $(this).parent().attr('data-id'); $('.profile-container').fadeIn('slow'); $('.profile-content').css('opacity',0); loadProfile(profile_id); }); //AJAX in profile data from json file function loadProfile(profile_id) { var is_loading = false; if (is_loading == false) { is_loading = true; $('#loader').show(); var data = { action: 'getSingleProfile', profile_id: profile_id }; jQuery.post(ajaxurl, data, function(response) { // append: add the new statements to the existing data if(response != 0){ $('.profile-content').empty(); console.log(response); $('.profile-content').append(response); $('.profile-content').css('opacity',1); is_loading = false; } else{ $('#loader').hide(); is_loading = false; } }); } } // ------------------------------------------------------ // ------------------ Filters---------------------------- // ------------------------------------------------------ //Update active filters and display function GetActiveString(){ var active = []; $('.checkbox input:checked').each(function() { active.push($(this).attr('data-filter')); }); var filtered = active.join(", "); if(filtered !== '') $('.filtered-list').html(filtered); else{ $('.filtered-list').html('All'); } } //========= PLUGIN ADD ON FOR MIXITUP ===================== // To keep our code clean and modular, all custom functionality will be contained inside a single object literal called "checkboxFilter". var checkboxFilter = { // Declare any variables we will need as properties of the object $filters: null, $reset: null, groups: [], outputArray: [], outputString: '', // The "init" method will run on document ready and cache any jQuery objects we will need. init: function(){ var self = this; // As a best practice, in each method we will assign "this" to the variable "self" so that it remains scope-agnostic. We will use it to refer to the parent "checkboxFilter" object so that we can share methods and properties between all parts of the object. self.$filters = $('#Filters'); self.$reset = $('.reset'); self.$container = $('#network-grid'); self.$filters.find('fieldset').each(function(){ self.groups.push({ $inputs: $(this).find('input'), active: [], tracker: false }); }); self.bindHandlers(); }, // The "bindHandlers" method will listen for whenever a form value changes.
bindHandlers: function(){ var self = this; self.$filters.on('change', function(){ self.parseFilters(); }); self.$reset.on('click', function(e){ e.preventDefault(); self.$filters[0].reset(); self.parseFilters(); }); }, // The parseFilters method checks which filters are active in each group: parseFilters: function(){ var self = this; // loop through each filter group and add active filters to arrays for(var i = 0, group; group = self.groups[i]; i++){ group.active = []; // reset arrays group.$inputs.each(function(){ $(this).is(':checked') && group.active.push(this.value); }); group.active.length && (group.tracker = 0); } self.concatenate(); }, // The "concatenate" method will crawl through each group, concatenating filters as desired: concatenate: function(){ var self = this, cache = '', crawled = false, checkTrackers = function(){ var done = 0; for(var i = 0, group; group = self.groups[i]; i++){ (group.tracker === false) && done++; } return (done < self.groups.length); }, crawl = function(){ for(var i = 0, group; group = self.groups[i]; i++){ group.active[group.tracker] && (cache += group.active[group.tracker]); if(i === self.groups.length - 1){ self.outputArray.push(cache); cache = ''; updateTrackers(); } } }, updateTrackers = function(){ for(var i = self.groups.length - 1; i > -1; i--){ var group = self.groups[i]; if(group.active[group.tracker + 1]){ group.tracker++; break; } else if(i > 0){ group.tracker && (group.tracker = 0); } else { crawled = true; } } }; self.outputArray = []; // reset output array do{ crawl(); } while(!crawled && checkTrackers()); self.outputString = self.outputArray.join(); // If the output string is empty, show all rather than none: !self.outputString.length && (self.outputString = 'all'); //console.log(self.outputString); // ^ we can check the console here to take a look at the filter string that is produced // Send the output string to MixItUp via the 'filter' method: if(self.$container.mixItUp('isLoaded')){ self.$container.mixItUp('filter', self.outputString); } } }; //=======END FILTER BY CHECKBOX PLUGIN jQuery(document).ready(function($){ // Initialize checkboxFilter code checkboxFilter.init(); // Instantiate MixItUp $('#network-grid').mixItUp({ controls: { enable: false // we won't be needing these }, animation: { easing: 'cubic-bezier(0.86, 0, 0.07, 1)', duration: 600 }, callbacks: { onMixEnd: GetActiveString } });
//URL FILTERS var filter = getParameterByName('filter'); //check if url filter is present - initialize mixitup and prefilter if(filter !==''){ $("input[value='" + filter + "']").prop('checked', true); $('#network-grid').mixItUp('filter', filter, GetActiveString); $(".network-filter-status").slideDown(); } // //Show profile from url // var profile_id = getParameterByName('profile'); // if(profile_id !==''){ // var itemOffset = $('#22').offset().top; // console.log(itemOffset); // $('.profile-container').css('top',itemOffset+'px'); // $('.profile-container').fadeIn('slow'); // $('.profile-content').css('opacity',0); // loadProfile(profile_id); // $('html, body').animate({ // scrollTop: itemOffset // }, 1000); // } }); //function to easily retrieve URL Params function getParameterByName(name) { name = name.replace(/[\[]/, "\\[").replace(/[\]]/, "\\]"); var regex = new RegExp("[\\?&]" + name + "=([^&#]*)"), results = regex.exec(location.search); return results === null ? "" : decodeURIComponent(results[1].replace(/\+/g, " ")); } //Google Map Initialize function initialize() { var center = new google.maps.LatLng(40.00, -75.2); var options = { 'zoom': 3, 'center': center, 'mapTypeId': google.maps.MapTypeId.ROADMAP }; var map = new google.maps.Map(document.getElementById("map"), options); var markers = []; var profiles = {}; var infowindow = new google.maps.InfoWindow({ content: "..loading" }); var templateUrl = profile_json.templateUrl; $.getJSON(templateUrl+'/data/user-profiles.json', function(data){ for (var i = 0; i < data.length; i++) { var address = data[i]['City'] + ', ' + data[i]["State (USA only)"] + ' ' + data[i]["Country"]; var contentString = '<img src="'+data[i]["Profile Picture"] +'" style="width: 100px; display: inline; float: left; padding-right: 1
random_line_split
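The concatenate() method in the record above steps the per-group tracker indices to emit every combination of one active filter per group — a cartesian product — then hands MixItUp the comma-joined result, or 'all' when nothing is selected. A sketch of the same product in Go (function name ours):

package main

import (
	"fmt"
	"strings"
)

// cartesian builds every concatenation of one active value per non-empty
// group — the same strings concatenate() accumulates in outputArray.
func cartesian(groups [][]string) []string {
	out := []string{""}
	for _, g := range groups {
		if len(g) == 0 {
			continue // a group with nothing checked doesn't constrain the result
		}
		var next []string
		for _, prefix := range out {
			for _, v := range g {
				next = append(next, prefix+v)
			}
		}
		out = next
	}
	return out
}

func main() {
	s := strings.Join(cartesian([][]string{{".europe", ".asia"}, {".water"}}), ",")
	if s == "" {
		s = "all" // empty selection shows everything, as in the source
	}
	fmt.Println(s) // .europe.water,.asia.water
}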
global-network.js
e.preventDefault(); $('.network-filter').hide(); //Hide all dropdowns }); //Live Search Global Network $("input#network-search").keyup(function(){ // Retrieve the input field text and reset the count to zero var filter = $(this).val(); // Loop through grid items $(".network-grid .network-grid-item").each(function(){ // If the list item does not contain the text phrase fade it out if ($(this).text().search(new RegExp(filter, "i")) < 0) { $(this).fadeOut(); // Show the list item if the phrase matches and increase the count by 1 } else { $(this).fadeIn(); } }); }); //Show Appropriate topic List on title click $('.topic-list li').click(function(event) { var section = $(this).attr('data-id'); $('form#Filters fieldset').hide(); $('form#Filters fieldset[name=' + section+']').show(); }); //Profile Popup $('.close-profile a').click(function(e) { e.preventDefault(); $('.profile-container').fadeOut('slow'); }); $('.network-grid-item').click(function(e) { var itemOffset = $(this).offset().top; $('.profile-container').css('top',itemOffset+'px'); var profile_id = $(this).parent().attr('data-id'); $('.profile-container').fadeIn('slow'); $('.profile-content').css('opacity',0); loadProfile(profile_id); }); //AJAX in profile data from json file function
(profile_id) { var is_loading = false; if (is_loading == false) { is_loading = true; $('#loader').show(); var data = { action: 'getSingleProfile', profile_id: profile_id }; jQuery.post(ajaxurl, data, function(response) { // append: add the new statements to the existing data if(response != 0){ $('.profile-content').empty(); console.log(response); $('.profile-content').append(response); $('.profile-content').css('opacity',1); is_loading = false; } else{ $('#loader').hide(); is_loading = false; } }); } } // ------------------------------------------------------ // ------------------ Filters---------------------------- // ------------------------------------------------------ //Update active filters and display function GetActiveString(){ var active = []; $('.checkbox input:checked').each(function() { active.push($(this).attr('data-filter')); }); var filtered = active.join(", "); if(filtered !== '') $('.filtered-list').html(filtered); else{ $('.filtered-list').html('All'); } } //========= PLUGIN ADD ON FOR MIXITUP ===================== // To keep our code clean and modular, all custom functionality will be contained inside a single object literal called "checkboxFilter". var checkboxFilter = { // Declare any variables we will need as properties of the object $filters: null, $reset: null, groups: [], outputArray: [], outputString: '', // The "init" method will run on document ready and cache any jQuery objects we will need. init: function(){ var self = this; // As a best practice, in each method we will assign "this" to the variable "self" so that it remains scope-agnostic. We will use it to refer to the parent "checkboxFilter" object so that we can share methods and properties between all parts of the object. self.$filters = $('#Filters'); self.$reset = $('.reset'); self.$container = $('#network-grid'); self.$filters.find('fieldset').each(function(){ self.groups.push({ $inputs: $(this).find('input'), active: [], tracker: false }); }); self.bindHandlers(); }, // The "bindHandlers" method will listen for whenever a form value changes. 
bindHandlers: function(){ var self = this; self.$filters.on('change', function(){ self.parseFilters(); }); self.$reset.on('click', function(e){ e.preventDefault(); self.$filters[0].reset(); self.parseFilters(); }); }, // The parseFilters method checks which filters are active in each group: parseFilters: function(){ var self = this; // loop through each filter group and add active filters to arrays for(var i = 0, group; group = self.groups[i]; i++){ group.active = []; // reset arrays group.$inputs.each(function(){ $(this).is(':checked') && group.active.push(this.value); }); group.active.length && (group.tracker = 0); } self.concatenate(); }, // The "concatenate" method will crawl through each group, concatenating filters as desired: concatenate: function(){ var self = this, cache = '', crawled = false, checkTrackers = function(){ var done = 0; for(var i = 0, group; group = self.groups[i]; i++){ (group.tracker === false) && done++; } return (done < self.groups.length); }, crawl = function(){ for(var i = 0, group; group = self.groups[i]; i++){ group.active[group.tracker] && (cache += group.active[group.tracker]); if(i === self.groups.length - 1){ self.outputArray.push(cache); cache = ''; updateTrackers(); } } }, updateTrackers = function(){ for(var i = self.groups.length - 1; i > -1; i--){ var group = self.groups[i]; if(group.active[group.tracker + 1]){ group.tracker++; break; } else if(i > 0){ group.tracker && (group.tracker = 0); } else { crawled = true; } } }; self.outputArray = []; // reset output array do{ crawl(); } while(!crawled && checkTrackers()); self.outputString = self.outputArray.join(); // If the output string is empty, show all rather than none: !self.outputString.length && (self.outputString = 'all'); //console.log(self.outputString); // ^ we can check the console here to take a look at the filter string that is produced // Send the output string to MixItUp via the 'filter' method: if(self.$container.mixItUp('isLoaded')){ self.$container.mixItUp('filter', self.outputString); } } }; //=======END FILTER BY CHECKBOX PLUGIN jQuery(document).ready(function($){ // Initialize checkboxFilter code checkboxFilter.init(); // Instantiate MixItUp $('#network-grid').mixItUp({ controls: { enable: false // we won't be needing these }, animation: { easing: 'cubic-bezier(0.86, 0, 0.07, 1)', duration: 600 }, callbacks: { onMixEnd: GetActiveString } }); //URL FILTERS var filter = getParameterByName('filter'); //check if url filter is present - initialize mixitup and prefilter if(filter !==''){ $("input[value='" + filter + "']").prop('checked', true); $('#network-grid').mixItUp('filter', filter, GetActiveString); $(".network-filter-status").slideDown(); } // //Show profile from url // var profile_id = getParameterByName('profile'); // if(profile_id !==''){ // var itemOffset = $('#22').offset().top; // console.log(itemOffset); // $('.profile-container').css('top',itemOffset+'px'); // $('.profile-container').fadeIn('slow'); // $('.profile-content').css('opacity',0); // loadProfile(profile_id); // $('html, body').animate({ // scrollTop: itemOffset // }, 1000); // } }); //function to easily retrieve URL Params function getParameterByName(name) { name = name.replace(/[\[]/, "\\[").replace(/[\]]/, "\\]"); var regex = new RegExp("[\\?&]" + name + "=([^&#]*)"), results = regex.exec(location.search); return results === null ? 
"" : decodeURIComponent(results[1].replace(/\+/g, " ")); } //Google Map Initialize function initialize() { var center = new google.maps.LatLng(40.00, -75.2); var options = { 'zoom': 3, 'center': center, 'mapTypeId': google.maps.MapTypeId.ROADMAP }; var map = new google.maps.Map(document.getElementById("map"), options); var markers = []; var profiles = {}; var infowindow = new google.maps.InfoWindow({ content: "..loading" }); var templateUrl = profile_json.templateUrl; $.getJSON(templateUrl+'/data/user-profiles.json', function(data){ for (var i = 0; i < data.length; i++) { var address = data[i]['City'] + ', ' + data[i]["State (USA only)"] + ' ' + data[i]["Country"]; var contentString = '<img src="'+data[i]["Profile Picture"] +'" style="width: 100px; display: inline; float: left; padding-right
loadProfile
identifier_name
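One thing the loadProfile record highlights: is_loading is declared inside the function, so it resets to false on every call and the guard never actually blocks overlapping requests. A sketch of a guard that persists across calls, written in Go for consistency with the other examples (all names ours):

package main

import (
	"fmt"
	"sync/atomic"
)

var inFlight atomic.Bool // package-level, so it survives between calls

func loadProfile(id string) {
	// Atomically claim the "loading" slot; bail out if already claimed.
	if !inFlight.CompareAndSwap(false, true) {
		return
	}
	defer inFlight.Store(false)
	fmt.Println("fetching profile", id) // stand-in for the jQuery.post call
}

func main() { loadProfile("22") }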
global-network.js
}); //Close all filter sections $('.close-filter').click(function(e) { e.preventDefault(); $('.network-filter').hide(); //Hide all dropdowns }); //Live Search Global Network $("input#network-search").keyup(function(){ // Retrieve the input field text and reset the count to zero var filter = $(this).val(); // Loop through grid items $(".network-grid .network-grid-item").each(function(){ // If the list item does not contain the text phrase fade it out if ($(this).text().search(new RegExp(filter, "i")) < 0) { $(this).fadeOut(); // Show the list item if the phrase matches and increase the count by 1 } else { $(this).fadeIn(); } }); }); //Show Appropriate topic List on title click $('.topic-list li').click(function(event) { var section = $(this).attr('data-id'); $('form#Filters fieldset').hide(); $('form#Filters fieldset[name=' + section+']').show(); }); //Profile Popup $('.close-profile a').click(function(e) { e.preventDefault(); $('.profile-container').fadeOut('slow'); }); $('.network-grid-item').click(function(e) { var itemOffset = $(this).offset().top; $('.profile-container').css('top',itemOffset+'px'); var profile_id = $(this).parent().attr('data-id'); $('.profile-container').fadeIn('slow'); $('.profile-content').css('opacity',0); loadProfile(profile_id); }); //AJAX in profile data from json file function loadProfile(profile_id) { var is_loading = false; if (is_loading == false) { is_loading = true; $('#loader').show(); var data = { action: 'getSingleProfile', profile_id: profile_id }; jQuery.post(ajaxurl, data, function(response) { // append: add the new statements to the existing data if(response != 0){ $('.profile-content').empty(); console.log(response); $('.profile-content').append(response); $('.profile-content').css('opacity',1); is_loading = false; } else{ $('#loader').hide(); is_loading = false; } }); } } // ------------------------------------------------------ // ------------------ Filters---------------------------- // ------------------------------------------------------ //Update active filters and display function GetActiveString(){ var active = []; $('.checkbox input:checked').each(function() { active.push($(this).attr('data-filter')); }); var filtered = active.join(", "); if(filtered !== '') $('.filtered-list').html(filtered); else{ $('.filtered-list').html('All'); } } //========= PLUGIN ADD ON FOR MIXITUP ===================== // To keep our code clean and modular, all custom functionality will be contained inside a single object literal called "checkboxFilter". var checkboxFilter = { // Declare any variables we will need as properties of the object $filters: null, $reset: null, groups: [], outputArray: [], outputString: '', // The "init" method will run on document ready and cache any jQuery objects we will need. init: function(){ var self = this; // As a best practice, in each method we will assign "this" to the variable "self" so that it remains scope-agnostic. We will use it to refer to the parent "checkboxFilter" object so that we can share methods and properties between all parts of the object. self.$filters = $('#Filters'); self.$reset = $('.reset'); self.$container = $('#network-grid'); self.$filters.find('fieldset').each(function(){ self.groups.push({ $inputs: $(this).find('input'), active: [], tracker: false }); }); self.bindHandlers(); }, // The "bindHandlers" method will listen for whenever a form value changes. 
bindHandlers: function(){ var self = this; self.$filters.on('change', function(){ self.parseFilters(); }); self.$reset.on('click', function(e){ e.preventDefault(); self.$filters[0].reset(); self.parseFilters(); }); }, // The parseFilters method checks which filters are active in each group: parseFilters: function(){ var self = this; // loop through each filter group and add active filters to arrays for(var i = 0, group; group = self.groups[i]; i++){ group.active = []; // reset arrays group.$inputs.each(function(){ $(this).is(':checked') && group.active.push(this.value); }); group.active.length && (group.tracker = 0); } self.concatenate(); }, // The "concatenate" method will crawl through each group, concatenating filters as desired: concatenate: function(){ var self = this, cache = '', crawled = false, checkTrackers = function(){ var done = 0; for(var i = 0, group; group = self.groups[i]; i++){ (group.tracker === false) && done++; } return (done < self.groups.length); }, crawl = function(){ for(var i = 0, group; group = self.groups[i]; i++){ group.active[group.tracker] && (cache += group.active[group.tracker]); if(i === self.groups.length - 1){ self.outputArray.push(cache); cache = ''; updateTrackers(); } } }, updateTrackers = function(){ for(var i = self.groups.length - 1; i > -1; i--){ var group = self.groups[i]; if(group.active[group.tracker + 1]){ group.tracker++; break; } else if(i > 0){ group.tracker && (group.tracker = 0); } else { crawled = true; } } }; self.outputArray = []; // reset output array do{ crawl(); } while(!crawled && checkTrackers()); self.outputString = self.outputArray.join(); // If the output string is empty, show all rather than none: !self.outputString.length && (self.outputString = 'all'); //console.log(self.outputString); // ^ we can check the console here to take a look at the filter string that is produced // Send the output string to MixItUp via the 'filter' method: if(self.$container.mixItUp('isLoaded')){ self.$container.mixItUp('filter', self.outputString); } } }; //=======END FILTER BY CHECKBOX PLUGIN jQuery(document).ready(function($){ // Initialize checkboxFilter code checkboxFilter.init(); // Instantiate MixItUp $('#network-grid').mixItUp({ controls: { enable: false // we won't be needing these }, animation: { easing: 'cubic-bezier(0.86, 0, 0.07, 1)', duration: 600 }, callbacks: { onMixEnd: GetActiveString } }); //URL FILTERS var filter = getParameterByName('filter'); //check if url filter is present - initialize mixitup and prefilter if(filter !==''){ $("input[value='" + filter + "']").prop('checked', true); $('#network-grid').mixItUp('filter', filter, GetActiveString); $(".network-filter-status").slideDown(); } // //Show profile from url // var profile_id = getParameterByName('profile'); // if(profile_id !==''){ // var itemOffset = $('#22').offset().top; // console.log(itemOffset); // $('.profile-container').css('top',itemOffset+'px'); // $('.profile-container').fadeIn('slow'); // $('.profile-content').css('opacity',0); // loadProfile(profile_id); // $('html, body').animate({ // scrollTop: itemOffset // }, 1000); // } }); //function to easily retrieve URL Params function getParameterByName(name) { name = name.replace(/[\[]/, "\\[").replace(/[\]]/, "\\]"); var regex = new RegExp("[\\?&]" + name + "=([^&#]*)"), results = regex.exec(location.search); return results === null ? 
"" : decodeURIComponent(results[1].replace(/\+/g, " ")); } //Google Map Initialize function initialize() { var center = new google.maps.LatLng(40.00, -75.2); var options = { 'zoom': 3, 'center': center, 'mapTypeId': google.maps.MapTypeId.ROADMAP }; var map = new google.maps.Map(document.getElementById("map"), options); var markers = []; var profiles = {}; var infowindow = new google.maps.InfoWindow({ content: "..loading" }); var templateUrl = profile_json.templateUrl; $.getJSON(templateUrl+'/data/user-profiles.json', function(data){ for (var i = 0; i < data.length; i++) { var address = data[i]['City'] + ', ' + data[i]["State (USA only)"] + ' ' + data[i]["Country"]; var contentString = '<img src="'+data[i]["Profile
{ initialize(); }
conditional_block
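getParameterByName in these records pulls a query parameter out of location.search with a hand-rolled regex and decodes '+' as a space. Go's net/url does the same decoding out of the box; a sketch of the equivalent (helper name ours, not from the source):

package main

import (
	"fmt"
	"net/url"
)

// queryParam returns the named query parameter, or "" if absent.
// url.Values percent-decodes and treats '+' as a space, like the regex version.
func queryParam(rawURL, name string) string {
	u, err := url.Parse(rawURL)
	if err != nil {
		return ""
	}
	return u.Query().Get(name)
}

func main() {
	fmt.Println(queryParam("https://example.com/?filter=.north+america", "filter")) // .north america
}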
handler.go
[string]interface{} json.Unmarshal(bodyBytes, &response) dict, _ := json.Marshal(response["response"]) var bots []Bot json.Unmarshal(dict, &bots) log.Println(bots) return bots } func msgHandler() gin.HandlerFunc { return func(c *gin.Context) { var botResponse msg if c.BindJSON(&botResponse) == nil { bots := getBots() fields := strings.Fields(botResponse.Text) groupId := botResponse.GroupId botId := "" for _, bot := range bots { if bot.GroupId == groupId { botId = bot.BotId break } } log.Println(botId) log.Println(fields) if len(fields) == 0 { c.JSON(http.StatusOK, nil) return } if fields[0] == "!help" { if botResponse.GroupId == os.Getenv("htown") { sendPost("I am your chat bot.\nType `!coin` to flip a coin.\nType `!smack @someone` to talk trash.\nType `!suckup @someone` to show admiration.\nType `!stats player season week` for stats.\nType `!draft` for draft info.\nType `!standings` for league standings.", botId) } else { sendPost("I am your chat bot.\nType `!coin` to flip a coin.\nType `!smack @someone` to talk trash.\nType `!suckup @someone` to show admiration.\nType `!stats player season week` for stats.", botId) } } else if fields[0] == "!coin" { result := "Your coin landed on HEADS." if rand.Intn(2) == 1 { result = "Your coin landed on TAILS." } sendPost(result, botId) } else if fields[0] == "!draft" { message := os.Getenv("draft") sendPost(message, botId) } else if fields[0] == "!smack" { groupid := botResponse.GroupId league := getLeague(groupid) members := league.Response["members"] memberNum := getMemberNum(fields, members) if memberNum == -1 { memberNum = rand.Intn(len(members)) } nickname := members[memberNum]["nickname"] if nickname == os.Getenv("me") { sendCompliment(nickname, botId) return } sendInsult(nickname, botId) } else if fields[0] == "!suckup" { groupid := botResponse.GroupId league := getLeague(groupid) members := league.Response["members"] memberNum := getMemberNum(fields, members) if memberNum == -1 { memberNum = rand.Intn(len(members)) } nickname := members[memberNum]["nickname"] sendCompliment(nickname, botId) } else if fields[0] == "!standings" { sendStandings(botId) } else if fields[0] == "!stats" { if len(fields) <= 3 || len(fields) >= 6 { c.JSON(http.StatusOK, nil) return } name := fields[1] + " " + fields[2] season := fields[3] week := "" if len(fields) == 5 { week = fields[4] } player, err := queryPlayer(name) if err != nil { return } if player.Name == "" { sendPost("Player Not Found.", botId) return } stats := getStats(season, week) stat := stats[player.Id] log.Println(stat) log.Println(player.Name) pts := fmt.Sprintf("%.1f", stat["pts_half_ppr"]) message := player.Name + ": " + pts + " pts\n" if player.Position == "WR" || player.Position == "TE" { rec_tgt := fmt.Sprintf("%.0f", stat["rec_tgt"]) rec := fmt.Sprintf("%.0f", stat["rec"]) rec_yd := fmt.Sprintf("%.0f", stat["rec_yd"]) rec_td := fmt.Sprintf("%.0f", stat["rec_td"]) message = message + "- Targets: " + rec_tgt + "\n" message = message + "- Catches: " + rec + "\n" message = message + "- Yards: " + rec_yd + "\n" message = message + "- TDs: " + rec_td + "\n" sendPost(message, botId) } else if player.Position == "RB" { rush_att := fmt.Sprintf("%.0f", stat["rush_att"]) rush_yd := fmt.Sprintf("%.0f", stat["rush_yd"]) rush_td := fmt.Sprintf("%.0f", stat["rush_td"]) rec_tgt := fmt.Sprintf("%.0f", stat["rec_tgt"]) rec := fmt.Sprintf("%.0f", stat["rec"]) rec_yd := fmt.Sprintf("%.0f", stat["rec_yd"]) rec_td := fmt.Sprintf("%.0f", stat["rec_td"]) message = message + "- Rush Att: " + rush_att + "\n" message = 
message + "- Rush Yards: " + rush_yd + "\n" message = message + "- Rush TDs: " + rush_td + "\n" message = message + "- Targets: " + rec_tgt + "\n" message = message + "- Catches: " + rec + "\n" message = message + "- Rec Yards: " + rec_yd + "\n" message = message + "- Rec TDs: " + rec_td + "\n" sendPost(message, botId) } else if player.Position == "QB" { sendPost(player.Name+": "+pts+" pts", botId) pass_yd := fmt.Sprintf("%.0f", stat["pass_yd"]) pass_td := fmt.Sprintf("%.0f", stat["pass_td"]) pass_int := fmt.Sprintf("%.0f", stat["pass_int"]) rush_yd := fmt.Sprintf("%.0f", stat["rush_yd"]) rush_td := fmt.Sprintf("%.0f", stat["rush_td"]) fum_lost := fmt.Sprintf("%.0f", stat["fum_lost"]) message = message + "- Passing Yards: " + pass_yd + "\n" message = message + "- Passing TDs: " + pass_td + "\n" message = message + "- Passing INTs: " + pass_int + "\n" message = message + "- Rush Yards: " + rush_yd + "\n" message = message + "- Rush TDs: " + rush_td + "\n" message = message + "- Fumbles: " + fum_lost + "\n" } else { sendPost(player.Name+": "+pts+" pts", botId) } } else if botResponse.Text != "^this statement will not be acknowledged" { sendPost("^this statement will not be acknowledged", botId) } c.JSON(http.StatusOK, nil) } } } func getLeague(groupid string) League { url := "https://api.groupme.com/v3/groups/" + groupid + "?token=" url = url + os.Getenv("token") resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var league League json.Unmarshal(bodyBytes, &league) return league } func getRosters(league string) []map[string]interface{} { url := "https://api.sleeper.app/v1/league/" + league + "/rosters" resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var rosters []map[string]interface{} json.Unmarshal(bodyBytes, &rosters) return rosters } func getUsers(league string) []map[string]interface{}
func getStats(season string, week string) map[int]map[string]float32 { url := "https://api.sleeper.app/v1/stats/nfl/regular/" + season + "/" + week resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var stats map[int]map[string]float32 json.Unmarshal(bodyBytes, &stats) return stats } func getMemberNum(fields []string, members []map[string]string) int { memberNum := -1 for i := 0; i < len(members); i++ { if len(fields) == 1 { break } else if len(fields) == 2 { if fields[1] == "@"+members[i]["nickname"] { memberNum = i break } } else if fields[1]+" "+fields[2] == "@"+members[i]["nickname"] { memberNum = i break
{ url := "https://api.sleeper.app/v1/league/" + league + "/users" resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var users []map[string]interface{} json.Unmarshal(bodyBytes, &users) return users }
identifier_body
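getLeague, getRosters, getUsers and getStats in the handler.go records all repeat the same fetch-read-unmarshal sequence and discard every error with '_'. A sketch of a shared helper that keeps the errors (helper name ours; ioutil kept to match the source's style):

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

// getJSON fetches url and decodes the JSON body into out, propagating
// the errors the original functions ignore.
func getJSON(url string, out interface{}) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	return json.Unmarshal(body, out)
}

func main() {
	var users []map[string]interface{}
	// "LEAGUE_ID" is a placeholder, not a value from the source.
	err := getJSON("https://api.sleeper.app/v1/league/LEAGUE_ID/users", &users)
	fmt.Println(len(users), err)
}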
handler.go
") { sendPost("I am your chat bot.\nType `!coin` to flip a coin.\nType `!smack @someone` to talk trash.\nType `!suckup @someone` to show admiration.\nType `!stats player season week` for stats.\nType `!draft` for draft info.\nType `!standings` for league standings.", botId) } else { sendPost("I am your chat bot.\nType `!coin` to flip a coin.\nType `!smack @someone` to talk trash.\nType `!suckup @someone` to show admiration.\nType `!stats player season week` for stats.", botId) } } else if fields[0] == "!coin" { result := "Your coin landed on HEADS." if rand.Intn(2) == 1 { result = "Your coin landed on TAILS." } sendPost(result, botId) } else if fields[0] == "!draft" { message := os.Getenv("draft") sendPost(message, botId) } else if fields[0] == "!smack" { groupid := botResponse.GroupId league := getLeague(groupid) members := league.Response["members"] memberNum := getMemberNum(fields, members) if memberNum == -1 { memberNum = rand.Intn(len(members)) } nickname := members[memberNum]["nickname"] if nickname == os.Getenv("me") { sendCompliment(nickname, botId) return } sendInsult(nickname, botId) } else if fields[0] == "!suckup" { groupid := botResponse.GroupId league := getLeague(groupid) members := league.Response["members"] memberNum := getMemberNum(fields, members) if memberNum == -1 { memberNum = rand.Intn(len(members)) } nickname := members[memberNum]["nickname"] sendCompliment(nickname, botId) } else if fields[0] == "!standings" { sendStandings(botId) } else if fields[0] == "!stats" { if len(fields) <= 3 || len(fields) >= 6 { c.JSON(http.StatusOK, nil) return } name := fields[1] + " " + fields[2] season := fields[3] week := "" if len(fields) == 5 { week = fields[4] } player, err := queryPlayer(name) if err != nil { return } if player.Name == "" { sendPost("Player Not Found.", botId) return } stats := getStats(season, week) stat := stats[player.Id] log.Println(stat) log.Println(player.Name) pts := fmt.Sprintf("%.1f", stat["pts_half_ppr"]) message := player.Name + ": " + pts + " pts\n" if player.Position == "WR" || player.Position == "TE" { rec_tgt := fmt.Sprintf("%.0f", stat["rec_tgt"]) rec := fmt.Sprintf("%.0f", stat["rec"]) rec_yd := fmt.Sprintf("%.0f", stat["rec_yd"]) rec_td := fmt.Sprintf("%.0f", stat["rec_td"]) message = message + "- Targets: " + rec_tgt + "\n" message = message + "- Catches: " + rec + "\n" message = message + "- Yards: " + rec_yd + "\n" message = message + "- TDs: " + rec_td + "\n" sendPost(message, botId) } else if player.Position == "RB" { rush_att := fmt.Sprintf("%.0f", stat["rush_att"]) rush_yd := fmt.Sprintf("%.0f", stat["rush_yd"]) rush_td := fmt.Sprintf("%.0f", stat["rush_td"]) rec_tgt := fmt.Sprintf("%.0f", stat["rec_tgt"]) rec := fmt.Sprintf("%.0f", stat["rec"]) rec_yd := fmt.Sprintf("%.0f", stat["rec_yd"]) rec_td := fmt.Sprintf("%.0f", stat["rec_td"]) message = message + "- Rush Att: " + rush_att + "\n" message = message + "- Rush Yards: " + rush_yd + "\n" message = message + "- Rush TDs: " + rush_td + "\n" message = message + "- Targets: " + rec_tgt + "\n" message = message + "- Catches: " + rec + "\n" message = message + "- Rec Yards: " + rec_yd + "\n" message = message + "- Rec TDs: " + rec_td + "\n" sendPost(message, botId) } else if player.Position == "QB" { sendPost(player.Name+": "+pts+" pts", botId) pass_yd := fmt.Sprintf("%.0f", stat["pass_yd"]) pass_td := fmt.Sprintf("%.0f", stat["pass_td"]) pass_int := fmt.Sprintf("%.0f", stat["pass_int"]) rush_yd := fmt.Sprintf("%.0f", stat["rush_yd"]) rush_td := fmt.Sprintf("%.0f", stat["rush_td"]) fum_lost 
:= fmt.Sprintf("%.0f", stat["fum_lost"]) message = message + "- Passing Yards: " + pass_yd + "\n" message = message + "- Passing TDs: " + pass_td + "\n" message = message + "- Passing INTs: " + pass_int + "\n" message = message + "- Rush Yards: " + rush_yd + "\n" message = message + "- Rush TDs: " + rush_td + "\n" message = message + "- Fumbles: " + fum_lost + "\n" } else { sendPost(player.Name+": "+pts+" pts", botId) } } else if botResponse.Text != "^this statement will not be acknowledged" { sendPost("^this statement will not be acknowledged", botId) } c.JSON(http.StatusOK, nil) } } } func getLeague(groupid string) League { url := "https://api.groupme.com/v3/groups/" + groupid + "?token=" url = url + os.Getenv("token") resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var league League json.Unmarshal(bodyBytes, &league) return league } func getRosters(league string) []map[string]interface{} { url := "https://api.sleeper.app/v1/league/" + league + "/rosters" resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var rosters []map[string]interface{} json.Unmarshal(bodyBytes, &rosters) return rosters } func getUsers(league string) []map[string]interface{} { url := "https://api.sleeper.app/v1/league/" + league + "/users" resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var users []map[string]interface{} json.Unmarshal(bodyBytes, &users) return users } func getStats(season string, week string) map[int]map[string]float32 { url := "https://api.sleeper.app/v1/stats/nfl/regular/" + season + "/" + week resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var stats map[int]map[string]float32 json.Unmarshal(bodyBytes, &stats) return stats } func getMemberNum(fields []string, members []map[string]string) int { memberNum := -1 for i := 0; i < len(members); i++ { if len(fields) == 1 { break } else if len(fields) == 2 { if fields[1] == "@"+members[i]["nickname"] { memberNum = i break } } else if fields[1]+" "+fields[2] == "@"+members[i]["nickname"] { memberNum = i break } } return memberNum } func sendCompliment(nickname string, botId string) { url := "https://complimentr.com/api" log.Println(url) resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var compliment map[string]string json.Unmarshal(bodyBytes, &compliment) result := "@" + nickname + ", " + compliment["compliment"] sendPost(result, botId) } func sendInsult(nickname string, botId string) { nickname = strings.Replace(nickname, " ", "%20", 1) url := "https://insult.mattbas.org/api/insult?who=" + nickname log.Println(url) resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) result := "@" + string(bodyBytes) sendPost(result, botId) } func
sendStandings
identifier_name
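sendInsult in the record above escapes the nickname by replacing only the first space with %20 by hand (strings.Replace with count 1), which breaks for names containing several spaces or other reserved characters. net/url covers the general case; a brief sketch:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	nickname := "Average Joe Fan"
	// url.QueryEscape handles every space and reserved character in one call.
	fmt.Println("https://insult.mattbas.org/api/insult?who=" + url.QueryEscape(nickname))
}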
handler.go
[string]interface{} json.Unmarshal(bodyBytes, &response) dict, _ := json.Marshal(response["response"]) var bots []Bot json.Unmarshal(dict, &bots) log.Println(bots) return bots } func msgHandler() gin.HandlerFunc { return func(c *gin.Context) { var botResponse msg if c.BindJSON(&botResponse) == nil { bots := getBots() fields := strings.Fields(botResponse.Text) groupId := botResponse.GroupId botId := "" for _, bot := range bots { if bot.GroupId == groupId { botId = bot.BotId break } } log.Println(botId) log.Println(fields) if len(fields) == 0 { c.JSON(http.StatusOK, nil) return } if fields[0] == "!help" { if botResponse.GroupId == os.Getenv("htown") { sendPost("I am your chat bot.\nType `!coin` to flip a coin.\nType `!smack @someone` to talk trash.\nType `!suckup @someone` to show admiration.\nType `!stats player season week` for stats.\nType `!draft` for draft info.\nType `!standings` for league standings.", botId) } else { sendPost("I am your chat bot.\nType `!coin` to flip a coin.\nType `!smack @someone` to talk trash.\nType `!suckup @someone` to show admiration.\nType `!stats player season week` for stats.", botId) } } else if fields[0] == "!coin" { result := "Your coin landed on HEADS." if rand.Intn(2) == 1 { result = "Your coin landed on TAILS." } sendPost(result, botId) } else if fields[0] == "!draft" { message := os.Getenv("draft") sendPost(message, botId) } else if fields[0] == "!smack" { groupid := botResponse.GroupId league := getLeague(groupid) members := league.Response["members"] memberNum := getMemberNum(fields, members) if memberNum == -1 { memberNum = rand.Intn(len(members)) } nickname := members[memberNum]["nickname"] if nickname == os.Getenv("me") { sendCompliment(nickname, botId) return } sendInsult(nickname, botId) } else if fields[0] == "!suckup" { groupid := botResponse.GroupId league := getLeague(groupid) members := league.Response["members"] memberNum := getMemberNum(fields, members) if memberNum == -1 { memberNum = rand.Intn(len(members)) } nickname := members[memberNum]["nickname"] sendCompliment(nickname, botId) } else if fields[0] == "!standings"
else if fields[0] == "!stats" { if len(fields) <= 3 || len(fields) >= 6 { c.JSON(http.StatusOK, nil) return } name := fields[1] + " " + fields[2] season := fields[3] week := "" if len(fields) == 5 { week = fields[4] } player, err := queryPlayer(name) if err != nil { return } if player.Name == "" { sendPost("Player Not Found.", botId) return } stats := getStats(season, week) stat := stats[player.Id] log.Println(stat) log.Println(player.Name) pts := fmt.Sprintf("%.1f", stat["pts_half_ppr"]) message := player.Name + ": " + pts + " pts\n" if player.Position == "WR" || player.Position == "TE" { rec_tgt := fmt.Sprintf("%.0f", stat["rec_tgt"]) rec := fmt.Sprintf("%.0f", stat["rec"]) rec_yd := fmt.Sprintf("%.0f", stat["rec_yd"]) rec_td := fmt.Sprintf("%.0f", stat["rec_td"]) message = message + "- Targets: " + rec_tgt + "\n" message = message + "- Catches: " + rec + "\n" message = message + "- Yards: " + rec_yd + "\n" message = message + "- TDs: " + rec_td + "\n" sendPost(message, botId) } else if player.Position == "RB" { rush_att := fmt.Sprintf("%.0f", stat["rush_att"]) rush_yd := fmt.Sprintf("%.0f", stat["rush_yd"]) rush_td := fmt.Sprintf("%.0f", stat["rush_td"]) rec_tgt := fmt.Sprintf("%.0f", stat["rec_tgt"]) rec := fmt.Sprintf("%.0f", stat["rec"]) rec_yd := fmt.Sprintf("%.0f", stat["rec_yd"]) rec_td := fmt.Sprintf("%.0f", stat["rec_td"]) message = message + "- Rush Att: " + rush_att + "\n" message = message + "- Rush Yards: " + rush_yd + "\n" message = message + "- Rush TDs: " + rush_td + "\n" message = message + "- Targets: " + rec_tgt + "\n" message = message + "- Catches: " + rec + "\n" message = message + "- Rec Yards: " + rec_yd + "\n" message = message + "- Rec TDs: " + rec_td + "\n" sendPost(message, botId) } else if player.Position == "QB" { sendPost(player.Name+": "+pts+" pts", botId) pass_yd := fmt.Sprintf("%.0f", stat["pass_yd"]) pass_td := fmt.Sprintf("%.0f", stat["pass_td"]) pass_int := fmt.Sprintf("%.0f", stat["pass_int"]) rush_yd := fmt.Sprintf("%.0f", stat["rush_yd"]) rush_td := fmt.Sprintf("%.0f", stat["rush_td"]) fum_lost := fmt.Sprintf("%.0f", stat["fum_lost"]) message = message + "- Passing Yards: " + pass_yd + "\n" message = message + "- Passing TDs: " + pass_td + "\n" message = message + "- Passing INTs: " + pass_int + "\n" message = message + "- Rush Yards: " + rush_yd + "\n" message = message + "- Rush TDs: " + rush_td + "\n" message = message + "- Fumbles: " + fum_lost + "\n" } else { sendPost(player.Name+": "+pts+" pts", botId) } } else if botResponse.Text != "^this statement will not be acknowledged" { sendPost("^this statement will not be acknowledged", botId) } c.JSON(http.StatusOK, nil) } } } func getLeague(groupid string) League { url := "https://api.groupme.com/v3/groups/" + groupid + "?token=" url = url + os.Getenv("token") resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var league League json.Unmarshal(bodyBytes, &league) return league } func getRosters(league string) []map[string]interface{} { url := "https://api.sleeper.app/v1/league/" + league + "/rosters" resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var rosters []map[string]interface{} json.Unmarshal(bodyBytes, &rosters) return rosters } func getUsers(league string) []map[string]interface{} { url := "https://api.sleeper.app/v1/league/" + league + "/users" resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var users []map[string]interface{} json.Unmarshal(bodyBytes, &users) return 
users } func getStats(season string, week string) map[int]map[string]float32 { url := "https://api.sleeper.app/v1/stats/nfl/regular/" + season + "/" + week resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var stats map[int]map[string]float32 json.Unmarshal(bodyBytes, &stats) return stats } func getMemberNum(fields []string, members []map[string]string) int { memberNum := -1 for i := 0; i < len(members); i++ { if len(fields) == 1 { break } else if len(fields) == 2 { if fields[1] == "@"+members[i]["nickname"] { memberNum = i break } } else if fields[1]+" "+fields[2] == "@"+members[i]["nickname"] { memberNum = i break
{ sendStandings(botId) }
conditional_block
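The !stats branches build their reply by appending rows of the form "- Label: value" one at a time, formatting each float with %.0f. A small helper captures that repeated pattern (helper name ours):

package main

import "fmt"

// statLine renders one "- Label: value" row exactly as the !stats
// branches do, with the float rounded to a whole number.
func statLine(label string, v float32) string {
	return fmt.Sprintf("- %s: %.0f\n", label, v)
}

func main() {
	stat := map[string]float32{"rec_tgt": 11, "rec": 8}
	msg := "Player: 17.5 pts\n" +
		statLine("Targets", stat["rec_tgt"]) +
		statLine("Catches", stat["rec"])
	fmt.Print(msg)
}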
handler.go
map[string]interface{} json.Unmarshal(bodyBytes, &response) dict, _ := json.Marshal(response["response"]) var bots []Bot json.Unmarshal(dict, &bots) log.Println(bots) return bots } func msgHandler() gin.HandlerFunc { return func(c *gin.Context) { var botResponse msg if c.BindJSON(&botResponse) == nil { bots := getBots() fields := strings.Fields(botResponse.Text) groupId := botResponse.GroupId botId := "" for _, bot := range bots { if bot.GroupId == groupId { botId = bot.BotId break
} log.Println(botId) log.Println(fields) if len(fields) == 0 { c.JSON(http.StatusOK, nil) return } if fields[0] == "!help" { if botResponse.GroupId == os.Getenv("htown") { sendPost("I am your chat bot.\nType `!coin` to flip a coin.\nType `!smack @someone` to talk trash.\nType `!suckup @someone` to show admiration.\nType `!stats player season week` for stats.\nType `!draft` for draft info.\nType `!standings` for league standings.", botId) } else { sendPost("I am your chat bot.\nType `!coin` to flip a coin.\nType `!smack @someone` to talk trash.\nType `!suckup @someone` to show admiration.\nType `!stats player season week` for stats.", botId) } } else if fields[0] == "!coin" { result := "Your coin landed on HEADS." if rand.Intn(2) == 1 { result = "Your coin landed on TAILS." } sendPost(result, botId) } else if fields[0] == "!draft" { message := os.Getenv("draft") sendPost(message, botId) } else if fields[0] == "!smack" { groupid := botResponse.GroupId league := getLeague(groupid) members := league.Response["members"] memberNum := getMemberNum(fields, members) if memberNum == -1 { memberNum = rand.Intn(len(members)) } nickname := members[memberNum]["nickname"] if nickname == os.Getenv("me") { sendCompliment(nickname, botId) return } sendInsult(nickname, botId) } else if fields[0] == "!suckup" { groupid := botResponse.GroupId league := getLeague(groupid) members := league.Response["members"] memberNum := getMemberNum(fields, members) if memberNum == -1 { memberNum = rand.Intn(len(members)) } nickname := members[memberNum]["nickname"] sendCompliment(nickname, botId) } else if fields[0] == "!standings" { sendStandings(botId) } else if fields[0] == "!stats" { if len(fields) <= 3 || len(fields) >= 6 { c.JSON(http.StatusOK, nil) return } name := fields[1] + " " + fields[2] season := fields[3] week := "" if len(fields) == 5 { week = fields[4] } player, err := queryPlayer(name) if err != nil { return } if player.Name == "" { sendPost("Player Not Found.", botId) return } stats := getStats(season, week) stat := stats[player.Id] log.Println(stat) log.Println(player.Name) pts := fmt.Sprintf("%.1f", stat["pts_half_ppr"]) message := player.Name + ": " + pts + " pts\n" if player.Position == "WR" || player.Position == "TE" { rec_tgt := fmt.Sprintf("%.0f", stat["rec_tgt"]) rec := fmt.Sprintf("%.0f", stat["rec"]) rec_yd := fmt.Sprintf("%.0f", stat["rec_yd"]) rec_td := fmt.Sprintf("%.0f", stat["rec_td"]) message = message + "- Targets: " + rec_tgt + "\n" message = message + "- Catches: " + rec + "\n" message = message + "- Yards: " + rec_yd + "\n" message = message + "- TDs: " + rec_td + "\n" sendPost(message, botId) } else if player.Position == "RB" { rush_att := fmt.Sprintf("%.0f", stat["rush_att"]) rush_yd := fmt.Sprintf("%.0f", stat["rush_yd"]) rush_td := fmt.Sprintf("%.0f", stat["rush_td"]) rec_tgt := fmt.Sprintf("%.0f", stat["rec_tgt"]) rec := fmt.Sprintf("%.0f", stat["rec"]) rec_yd := fmt.Sprintf("%.0f", stat["rec_yd"]) rec_td := fmt.Sprintf("%.0f", stat["rec_td"]) message = message + "- Rush Att: " + rush_att + "\n" message = message + "- Rush Yards: " + rush_yd + "\n" message = message + "- Rush TDs: " + rush_td + "\n" message = message + "- Targets: " + rec_tgt + "\n" message = message + "- Catches: " + rec + "\n" message = message + "- Rec Yards: " + rec_yd + "\n" message = message + "- Rec TDs: " + rec_td + "\n" sendPost(message, botId) } else if player.Position == "QB" { sendPost(player.Name+": "+pts+" pts", botId) pass_yd := fmt.Sprintf("%.0f", stat["pass_yd"]) pass_td := fmt.Sprintf("%.0f", 
stat["pass_td"]) pass_int := fmt.Sprintf("%.0f", stat["pass_int"]) rush_yd := fmt.Sprintf("%.0f", stat["rush_yd"]) rush_td := fmt.Sprintf("%.0f", stat["rush_td"]) fum_lost := fmt.Sprintf("%.0f", stat["fum_lost"]) message = message + "- Passing Yards: " + pass_yd + "\n" message = message + "- Passing TDs: " + pass_td + "\n" message = message + "- Passing INTs: " + pass_int + "\n" message = message + "- Rush Yards: " + rush_yd + "\n" message = message + "- Rush TDs: " + rush_td + "\n" message = message + "- Fumbles: " + fum_lost + "\n" } else { sendPost(player.Name+": "+pts+" pts", botId) } } else if botResponse.Text != "^this statement will not be acknowledged" { sendPost("^this statement will not be acknowledged", botId) } c.JSON(http.StatusOK, nil) } } } func getLeague(groupid string) League { url := "https://api.groupme.com/v3/groups/" + groupid + "?token=" url = url + os.Getenv("token") resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var league League json.Unmarshal(bodyBytes, &league) return league } func getRosters(league string) []map[string]interface{} { url := "https://api.sleeper.app/v1/league/" + league + "/rosters" resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var rosters []map[string]interface{} json.Unmarshal(bodyBytes, &rosters) return rosters } func getUsers(league string) []map[string]interface{} { url := "https://api.sleeper.app/v1/league/" + league + "/users" resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var users []map[string]interface{} json.Unmarshal(bodyBytes, &users) return users } func getStats(season string, week string) map[int]map[string]float32 { url := "https://api.sleeper.app/v1/stats/nfl/regular/" + season + "/" + week resp, _ := http.Get(url) defer resp.Body.Close() bodyBytes, _ := ioutil.ReadAll(resp.Body) var stats map[int]map[string]float32 json.Unmarshal(bodyBytes, &stats) return stats } func getMemberNum(fields []string, members []map[string]string) int { memberNum := -1 for i := 0; i < len(members); i++ { if len(fields) == 1 { break } else if len(fields) == 2 { if fields[1] == "@"+members[i]["nickname"] { memberNum = i break } } else if fields[1]+" "+fields[2] == "@"+members[i]["nickname"] { memberNum = i break
}
random_line_split
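msgHandler dispatches on fields[0] through a long if/else chain. A table-driven version of the same first-token dispatch is sketched below (handler bodies are stand-ins, not the source's functions):

package main

import "fmt"

// commands maps each "!" token to its handler, replacing the chained
// fields[0] comparisons with a single lookup.
var commands = map[string]func(fields []string, botId string){
	"!coin":      func(_ []string, id string) { fmt.Println("flip a coin via bot", id) },
	"!standings": func(_ []string, id string) { fmt.Println("standings via bot", id) },
}

func dispatch(fields []string, botId string) {
	if len(fields) == 0 {
		return // nothing to do, as in the source's early return
	}
	if h, ok := commands[fields[0]]; ok {
		h(fields, botId)
	}
}

func main() { dispatch([]string{"!coin"}, "bot-123") }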
install.rs
("S") .long(OPT_SUFFIX) .help("(unimplemented) override the usual backup suffix") .value_name("SUFFIX") ) .arg( // TODO implement flag Arg::with_name(OPT_TARGET_DIRECTORY) .short("t") .long(OPT_TARGET_DIRECTORY) .help("(unimplemented) move all SOURCE arguments into DIRECTORY") .value_name("DIRECTORY") ) .arg( // TODO implement flag Arg::with_name(OPT_NO_TARGET_DIRECTORY) .short("T") .long(OPT_NO_TARGET_DIRECTORY) .help("(unimplemented) treat DEST as a normal file") ) .arg( Arg::with_name(OPT_VERBOSE) .short("v") .long(OPT_VERBOSE) .help("explain what is being done") ) .arg( // TODO implement flag Arg::with_name(OPT_PRESERVE_CONTEXT) .short("P") .long(OPT_PRESERVE_CONTEXT) .help("(unimplemented) preserve security context") ) .arg( // TODO implement flag Arg::with_name(OPT_CONTEXT) .short("Z") .long(OPT_CONTEXT) .help("(unimplemented) set security context of files and directories") .value_name("CONTEXT") ) .arg(Arg::with_name(ARG_FILES).multiple(true).takes_value(true)) .get_matches_from(args); let paths: Vec<String> = matches .values_of(ARG_FILES) .map(|v| v.map(ToString::to_string).collect()) .unwrap_or_default(); if let Err(s) = check_unimplemented(&matches) { show_error!("Unimplemented feature: {}", s); return 2; } let behavior = match behavior(&matches) { Ok(x) => x, Err(ret) => { return ret; } }; match behavior.main_function { MainFunction::Directory => directory(paths, behavior), MainFunction::Standard => standard(paths, behavior), } } /// Check for unimplemented command line arguments. /// /// Either return the degenerate Ok value, or an Err with string. /// /// # Errors /// /// Error datum is a string of the unimplemented argument. /// /// fn check_unimplemented<'a>(matches: &ArgMatches) -> Result<(), &'a str> { if matches.is_present(OPT_BACKUP) { Err("--backup") } else if matches.is_present(OPT_BACKUP_2) { Err("-b") } else if matches.is_present(OPT_COMPARE) { Err("--compare, -C") } else if matches.is_present(OPT_CREATED) { Err("-D") } else if matches.is_present(OPT_GROUP) { Err("--group, -g") } else if matches.is_present(OPT_OWNER) { Err("--owner, -o") } else if matches.is_present(OPT_PRESERVE_TIMESTAMPS) { Err("--preserve-timestamps, -p") } else if matches.is_present(OPT_STRIP) { Err("--strip, -s") } else if matches.is_present(OPT_STRIP_PROGRAM) { Err("--strip-program") } else if matches.is_present(OPT_SUFFIX) { Err("--suffix, -S") } else if matches.is_present(OPT_TARGET_DIRECTORY) { Err("--target-directory, -t") } else if matches.is_present(OPT_NO_TARGET_DIRECTORY) { Err("--no-target-directory, -T") } else if matches.is_present(OPT_PRESERVE_CONTEXT) { Err("--preserve-context, -P") } else if matches.is_present(OPT_CONTEXT) { Err("--context, -Z") } else { Ok(()) } } /// Determine behavior, given command line arguments. /// /// If successful, returns a filled-out Behavior struct. /// /// # Errors /// /// In event of failure, returns an integer intended as a program return code. 
/// fn behavior(matches: &ArgMatches) -> Result<Behavior, i32> { let main_function = if matches.is_present("directory") { MainFunction::Directory } else { MainFunction::Standard }; let considering_dir: bool = MainFunction::Directory == main_function; let specified_mode: Option<u32> = if matches.is_present(OPT_MODE) { match matches.value_of(OPT_MODE) { Some(x) => match mode::parse(&x[..], considering_dir) { Ok(y) => Some(y), Err(err) => { show_error!("Invalid mode string: {}", err); return Err(1); } }, None => { show_error!( "option '--mode' requires an argument\n \ Try '{} --help' for more information.", executable!() ); return Err(1); } } } else { None }; let backup_suffix = if matches.is_present(OPT_SUFFIX) { match matches.value_of(OPT_SUFFIX) { Some(x) => x, None => { show_error!( "option '--suffix' requires an argument\n\ Try '{} --help' for more information.", executable!() ); return Err(1); } } } else { "~" }; Ok(Behavior { main_function, specified_mode, suffix: backup_suffix.to_string(), verbose: matches.is_present(OPT_VERBOSE), }) } /// Creates directories. /// /// GNU man pages describe this functionality as creating 'all components of /// the specified directories'. /// /// Returns an integer intended as a program return code. /// fn directory(paths: Vec<String>, b: Behavior) -> i32 { if paths.is_empty() { println!("{} with -d requires at least one argument.", executable!()); 1 } else { let mut all_successful = true; for directory in paths.iter() { let path = Path::new(directory); // if the path already exists, don't try to create it again if !path.exists() { if let Err(e) = fs::create_dir(directory) { show_info!("{}: {}", path.display(), e.to_string()); all_successful = false; } } if mode::chmod(&path, b.mode()).is_err() { all_successful = false; } if b.verbose { show_info!("created directory '{}'", path.display()); } } if all_successful { 0 } else { 1 } } } /// Test if the path is a new file path that can be /// created immediately fn is_new_file_path(path: &Path) -> bool { !path.exists() && (path.parent().map(Path::is_dir).unwrap_or(true) || path.parent().unwrap().to_string_lossy().is_empty()) // In case of a simple file } /// Perform an install, given a list of paths and behavior. /// /// Returns an integer intended as a program return code. /// fn standard(paths: Vec<String>, b: Behavior) -> i32 { if paths.len() < 2 { println!("{} requires at least 2 arguments.", executable!()); 1 } else { let sources = &paths[0..paths.len() - 1] .iter() .map(PathBuf::from) .collect::<Vec<_>>(); let target = Path::new(paths.last().unwrap()); if (target.is_file() || is_new_file_path(target)) && sources.len() == 1 { /* If the target already exists or is directly creatable */ copy_file_to_file(&sources[0], &target.to_path_buf(), &b) } else { copy_files_into_dir(sources, &target.to_path_buf(), &b) } } } /// Copy some files into a directory. /// /// Prints verbose information and error messages. /// Returns an integer intended as a program return code. /// /// # Parameters /// /// _files_ must all exist as non-directories. /// _target_dir_ must be a directory. 
/// fn copy_files_into_dir(files: &[PathBuf], target_dir: &PathBuf, b: &Behavior) -> i32 { if !target_dir.is_dir() { show_error!("target ‘{}’ is not a directory", target_dir.display()); return 1; } let mut all_successful = true; for sourcepath in files.iter() { let targetpath = match sourcepath.as_os_str().to_str() { Some(name) => target_dir.join(name), None => { show_error!( "cannot stat ‘{}’: No such file or directory", sourcepath.display() ); all_successful = false; continue; } }; if copy(sourcepath, &targetpath, b).is_err() { all_successful = false; } } if all_successful { 0 } else { 1 } } /// Copy a file to another file. /// /// Prints verbose information and error messages. /// Returns an integer intended as a program return code. /// /// # Parameters /// /// _file_ must exist as a non-directory. /// _target_ must be a non-directory /// fn copy_file_to_file(file: &PathBuf, target: &PathBuf, b: &Behavior) -> i32 { if copy(file, &target, b).is_err() { 1 } else {
0 } } ///
conditional_block
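standard() in the install.rs records picks between two code paths: a single source plus a target that is a file or immediately creatable goes file-to-file, everything else is copied into a directory, with is_new_file_path checking the parent. A sketch of that decision, kept in Go to match the other examples (all names ours):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func isFile(p string) bool {
	info, err := os.Stat(p)
	return err == nil && !info.IsDir()
}

// isNewFilePath follows the Rust is_new_file_path: the path must not exist
// and its parent must be a directory (a bare file name counts too).
func isNewFilePath(p string) bool {
	if _, err := os.Stat(p); err == nil {
		return false
	}
	parent := filepath.Dir(p)
	if parent == "." {
		return true // simple file name in the current directory
	}
	info, err := os.Stat(parent)
	return err == nil && info.IsDir()
}

// installMode mirrors standard()'s branch between the two copy paths.
func installMode(sources []string, target string) string {
	if len(sources) == 1 && (isFile(target) || isNewFilePath(target)) {
		return "file-to-file"
	}
	return "into-directory"
}

func main() {
	fmt.Println(installMode([]string{"a.txt"}, "b.txt"))
}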
install.rs
short("S") .long(OPT_SUFFIX) .help("(unimplemented) override the usual backup suffix") .value_name("SUFFIX") ) .arg( // TODO implement flag Arg::with_name(OPT_TARGET_DIRECTORY) .short("t") .long(OPT_TARGET_DIRECTORY) .help("(unimplemented) move all SOURCE arguments into DIRECTORY") .value_name("DIRECTORY") ) .arg( // TODO implement flag Arg::with_name(OPT_NO_TARGET_DIRECTORY) .short("T") .long(OPT_NO_TARGET_DIRECTORY) .help("(unimplemented) treat DEST as a normal file") ) .arg( Arg::with_name(OPT_VERBOSE) .short("v") .long(OPT_VERBOSE) .help("explain what is being done") ) .arg( // TODO implement flag Arg::with_name(OPT_PRESERVE_CONTEXT) .short("P") .long(OPT_PRESERVE_CONTEXT) .help("(unimplemented) preserve security context") ) .arg( // TODO implement flag Arg::with_name(OPT_CONTEXT) .short("Z") .long(OPT_CONTEXT) .help("(unimplemented) set security context of files and directories") .value_name("CONTEXT") ) .arg(Arg::with_name(ARG_FILES).multiple(true).takes_value(true)) .get_matches_from(args); let paths: Vec<String> = matches .values_of(ARG_FILES) .map(|v| v.map(ToString::to_string).collect()) .unwrap_or_default(); if let Err(s) = check_unimplemented(&matches) { show_error!("Unimplemented feature: {}", s); return 2; } let behavior = match behavior(&matches) { Ok(x) => x, Err(ret) => { return ret; } }; match behavior.main_function { MainFunction::Directory => directory(paths, behavior), MainFunction::Standard => standard(paths, behavior), } } /// Check for unimplemented command line arguments. /// /// Either return the degenerate Ok value, or an Err with string. /// /// # Errors /// /// Error datum is a string of the unimplemented argument. /// /// fn check_unimplemented<'a>(matches: &ArgMatches) -> Result<(), &'a str> { if matches.is_present(OPT_BACKUP) { Err("--backup") } else if matches.is_present(OPT_BACKUP_2) { Err("-b") } else if matches.is_present(OPT_COMPARE) { Err("--compare, -C") } else if matches.is_present(OPT_CREATED) { Err("-D") } else if matches.is_present(OPT_GROUP) { Err("--group, -g") } else if matches.is_present(OPT_OWNER) { Err("--owner, -o") } else if matches.is_present(OPT_PRESERVE_TIMESTAMPS) { Err("--preserve-timestamps, -p") } else if matches.is_present(OPT_STRIP) { Err("--strip, -s") } else if matches.is_present(OPT_STRIP_PROGRAM) { Err("--strip-program") } else if matches.is_present(OPT_SUFFIX) { Err("--suffix, -S") } else if matches.is_present(OPT_TARGET_DIRECTORY) { Err("--target-directory, -t") } else if matches.is_present(OPT_NO_TARGET_DIRECTORY) { Err("--no-target-directory, -T") } else if matches.is_present(OPT_PRESERVE_CONTEXT) { Err("--preserve-context, -P") } else if matches.is_present(OPT_CONTEXT) { Err("--context, -Z") } else { Ok(()) } } /// Determine behavior, given command line arguments. /// /// If successful, returns a filled-out Behavior struct. /// /// # Errors /// /// In event of failure, returns an integer intended as a program return code. 
/// fn behavior(matches: &ArgMatches) -> Result<Behavior, i32> { let main_function = if matches.is_present("directory") { MainFunction::Directory } else { MainFunction::Standard }; let considering_dir: bool = MainFunction::Directory == main_function; let specified_mode: Option<u32> = if matches.is_present(OPT_MODE) { match matches.value_of(OPT_MODE) { Some(x) => match mode::parse(&x[..], considering_dir) { Ok(y) => Some(y), Err(err) => { show_error!("Invalid mode string: {}", err); return Err(1); } }, None => { show_error!( "option '--mode' requires an argument\n \ Try '{} --help' for more information.", executable!() ); return Err(1); } } } else { None }; let backup_suffix = if matches.is_present(OPT_SUFFIX) { match matches.value_of(OPT_SUFFIX) { Some(x) => x, None => { show_error!( "option '--suffix' requires an argument\n\ Try '{} --help' for more information.", executable!() ); return Err(1); } } } else { "~" }; Ok(Behavior { main_function, specified_mode, suffix: backup_suffix.to_string(), verbose: matches.is_present(OPT_VERBOSE), }) } /// Creates directories. /// /// GNU man pages describe this functionality as creating 'all components of /// the specified directories'. /// /// Returns an integer intended as a program return code. /// fn directory(paths: Vec<String>, b: Behavior) -> i32 { if paths.is_empty() { println!("{} with -d requires at least one argument.", executable!()); 1 } else { let mut all_successful = true; for directory in paths.iter() { let path = Path::new(directory); // if the path already exists, don't try to create it again if !path.exists() { if let Err(e) = fs::create_dir(directory) { show_info!("{}: {}", path.display(), e.to_string()); all_successful = false; } } if mode::chmod(&path, b.mode()).is_err() { all_successful = false; } if b.verbose { show_info!("created directory '{}'", path.display()); } } if all_successful { 0 } else { 1 } } } /// Test if the path is a new file path that can be /// created immediately fn is_new_file_path(path: &Path) -> bool { !path.exists() && (path.parent().map(Path::is_dir).unwrap_or(true) || path.parent().unwrap().to_string_lossy().is_empty()) // In case of a simple file } /// Perform an install, given a list of paths and behavior. /// /// Returns an integer intended as a program return code. /// fn standard(paths: Vec<String>, b: Behavior) -> i32 { if paths.len() < 2 { println!("{} requires at least 2 arguments.", executable!()); 1 } else { let sources = &paths[0..paths.len() - 1] .iter() .map(PathBuf::from) .collect::<Vec<_>>(); let target = Path::new(paths.last().unwrap()); if (target.is_file() || is_new_file_path(target)) && sources.len() == 1 { /* If the target already exists or is directly creatable */ copy_file_to_file(&sources[0], &target.to_path_buf(), &b) } else { copy_files_into_dir(sources, &target.to_path_buf(), &b) } } } /// Copy some files into a directory. /// /// Prints verbose information and error messages. /// Returns an integer intended as a program return code. /// /// # Parameters /// /// _files_ must all exist as non-directories. /// _target_dir_ must be a directory. 
/// fn copy_files_into_dir(files: &[PathBuf], target_dir: &PathBuf, b: &Behavior) -> i32 { if !target_dir.is_dir() { show_error!("target ‘{}’ is not a directory", target_dir.display()); return 1; } let mut all_successful = true; for sourcepath in files.iter() { let targetpath = match sourcepath.as_os_str().to_str() { Some(name) => target_dir.join(name), None => { show_error!( "cannot stat ‘{}’: No such file or directory", sourcepath.display() ); all_successful = false; continue; } }; if copy(sourcepath, &targetpath, b).is_err() { all_successful = false; } } if all_successful { 0 } else { 1 } } /// Copy a file to another file. /// /// Prints verbose information and error messages. /// Returns an integer intended as a program return code. /// /// # Parameters /// /// _file_ must exist as a non-directory. /// _target_ must be a non-directory /// fn copy_file_to_file(file: &PathBuf, target: &PathBuf, b: &Behavior) -> i32 { if
copy(file, &target, b).is_err() { 1 } else { 0 } } /// Co
identifier_body
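// A minimal, self-contained sketch of the gating pattern in
// check_unimplemented above: each recognized-but-unimplemented flag maps to
// an Err carrying its display name, and uumain turns the first hit into
// exit code 2 before any filesystem work happens. The two flags listed here
// are just examples; only the Result plumbing mirrors the code above.
fn check_unimplemented_sketch(present: &[&str]) -> Result<(), &'static str> {
    for &(flag, label) in &[("backup", "--backup"), ("strip", "--strip, -s")] {
        if present.contains(&flag) {
            return Err(label); // first unimplemented flag wins
        }
    }
    Ok(())
}

fn exit_code_sketch(present: &[&str]) -> i32 {
    match check_unimplemented_sketch(present) {
        Err(s) => {
            eprintln!("Unimplemented feature: {}", s);
            2 // reserved for "recognized but unimplemented"
        }
        Ok(()) => 0,
    }
}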
install.rs
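// standard() above dispatches on is_new_file_path: with exactly one source
// and a target that either already is a file or does not exist but could be
// created directly (its parent is a directory, or the parent is empty, i.e.
// a bare file name), install copies file-to-file; anything else is treated
// as an install-into-directory. A sketch of that decision using std::path
// only; the function names are local to this sketch.
use std::path::Path;

fn is_directly_creatable(target: &Path) -> bool {
    !target.exists()
        && target
            .parent()
            .map(|p| p.is_dir() || p.as_os_str().is_empty())
            .unwrap_or(true)
}

fn file_to_file_dispatch(target: &Path, n_sources: usize) -> bool {
    (target.is_file() || is_directly_creatable(target)) && n_sources == 1
}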
static OPT_STRIP_PROGRAM: &str = "strip-program"; static OPT_SUFFIX: &str = "suffix"; static OPT_TARGET_DIRECTORY: &str = "target-directory"; static OPT_NO_TARGET_DIRECTORY: &str = "no-target-directory"; static OPT_VERBOSE: &str = "verbose"; static OPT_PRESERVE_CONTEXT: &str = "preserve-context"; static OPT_CONTEXT: &str = "context"; static ARG_FILES: &str = "files"; fn get_usage() -> String { format!("{0} [OPTION]... [FILE]...", executable!()) } /// Main install utility function, called from main.rs. /// /// Returns a program return code. /// pub fn uumain(args: impl uucore::Args) -> i32 { let usage = get_usage(); let matches = App::new(executable!()) .version(VERSION) .about(ABOUT) .usage(&usage[..]) .arg( Arg::with_name(OPT_BACKUP) .long(OPT_BACKUP) .help("(unimplemented) make a backup of each existing destination file") .value_name("CONTROL") ) .arg( // TODO implement flag Arg::with_name(OPT_BACKUP_2) .short("b") .help("(unimplemented) like --backup but does not accept an argument") ) .arg( Arg::with_name(OPT_IGNORED) .short("c") .help("ignored") ) .arg( // TODO implement flag Arg::with_name(OPT_COMPARE) .short("C") .long(OPT_COMPARE) .help("(unimplemented) compare each pair of source and destination files, and in some cases, do not modify the destination at all") ) .arg( Arg::with_name(OPT_DIRECTORY) .short("d") .long(OPT_DIRECTORY) .help("treat all arguments as directory names. create all components of the specified directories") ) .arg( // TODO implement flag Arg::with_name(OPT_CREATED) .short("D") .help("(unimplemented) create all leading components of DEST except the last, then copy SOURCE to DEST") ) .arg( // TODO implement flag Arg::with_name(OPT_GROUP) .short("g") .long(OPT_GROUP) .help("(unimplemented) set group ownership, instead of process's current group") .value_name("GROUP") ) .arg( Arg::with_name(OPT_MODE) .short("m") .long(OPT_MODE) .help("set permission mode (as in chmod), instead of rwxr-xr-x") .value_name("MODE") ) .arg( // TODO implement flag Arg::with_name(OPT_OWNER) .short("o") .long(OPT_OWNER) .help("(unimplemented) set ownership (super-user only)") .value_name("OWNER") ) .arg( // TODO implement flag Arg::with_name(OPT_PRESERVE_TIMESTAMPS) .short("p") .long(OPT_PRESERVE_TIMESTAMPS) .help("(unimplemented) apply access/modification times of SOURCE files to corresponding destination files") ) .arg( // TODO implement flag Arg::with_name(OPT_STRIP) .short("s") .long(OPT_STRIP) .help("(unimplemented) strip symbol tables") ) .arg( // TODO implement flag Arg::with_name(OPT_STRIP_PROGRAM) .long(OPT_STRIP_PROGRAM) .help("(unimplemented) program used to strip binaries") .value_name("PROGRAM") ) .arg( // TODO implement flag Arg::with_name(OPT_SUFFIX) .short("S") .long(OPT_SUFFIX) .help("(unimplemented) override the usual backup suffix") .value_name("SUFFIX") ) .arg( // TODO implement flag Arg::with_name(OPT_TARGET_DIRECTORY) .short("t") .long(OPT_TARGET_DIRECTORY) .help("(unimplemented) move all SOURCE arguments into DIRECTORY") .value_name("DIRECTORY") ) .arg( // TODO implement flag Arg::with_name(OPT_NO_TARGET_DIRECTORY) .short("T") .long(OPT_NO_TARGET_DIRECTORY) .help("(unimplemented) treat DEST as a normal file") ) .arg( Arg::with_name(OPT_VERBOSE) .short("v") .long(OPT_VERBOSE) .help("explain what is being done") ) .arg( // TODO implement flag Arg::with_name(OPT_PRESERVE_CONTEXT) .short("P") .long(OPT_PRESERVE_CONTEXT) .help("(unimplemented) preserve security context") ) .arg( // TODO implement flag Arg::with_name(OPT_CONTEXT) .short("Z") .long(OPT_CONTEXT) 
.help("(unimplemented) set security context of files and directories") .value_name("CONTEXT") ) .arg(Arg::with_name(ARG_FILES).multiple(true).takes_value(true)) .get_matches_from(args); let paths: Vec<String> = matches .values_of(ARG_FILES) .map(|v| v.map(ToString::to_string).collect()) .unwrap_or_default(); if let Err(s) = check_unimplemented(&matches) { show_error!("Unimplemented feature: {}", s); return 2; } let behavior = match behavior(&matches) { Ok(x) => x, Err(ret) => { return ret; } }; match behavior.main_function { MainFunction::Directory => directory(paths, behavior), MainFunction::Standard => standard(paths, behavior), } } /// Check for unimplemented command line arguments. /// /// Either return the degenerate Ok value, or an Err with string. /// /// # Errors /// /// Error datum is a string of the unimplemented argument. /// /// fn check_unimplemented<'a>(matches: &ArgMatches) -> Result<(), &'a str> { if matches.is_present(OPT_BACKUP) { Err("--backup") } else if matches.is_present(OPT_BACKUP_2) { Err("-b") } else if matches.is_present(OPT_COMPARE) { Err("--compare, -C") } else if matches.is_present(OPT_CREATED) { Err("-D") } else if matches.is_present(OPT_GROUP) { Err("--group, -g") } else if matches.is_present(OPT_OWNER) { Err("--owner, -o") } else if matches.is_present(OPT_PRESERVE_TIMESTAMPS) { Err("--preserve-timestamps, -p") } else if matches.is_present(OPT_STRIP) { Err("--strip, -s") } else if matches.is_present(OPT_STRIP_PROGRAM) { Err("--strip-program") } else if matches.is_present(OPT_SUFFIX) { Err("--suffix, -S") } else if matches.is_present(OPT_TARGET_DIRECTORY) { Err("--target-directory, -t") } else if matches.is_present(OPT_NO_TARGET_DIRECTORY) { Err("--no-target-directory, -T") } else if matches.is_present(OPT_PRESERVE_CONTEXT) { Err("--preserve-context, -P") } else if matches.is_present(OPT_CONTEXT) { Err("--context, -Z") } else { Ok(()) } } /// Determine behavior, given command line arguments. /// /// If successful, returns a filled-out Behavior struct. /// /// # Errors /// /// In event of failure, returns an integer intended as a program return code. /// fn
(matches: &ArgMatches) -> Result<Behavior, i32> { let main_function = if matches.is_present("directory") { MainFunction::Directory } else { MainFunction::Standard }; let considering_dir: bool = MainFunction::Directory == main_function; let specified_mode: Option<u32> = if matches.is_present(OPT_MODE) { match matches.value_of(OPT_MODE) { Some(x) => match mode::parse(&x[..], considering_dir) { Ok(y) => Some(y), Err(err) => { show_error!("Invalid mode string: {}", err); return Err(1); } }, None => { show_error!( "option '--mode' requires an argument\n \ Try '{} --help' for more information.", executable!() ); return Err(1); } } } else { None }; let backup_suffix = if matches.is_present(OPT_SUFFIX) { match matches.value_of(OPT_SUFFIX) { Some(x) => x, None => { show_error!( "option '--suffix' requires an argument\n\ Try '{} --help' for more information.", executable!() ); return Err(1); } } } else { "~" }; Ok(Behavior { main_function, specified_mode, suffix: backup_suffix.to_string(), verbose: matches.is_present(OPT_VERBOSE), }) } /// Creates directories. /// /// GNU man pages describe this functionality as creating 'all components of /// the specified directories'. /// /// Returns an integer intended as a program return code. /// fn directory(paths: Vec<String>,
behavior
identifier_name
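// The FIM middle above is the identifier `behavior`; the Behavior struct it
// fills in resolves its chmod mode in exactly one place, as the impl block
// later in this dump shows. DEFAULT_MODE's concrete value is an assumption
// here (0o755, i.e. the rwxr-xr-x named in the -m help text), and the
// struct is renamed to avoid clashing with the real one.
const DEFAULT_MODE: u32 = 0o755; // assumed from "instead of rwxr-xr-x"

struct BehaviorSketch {
    specified_mode: Option<u32>,
}

impl BehaviorSketch {
    fn mode(&self) -> u32 {
        self.specified_mode.unwrap_or(DEFAULT_MODE)
    }
}

#[test]
fn mode_falls_back_to_default() {
    assert_eq!(BehaviorSketch { specified_mode: None }.mode(), 0o755);
    assert_eq!(BehaviorSketch { specified_mode: Some(0o644) }.mode(), 0o644);
}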
install.rs
/// Create directories Directory, /// Install files to locations (primary functionality) Standard, } impl Behavior { /// Determine the mode for chmod after copy. pub fn mode(&self) -> u32 { match self.specified_mode { Some(x) => x, None => DEFAULT_MODE, } } } static ABOUT: &str = "Copy SOURCE to DEST or multiple SOURCE(s) to the existing DIRECTORY, while setting permission modes and owner/group"; static VERSION: &str = env!("CARGO_PKG_VERSION"); static OPT_COMPARE: &str = "compare"; static OPT_BACKUP: &str = "backup"; static OPT_BACKUP_2: &str = "backup2"; static OPT_DIRECTORY: &str = "directory"; static OPT_IGNORED: &str = "ignored"; static OPT_CREATED: &str = "created"; static OPT_GROUP: &str = "group"; static OPT_MODE: &str = "mode"; static OPT_OWNER: &str = "owner"; static OPT_PRESERVE_TIMESTAMPS: &str = "preserve-timestamps"; static OPT_STRIP: &str = "strip"; static OPT_STRIP_PROGRAM: &str = "strip-program"; static OPT_SUFFIX: &str = "suffix"; static OPT_TARGET_DIRECTORY: &str = "target-directory"; static OPT_NO_TARGET_DIRECTORY: &str = "no-target-directory"; static OPT_VERBOSE: &str = "verbose"; static OPT_PRESERVE_CONTEXT: &str = "preserve-context"; static OPT_CONTEXT: &str = "context"; static ARG_FILES: &str = "files"; fn get_usage() -> String { format!("{0} [OPTION]... [FILE]...", executable!()) } /// Main install utility function, called from main.rs. /// /// Returns a program return code. /// pub fn uumain(args: impl uucore::Args) -> i32 { let usage = get_usage(); let matches = App::new(executable!()) .version(VERSION) .about(ABOUT) .usage(&usage[..]) .arg( Arg::with_name(OPT_BACKUP) .long(OPT_BACKUP) .help("(unimplemented) make a backup of each existing destination file") .value_name("CONTROL") ) .arg( // TODO implement flag Arg::with_name(OPT_BACKUP_2) .short("b") .help("(unimplemented) like --backup but does not accept an argument") ) .arg( Arg::with_name(OPT_IGNORED) .short("c") .help("ignored") ) .arg( // TODO implement flag Arg::with_name(OPT_COMPARE) .short("C") .long(OPT_COMPARE) .help("(unimplemented) compare each pair of source and destination files, and in some cases, do not modify the destination at all") ) .arg( Arg::with_name(OPT_DIRECTORY) .short("d") .long(OPT_DIRECTORY) .help("treat all arguments as directory names. 
create all components of the specified directories") ) .arg( // TODO implement flag Arg::with_name(OPT_CREATED) .short("D") .help("(unimplemented) create all leading components of DEST except the last, then copy SOURCE to DEST") ) .arg( // TODO implement flag Arg::with_name(OPT_GROUP) .short("g") .long(OPT_GROUP) .help("(unimplemented) set group ownership, instead of process's current group") .value_name("GROUP") ) .arg( Arg::with_name(OPT_MODE) .short("m") .long(OPT_MODE) .help("set permission mode (as in chmod), instead of rwxr-xr-x") .value_name("MODE") ) .arg( // TODO implement flag Arg::with_name(OPT_OWNER) .short("o") .long(OPT_OWNER) .help("(unimplemented) set ownership (super-user only)") .value_name("OWNER") ) .arg( // TODO implement flag Arg::with_name(OPT_PRESERVE_TIMESTAMPS) .short("p") .long(OPT_PRESERVE_TIMESTAMPS) .help("(unimplemented) apply access/modification times of SOURCE files to corresponding destination files") ) .arg( // TODO implement flag Arg::with_name(OPT_STRIP) .short("s") .long(OPT_STRIP) .help("(unimplemented) strip symbol tables") ) .arg( // TODO implement flag Arg::with_name(OPT_STRIP_PROGRAM) .long(OPT_STRIP_PROGRAM) .help("(unimplemented) program used to strip binaries") .value_name("PROGRAM") ) .arg( // TODO implement flag Arg::with_name(OPT_SUFFIX) .short("S") .long(OPT_SUFFIX) .help("(unimplemented) override the usual backup suffix") .value_name("SUFFIX") ) .arg( // TODO implement flag Arg::with_name(OPT_TARGET_DIRECTORY) .short("t") .long(OPT_TARGET_DIRECTORY) .help("(unimplemented) move all SOURCE arguments into DIRECTORY") .value_name("DIRECTORY") ) .arg( // TODO implement flag Arg::with_name(OPT_NO_TARGET_DIRECTORY) .short("T") .long(OPT_NO_TARGET_DIRECTORY) .help("(unimplemented) treat DEST as a normal file") ) .arg( Arg::with_name(OPT_VERBOSE) .short("v") .long(OPT_VERBOSE) .help("explain what is being done") ) .arg( // TODO implement flag Arg::with_name(OPT_PRESERVE_CONTEXT) .short("P") .long(OPT_PRESERVE_CONTEXT) .help("(unimplemented) preserve security context") ) .arg( // TODO implement flag Arg::with_name(OPT_CONTEXT) .short("Z") .long(OPT_CONTEXT) .help("(unimplemented) set security context of files and directories") .value_name("CONTEXT") ) .arg(Arg::with_name(ARG_FILES).multiple(true).takes_value(true)) .get_matches_from(args); let paths: Vec<String> = matches .values_of(ARG_FILES) .map(|v| v.map(ToString::to_string).collect()) .unwrap_or_default(); if let Err(s) = check_unimplemented(&matches) { show_error!("Unimplemented feature: {}", s); return 2; } let behavior = match behavior(&matches) { Ok(x) => x, Err(ret) => { return ret; } }; match behavior.main_function { MainFunction::Directory => directory(paths, behavior), MainFunction::Standard => standard(paths, behavior), } } /// Check for unimplemented command line arguments. /// /// Either return the degenerate Ok value, or an Err with string. /// /// # Errors /// /// Error datum is a string of the unimplemented argument. 
/// /// fn check_unimplemented<'a>(matches: &ArgMatches) -> Result<(), &'a str> { if matches.is_present(OPT_BACKUP) { Err("--backup") } else if matches.is_present(OPT_BACKUP_2) { Err("-b") } else if matches.is_present(OPT_COMPARE) { Err("--compare, -C") } else if matches.is_present(OPT_CREATED) { Err("-D") } else if matches.is_present(OPT_GROUP) { Err("--group, -g") } else if matches.is_present(OPT_OWNER) { Err("--owner, -o") } else if matches.is_present(OPT_PRESERVE_TIMESTAMPS) { Err("--preserve-timestamps, -p") } else if matches.is_present(OPT_STRIP) { Err("--strip, -s") } else if matches.is_present(OPT_STRIP_PROGRAM) { Err("--strip-program") } else if matches.is_present(OPT_SUFFIX) { Err("--suffix, -S") } else if matches.is_present(OPT_TARGET_DIRECTORY) { Err("--target-directory, -t") } else if matches.is_present(OPT_NO_TARGET_DIRECTORY) { Err("--no-target-directory, -T") } else if matches.is_present(OPT_PRESERVE_CONTEXT) { Err("--preserve-context, -P") } else if matches.is_present(OPT_CONTEXT) { Err("--context, -Z") } else { Ok(()) } } /// Determine behavior, given command line arguments. /// /// If successful, returns a filled-out Behavior struct. /// /// # Errors /// /// In event of failure, returns an integer intended as a program return code. /// fn behavior(matches: &ArgMatches) -> Result<Behavior, i32> { let main_function = if matches.is_present("directory") { MainFunction::Directory } else { MainFunction::Standard }; let considering_dir: bool = MainFunction::Directory == main_function; let specified_mode: Option<u32> = if matches.is_present(OPT_MODE) { match matches.value_of(OPT_MODE) { Some(x) => match mode::parse(&x[..], considering_dir) { Ok(y) => Some(y), Err(err) =>
random_line_split
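// behavior() above delegates to mode::parse, which accepts both octal and
// symbolic (chmod-style) strings; that parser lives in a module not shown
// in this file, so here is a deliberately reduced stand-in handling the
// octal case only. It shows why parsing happens once up front: a bad string
// becomes exit code 1 immediately instead of failing later inside chmod.
fn parse_octal_mode(s: &str) -> Result<u32, String> {
    u32::from_str_radix(s, 8).map_err(|e| format!("invalid octal mode '{}': {}", s, e))
}

#[test]
fn octal_only_stand_in() {
    assert_eq!(parse_octal_mode("755"), Ok(0o755));
    assert!(parse_octal_mode("u+x").is_err()); // symbolic needs the real parser
}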
modules.go
.AllPermissions, } cliToken, err := jwt.Sign(&p, jwt.NewHS256(sk)) if err != nil { return nil, err } if err := cfg.LocalStorage().SetAPIToken(cliToken); err != nil { return nil, err } return (*types2.APIAlg)(jwt.NewHS256(sk)), nil } //storage func StorageAuth(ctx MetricsCtx, ca api.Common) (sectorstorage.StorageAuth, error) { token, err := ca.AuthNew(ctx, []auth.Permission{"admin"}) if err != nil { return nil, xerrors.Errorf("creating storage auth header: %w", err) } headers := http.Header{} headers.Add("Authorization", "Bearer "+string(token)) return sectorstorage.StorageAuth(headers), nil } func
(ma types2.MinerAddress) (types2.MinerID, error) { id, err := address.IDFromAddress(address.Address(ma)) return types2.MinerID(id), err } func MinerAddress(metaDataService *service.MetadataService) (types2.MinerAddress, error) { ma, err := metaDataService.GetMinerAddress() return types2.MinerAddress(ma), err } func SealProofType(maddr types2.MinerAddress, fnapi api.FullNode) (abi.RegisteredSealProof, error) { mi, err := fnapi.StateMinerInfo(context.TODO(), address.Address(maddr), types.EmptyTSK) if err != nil { return 0, err } networkVersion, err := fnapi.StateNetworkVersion(context.TODO(), types.EmptyTSK) if err != nil { return 0, err } return miner.PreferredSealProofTypeFromWindowPoStType(networkVersion, mi.WindowPoStProofType) } var StorageCounterDSPrefix = "/storage/nextid" // nolint type sidsc struct { sc *storedcounter.StoredCounter } // nolint func (s *sidsc) Next() (abi.SectorNumber, error) { i, err := s.sc.Next() return abi.SectorNumber(i), err } func SectorIDCounter(metaDataService *service.MetadataService) types2.SectorIDCounter { return metaDataService } var WorkerCallsPrefix = datastore.NewKey("/worker/calls") var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls") func LocalStorage(mctx MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, urls sectorstorage.URLs) (*stores.Local, error) { ctx := LifecycleCtx(mctx, lc) return stores.NewLocal(ctx, ls, si, urls) } func RemoteStorage(lstor *stores.Local, si stores.SectorIndex, sa sectorstorage.StorageAuth, sc sectorstorage.SealerConfig) *stores.Remote { return stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit, &stores.DefaultPartialFileHandler{}) } func SectorStorage(mctx MetricsCtx, lc fx.Lifecycle, lstor *stores.Local, stor *stores.Remote, ls stores.LocalStorage, si stores.SectorIndex, sc sectorstorage.SealerConfig, repo repo.Repo) (*sectorstorage.Manager, error) { ctx := LifecycleCtx(mctx, lc) wsts := service.NewWorkCallService(repo, "sealer") smsts := service.NewWorkStateService(repo) sst, err := sectorstorage.New(ctx, lstor, stor, ls, si, sc, wsts, smsts) if err != nil { return nil, err } lc.Append(fx.Hook{ OnStop: sst.Close, }) return sst, nil } func GetParams(mctx MetricsCtx, spt abi.RegisteredSealProof) error { ssize, err := spt.SectorSize() if err != nil { return err } ps, err := asset.Asset("fixtures/_assets/proof-params/parameters.json") if err != nil { return err } srs, err := asset.Asset("fixtures/_assets/proof-params/srs-inner-product.json") if err != nil { return err } if err := paramfetch.GetParams(mctx, ps, srs, uint64(ssize)); err != nil { return xerrors.Errorf("get params: %w", err) } return nil } func StorageNetworkName(ctx MetricsCtx, a api.FullNode) (types2.NetworkName, error) { /* if !build.Devnet { return "testnetnet", nil }*/ return a.StateNetworkName(ctx) } func AddressSelector(addrConf *config.MinerAddressConfig) func() (*storage.AddressSelector, error) { return func() (*storage.AddressSelector, error) { as := &storage.AddressSelector{} if addrConf == nil { return as, nil } log.Infof("miner address config: %v", *addrConf) for _, s := range addrConf.PreCommitControl { addr, err := address.NewFromString(s) if err != nil { return nil, xerrors.Errorf("parsing precommit control address: %w", err) } as.PreCommitControl = append(as.PreCommitControl, addr) } for _, s := range addrConf.CommitControl { addr, err := address.NewFromString(s) if err != nil { return nil, xerrors.Errorf("parsing commit control address: %w", err) } as.CommitControl = append(as.CommitControl, 
addr) } as.DisableOwnerFallback = addrConf.DisableOwnerFallback as.DisableWorkerFallback = addrConf.DisableWorkerFallback return as, nil } } type StorageMinerParams struct { fx.In Lifecycle fx.Lifecycle MetricsCtx MetricsCtx API api.FullNode Messager api.IMessager MarketClient api2.MarketFullNode MetadataService *service.MetadataService LogService *service.LogService SectorInfoService *service.SectorInfoService Sealer sectorstorage.SectorManager SectorIDCounter types2.SectorIDCounter Verifier ffiwrapper.Verifier Prover ffiwrapper.Prover GetSealingConfigFn types2.GetSealingConfigFunc Journal journal.Journal AddrSel *storage.AddressSelector NetworkParams *config.NetParamsConfig } func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*storage.Miner, error) { return func(params StorageMinerParams) (*storage.Miner, error) { var ( metadataService = params.MetadataService sectorinfoService = params.SectorInfoService logService = params.LogService mctx = params.MetricsCtx lc = params.Lifecycle api = params.API messager = params.Messager marketClient = params.MarketClient sealer = params.Sealer sc = params.SectorIDCounter verif = params.Verifier prover = params.Prover gsd = params.GetSealingConfigFn j = params.Journal as = params.AddrSel np = params.NetworkParams ) maddr, err := metadataService.GetMinerAddress() if err != nil { return nil, err } ctx := LifecycleCtx(mctx, lc) fps, err := storage.NewWindowedPoStScheduler(api, messager, fc, as, sealer, verif, sealer, j, maddr, np) if err != nil { return nil, err } sm, err := storage.NewMiner(api, messager, marketClient, maddr, metadataService, sectorinfoService, logService, sealer, sc, verif, prover, gsd, fc, j, as, np) if err != nil { return nil, err } lc.Append(fx.Hook{ OnStart: func(context.Context) error { go fps.Run(ctx) return sm.Run(ctx) }, OnStop: sm.Stop, }) return sm, nil } } func DoPoStWarmup(ctx MetricsCtx, api api.FullNode, metadataService *service.MetadataService, prover storage.WinningPoStProver) error { maddr, err := metadataService.GetMinerAddress() if err != nil { return err } deadlines, err := api.StateMinerDeadlines(ctx, maddr, types.EmptyTSK) if err != nil { return xerrors.Errorf("getting deadlines: %w", err) } var sector abi.SectorNumber = math.MaxUint64 out: for dlIdx := range deadlines { partitions, err := api.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK) if err != nil { return xerrors.Errorf("getting partitions for deadline %d: %w", dlIdx, err) } for _, partition := range partitions { b, err := partition.ActiveSectors.First() if err == bitfield.ErrNoBitsSet { continue } if err != nil { return err } sector = abi.SectorNumber(b) break out } } if sector == math.MaxUint64 { log.Info
MinerID
identifier_name
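// The FIM middle above is MinerID: a thin wrapper that unwraps the numeric
// actor ID from an ID-address. A standalone sketch using the same
// go-address calls as the module above; the "f01234" value is illustrative
// only.
package main

import (
	"fmt"

	"github.com/filecoin-project/go-address"
)

func minerIDSketch(s string) (uint64, error) {
	a, err := address.NewFromString(s)
	if err != nil {
		return 0, err
	}
	// IDFromAddress only succeeds for ID (f0/t0) addresses; key-based
	// f1/f2/f3 addresses must be resolved through chain state first.
	return address.IDFromAddress(a)
}

func main() {
	id, err := minerIDSketch("f01234")
	fmt.Println(id, err) // 1234 <nil>
}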
modules.go
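// AddressSelector above repeats one loop per control list (PreCommitControl,
// CommitControl): parse each configured string into an address.Address and
// fail fast with a wrapped error. A standalone sketch of that loop;
// parseControlList is a hypothetical helper name, not part of the module.
package main

import (
	"fmt"

	"github.com/filecoin-project/go-address"
	"golang.org/x/xerrors"
)

func parseControlList(ss []string, what string) ([]address.Address, error) {
	out := make([]address.Address, 0, len(ss))
	for _, s := range ss {
		a, err := address.NewFromString(s)
		if err != nil {
			return nil, xerrors.Errorf("parsing %s control address: %w", what, err)
		}
		out = append(out, a)
	}
	return out, nil
}

func main() {
	addrs, err := parseControlList([]string{"f01234"}, "precommit")
	fmt.Println(addrs, err)
}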
.AuthNew(ctx, []auth.Permission{"admin"}) if err != nil { return nil, xerrors.Errorf("creating storage auth header: %w", err) } headers := http.Header{} headers.Add("Authorization", "Bearer "+string(token)) return sectorstorage.StorageAuth(headers), nil } func MinerID(ma types2.MinerAddress) (types2.MinerID, error) { id, err := address.IDFromAddress(address.Address(ma)) return types2.MinerID(id), err } func MinerAddress(metaDataService *service.MetadataService) (types2.MinerAddress, error) { ma, err := metaDataService.GetMinerAddress() return types2.MinerAddress(ma), err } func SealProofType(maddr types2.MinerAddress, fnapi api.FullNode) (abi.RegisteredSealProof, error) { mi, err := fnapi.StateMinerInfo(context.TODO(), address.Address(maddr), types.EmptyTSK) if err != nil { return 0, err } networkVersion, err := fnapi.StateNetworkVersion(context.TODO(), types.EmptyTSK) if err != nil { return 0, err } return miner.PreferredSealProofTypeFromWindowPoStType(networkVersion, mi.WindowPoStProofType) } var StorageCounterDSPrefix = "/storage/nextid" // nolint type sidsc struct { sc *storedcounter.StoredCounter } // nolint func (s *sidsc) Next() (abi.SectorNumber, error) { i, err := s.sc.Next() return abi.SectorNumber(i), err } func SectorIDCounter(metaDataService *service.MetadataService) types2.SectorIDCounter { return metaDataService } var WorkerCallsPrefix = datastore.NewKey("/worker/calls") var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls") func LocalStorage(mctx MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, urls sectorstorage.URLs) (*stores.Local, error) { ctx := LifecycleCtx(mctx, lc) return stores.NewLocal(ctx, ls, si, urls) } func RemoteStorage(lstor *stores.Local, si stores.SectorIndex, sa sectorstorage.StorageAuth, sc sectorstorage.SealerConfig) *stores.Remote { return stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit, &stores.DefaultPartialFileHandler{}) } func SectorStorage(mctx MetricsCtx, lc fx.Lifecycle, lstor *stores.Local, stor *stores.Remote, ls stores.LocalStorage, si stores.SectorIndex, sc sectorstorage.SealerConfig, repo repo.Repo) (*sectorstorage.Manager, error) { ctx := LifecycleCtx(mctx, lc) wsts := service.NewWorkCallService(repo, "sealer") smsts := service.NewWorkStateService(repo) sst, err := sectorstorage.New(ctx, lstor, stor, ls, si, sc, wsts, smsts) if err != nil { return nil, err } lc.Append(fx.Hook{ OnStop: sst.Close, }) return sst, nil } func GetParams(mctx MetricsCtx, spt abi.RegisteredSealProof) error { ssize, err := spt.SectorSize() if err != nil { return err } ps, err := asset.Asset("fixtures/_assets/proof-params/parameters.json") if err != nil { return err } srs, err := asset.Asset("fixtures/_assets/proof-params/srs-inner-product.json") if err != nil { return err } if err := paramfetch.GetParams(mctx, ps, srs, uint64(ssize)); err != nil { return xerrors.Errorf("get params: %w", err) } return nil } func StorageNetworkName(ctx MetricsCtx, a api.FullNode) (types2.NetworkName, error) { /* if !build.Devnet { return "testnetnet", nil }*/ return a.StateNetworkName(ctx) } func AddressSelector(addrConf *config.MinerAddressConfig) func() (*storage.AddressSelector, error) { return func() (*storage.AddressSelector, error) { as := &storage.AddressSelector{} if addrConf == nil { return as, nil } log.Infof("miner address config: %v", *addrConf) for _, s := range addrConf.PreCommitControl { addr, err := address.NewFromString(s) if err != nil { return nil, xerrors.Errorf("parsing precommit control address: %w", err) } 
as.PreCommitControl = append(as.PreCommitControl, addr) } for _, s := range addrConf.CommitControl { addr, err := address.NewFromString(s) if err != nil { return nil, xerrors.Errorf("parsing commit control address: %w", err) } as.CommitControl = append(as.CommitControl, addr) } as.DisableOwnerFallback = addrConf.DisableOwnerFallback as.DisableWorkerFallback = addrConf.DisableWorkerFallback return as, nil } } type StorageMinerParams struct { fx.In Lifecycle fx.Lifecycle MetricsCtx MetricsCtx API api.FullNode Messager api.IMessager MarketClient api2.MarketFullNode MetadataService *service.MetadataService LogService *service.LogService SectorInfoService *service.SectorInfoService Sealer sectorstorage.SectorManager SectorIDCounter types2.SectorIDCounter Verifier ffiwrapper.Verifier Prover ffiwrapper.Prover GetSealingConfigFn types2.GetSealingConfigFunc Journal journal.Journal AddrSel *storage.AddressSelector NetworkParams *config.NetParamsConfig } func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*storage.Miner, error) { return func(params StorageMinerParams) (*storage.Miner, error) { var ( metadataService = params.MetadataService sectorinfoService = params.SectorInfoService logService = params.LogService mctx = params.MetricsCtx lc = params.Lifecycle api = params.API messager = params.Messager marketClient = params.MarketClient sealer = params.Sealer sc = params.SectorIDCounter verif = params.Verifier prover = params.Prover gsd = params.GetSealingConfigFn j = params.Journal as = params.AddrSel np = params.NetworkParams ) maddr, err := metadataService.GetMinerAddress() if err != nil { return nil, err } ctx := LifecycleCtx(mctx, lc) fps, err := storage.NewWindowedPoStScheduler(api, messager, fc, as, sealer, verif, sealer, j, maddr, np) if err != nil { return nil, err } sm, err := storage.NewMiner(api, messager, marketClient, maddr, metadataService, sectorinfoService, logService, sealer, sc, verif, prover, gsd, fc, j, as, np) if err != nil { return nil, err } lc.Append(fx.Hook{ OnStart: func(context.Context) error { go fps.Run(ctx) return sm.Run(ctx) }, OnStop: sm.Stop, }) return sm, nil } } func DoPoStWarmup(ctx MetricsCtx, api api.FullNode, metadataService *service.MetadataService, prover storage.WinningPoStProver) error { maddr, err := metadataService.GetMinerAddress() if err != nil { return err } deadlines, err := api.StateMinerDeadlines(ctx, maddr, types.EmptyTSK) if err != nil { return xerrors.Errorf("getting deadlines: %w", err) } var sector abi.SectorNumber = math.MaxUint64 out: for dlIdx := range deadlines { partitions, err := api.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK) if err != nil { return xerrors.Errorf("getting partitions for deadline %d: %w", dlIdx, err) } for _, partition := range partitions { b, err := partition.ActiveSectors.First() if err == bitfield.ErrNoBitsSet { continue } if err != nil { return err } sector = abi.SectorNumber(b) break out } } if sector == math.MaxUint64 { log.Info("skipping winning PoSt warmup, no sectors") return nil } log.Infow("starting winning PoSt warmup", "sector", sector) start := time.Now() var r abi.PoStRandomness = make([]byte, abi.RandomnessLength) _, _ = rand.Read(r) si, err := api.StateSectorGetInfo(ctx, maddr, sector, types.EmptyTSK) if err != nil
{ return xerrors.Errorf("getting sector info: %w", err) }
conditional_block
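// DoPoStWarmup above scans deadlines and partitions for one live sector:
// bitfield.First returning ErrNoBitsSet marks an empty partition (not a
// failure), the labeled break exits both loops at the first hit, and
// math.MaxUint64 is the "nothing found" sentinel. A sketch of that scan
// with in-memory bitfields standing in for the StateMinerDeadlines /
// StateMinerPartitions chain queries.
package main

import (
	"fmt"
	"math"

	"github.com/filecoin-project/go-bitfield"
)

func firstActiveSector(deadlines [][]bitfield.BitField) (uint64, bool, error) {
	var sector uint64 = math.MaxUint64
out:
	for _, partitions := range deadlines {
		for _, active := range partitions {
			b, err := active.First()
			if err == bitfield.ErrNoBitsSet {
				continue // empty partition: keep scanning
			}
			if err != nil {
				return 0, false, err
			}
			sector = b
			break out // one live sector is enough for a warmup proof
		}
	}
	return sector, sector != math.MaxUint64, nil
}

func main() {
	empty := bitfield.New()
	live := bitfield.NewFromSet([]uint64{42, 43})
	s, ok, err := firstActiveSector([][]bitfield.BitField{{empty}, {live}})
	fmt.Println(s, ok, err) // 42 true <nil>
}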