code stringlengths 67-15.9k | labels listlengths 1-4 |
---|---|
package chef
import (
"bufio"
"fmt"
"io"
"os"
"strings"
"github.com/jarosser06/fastfood/common/fileutil"
)
type BerksFile struct {
Cookbooks map[string]BerksCookbook
}
type BerksCookbook struct {
Branch string `json:"branch"`
Name string `json:"name"`
Git string `json:"git"`
Path string `json:"path"`
Ref string `json:"ref"`
Revision string `json:"revision"`
Tag string `json:"tag"`
}
// BerksFromFile reads a Berksfile and returns a BerksFile struct
func BerksFromFile(f string) (BerksFile, error) {
b := BerksFile{Cookbooks: make(map[string]BerksCookbook)}
r, err := os.Open(f)
if err != nil {
return b, err
}
defer r.Close()
b.Parse(r)
return b, nil
}
func (c *BerksCookbook) String() string {
s := fmt.Sprintf("cookbook \"%s\"", c.Name)
if c.Git != "" {
s = fmt.Sprintf("%s, git: \"%s\"", s, c.Git)
switch {
case c.Branch != "":
s = fmt.Sprintf("%s, branch: \"%s\"", s, c.Branch)
case c.Ref != "":
s = fmt.Sprintf("%s, ref: \"%s\"", s, c.Ref)
case c.Revision != "":
s = fmt.Sprintf("%s, revision: \"%s\"", s, c.Revision)
case c.Tag != "":
s = fmt.Sprintf("%s, tag: \"%s\"", s, c.Tag)
}
} else if c.Path != "" {
s = fmt.Sprintf("%s, path: \"%s\"", s, c.Path)
}
return s
}
// Parse the Berksfile
func (b *BerksFile) Parse(r io.Reader) {
s := bufio.NewScanner(r)
s.Split(bufio.ScanWords)
for s.Scan() {
switch s.Text() {
case "cookbook":
s.Scan()
cName := strings.Trim(s.Text(), "',\"")
c := BerksCookbook{Name: cName}
b.Cookbooks[cName] = c
}
}
}
// Append Dependencies to a Berksfile
func (b *BerksFile) Append(f string, deps []BerksCookbook) []string {
var added []string
var buffer []string
if len(deps) == 0 {
return added
}
// Catches issue with Cookbooks not being created
if b.Cookbooks == nil {
b.Cookbooks = make(map[string]BerksCookbook)
}
for _, d := range deps {
if _, ok := b.Cookbooks[d.Name]; !ok {
b.Cookbooks[d.Name] = d
added = append(added, d.Name)
buffer = append(buffer, d.String())
}
}
if len(added) > 0 {
fileutil.AppendFile(
f,
fmt.Sprintf("%s\n", strings.Join(buffer, "\n")),
)
}
return added
}
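// Usage sketch (added for illustration, not part of the original package):
// read an existing Berksfile and append a missing dependency. The file path
// and the "nginx" cookbook values below are hypothetical examples.
func exampleAppendDeps() error {
	berks, err := BerksFromFile("Berksfile")
	if err != nil {
		return err
	}
	added := berks.Append("Berksfile", []BerksCookbook{
		{Name: "nginx", Git: "https://github.com/example/chef-nginx", Tag: "v1.0.0"},
	})
	fmt.Printf("appended cookbooks: %v\n", added)
	return nil
}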
| [7] |
package main
import (
"fmt"
"io"
"log"
"math"
"sort"
"time"
"github.com/miekg/dns"
)
type DurationSlice []time.Duration
// NOTE: This implements sort.Interface
func (p DurationSlice) Len() int { return len(p) }
func (p DurationSlice) Less(i, j int) bool { return int64(p[i]) < int64(p[j]) }
func (p DurationSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// NOTE: Wasteful Convenience Functions
func (p DurationSlice) Min() time.Duration {
sort.Sort(p)
return p[0]
}
func (p DurationSlice) Max() time.Duration {
sort.Sort(p)
return p[p.Len()-1]
}
func (p DurationSlice) Avg() time.Duration {
var avg int64
for i := 0; i < p.Len(); i++ {
avg += int64(p[i])
}
return time.Duration(avg / int64(p.Len()))
}
func (p DurationSlice) Std() time.Duration {
sqdifs := make(DurationSlice, p.Len(), p.Len())
avg := p.Avg()
var avgsqdif int64
for i := 0; i < p.Len(); i++ {
sqdif := p[i] - avg
sqdifs[i] = sqdif * sqdif
avgsqdif += int64(sqdifs[i])
}
avgsqdif /= int64(sqdifs.Len())
return time.Duration(math.Sqrt(float64(avgsqdif)))
}
// analyzeDns queries the DNS server repeatedly and writes per-query and summary statistics to w.
func analyzeDns(w io.Writer, server, hostname string, samples, waitMillis int) {
m := new(dns.Msg)
m.Id = dns.Id()
m.RecursionDesired = true
m.Question = make([]dns.Question, 1)
m.Question[0] = dns.Question{Name: dns.Fqdn(hostname), Qtype: dns.TypeA, Qclass: dns.ClassINET}
wait := time.Duration(waitMillis) * time.Millisecond
c := new(dns.Client)
fmt.Printf("QUERY %v (@%v): %v data bytes\n", hostname, server, m.Len())
rtts := make(DurationSlice, samples, samples)
for i := 0; i < samples; i++ {
in, rtt, err := c.Exchange(m, server+":53")
if err != nil {
log.Println(err)
continue
}
rtts[i] = rtt
fmt.Fprintf(w, "%v bytes from %v: ttl=%v time=%v\n", in.Len(), server, time.Second*6, rtt)
time.Sleep(wait)
}
// NOTE: Potentially Eating Performance for Pretties
var min, max, avg, stddev time.Duration
min = rtts.Min()
max = rtts.Max()
avg = rtts.Avg()
stddev = rtts.Std()
fmt.Fprintf(w, "round-trip min/avg/max/stddev = %v/%v/%v/%v\n", min, avg, max, stddev)
}
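// Usage sketch (added for illustration): run the analyzer against a public
// resolver. The server, hostname, sample count and delay are hypothetical
// values; log.Writer() (stderr by default) is used as the output writer so
// no extra imports are needed.
func exampleAnalyze() {
	analyzeDns(log.Writer(), "8.8.8.8", "example.com", 4, 500)
}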
| [1] |
// Copyright (c) 2014, J. Salvador Arias <[email protected]>
// All rights reserved.
// Distributed under BSD2 license that can be found in the LICENSE file.
// Package inat implements a jdh driver for i-Naturalist.
package inat
import (
"encoding/xml"
"errors"
"net/http"
"time"
"github.com/js-arias/jdh/pkg/jdh"
)
const driver = "inat"
// DB implements the i-Naturalist connection jdh DB interface.
type DB struct {
isClosed bool
request chan string
answer chan interface{}
}
func init() {
jdh.Register(driver, open)
}
// open creates a new database.
func open(port string) (jdh.DB, error) {
db := &DB{
isClosed: false,
request: make(chan string),
answer: make(chan interface{}),
}
go db.req()
return db, nil
}
// Close closes the database.
func (db *DB) Close() error {
if db.isClosed {
return errors.New("database already closed")
}
close(db.request)
return nil
}
// Driver returns the driver name.
func (db *DB) Driver() string {
return driver
}
// Exec cannot modify inat: it is a read-only database.
func (db *DB) Exec(query jdh.Query, table jdh.Table, param interface{}) (string, error) {
return "", errors.New("inat is a read only database")
}
// Get returns an element data from inat database.
func (db *DB) Get(table jdh.Table, id string) (jdh.Scanner, error) {
if db.isClosed {
return nil, errors.New("database already closed")
}
switch table {
case jdh.Taxonomy:
return db.taxon(id)
}
return nil, errors.New("get not implemented for table " + string(table))
}
// List executes a query that returns a list.
func (db *DB) List(table jdh.Table, args *jdh.Values) (jdh.ListScanner, error) {
if db.isClosed {
return nil, errors.New("database already closed")
}
if args == nil {
return nil, errors.New("empty argument list")
}
switch table {
case jdh.Taxonomy:
return db.taxonList(args.KV)
}
return nil, errors.New("list not implemented for table " + string(table))
}
const inatHead = "http://www.inaturalist.org/"
// process requests
func (db *DB) req() {
for r := range db.request {
answer, err := http.Get(r)
if err != nil {
db.answer <- err
continue
}
db.answer <- answer
// this is set to not overload the inat server...
// I'm afraid of being banned!
time.Sleep(100 * time.Millisecond)
}
}
func skip(dec *xml.Decoder, end string) error {
for tk, err := dec.Token(); ; tk, err = dec.Token() {
if err != nil {
return err
}
switch t := tk.(type) {
case xml.EndElement:
if t.Name.Local == end {
return nil
}
}
}
}
| [7] |
package engine
import (
"errors"
"reflect"
"testing"
"github.com/kylelemons/godebug/pretty"
)
func TestGetFreePosition1(t *testing.T) {
g := newGrid(3, 3)
p, err := g.getFreePosition(
[]*Segment{
newSegment(East, NewPosition(0, 1), NewPosition(2, 1)),
newSegment(South, NewPosition(2, 0), NewPosition(2, 0)),
newSegment(West, NewPosition(0, 0), NewPosition(1, 0)),
newSegment(South, NewPosition(1, 2), NewPosition(1, 2)),
newSegment(East, NewPosition(0, 2), NewPosition(2, 2)),
},
)
if err != nil {
t.Errorf("unexpected error, [%v]", err)
} else {
exp := NewPosition(1, 1)
if !reflect.DeepEqual(exp, p) {
t.Errorf("unexpected value, exp [%+v] got [%+v]", *exp, *p)
}
}
}
func TestGetFreePosition2(t *testing.T) {
g := newGrid(3, 3)
p, err := g.getFreePosition(
[]*Segment{
newSegment(South, NewPosition(1, 0), NewPosition(1, 2)),
newSegment(West, NewPosition(2, 2), NewPosition(2, 2)),
newSegment(North, NewPosition(2, 0), NewPosition(2, 1)),
newSegment(West, NewPosition(0, 1), NewPosition(0, 1)),
newSegment(South, NewPosition(0, 0), NewPosition(0, 2)),
},
)
if err != nil {
t.Errorf("unexpected error, [%v]", err)
} else {
exp := NewPosition(1, 1)
if !reflect.DeepEqual(exp, p) {
t.Errorf("unexpected value, exp [%+v] got [%+v]", *exp, *p)
}
}
}
func TestGetFreePosition3(t *testing.T) {
g := newGrid(2, 2)
_, err := g.getFreePosition(
[]*Segment{
newSegment(West, NewPosition(0, 0), NewPosition(1, 0)),
newSegment(East, NewPosition(1, 1), NewPosition(0, 1)),
},
)
if err == nil {
t.Error("expecting error")
} else {
if err != ErrNoPosition {
t.Errorf("unexpected error, exp [%v] got [%v]", ErrNoPosition, err)
}
}
}
func TestIsFreePosition(t *testing.T) {
g := newGrid(2, 2)
v := g.isFreePosition(
NewPosition(0, 0),
[]*Segment{
newSegment(West, NewPosition(0, 0), NewPosition(1, 0)),
newSegment(East, NewPosition(1, 1), NewPosition(0, 1)),
},
)
if v {
t.Errorf("unexpected value")
}
v = g.isFreePosition(
NewPosition(0, 0),
[]*Segment{
newSegment(West, NewPosition(1, 0), NewPosition(1, 0)),
newSegment(East, NewPosition(1, 1), NewPosition(0, 1)),
},
)
if !v {
t.Errorf("unexpected value")
}
v = g.isFreePosition(
NewPosition(0, 1),
[]*Segment{
newSegment(West, NewPosition(1, 0), NewPosition(1, 0)),
newSegment(East, NewPosition(1, 1), NewPosition(0, 1)),
},
)
if !v {
t.Errorf("unexpected value")
}
}
func TestMove1(t *testing.T) {
g := newGrid(3, 3)
p, err := g.move(
East,
[]*Segment{
newSegment(East, NewPosition(0, 1), NewPosition(2, 1)),
newSegment(South, NewPosition(2, 0), NewPosition(2, 0)),
newSegment(West, NewPosition(0, 0), NewPosition(1, 0)),
newSegment(South, NewPosition(1, 2), NewPosition(1, 2)),
newSegment(East, NewPosition(0, 2), NewPosition(2, 2)),
},
NewPosition(1, 1),
false,
)
if err != nil {
t.Errorf("unexpected error, %v", err)
} else {
exp := []*Segment{
newSegment(East, NewPosition(1, 1), NewPosition(2, 1)),
newSegment(South, NewPosition(2, 0), NewPosition(2, 0)),
newSegment(West, NewPosition(0, 0), NewPosition(1, 0)),
newSegment(South, NewPosition(1, 2), NewPosition(1, 2)),
newSegment(East, NewPosition(0, 2), NewPosition(0, 2)),
}
if diff := pretty.Compare(exp, p); diff != "" {
t.Errorf("unexpected value\n%s", diff)
}
}
}
func TestMove2(t *testing.T) {
g := newGrid(3, 3)
_, err := g.move(
North,
[]*Segment{
newSegment(East, NewPosition(0, 1), NewPosition(2, 1)),
newSegment(West, NewPosition(2, 0), NewPosition(1, 0)),
newSegment(East, NewPosition(1, 2), NewPosition(2, 2)),
},
NewPosition(0, 0),
false,
)
if err == nil {
t.Error("expecting error")
} else {
if err != ErrColision {
t.Errorf("unexpected error, exp [%v] got [%v]", ErrColision, err)
}
}
}
func TestMove3(t *testing.T) {
g := newGrid(2, 2)
p, err := g.move(
South,
[]*Segment{
newSegment(West, NewPosition(0, 0), NewPosition(1, 0)),
newSegment(North, NewPosition(1, 1), NewPosition(1, 1)),
},
NewPosition(0, 1),
false,
)
if err != nil {
t.Errorf("unexpected error, %v", err)
} else {
exp := []*Segment{
newSegment(South, NewPosition(0, 1), NewPosition(0, 0)),
newSegment(West, NewPosition(1, 0), NewPosition(1, 0)),
}
if diff := pretty.Compare(exp, p); diff != "" {
t.Errorf("unexpected value\n%s", diff)
}
}
}
func TestGetPosition(t *testing.T) {
tests := []struct {
p string
exp *Position
err error
}{
{"A", nil, ErrInvalidKey},
{"A-A", nil, errors.New(`strconv.Atoi: parsing "A": invalid syntax`)},
{"0-A", nil, errors.New(`strconv.Atoi: parsing "A": invalid syntax`)},
{"0-0", NewPosition(0, 0), nil},
}
for i := range tests {
v, err := getPosition(tests[i].p)
if err != nil {
if tests[i].err == nil {
t.Errorf("unexpected error at %d, [%v]", i, err)
} else {
if err.Error() != tests[i].err.Error() {
t.Errorf("unexpected error at %d, exp [%v] got [%v]", i, tests[i].err, err)
}
}
} else {
if tests[i].err != nil {
t.Errorf("expecting error at %d", i)
}
}
if !reflect.DeepEqual(v, tests[i].exp) {
t.Errorf("unexpected value at %d, exp [%v] got [%v]", i, tests[i].exp, v)
}
}
}
func TestGetBodyParts1(t *testing.T) {
g := newGrid(3, 3)
v := g.getBodyParts(
[]*Segment{
newSegment(East, NewPosition(0, 1), NewPosition(2, 1)),
newSegment(South, NewPosition(2, 0), NewPosition(2, 0)),
newSegment(West, NewPosition(0, 0), NewPosition(1, 0)),
newSegment(South, NewPosition(1, 2), NewPosition(1, 2)),
newSegment(East, NewPosition(0, 2), NewPosition(2, 2)),
},
)
exp := []*BodyPart{
&BodyPart{BodySouthWest, NewPosition(2, 1)},
&BodyPart{HeadEast, NewPosition(0, 1)},
&BodyPart{BodyNorthWest, NewPosition(2, 0)},
&BodyPart{BodySouthEast, NewPosition(1, 0)},
&BodyPart{BodyHorizontal, NewPosition(0, 0)},
&BodyPart{BodyNorthEast, NewPosition(1, 2)},
&BodyPart{TailEast, NewPosition(2, 2)},
&BodyPart{BodyHorizontal, NewPosition(0, 2)},
}
if diff := pretty.Compare(exp, v); diff != "" {
t.Errorf("unexpected value\n%s", diff)
}
}
func TestGetBodyParts2(t *testing.T) {
g := newGrid(3, 3)
v := g.getBodyParts(
[]*Segment{
newSegment(South, NewPosition(1, 0), NewPosition(1, 2)),
newSegment(West, NewPosition(2, 2), NewPosition(2, 2)),
newSegment(North, NewPosition(2, 0), NewPosition(2, 1)),
newSegment(West, NewPosition(0, 1), NewPosition(0, 1)),
newSegment(South, NewPosition(0, 0), NewPosition(0, 2)),
},
)
exp := []*BodyPart{
&BodyPart{BodyNorthWest, NewPosition(1, 2)},
&BodyPart{HeadSouth, NewPosition(1, 0)},
&BodyPart{BodyNorthEast, NewPosition(2, 2)},
&BodyPart{BodySouthWest, NewPosition(2, 1)},
&BodyPart{BodyVertical, NewPosition(2, 0)},
&BodyPart{BodySouthEast, NewPosition(0, 1)},
&BodyPart{TailSouth, NewPosition(0, 2)},
&BodyPart{BodyVertical, NewPosition(0, 0)},
}
if diff := pretty.Compare(exp, v); diff != "" {
t.Errorf("unexpected value\n%s", diff)
}
}
| [4] |
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Binary sparklinedemo displays a couple of SparkLine widgets.
// Exits when 'q' is pressed.
package main
import (
"context"
"math/rand"
"time"
"github.com/mum4k/termdash"
"github.com/mum4k/termdash/cell"
"github.com/mum4k/termdash/container"
"github.com/mum4k/termdash/linestyle"
"github.com/mum4k/termdash/terminal/tcell"
"github.com/mum4k/termdash/terminal/terminalapi"
"github.com/mum4k/termdash/widgets/sparkline"
)
// playSparkLine continuously adds values to the SparkLine, once every delay.
// Exits when the context expires.
func playSparkLine(ctx context.Context, sl *sparkline.SparkLine, delay time.Duration) {
const max = 100
ticker := time.NewTicker(delay)
defer ticker.Stop()
for {
select {
case <-ticker.C:
v := int(rand.Int31n(max + 1))
if err := sl.Add([]int{v}); err != nil {
panic(err)
}
case <-ctx.Done():
return
}
}
}
// fillSparkLine continuously fills the SparkLine up to its capacity with
// random values.
func fillSparkLine(ctx context.Context, sl *sparkline.SparkLine, delay time.Duration) {
const max = 100
ticker := time.NewTicker(delay)
defer ticker.Stop()
for {
select {
case <-ticker.C:
var values []int
for i := 0; i < sl.ValueCapacity(); i++ {
values = append(values, int(rand.Int31n(max+1)))
}
if err := sl.Add(values); err != nil {
panic(err)
}
case <-ctx.Done():
return
}
}
}
func main() {
t, err := tcell.New()
if err != nil {
panic(err)
}
defer t.Close()
ctx, cancel := context.WithCancel(context.Background())
green, err := sparkline.New(
sparkline.Label("Green SparkLine", cell.FgColor(cell.ColorNumber(33))),
sparkline.Color(cell.ColorGreen),
)
if err != nil {
panic(err)
}
go playSparkLine(ctx, green, 250*time.Millisecond)
red, err := sparkline.New(
sparkline.Label("Red SparkLine", cell.FgColor(cell.ColorNumber(33))),
sparkline.Color(cell.ColorRed),
)
if err != nil {
panic(err)
}
go playSparkLine(ctx, red, 500*time.Millisecond)
yellow, err := sparkline.New(
sparkline.Label("Yellow SparkLine", cell.FgColor(cell.ColorGreen)),
sparkline.Color(cell.ColorYellow),
)
if err != nil {
panic(err)
}
go fillSparkLine(ctx, yellow, 1*time.Second)
c, err := container.New(
t,
container.Border(linestyle.Light),
container.BorderTitle("PRESS Q TO QUIT"),
container.SplitVertical(
container.Left(
container.SplitHorizontal(
container.Top(),
container.Bottom(
container.Border(linestyle.Light),
container.BorderTitle("SparkLine group"),
container.SplitHorizontal(
container.Top(
container.PlaceWidget(green),
),
container.Bottom(
container.PlaceWidget(red),
),
),
),
),
),
container.Right(
container.Border(linestyle.Light),
container.PlaceWidget(yellow),
),
),
)
if err != nil {
panic(err)
}
quitter := func(k *terminalapi.Keyboard) {
if k.Key == 'q' || k.Key == 'Q' {
cancel()
}
}
if err := termdash.Run(ctx, t, c, termdash.KeyboardSubscriber(quitter)); err != nil {
panic(err)
}
}
| [1] |
package controllers
import (
"yts/models"
"fmt"
"io/ioutil"
"regexp"
"strconv"
"github.com/astaxie/beego"
"time"
)
type TSController struct {
BaseController
}
const (
PHPPATH = "./conf/ts.php"
PHPBHPATH = "./conf/ts.bh.php"
JSPATH = "./conf/ts.js"
)
func (this *TSController) PHP() {
this.Info = &map[string]string{
"fail" : "验证不通过",
"user" : "用户已存在",
"ok" : "成功啦!",
}
token := this.GetString("t")
if len(token) == 0 {
this.ERR("fail")
return
}
path := PHPBHPATH
if len(token) > 0 {
_, err := models.NewTokenOption().Get(token)
if err == nil {
path = PHPPATH
}
}
phptag := ""
if this.GetString("include") == "1" {
phptag = "<?php"
}
replaces := map[string]string{
"token" : token,
"domain" : this.domain(),
"phptag" : phptag,
}
cc, _ := ioutil.ReadFile(path)
for kk, vv := range replaces {
re := regexp.MustCompile(fmt.Sprintf(`{%s}`, kk))
reByte := []byte(fmt.Sprintf("%s", vv))
cc = re.ReplaceAll(cc, reByte)
}
this.Ctx.Output.Body(cc)
}
func (this *TSController) JS() {
this.Info = &map[string]string{
"fail" : "账号或密码不对",
"user" : "用户已存在",
"ok" : "成功啦!",
}
token := this.GetString("t")
if len(token) == 0 {
this.ERR("fail")
return
}
_, err := models.NewTokenOption().Get(token)
if err != nil {
this.ERR("fail")
return
}
kl, _ := this.GetInt("kl")
tskl := "0"
if kl > 0 {
tskl = "1"
}
replaces := map[string]string{
"token" : token,
"domain" : this.domain(),
"tskl" : tskl,
}
cc, _ := ioutil.ReadFile(JSPATH)
for kk, vv := range replaces {
re := regexp.MustCompile(fmt.Sprintf(`{%s}`, kk))
reByte := []byte(fmt.Sprintf("%s", vv))
cc = re.ReplaceAll(cc, reByte)
}
this.Ctx.Output.Body(cc)
}
func (this *TSController) Get() {
tss, _ := models.NewTSOption().Get(this.User.Name, 0, 1)
var hash string
if len(tss) > 0 {
hash = models.MD5(tss[0].Data, "")
}
this.Data["User"] = this.OutUser()
this.Data["Hash"] = hash
this.Data["TS"] = tss
this.Data["PHPUrl"] = fmt.Sprintf("http://%s/ts/php?t=%s", this.domain(), this.Token)
this.Data["JSUrl"] = fmt.Sprintf("http://%s/ts/js?t=%s", this.domain(), this.Token)
this.Data["Script"] = []string{"jquery.jsonview", "app/app", "app/directives/tip", "app/directives/jsonview", "app/services/time", "app/controllers/ts/tsCtrl"}
this.Data["Css"] = []string{"jquery.jsonview"}
this.LayoutSections = make(map[string]string)
this.LayoutSections["Navbar"] = "layout/navbar.html"
this.TplNames = "ts.html"
}
func (this *TSController) AjaxGet() {
hash := this.GetString("hash")
name := this.User.Name
tsOption := models.NewTSOption()
var tss []models.TS
var nowHash string
var i int8
var max int8 = 30
for {
if i > max {
break
}
tss, _ = tsOption.Get(name, 0, 1)
if len(tss) > 0 {
nowHash = models.MD5(tss[0].Data, "")
}
if hash != nowHash {
break
}
time.Sleep(time.Second)
i++
}
if i > max {
this.OK(map[string]interface{}{
"hash" : hash,
})
return
}
this.OK(map[string]interface{}{
"ts" : tss,
"hash" : nowHash,
})
}
func (this *TSController) More() {
tss, _ := models.NewTSOption().Get(this.User.Name, 0, 10)
this.Data["TS"] = tss
this.Data["Script"] = []string{"jquery.jsonview", "app/app", "app/directives/tip", "app/directives/jsonview", "app/services/time", "app/controllers/tsmore/tsmoreCtrl"}
this.Data["Css"] = []string{"jquery.jsonview"}
this.LayoutSections = make(map[string]string)
this.LayoutSections["Navbar"] = "layout/navbar.html"
this.TplNames = "tsmore.html"
}
func (this *TSController) Post() {
token := this.GetString("token")
time := this.GetString("time")
data := this.GetString("data")
te := this.GetString("type")
fmt.Println(data)
ip := this.Ctx.Input.IP()
go func(token, time, data, te, ip string) {
t, err := models.NewTokenOption().Get(token)
if err != nil {
return
}
tstime, _ := strconv.ParseInt(time, 10, 64)
ts := models.NewTS(data, te, ip, tstime, 0)
models.NewTSOption().Insert(t.User.Name, ts)
}(token, time, data, te, ip)
this.Ctx.Output.Header("Access-Control-Allow-Origin", "*")
this.Ctx.WriteString("")
}
func (this *TSController) domain() string {
return fmt.Sprintf("%s:%s", beego.AppConfig.String("Domain"), beego.AppConfig.String("httpport"))
}
| [1] |
package linkedList
type Node struct {
next, prev *Node
Value interface{}
}
func (n *Node) Next() *Node {
return n.next
}
func (n *Node) Prev() *Node {
return n.prev
}
func (n *Node) NewNode(value interface{}) *Node {
return &Node{Value: value}
}
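// Usage sketch (added for illustration): NewNode only allocates a detached
// node; nothing in this package links nodes together yet, so Next and Prev
// return nil until other code sets them.
func exampleNode() interface{} {
	var n Node
	head := n.NewNode("first value") // the receiver is not used by NewNode
	_ = head.Next()                  // nil: the node is not linked to anything
	return head.Value
}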
| [1] |
// Copyright (c) 2020 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package searcher
import (
"github.com/blugelabs/bluge/search"
)
func NewTermRangeSearcher(indexReader search.Reader,
min, max []byte, inclusiveMin, inclusiveMax bool, field string,
boost float64, scorer search.Scorer, compScorer search.CompositeScorer,
options search.SearcherOptions) (search.Searcher, error) {
if min == nil {
min = []byte{}
}
if max != nil && inclusiveMax {
max = append(max, 0)
}
fieldDict, err := indexReader.DictionaryIterator(field, nil, min, max)
if err != nil {
return nil, err
}
defer func() {
if cerr := fieldDict.Close(); cerr != nil && err == nil {
err = cerr
}
}()
var terms []string
tfd, err := fieldDict.Next()
for err == nil && tfd != nil {
terms = append(terms, tfd.Term())
tfd, err = fieldDict.Next()
}
if err != nil {
return nil, err
}
if len(terms) < 1 {
return NewMatchNoneSearcher(indexReader, options)
}
if !inclusiveMin && min != nil && string(min) == terms[0] {
terms = terms[1:]
// check again, as we might have removed only entry
if len(terms) < 1 {
return NewMatchNoneSearcher(indexReader, options)
}
}
return NewMultiTermSearcher(indexReader, terms, field, boost, scorer, compScorer, options, true)
}
| [1] |
package main
import (
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"time"
)
var (
// default retry configuration
defaultRetryWaitMin = 1 * time.Second
defaultRetryWaitMax = 30 * time.Second
defaultRetryMax = 4
)
type CheckForRetry func(resp *http.Response, err error) (bool, error)
// DefaultRetryPolicy is a CheckForRetry that retries on connection errors and when the response status is unknown (0) or 500+.
func DefaultRetryPolicy(resp *http.Response, err error) (bool, error) {
if err != nil {
return true, err
}
if resp.StatusCode == 0 || resp.StatusCode >= 500 {
return true, nil
}
return false, nil
}
// Backoff computes how long to wait between retry attempts.
type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration
// DefaultBackoff doubles the minimum wait on every attempt, capped at max.
func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
mult := math.Pow(2, float64(attemptNum)) * float64(min)
sleep := time.Duration(mult)
if float64(sleep) != mult || sleep > max {
sleep = max
}
return sleep
}
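// Worked example (added for illustration): with a 1s minimum and a 30s maximum,
// DefaultBackoff yields waits of 1s, 2s, 4s, 8s and 16s, then stays at 30s once
// the doubled value would exceed the cap.
func printBackoffSchedule() {
	for attempt := 0; attempt <= 5; attempt++ {
		wait := DefaultBackoff(defaultRetryWaitMin, defaultRetryWaitMax, attempt, nil)
		fmt.Printf("attempt %d: wait %v\n", attempt, wait)
	}
}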
// Client is an http client that retries failed requests.
type Client struct {
HTTPClient *http.Client
RetryWaitMin time.Duration
RetryWaitMax time.Duration
RetryMax int
CheckForRetry CheckForRetry
Backoff Backoff
}
func NewClient() *Client {
return &Client{
HTTPClient: http.DefaultClient,
RetryWaitMin: defaultRetryWaitMin,
RetryWaitMax: defaultRetryWaitMax,
RetryMax: defaultRetryMax,
CheckForRetry: DefaultRetryPolicy,
Backoff: DefaultBackoff,
}
}
type Request struct {
body io.ReadSeeker
*http.Request
}
func NewRequest(method, url string, body io.ReadSeeker) (*Request, error) {
var rcBody io.ReadCloser
if body != nil {
rcBody = ioutil.NopCloser(body)
}
httpReq, err := http.NewRequest(method, url, rcBody)
if err != nil {
return nil, err
}
return &Request{body, httpReq}, nil
}
// claim a Do method for Client
func (c *Client) Do(req *Request) (*http.Response, error) {
fmt.Printf("DEBUG %s %s\n", req.Method, req.URL)
for i := 0; ; i++ {
var code int // HTTP response code
// Always rewind the request body when non-nil
if req.Body != nil {
if _, err := req.body.Seek(0, 0); err != nil {
return nil, fmt.Errorf("failed to seek body: %v\n", err)
}
}
// Attempt the request
resp, err := c.HTTPClient.Do(req.Request)
// Check if we should continue with retries.
checkOk, checkErr := c.CheckForRetry(resp, err)
if err != nil {
fmt.Printf("ERR %s %s request failed: %v\n", req.Method, req.URL, err)
} else {
// record the status code so the retry log line below can report it
code = resp.StatusCode
}
// decide whether to continue
if !checkOk {
if checkErr != nil {
err = checkErr
}
return resp, err
}
// going to retry, consume any response to reuse the connection
if err == nil {
c.drainBody(resp.Body)
}
remain := c.RetryMax - i
if remain <= 0 {
break
}
wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp)
desc := fmt.Sprintf("%s %s", req.Method, req.URL)
if code > 0 {
desc = fmt.Sprintf("%s (status: %d)", desc, code)
}
fmt.Printf("DEBUG %s: retrying in %s (%d left)", desc, wait, remain)
time.Sleep(wait)
}
return nil, fmt.Errorf("%s %s giving up after %d attempts", req.Method, req.URL, c.RetryWaitMax+1)
}
// Try to read the response body so we can reuse this connection
func (c *Client) drainBody(body io.ReadCloser) {
defer body.Close()
_, err := io.Copy(ioutil.Discard, io.LimitReader(body, 10))
if err != nil {
fmt.Println("Error reading response body: %v", err)
}
}
// Get method
func (c *Client) Get(url string) (*http.Response, error) {
req, err := NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
return c.Do(req)
}
// Post method
func (c *Client) Post(url, bodyType string, body io.ReadSeeker) (*http.Response, error) {
req, err := NewRequest("POST", url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", bodyType)
return c.Do(req)
}
func main() {
retryclient := NewClient()
resp, err := retryclient.Get("https://google.com")
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(err)
}
fmt.Println(string(body))
}
| [1] |
package main
import (
"fmt"
)
// main reads an integer and prints the length of the longest run of
// consecutive 1 bits in its binary representation (e.g. 13 = 1101b gives 2).
func main() {
var n, remainder, count, max int
fmt.Scan(&n)
for n > 0 {
remainder = n % 2
if remainder == 1 {
count++
} else {
count = 0
}
if count > max {
max = count
}
n /= 2
}
fmt.Println(max)
}
| [1] |
package email
const (
hextable = "0123456789ABCDEF"
base64table = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
)
// QuotedPrintableEncode encodes the src data using the quoted-printable content
// transfer encoding specified by RFC 2045. Although RFC 2045 does not require that
// UTF multi-byte characters be kept on the same line of encoded text, this function
// does so.
func QuotedPrintableEncode(src []byte) []byte {
srcLen := len(src)
if srcLen == 0 {
return []byte{}
}
// guesstimate the max size of dst, trying to avoid reallocation on append
dst := make([]byte, 0, 2*srcLen)
pos := 0
var (
c byte
le int
// 'ending in space': does the encoded text end in whitespace?
eis bool
)
enc := make([]byte, 0, 12) // enough for encoding a 4-byte utf symbol
for i := 0; i < srcLen; i++ {
enc, eis = enc[:0], false
switch c = src[i]; {
case c == '\t', c == ' ':
enc = append(enc, c)
eis = true
case '!' <= c && c <= '~' && c != '=':
enc = append(enc, c)
case c&0xC0 == 0xC0:
// start of utf-8 rune; subsequent bytes always have the top two bits set to 10.
enc = append(make([]byte, 0, 12), '=', hextable[c>>4], hextable[c&0x0f])
for i++; i < srcLen; i++ {
c = src[i]
if c&0xC0 != 0x80 {
// stepped past the end of the rune; step back and break out
i--
break
}
enc = append(enc, '=', hextable[c>>4], hextable[c&0x0f])
}
default:
enc = append(enc, '=', hextable[c>>4], hextable[c&0x0f])
}
le = len(enc)
if pos += le; pos > 75 { // max 76; need room for '='
dst = append(dst, []byte("=\r\n")...)
pos = le
}
dst = append(dst, enc...)
}
if eis {
dst = append(dst, '=')
}
return dst
}
// QEncode encodes the src data using the q-encoding encoded-word syntax specified
// by RFC 2047. Since RFC 2047 requires that each line of a header that includes
// encoded-word text be no longer than 76, this function takes an offset argument
// for the length of the current header line already used up, e.g. by the header
// name, colon and space.
func QEncode(src []byte, offset int) (dst []byte, pos int) {
srcLen := len(src)
if srcLen == 0 {
return []byte{}, offset
}
// guesstimate the max size of dst, trying to avoid reallocation on append
dst = make([]byte, 0, 12+2*srcLen)
if offset < 1 {
// header line can be max 76, but encoded-words can only be max 75;
// on subsequent lines, if any, the leading space evens things out,
// but if the first line is empty, we need to pretend it has one char.
offset = 1
}
// count in the 10 chars of "=?utf-8?q?", but do not add them yet! There is
// a chance that we cannot fit even one encoded character on the first line,
// but we won't know its length until we encoded it.
pos = 10 + offset
var (
c byte
le int
)
enc := make([]byte, 0, 12) // enough for encoding a 4-byte utf symbol
for i := 0; i < srcLen; i++ {
enc = enc[:0]
switch c = src[i]; {
case c == ' ':
enc = append(enc, '_')
case '!' <= c && c <= '~' && c != '=' && c != '?' && c != '_':
enc = append(enc, c)
case c&0xC0 == 0xC0:
// start of utf-8 rune; subsequent bytes always have the top two bits set to 10.
enc = append(make([]byte, 0, 12), '=', hextable[c>>4], hextable[c&0x0f])
for i++; i < srcLen; i++ {
c = src[i]
if c&0xC0 != 0x80 {
// stepped past the end of the rune; step back and break out
i--
break
}
enc = append(enc, '=', hextable[c>>4], hextable[c&0x0f])
}
default:
enc = append(enc, '=', hextable[c>>4], hextable[c&0x0f])
}
le = len(enc)
if pos += le; pos > 74 { // max 76; need room for '?='
if len(dst) > 0 {
dst = append(dst, []byte("?=\r\n =?utf-8?q?")...)
} else {
// the first encoded char doesn't fit on the first line, so
// start a new line and the encoded-word
dst = append(dst, []byte("\r\n =?utf-8?q?")...)
}
pos = le + 11
} else {
if len(dst) == 0 {
// the first encoded char fits on the first line, so start the encoded-word
dst = append(dst, []byte("=?utf-8?q?")...)
}
}
dst = append(dst, enc...)
}
dst = append(dst, '?', '=')
pos += 2
return
}
// QEncodeIfNeeded q-encodes the src data only if it contains 'unsafe' characters.
func QEncodeIfNeeded(src []byte, offset int) (dst []byte) {
safe := true
for i, sl := 0, len(src); i < sl && safe; i++ {
safe = ' ' <= src[i] && src[i] <= '~'
}
if safe {
return src
}
dst, _ = QEncode(src, offset)
return dst
}
// Base64Encode encodes the src data using the base64 content transfer encoding
// specified by RFC 2045. The result is the equivalent of base64-encoding src using
// StdEncoding from the standard package encoding/base64, then breaking it into
// lines of maximum 76 characters, separated by CRLF. Besides convenience, this
// function also has the advantage of combining the encoding and line-breaking
// steps into a single pass, with a single buffer allocation.
func Base64Encode(src []byte) []byte {
if len(src) == 0 {
return []byte{}
}
dstLen := ((len(src) + 2) / 3 * 4) // base64 encoded length
dstLen += (dstLen - 1) / 76 * 2 // add 2 bytes for each full 76-char line
dst := make([]byte, dstLen)
// fmt.Println(len(src), dstLen)
var (
p [4]int
)
for pos, lpos := 0, 0; len(src) > 0; {
// fmt.Println("step", pos, len(src), len(dst))
switch 76 - lpos {
case 0:
dst[pos], dst[pos+1] = '\r', '\n'
p[0], p[1], p[2], p[3] = pos+2, pos+3, pos+4, pos+5
pos += 6
lpos = 4
case 1:
dst[pos+1], dst[pos+2] = '\r', '\n'
p[0], p[1], p[2], p[3] = pos, pos+3, pos+4, pos+5
pos += 6
lpos = 3
case 2:
dst[pos+2], dst[pos+3] = '\r', '\n'
p[0], p[1], p[2], p[3] = pos, pos+1, pos+4, pos+5
pos += 6
lpos = 2
case 3:
dst[pos+3], dst[pos+4] = '\r', '\n'
p[0], p[1], p[2], p[3] = pos, pos+1, pos+2, pos+5
pos += 6
lpos = 1
default:
p[0], p[1], p[2], p[3] = pos, pos+1, pos+2, pos+3
pos += 4
lpos += 4
}
switch len(src) {
case 1:
dst[p[3]], dst[p[2]] = '=', '='
dst[p[1]] = base64table[(src[0]<<4)&0x3F]
dst[p[0]] = base64table[src[0]>>2]
return dst
case 2:
dst[p[3]] = '='
dst[p[2]] = base64table[(src[1]<<2)&0x3F]
dst[p[1]] = base64table[(src[1]>>4)|(src[0]<<4)&0x3F]
dst[p[0]] = base64table[src[0]>>2]
return dst
default:
dst[p[3]] = base64table[src[2]&0x3F]
dst[p[2]] = base64table[(src[2]>>6)|(src[1]<<2)&0x3F]
dst[p[1]] = base64table[(src[1]>>4)|(src[0]<<4)&0x3F]
dst[p[0]] = base64table[src[0]>>2]
src = src[3:]
}
}
return dst
}
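// Usage sketch (added for illustration): encode the same input with each of
// the encoders above. The offset assumes a "Subject: " header prefix; results
// are returned rather than printed so no extra imports are needed.
func exampleEncodings(subject string) (qp, qword, b64 string) {
	src := []byte(subject)
	qp = string(QuotedPrintableEncode(src)) // body encoding per RFC 2045
	encoded, _ := QEncode(src, len("Subject: "))
	qword = string(encoded) // header encoded-word per RFC 2047
	b64 = string(Base64Encode(src))
	return qp, qword, b64
}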
| [4] |
package main
import (
"fmt"
)
// lengthOfLongestSubstring returns the length of the longest substring of s
// without repeating characters, using a sliding window (assumes ASCII input).
func lengthOfLongestSubstring(s string) int {
cache := [128]bool{} // bytes currently inside the window
var max, length, tail int
for i := range s {
// shrink the window from the left until s[i] is no longer a duplicate
for cache[s[i]] {
cache[s[tail]] = false
length--
tail++
}
cache[s[i]] = true
length++
if max < length {
max = length
}
}
return max
}
func main() {
input := "bbb"
fmt.Println(lengthOfLongestSubstring(input))
}
| [1] |
package harmony
import (
"math/big"
"strconv"
log "github.com/sirupsen/logrus"
"github.com/trustwallet/blockatlas/pkg/blockatlas"
"github.com/trustwallet/blockatlas/services/assets"
"github.com/trustwallet/golibs/types"
)
const (
lockTime = 604800 // in seconds (7 epochs or 7 days)
)
func (p *Platform) GetActiveValidators() (blockatlas.StakeValidators, error) {
validators, err := assets.GetValidatorsMap(p)
if err != nil {
return nil, err
}
result := make(blockatlas.StakeValidators, 0, len(validators))
for _, v := range validators {
result = append(result, v)
}
return result, nil
}
func (p *Platform) GetValidators() (blockatlas.ValidatorPage, error) {
results := make(blockatlas.ValidatorPage, 0)
validators, err := p.client.GetValidators()
if err != nil {
return results, err
}
for _, v := range validators.Validators {
var apr float64
if apr, err = strconv.ParseFloat(v.Lifetime.Apr, 64); err != nil {
apr = 0
}
results = append(results, normalizeValidator(v, apr))
}
return results, nil
}
func (p *Platform) GetDetails() blockatlas.StakingDetails {
apr := p.GetMaxAPR()
return getDetails(apr)
}
func (p *Platform) GetMaxAPR() float64 {
validators, err := p.client.GetValidators()
if err != nil {
return Annual
}
var max = 0.0
for _, e := range validators.Validators {
var apr float64
if apr, err = strconv.ParseFloat(e.Lifetime.Apr, 64); err != nil {
apr = 0.0
}
if apr > max {
max = apr
}
}
return max
}
func (p *Platform) GetDelegations(address string) (blockatlas.DelegationsPage, error) {
delegations, err := p.client.GetDelegations(address)
if err != nil {
return nil, err
}
validators, err := assets.GetValidatorsMap(p)
if err != nil {
return nil, err
}
return NormalizeDelegations(delegations.List, validators), nil
}
func (p *Platform) UndelegatedBalance(address string) (string, error) {
balance, err := p.client.GetBalance(address)
if err != nil {
return "0", err
}
return balance, nil
}
func NormalizeDelegations(delegations []Delegation, validators blockatlas.ValidatorMap) []blockatlas.Delegation {
results := make([]blockatlas.Delegation, 0)
for _, v := range delegations {
validator, ok := validators[v.ValidatorAddress]
if !ok {
log.WithFields(
log.Fields{"address": v.ValidatorAddress, "platform": "harmony", "delegation": v.DelegatorAddress},
).Error("Validator not found")
continue
}
bigval := new(big.Float)
bigval.SetFloat64(v.Amount)
result := new(big.Int)
bigval.Int(result) // store converted number in result
delegation := blockatlas.Delegation{
Delegator: validator,
Value: result.String(), // v.Amount.String(),
Status: blockatlas.DelegationStatusActive,
}
results = append(results, delegation)
}
return results
}
func getDetails(apr float64) blockatlas.StakingDetails {
return blockatlas.StakingDetails{
Reward: blockatlas.StakingReward{Annual: apr},
MinimumAmount: types.Amount("1000"),
LockTime: lockTime,
Type: blockatlas.DelegationTypeDelegate,
}
}
func normalizeValidator(v Validator, apr float64) (validator blockatlas.Validator) {
return blockatlas.Validator{
Status: v.Active,
ID: v.Info.Address,
Details: getDetails(apr),
}
}
| [1] |
package ui
import (
"encoding/json"
"fmt"
"strings"
)
type jsOption struct {
Dev bool `json:"dev"`
TLS bool `json:"tls"`
ReadyFuncName string `json:"readyFuncName"`
Prefix string `json:"prefix"`
Search string `json:"search"`
Bindings []string `json:"bindings"`
BlurOnClose bool `json:"blurOnClose"`
}
func injectOptions(op *jsOption) string {
if op == nil {
op = &jsOption{}
}
op.Dev = false
op.ReadyFuncName = ReadyFuncName
raw, _ := json.MarshalIndent(op, " ", " ")
text := string(raw)
return mapScript(script, "options = null", fmt.Sprintf("options = %s", text))
}
func mapScript(in, old, new string) string {
if old == new {
return in
}
index := strings.Index(in, old)
if index == -1 {
panic(fmt.Sprintf("mspScript error, old string not found: %s", old))
}
ret := strings.Replace(in, old, new, 1)
index = strings.Index(ret, old)
if index != -1 {
panic(fmt.Sprintf("mspScript error, old string appears many times: %s", old))
}
return ret
}
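// Usage sketch (added for illustration): mapScript replaces exactly one
// occurrence of a marker and panics if the marker is missing or appears more
// than once. The template below is a hypothetical snippet, not the embedded
// script used by injectOptions.
func exampleMapScript() string {
	tmpl := `let options;
options = null;`
	return mapScript(tmpl, "options = null", `options = {"dev": false}`)
}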
var script = `var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
(function () {
let options;
// inject server options here
options = null;
if (options === null) {
options = {
dev: true,
tls: false,
readyFuncName: "Vuego",
prefix: "",
search: "?name=api",
bindings: [],
blurOnClose: true
};
}
let dev = options.dev;
class Vuego {
constructor(ws) {
this.ws = ws;
this.resolveAPI = null;
this.lastRefID = 0;
this.beforeReady = null;
this.buildRoot();
this.attach();
this.initContext();
}
buildRoot() {
let root = {};
const ready = new Promise((resolve, reject) => {
this.resolveAPI = resolve;
});
// root[options.readyFuncName] = () => ready;
root[options.readyFuncName] = {};
for (const name of options.bindings) {
let placeholder = function () {
return __awaiter(this, arguments, void 0, function* () {
yield ready;
if (root[name] === placeholder) {
throw new Error("binding is not ready: " + name);
return;
}
return yield root[name](...arguments);
});
};
root[name] = placeholder;
this.copyBind(name, root);
}
this.extendVuego(root[options.readyFuncName]);
this.root = root;
}
extendVuego(Vuego) {
Vuego.self = this;
}
getapi() {
return this.root;
}
replymessage(id, ret, err) {
if (ret === undefined)
ret = null;
if (err === undefined)
err = null;
let msg = {
id: id,
method: "Vuego.ret",
params: {
result: ret,
error: err
}
};
this.ws.send(JSON.stringify(msg));
}
onmessage(e) {
let ws = this.ws;
let msg = JSON.parse(e.data);
if (dev)
console.log("receive: ", JSON.stringify(msg, null, " "));
let root = this.root;
let method = msg.method;
let params;
switch (method) {
case "Vuego.call": {
params = msg.params;
switch (params.name) {
case "eval": {
let ret, err;
try {
ret = eval(params.args[0]);
}
catch (ex) {
err = ex.toString() || "unknown error";
}
this.replymessage(msg.id, ret, err);
break;
}
}
break;
}
case "Vuego.ret": {
let { name, seq, result, error } = msg.params;
if (error) {
root[name]["errors"].get(seq)(error);
}
else {
root[name]["results"].get(seq)(result);
}
root[name]["errors"].delete(seq);
root[name]["results"].delete(seq);
break;
}
case "Vuego.callback": {
let { name, seq, args } = msg.params;
let ret, err;
try {
ret = root[name]["callbacks"].get(seq)(...args);
}
catch (ex) {
err = ex.toString() || "unknown error";
}
this.replymessage(msg.id, ret, err);
break;
}
case "Vuego.closeCallback": {
let { name, seq } = msg.params;
root[name]["callbacks"].delete(seq);
break;
}
case "Vuego.bind": {
params = msg.params;
if (Array.isArray(params.name))
for (const name of params.name)
this.bind(name);
else
this.bind(params.name);
break;
}
case "Vuego.ready": {
if (this.beforeReady !== null) {
this.beforeReady();
}
if (this.resolveAPI != null) {
this.resolveAPI();
}
break;
}
}
}
attach() {
let ws = this.ws;
ws.onmessage = this.onmessage.bind(this);
ws.onopen = e => {
if (options.blurOnClose)
window.document.body.style.opacity = 1;
};
ws.onerror = e => {
console.log("ws error at", new Date().toLocaleString(), e);
};
ws.onclose = e => {
if (options.blurOnClose)
window.document.body.style.opacity = 0.382;
console.log("ws close at", new Date().toLocaleString(), e);
};
}
bind(name) {
let root = this.root;
const bindingName = name;
root[bindingName] = (...args) => __awaiter(this, void 0, void 0, function* () {
const me = root[bindingName];
for (let i = 0; i < args.length; i++) {
// support javascript functions as arguments
if (typeof args[i] == "function") {
let callbacks = me["callbacks"];
if (!callbacks) {
callbacks = new Map();
me["callbacks"] = callbacks;
}
const seq = (callbacks["lastSeq"] || 0) + 1;
callbacks["lastSeq"] = seq;
callbacks.set(seq, args[i]); // root[bindingName].callbacks[callbackSeq] = func value
args[i] = {
bindingName: bindingName,
seq: seq
};
}
else if (args[i] instanceof this.contextType) {
const seq = ++this.lastRefID;
// js: rewrite input Context().seq = seq
args[i].seq = seq;
// go: will create Context object from seq and put it in jsclient.refs
args[i] = {
seq: seq
};
}
}
// prepare (errors, results, lastSeq) on binding function
let errors = me["errors"];
let results = me["results"];
if (!results) {
results = new Map();
me["results"] = results;
}
if (!errors) {
errors = new Map();
me["errors"] = errors;
}
const seq = (me["lastSeq"] || 0) + 1;
me["lastSeq"] = seq;
const promise = new Promise((resolve, reject) => {
results.set(seq, resolve);
errors.set(seq, reject);
});
// call go
let callMsg = {
method: "Vuego.call",
params: {
name: bindingName,
seq,
args
}
};
// binding call phrase 1
this.ws.send(JSON.stringify(callMsg));
return promise;
});
this.copyBind(bindingName, root);
}
copyBind(bindingName, root) {
// copy root["a.b"] to root.a.b
if (bindingName.indexOf(".") !== -1) {
const sp = bindingName.split(".");
const [parts, name] = [sp.slice(0, sp.length - 1), sp[sp.length - 1]];
let target = root;
for (const part of parts) {
target[part] = target[part] || {};
target = target[part];
}
target[name] = root[bindingName];
}
}
initContext() {
let $this = this;
// Context class
function Context() {
this.seq = -1; // this will be rewritten as the refID
this.cancel = () => {
let msg = {
method: "Vuego.refCall",
params: {
seq: this.seq
}
};
$this.ws.send(JSON.stringify(msg));
};
this.getThis = () => {
return $this;
};
}
this.contextType = Context;
const TODO = new Context();
const Background = new Context();
// context package
this.root.context = {
withCancel() {
let ctx = new Context();
return [ctx, ctx.cancel];
},
background() {
return Background;
},
todo() {
return TODO;
}
};
}
}
function getparam(name, search) {
search = search === undefined ? window.location.search : search;
let pair = search
.slice(1)
.split("&")
.map(one => one.split("="))
.filter(one => one[0] == name)
.slice(-1)[0];
if (pair === undefined)
return;
return pair[1] || "";
}
function main() {
let host = window.location.host;
let ws = new WebSocket((options.tls ? "wss://" : "ws://") + host + options.prefix + "/vuego");
let vuego = new Vuego(ws);
let api = vuego.getapi();
let exportAPI = () => {
let name = getparam("name", options.search);
let win = window;
if (name === undefined || name === "window")
Object.assign(win, api);
else if (name)
win[name] = api;
};
vuego.beforeReady = exportAPI;
exportAPI();
return api;
}
return main();
})();
`
| [1] |
package mocks
import "github.com/umputun/secrets/backend/app/store"
import "github.com/stretchr/testify/mock"
type Engine struct {
mock.Mock
}
func (_m *Engine) Save(msg *store.Message) error {
ret := _m.Called(msg)
var r0 error
if rf, ok := ret.Get(0).(func(*store.Message) error); ok {
r0 = rf(msg)
} else {
r0 = ret.Error(0)
}
return r0
}
func (_m *Engine) Load(key string) (*store.Message, error) {
ret := _m.Called(key)
var r0 *store.Message
if rf, ok := ret.Get(0).(func(string) *store.Message); ok {
r0 = rf(key)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*store.Message)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(key)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
func (_m *Engine) IncErr(key string) (int, error) {
ret := _m.Called(key)
var r0 int
if rf, ok := ret.Get(0).(func(string) int); ok {
r0 = rf(key)
} else {
r0 = ret.Get(0).(int)
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(key)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
func (_m *Engine) Remove(key string) error {
ret := _m.Called(key)
var r0 error
if rf, ok := ret.Get(0).(func(string) error); ok {
r0 = rf(key)
} else {
r0 = ret.Error(0)
}
return r0
}
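// Usage sketch (added for illustration): set an expectation on the mock and
// call through it, the way a test would. The key value is hypothetical.
func exampleEngineMock() (*store.Message, error) {
	m := &Engine{}
	m.On("Load", "some-key").Return(&store.Message{}, nil)
	return m.Load("some-key")
}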
| [4] |
package main
import (
"fmt"
"io/ioutil"
"log"
"strconv"
"strings"
"github.com/bakerolls/adventofcode-2018/day-11/grid"
)
func main() {
b, err := ioutil.ReadFile("../input.txt")
if err != nil {
log.Fatal(err)
}
serial, err := strconv.Atoi(strings.TrimSpace(string(b)))
if err != nil {
log.Fatal(err)
}
width, height := 300, 300
var max, maxX, maxY, maxSize int
g := grid.New(width, height, serial)
for s := 0; s < width; s++ {
x, y, m := g.Max(s)
if m > max {
max, maxX, maxY, maxSize = m, x, y, s
}
fmt.Printf("%d,%d,%d,%d,%d\n", s, maxX, maxY, maxSize, max)
}
fmt.Printf("%d,%d,%d\n", maxX, maxY, maxSize)
}
| [1] |
// Code generated by mockery v1.0.0. DO NOT EDIT.
package mocks
import mock "github.com/stretchr/testify/mock"
// Queue is an autogenerated mock type for the Queue type
type Queue struct {
mock.Mock
}
// Len provides a mock function with given fields:
func (_m *Queue) Len() int {
ret := _m.Called()
var r0 int
if rf, ok := ret.Get(0).(func() int); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(int)
}
return r0
}
// Pop provides a mock function with given fields:
func (_m *Queue) Pop() interface{} {
ret := _m.Called()
var r0 interface{}
if rf, ok := ret.Get(0).(func() interface{}); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(interface{})
}
}
return r0
}
// Push provides a mock function with given fields: v
func (_m *Queue) Push(v interface{}) interface{} {
ret := _m.Called(v)
var r0 interface{}
if rf, ok := ret.Get(0).(func(interface{}) interface{}); ok {
r0 = rf(v)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(interface{})
}
}
return r0
}
| [4] |
package queue
import "fmt"
type SPSlot struct {
BaseSlot
_val interface{}
}
type SPSlotArr []SPSlot
// NewSPSlotArr allocates cap slots and links each slot to the next one.
// NOTE: cap must be at least 1, otherwise cap-1 underflows below.
func NewSPSlotArr(cap uint32) *SPSlotArr {
arr := make(SPSlotArr, cap)
for i := uint32(0); i < cap-1; i++ {
fmt.Printf("arr[%v]:%p, next:%p\n", i, &arr[i], &arr[i+1])
arr[i].setNext(&arr[i+1])
}
return &arr
}
func NewSPSlotRing(cap uint32) *SPSlotArr {
arr := NewSPSlotArr(cap)
fmt.Printf("arr[%v]:%p, next:%p\n", cap-1, &(*arr)[cap-1], &(*arr)[0])
(*arr)[cap-1].setNext(&(*arr)[0])
return arr
}
func (s *SPSlotArr) HeadSlot() *SPSlot {
return &(*s)[0]
}
func (s *SPSlotArr) LastSlot() *SPSlot {
return &(*s)[len(*s)-1]
}
func (s *SPSlotArr) Len() uint32 {
return uint32(len(*s))
}
func (s *SPSlotArr) Get(i uint32) Slot {
return &((*s)[i])
}
| [1] |
package internal
import (
"fmt"
"math"
"math/big"
"reflect"
"strconv"
"github.com/tada/catch"
"github.com/tada/dgo/dgo"
)
type (
// intVal is an int64 that implements the dgo.Value interface
intVal int64
defaultIntegerType int
integerType struct {
min dgo.Integer
max dgo.Integer
inclusive bool
}
)
// DefaultIntegerType is the unconstrained Int64 type
const DefaultIntegerType = defaultIntegerType(dgo.TiInteger)
var reflectIntegerType = reflect.TypeOf(int64(0))
// Integer64Type returns a dgo.Integer64Type that is limited to the inclusive range given by min and max
// If inclusive is true, then the range has an inclusive end.
func Integer64Type(min, max int64, inclusive bool) dgo.IntegerType {
if min == max {
if !inclusive {
panic(catch.Error(`non inclusive range cannot have equal min and max`))
}
return intVal(min).Type().(dgo.IntegerType)
}
if max < min {
t := max
max = min
min = t
}
var minV dgo.Integer
var maxV dgo.Integer
if min != math.MinInt64 {
minV = intVal(min)
}
if max != math.MaxInt64 {
maxV = intVal(max)
}
if minV == nil && maxV == nil {
return DefaultIntegerType
}
return &integerType{min: minV, max: maxV, inclusive: inclusive}
}
// IntegerType returns a dgo.Integer64Type that is limited to the inclusive range given by min and max
// If inclusive is true, then the range has an inclusive end. The IntegerType.ReflectType() returns
// the *big.Int type.
func IntegerType(min, max dgo.Integer, inclusive bool) dgo.IntegerType {
if min != nil && max != nil {
cmp, _ := min.CompareTo(max)
if cmp == 0 {
if !inclusive {
panic(catch.Error(`non inclusive range cannot have equal min and max`))
}
return min.(dgo.IntegerType)
}
if cmp > 0 {
t := max
max = min
min = t
}
} else if min == nil && max == nil {
return DefaultIntegerType
}
_, useBig := min.(dgo.BigInt)
if !useBig {
_, useBig = max.(dgo.BigInt)
}
if useBig {
return &bigIntType{integerType{min: min, max: max, inclusive: inclusive}}
}
return &integerType{min: min, max: max, inclusive: inclusive}
}
func (t *integerType) Assignable(other dgo.Type) bool {
switch ot := other.(type) {
case defaultIntegerType:
return false
case dgo.IntegerType:
if t.min != nil {
om := ot.Min()
if om == nil {
return false
}
cmp, _ := t.min.CompareTo(om)
if cmp > 0 {
return false
}
}
if mm := t.max; mm != nil {
om := ot.Max()
if om == nil {
return false
}
if t.Inclusive() {
mm = mm.Inc()
}
if ot.Inclusive() {
om = om.Inc()
}
cmp, _ := mm.CompareTo(om)
if cmp < 0 {
return false
}
}
return true
}
return CheckAssignableTo(nil, other, t)
}
func (t *integerType) Equals(other interface{}) bool {
ot, ok := other.(dgo.IntegerType)
return ok && t.inclusive == ot.Inclusive() && equals(nil, t.min, ot.Min()) && equals(nil, t.max, ot.Max())
}
func (t *integerType) HashCode() dgo.Hash {
h := dgo.Hash(dgo.TiIntegerRange)
if t.min != nil {
h = h*31 + t.min.HashCode()
}
if t.max != nil {
h = h*31 + t.max.HashCode()
}
if t.inclusive {
h *= 3
}
return h
}
func (t *integerType) Inclusive() bool {
return t.inclusive
}
func (t *integerType) Instance(value interface{}) bool {
yes := false
switch ov := value.(type) {
case dgo.Integer:
yes = t.isInstance(ov)
case int:
yes = t.isInstance(intVal(ov))
case uint:
yes = t.isInstance(uintVal(ov))
case uint64:
yes = t.isInstance(uintVal(ov))
case *big.Int:
yes = t.isInstance(&bigIntVal{ov})
default:
var iv int64
iv, yes = ToInt(value)
yes = yes && t.isInstance(intVal(iv))
}
return yes
}
func (t *integerType) isInstance(i dgo.Integer) bool {
if t.min != nil {
cmp, ok := t.min.CompareTo(i)
if !ok || cmp > 0 {
return false
}
}
if t.max != nil {
cmp, ok := t.max.CompareTo(i)
if !ok || cmp < 0 || cmp == 0 && !t.inclusive {
return false
}
}
return true
}
func (t *integerType) Max() dgo.Integer {
return t.max
}
func (t *integerType) Min() dgo.Integer {
return t.min
}
func (t *integerType) New(arg dgo.Value) dgo.Value {
return newInt(t, arg)
}
func (t *integerType) ReflectType() reflect.Type {
return reflectIntegerType
}
func (t *integerType) String() string {
return TypeString(t)
}
func (t *integerType) Type() dgo.Type {
return MetaType(t)
}
func (t *integerType) TypeIdentifier() dgo.TypeIdentifier {
return dgo.TiIntegerRange
}
func (t defaultIntegerType) Assignable(other dgo.Type) bool {
_, ok := other.(dgo.IntegerType)
return ok || CheckAssignableTo(nil, other, t)
}
func (t defaultIntegerType) Equals(other interface{}) bool {
_, ok := other.(defaultIntegerType)
return ok
}
func (t defaultIntegerType) HashCode() dgo.Hash {
return dgo.Hash(dgo.TiInteger)
}
func (t defaultIntegerType) Instance(value interface{}) bool {
switch value.(type) {
case dgo.Integer, *big.Int, int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8:
return true
}
return false
}
func (t defaultIntegerType) Inclusive() bool {
return true
}
func (t defaultIntegerType) Max() dgo.Integer {
return nil
}
func (t defaultIntegerType) Min() dgo.Integer {
return nil
}
func (t defaultIntegerType) New(arg dgo.Value) dgo.Value {
return newInt(t, arg)
}
func (t defaultIntegerType) ReflectType() reflect.Type {
return reflectIntegerType
}
func (t defaultIntegerType) String() string {
return TypeString(t)
}
func (t defaultIntegerType) Type() dgo.Type {
return MetaType(t)
}
func (t defaultIntegerType) TypeIdentifier() dgo.TypeIdentifier {
return dgo.TypeIdentifier(t)
}
// IntEnumType returns a Type that represents any of the given integers
func IntEnumType(ints []int) dgo.Type {
switch len(ints) {
case 0:
return &notType{DefaultAnyType}
case 1:
return intVal(ints[0]).Type()
}
ts := make([]dgo.Value, len(ints))
for i := range ints {
ts[i] = intVal(ints[i]).Type()
}
return &anyOfType{slice: ts}
}
// Int64 returns the dgo.Integer for the given int64
func Int64(v int64) dgo.Integer {
return intVal(v)
}
func (v intVal) Assignable(other dgo.Type) bool {
return v.Equals(other) || CheckAssignableTo(nil, other, v)
}
func (v intVal) compare64(fv int64) int {
r := 0
switch {
case int64(v) > fv:
r = 1
case int64(v) < fv:
r = -1
}
return r
}
func (v intVal) compareBig(ov *big.Int) int {
r := 0
if ov.IsInt64() {
r = v.compare64(ov.Int64())
} else {
r = -ov.Sign()
}
return r
}
func (v intVal) compareU64(ov uint64) int {
r := 0
if ov > math.MaxInt64 {
r = -1
} else {
r = v.compare64(int64(ov))
}
return r
}
func (v intVal) CompareTo(other interface{}) (int, bool) {
r := 0
ok := true
switch ov := other.(type) {
case nil, nilValue:
r = 1
case intVal:
r = v.compare64(int64(ov))
case int:
r = v.compare64(int64(ov))
case int64:
r = v.compare64(ov)
case uintVal:
r = v.compareU64(uint64(ov))
case uint:
r = v.compareU64(uint64(ov))
case uint64:
r = v.compareU64(ov)
case *bigIntVal:
r = v.compareBig(ov.Int)
case *big.Int:
r = v.compareBig(ov)
case dgo.Float:
r, ok = v.Float().CompareTo(ov)
default: // all other int types
var iv int64
iv, ok = ToInt(other)
if ok {
r = v.compare64(iv)
}
}
return r, ok
}
func (v intVal) Dec() dgo.Integer {
return v - 1
}
func (v intVal) Equals(other interface{}) bool {
i, ok := ToInt(other)
return ok && int64(v) == i
}
func (v intVal) Float() dgo.Float {
return floatVal(v)
}
func (v intVal) Format(s fmt.State, format rune) {
doFormat(int64(v), s, format)
}
func (v intVal) Generic() dgo.Type {
return DefaultIntegerType
}
func (v intVal) GoInt() int64 {
return int64(v)
}
func (v intVal) HashCode() dgo.Hash {
return dgo.Hash(v ^ (v >> 32))
}
func (v intVal) Inc() dgo.Integer {
return v + 1
}
func (v intVal) Inclusive() bool {
return true
}
func (v intVal) Instance(value interface{}) bool {
return v.Equals(value)
}
func (v intVal) Integer() dgo.Integer {
return v
}
func (v intVal) intPointer(kind reflect.Kind) reflect.Value {
var p reflect.Value
switch kind {
case reflect.Int:
gv := int(v)
p = reflect.ValueOf(&gv)
case reflect.Int8:
gv := int8(v)
p = reflect.ValueOf(&gv)
case reflect.Int16:
gv := int16(v)
p = reflect.ValueOf(&gv)
case reflect.Int32:
gv := int32(v)
p = reflect.ValueOf(&gv)
case reflect.Uint:
gv := uint(v)
p = reflect.ValueOf(&gv)
case reflect.Uint8:
gv := uint8(v)
p = reflect.ValueOf(&gv)
case reflect.Uint16:
gv := uint16(v)
p = reflect.ValueOf(&gv)
case reflect.Uint32:
gv := uint32(v)
p = reflect.ValueOf(&gv)
case reflect.Uint64:
gv := uint64(v)
p = reflect.ValueOf(&gv)
default:
gv := int64(v)
p = reflect.ValueOf(&gv)
}
return p
}
func (v intVal) Max() dgo.Integer {
return v
}
func (v intVal) Min() dgo.Integer {
return v
}
func (v intVal) New(arg dgo.Value) dgo.Value {
return newInt(v, arg)
}
func (v intVal) ReflectTo(value reflect.Value) {
switch value.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
value.SetInt(int64(v))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
value.SetUint(uint64(v))
case reflect.Ptr:
value.Set(v.intPointer(value.Type().Elem().Kind()))
default:
value.Set(reflect.ValueOf(int64(v)))
}
}
func (v intVal) ReflectType() reflect.Type {
return reflectIntegerType
}
func (v intVal) String() string {
return TypeString(v)
}
func (v intVal) ToBigInt() *big.Int {
return big.NewInt(int64(v))
}
func (v intVal) ToBigFloat() *big.Float {
return new(big.Float).SetInt64(int64(v))
}
func (v intVal) ToFloat() (float64, bool) {
return float64(v), true
}
func (v intVal) ToInt() (int64, bool) {
return int64(v), true
}
func (v intVal) ToUint() (uint64, bool) {
if v >= 0 {
return uint64(v), true
}
return 0, false
}
func (v intVal) Type() dgo.Type {
return v
}
func (v intVal) TypeIdentifier() dgo.TypeIdentifier {
return dgo.TiIntegerExact
}
// ToInt returns (the value as an int64, true) if it fits into that data type, or (0, false) if not
func ToInt(value interface{}) (int64, bool) {
ok := true
v := int64(0)
switch value := value.(type) {
case intVal:
v = int64(value)
case int:
v = int64(value)
case int64:
v = value
case int32:
v = int64(value)
case int16:
v = int64(value)
case int8:
v = int64(value)
case uint:
if value <= math.MaxInt64 {
v = int64(value)
} else {
ok = false
}
case uint64:
if value <= math.MaxInt64 {
v = int64(value)
} else {
ok = false
}
case uintVal:
if value <= math.MaxInt64 {
v = int64(value)
} else {
ok = false
}
case uint32:
v = int64(value)
case uint16:
v = int64(value)
case uint8:
v = int64(value)
case *big.Int:
if value.IsInt64() {
v = value.Int64()
} else {
ok = false
}
case dgo.BigInt:
v, ok = value.ToInt()
default:
ok = false
}
return v, ok
}
var radixType = IntEnumType([]int{0, 2, 8, 10, 16})
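// newInt converts arg (with an optional radix argument) to an Integer and panics unless the result is an instance of t.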
func newInt(t dgo.Type, arg dgo.Value) (i dgo.Integer) {
if args, ok := arg.(dgo.Arguments); ok {
args.AssertSize(`int`, 1, 2)
if args.Len() == 2 {
i = intFromConvertible(args.Get(0), int(args.Arg(`int`, 1, radixType).(dgo.Integer).GoInt()))
} else {
i = intFromConvertible(args.Get(0), 0)
}
} else {
i = intFromConvertible(arg, 0)
}
if !t.Instance(i) {
panic(IllegalAssignment(t, i))
}
return i
}
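// intFromConvertible converts a Number, Boolean, or String to an Integer, falling back to big integers for string values that overflow int64.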
func intFromConvertible(from dgo.Value, radix int) dgo.Integer {
switch from := from.(type) {
case dgo.Number:
return from.Integer()
case dgo.Boolean:
if from.GoBool() {
return intVal(1)
}
return intVal(0)
case dgo.String:
s := from.GoString()
i, err := strconv.ParseInt(s, radix, 64)
if err == nil {
return Int64(i)
}
numErr, ok := err.(*strconv.NumError)
if ok && numErr.Err == strconv.ErrRange {
var bi *big.Int
if bi, ok = new(big.Int).SetString(s, radix); ok {
if bi.IsUint64() {
return uintVal(bi.Uint64())
}
return BigInt(bi)
}
}
}
panic(catch.Error(`the value '%v' cannot be converted to an int`, from))
}
|
[
1
] |
package stream_test
import (
"stream"
"testing"
)
func TestBasicStream(t *testing.T) {
	const control, data, errMsg = "Control", "Data", "Error"
	var s stream.Stream
	s.SetControlMessage(control)
	s.SetDataMessage(data)
	s.SetErrorMessage(errMsg)
	if s.ControlMessage != control || s.DataMessage != data || s.ErrorMessage != errMsg {
		t.Errorf("Stream object failed basic creation.")
	}
}
|
[
1
] |
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "github.com/user-service/pkg/domain"
)
// UserRepo is an autogenerated mock type for the UserRepo type
type UserRepo struct {
mock.Mock
}
// CreateUser provides a mock function with given fields: _a0
func (_m *UserRepo) CreateUser(_a0 *models.User) (*models.User, error) {
ret := _m.Called(_a0)
var r0 *models.User
if rf, ok := ret.Get(0).(func(*models.User) *models.User); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.User)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*models.User) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DeleteUser provides a mock function with given fields: _a0
func (_m *UserRepo) DeleteUser(_a0 uint64) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func(uint64) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}
// GetUser provides a mock function with given fields: _a0
func (_m *UserRepo) GetUser(_a0 uint64) (*models.User, error) {
ret := _m.Called(_a0)
var r0 *models.User
if rf, ok := ret.Get(0).(func(uint64) *models.User); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.User)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(uint64) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetUserByEmail provides a mock function with given fields: _a0
func (_m *UserRepo) GetUserByEmail(_a0 string) (*models.User, error) {
ret := _m.Called(_a0)
var r0 *models.User
if rf, ok := ret.Get(0).(func(string) *models.User); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.User)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// UpdateUser provides a mock function with given fields: _a0
func (_m *UserRepo) UpdateUser(_a0 *models.User) (*models.User, error) {
ret := _m.Called(_a0)
var r0 *models.User
if rf, ok := ret.Get(0).(func(*models.User) *models.User); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.User)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*models.User) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
|
[
4
] |
package main
import (
"fmt"
"github.com/williamjnzhang/cmtparser/ast"
"github.com/williamjnzhang/cmtparser/parser"
"go/token"
"strings"
)
var (
functypesrc = "./commentFuncType.src"
strutypesrc = "./commentStructType.src"
)
var a struct {
b string
}
func main() {
fset := token.NewFileSet()
f, err := parser.ParseFile(fset, strutypesrc, nil, parser.ParseComments|parser.AllErrors)
if err != nil {
fmt.Println(err)
return
}
var v visitor
ast.Walk(v, f)
}
type visitor int
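// Visit prints each node type indented by the current depth and dumps any comment attached to an identifier.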
func (v visitor) Visit(n ast.Node) ast.Visitor {
fmt.Printf("%s%T\n", strings.Repeat("\t", int(v)), n)
switch t := n.(type) {
// case *ast.CommentGroup:
// if t.List != nil {
// for _, c := range t.List {
// fmt.Printf("cmt: %s", c.Text)
// }
// }
case *ast.Ident:
if t.Comment != nil {
for _, c := range t.Comment.List {
fmt.Printf("ident cmt: %s", c.Text)
}
}
}
return v + 1
}
|
[
7
] |
package fan_out_fan_in
import (
"fmt"
"math/rand"
"time"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func exampleOfBadSearch() {
max := 50000000
done := make(chan interface{})
defer close(done)
t := time.Now()
fmt.Println("Primes:")
numbers := randIntStream(done, max)
for prime := range take(done, primeFinder(done, numbers), 10) {
fmt.Printf("\t%d\n", prime)
}
fmt.Printf("Search took: %v\n", time.Since(t))
// Output:
// Primes:
// 22466923
// ...
// 18044753
// Search took: 22.458129071s
}
func randI(max int) int {
return rand.Intn(max)
}
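// randIntStream emits random integers below max until done is closed.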
func randIntStream(done <-chan interface{}, max int) <-chan int {
out := make(chan int)
go func() {
defer close(out)
for {
select {
case <-done:
return
case out <- randI(max):
}
}
}()
return out
}
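// primeFinder forwards only the prime numbers it receives from in.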
func primeFinder(done <-chan interface{}, in <-chan int) <-chan int {
out := make(chan int)
go func() {
defer close(out)
for {
select {
case <-done:
return
case n := <-in:
if isPrime(n) {
out <- n
}
}
}
}()
return out
}
func isPrime(n int) bool {
	if n < 2 {
		return false
	}
	if n < 4 {
		return true // 2 and 3 are prime
	}
for i := n - 1; i > 1; i-- {
if n%i == 0 {
return false
}
}
return true
}
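// take forwards up to count distinct values from in and then closes its output channel.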
func take(done <-chan interface{}, in <-chan int, count int) <-chan int {
out := make(chan int, count)
go func() {
unique := make(map[int]bool, count)
defer close(out)
bound := 0
for {
select {
case <-done:
return
case n := <-in:
if !unique[n] {
unique[n] = true
out <- n
bound++
if bound == count {
return
}
}
}
}
}()
return out
}
|
[
1
] |
package gosmtpmx
import (
"errors"
"net"
"reflect"
"testing"
"flag"
)
var integrate = flag.Bool("integrate", false, "Perform actual DNS lookup")
var testMX MX = MX{
host: "",
port: "25",
auth: nil,
}
func NewTestClient(r Resolver, s Sender, mx MX) *client {
return &client{
Resolver: r,
Sender: s,
mx: mx,
}
}
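// testSender is a Sender stub whose SendMail always succeeds.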
type testSender struct {
}
func (s testSender) SendMail(addr, from string, to []string, msg []byte) error {
return nil
}
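// testSenderRecoder records every delivery attempt and simulates a connection failure for 127.0.0.3:10025.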
type testSenderRecoder struct {
TestAddrs []string
Errors []error
From string
To []string
Port string
Msg []byte
}
func (s *testSenderRecoder) SendMail(addr, from string, to []string, msg []byte) error {
s.TestAddrs = append(s.TestAddrs, addr)
s.From = from
s.To = to
s.Msg = msg
switch addr {
case "127.0.0.3:10025":
err := errors.New("[dummy] Connection refused")
s.Errors = append(s.Errors, err)
return err
}
return nil
}
type noSuchDomainResolver struct {
defaultResolver
}
func (r noSuchDomainResolver) ResolvMX(host string) ([]*net.MX, error) {
return nil, &net.DNSError{
Name: host,
Err: "no such host",
}
}
func (r noSuchDomainResolver) ResolvIP(host string) ([]net.IP, error) {
return []net.IP{
net.ParseIP("127.0.0.10"),
}, nil
}
type testNormalResolver struct {
defaultResolver
}
func (r testNormalResolver) ResolvMX(host string) ([]*net.MX, error) {
switch host {
case "example.org":
return []*net.MX{
&net.MX{Host: "mx1.example.org", Pref: 20},
&net.MX{Host: "mx0.example.org", Pref: 10},
}, nil
case "example.net":
return []*net.MX{
&net.MX{Host: "mx1.example.org", Pref: 10},
&net.MX{Host: "mx0.example.org", Pref: 20},
}, nil
case "example.info":
return []*net.MX{
&net.MX{Host: "mx0.example.info", Pref: 10},
&net.MX{Host: "mx1.example.info", Pref: 20},
}, nil
}
return []*net.MX{
&net.MX{Host: "mx0.example.org", Pref: 20},
&net.MX{Host: "nosuchmx0.example.org", Pref: 10},
}, nil
}
func (r testNormalResolver) ResolvIP(host string) ([]net.IP, error) {
switch host {
case "mx0.example.org":
return []net.IP{
net.ParseIP("127.0.0.1"),
net.ParseIP("127.0.0.2"),
}, nil
case "mx1.example.org":
return []net.IP{
net.ParseIP("127.0.0.3"),
net.ParseIP("127.0.0.4"),
}, nil
case "mx0.example.info":
return []net.IP{
net.ParseIP("127.0.0.3"),
net.ParseIP("127.0.0.3"),
}, nil
}
return nil, errors.New("no such host")
}
type testFailureResolver struct {
defaultResolver
}
func (r testFailureResolver) ResolvMX(host string) ([]*net.MX, error) {
return nil, &net.DNSError{
Name: host,
Err: "Unknown Error",
}
}
func TestLookup_MX(t *testing.T) {
c := NewTestClient(testNormalResolver{}, testSender{}, testMX)
expected := map[uint16]*[]*net.MX{
10: &[]*net.MX{
&net.MX{
Host: "mx0.example.org",
Pref: 10,
},
},
20: &[]*net.MX{
&net.MX{
Host: "mx1.example.org",
Pref: 20,
},
},
}
list, err := c.LookupMX("example.org")
if err != nil {
t.Fatal(err)
}
if len(list) != len(expected) {
t.Fatalf("Expect lengh to be %d, but %d", len(expected), len(list))
}
if !reflect.DeepEqual(expected[0], list[0]) {
t.Fatalf("Expect %v, but %v", expected, list)
}
}
func TestLookupMX_ImplicitMX(t *testing.T) {
c := NewTestClient(noSuchDomainResolver{}, testSender{}, testMX)
expected := map[uint16]*[]*net.MX{
0: &[]*net.MX{
&net.MX{
Host: "nosuchdomain.example.org",
Pref: 0,
},
},
}
list, err := c.LookupMX("nosuchdomain.example.org")
if err != nil {
t.Fatal(err)
}
if len(list) != len(expected) {
t.Fatal("Expect lengh to be %d, but %d", len(expected), len(list))
}
if !reflect.DeepEqual(expected[0], list[0]) {
t.Fatalf("Expect %v, but %v", expected[0], list[0])
}
}
func TestLookupMX_Failure(t *testing.T) {
c := NewTestClient(testFailureResolver{}, testSender{}, testMX)
list, err := c.LookupMX("example.org")
if err == nil {
t.Fatalf("Expect to be error, but nothing happen and returned '%v'", list)
}
}
func TestLookupIP(t *testing.T) {
c := NewTestClient(testNormalResolver{}, testSender{}, testMX)
expected := []string{"127.0.0.1", "127.0.0.2"}
ip, err := c.LookupIP("mx0.example.org")
if err != nil {
t.Fatalf("Expect not to be error, but '%v'", err)
}
if !reflect.DeepEqual(ip, expected) {
t.Fatalf("Expect '%v', but '%v'", expected, ip)
}
}
/*
Error situations
1. LookupMX fails
return immediately
2. LookupIP fails
3. SendMail fails
try to next host if available, or try to next MX if available
return error if no alternative found
*/
func TestDeliver_MultipleMX(t *testing.T) {
mx := MX{
host: "example.org",
port: "10025",
}
s := &testSenderRecoder{}
c := NewTestClient(testNormalResolver{}, s, mx)
if err := c.Deliver("[email protected]", []string{"[email protected]", "[email protected]"}, []byte("ABC")); err != nil {
t.Fatalf("Expect not to be error, but '%v'", err)
}
if s.TestAddrs[0] != "127.0.0.1:10025" {
t.Fatalf("Expect to try to connect to 1st preference, but connect to '%v'", s.TestAddrs[0])
}
}
func TestDeliver_ImplicitMX(t *testing.T) {
mx := MX{
host: "example.org",
port: "10025",
}
s := &testSenderRecoder{}
c := NewTestClient(noSuchDomainResolver{}, s, mx)
if err := c.Deliver("[email protected]", []string{"[email protected]"}, []byte("ABC")); err != nil {
t.Fatalf("Expect not to be error, but '%v'", err)
}
if s.TestAddrs[0] != "127.0.0.10:10025" {
t.Fatalf("Expect to try to implicit MX, but connect to '%v'", s.TestAddrs[0])
}
}
func TestDeliver_LookupIP(t *testing.T) {
mx := MX{
host: "nosuchmx.example.org",
port: "10025",
}
s := &testSenderRecoder{}
c := NewTestClient(testNormalResolver{}, s, mx)
if err := c.Deliver("[email protected]", []string{"[email protected]"}, []byte("ABC")); err != nil {
t.Fatalf("Expect not to be error, but '%v'", err)
}
if s.TestAddrs[0] != "127.0.0.1:10025" {
t.Fatalf("Expect to try to 2nd MX, but connect to '%v'", s.TestAddrs[0])
}
}
func TestDeliver_SendMail(t *testing.T) {
mx := MX{
host: "example.net",
port: "10025",
}
s := &testSenderRecoder{}
c := NewTestClient(testNormalResolver{}, s, mx)
if err := c.Deliver("[email protected]", []string{"[email protected]"}, []byte("ABC")); err != nil {
t.Fatalf("Expect not to be error, but '%v'", err)
}
if len(s.Errors) != 1 {
t.Fatalf("Expect to be error, but no error found")
}
if s.Errors[0].Error() != "[dummy] Connection refused" {
t.Fatalf("Expect to be connection refused, but '%v'", s.Errors[0])
}
if len(s.TestAddrs) != 2 {
t.Fatalf("Expect to try 2 times, but '%v'", len(s.TestAddrs))
}
if s.TestAddrs[1] != "127.0.0.4:10025" {
t.Fatalf("Expect to connect to 2nd host, but '%v'", s.TestAddrs[1])
}
}
func TestDeliver_NoAlternative(t *testing.T) {
mx := MX{
host: "example.info",
port: "10025",
}
s := &testSenderRecoder{}
c := NewTestClient(testNormalResolver{}, s, mx)
err := c.Deliver("[email protected]", []string{"[email protected]"}, []byte("ABC"))
if err == nil {
t.Fatalf("Expect to be error, but no error found")
}
if err.Error() != "No alternative found" {
t.Fatalf("Expect to be 'no alternative found', but '%v'", err)
}
expectedTestAddrs := []string{
"127.0.0.3:10025",
"127.0.0.3:10025",
}
if !reflect.DeepEqual(s.TestAddrs, expectedTestAddrs) {
t.Fatalf("Expect to try to connect to 2 nodes, but '%v'", s.TestAddrs)
}
}
func Test_New(t *testing.T) {
if !*integrate {
t.Skip("Actual DNS lookups are disabled. Add -integrate to perform lookups.")
}
mx := MX{
host: "gosmtpmxtest.aws.tknetworks.org",
port: "10025",
}
c := New(mx)
{
// all attempts failed
err := c.Deliver("[email protected]", []string{"[email protected]", "[email protected]"}, []byte("ABC"))
if err.Error() != "No alternative found" {
t.Fatal(err)
}
}
/*
{
err := c.Deliver("[email protected]", []string{"[email protected]", "[email protected]"}, []byte("ABC"))
if err != nil {
t.Fatal(err)
}
}
*/
}
|
[
7
] |
package scanner
import (
"github.com/arnodel/golua/token"
)
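// scanToken is the main scanning state: it dispatches on the next rune and
// either switches to a more specific scanner or emits a single token.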
func scanToken(l *Scanner) stateFn {
for {
switch c := l.next(); {
case c == '-':
if l.next() == '-' {
return scanComment
}
l.backup()
l.emit(token.SgMinus)
case c == '"' || c == '\'':
return scanShortString(c)
case isDec(c):
l.backup()
return scanNumber
case c == '[':
n := l.next()
if n == '[' || n == '=' {
l.backup()
return scanLongString
}
l.backup()
l.emit(token.SgOpenSquareBkt)
case isAlpha(c):
return scanIdent
case isSpace(c):
l.ignore()
default:
switch c {
case ';', '(', ')', ',', '|', '&', '+', '*', '%', '^', '#', ']', '{', '}':
case '=':
l.accept("=")
case ':':
l.accept(":")
case '.':
if accept(l, isDec, -1) > 0 {
return scanExp(l, isDec, "eE", token.NUMDEC)
}
if l.accept(".") {
l.accept(".")
}
case '<':
l.accept("=<")
case '>':
l.accept("=>")
case '~':
l.accept("=")
case '/':
l.accept("/")
case -1:
l.emit(token.EOF)
return nil
default:
return l.errorf(token.INVALID, "illegal character")
}
l.emit(sgType[string(l.lit())])
}
return scanToken
}
}
func scanComment(l *Scanner) stateFn {
c := l.next()
if c == '[' {
return scanLongComment
}
l.backup()
return scanShortComment
}
func scanShortComment(l *Scanner) stateFn {
for {
switch c := l.next(); c {
case '\n':
l.acceptRune('\r')
l.ignore()
return scanToken
case -1:
l.ignore()
l.emit(token.EOF)
return nil
}
}
}
func scanLongComment(l *Scanner) stateFn {
return scanLong(true)
}
func scanLong(comment bool) stateFn {
return func(l *Scanner) stateFn {
level := 0
OpeningLoop:
for {
switch c := l.next(); c {
case '=':
level++
case '[':
break OpeningLoop
default:
if comment {
l.ignore()
return scanShortComment
}
return l.errorf(token.INVALID, "expected opening long bracket")
}
}
closeLevel := -1
// -1 means we haven't starting closing a bracket
// 0 means we have processed the first ']'
// n > 0 means we have processed ']' + n*'='
for {
switch c := l.next(); c {
case ']':
if closeLevel == level {
if comment {
l.ignore()
} else {
l.emit(token.LONGSTRING)
}
return scanToken
}
closeLevel = 0
case '=':
if closeLevel >= 0 {
closeLevel++
}
case -1:
return l.errorf(token.UNFINISHED, "illegal <eof> in long bracket of level %d", level)
default:
closeLevel = -1
}
}
}
}
func scanShortString(q rune) stateFn {
return func(l *Scanner) stateFn {
for {
switch c := l.next(); c {
case q:
l.emit(token.STRING)
return scanToken
case '\\':
switch c := l.next(); {
case c == 'x':
if accept(l, isHex, 2) != 2 {
return l.errorf(token.INVALID, `\x must be followed by 2 hex digits`)
}
case isDec(c):
accept(l, isDec, 2)
case c == 'u':
if l.next() != '{' {
return l.errorf(token.INVALID, `\u must be followed by '{'`)
}
if accept(l, isHex, -1) == 0 {
return l.errorf(token.INVALID, "at least 1 hex digit required")
}
if l.next() != '}' {
return l.errorf(token.INVALID, "missing '}'")
}
case c == 'z':
accept(l, isSpace, -1)
default:
switch c {
case '\n':
// Nothing to do
case 'a', 'b', 'f', 'n', 'r', 't', 'v', 'z', '"', '\'', '\\':
break
default:
return l.errorf(token.INVALID, "illegal escaped character")
}
}
case '\n', '\r':
return l.errorf(token.INVALID, "illegal new line in string literal")
case -1:
return l.errorf(token.INVALID, "illegal <eof> in string literal")
}
}
}
}
// For scanning numbers e.g. in files
func scanNumberPrefix(l *Scanner) stateFn {
accept(l, isSpace, -1)
l.accept("+-")
return scanNumber
}
func scanNumber(l *Scanner) stateFn {
isDigit := isDec
exp := "eE"
tp := token.NUMDEC
leading0 := l.accept("0")
dcount := 0
if leading0 && l.accept("xX") {
isDigit = isHex
exp = "pP"
tp = token.NUMHEX
} else if leading0 {
dcount++
}
dcount += accept(l, isDigit, -1)
if l.accept(".") {
dcount += accept(l, isDigit, -1)
}
if dcount == 0 {
return l.errorf(token.INVALID, "no digits in mantissa")
}
return scanExp(l, isDigit, exp, tp)
}
func scanExp(l *Scanner, isDigit func(rune) bool, exp string, tp token.Type) stateFn {
if l.accept(exp) {
l.accept("+-")
if accept(l, isDec, -1) == 0 {
return l.errorf(token.INVALID, "digit required after exponent")
}
}
l.emit(tp)
if isAlpha(l.peek()) {
l.next()
return l.errorf(token.INVALID, "illegal character following number")
}
return scanToken
}
func scanLongString(l *Scanner) stateFn {
return scanLong(false)
}
var kwType = map[string]token.Type{
"break": token.KwBreak,
"goto": token.KwGoto,
"do": token.KwDo,
"while": token.KwWhile,
"end": token.KwEnd,
"repeat": token.KwRepeat,
"until": token.KwUntil,
"then": token.KwThen,
"else": token.KwElse,
"elseif": token.KwElseIf,
"if": token.KwIf,
"for": token.KwFor,
"in": token.KwIn,
"function": token.KwFunction,
"local": token.KwLocal,
"and": token.KwAnd,
"or": token.KwOr,
"not": token.KwNot,
"nil": token.KwNil,
"true": token.KwTrue,
"false": token.KwFalse,
"return": token.KwReturn,
}
var sgType = map[string]token.Type{
"-": token.SgMinus,
"+": token.SgPlus,
"*": token.SgStar,
"/": token.SgSlash,
"//": token.SgSlashSlash,
"%": token.SgPct,
"|": token.SgPipe,
"&": token.SgAmpersand,
"^": token.SgHat,
">>": token.SgShiftRight,
"<<": token.SgShiftLeft,
"..": token.SgConcat,
"==": token.SgEqual,
"~=": token.SgNotEqual,
"<": token.SgLess,
"<=": token.SgLessEqual,
">": token.SgGreater,
">=": token.SgGreaterEqual,
"...": token.SgEtc,
"[": token.SgOpenSquareBkt,
"]": token.SgCloseSquareBkt,
"(": token.SgOpenBkt,
")": token.SgCloseBkt,
"{": token.SgOpenBrace,
"}": token.SgCloseBrace,
";": token.SgSemicolon,
",": token.SgComma,
".": token.SgDot,
":": token.SgColon,
"::": token.SgDoubleColon,
"=": token.SgAssign,
"#": token.SgHash,
"~": token.SgTilde,
}
func scanIdent(l *Scanner) stateFn {
accept(l, isAlnum, -1)
tp, ok := kwType[string(l.lit())]
if !ok {
tp = token.IDENT
}
l.emit(tp)
return scanToken
}
func isDec(x rune) bool {
return '0' <= x && x <= '9'
}
func isAlpha(x rune) bool {
return x >= 'a' && x <= 'z' || x >= 'A' && x <= 'Z' || x == '_'
}
func isAlnum(x rune) bool {
return isDec(x) || isAlpha(x)
}
func isHex(x rune) bool {
return isDec(x) || 'a' <= x && x <= 'f' || 'A' <= x && x <= 'F'
}
func isSpace(x rune) bool {
return x == ' ' || x == '\n' || x == '\r' || x == '\t' || x == '\v' || x == '\f'
}
type runePredicate func(rune) bool
func accept(l *Scanner, p runePredicate, max int) int {
for i := 0; i != max; i++ {
if !p(l.next()) {
l.backup()
return i
}
}
return max
}
|
[
1
] |
package sftp
import (
"fmt"
"io"
"net"
"os"
"strings"
proxyproto "github.com/pires/go-proxyproto"
"golang.org/x/crypto/ssh"
)
// Logger is an abstraction for how logging will be performed by the server. It matches
// a subset of the Clever/kayvee-go library.
type Logger interface {
InfoD(title string, meta map[string]interface{})
ErrorD(title string, meta map[string]interface{})
}
// meta is a shorthand for map[string]interface{} to make logger calls more concise.
type meta map[string]interface{}
// Alerter is the function signature for an optional alerting function to be called in error cases.
type Alerter func(title string, metadata map[string]interface{})
// DriverGenerator is a function that creates an SFTP ServerDriver if the login request
// is valid.
type DriverGenerator func(LoginRequest) ServerDriver
// LoginRequest is the metadata associated with a login request that is passed to the
// driverGenerator function in order for it to approve/deny the request.
type LoginRequest struct {
Username string
Password string
PublicKey string
RemoteAddr net.Addr
}
// ManagedServer is our term for the SFTP server.
type ManagedServer struct {
driverGenerator func(LoginRequest) ServerDriver
lg Logger
alertFn Alerter
}
// NewManagedServer creates a new ManagedServer which conditionally serves requests based
// on the output of driverGenerator.
func NewManagedServer(driverGenerator DriverGenerator, lg Logger, alertFn Alerter) *ManagedServer {
return &ManagedServer{
driverGenerator: driverGenerator,
lg: lg,
alertFn: alertFn,
}
}
func (m ManagedServer) errorAndAlert(title string, metadata map[string]interface{}) {
if m.alertFn != nil {
m.alertFn(title, metadata)
}
m.lg.ErrorD(title, metadata)
}
// Start actually starts the server and begins fielding requests.
func (m ManagedServer) Start(port int, rawPrivateKeys [][]byte, ciphers, macs []string) {
m.lg.InfoD("starting-server", meta{
"port": port,
"ciphers": ciphers,
"macs": macs,
})
privateKeys := []ssh.Signer{}
for i, rawKey := range rawPrivateKeys {
privateKey, err := ssh.ParsePrivateKey(rawKey)
if err != nil {
m.errorAndAlert("private-key-parse", meta{"index": i, "error": err.Error()})
os.Exit(1)
}
privateKeys = append(privateKeys, privateKey)
}
	listener, err := net.Listen("tcp", fmt.Sprintf("0.0.0.0:%v", port))
	if err != nil {
		m.errorAndAlert("listen-fail", meta{
			"msg":   "failed to open socket",
			"error": err.Error(),
			"port":  port})
		os.Exit(1)
	}
	proxyList := proxyproto.Listener{Listener: listener}
m.lg.InfoD("listening", meta{"address": proxyList.Addr().String()})
for {
newConn, err := proxyList.Accept()
if err != nil {
m.errorAndAlert("listener-accept-fail", meta{"error": err.Error()})
os.Exit(1)
}
go func(conn net.Conn) {
var driver ServerDriver
config := &ssh.ServerConfig{
Config: ssh.Config{
Ciphers: ciphers,
MACs: macs,
},
PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
driver = m.driverGenerator(LoginRequest{
Username: c.User(),
Password: string(pass),
PublicKey: "",
RemoteAddr: c.RemoteAddr(),
})
if driver == nil {
return nil, fmt.Errorf("password rejected for %q", c.User())
}
return nil, nil
},
PublicKeyCallback: func(c ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
driver = m.driverGenerator(LoginRequest{
Username: c.User(),
Password: "",
PublicKey: strings.TrimSpace(string(ssh.MarshalAuthorizedKey(key))),
RemoteAddr: c.RemoteAddr(),
})
if driver == nil {
return nil, fmt.Errorf("password rejected for %q", c.User())
}
return nil, nil
},
}
for _, privateKey := range privateKeys {
config.AddHostKey(privateKey)
}
_, newChan, requestChan, err := ssh.NewServerConn(conn, config)
if err != nil {
if err != io.EOF {
m.errorAndAlert("handshake-failure", meta{"error": err.Error()})
}
return
}
go ssh.DiscardRequests(requestChan)
for newChannelRequest := range newChan {
if newChannelRequest.ChannelType() != "session" {
newChannelRequest.Reject(ssh.UnknownChannelType, "unknown channel type")
m.errorAndAlert("unknown-channel-type", meta{"type": newChannelRequest.ChannelType()})
continue
}
channel, requests, err := newChannelRequest.Accept()
if err != nil {
if err != io.EOF {
m.errorAndAlert("channel-accept-failure", meta{
"err": err.Error(),
"type": newChannelRequest.ChannelType()})
}
return
}
go func(in <-chan *ssh.Request) {
for req := range in {
ok := false
switch req.Type {
case "subsystem":
if len(req.Payload) >= 4 {
// we reject all SSH requests that are not SFTP
if string(req.Payload[4:]) == "sftp" {
ok = true
}
}
}
req.Reply(ok, nil)
}
}(requests)
server, err := NewServer(channel, driver)
if err != nil {
m.errorAndAlert("server-creation-err", meta{"err": err.Error()})
return
}
if err := server.Serve(); err != nil {
channel.Close()
}
}
}(newConn)
}
}
|
[
7
] |
package main
import (
"fmt"
"log"
"time"
)
type Node struct {
value int
next *Node
}
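// AddNode appends a node with the given value to the end of the list and returns it.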
func (n *Node) AddNode(value int) *Node {
newNode := Node{value, nil}
iter := n
for iter.next != nil {
iter = iter.next
}
iter.next = &newNode
return &newNode
}
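// PrintNode prints at most max values starting from this node.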
func (n *Node) PrintNode(max int) {
iter := n
iteration := 0
for iter != nil && iteration < max {
fmt.Println(iter.value)
iter = iter.next
iteration++
}
}
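// timeTrack logs the time elapsed since start; typically invoked with defer.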
func timeTrack(start time.Time, name string) {
elapsed := time.Since(start)
log.Printf("%s took %s", name, elapsed)
}
|
[
1
] |
// Code generated by mockery v1.0.0. DO NOT EDIT.
package automock
import mock "github.com/stretchr/testify/mock"
import v1alpha1 "github.com/kyma-project/helm-broker/pkg/apis/addons/v1alpha1"
// clusterAddonsCfgUpdater is an autogenerated mock type for the clusterAddonsCfgUpdater type
type clusterAddonsCfgUpdater struct {
mock.Mock
}
// AddRepos provides a mock function with given fields: name, url
func (_m *clusterAddonsCfgUpdater) AddRepos(name string, url []string) (*v1alpha1.ClusterAddonsConfiguration, error) {
ret := _m.Called(name, url)
var r0 *v1alpha1.ClusterAddonsConfiguration
if rf, ok := ret.Get(0).(func(string, []string) *v1alpha1.ClusterAddonsConfiguration); ok {
r0 = rf(name, url)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1alpha1.ClusterAddonsConfiguration)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string, []string) error); ok {
r1 = rf(name, url)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// RemoveRepos provides a mock function with given fields: name, urls
func (_m *clusterAddonsCfgUpdater) RemoveRepos(name string, urls []string) (*v1alpha1.ClusterAddonsConfiguration, error) {
ret := _m.Called(name, urls)
var r0 *v1alpha1.ClusterAddonsConfiguration
if rf, ok := ret.Get(0).(func(string, []string) *v1alpha1.ClusterAddonsConfiguration); ok {
r0 = rf(name, urls)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1alpha1.ClusterAddonsConfiguration)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string, []string) error); ok {
r1 = rf(name, urls)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Resync provides a mock function with given fields: name
func (_m *clusterAddonsCfgUpdater) Resync(name string) (*v1alpha1.ClusterAddonsConfiguration, error) {
ret := _m.Called(name)
var r0 *v1alpha1.ClusterAddonsConfiguration
if rf, ok := ret.Get(0).(func(string) *v1alpha1.ClusterAddonsConfiguration); ok {
r0 = rf(name)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1alpha1.ClusterAddonsConfiguration)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(name)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
|
[
4
] |
package mpb_test
import (
"bytes"
"context"
"io/ioutil"
"math/rand"
"sync"
"testing"
"time"
"github.com/mikewiacek/mpb"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func TestBarCount(t *testing.T) {
p := mpb.New(mpb.WithOutput(ioutil.Discard))
var wg sync.WaitGroup
wg.Add(1)
b := p.AddBar(100)
go func() {
for i := 0; i < 100; i++ {
if i == 33 {
wg.Done()
}
b.Increment()
time.Sleep(randomDuration(100 * time.Millisecond))
}
}()
wg.Wait()
count := p.BarCount()
if count != 1 {
t.Errorf("BarCount want: %q, got: %q\n", 1, count)
}
b.Abort(true)
p.Wait()
}
func TestBarAbort(t *testing.T) {
p := mpb.New(mpb.WithOutput(ioutil.Discard))
var wg sync.WaitGroup
wg.Add(1)
bars := make([]*mpb.Bar, 3)
for i := 0; i < 3; i++ {
b := p.AddBar(100)
bars[i] = b
go func(n int) {
for i := 0; !b.Completed(); i++ {
if n == 0 && i >= 33 {
b.Abort(true)
wg.Done()
}
b.Increment()
time.Sleep(randomDuration(100 * time.Millisecond))
}
}(i)
}
wg.Wait()
count := p.BarCount()
if count != 2 {
t.Errorf("BarCount want: %q, got: %q\n", 2, count)
}
bars[1].Abort(true)
bars[2].Abort(true)
p.Wait()
}
func TestWithContext(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
shutdown := make(chan struct{})
p := mpb.NewWithContext(ctx,
mpb.WithOutput(ioutil.Discard),
mpb.WithRefreshRate(50*time.Millisecond),
mpb.WithShutdownNotifier(shutdown),
)
total := 10000
numBars := 3
bars := make([]*mpb.Bar, 0, numBars)
for i := 0; i < numBars; i++ {
bar := p.AddBar(int64(total))
bars = append(bars, bar)
go func() {
for !bar.Completed() {
bar.Increment()
time.Sleep(randomDuration(100 * time.Millisecond))
}
}()
}
time.Sleep(50 * time.Millisecond)
cancel()
p.Wait()
select {
case <-shutdown:
case <-time.After(100 * time.Millisecond):
t.Error("Progress didn't stop")
}
}
func getLastLine(bb []byte) []byte {
split := bytes.Split(bb, []byte("\n"))
return split[len(split)-2]
}
func randomDuration(max time.Duration) time.Duration {
return time.Duration(rand.Intn(10)+1) * max / 10
}
|
[
1
] |
package main
import "fmt"
func aExer2() {
	// Problem 1
	// I was close on this one
l := []int{100, 300, 23, 11, 23, 2, 4, 6, 4}
	// My answer:
	// hikaku := 1000
	// Answer: just declare the variable up front
var min int
for i, val := range l {
if i == 0 {
min = val
continue
}
if min >= val {
min = val
}
}
fmt.Println(min)
	// Problem 2
map2 := map[string]int{
"apple": 200,
"banana": 300,
"orange": 150,
"grapes": 80,
"papaya": 500,
"kiwi": 90,
}
sum := 0
for _, val := range map2 {
sum += val
}
fmt.Println(sum)
}
|
[
1
] |
package lexrp
import (
"strings"
"unicode/utf8"
)
const (
eof = -1
)
type Lexer struct {
input string
start int
pos int
width int // current rune width
state stateFn
items chan Item // we will send this there
}
// Lex starts a lexer over input and returns it together with its item channel.
func Lex(input string) (*Lexer, chan Item) {
l := &Lexer{
input: input,
state: lexBase,
items: make(chan Item, 2),
}
go l.run()
return l, l.items
}
func (l *Lexer) emit(t ItemType) {
l.items <- Item{t, l.input[l.start:l.pos], l.start}
l.start = l.pos // next
}
func (l *Lexer) next() (r rune) {
if l.pos >= len(l.input) {
l.width = 0
return eof
}
r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
l.pos += l.width
return r
}
// ignore a portion of the text
func (l *Lexer) ignore() {
l.start = l.pos
}
// backup steps back over the last rune read, using its recorded width.
// Ideally the width would be recalculated for the previous UTF-8 rune.
func (l *Lexer) backup() {
l.pos -= l.width
}
// Peek current rune from the string without moving cursor
func (l *Lexer) peek() rune {
r, _ := utf8.DecodeRuneInString(l.input[l.pos:])
return r
}
// Lexer utilities.
//
// acceptAny consumes the next rune if it is in the given set and returns true;
// otherwise it backs up and returns false.
func (l *Lexer) acceptAny(any string) bool {
if strings.IndexRune(any, l.next()) >= 0 {
return true
}
l.backup()
return false
}
// acceptAnyRun advances the cursor while the next rune matches
// any rune in the given set.
func (l *Lexer) acceptAnyRun(any string) {
for strings.IndexRune(any, l.next()) >= 0 {
}
l.backup()
}
// Run will move through states and close channel after
func (l *Lexer) run() {
for state := l.state; state != nil; {
state = state(l)
}
close(l.items)
}
// NextItem returns the next item from the items channel.
//
func (l *Lexer) NextItem() Item {
item := <-l.items
return item
/*for {
select {
case item := <-l.items:
return item
default:
l.state = l.state(l)
}
}*/
}
///////////////////////
// State handlers
func lexBase(l *Lexer) stateFn {
for {
ch := l.next()
switch ch {
case ' ', '\t', '\r', '\f':
l.ignore()
continue // next
case '(':
l.emit(ItemLParen)
return lexBase
case ')':
l.emit(ItemRParen)
return lexBase
case eof:
return nil
case '"':
return lexDoubleQuotes
case '\'':
return lexSingleQuotes
case '=', '*':
l.ignore()
return lexBase
default:
if isIdentStart(ch) {
l.backup()
return lexIdent
}
}
l.emit(ItemUnknown)
return lexBase
}
}
// Atom
func lexIdent(l *Lexer) stateFn {
for isIdentMiddle(l.next()) {
}
l.backup() // Back the last rune
l.emit(ItemIdent)
// Whats next??!
return lexBase
}
func lexSingleQuotes(l *Lexer) stateFn {
vi := strings.Index(l.input[l.pos:], "'")
if vi == -1 {
l.emit(ItemError)
return nil
}
l.pos += vi + 1 // include '
l.emit(ItemSingleQuote)
return lexBase
}
func lexDoubleQuotes(l *Lexer) stateFn {
vi := strings.Index(l.input[l.pos:], "\"")
if vi == -1 {
l.emit(ItemError)
return nil
}
l.pos += vi + 1 // Include "
l.emit(ItemDoubleQuote)
return lexBase // back
}
func isSpace(ch rune) bool {
return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\f'
}
func isIdentStart(ch rune) bool {
return (ch >= 'a' && ch <= 'z' ||
ch >= 'A' && ch <= 'Z' ||
ch == '_')
}
func isDigit(ch rune) bool {
return ch >= '0' && ch <= '9'
}
func isIdentMiddle(ch rune) bool {
return isIdentStart(ch) || isDigit(ch)
}
type stateFn func(*Lexer) stateFn
// State functions
|
[
1
] |
package datadog
import (
"compress/zlib"
"context"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stripe/veneur/samplers"
"github.com/stripe/veneur/ssf"
)
// DDMetricsRequest represents the body of the POST request
// for sending metrics data to Datadog
// Eventually we'll want to define this symmetrically.
type DDMetricsRequest struct {
Series []DDMetric
}
func TestDatadogRate(t *testing.T) {
ddSink := DatadogMetricSink{
hostname: "somehostname",
tags: []string{"a:b", "c:d"},
interval: 10,
}
metrics := []samplers.InterMetric{{
Name: "foo.bar.baz",
Timestamp: time.Now().Unix(),
Value: float64(10),
Tags: []string{"gorch:frobble", "x:e"},
Type: samplers.CounterMetric,
}}
ddMetrics := ddSink.finalizeMetrics(metrics)
assert.Equal(t, "rate", ddMetrics[0].MetricType, "Metric type should be rate")
	assert.Equal(t, float64(1.0), ddMetrics[0].Value[0][1], "Metric rate wasn't computed correctly")
}
func TestServerTags(t *testing.T) {
ddSink := DatadogMetricSink{
hostname: "somehostname",
tags: []string{"a:b", "c:d"},
interval: 10,
}
metrics := []samplers.InterMetric{{
Name: "foo.bar.baz",
Timestamp: time.Now().Unix(),
Value: float64(10),
Tags: []string{"gorch:frobble", "x:e"},
Type: samplers.CounterMetric,
}}
ddMetrics := ddSink.finalizeMetrics(metrics)
assert.Equal(t, "somehostname", ddMetrics[0].Hostname, "Metric hostname uses argument")
assert.Contains(t, ddMetrics[0].Tags, "a:b", "Tags should contain server tags")
}
func TestHostMagicTag(t *testing.T) {
ddSink := DatadogMetricSink{
hostname: "badhostname",
tags: []string{"a:b", "c:d"},
}
metrics := []samplers.InterMetric{{
Name: "foo.bar.baz",
Timestamp: time.Now().Unix(),
Value: float64(10),
Tags: []string{"gorch:frobble", "host:abc123", "x:e"},
Type: samplers.CounterMetric,
}}
ddMetrics := ddSink.finalizeMetrics(metrics)
assert.Equal(t, "abc123", ddMetrics[0].Hostname, "Metric hostname should be from tag")
assert.NotContains(t, ddMetrics[0].Tags, "host:abc123", "Host tag should be removed")
assert.Contains(t, ddMetrics[0].Tags, "x:e", "Last tag is still around")
}
func TestDeviceMagicTag(t *testing.T) {
ddSink := DatadogMetricSink{
hostname: "badhostname",
tags: []string{"a:b", "c:d"},
}
metrics := []samplers.InterMetric{{
Name: "foo.bar.baz",
Timestamp: time.Now().Unix(),
Value: float64(10),
Tags: []string{"gorch:frobble", "device:abc123", "x:e"},
Type: samplers.CounterMetric,
}}
ddMetrics := ddSink.finalizeMetrics(metrics)
assert.Equal(t, "abc123", ddMetrics[0].DeviceName, "Metric devicename should be from tag")
assert.NotContains(t, ddMetrics[0].Tags, "device:abc123", "Host tag should be removed")
assert.Contains(t, ddMetrics[0].Tags, "x:e", "Last tag is still around")
}
func TestNewDatadogSpanSinkConfig(t *testing.T) {
// test the variables that have been renamed
ddSink, err := NewDatadogSpanSink("http://example.com", 100, &http.Client{}, nil, logrus.New())
if err != nil {
t.Fatal(err)
}
err = ddSink.Start(nil)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, "http://example.com", ddSink.traceAddress)
}
type DatadogRoundTripper struct {
Endpoint string
Contains string
GotCalled bool
ThingReceived bool
}
func (rt *DatadogRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
rec := httptest.NewRecorder()
if strings.HasPrefix(req.URL.Path, rt.Endpoint) {
body, _ := ioutil.ReadAll(req.Body)
defer req.Body.Close()
if strings.Contains(string(body), rt.Contains) {
rt.ThingReceived = true
}
rec.Code = http.StatusOK
rt.GotCalled = true
}
return rec.Result(), nil
}
func TestDatadogFlushSpans(t *testing.T) {
// test the variables that have been renamed
transport := &DatadogRoundTripper{Endpoint: "/v0.3/traces", Contains: "farts-srv"}
ddSink, err := NewDatadogSpanSink("http://example.com", 100, &http.Client{Transport: transport}, nil, logrus.New())
assert.NoError(t, err)
start := time.Now()
end := start.Add(2 * time.Second)
testSpan := &ssf.SSFSpan{
TraceId: 1,
ParentId: 1,
Id: 2,
StartTimestamp: int64(start.UnixNano()),
EndTimestamp: int64(end.UnixNano()),
Error: false,
Service: "farts-srv",
Tags: map[string]string{
"baz": "qux",
},
Indicator: false,
Name: "farting farty farts",
}
err = ddSink.Ingest(testSpan)
assert.NoError(t, err)
ddSink.Flush()
assert.Equal(t, true, transport.GotCalled, "Did not call spans endpoint")
}
type result struct {
received bool
contained bool
}
func ddTestServer(t *testing.T, endpoint, contains string) (*httptest.Server, chan result) {
received := make(chan result)
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
res := result{}
bstream := r.Body
if r.Header.Get("Content-Encoding") == "deflate" {
bstream, _ = zlib.NewReader(r.Body)
}
body, _ := ioutil.ReadAll(bstream)
defer bstream.Close()
if strings.HasPrefix(r.URL.Path, endpoint) {
res.received = true
res.contained = strings.Contains(string(body), contains)
}
w.WriteHeader(200)
received <- res
})
return httptest.NewServer(handler), received
}
func TestDatadogMetricRouting(t *testing.T) {
// test the variables that have been renamed
client := &http.Client{Transport: &http.Transport{DisableCompression: true}}
tests := []struct {
metric samplers.InterMetric
expect bool
}{
{
samplers.InterMetric{
Name: "to.anybody.in.particular",
Timestamp: time.Now().Unix(),
Value: float64(10),
Tags: []string{"gorch:frobble", "x:e"},
Type: samplers.CounterMetric,
},
true,
},
{
samplers.InterMetric{
Name: "to.datadog",
Timestamp: time.Now().Unix(),
Value: float64(10),
Tags: []string{"gorch:frobble", "x:e"},
Type: samplers.CounterMetric,
Sinks: samplers.RouteInformation{"datadog": struct{}{}},
},
true,
},
{
samplers.InterMetric{
Name: "to.kafka.only",
Timestamp: time.Now().Unix(),
Value: float64(10),
Tags: []string{"gorch:frobble", "x:e"},
Type: samplers.CounterMetric,
Sinks: samplers.RouteInformation{"kafka": struct{}{}},
},
false,
},
}
for _, elt := range tests {
test := elt
t.Run(test.metric.Name, func(t *testing.T) {
t.Parallel()
srv, rcved := ddTestServer(t, "/api/v1/series", test.metric.Name)
ddSink := DatadogMetricSink{
DDHostname: srv.URL,
HTTPClient: client,
flushMaxPerBody: 15,
log: logrus.New(),
tags: []string{"a:b", "c:d"},
interval: 10,
}
done := make(chan struct{})
go func() {
result, ok := <-rcved
if test.expect {
// TODO: negative case
assert.True(t, result.contained, "Should have sent the metric")
} else {
if ok {
assert.False(t, result.contained, "Should definitely not have sent the metric!")
}
}
close(done)
}()
err := ddSink.Flush(context.TODO(), []samplers.InterMetric{test.metric})
require.NoError(t, err)
close(rcved)
<-done
})
}
}
|
[
4
] |
package sessionmgr
import (
// "strconv"
"time"
)
var Memory_type_sqlite int = 1
var Memory_type_memory int = 2
var Memory_type_mysql int = 3
type CSessionMgr interface {
ConnectDb()
DisConnectDb()
CreateSession(timeout uint64, userdata string) (sessionId string, err error)
DestroySession(sessionId string) error
SessionIsVaild(sessionId string) (isVaild bool, err error)
ResetLosevaildTime(sessionId string) error
DeleteSessionAfterLosevaild(nowTimeStamp uint64) error
GetUserdata(sessionId string) (userdata string, err error)
}
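// getNowTimeStamp returns the current UTC time in nanoseconds.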
func getNowTimeStamp() uint64 {
t := time.Now()
return uint64(t.UTC().UnixNano())
}
var endChannel chan bool
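// New creates a session manager for the given storage backend and starts a
// goroutine that purges expired sessions once per second until Destroy is called.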
func New(memoryType int) CSessionMgr {
var mgr CSessionMgr
switch memoryType {
case Memory_type_mysql:
mgr = NewSessionMgrMysql()
mgr.ConnectDb()
}
	endChannel = make(chan bool)
go func() {
for {
select {
case <-endChannel:
return
default:
mgr.DeleteSessionAfterLosevaild(getNowTimeStamp())
time.Sleep(time.Millisecond * 1000)
}
}
}()
return mgr
}
func Destroy(mgr CSessionMgr) {
if mgr != nil {
mgr.DisConnectDb()
close(endChannel)
}
}
|
[
7
] |
package rcs
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"reflect"
"strings"
"testing"
)
func TestMemoryUnmapped(t *testing.T) {
var buf bytes.Buffer
log.SetFlags(0)
log.SetOutput(&buf)
defer func() {
log.SetFlags(log.LstdFlags)
log.SetOutput(os.Stderr)
}()
mem := NewMemory(1, 0x10000)
mem.Write(0x1234, 0xaa)
mem.Read(0x1234)
msg := []string{
"(!) mem: unmapped write, bank $0, addr $1234, val $aa",
"(!) mem: unmapped read, bank $0, addr $1234",
"",
}
have := buf.String()
want := strings.Join(msg, "\n")
if have != want {
t.Errorf("\n have: \n%v \n want: \n%v", have, want)
}
}
func TestMemoryRAM(t *testing.T) {
in := []uint8{10, 11, 12, 13, 14}
ram := make([]uint8, 5, 5)
out := make([]uint8, 5, 5)
mem := NewMemory(1, 15)
mem.MapRAM(10, ram)
for i := 0; i < 5; i++ {
mem.Write(i+10, in[i])
}
for i := 0; i < 5; i++ {
out[i] = mem.Read(i + 10)
}
if !reflect.DeepEqual(out, in) {
t.Errorf("\n have: %v \n want: %v", out, in)
}
}
func TestMemoryROM(t *testing.T) {
var buf bytes.Buffer
log.SetFlags(0)
log.SetOutput(&buf)
defer func() {
log.SetFlags(log.LstdFlags)
log.SetOutput(os.Stderr)
}()
rom := []uint8{10, 11, 12, 13, 14}
out := make([]uint8, 5, 5)
mem := NewMemory(1, 15)
mem.MapROM(10, rom)
for i := 0; i < 5; i++ {
mem.Write(i+10, 0xff)
out[i] = mem.Read(i + 10)
}
if !reflect.DeepEqual(out, rom) {
t.Errorf("\n have: %v \n want: %v", out, rom)
}
msg := []string{
"(!) mem: unmapped write, bank $0, addr $a, val $ff",
"(!) mem: unmapped write, bank $0, addr $b, val $ff",
"(!) mem: unmapped write, bank $0, addr $c, val $ff",
"(!) mem: unmapped write, bank $0, addr $d, val $ff",
"(!) mem: unmapped write, bank $0, addr $e, val $ff",
"",
}
have := buf.String()
want := strings.Join(msg, "\n")
if have != want {
t.Errorf("\n have: \n%v \n want: \n%v", have, want)
}
}
func TestMemoryMapValue(t *testing.T) {
in := []uint8{10, 11, 12, 13, 14}
ram := make([]uint8, 5, 5)
out := make([]uint8, 5, 5)
mem := NewMemory(1, 15)
mem.MapRW(10, &in[0])
mem.MapRW(11, &in[1])
mem.MapRW(12, &in[2])
mem.MapRW(13, &in[3])
mem.MapRW(14, &in[4])
for i := 0; i < 5; i++ {
mem.Write(i+10, ram[i])
}
for i := 0; i < 5; i++ {
out[i] = mem.Read(i + 10)
}
if !reflect.DeepEqual(out, in) {
t.Errorf("\n have: %v \n want: %v", out, in)
}
}
func TestMemoryMapFunc(t *testing.T) {
reads := 0
writes := 0
out := make([]uint8, 5, 5)
mem := NewMemory(1, 15)
for i := 0; i < 5; i++ {
j := i
mem.MapLoad(i+10, func() uint8 { reads++; return 40 + uint8(j) })
mem.MapStore(i+10, func(uint8) { writes++ })
}
for i := 0; i < 5; i++ {
out[i] = mem.Read(i + 10)
mem.Write(i+10, 99)
}
want := []uint8{40, 41, 42, 43, 44}
if !reflect.DeepEqual(out, want) {
t.Errorf("\n have: %v \n want: %v", out, want)
}
if reads != 5 {
t.Errorf("expected 5 reads, got %v", reads)
}
if writes != 5 {
t.Errorf("expected 5 writes, got %v", writes)
}
}
func TestMemoryMap(t *testing.T) {
main := NewMemory(1, 15)
mem := NewMemory(1, 5)
mem.MapRAM(0, make([]uint8, 5, 5))
main.Map(0, mem)
main.Map(5, mem)
main.Write(1, 22)
have := main.Read(6)
want := uint8(22)
if have != want {
t.Errorf("\n have: %04x \n want: %04x", have, want)
}
}
func TestMemoryUnmap(t *testing.T) {
var buf bytes.Buffer
log.SetFlags(0)
log.SetOutput(&buf)
defer func() {
log.SetFlags(log.LstdFlags)
log.SetOutput(os.Stderr)
}()
mem := NewMemory(1, 10)
mem.MapRAM(0, make([]uint8, 10, 10))
mem.Write(7, 44)
mem.Unmap(7)
mem.Read(7)
msg := []string{
"(!) mem: unmapped read, bank $0, addr $7",
"",
}
have := buf.String()
want := strings.Join(msg, "\n")
if have != want {
t.Errorf("\n have: \n%v \n want: \n%v", have, want)
}
}
func TestMemoryReadLE(t *testing.T) {
mem := NewMemory(1, 2)
mem.MapROM(0, []uint8{0xcd, 0xab})
have := mem.ReadLE(0)
want := 0xabcd
if want != have {
t.Errorf("\n have: %04x \n want: %04x", have, want)
}
}
func TestMemoryWriteLE(t *testing.T) {
mem := NewMemory(1, 2)
ram := make([]uint8, 2, 2)
mem.MapRAM(0, ram)
mem.WriteLE(0, 0xabcd)
want := []uint8{0xcd, 0xab}
if !reflect.DeepEqual(ram, want) {
t.Errorf("\n have: %v \n want: %v", ram, want)
}
}
func TestWriteN(t *testing.T) {
mem := NewMemory(1, 4)
ram := make([]uint8, 4, 4)
mem.MapRAM(0, ram)
mem.WriteN(1, 10, 11, 12)
want := []uint8{0, 10, 11, 12}
if !reflect.DeepEqual(ram, want) {
t.Errorf("\n have: %v \n want: %v", ram, want)
}
}
func TestMemoryBank(t *testing.T) {
mem := NewMemory(2, 2)
ram0 := []uint8{10, 0}
ram1 := []uint8{30, 0}
mem.MapRAM(0, ram0)
mem.SetBank(1)
mem.MapRAM(0, ram1)
out := make([]uint8, 4, 4)
mem.SetBank(0)
mem.Write(1, 20)
out[0] = mem.Read(0)
out[1] = mem.Read(1)
mem.SetBank(1)
mem.Write(1, 40)
out[2] = mem.Read(0)
out[3] = mem.Read(1)
want := []uint8{10, 20, 30, 40}
if !reflect.DeepEqual(out, want) {
t.Errorf("\n have: %v \n want: %v", out, want)
}
}
func TestMemoryMirror(t *testing.T) {
mem := NewMemory(1, 20)
ram := make([]uint8, 10, 10)
mem.MapRAM(0, ram)
mem.MapRAM(10, ram)
mem.Write(4, 99)
have := mem.Read(14)
want := uint8(99)
if have != want {
t.Errorf("\n have: %v \n want: %v", have, want)
}
}
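// benchmarkMemoryW writes every location of a memory space of the given size per iteration.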
func benchmarkMemoryW(count int, b *testing.B) {
mem := NewMemory(1, count)
mem.MapRAM(0, make([]uint8, count, count))
b.ResetTimer()
for n := 0; n < b.N; n++ {
for i := 0; i < count; i++ {
mem.Write(i, 0xff)
}
}
}
func benchmarkMemoryR(count int, b *testing.B) {
mem := NewMemory(1, count)
mem.MapRAM(0, make([]uint8, count, count))
out := make([]uint8, count, count)
b.ResetTimer()
for n := 0; n < b.N; n++ {
for i := 0; i < count; i++ {
out[i] = mem.Read(i)
}
}
}
func BenchmarkMemoryW(b *testing.B) { benchmarkMemoryW(1, b) }
func BenchmarkMemoryPageW(b *testing.B) { benchmarkMemoryW(0x100, b) }
func BenchmarkMemorySpaceW(b *testing.B) { benchmarkMemoryW(0x10000, b) }
func BenchmarkMemoryR(b *testing.B) { benchmarkMemoryR(1, b) }
func BenchmarkMemoryPageR(b *testing.B) { benchmarkMemoryR(0x100, b) }
func BenchmarkMemorySpaceR(b *testing.B) { benchmarkMemoryR(0x10000, b) }
func TestPointerFetch(t *testing.T) {
mem := NewMemory(1, 10)
mem.MapRAM(0, make([]uint8, 10, 10))
mem.Write(4, 44)
p := NewPointer(mem)
p.SetAddr(4)
have := p.Fetch()
want := uint8(44)
if have != want {
fmt.Printf("\n have: %v \n want: %v", have, want)
}
}
func TestPointerFetch2(t *testing.T) {
mem := NewMemory(1, 10)
mem.MapRAM(0, make([]uint8, 10, 10))
mem.Write(4, 44)
mem.Write(5, 55)
p := NewPointer(mem)
p.SetAddr(4)
p.Fetch()
have := p.Fetch()
want := uint8(55)
if have != want {
fmt.Printf("\n have: %v \n want: %v", have, want)
}
}
func TestPeek(t *testing.T) {
mem := NewMemory(1, 10)
mem.MapRAM(0, make([]uint8, 10, 10))
mem.Write(4, 44)
p := NewPointer(mem)
p.SetAddr(4)
p.Peek()
have := p.Peek()
want := uint8(44)
if have != want {
fmt.Printf("\n have: %v \n want: %v", have, want)
}
}
func TestFetchLE(t *testing.T) {
mem := NewMemory(1, 10)
mem.MapRAM(0, make([]uint8, 10, 10))
mem.Write(4, 0x44)
mem.Write(5, 0x55)
p := NewPointer(mem)
p.SetAddr(4)
have := p.FetchLE()
want := 0x5544
if have != want {
fmt.Printf("\n have: %04x \n want: %04x", have, want)
}
}
func TestPutN(t *testing.T) {
mem := NewMemory(1, 5)
ram := make([]uint8, 5, 5)
mem.MapRAM(0, ram)
p := NewPointer(mem)
p.PutN(1, 2, 3)
p.PutN(4, 5)
want := []uint8{1, 2, 3, 4, 5}
if !reflect.DeepEqual(ram, want) {
fmt.Printf("\n have: %04x \n want: %04x", ram, want)
}
}
func TestLoadROMs(t *testing.T) {
data0 := []byte{1, 2}
data1 := []byte{3, 4}
readFile = func(filename string) ([]byte, error) {
switch filename {
case "data0":
return data0, nil
case "data1":
return data1, nil
}
return nil, fmt.Errorf("invalid file: %v", filename)
}
defer func() { readFile = ioutil.ReadFile }()
rom0 := NewROM(" data0 ", " data0 ", "0ca623e2855f2c75c842ad302fe820e41b4d197d")
rom1 := NewROM(" data1 ", " data1 ", "c512123626a98914cb55a769db20808db3df3af7")
chunks, err := LoadROMs("", []ROM{rom0, rom1})
if err != nil {
t.Error(err)
}
want := map[string][]byte{
"data0": data0,
"data1": data1,
}
if !reflect.DeepEqual(chunks, want) {
t.Errorf("\n have: %+v \n want: %+v", chunks, want)
}
}
func TestLoadROMsCombine(t *testing.T) {
data0 := []byte{1, 2}
data1 := []byte{3, 4}
readFile = func(filename string) ([]byte, error) {
switch filename {
case "data0":
return data0, nil
case "data1":
return data1, nil
}
return nil, fmt.Errorf("invalid file")
}
defer func() { readFile = ioutil.ReadFile }()
rom0 := NewROM(" data ", " data0 ", "0ca623e2855f2c75c842ad302fe820e41b4d197d")
rom1 := NewROM(" data ", " data1 ", "c512123626a98914cb55a769db20808db3df3af7")
chunks, err := LoadROMs("", []ROM{rom0, rom1})
if err != nil {
t.Error(err)
}
want := map[string][]byte{
"data": []byte{1, 2, 3, 4},
}
if !reflect.DeepEqual(chunks, want) {
t.Errorf("\n have: %+v \n want: %+v", chunks, want)
}
}
func TestLoadROMsChecksumError(t *testing.T) {
data0 := []byte{1, 2}
readFile = func(filename string) ([]byte, error) {
switch filename {
case "/data0":
return data0, nil
}
return nil, fmt.Errorf("invalid file")
}
defer func() { readFile = ioutil.ReadFile }()
rom0 := NewROM("data0", "data0", "xx")
_, err := LoadROMs("/", []ROM{rom0})
if err == nil {
t.Errorf("expected error")
}
}
|
[
7
] |
package clients
import (
"os"
"fmt"
"time"
"context"
"strings"
"io/ioutil"
"../utils"
"../crypto"
"../models"
"../connect"
"../settings"
)
type auth struct {
login string
password string
}
var set_email struct {
title string
body string
}
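// ClientTCP reads commands from standard input and dispatches them,
// handling authorization commands until the user is logged in.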
func ClientTCP() {
var (
message string
splited []string
authorization auth
)
for {
message = utils.Input()
splited = strings.Split(message, " ")
switch splited[0] {
case settings.TERM_EXIT: os.Exit(settings.EXIT_SUCCESS)
case settings.TERM_HELP: utils.PrintHelp()
case settings.TERM_INTERFACE: turnInterface()
}
if !settings.User.Auth {
switch splited[0] {
case settings.TERM_LOGIN: setLogin(&authorization, splited)
case settings.TERM_PASSWORD: setPassword(&authorization, splited)
case settings.TERM_ADDRESS: setAddress(splited)
case settings.TERM_ENTER: pressEnter(authorization)
}
} else {
client(splited, message)
}
}
}
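// client dispatches commands that are only available after authorization.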
func client(splited []string, message string) {
switch splited[0] {
case settings.TERM_WHOAMI: fmt.Println("|", settings.User.Hash)
case settings.TERM_LOGOUT: connect.Logout()
case settings.TERM_NETWORK: network()
case settings.TERM_SEND: sendLocalMessage(splited)
case settings.TERM_EMAIL: emailAction(splited)
case settings.TERM_ARCHIVE: archiveAction(splited)
case settings.TERM_HISTORY: historyAction(splited)
case settings.TERM_CONNECT: connectTo(splited)
default: sendGlobalMessage(message)
}
}
// Actions with archives.
func archiveAction(splited []string) {
switch len(splited) {
case 1: listArchive()
case 2: listNodeArchive(splited)
case 3:
switch splited[1] {
case "download": downloadNodeFiles(splited)
}
}
}
// Download files from node archive.
func downloadNodeFiles(splited []string) {
if len(splited) < 4 { return }
for _, filename := range splited[3:] {
var new_pack = settings.PackageTCP {
From: models.From {
Name: settings.User.Hash,
},
To: splited[2],
Head: models.Head {
Header: settings.HEAD_ARCHIVE,
Mode: settings.MODE_READ_FILE,
},
Body: filename,
}
connect.CreateRedirectPackage(&new_pack)
connect.SendInitRedirectPackage(new_pack)
time.Sleep(time.Second * settings.TIME_SLEEP) // FIX
}
}
// Print list of files in nodes archive.
func listNodeArchive(splited []string) {
for _, name := range splited[1:] {
var new_pack = settings.PackageTCP {
From: models.From {
Name: settings.User.Hash,
},
To: name,
Head: models.Head {
Header: settings.HEAD_ARCHIVE,
Mode: settings.MODE_READ_LIST,
},
}
connect.CreateRedirectPackage(&new_pack)
connect.SendInitRedirectPackage(new_pack)
time.Sleep(time.Second * settings.TIME_SLEEP) // FIX
fmt.Printf("| %s:\n", name)
for _, file := range settings.User.TempArchive {
if file != "" {
fmt.Println("|", file)
}
}
}
}
// Print list of files in archive.
func listArchive() {
files, err := ioutil.ReadDir(settings.PATH_ARCHIVE)
utils.CheckError(err)
fmt.Printf("| %s:\n", settings.User.Hash)
for _, file := range files {
fmt.Println("|", file.Name())
}
}
// Actions with history of messages.
func historyAction(splited []string) {
var length = len(splited)
if length == 1 {
printGlobalHistory()
return
}
switch splited[1] {
case "del", "delete": historyDelete(splited, length)
case "loc", "local": historyLocal(splited, length)
}
}
// Delete global or local messages.
func historyDelete(splited []string, length int) {
if length == 2 {
connect.DeleteGlobalMessages()
return
}
connect.DeleteLocalMessages(splited[2:])
}
// Print local messages.
func historyLocal(splited []string, length int) {
if length > 2 {
printLocalHistory(splited[2:])
}
}
// Connect to nodes.
func connectTo(splited []string) {
if len(splited) > 1 {
connect.Connect(splited[1:], false)
}
}
// Actions with email.
func emailAction(splited []string) {
var length = len(splited)
if length > 1 {
switch splited[1] {
case "title": emailSetTitle(splited, length)
case "body": emailSetBody(splited, length)
case "write": emailWrite(splited, length)
case "read": emailRead(splited, length)
case "print": emailPrint(splited, length)
}
}
}
// Send email to one node.
func emailWrite(splited []string, length int) {
if length != 3 { return }
var new_pack = settings.PackageTCP {
From: models.From {
Name: settings.User.Hash,
},
To: splited[2],
Head: models.Head {
Header: settings.HEAD_EMAIL,
Mode: settings.MODE_SAVE,
},
Body:
set_email.title + settings.SEPARATOR +
set_email.body + settings.SEPARATOR +
time.Now().Format(time.RFC850),
}
connect.CreateRedirectPackage(&new_pack)
connect.SendInitRedirectPackage(new_pack)
}
// Read email.
func emailRead(splited []string, length int) {
switch length {
case 2: emailReadAll(splited)
case 3: emailReadAllByUser(splited)
case 4: emailReadByUserAndId(splited)
}
}
// Read list of emails by all nodes.
func emailReadAll(splited []string) {
var (
email models.Email
err error
)
rows, err := settings.DataBase.Query("SELECT Id, Title, User, Date FROM Email")
utils.CheckError(err)
defer rows.Close()
for rows.Next() {
err = rows.Scan(
&email.Id,
&email.Title,
&email.User,
&email.Date,
)
utils.CheckError(err)
crypto.DecryptEmail(settings.User.Password, &email)
fmt.Println("|", email.Id, "|", email.Title, "|", email.User, "|", email.Date, "|")
}
}
// Read list of emails by one node.
func emailReadAllByUser(splited []string) {
var (
email models.Email
err error
)
rows, err := settings.DataBase.Query(
"SELECT Id, Title, User, Date FROM Email WHERE User=$1",
splited[2],
)
utils.CheckError(err)
defer rows.Close()
for rows.Next() {
err = rows.Scan(
&email.Id,
&email.Title,
&email.User,
&email.Date,
)
utils.CheckError(err)
crypto.DecryptEmail(settings.User.Password, &email)
fmt.Println("|", email.Id, "|", email.Title, "|", email.User, "|", email.Date, "|")
}
}
// Read selected email by user and id.
func emailReadByUserAndId(splited []string) {
var (
email models.Email
err error
)
rows, err := settings.DataBase.Query(
"SELECT * FROM Email WHERE User=$1 AND Id=$2",
splited[2],
splited[3],
)
utils.CheckError(err)
defer rows.Close()
for rows.Next() {
err = rows.Scan(
&email.Id,
&email.Title,
&email.Body,
&email.User,
&email.Date,
)
utils.CheckError(err)
crypto.DecryptEmail(settings.User.Password, &email)
fmt.Println("--------------------------")
fmt.Println("| Title:", email.Title, "|")
fmt.Println("--------------------------")
fmt.Println("| Body:", email.Body, "|")
fmt.Println("--------------------------")
fmt.Println("| Author:", email.User, "|")
fmt.Println("--------------------------")
fmt.Println("| Date:", email.Date, "|")
fmt.Println("--------------------------")
}
}
// Print selected emails data.
func emailPrint(splited []string, length int) {
if length == 2 {
fmt.Println("| Title:", set_email.title, "|")
fmt.Println("| Body:", set_email.body, "|")
return
}
switch splited[2] {
case "title": fmt.Println("| Title:", set_email.title, "|")
case "body": fmt.Println("| Body:", set_email.body, "|")
}
}
// Set title in email.
func emailSetTitle(splited []string, length int) {
if length > 2 {
set_email.title = strings.Join(splited[2:], " ")
}
}
// Set main text in email.
func emailSetBody(splited []string, length int) {
if length > 2 {
set_email.body = strings.Join(splited[2:], " ")
}
}
// Send global message to all nodes.
func sendGlobalMessage(message string) {
if message == "" { return }
for username := range settings.User.NodeAddress {
var new_pack = settings.PackageTCP {
From: models.From {
Name: settings.User.Hash,
},
To: username,
Head: models.Head {
Header: settings.HEAD_MESSAGE,
Mode: settings.MODE_GLOBAL,
},
Body: message,
}
connect.CreateRedirectPackage(&new_pack)
connect.SendInitRedirectPackage(new_pack)
}
}
// Send local message to one node.
func sendLocalMessage(splited []string) {
if len(splited) > 2 {
var new_pack = settings.PackageTCP {
From: models.From {
Name: settings.User.Hash,
},
To: splited[1],
Head: models.Head {
Header: settings.HEAD_MESSAGE,
Mode: settings.MODE_LOCAL,
},
Body: strings.Join(splited[2:], " "),
}
connect.CreateRedirectPackage(&new_pack)
connect.SendInitRedirectPackage(new_pack)
}
}
// Print connections.
func network() {
for username := range settings.User.NodeAddress {
fmt.Println("|", username)
}
}
// Try to log in with the given login and password.
func pressEnter(authorization auth) {
switch settings.Authorization(authorization.login, authorization.password) {
case 1: utils.PrintWarning("login is undefined")
case 2: utils.PrintWarning("length of login > 64 bytes")
case 3: utils.PrintWarning("password.hash undefined")
case 4: utils.PrintWarning("login or password is wrong")
default:
if !settings.GoroutinesIsRun && settings.User.Port != "" {
settings.Mutex.Lock()
settings.GoroutinesIsRun = true
settings.Mutex.Unlock()
go connect.ServerTCP()
go connect.FindConnects(10)
}
fmt.Println("[SUCCESS]: Authorization")
}
}
// Turn on/off interface.
func turnInterface() {
if settings.ServerListenHTTP == nil {
go ClientHTTP()
} else {
if err := settings.ServerListenHTTP.Shutdown(context.TODO()); err != nil {
utils.PrintWarning("failure shutting down")
}
}
}
// Set address ipv4:port.
func setAddress(splited []string) {
if len(splited) > 1 {
var ipv4_port = strings.Split(splited[1], ":")
if len(ipv4_port) != 2 {
utils.PrintWarning("invalid argument for ':address'")
return
}
settings.Mutex.Lock()
settings.User.IPv4 = ipv4_port[0]
settings.User.Port = ":" + ipv4_port[1]
settings.Mutex.Unlock()
}
}
// Set login.
func setLogin(authorization *auth, splited []string) {
if len(splited) > 1 {
authorization.login = strings.Join(splited[1:], " ")
}
}
// Set password.
func setPassword(authorization *auth, splited []string) {
if len(splited) > 1 {
authorization.password = strings.Join(splited[1:], " ")
}
}
// Print messages from all nodes.
func printGlobalHistory() {
rows, err := settings.DataBase.Query("SELECT Body FROM GlobalMessages ORDER BY Id")
utils.CheckError(err)
var data string
for rows.Next() {
rows.Scan(&data)
fmt.Println("|", data)
}
rows.Close()
}
// Print local messages from nodes.
func printLocalHistory(slice []string) {
for _, user := range slice {
if _, ok := settings.User.NodeAddress[user]; ok {
rows, err := settings.DataBase.Query("SELECT Body FROM Local" + user + " WHERE ORDER BY Id")
utils.CheckError(err)
fmt.Printf("| %s:\n", user)
var data string
for rows.Next() {
rows.Scan(&data)
fmt.Println("|", data)
}
rows.Close()
}
}
}
|
[
7
] |
package main
import "fmt"
const MAX_UNDO = 10000
type changeList struct {
ops []bufferChange
current int // the position of the next op to undo (0 if none)
redoMode bool // if true we are redoing an action, no need to record it
}
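// NOTE: the methods below index ops with current directly and treat
// current == 0 as "nothing to undo"; this appears to rely on ops being
// seeded elsewhere with a placeholder entry at index 0.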
type undoContext struct {
text []line
start mark
end mark
}
type undoAction int
const (
undoDelete undoAction = iota
undoWrite
undoReplace
)
type bufferChange struct {
redo cmdContext
undo undoContext
}
func (c *changeList) add(redo cmdContext, undo undoContext) {
if !c.redoMode {
if c.current == MAX_UNDO {
c.ops = c.ops[1:]
c.current--
}
c.current++
c.ops = append(c.ops[:c.current], bufferChange{newRedoCtx(&redo), undo})
}
}
func (c *changeList) undo(v *view) string {
if c.current == 0 {
return "No more changes to undo"
}
ctx := c.ops[c.current].undo
// if ctx.end is not set we don't need to delete text
if ctx.end.buf != nil {
region{ctx.start, ctx.end}.delete()
}
if !text(ctx.text).empty() {
ctx.start.insertText(ctx.text)
} else {
		// if possible, move the cursor left so it sits before the deleted text
if !ctx.start.atLineStart() {
ctx.start.moveLeft(1)
}
}
*v.cs = ctx.start
c.current--
return fmt.Sprintf("undid change #%v of %v", c.current+1, len(c.ops)-1)
}
func (c *changeList) redo(v *view) string {
if c.current == len(c.ops)-1 {
return "Already at latest change"
}
c.redoMode = true
c.current++
ctx := c.ops[c.current].redo
p := *ctx.point
ctx.point = &p
pushCmd(&ctx)
*v.cs = *ctx.point
c.redoMode = false
return fmt.Sprintf("redid change #%v of %v", c.current, len(c.ops)-1)
}
func undo(ctx *cmdContext) {
for i := 0; i < ctx.num; i++ {
ctx.msg = ctx.point.buf.changeList.undo(ctx.view)
}
}
func redo(ctx *cmdContext) {
for i := 0; i < ctx.num; i++ {
ctx.msg = ctx.point.buf.changeList.redo(ctx.view)
}
}
func newRedoCtx(ctx *cmdContext) cmdContext {
p := *ctx.point
ctx.point = &p
ctx.silent = true
return *ctx
}
|
[
4
] |
package main
import (
"log"
"github.com/ppg/rosgo/ros"
)
func main() {
node := ros.NewNode("/test_param")
defer node.Shutdown()
if hasParam, err := node.HasParam("/rosdistro"); err != nil {
log.Fatal(err)
} else {
if !hasParam {
log.Fatal("HasParam() failed.")
}
}
if foundKey, err := node.SearchParam("/rosdistro"); err != nil {
log.Fatal(err)
} else {
if foundKey != "/rosdistro" {
log.Fatal("SearchParam() failed.")
}
}
if param, err := node.GetParam("/rosdistro"); err != nil {
log.Fatal(err)
} else {
if value, ok := param.(string); !ok {
log.Fatal("GetParam() failed.")
} else {
if value != "jade\n" {
log.Fatalf("Expected 'jade\\n' but '%s'", value)
}
}
}
if err := node.SetParam("/test_param", 42); err != nil {
log.Fatal(err)
}
if param, err := node.GetParam("/test_param"); err != nil {
log.Fatal(err)
} else {
if value, ok := param.(int32); ok {
if value != 42 {
log.Fatalf("Expected 42 but %d", value)
}
} else {
log.Fatal("GetParam('/test_param') failed.")
}
}
if err := node.DeleteParam("/test_param"); err != nil {
log.Fatal(err)
}
log.Print("Success")
}
|
[
4
] |
package musics
type (
// RatingFilter is filter expression builder for field Rating.
RatingFilter struct{}
)
var Rating RatingFilter
// Eq builds rating = value filter.
func (f RatingFilter) Eq(value float64) (exp Filter) {
return Filter{
expression: "rating = ?",
args: []interface{}{value},
}
}
// Neq builds rating <> value filter.
func (f RatingFilter) Neq(value float64) (exp Filter) {
return Filter{
expression: "rating <> ?",
args: []interface{}{value},
}
}
// Gt builds rating > value filter.
func (f RatingFilter) Gt(value float64) (exp Filter) {
return Filter{
expression: "rating > ?",
args: []interface{}{value},
}
}
// Lt builds rating < value filter.
func (f RatingFilter) Lt(value float64) (exp Filter) {
return Filter{
expression: "rating < ?",
args: []interface{}{value},
}
}
// Gte builds rating >= value filter.
func (f RatingFilter) Gte(value float64) (exp Filter) {
return Filter{
expression: "rating >= ?",
args: []interface{}{value},
}
}
// Lte builds rating <= value filter.
func (f RatingFilter) Lte(value float64) (exp Filter) {
return Filter{
expression: "rating <= ?",
args: []interface{}{value},
}
}
// Equal builds rating = value filter.
func (f RatingFilter) Equal(value float64) (exp Filter) {
return f.Eq(value)
}
// NotEqual builds rating <> value filter.
func (f RatingFilter) NotEqual(value float64) (exp Filter) {
return f.Neq(value)
}
// GreaterThan builds rating > value filter.
func (f RatingFilter) GreaterThan(value float64) (exp Filter) {
return f.Gt(value)
}
// LessThan builds rating < value filter.
func (f RatingFilter) LessThan(value float64) (exp Filter) {
return f.Lt(value)
}
// GreaterThanEqual builds rating >= value filter.
func (f RatingFilter) GreaterThanEqual(value float64) (exp Filter) {
return f.Gte(value)
}
// LessThanEqual builds rating <= value filter.
func (f RatingFilter) LessThanEqual(value float64) (exp Filter) {
return f.Lte(value)
}
// Between builds rating > min AND rating < max filter.
func (f RatingFilter) Between(min, max float64) (exp Filter) {
return Filter{
expression: "rating > ? AND rating < ?",
args: []interface{}{min, max},
}
}
// Range builds rating >= min AND rating <= max filter.
func (f RatingFilter) Range(min, max float64) (exp Filter) {
return Filter{
expression: "rating >= ? AND rating <= ?",
args: []interface{}{min, max},
}
}
// Name returns the SQL field name of Rating.
func (f RatingFilter) Name() string {
return "rating"
}
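// Usage sketch (illustrative only; the expression/args values shown are read
// straight off the constructors above, and Filter's fields are unexported):
//
//	f := Rating.Between(3.5, 4.5)
//	// f holds expression "rating > ? AND rating < ?" with args [3.5, 4.5]
//
//	g := Rating.Gte(4.0)
//	// g holds expression "rating >= ?" with args [4]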
|
[
1
] |
package azurerm
import (
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt"
"log"
"reflect"
"strings"
"sync"
"github.com/Azure/azure-sdk-for-go/arm/resources/resources"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/helper/mutexkv"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
riviera "github.com/jen20/riviera/azure"
)
// Provider returns a terraform.ResourceProvider.
func Provider() terraform.ResourceProvider {
var p *schema.Provider
p = &schema.Provider{
Schema: map[string]*schema.Schema{
"subscription_id": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""),
},
"client_id": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""),
},
"client_secret": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""),
},
"tenant_id": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""),
},
"environment": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"),
},
"skip_provider_registration": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SKIP_PROVIDER_REGISTRATION", false),
},
},
DataSourcesMap: map[string]*schema.Resource{
"azurerm_client_config": dataSourceArmClientConfig(),
},
ResourcesMap: map[string]*schema.Resource{
// These resources use the Azure ARM SDK
"azurerm_availability_set": resourceArmAvailabilitySet(),
"azurerm_cdn_endpoint": resourceArmCdnEndpoint(),
"azurerm_cdn_profile": resourceArmCdnProfile(),
"azurerm_container_registry": resourceArmContainerRegistry(),
"azurerm_container_service": resourceArmContainerService(),
"azurerm_eventhub": resourceArmEventHub(),
"azurerm_eventhub_authorization_rule": resourceArmEventHubAuthorizationRule(),
"azurerm_eventhub_consumer_group": resourceArmEventHubConsumerGroup(),
"azurerm_eventhub_namespace": resourceArmEventHubNamespace(),
"azurerm_lb": resourceArmLoadBalancer(),
"azurerm_lb_backend_address_pool": resourceArmLoadBalancerBackendAddressPool(),
"azurerm_lb_nat_rule": resourceArmLoadBalancerNatRule(),
"azurerm_lb_nat_pool": resourceArmLoadBalancerNatPool(),
"azurerm_lb_probe": resourceArmLoadBalancerProbe(),
"azurerm_lb_rule": resourceArmLoadBalancerRule(),
"azurerm_managed_disk": resourceArmManagedDisk(),
"azurerm_key_vault": resourceArmKeyVault(),
"azurerm_local_network_gateway": resourceArmLocalNetworkGateway(),
"azurerm_network_interface": resourceArmNetworkInterface(),
"azurerm_network_security_group": resourceArmNetworkSecurityGroup(),
"azurerm_network_security_rule": resourceArmNetworkSecurityRule(),
"azurerm_public_ip": resourceArmPublicIp(),
"azurerm_redis_cache": resourceArmRedisCache(),
"azurerm_route": resourceArmRoute(),
"azurerm_route_table": resourceArmRouteTable(),
"azurerm_servicebus_namespace": resourceArmServiceBusNamespace(),
"azurerm_servicebus_subscription": resourceArmServiceBusSubscription(),
"azurerm_servicebus_topic": resourceArmServiceBusTopic(),
"azurerm_storage_account": resourceArmStorageAccount(),
"azurerm_storage_blob": resourceArmStorageBlob(),
"azurerm_storage_container": resourceArmStorageContainer(),
"azurerm_storage_share": resourceArmStorageShare(),
"azurerm_storage_queue": resourceArmStorageQueue(),
"azurerm_storage_table": resourceArmStorageTable(),
"azurerm_subnet": resourceArmSubnet(),
"azurerm_template_deployment": resourceArmTemplateDeployment(),
"azurerm_traffic_manager_endpoint": resourceArmTrafficManagerEndpoint(),
"azurerm_traffic_manager_profile": resourceArmTrafficManagerProfile(),
"azurerm_virtual_machine_extension": resourceArmVirtualMachineExtensions(),
"azurerm_virtual_machine": resourceArmVirtualMachine(),
"azurerm_virtual_machine_scale_set": resourceArmVirtualMachineScaleSet(),
"azurerm_virtual_network": resourceArmVirtualNetwork(),
"azurerm_virtual_network_peering": resourceArmVirtualNetworkPeering(),
// These resources use the Riviera SDK
"azurerm_dns_a_record": resourceArmDnsARecord(),
"azurerm_dns_aaaa_record": resourceArmDnsAAAARecord(),
"azurerm_dns_cname_record": resourceArmDnsCNameRecord(),
"azurerm_dns_mx_record": resourceArmDnsMxRecord(),
"azurerm_dns_ns_record": resourceArmDnsNsRecord(),
"azurerm_dns_srv_record": resourceArmDnsSrvRecord(),
"azurerm_dns_txt_record": resourceArmDnsTxtRecord(),
"azurerm_dns_zone": resourceArmDnsZone(),
"azurerm_resource_group": resourceArmResourceGroup(),
"azurerm_search_service": resourceArmSearchService(),
"azurerm_sql_database": resourceArmSqlDatabase(),
"azurerm_sql_firewall_rule": resourceArmSqlFirewallRule(),
"azurerm_sql_server": resourceArmSqlServer(),
},
}
p.ConfigureFunc = providerConfigure(p)
return p
}
// Config is the configuration structure used to instantiate a
// new Azure management client.
type Config struct {
ManagementURL string
SubscriptionID string
ClientID string
ClientSecret string
TenantID string
Environment string
SkipProviderRegistration bool
validateCredentialsOnce sync.Once
}
func (c *Config) validate() error {
var err *multierror.Error
if c.SubscriptionID == "" {
err = multierror.Append(err, fmt.Errorf("Subscription ID must be configured for the AzureRM provider"))
}
if c.ClientID == "" {
err = multierror.Append(err, fmt.Errorf("Client ID must be configured for the AzureRM provider"))
}
if c.ClientSecret == "" {
err = multierror.Append(err, fmt.Errorf("Client Secret must be configured for the AzureRM provider"))
}
if c.TenantID == "" {
err = multierror.Append(err, fmt.Errorf("Tenant ID must be configured for the AzureRM provider"))
}
if c.Environment == "" {
err = multierror.Append(err, fmt.Errorf("Environment must be configured for the AzureRM provider"))
}
return err.ErrorOrNil()
}
func providerConfigure(p *schema.Provider) schema.ConfigureFunc {
return func(d *schema.ResourceData) (interface{}, error) {
config := &Config{
SubscriptionID: d.Get("subscription_id").(string),
ClientID: d.Get("client_id").(string),
ClientSecret: d.Get("client_secret").(string),
TenantID: d.Get("tenant_id").(string),
Environment: d.Get("environment").(string),
SkipProviderRegistration: d.Get("skip_provider_registration").(bool),
}
if err := config.validate(); err != nil {
return nil, err
}
client, err := config.getArmClient()
if err != nil {
return nil, err
}
client.StopContext = p.StopContext()
// replaces the context between tests
p.MetaReset = func() error {
client.StopContext = p.StopContext()
return nil
}
// List all the available providers and their registration state to avoid unnecessary
// requests. This also lets us check if the provider credentials are correct.
providerList, err := client.providers.List(nil, "")
if err != nil {
return nil, fmt.Errorf("Unable to list provider registration status, it is possible that this is due to invalid "+
"credentials or the service principal does not have permission to use the Resource Manager API, Azure "+
"error: %s", err)
}
if !config.SkipProviderRegistration {
err = registerAzureResourceProvidersWithSubscription(*providerList.Value, client.providers)
if err != nil {
return nil, err
}
}
return client, nil
}
}
func registerProviderWithSubscription(providerName string, client resources.ProvidersClient) error {
_, err := client.Register(providerName)
if err != nil {
return fmt.Errorf("Cannot register provider %s with Azure Resource Manager: %s.", providerName, err)
}
return nil
}
var providerRegistrationOnce sync.Once
// registerAzureResourceProvidersWithSubscription uses the providers client to register
// all Azure resource providers which the Terraform provider may require (regardless of
// whether they are actually used by the configuration or not). It was confirmed by Microsoft
// that this is the approach their own internal tools also take.
func registerAzureResourceProvidersWithSubscription(providerList []resources.Provider, client resources.ProvidersClient) error {
var err error
providerRegistrationOnce.Do(func() {
providers := map[string]struct{}{
"Microsoft.Compute": struct{}{},
"Microsoft.Cache": struct{}{},
"Microsoft.ContainerRegistry": struct{}{},
"Microsoft.ContainerService": struct{}{},
"Microsoft.Network": struct{}{},
"Microsoft.Cdn": struct{}{},
"Microsoft.Storage": struct{}{},
"Microsoft.Sql": struct{}{},
"Microsoft.Search": struct{}{},
"Microsoft.Resources": struct{}{},
"Microsoft.ServiceBus": struct{}{},
"Microsoft.KeyVault": struct{}{},
"Microsoft.EventHub": struct{}{},
}
// filter out any providers already registered
for _, p := range providerList {
if _, ok := providers[*p.Namespace]; !ok {
continue
}
if strings.ToLower(*p.RegistrationState) == "registered" {
log.Printf("[DEBUG] Skipping provider registration for namespace %s\n", *p.Namespace)
delete(providers, *p.Namespace)
}
}
var wg sync.WaitGroup
wg.Add(len(providers))
for providerName := range providers {
go func(p string) {
defer wg.Done()
log.Printf("[DEBUG] Registering provider with namespace %s\n", p)
				if innerErr := registerProviderWithSubscription(p, client); innerErr != nil {
err = innerErr
}
}(providerName)
}
wg.Wait()
})
return err
}
// armMutexKV is the instance of MutexKV for ARM resources
var armMutexKV = mutexkv.NewMutexKV()
func azureStateRefreshFunc(resourceURI string, client *ArmClient, command riviera.APICall) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
req := client.rivieraClient.NewRequestForURI(resourceURI)
req.Command = command
res, err := req.Execute()
if err != nil {
return nil, "", fmt.Errorf("Error executing %T command in azureStateRefreshFunc", req.Command)
}
var value reflect.Value
if reflect.ValueOf(res.Parsed).Kind() == reflect.Ptr {
value = reflect.ValueOf(res.Parsed).Elem()
} else {
value = reflect.ValueOf(res.Parsed)
}
for i := 0; i < value.NumField(); i++ { // iterates through every struct type field
tag := value.Type().Field(i).Tag // returns the tag string
tagValue := tag.Get("mapstructure")
if tagValue == "provisioningState" {
return res.Parsed, value.Field(i).Elem().String(), nil
}
}
panic(fmt.Errorf("azureStateRefreshFunc called on structure %T with no mapstructure:provisioningState tag. This is a bug", res.Parsed))
}
}
// Resource group names can be capitalised, but we store them in lowercase.
// Use a custom diff function to avoid creation of new resources.
func resourceAzurermResourceGroupNameDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
return strings.ToLower(old) == strings.ToLower(new)
}
// ignoreCaseDiffSuppressFunc is a DiffSuppressFunc from helper/schema that is
// used to ignore any case-changes in a return value.
func ignoreCaseDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool {
return strings.ToLower(old) == strings.ToLower(new)
}
// ignoreCaseStateFunc is a StateFunc from helper/schema that converts the
// supplied value to lower before saving to state for consistency.
func ignoreCaseStateFunc(val interface{}) string {
return strings.ToLower(val.(string))
}
func userDataStateFunc(v interface{}) string {
switch s := v.(type) {
case string:
s = base64Encode(s)
hash := sha1.Sum([]byte(s))
return hex.EncodeToString(hash[:])
default:
return ""
}
}
// Base64Encode encodes data if the input isn't already encoded using
// base64.StdEncoding.EncodeToString. If the input is already base64 encoded,
// return the original input unchanged.
func base64Encode(data string) string {
// Check whether the data is already Base64 encoded; don't double-encode
if isBase64Encoded(data) {
return data
}
// data has not been encoded encode and return
return base64.StdEncoding.EncodeToString([]byte(data))
}
func isBase64Encoded(data string) bool {
_, err := base64.StdEncoding.DecodeString(data)
return err == nil
}
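// Behaviour sketch (assuming the helpers above are used as-is):
//
//	base64Encode("foobar")   // "Zm9vYmFy" – plain text is encoded once
//	base64Encode("Zm9vYmFy") // "Zm9vYmFy" – already-valid base64 is returned unchanged
//
// The check is best-effort: any string that happens to decode cleanly
// (for example "abcd") is treated as already encoded.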
|
[
1
] |
package viewmodels
import (
"net/url"
"github.com/authgear/authgear-server/pkg/api/model"
"github.com/authgear/authgear-server/pkg/lib/authn/identity"
"github.com/authgear/authgear-server/pkg/lib/config"
"github.com/authgear/authgear-server/pkg/lib/interaction"
)
type IdentityCandidatesGetter interface {
GetIdentityCandidates() []identity.Candidate
}
type AuthenticationViewModel struct {
IdentityCandidates []identity.Candidate
IdentityCount int
LoginIDDisabled bool
PhoneLoginIDEnabled bool
EmailLoginIDEnabled bool
UsernameLoginIDEnabled bool
PasskeyEnabled bool
// NonPhoneLoginIDInputType is the "type" attribute for the non-phone <input>.
// It is "email" or "text".
NonPhoneLoginIDInputType string
// NonPhoneLoginIDType is the type of non-phone login ID.
// It is "email", "username" or "email_or_username".
NonPhoneLoginIDType string
// q_login_id_input_type is the input the end-user has chosen.
// It is "email", "phone" or "text".
// LoginIDContextualType is the type the end-user thinks they should enter.
// It depends on q_login_id_input_type.
// It is "email", "phone", "username", or "email_or_username".
LoginIDContextualType string
}
type AuthenticationViewModeler struct {
Authentication *config.AuthenticationConfig
LoginID *config.LoginIDConfig
}
func (m *AuthenticationViewModeler) NewWithGraph(graph *interaction.Graph, form url.Values) AuthenticationViewModel {
var node IdentityCandidatesGetter
if !graph.FindLastNode(&node) {
panic("webapp: no node with identity candidates found")
}
return m.NewWithCandidates(node.GetIdentityCandidates(), form)
}
func (m *AuthenticationViewModeler) NewWithCandidates(candidates []identity.Candidate, form url.Values) AuthenticationViewModel {
hasEmail := false
hasUsername := false
hasPhone := false
identityCount := 0
	// In the first loop, we find out which types of login ID are available.
for _, c := range candidates {
typ, _ := c[identity.CandidateKeyType].(string)
if typ == string(model.IdentityTypeLoginID) {
loginIDType, _ := c[identity.CandidateKeyLoginIDType].(string)
switch loginIDType {
case "phone":
hasPhone = true
case "email":
hasEmail = true
default:
hasUsername = true
}
}
identityID := c[identity.CandidateKeyIdentityID].(string)
if identityID != "" {
identityCount++
}
}
// Then we determine NonPhoneLoginIDInputType.
nonPhoneLoginIDInputType := "text"
if hasEmail && !hasUsername {
nonPhoneLoginIDInputType = "email"
}
nonPhoneLoginIDType := "email"
switch {
case hasEmail && hasUsername:
nonPhoneLoginIDType = "email_or_username"
case hasUsername:
nonPhoneLoginIDType = "username"
}
// Then we loop again and assign login_id_input_type.
for _, c := range candidates {
typ, _ := c[identity.CandidateKeyType].(string)
if typ == string(model.IdentityTypeLoginID) {
loginIDType, _ := c[identity.CandidateKeyLoginIDType].(string)
switch loginIDType {
case "phone":
c["login_id_input_type"] = "phone"
default:
c["login_id_input_type"] = nonPhoneLoginIDInputType
}
}
}
// Then we determine q_login_id_input_type.
xLoginIDInputType := "text"
if _, ok := form["q_login_id_input_type"]; ok {
xLoginIDInputType = form.Get("q_login_id_input_type")
} else {
if len(m.LoginID.Keys) > 0 {
if m.LoginID.Keys[0].Type == model.LoginIDKeyTypePhone {
xLoginIDInputType = "phone"
} else {
xLoginIDInputType = nonPhoneLoginIDInputType
}
}
}
var loginIDContextualType string
switch {
case xLoginIDInputType == "phone":
loginIDContextualType = "phone"
default:
loginIDContextualType = nonPhoneLoginIDType
}
loginIDDisabled := !hasEmail && !hasUsername && !hasPhone
passkeyEnabled := false
for _, typ := range m.Authentication.Identities {
if typ == model.IdentityTypePasskey {
passkeyEnabled = true
}
}
return AuthenticationViewModel{
IdentityCandidates: candidates,
IdentityCount: identityCount,
LoginIDDisabled: loginIDDisabled,
PhoneLoginIDEnabled: hasPhone,
EmailLoginIDEnabled: hasEmail,
UsernameLoginIDEnabled: hasUsername,
PasskeyEnabled: passkeyEnabled,
NonPhoneLoginIDInputType: nonPhoneLoginIDInputType,
NonPhoneLoginIDType: nonPhoneLoginIDType,
LoginIDContextualType: loginIDContextualType,
}
}
|
[
4
] |
package main
/*
* @lc app=leetcode id=581 lang=golang
*
* [581] Shortest Unsorted Continuous Subarray
*/
// Solution 2:
func findUnsortedSubarray(nums []int) int {
// Pass 1: from left to right, find the last element which is smaller
// than its left side, mark it as end
var end int
var max = nums[0]
for i := 1; i < len(nums); i++ {
if nums[i] > max {
max = nums[i]
} else if nums[i] < max {
end = i
}
}
// Pass 2: from right to left, find the last element which is bigger
// than its right side, mark it as start
var begin int
var min = nums[len(nums)-1]
for i := len(nums) - 2; i >= 0; i-- {
if nums[i] < min {
min = nums[i]
} else if nums[i] > min {
begin = i
}
}
if begin == end {
return 0
}
return end - begin + 1
}
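// Worked example (illustrative): for nums = []int{2, 6, 4, 8, 10, 9, 15},
// pass 1 sets end = 5 (the 9) and pass 2 sets begin = 1 (the 6), so the
// function returns 5; sorting nums[1:6] ([6 4 8 10 9]) sorts the whole slice.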
// Solution 1: My Stupid Solution: Using Stack
func findUnsortedSubarray_Solution_1(nums []int) int {
var s Stack
var hasFirst bool
var res int
var max int
for i, num := range nums {
if i == 0 || num >= max {
if !hasFirst {
s.push(i)
}
max = num
} else {
for !s.isEmpty() && num < nums[s.top()] {
s.pop()
}
if s.isEmpty() {
res = i - (-1)
} else {
res = i - s.top()
}
hasFirst = true
}
}
return res
}
type Stack []int
func (s Stack) top() int { return s[len(s)-1] }
func (s Stack) isEmpty() bool { return len(s) == 0 }
func (s *Stack) push(t int) { *s = append(*s, t) }
func (s *Stack) pop() int {
t := (*s)[len(*s)-1]
*s = (*s)[:len(*s)-1]
return t
}
|
[
1
] |
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import "net/http"
// Standard cross domain policy information located at https://s3.amazonaws.com/crossdomain.xml
const crossDomainXML = `<?xml version="1.0"?><!DOCTYPE cross-domain-policy SYSTEM "http://www.adobe.com/xml/dtds/cross-domain-policy.dtd"><cross-domain-policy><allow-access-from domain="*" secure="false" /></cross-domain-policy>`
// Standard path where an app would find cross domain policy information.
const crossDomainXMLEntity = "/crossdomain.xml"
// Cross domain policy implements http.Handler interface, implementing a custom ServerHTTP.
type crossDomainPolicy struct {
handler http.Handler
}
// A cross-domain policy file is an XML document that grants a web client, such as Adobe Flash Player
// or Adobe Acrobat (though not necessarily limited to these), permission to handle data across domains.
// When clients request content hosted on a particular source domain and that content makes requests
// directed towards a domain other than its own, the remote domain needs to host a cross-domain
// policy file that grants access to the source domain, allowing the client to continue the transaction.
func setCrossDomainPolicy(h http.Handler) http.Handler {
return crossDomainPolicy{handler: h}
}
func (c crossDomainPolicy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Look for 'crossdomain.xml' in the incoming request.
switch r.URL.Path {
case crossDomainXMLEntity:
// Write the standard cross domain policy xml.
w.Write([]byte(crossDomainXML))
// Request completed, no need to serve to other handlers.
return
}
// Continue to serve the request further.
c.handler.ServeHTTP(w, r)
}
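// Usage sketch (illustrative only, not part of the MinIO handler chain): any
// http.Handler can be wrapped so that /crossdomain.xml is answered directly
// while every other path falls through to the wrapped handler.
//
//	mux := http.NewServeMux()
//	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
//		w.Write([]byte("hello"))
//	})
//	_ = http.ListenAndServe(":8080", setCrossDomainPolicy(mux))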
|
[
7
] |
package main
import "fmt"
const MAXSIZE = 99999999
type Trafik [MAXSIZE]int
var (
arr Trafik
n int
)
// catatLaluLintas reads n traffic counts into arr (1-based).
func catatLaluLintas(arr *Trafik, n int) {
	for i := 1; i <= n; i++ {
		fmt.Scan(&arr[i])
	}
}
// kendaraanTerbanyak finds the slot with the most vehicles and prints its
// index and count.
func kendaraanTerbanyak(arr *Trafik, n int) int {
	max := -9999
	idx := 0
	for i := 1; i <= n; i++ {
		if arr[i] > max {
			max = arr[i]
			idx = i
		}
	}
	fmt.Println("kendaraan terbanyak:", idx)
	fmt.Println("Sebanyak :", max)
	return max
}
func main() {
	n = 5
	catatLaluLintas(&arr, n)
	kendaraanTerbanyak(&arr, n)
}
|
[
1
] |
package feast
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/antonmedv/expr"
"github.com/antonmedv/expr/vm"
"math"
"strings"
"time"
"github.com/buger/jsonparser"
feast "github.com/feast-dev/feast/sdk/go"
"github.com/feast-dev/feast/sdk/go/protos/feast/serving"
"github.com/feast-dev/feast/sdk/go/protos/feast/types"
"github.com/oliveagle/jsonpath"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.uber.org/zap"
"github.com/gojek/merlin/pkg/transformer"
)
var (
feastError = promauto.NewCounter(prometheus.CounterOpts{
Namespace: transformer.PromNamespace,
Name: "feast_serving_error_count",
Help: "The total number of error returned by feast serving",
})
feastLatency = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: transformer.PromNamespace,
Name: "feast_serving_request_duration_ms",
Help: "Feast serving latency histogram",
Buckets: prometheus.ExponentialBuckets(1, 2, 10), // 1,2,4,8,16,32,64,128,256,512,+Inf
}, []string{"result"})
feastFeatureStatus = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: transformer.PromNamespace,
Name: "feast_feature_status_count",
Help: "Feature status by feature",
}, []string{"feature", "status"})
feastFeatureSummary = promauto.NewSummaryVec(prometheus.SummaryOpts{
Namespace: transformer.PromNamespace,
Name: "feast_feature_value",
Help: "Summary of feature value",
AgeBuckets: 1,
}, []string{"feature"})
)
// Options for the Feast transformer.
type Options struct {
ServingURL string `envconfig:"FEAST_SERVING_URL" required:"true"`
StatusMonitoringEnabled bool `envconfig:"FEAST_FEATURE_STATUS_MONITORING_ENABLED" default:"false"`
ValueMonitoringEnabled bool `envconfig:"FEAST_FEATURE_VALUE_MONITORING_ENABLED" default:"false"`
}
// Transformer wraps feast serving client to retrieve features.
type Transformer struct {
feastClient feast.Client
config *transformer.StandardTransformerConfig
logger *zap.Logger
options *Options
defaultValues map[string]*types.Value
compiledJsonPath map[string]*jsonpath.Compiled
compiledUdf map[string]*vm.Program
}
// NewTransformer initializes a new Transformer.
func NewTransformer(feastClient feast.Client, config *transformer.StandardTransformerConfig, options *Options, logger *zap.Logger) (*Transformer, error) {
defaultValues := make(map[string]*types.Value)
// populate default values
for _, ft := range config.TransformerConfig.Feast {
for _, f := range ft.Features {
if len(f.DefaultValue) != 0 {
feastValType := types.ValueType_Enum(types.ValueType_Enum_value[f.ValueType])
defVal, err := getValue(f.DefaultValue, feastValType)
if err != nil {
logger.Warn(fmt.Sprintf("invalid default value for %s : %v, %v", f.Name, f.DefaultValue, err))
continue
}
defaultValues[f.Name] = defVal
}
}
}
compiledJsonPath := make(map[string]*jsonpath.Compiled)
compiledUdf := make(map[string]*vm.Program)
for _, ft := range config.TransformerConfig.Feast {
for _, configEntity := range ft.Entities {
switch configEntity.Extractor.(type) {
case *transformer.Entity_JsonPath:
c, err := jsonpath.Compile(configEntity.GetJsonPath())
if err != nil {
return nil, fmt.Errorf("unable to compile jsonpath for entity %s: %s", configEntity.Name, configEntity.GetJsonPath())
}
compiledJsonPath[configEntity.GetJsonPath()] = c
case *transformer.Entity_Udf:
c, err := expr.Compile(configEntity.GetUdf(), expr.Env(UdfEnv{}))
if err != nil {
return nil, err
}
compiledUdf[configEntity.GetUdf()] = c
}
}
}
return &Transformer{
feastClient: feastClient,
config: config,
options: options,
logger: logger,
defaultValues: defaultValues,
compiledJsonPath: compiledJsonPath,
compiledUdf: compiledUdf,
}, nil
}
type FeastFeature struct {
Columns []string `json:"columns"`
Data [][]interface{} `json:"data"`
}
type result struct {
tableName string
feastFeature *FeastFeature
err error
}
// Transform retrieves the Feast feature values and adds them to the request.
func (t *Transformer) Transform(ctx context.Context, request []byte) ([]byte, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.Transform")
defer span.Finish()
feastFeatures := make(map[string]*FeastFeature, len(t.config.TransformerConfig.Feast))
// parallelize feast call per feature table
resChan := make(chan result, len(t.config.TransformerConfig.Feast))
for _, config := range t.config.TransformerConfig.Feast {
go func(cfg *transformer.FeatureTable) {
tableName := createTableName(cfg.Entities)
val, err := t.getFeastFeature(ctx, tableName, request, cfg)
resChan <- result{tableName, val, err}
}(config)
}
// collect result
for i := 0; i < cap(resChan); i++ {
res := <-resChan
if res.err != nil {
return nil, res.err
}
feastFeatures[res.tableName] = res.feastFeature
}
out, err := enrichRequest(ctx, request, feastFeatures)
if err != nil {
return nil, err
}
return out, err
}
func (t *Transformer) getFeastFeature(ctx context.Context, tableName string, request []byte, config *transformer.FeatureTable) (*FeastFeature, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.getFeastFeature")
span.SetTag("table.name", tableName)
defer span.Finish()
entities, err := t.buildEntitiesRequest(ctx, request, config.Entities)
if err != nil {
return nil, err
}
var features []string
for _, feature := range config.Features {
features = append(features, feature.Name)
}
feastRequest := feast.OnlineFeaturesRequest{
Project: config.Project,
Entities: entities,
Features: features,
}
t.logger.Debug("feast_request", zap.Any("feast_request", feastRequest))
startTime := time.Now()
feastResponse, err := t.feastClient.GetOnlineFeatures(ctx, &feastRequest)
	durationMs := time.Since(startTime).Milliseconds()
if err != nil {
feastLatency.WithLabelValues("error").Observe(float64(durationMs))
feastError.Inc()
return nil, err
}
feastLatency.WithLabelValues("success").Observe(float64(durationMs))
t.logger.Debug("feast_response", zap.Any("feast_response", feastResponse.Rows()))
feastFeature, err := t.buildFeastFeatures(ctx, feastResponse, config)
if err != nil {
return nil, err
}
return feastFeature, nil
}
func (t *Transformer) buildEntitiesRequest(ctx context.Context, request []byte, configEntities []*transformer.Entity) ([]feast.Row, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.buildEntitiesRequest")
defer span.Finish()
var entities []feast.Row
var nodesBody interface{}
err := json.Unmarshal(request, &nodesBody)
if err != nil {
return nil, err
}
for _, configEntity := range configEntities {
switch configEntity.Extractor.(type) {
case *transformer.Entity_JsonPath:
_, ok := t.compiledJsonPath[configEntity.GetJsonPath()]
if !ok {
c, err := jsonpath.Compile(configEntity.GetJsonPath())
if err != nil {
return nil, fmt.Errorf("unable to compile jsonpath for entity %s: %s", configEntity.Name, configEntity.GetJsonPath())
}
t.compiledJsonPath[configEntity.GetJsonPath()] = c
}
}
vals, err := getValuesFromJSONPayload(nodesBody, configEntity, t.compiledJsonPath[configEntity.GetJsonPath()], t.compiledUdf[configEntity.GetUdf()])
if err != nil {
return nil, fmt.Errorf("unable to extract entity %s: %v", configEntity.Name, err)
}
if len(entities) == 0 {
for _, val := range vals {
entities = append(entities, feast.Row{
configEntity.Name: val,
})
}
} else {
newEntities := []feast.Row{}
for _, entity := range entities {
for _, val := range vals {
newFeastRow := feast.Row{}
for k, v := range entity {
newFeastRow[k] = v
}
newFeastRow[configEntity.Name] = val
newEntities = append(newEntities, newFeastRow)
}
}
entities = newEntities
}
}
return entities, nil
}
func (t *Transformer) buildFeastFeatures(ctx context.Context, feastResponse *feast.OnlineFeaturesResponse, config *transformer.FeatureTable) (*FeastFeature, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.buildFeastFeatures")
defer span.Finish()
var columns []string
for _, entity := range config.Entities {
columns = append(columns, entity.Name)
}
for _, feature := range config.Features {
columns = append(columns, feature.Name)
}
var data [][]interface{}
status := feastResponse.Statuses()
for i, feastRow := range feastResponse.Rows() {
var row []interface{}
for _, column := range columns {
featureStatus := status[i][column]
switch featureStatus {
case serving.GetOnlineFeaturesResponse_PRESENT:
rawValue := feastRow[column]
featVal, err := getFeatureValue(rawValue)
if err != nil {
return nil, err
}
row = append(row, featVal)
// put behind feature toggle since it will generate high cardinality metrics
if t.options.ValueMonitoringEnabled {
v, err := getFloatValue(featVal)
if err != nil {
continue
}
feastFeatureSummary.WithLabelValues(column).Observe(v)
}
case serving.GetOnlineFeaturesResponse_NOT_FOUND, serving.GetOnlineFeaturesResponse_NULL_VALUE, serving.GetOnlineFeaturesResponse_OUTSIDE_MAX_AGE:
defVal, ok := t.defaultValues[column]
if !ok {
row = append(row, nil)
continue
}
featVal, err := getFeatureValue(defVal)
if err != nil {
return nil, err
}
row = append(row, featVal)
default:
return nil, fmt.Errorf("Unsupported feature retrieval status: %s", featureStatus)
}
// put behind feature toggle since it will generate high cardinality metrics
if t.options.StatusMonitoringEnabled {
feastFeatureStatus.WithLabelValues(column, featureStatus.String()).Inc()
}
}
data = append(data, row)
}
return &FeastFeature{
Columns: columns,
Data: data,
}, nil
}
func getFloatValue(val interface{}) (float64, error) {
switch i := val.(type) {
case float64:
return i, nil
case float32:
return float64(i), nil
case int64:
return float64(i), nil
case int32:
return float64(i), nil
default:
return math.NaN(), errors.New("getFloat: unknown value is of incompatible type")
}
}
func createTableName(entities []*transformer.Entity) string {
entityNames := make([]string, 0)
for _, n := range entities {
entityNames = append(entityNames, n.Name)
}
return strings.Join(entityNames, "_")
}
func getFeatureValue(val *types.Value) (interface{}, error) {
switch val.Val.(type) {
case *types.Value_StringVal:
return val.GetStringVal(), nil
case *types.Value_DoubleVal:
return val.GetDoubleVal(), nil
case *types.Value_FloatVal:
return val.GetFloatVal(), nil
case *types.Value_Int32Val:
return val.GetInt32Val(), nil
case *types.Value_Int64Val:
return val.GetInt64Val(), nil
case *types.Value_BoolVal:
return val.GetBoolVal(), nil
case *types.Value_StringListVal:
return val.GetStringListVal(), nil
case *types.Value_DoubleListVal:
return val.GetDoubleListVal(), nil
case *types.Value_FloatListVal:
return val.GetFloatListVal(), nil
case *types.Value_Int32ListVal:
return val.GetInt32ListVal(), nil
case *types.Value_Int64ListVal:
return val.GetInt64ListVal(), nil
case *types.Value_BoolListVal:
return val.GetBoolListVal(), nil
case *types.Value_BytesVal:
return val.GetBytesVal(), nil
case *types.Value_BytesListVal:
return val.GetBytesListVal(), nil
default:
return nil, fmt.Errorf("unknown feature value type: %T", val.Val)
}
}
func enrichRequest(ctx context.Context, request []byte, feastFeatures map[string]*FeastFeature) ([]byte, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.enrichRequest")
defer span.Finish()
feastFeatureJSON, err := json.Marshal(feastFeatures)
if err != nil {
return nil, err
}
out, err := jsonparser.Set(request, feastFeatureJSON, transformer.FeastFeatureJSONField)
if err != nil {
return nil, err
}
return out, err
}
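// Illustrative shape of the enriched request produced above. The feature
// field name comes from transformer.FeastFeatureJSONField, the table name is
// the entity names joined with "_", and all values here are hypothetical:
//
//	{
//	  "...original request fields...": "...",
//	  "<feast feature field>": {
//	    "customer_id": {
//	      "columns": ["customer_id", "total_booking"],
//	      "data": [[1234, 7]]
//	    }
//	  }
//	}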
|
[
7
] |
package controller
/*
 * Public API used by the blog.
 */
import (
"encoding/base64"
"encoding/json"
"fmt"
"framework"
"framework/response"
"framework/server"
"html/template"
"info"
"io/ioutil"
"model"
"net/http"
"strconv"
)
type apiCommentRender struct {
UserID string
UserName string
Pic string
CommentID string
CommentContent string
CommentTime string
Floor int
User *info.UserInfo
ChildContent template.HTML
}
type APIController struct {
server.SessionController
}
func NewAPIController() *APIController {
return &APIController{}
}
func (a *APIController) Path() interface{} {
return "/api"
}
func (a *APIController) SessionPath() string {
return "/"
}
func (a *APIController) buildComment(commentId int) (string, error) {
var commentList []*info.CommentInfo = nil
for commentId != -1 {
comment, err := model.ShareCommentModel().FetchCommentByCommentId(info.CommentType_Blog, commentId)
if err != nil {
return "", err
}
commentList = append(commentList, comment)
commentId = comment.ParentCommentID
}
	// reverse the order of the comment chain
commentListLength := len(commentList)
for i := 0; i < commentListLength/2; i++ {
tmp := commentList[i]
commentList[i] = commentList[commentListLength-i-1]
commentList[commentListLength-i-1] = tmp
}
comment := buildOneCommentFromCommentList(&commentList)
return comment, nil
}
func (a *APIController) handlePublicCommentAction(w http.ResponseWriter, inf map[string]interface{}) {
status, err := a.WebSession.Get("status")
if err != nil {
response.JsonResponseWithMsg(w, framework.ErrorAccountNotLogin, err.Error())
return
}
if status != "login" {
response.JsonResponseWithMsg(w, framework.ErrorAccountNotLogin, "account not login")
return
}
uid, err := a.WebSession.Get("id")
userId, err := strconv.Atoi(uid.(string))
if err != nil {
response.JsonResponseWithMsg(w, framework.ErrorAccountNotLogin, err.Error())
return
}
parseInt := func(name string, retValue *int) bool {
var ok bool
if _, ok = inf[name]; ok {
switch inf[name].(type) {
case int, int32, int64:
*retValue = inf[name].(int)
case float32:
*retValue = int(inf[name].(float32))
case float64:
*retValue = int(inf[name].(float64))
default:
return false
}
return true
}
return false
}
var blogId, commentId int
var content string
if parseInt("blogId", &blogId) && parseInt("commentId", &commentId) {
if _, ok := inf["content"]; ok {
switch inf["content"].(type) {
case string:
content = inf["content"].(string)
				commentId, err := model.ShareCommentModel().AddComment(info.CommentType_Blog, userId, blogId, commentId, content)
				if err != nil {
					response.JsonResponseWithMsg(w, framework.ErrorSQLError, err.Error())
					return
				}
				comment, err := a.buildComment(commentId)
				if err != nil {
					response.JsonResponseWithMsg(w, framework.ErrorSQLError, err.Error())
					return
				}
				data := make(map[string]interface{})
				data["comment"] = base64.StdEncoding.EncodeToString([]byte(comment))
				response.JsonResponseWithData(w, framework.ErrorOK, "", data)
				return
}
}
}
response.JsonResponse(w, framework.ErrorParamError)
}
func (a *APIController) handleGetUserInfoRequest(w http.ResponseWriter) {
status, err := a.WebSession.Get("status")
if err != nil {
response.JsonResponseWithMsg(w, framework.ErrorAccountNotLogin, err.Error())
return
}
if status != "login" {
response.JsonResponseWithMsg(w, framework.ErrorAccountNotLogin, "account not login")
return
}
uid, err := a.WebSession.Get("id")
userId, err := strconv.Atoi(uid.(string))
if err != nil {
response.JsonResponseWithMsg(w, framework.ErrorAccountNotLogin, err.Error())
return
}
userInfo, err := model.ShareUserModel().GetUserInfoById(int64(userId))
if err != nil {
response.JsonResponseWithMsg(w, framework.ErrorRunTimeError, err.Error())
return
}
response.JsonResponseWithData(w, framework.ErrorOK, "", map[string]interface{}{
"name": userInfo.UserName,
"pic": userInfo.SmallFigureurl,
})
}
func (a *APIController) HandlerRequest(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
response.JsonResponse(w, framework.ErrorMethodError)
return
}
result, err := ioutil.ReadAll(r.Body)
r.Body.Close()
if err != nil {
response.JsonResponse(w, framework.ErrorParamError)
return
}
var f interface{}
json.Unmarshal(result, &f)
switch f.(type) {
case map[string]interface{}:
info := f.(map[string]interface{})
if api, ok := info["type"]; ok {
switch api.(type) {
case string:
switch api.(string) {
case "talk":
a.SessionController.HandlerRequest(a, w, r)
a.handlePublicCommentAction(w, info)
return
case "blog":
case "getUserInfo":
fmt.Println("getUserInfo")
a.SessionController.HandlerRequest(a, w, r)
a.handleGetUserInfoRequest(w)
return
}
}
}
}
response.JsonResponse(w, framework.ErrorParamError)
}
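// Illustrative request bodies accepted by HandlerRequest (field names are
// taken from the parsing code above; the values are hypothetical):
//
//	{"type": "getUserInfo"}
//	{"type": "talk", "blogId": 1, "commentId": -1, "content": "nice post"}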
|
[
7
] |
package whereBuilder
import (
"fmt"
"strconv"
"strings"
)
const condAnd = "and"
const condOr = "or"
type WhereBuilder map[string]interface{}
type Condition struct {
field string
operation string
value interface{}
conditions []*Condition
logic string
}
type prepareBuilder struct {
k int
values []interface{}
}
func (wb WhereBuilder) preProcess() {
for field, condition := range wb {
if len(field) >= 2 && field[0:2] == "__" {
continue
}
switch condition.(type) {
case string:
wb[field] = Cond(field, "=", condition)
case int:
wb[field] = Cond(field, "=", condition)
case []string:
wb[field] = Cond(field, "in", condition)
case []*string:
wb[field] = Cond(field, "in", condition)
case []int:
wb[field] = Cond(field, "in", condition)
case []*int:
wb[field] = Cond(field, "in", condition)
case []float32:
wb[field] = Cond(field, "in", condition)
case []*float32:
wb[field] = Cond(field, "in", condition)
case []float64:
wb[field] = Cond(field, "in", condition)
case []*float64:
wb[field] = Cond(field, "in", condition)
}
}
}
func (wb WhereBuilder) ToPrepare(builder ...*prepareBuilder) (prepare string, values []interface{}) {
var p *prepareBuilder
if builder == nil {
p = &prepareBuilder{
k: 0,
values: make([]interface{}, 0),
}
} else {
p = builder[0]
}
var sb strings.Builder
sb.WriteString("(")
var i = 0
var max = len(wb)
wb.preProcess()
for field, condition := range wb {
switch condition.(type) {
case *Condition:
cond := condition.(*Condition)
if cond.conditions != nil {
sb.WriteString("(")
sb.WriteString(p.groupToPrepare(cond))
sb.WriteString(")")
} else {
sb.WriteString(p.conditionToPrepare(cond))
}
case WhereBuilder:
cond := condition.(WhereBuilder)
str, _ := cond.ToPrepare(p)
sb.WriteString(str)
case *WhereBuilder:
cond := condition.(*WhereBuilder)
str, _ := cond.ToPrepare(p)
sb.WriteString(str)
case string:
if len(field) < 2 || field[0:2] != "__" {
sb.WriteString(" ")
sb.WriteString(field)
}
sb.WriteString(condition.(string))
}
if i < max-1 {
sb.WriteString(" ")
sb.WriteString(condAnd)
sb.WriteString(" ")
}
i++
}
sb.WriteString(")")
return sb.String(), p.values
}
func (wb WhereBuilder) ToSql() string {
var sb strings.Builder
sb.WriteString("(")
var i = 0
var max = len(wb)
wb.preProcess()
for field, condition := range wb {
switch condition.(type) {
case *Condition:
cond := condition.(*Condition)
if cond.conditions != nil {
sb.WriteString("(")
sb.WriteString(groupToSql(cond))
sb.WriteString(")")
} else {
sb.WriteString(conditionToSql(cond))
}
case WhereBuilder:
cond := condition.(WhereBuilder)
sb.WriteString(cond.ToSql())
case *WhereBuilder:
cond := condition.(*WhereBuilder)
sb.WriteString(cond.ToSql())
case string:
if len(field) < 2 || field[0:2] != "__" {
sb.WriteString(" ")
sb.WriteString(field)
}
sb.WriteString(condition.(string))
}
if i < max-1 {
sb.WriteString(" ")
sb.WriteString(condAnd)
sb.WriteString(" ")
}
i++
}
sb.WriteString(")")
return sb.String()
}
func conditionToSql(c *Condition) string {
sb := strings.Builder{}
sb.WriteString(" ")
sb.WriteString(c.field)
sb.WriteString(" ")
sb.WriteString(c.operation)
sb.WriteString(" ")
switch c.value.(type) {
case string:
sb.WriteString(`"`)
str := c.value.(string)
str = strings.Replace(str, `"`, `\"`, -1)
sb.WriteString(str)
sb.WriteString(`"`)
case int:
sb.WriteString(strconv.Itoa(c.value.(int)))
case float32:
sb.WriteString(fmt.Sprintf("%f", c.value.(float32)))
case float64:
sb.WriteString(fmt.Sprintf("%f", c.value.(float32)))
case bool:
if c.value.(bool) {
sb.WriteString("1")
} else {
sb.WriteString("0")
}
case []int:
sb.WriteString("(")
sb.WriteString(JoinInt(c.value.([]int), ","))
sb.WriteString(")")
case []float32:
sb.WriteString("(")
sb.WriteString(JoinFloat32(c.value.([]float32), ","))
sb.WriteString(")")
case []float64:
sb.WriteString("(")
sb.WriteString(JoinFloat64(c.value.([]float64), ","))
sb.WriteString(")")
case []string:
sb.WriteString(`("`)
strList := c.value.([]string)
for k, v := range strList {
strList[k] = strings.Replace(v, `"`, `\"`, -1)
}
str := strings.Join(strList, `","`)
sb.WriteString(str)
sb.WriteString(`")`)
}
sb.WriteString(" ")
return sb.String()
}
func (p *prepareBuilder) getPlace(value interface{}) string {
p.k++
switch value.(type) {
case bool:
if value.(bool) {
p.values = append(p.values, 1)
} else {
p.values = append(p.values, 0)
}
default:
p.values = append(p.values, value)
}
return "?"
}
func groupToSql(group *Condition) string {
sb := strings.Builder{}
max := len(group.conditions)
for k, v := range group.conditions {
if v.conditions != nil {
sb.WriteString(" (")
sb.WriteString(groupToSql(v))
sb.WriteString(")")
} else {
sb.WriteString(conditionToSql(v))
}
if k < max-1 {
sb.WriteString(group.logic)
}
}
return sb.String()
}
func (p *prepareBuilder) conditionToPrepare(c *Condition) string {
sb := strings.Builder{}
sb.WriteString(" ")
sb.WriteString(c.field)
sb.WriteString(" ")
sb.WriteString(c.operation)
sb.WriteString(" ")
switch c.operation {
case "in":
sb.WriteString("(")
sb.WriteString(p.getPlace(c.value))
sb.WriteString(")")
default:
sb.WriteString(p.getPlace(c.value))
}
sb.WriteString(" ")
return sb.String()
}
func (p *prepareBuilder) groupToPrepare(c *Condition) string {
sb := strings.Builder{}
max := len(c.conditions)
for k, v := range c.conditions {
if v.conditions != nil {
sb.WriteString(" (")
sb.WriteString(p.groupToPrepare(v))
sb.WriteString(")")
} else {
sb.WriteString(p.conditionToPrepare(v))
}
if k < max-1 {
sb.WriteString(c.logic)
}
}
return sb.String()
}
func Cond(field, operation string, value interface{}) *Condition {
return &Condition{
field: field,
operation: operation,
value: value,
}
}
func And(conditions ...*Condition) *Condition {
return &Condition{
conditions: conditions,
logic: condAnd,
}
}
func Or(conditions ...*Condition) *Condition {
return &Condition{
conditions: conditions,
logic: condOr,
}
}
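// Usage sketch (illustrative; map iteration order makes the ordering of the
// generated clauses non-deterministic):
//
//	wb := WhereBuilder{
//		"status": "active",             // preProcess turns this into status = "active"
//		"age":    Cond("age", ">", 18), // explicit condition
//		"city":   Or(Cond("city", "=", "NY"), Cond("city", "=", "LA")),
//	}
//	sql := wb.ToSql()               // e.g. ( status = "active" and age > 18 and ( city = "NY" or city = "LA" ))
//	prepare, args := wb.ToPrepare() // same shape with "?" placeholders, values collected in args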
|
[
1
] |
// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
package storetest
import (
"testing"
"github.com/mattermost/mattermost-server/model"
"github.com/mattermost/mattermost-server/store"
)
func TestLicenseStore(t *testing.T, ss store.Store) {
t.Run("Save", func(t *testing.T) { testLicenseStoreSave(t, ss) })
t.Run("Get", func(t *testing.T) { testLicenseStoreGet(t, ss) })
}
func testLicenseStoreSave(t *testing.T, ss store.Store) {
l1 := model.LicenseRecord{}
l1.Id = model.NewId()
l1.Bytes = "junk"
if err := (<-ss.License().Save(&l1)).Err; err != nil {
t.Fatal("couldn't save license record", err)
}
if err := (<-ss.License().Save(&l1)).Err; err != nil {
t.Fatal("shouldn't fail on trying to save existing license record", err)
}
l1.Id = ""
if err := (<-ss.License().Save(&l1)).Err; err == nil {
t.Fatal("should fail on invalid license", err)
}
}
func testLicenseStoreGet(t *testing.T, ss store.Store) {
l1 := model.LicenseRecord{}
l1.Id = model.NewId()
l1.Bytes = "junk"
store.Must(ss.License().Save(&l1))
if r := <-ss.License().Get(l1.Id); r.Err != nil {
t.Fatal("couldn't get license", r.Err)
} else {
if r.Data.(*model.LicenseRecord).Bytes != l1.Bytes {
t.Fatal("license bytes didn't match")
}
}
if err := (<-ss.License().Get("missing")).Err; err == nil {
t.Fatal("should fail on get license", err)
}
}
|
[
4
] |
package main
import "fmt"
func main() {
var max = 3
var numbers = []int{2,3,0,4,3,2,0,4,2,0,3}
var howMany, getNumbers = findMax(numbers,max)
var theNumbers = getNumbers()
fmt.Println("Numbers\t:",numbers)
fmt.Printf("find \t: %d\n\n",max)
fmt.Println("found \t:", howMany)
fmt.Println("value \t:", theNumbers)
}
func findMax(numbers []int, max int) (int,func() []int) {
var res []int
for _,e := range numbers {
if e <= max {
res = append(res,e)
}
}
return len(res),func() []int {
return res
}
}
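// Expected output for the inputs above (every value at most 3 is collected):
//
//	Numbers : [2 3 0 4 3 2 0 4 2 0 3]
//	find    : 3
//	found   : 9
//	value   : [2 3 0 3 2 0 2 0 3]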
|
[
1
] |
package cron
import (
"fmt"
"log"
"math/rand"
"strings"
"time"
"github.com/kyokomi/slackbot/plugins"
"github.com/robfig/cron"
)
const helpText = `
register:
cron add */1 * * * * * hogehoge
response:
<cron_id>
delete:
cron del <cron_id>
response:
delete message.
list:
cron list
response:
show added cron list.
help:
cron help
response:
show this help.
`
var rd = rand.New(rand.NewSource(time.Now().UnixNano()))
type Context interface {
AddCommand(channel string, c Command) string
DelCommand(channel string, c Command) string
ListCommand(channel string, c Command) string
HelpCommand(channel string, c Command) string
Refresh(messageSender plugins.MessageSender, channel string)
AllRefresh(messageSender plugins.MessageSender)
Close()
}
type context struct {
repository Repository
cronClient map[string]*cron.Cron
cronTaskMap map[string]TaskMap
}
// NewContext create cron context
func NewContext(repository Repository) (Context, error) {
ctx := &context{
cronClient: map[string]*cron.Cron{},
repository: repository,
}
data, err := repository.Load()
if err != nil {
return nil, err
}
ctx.cronTaskMap = data
return ctx, nil
}
func (ctx *context) AllRefresh(messageSender plugins.MessageSender) {
for channelID := range ctx.cronTaskMap {
log.Println("Refresh channelID", channelID)
ctx.Refresh(messageSender, channelID)
}
}
func (ctx *context) Close() {
if ctx.cronClient != nil {
for _, c := range ctx.cronClient {
if c == nil {
continue
}
c.Stop()
}
}
if ctx.repository != nil {
ctx.repository.Close()
}
}
func (ctx *context) Refresh(messageSender plugins.MessageSender, channel string) {
if ctx.cronClient[channel] != nil {
ctx.cronClient[channel].Stop()
ctx.cronClient[channel] = nil
}
c := cron.New()
for _, activeCron := range ctx.getTaskMap(channel) {
if !activeCron.Active {
continue
}
cmd := activeCron.Command
c.AddFunc(activeCron.Command.CronSpec, func() {
message := cmd.Message()
switch cmd.Action {
case RandomAddAction:
idx := rd.Intn(len(cmd.Args) - 1)
log.Println(len(cmd.Args), idx, cmd.Args[idx])
message = cmd.Args[idx]
}
messageSender.SendMessage(message, channel)
})
}
c.Start()
ctx.cronClient[channel] = c
if ctx.repository != nil {
ctx.repository.Save(ctx.cronTaskMap)
}
}
func (ctx *context) startTask(channelID string, c Command) {
if ctx.cronTaskMap[channelID] == nil {
ctx.cronTaskMap[channelID] = TaskMap{}
}
ctx.cronTaskMap[channelID].AddTask(c.Key(), Task{true, c})
}
func (ctx *context) stopTask(channelID string, c Command) {
if ctx.cronTaskMap[channelID] == nil {
ctx.cronTaskMap[channelID] = TaskMap{}
}
ctx.cronTaskMap[channelID].AddTask(c.Key(), Task{false, c})
}
func (ctx *context) getTaskMap(channelID string) map[string]Task {
if ctx.cronTaskMap[channelID] == nil {
ctx.cronTaskMap[channelID] = TaskMap{}
}
return ctx.cronTaskMap[channelID]
}
func (ctx *context) AddCommand(channel string, c Command) string {
ctx.startTask(channel, c)
return fmt.Sprintf("`%s setup done`", c.CronID)
}
func (ctx *context) DelCommand(channel string, c Command) string {
ctx.stopTask(channel, c)
return fmt.Sprintf("`%s deleted done`", c.CronID)
}
func (ctx *context) ListCommand(channel string, _ Command) string {
specMessage := []string{}
for _, ccd := range ctx.getTaskMap(channel) {
if !ccd.Active {
continue
}
specMessage = append(specMessage, fmt.Sprintf(
"cron = [%s] message = [%s] id = [%s]",
ccd.Command.CronSpec,
ccd.Command.Message(),
ccd.Command.CronID,
))
}
message := strings.Join(specMessage, "\n")
if message == "" {
message = "not cron list"
}
return fmt.Sprintf("```\n%s\n```", message)
}
func (ctx *context) HelpCommand(channel string, _ Command) string {
return fmt.Sprintf("```\n%s\n```", helpText)
}
type Repository interface {
Load() (map[string]TaskMap, error)
Save(cronTaskMap map[string]TaskMap) error
Close() error
}
|
[
7
] |
package main
import (
"net/http"
"time"
"math/rand"
"encoding/json"
"fmt"
"io/ioutil"
"os"
)
type Expression struct {
ID int `json:"id"`
Phrase string `json:"phrase"`
Meaning string `json:"meaning"`
}
func (e Expression) toString() string {
return toJson(e)
}
func toJson(p interface{}) string {
bytes, err := json.Marshal(p)
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
return string(bytes)
}
func main() {
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
	http.HandleFunc("/expression", ChooseExpression)
	http.Handle("/", http.FileServer(http.Dir("public")))
	if err := http.ListenAndServe(":"+port, nil); err != nil {
		fmt.Println(err.Error())
		os.Exit(1)
	}
}
func ChooseExpression(response http.ResponseWriter, request *http.Request) {
expression := GetExpression()
response.Write([]byte(expression))
}
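// GetExpression loads expressions.json (a JSON array of objects with "id",
// "phrase" and "meaning" fields, matching the Expression struct) and returns
// one entry at random as a JSON string.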
func GetExpression() string {
file, err := ioutil.ReadFile("./expressions.json")
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
	var c []Expression
	if err := json.Unmarshal(file, &c); err != nil || len(c) == 0 {
		fmt.Println("failed to load expressions:", err)
		os.Exit(1)
	}
	randomId := random(0, len(c))
return c[randomId].toString()
}
// rng is seeded once at startup; re-seeding on every call (and at second
// resolution, as before) would return the same expression for requests that
// arrive within the same second.
var rng = rand.New(rand.NewSource(time.Now().UnixNano()))
func random(min, max int) int {
	return rng.Intn(max-min) + min
}
|
[
1
] |
package algoliasearch
import (
"encoding/json"
"fmt"
"time"
)
type Rule struct {
Condition RuleCondition `json:"condition"`
Consequence RuleConsequence `json:"consequence"`
Description string `json:"description,omitempty"`
Enabled bool `json:"enabled"` // Defaults to true
HighlightResult Map `json:"_highlightResult,omitempty"`
ObjectID string `json:"objectID,omitempty"`
Validity []TimeRange `json:"validity,omitempty"`
isExplicitlyDisabled bool
}
func (r *Rule) Enable() {
r.Enabled = true
}
func (r *Rule) Disable() {
r.Enabled = false
r.isExplicitlyDisabled = true
}
func (r *Rule) enableImplicitly() {
if !r.isExplicitlyDisabled {
r.Enable()
}
}
// RuleCondition is the part of an Algolia Rule which describes the condition
// for the rule. The `Context` is optional, hence, it will get ignored if an
// empty string is used to set it.
type RuleCondition struct {
Anchoring RulePatternAnchoring `json:"anchoring"`
Pattern string `json:"pattern"`
Context string `json:"context,omitempty"`
}
type RulePatternAnchoring string
const (
Is RulePatternAnchoring = "is"
StartsWith RulePatternAnchoring = "startsWith"
EndsWith RulePatternAnchoring = "endsWith"
Contains RulePatternAnchoring = "contains"
)
// NewSimpleRuleCondition generates a RuleCondition where only the `Anchoring`
// and `Pattern` fields are specified. The optional `Context` field is then
// excluded.
func NewSimpleRuleCondition(anchoring RulePatternAnchoring, pattern string) RuleCondition {
return NewRuleCondition(anchoring, pattern, "")
}
// NewRuleCondition generates a RuleCondition where all the possible fields can
// be specified.
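//
// Example (illustrative; the pattern and context values are placeholders):
//
//	cond := NewRuleCondition(StartsWith, "iphone", "mobile")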
func NewRuleCondition(anchoring RulePatternAnchoring, pattern, context string) RuleCondition {
return RuleCondition{
Anchoring: anchoring,
Pattern: pattern,
Context: context,
}
}
type RuleConsequence struct {
Params Map `json:"params,omitempty"`
Promote []PromotedObject `json:"promote,omitempty"`
Hide []HiddenObject `json:"hide,omitempty"`
UserData interface{} `json:"userData,omitempty"`
}
// AutomaticFacetFilter describes an automatic facet filter that can be set in
// a Rule consequence's params.
type AutomaticFacetFilter struct {
Facet string `json:"facet"`
Disjunctive bool `json:"disjunctive"` // Defaults to false
Score int `json:"score"`
}
// QueryIncrementalEdit can be used as a value for the `query` key when used in
// the `RuleConsequence.Params` map. It is used to remove specific words from
// the original query string.
//
// Deprecated: Use `DeleteEdit` instead. More specifically, code previously
// written this way:
//
//   consequence := algoliasearch.RuleConsequence{
// Params: algoliasearch.Map{
// "query": algoliasearch.QueryIncrementalEdit{
// Remove: []string{"term1", "term2"},
// },
// },
// }
//
// should now be written:
//
// consequence := algoliasearch.RuleConsequence{
// Params: algoliasearch.Map{
// "query": algoliasearch.Map{
// "edits": []algoliasearch.Edit{
// algoliasearch.DeleteEdit("term1"),
// algoliasearch.DeleteEdit("term2"),
// },
// },
// },
// }
//
type QueryIncrementalEdit struct {
Remove []string `json:"remove"`
}
type Edit struct {
Type string `json:"type"`
Delete string `json:"delete"`
Insert string `json:"insert,omitempty"`
}
// DeleteEdit returns a new `Edit` instance used to remove the given `word`
// from an original query when used as a `RuleConsequence.Params`.
func DeleteEdit(word string) Edit {
return Edit{
Type: "remove",
Delete: word,
}
}
// ReplaceEdit returns a new `Edit` instance used to replace the given `old`
// term with `new` in a query when used as a `RuleConsequence.Params`.
func ReplaceEdit(old, new string) Edit {
return Edit{
Type: "replace",
Delete: old,
Insert: new,
}
}
type PromotedObject struct {
ObjectID string `json:"objectID"`
Position int `json:"position"`
}
type HiddenObject struct {
ObjectID string `json:"objectID"`
}
type SaveRuleRes struct {
TaskID int `json:"taskID"`
UpdatedAt string `json:"updatedAt"`
}
type BatchRulesRes struct {
TaskID int `json:"taskID"`
UpdatedAt string `json:"updatedAt"`
}
type DeleteRuleRes struct {
TaskID int `json:"taskID"`
UpdatedAt string `json:"updatedAt"`
}
type ClearRulesRes struct {
TaskID int `json:"taskID"`
UpdatedAt string `json:"updatedAt"`
}
type TimeRange struct {
From time.Time
Until time.Time
}
type timeRangeResponse struct {
From int64 `json:"from"`
Until int64 `json:"until"`
}
func (r TimeRange) MarshalJSON() ([]byte, error) {
data := fmt.Sprintf(
`{"from":%d,"until":%d}`,
r.From.Unix(),
r.Until.Unix(),
)
return []byte(data), nil
}
func (r *TimeRange) UnmarshalJSON(b []byte) error {
var res timeRangeResponse
err := json.Unmarshal(b, &res)
if err != nil {
return fmt.Errorf("cannot unmarshal integer values of time range: %s", err)
}
r.From = time.Unix(res.From, 0)
r.Until = time.Unix(res.Until, 0)
return nil
}
type SearchRulesRes struct {
Hits []Rule `json:"hits"`
NbHits int `json:"nbHits"`
Page int `json:"page"`
NbPages int `json:"nbPages"`
}
|
[
1
] |
package dsp
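// Lerp linearly interpolates between min and max; in is the interpolation
// factor, so 0 yields min and 1 yields max.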
func Lerp(in, min, max Float64) Float64 {
return in*(max-min) + min
}
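// RollingAverage approximates a moving average with an exponential filter:
// each Tick blends the new sample in with weight 1/Window.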
type RollingAverage struct {
Window int
value Float64
}
func (a *RollingAverage) Tick(in Float64) Float64 {
a.value -= a.value / Float64(a.Window)
a.value += in / Float64(a.Window)
return a.value
}
|
[
1
] |
package status
import (
"fmt"
"github.com/vvhq/exorsus/configuration"
"sync"
"sync/atomic"
"time"
)
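// IOStdStore is a thread-safe, bounded store of timestamped output lines;
// once more than max lines have been appended, the oldest ones are discarded.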
type IOStdStore struct {
max int
warehouse []string
lock sync.RWMutex
}
func (store *IOStdStore) Append(item string) {
item = fmt.Sprintf("%s%s%s %s",
configuration.DefaultStdDatePrefix,
time.Now().Format(configuration.DefaultStdDateLayout),
configuration.DefaultStdDateSuffix,
item)
store.lock.Lock()
defer store.lock.Unlock()
store.warehouse = append(store.warehouse, item)
if len(store.warehouse) > store.max {
shifted := make([]string, store.max)
idx := len(store.warehouse) - store.max
copy(shifted, store.warehouse[idx:])
store.warehouse = shifted
}
}
func (store *IOStdStore) List() []string {
store.lock.RLock()
defer store.lock.RUnlock()
if len(store.warehouse) == 0 {
return []string{}
}
var warehouse []string
if len(store.warehouse) > store.max {
warehouse = make([]string, store.max)
copy(warehouse, store.warehouse[(len(store.warehouse)-store.max):])
} else {
warehouse = make([]string, len(store.warehouse))
copy(warehouse, store.warehouse)
}
return warehouse
}
func NewIOStdStore(max int) *IOStdStore {
return &IOStdStore{max: max}
}
const Stopped int = 0
const Started int = 1
const Stopping int = 2
const Starting int = 3
const Failed int = 4
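// Status tracks the runtime state of a managed process: its pid, exit code,
// lifecycle state, startup error and captured stdout/stderr lines.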
type Status struct {
pid int32
code int32
state int32
startupError error
stdOutStore *IOStdStore
stdErrStore *IOStdStore
lock sync.RWMutex
}
func (status *Status) SetPid(pid int) {
atomic.SwapInt32(&status.pid, int32(pid))
}
func (status *Status) GetPid() int {
return int(atomic.LoadInt32(&status.pid))
}
func (status *Status) SetExitCode(code int) {
atomic.SwapInt32(&status.code, int32(code))
}
func (status *Status) GetExitCode() int {
return int(atomic.LoadInt32(&status.code))
}
func (status *Status) SetState(state int) {
atomic.SwapInt32(&status.state, int32(state))
}
func (status *Status) GetState() int {
return int(atomic.LoadInt32(&status.state))
}
func (status *Status) SetError(startupError error) {
status.lock.Lock()
defer status.lock.Unlock()
status.startupError = startupError
}
func (status *Status) GetError() error {
status.lock.RLock()
defer status.lock.RUnlock()
return status.startupError
}
func (status *Status) AddStdOutItem(item string) {
status.stdOutStore.Append(item)
}
func (status *Status) ListStdOutItems() []string {
return status.stdOutStore.List()
}
func (status *Status) AddStdErrItem(item string) {
status.stdErrStore.Append(item)
}
func (status *Status) ListStdErrItems() []string {
return status.stdErrStore.List()
}
func New(max int) *Status {
return &Status{pid: 0, code: 0, startupError: nil, state: int32(Stopped), stdOutStore: NewIOStdStore(max), stdErrStore: NewIOStdStore(max)}
}
|
[
1
] |
package gossip
import (
"sync"
"github.com/iotaledger/goshimmer/packages/model/meta_transaction"
"github.com/iotaledger/hive.go/daemon"
"github.com/iotaledger/hive.go/events"
"github.com/iotaledger/hive.go/node"
)
// region plugin module setup //////////////////////////////////////////////////////////////////////////////////////////
func configureSendQueue(plugin *node.Plugin) {
for _, neighbor := range neighbors.GetMap() {
setupEventHandlers(neighbor)
}
Events.AddNeighbor.Attach(events.NewClosure(setupEventHandlers))
daemon.Events.Shutdown.Attach(events.NewClosure(func() {
log.Info("Stopping Send Queue Dispatcher ...")
}))
}
func runSendQueue(plugin *node.Plugin) {
log.Info("Starting Send Queue Dispatcher ...")
daemon.BackgroundWorker("Gossip Send Queue Dispatcher", func() {
log.Info("Starting Send Queue Dispatcher ... done")
for {
select {
case <-daemon.ShutdownSignal:
log.Info("Stopping Send Queue Dispatcher ... done")
return
case tx := <-sendQueue:
connectedNeighborsMutex.RLock()
for _, neighborQueue := range neighborQueues {
select {
case neighborQueue.queue <- tx:
					// transaction queued for this neighbor
default:
					// queue full: drop the transaction for this neighbor
}
}
connectedNeighborsMutex.RUnlock()
}
}
})
connectedNeighborsMutex.Lock()
for _, neighborQueue := range neighborQueues {
startNeighborSendQueue(neighborQueue.protocol.Neighbor, neighborQueue)
}
connectedNeighborsMutex.Unlock()
}
// endregion ///////////////////////////////////////////////////////////////////////////////////////////////////////////
// region public api ///////////////////////////////////////////////////////////////////////////////////////////////////
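// SendTransaction queues a transaction for broadcast to every connected neighbor.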
func SendTransaction(transaction *meta_transaction.MetaTransaction) {
sendQueue <- transaction
}
func (neighbor *Neighbor) SendTransaction(transaction *meta_transaction.MetaTransaction) {
	// neighborQueues is guarded by connectedNeighborsMutex everywhere else,
	// so take the read lock here as well to avoid a data race.
	connectedNeighborsMutex.RLock()
	defer connectedNeighborsMutex.RUnlock()
	if queue, exists := neighborQueues[neighbor.GetIdentity().StringIdentifier]; exists {
		select {
		case queue.queue <- transaction:
			return
		default:
			return
		}
	}
}
// endregion ///////////////////////////////////////////////////////////////////////////////////////////////////////////
// region utility methods //////////////////////////////////////////////////////////////////////////////////////////////
func setupEventHandlers(neighbor *Neighbor) {
neighbor.Events.ProtocolConnectionEstablished.Attach(events.NewClosure(func(protocol *protocol) {
queue := &neighborQueue{
protocol: protocol,
queue: make(chan *meta_transaction.MetaTransaction, SEND_QUEUE_SIZE),
disconnectChan: make(chan int, 1),
}
connectedNeighborsMutex.Lock()
neighborQueues[neighbor.GetIdentity().StringIdentifier] = queue
connectedNeighborsMutex.Unlock()
protocol.Conn.Events.Close.Attach(events.NewClosure(func() {
close(queue.disconnectChan)
connectedNeighborsMutex.Lock()
delete(neighborQueues, neighbor.GetIdentity().StringIdentifier)
connectedNeighborsMutex.Unlock()
}))
if daemon.IsRunning() {
startNeighborSendQueue(neighbor, queue)
}
}))
}
func startNeighborSendQueue(neighbor *Neighbor, neighborQueue *neighborQueue) {
daemon.BackgroundWorker("Gossip Send Queue ("+neighbor.GetIdentity().StringIdentifier+")", func() {
for {
select {
case <-daemon.ShutdownSignal:
return
case <-neighborQueue.disconnectChan:
return
case tx := <-neighborQueue.queue:
switch neighborQueue.protocol.Version {
case VERSION_1:
sendTransactionV1(neighborQueue.protocol, tx)
}
}
}
})
}
// endregion ///////////////////////////////////////////////////////////////////////////////////////////////////////////
// region types and interfaces /////////////////////////////////////////////////////////////////////////////////////////
type neighborQueue struct {
protocol *protocol
queue chan *meta_transaction.MetaTransaction
disconnectChan chan int
}
// endregion ///////////////////////////////////////////////////////////////////////////////////////////////////////////
// region constants and variables //////////////////////////////////////////////////////////////////////////////////////
var neighborQueues = make(map[string]*neighborQueue)
var connectedNeighborsMutex sync.RWMutex
var sendQueue = make(chan *meta_transaction.MetaTransaction, SEND_QUEUE_SIZE)
const (
SEND_QUEUE_SIZE = 500
)
// endregion ///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
[
7
] |
package pgnull
import (
"database/sql"
"database/sql/driver"
"encoding/json"
"strconv"
)
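// NullInt is a nullable int64 that serializes to JSON null when invalid and
// implements sql.Scanner and driver.Valuer for database use.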
type NullInt sql.NullInt64
func (i NullInt) MarshalJSON() ([]byte, error) {
if i.Valid {
return json.Marshal(i.Int64)
}
return json.Marshal(nil)
}
func (i *NullInt) UnmarshalJSON(bt []byte) error {
xyz := string(bt)
if xyz == "null" {
i.Int64 = 0
i.Valid = false
return nil
}
v, err := strconv.Atoi(xyz)
if err != nil {
i.Int64 = 0
i.Valid = false
return err
}
i.Int64 = int64(v)
i.Valid = true
return nil
}
func (i *NullInt) Scan(value interface{}) error {
	switch v := value.(type) {
	case int64:
		i.Int64 = v
		i.Valid = true
	default:
		// NULL (nil) and any unsupported driver type are stored as invalid.
		i.Int64 = 0
		i.Valid = false
	}
	return nil
}
func (i NullInt) Value() (driver.Value, error) {
if !i.Valid {
return nil, nil
}
return i.Int64, nil
}
func NewNullInt(a int) NullInt {
return NullInt{int64(a), true}
}
func NullIntIsEqual(a, b NullInt) bool {
if !a.Valid && !b.Valid {
return true
}
if a.Valid != b.Valid {
return false
}
return a.Int64 == b.Int64
}
|
[
7
] |
package gurucomplete
import (
"fmt"
"strconv"
"strings"
"github.com/leeola/gokakoune/util"
)
const guruBin = "guru"
const (
lineUnknown int = iota
lineMethod
lineField
)
type Completion struct {
Completion string
File string
LineNo int
Column int
}
// GuruComplete returns a set of completions for the given file/pos.
//
// NOTE(leeola): This function is not well optimized in any sense. It is parsing
// a slower output format from guru, not quite intended for code completion.
// Despite this, the function exists because I wanted code completion from syntax,
// and gocode wasn't working well enough for me. I'll likely be writing my own
// code completion eventually, unless Guru adds a native version of it.
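//
// Example (illustrative; the file path and byte offset are placeholders):
//
//	completions, err := GuruComplete("main.go", 1234)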
func GuruComplete(filepath string, byteOffset int) ([]Completion, error) {
stdout, _, exit, err := util.Exec(
"guru", "describe", fmt.Sprintf("%s:#%d", filepath, byteOffset))
if err != nil {
return nil, err
}
if exit != 0 {
return nil, fmt.Errorf("non-zero exit: %d", exit)
}
var (
completions []Completion
lineState int
)
for i, line := range strings.Split(stdout, "\n") {
if line == "" {
break
}
split := strings.SplitN(line, ":", 3)
if len(split) < 3 {
			return nil, fmt.Errorf("unexpected column format in line: %d", i)
}
file, pos, desc := split[0], split[1], split[2]
switch desc {
case " Methods:":
lineState = lineMethod
continue
}
switch lineState {
case lineMethod:
desc = trimMethodPrefix(desc)
// i believe [0] access is safe, i don't think Split will ever
// return less than 1.
sPos := strings.SplitN(pos, "-", 2)[0]
sPosSplit := strings.SplitN(sPos, ".", 2)
if len(sPosSplit) < 2 {
return nil, fmt.Errorf("unexpected line.col format: %q", sPos)
}
lineNo, err := strconv.Atoi(sPosSplit[0])
if err != nil {
				return nil, fmt.Errorf("failed to convert lineNo to int: %q", sPosSplit[0])
}
col, err := strconv.Atoi(sPosSplit[1])
if err != nil {
				return nil, fmt.Errorf("failed to convert col to int: %q", sPosSplit[1])
}
completions = append(completions, Completion{
// TODO(leeola): remove formatting on the desc for
// methods. Guru adds lots of indentation, a method
// prefix, etc.
Completion: desc,
File: file,
LineNo: lineNo,
Column: col,
})
}
}
return completions, nil
}
func trimMethodPrefix(desc string) string {
var spaceCount int
for i, ru := range desc {
switch ru {
case ' ':
spaceCount++
}
if spaceCount == 3 {
return desc[i+1:]
}
}
return ""
}
|
[
7
] |
/*-
* Copyright © 2018, 1&1 Internet SE
* All rights reserved.
*
* Use of this source code is governed by a 2-clause BSD license
* that can be found in the LICENSE file.
*/
package wall // import "github.com/solnx/eye/lib/eye.wall"
import proto "github.com/solnx/eye/lib/eye.proto"
// Activate marks a profile as active if l detected an API version that
// supports profile Activation
func (l *Lookup) Activate(profileID string) error {
// apiVersion is not initialized, run a quick tasting
if l.apiVersion == proto.ProtocolInvalid {
l.taste(true)
}
switch l.apiVersion {
case proto.ProtocolTwo:
return l.v2ActivateProfile(profileID)
}
return ErrProtocol
}
// PendingActivation returns the currently pending activations
func (l *Lookup) PendingActivation() (*proto.Result, error) {
// apiVersion is not initialized, run a quick tasting
if l.apiVersion == proto.ProtocolInvalid {
l.taste(true)
}
switch l.apiVersion {
case proto.ProtocolTwo:
return l.v2PendingActivation()
}
return nil, ErrProtocol
}
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
|
[
7
] |
package main
import (
"fmt"
"image"
)
func main() {
serial := 7672
fmt.Printf("Answer (Part 1): %s\n", PartOne(serial))
fmt.Printf("Answer (Part 2): %s\n", PartTwo(serial))
}
func PartOne(serial int) image.Point {
var (
bestpower int
bestcell image.Point
)
ForEachKernel(3, image.Rect(1, 1, 300, 300), func(kernel image.Rectangle) {
var power int
ForEachCell(kernel, func(cell image.Point) {
power += Power(cell, serial)
})
if power > bestpower {
bestpower = power
bestcell = kernel.Min
}
})
return bestcell
}
func PartTwo(serial int) PartTwoResult {
var (
result = make(chan PartTwoResult)
best PartTwoResult
)
go PartTwoRange(serial, 1, 50, result)
go PartTwoRange(serial, 51, 100, result)
go PartTwoRange(serial, 101, 150, result)
go PartTwoRange(serial, 151, 200, result)
go PartTwoRange(serial, 201, 250, result)
go PartTwoRange(serial, 251, 300, result)
for i := 0; i < 6; i++ {
if res := <-result; res.Power > best.Power {
best = res
}
}
return best
}
type PartTwoResult struct {
Point image.Point
Power int
Size int
}
func (r PartTwoResult) String() string {
return fmt.Sprintf("%s, %d", r.Point, r.Size)
}
func PartTwoRange(serial, min, max int, out chan PartTwoResult) {
var (
bestpower int
bestsize int
bestcell image.Point
)
for size := min; size <= max; size++ {
ForEachKernel(size, image.Rect(1, 1, 300, 300), func(kernel image.Rectangle) {
var power int
ForEachCell(kernel, func(cell image.Point) {
power += Power(cell, serial)
})
if power > bestpower {
bestpower = power
bestcell = kernel.Min
bestsize = size
}
})
}
out <- PartTwoResult{bestcell, bestpower, bestsize}
}
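// ForEachKernel slides a size×size window across bounds, left to right and
// top to bottom, calling f for every window position.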
func ForEachKernel(size int, bounds image.Rectangle, f func(r image.Rectangle)) {
kernel := image.Rect(0, 0, size-1, size-1).Add(bounds.Min)
for kernel.Max.Y <= bounds.Max.Y {
f(kernel)
kernel = kernel.Add(image.Pt(1, 0))
if kernel.Max.X > bounds.Max.X {
dx := kernel.Min.X - bounds.Min.X
kernel = kernel.Add(image.Pt(-dx, 1))
}
}
}
func ForEachCell(r image.Rectangle, f func(p image.Point)) {
for x := r.Min.X; x <= r.Max.X; x++ {
for y := r.Min.Y; y <= r.Max.Y; y++ {
f(image.Pt(x, y))
}
}
}
func Power(cell image.Point, serial int) int {
// Find the fuel cell's rack ID, which is its X coordinate plus 10.
rackID := cell.X + 10
// Begin with a power level of the rack ID times the Y coordinate.
power := rackID * cell.Y
// Increase the power level by the value of the grid serial number (your puzzle input)
power += serial
// Set the power level to itself multiplied by the rack ID.
power *= rackID
// Keep only the hundreds digit of the power level (so 12345 becomes 3; numbers with no hundreds digit become 0).
power = (power / 100) % 10
// Subtract 5 from the power level.
power -= 5
return power
}
|
[
1
] |
package userservice
import (
"github.com/Myriad-Dreamin/blog-backend-v2/control/auth"
base_service "github.com/Myriad-Dreamin/blog-backend-v2/lib/base-service"
"github.com/Myriad-Dreamin/blog-backend-v2/lib/serial"
"github.com/Myriad-Dreamin/blog-backend-v2/types"
"github.com/Myriad-Dreamin/minimum-lib/controller"
)
type PostRequest struct {
}
func (srv *Service) SerializePost(c controller.MContext) base_service.CRUDEntity {
panic("abort")
}
type PostReplyI interface {
GetID() uint
}
func (srv *Service) AfterPost(reply PostReplyI) interface{} {
if b, err := auth.UserEntity.AddReadPolicy(srv.enforcer, auth.UserEntity.CreateObj(reply.GetID()), reply.GetID()); err != nil {
if !b {
srv.logger.Debug("add failed")
}
return serial.ErrorSerializer{
Code: types.CodeAddReadPrivilegeError,
Error: err.Error(),
}
} else {
if !b {
srv.logger.Debug("add failed")
}
}
if b, err := auth.UserEntity.AddWritePolicy(srv.enforcer, auth.UserEntity.CreateObj(reply.GetID()), reply.GetID()); err != nil {
if !b {
srv.logger.Debug("add failed")
}
return serial.ErrorSerializer{
Code: types.CodeAddWritePrivilegeError,
Error: err.Error(),
}
} else {
if !b {
srv.logger.Debug("add failed")
}
}
return reply
}
|
[
4
] |
package main
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// Complete the hourglassSum function below.
func sum(arr []int32) int32 {
var result int32
for _, v := range arr {
result += v
}
return result
}
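// minValue returns a lower bound for any hourglass sum: seven times the most
// negative element, or 0 when no element is negative.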
func minValue(arr [][]int32) int32 {
var min int32
for i := 0; i < len(arr); i++ {
for j := 0; j < len(arr[0]); j++ {
if arr[i][j] < min {
min = arr[i][j]
}
}
}
if min < 0 {
return min * 7
}
return 0
}
func hourglassSum(arr [][]int32) int32 {
result := minValue(arr)
for i := 0; i <= len(arr)-3; i++ {
for j := 0; j <= len(arr[0])-3; j++ {
buf := sum(arr[i][j:j+3]) + arr[i+1][j+1] + sum(arr[i+2][j:j+3])
if buf > result {
result = buf
}
}
}
return result
}
func main() {
reader := bufio.NewReaderSize(os.Stdin, 1024 * 1024)
stdout, err := os.Create(os.Getenv("OUTPUT_PATH"))
checkError(err)
defer stdout.Close()
writer := bufio.NewWriterSize(stdout, 1024 * 1024)
var arr [][]int32
for i := 0; i < 6; i++ {
arrRowTemp := strings.Split(readLine(reader), " ")
var arrRow []int32
for _, arrRowItem := range arrRowTemp {
arrItemTemp, err := strconv.ParseInt(arrRowItem, 10, 64)
checkError(err)
arrItem := int32(arrItemTemp)
arrRow = append(arrRow, arrItem)
}
if len(arrRow) != int(6) {
panic("Bad input")
}
arr = append(arr, arrRow)
}
result := hourglassSum(arr)
fmt.Fprintf(writer, "%d\n", result)
writer.Flush()
}
func readLine(reader *bufio.Reader) string {
str, _, err := reader.ReadLine()
if err == io.EOF {
return ""
}
return strings.TrimRight(string(str), "\r\n")
}
func checkError(err error) {
if err != nil {
panic(err)
}
}
|
[
1
] |
package main
import (
"fmt"
"os"
"strconv"
)
func main() {
if len(os.Args) == 1 {
fmt.Println("Please give one or more floats.")
os.Exit(1)
}
var min, max float64
b := false
	for _, arg := range os.Args[1:] {
		n, err := strconv.ParseFloat(arg, 64)
		if err != nil {
			continue
		}
		if !b {
			// Seed min and max with the first valid float so that
			// all-positive or all-negative inputs are handled correctly.
			min, max = n, n
			b = true
			continue
		}
		if n < min {
			min = n
		}
		if n > max {
			max = n
		}
	}
if b {
fmt.Println("Min:", min)
fmt.Println("Max:", max)
} else {
fmt.Println("None of the arguments is a float!")
}
}
|
[
1
] |
package main
import (
"fmt"
"gameswithgo/gogl"
"github.com/go-gl/gl/v3.3-core/gl"
"github.com/veandco/go-sdl2/sdl"
)
// https://learnopengl.com/ - nice OpenGL learning source (requires C knowledge)
const winWidth = 1280
const winHeight = 720
func main() {
err := sdl.Init(sdl.INIT_EVERYTHING)
if err != nil {
panic(err)
}
defer sdl.Quit()
window, err := sdl.CreateWindow("Hello triangle!", 200, 200, winWidth, winHeight, sdl.WINDOW_OPENGL)
if err != nil {
panic(err)
}
_, err = window.GLCreateContext()
if err != nil {
panic(err)
}
defer window.Destroy()
	if err := gl.Init(); err != nil {
		panic(err)
	}
fmt.Println(gogl.GetVersion())
shaderProgram, err := gogl.NewShader("hellotriangle/shaders/hello.vert", "hellotriangle/shaders/hello.frag")
if err != nil {
panic(err)
}
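	// Each vertex is five floats: position (x, y, z) followed by texture
	// coordinates (u, v), so the stride is 5*4 bytes.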
vertices := []float32{
0.5, 0.5, 0.0, 1.0, 1.0,
0.5, -0.5, 0.0, 1.0, 0.0,
-0.5, -0.5, 0.0, 0.0, 0.0,
-0.5, 0.5, 0.0, 0.0, 1.0}
	indices := []uint32{
		0, 1, 3, // triangle 1
		1, 2, 3, // triangle 2
	}
gogl.GenBindBuffer(gl.ARRAY_BUFFER)
VAO := gogl.GenBindVertexArray()
gogl.BufferDataFloat(gl.ARRAY_BUFFER, vertices, gl.STATIC_DRAW)
gogl.GenBindBuffer(gl.ELEMENT_ARRAY_BUFFER)
gogl.BufferDataInt(gl.ELEMENT_ARRAY_BUFFER, indices, gl.STATIC_DRAW)
	gl.VertexAttribPointer(0, 3, gl.FLOAT, false, 5*4, nil)
	gl.EnableVertexAttribArray(0)
	// Texture coordinates start after the three position floats (byte offset 3*4),
	// and attribute 2 must be enabled as well before the shader can read it.
	gl.VertexAttribPointer(2, 2, gl.FLOAT, false, 5*4, gl.PtrOffset(3*4))
	gl.EnableVertexAttribArray(2)
gogl.UnbindVertexArray()
for {
for event := sdl.PollEvent(); event != nil; event = sdl.PollEvent() {
switch event.(type) {
case *sdl.QuitEvent:
return
}
}
gl.ClearColor(0, 0, 0, 0)
gl.Clear(gl.COLOR_BUFFER_BIT)
shaderProgram.Use()
gogl.BindVertexArray(VAO)
gl.DrawElements(gl.TRIANGLES, 6, gl.UNSIGNED_INT,gl.PtrOffset(0))
window.GLSwap()
shaderProgram.CheckShadersForChanges()
}
}
|
[
7
] |
package main
import (
	"fmt"
	"strings"
	"github.com/xlab/treeprint"
	"github.com/Knetic/govaluate"
	"github.com/muhqu/go-gherkin"
	"github.com/muhqu/go-gherkin/nodes"
)
func EvaluateExpression(expression string, parameters map[string]interface{}) (interface{}, error) {
var err error
var res interface{}
var exp *govaluate.EvaluableExpression
if exp, err = govaluate.NewEvaluableExpression(expression); err == nil {
res, err = exp.Evaluate(parameters)
return res, err
}
return nil, err
}
func ParseGiven(step nodes.StepNode, params map[string]interface{}, example interface{}) map[string]interface{} {
var indexMap map[string]int = make(map[string]int)
params["example"] = example
if table := step.Table(); table != nil {
rows := table.Rows()
rowLen := len(rows)
if rowLen > 1 {
commandType := strings.TrimSpace(strings.ToLower(step.Text()))
for i, rowIndexes := range rows[0] {
indexMap[rowIndexes] = i + 1
}
switch commandType {
case GIVEN_CMD_DEFINE:
for r := 1; r < rowLen; r++ {
row := rows[r]
name, instance := GetInstanceFor(indexMap, row)
params[name] = instance
}
}
}
}
return params
}
func ParseStep(step nodes.StepNode, params map[string]interface{}) (interface{}, error) {
stepText := step.Text()
method := index[stepText]
if method != nil {
return method(), nil
} else {
result, err := EvaluateExpression(stepText, params)
return result, err
}
}
func LoadFeature(featureDef string) {
var err error
var feature nodes.FeatureNode
if feature, err = gherkin.ParseGherkinFeature(featureDef); err != nil {
return
}
tree := treeprint.New()
params := make(map[string]interface{})
scenarioLoop:
for _, scenario := range feature.Scenarios() {
scenarioTags := scenario.Tags()
for _, tag := range scenarioTags {
tagProps := strings.Split(tag, "::")
for _, prop := range tagProps {
if prop == "Rollback" {
continue scenarioLoop
}
}
}
var name string
var example interface{}
var examples nodes.OutlineExamplesNode
var indexMap map[string]int = make(map[string]int)
exampleIndex, exampleCount := 1, 0
if outline, ok := scenario.(nodes.OutlineNode); ok {
examples = outline.Examples()
exampleCount = len(examples.Table().Rows()) - 1
}
if exampleCount > 0 {
rows := examples.Table().Rows()
for i, rowIndexes := range rows[0] {
indexMap[rowIndexes] = i + 1
}
}
runOutline:
var featureBranch treeprint.Tree
ftitle, stitle := feature.Title(), scenario.Title()
if exampleCount > 0 {
exampleData := examples.Table().Rows()[exampleIndex]
name, example = GetInstanceFor(indexMap, exampleData)
featureBranch = tree.AddBranch(fmt.Sprintf("Feature <%s>: %s", name, ftitle))
} else {
featureBranch = tree.AddBranch(fmt.Sprintf("Feature: %s", ftitle))
}
scenarioBranch := featureBranch.AddBranch(fmt.Sprintf("Scenario: %s", stitle))
stepIteration:
for _, step := range scenario.Steps() {
switch step.StepType() {
case "Given":
params = ParseGiven(step, params, example)
case "Then", "When", "And", "Or", "But":
r, err := ParseStep(step, params)
scenarioBranch.AddNode(fmt.Sprintf("Step: %s", step.Text()))
if err != nil {
scenarioBranch.AddNode(fmt.Sprintf("Fail: %v", err))
break stepIteration
}
switch result := r.(type) {
case bool:
if !result {
scenarioBranch.AddNode("Fail: Step returned false")
break stepIteration
}
}
}
}
if exampleIndex < exampleCount {
exampleIndex += 1
goto runOutline
}
}
fmt.Println(tree.String())
}
|
[
7
] |
package cache
import (
"scoremanager/utils"
)
type CacheOp interface {
Get(key string) (interface{}, error)
Set(key string, value interface{}, options ...CacheOptions) error
Delete(key string) error
String(obj interface{}) (string, error)
}
type CacheOption struct {
Ex bool
ExTime int
}
type CacheOptions func(*CacheOption)
func WithEx(seconds int) CacheOptions {
return func(co *CacheOption) {
co.Ex = true
co.ExTime = seconds
}
}
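// NewCacheOp returns the cache backend selected by the CACHE_TYPE environment
// variable (defaulting to "redis"); an unknown type yields a nil CacheOp.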
func NewCacheOp() CacheOp {
cacheType := utils.GetEnvDefault("CACHE_TYPE", "redis")
var cacheOp CacheOp
switch cacheType {
case "redis":
cacheOp = &RedisPool
}
return cacheOp
}
|
[
7
] |
package rule
import (
"go/ast"
"go/token"
"github.com/mgechev/revive/lint"
)
// ConstantLogicalExprRule warns on constant logical expressions.
type ConstantLogicalExprRule struct{}
// Apply applies the rule to given file.
func (r *ConstantLogicalExprRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure {
var failures []lint.Failure
onFailure := func(failure lint.Failure) {
failures = append(failures, failure)
}
astFile := file.AST
w := &lintConstantLogicalExpr{astFile, onFailure}
ast.Walk(w, astFile)
return failures
}
// Name returns the rule name.
func (*ConstantLogicalExprRule) Name() string {
return "constant-logical-expr"
}
type lintConstantLogicalExpr struct {
file *ast.File
onFailure func(lint.Failure)
}
func (w *lintConstantLogicalExpr) Visit(node ast.Node) ast.Visitor {
switch n := node.(type) {
case *ast.BinaryExpr:
if !w.isOperatorWithLogicalResult(n.Op) {
return w
}
if gofmt(n.X) != gofmt(n.Y) { // check if subexpressions are the same
return w
}
// Handles cases like: a <= a, a == a, a >= a
if w.isEqualityOperator(n.Op) {
w.newFailure(n, "expression always evaluates to true")
return w
}
// Handles cases like: a < a, a > a, a != a
if w.isInequalityOperator(n.Op) {
w.newFailure(n, "expression always evaluates to false")
return w
}
w.newFailure(n, "left and right hand-side sub-expressions are the same")
}
return w
}
func (w *lintConstantLogicalExpr) isOperatorWithLogicalResult(t token.Token) bool {
switch t {
case token.LAND, token.LOR, token.EQL, token.LSS, token.GTR, token.NEQ, token.LEQ, token.GEQ:
return true
}
return false
}
func (w *lintConstantLogicalExpr) isEqualityOperator(t token.Token) bool {
switch t {
case token.EQL, token.LEQ, token.GEQ:
return true
}
return false
}
func (w *lintConstantLogicalExpr) isInequalityOperator(t token.Token) bool {
switch t {
case token.LSS, token.GTR, token.NEQ:
return true
}
return false
}
func (w lintConstantLogicalExpr) newFailure(node ast.Node, msg string) {
w.onFailure(lint.Failure{
Confidence: 1,
Node: node,
Category: "logic",
Failure: msg,
})
}
|
[
7
] |
/*-
* Copyright (c) 2016-2018, Jörg Pernfuß
*
* Use of this source code is governed by a 2-clause BSD license
* that can be found in the LICENSE file.
*/
package rest // import "github.com/mjolnir42/soma/internal/rest"
import (
"fmt"
"net/http"
"github.com/julienschmidt/httprouter"
"github.com/mjolnir42/soma/internal/msg"
"github.com/mjolnir42/soma/lib/proto"
)
// RepositoryConfigList function
func (x *Rest) RepositoryConfigList(w http.ResponseWriter, r *http.Request,
params httprouter.Params) {
defer panicCatcher(w)
request := msg.New(r, params)
request.Section = msg.SectionRepositoryConfig
request.Action = msg.ActionList
if !x.isAuthorized(&request) {
x.replyForbidden(&w, &request)
return
}
x.handlerMap.MustLookup(&request).Intake() <- request
result := <-request.Reply
x.send(&w, &result)
}
// RepositoryConfigSearch function
func (x *Rest) RepositoryConfigSearch(w http.ResponseWriter, r *http.Request,
params httprouter.Params) {
defer panicCatcher(w)
request := msg.New(r, params)
request.Section = msg.SectionRepositoryConfig
request.Action = msg.ActionSearch
cReq := proto.NewRepositoryFilter()
if err := decodeJSONBody(r, &cReq); err != nil {
x.replyBadRequest(&w, &request, err)
return
}
switch {
case cReq.Filter.Repository.ID != ``:
case cReq.Filter.Repository.Name != ``:
case cReq.Filter.Repository.TeamID != ``:
case cReq.Filter.Repository.FilterOnIsDeleted:
case cReq.Filter.Repository.FilterOnIsActive:
default:
x.replyBadRequest(&w, &request, fmt.Errorf(`RepositorySearch request without condition`))
return
}
request.Search.Repository.ID = cReq.Filter.Repository.ID
request.Search.Repository.Name = cReq.Filter.Repository.Name
request.Search.Repository.TeamID = cReq.Filter.Repository.TeamID
request.Search.Repository.IsDeleted = cReq.Filter.Repository.IsDeleted
request.Search.Repository.IsActive = cReq.Filter.Repository.IsActive
request.Search.Repository.FilterOnIsDeleted = cReq.Filter.Repository.FilterOnIsDeleted
request.Search.Repository.FilterOnIsActive = cReq.Filter.Repository.FilterOnIsActive
if !x.isAuthorized(&request) {
x.replyForbidden(&w, &request)
return
}
x.handlerMap.MustLookup(&request).Intake() <- request
result := <-request.Reply
x.send(&w, &result)
}
// RepositoryConfigShow function
func (x *Rest) RepositoryConfigShow(w http.ResponseWriter, r *http.Request,
params httprouter.Params) {
defer panicCatcher(w)
request := msg.New(r, params)
request.Section = msg.SectionRepositoryConfig
request.Action = msg.ActionShow
request.Repository.ID = params.ByName(`repositoryID`)
request.Repository.TeamID = params.ByName(`teamID`)
if !x.isAuthorized(&request) {
x.replyForbidden(&w, &request)
return
}
x.handlerMap.MustLookup(&request).Intake() <- request
result := <-request.Reply
x.send(&w, &result)
}
// RepositoryConfigTree function
func (x *Rest) RepositoryConfigTree(w http.ResponseWriter, r *http.Request,
params httprouter.Params) {
defer panicCatcher(w)
request := msg.New(r, params)
request.Section = msg.SectionRepositoryConfig
request.Action = msg.ActionTree
request.Tree = proto.Tree{
ID: params.ByName(`repositoryID`),
Type: msg.EntityRepository,
}
if !x.isAuthorized(&request) {
x.replyForbidden(&w, &request)
return
}
x.handlerMap.MustLookup(&request).Intake() <- request
result := <-request.Reply
x.send(&w, &result)
}
// RepositoryConfigPropertyCreate function
func (x *Rest) RepositoryConfigPropertyCreate(w http.ResponseWriter, r *http.Request,
params httprouter.Params) {
defer panicCatcher(w)
request := msg.New(r, params)
request.Section = msg.SectionRepositoryConfig
request.Action = msg.ActionPropertyCreate
cReq := proto.NewRepositoryRequest()
if err := decodeJSONBody(r, &cReq); err != nil {
x.replyBadRequest(&w, &request, err)
return
}
switch {
case params.ByName(`repositoryID`) != cReq.Repository.ID:
x.replyBadRequest(&w, &request, fmt.Errorf("Mismatched repository ids: %s, %s",
params.ByName(`repositoryID`), cReq.Repository.ID))
return
case len(*cReq.Repository.Properties) != 1:
x.replyBadRequest(&w, &request, fmt.Errorf("Expected property count 1, actual count: %d",
len(*cReq.Repository.Properties)))
return
}
switch (*cReq.Repository.Properties)[0].Type {
case `service`:
if (*cReq.Repository.Properties)[0].Service.Name == `` {
x.replyBadRequest(&w, &request, fmt.Errorf(`Invalid service name: empty string`))
return
}
}
request.Repository = cReq.Repository.Clone()
request.TargetEntity = msg.EntityRepository
request.Property.Type = (*cReq.Repository.Properties)[0].Type
if !x.isAuthorized(&request) {
x.replyForbidden(&w, &request)
return
}
x.handlerMap.MustLookup(&request).Intake() <- request
result := <-request.Reply
x.send(&w, &result)
}
// RepositoryConfigPropertyDestroy function
func (x *Rest) RepositoryConfigPropertyDestroy(w http.ResponseWriter, r *http.Request,
params httprouter.Params) {
defer panicCatcher(w)
request := msg.New(r, params)
request.Section = msg.SectionRepositoryConfig
request.Action = msg.ActionPropertyDestroy
request.TargetEntity = msg.EntityRepository
request.Property.Type = params.ByName(`propertyType`)
request.Repository.ID = params.ByName(`repositoryID`)
request.Repository.Properties = &[]proto.Property{
proto.Property{
Type: params.ByName(`propertyType`),
RepositoryID: params.ByName(`repositoryID`),
SourceInstanceID: params.ByName(`sourceID`),
},
}
if !x.isAuthorized(&request) {
x.replyForbidden(&w, &request)
return
}
x.handlerMap.MustLookup(&request).Intake() <- request
result := <-request.Reply
x.send(&w, &result)
}
// RepositoryConfigPropertyUpdate function
func (x *Rest) RepositoryConfigPropertyUpdate(w http.ResponseWriter, r *http.Request,
params httprouter.Params) {
defer panicCatcher(w)
request := msg.New(r, params)
request.Section = msg.SectionRepositoryConfig
request.Action = msg.ActionPropertyUpdate
cReq := proto.NewRepositoryRequest()
if err := decodeJSONBody(r, &cReq); err != nil {
x.replyBadRequest(&w, &request, err)
return
}
switch {
case params.ByName(`repositoryID`) != cReq.Repository.ID:
x.replyBadRequest(&w, &request, fmt.Errorf("Mismatched repository ids: %s, %s",
params.ByName(`repositoryID`), cReq.Repository.ID))
return
case len(*cReq.Repository.Properties) != 1:
x.replyBadRequest(&w, &request, fmt.Errorf("Expected property count 1, actual count: %d",
len(*cReq.Repository.Properties)))
return
case params.ByName(`propertyType`) != (*cReq.Repository.Properties)[0].Type:
x.replyBadRequest(&w, &request, fmt.Errorf("Mismatched property types: %s, %s",
params.ByName(`propertyType`), (*cReq.Repository.Properties)[0].Type))
return
case (params.ByName(`propertyType`) == `service`) && (*cReq.Repository.Properties)[0].Service.Name == ``:
x.replyBadRequest(&w, &request, fmt.Errorf(`Invalid service name: empty string`))
return
}
request.Repository = cReq.Repository.Clone()
request.TargetEntity = msg.EntityRepository
request.Property.Type = params.ByName(`propertyType`)
request.Repository.ID = params.ByName(`repositoryID`)
request.Update.Property = (*cReq.Repository.Properties)[0].Clone()
request.Update.Property.InstanceID = params.ByName(`sourceInstanceID`)
request.Update.Property.SourceInstanceID = params.ByName(`sourceInstanceID`)
request.Update.Property.Type = params.ByName(`propertyType`)
request.Update.Property.RepositoryID = params.ByName(`repositoryID`)
request.Repository.Properties = nil
if !x.isAuthorized(&request) {
x.replyForbidden(&w, &request)
return
}
x.handlerMap.MustLookup(&request).Intake() <- request
result := <-request.Reply
x.send(&w, &result)
}
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
|
[
7
] |
package tenhou
import (
"encoding/xml"
"fmt"
"io"
)
type XmlElement struct {
Name string
Attr []XmlAttr
}
type XmlAttr struct {
Name string
Value string
}
func (e XmlElement) Text() string {
attrStr := ""
for _, attr := range e.Attr {
attrStr += fmt.Sprintf(` %s="%s"`, attr.Name, attr.Value)
}
return fmt.Sprintf("<%s%s/>", e.Name, attrStr)
}
func (e *XmlElement) AppendAttr(name, value string) {
e.Attr = append(e.Attr, XmlAttr{
Name: name,
Value: value,
})
}
func (e *XmlElement) AttrByName(name string) string {
for _, attr := range e.Attr {
if attr.Name == name {
return attr.Value
}
}
return ""
}
func (e *XmlElement) ForEachAttr(f func(name, value string) error) error {
for _, attr := range e.Attr {
if err := f(attr.Name, attr.Value); err != nil {
return err
}
}
return nil
}
func newXmlElement(name string, nameValue ...string) XmlElement {
elem := XmlElement{
Name: name,
}
for i := 0; i < len(nameValue); i += 2 {
name := nameValue[i]
value := nameValue[i+1]
elem.Attr = append(elem.Attr, XmlAttr{
Name: name,
Value: value,
})
}
return elem
}
type xmlReader struct {
dec *xml.Decoder
}
func newXmlReader(r io.Reader) (*xmlReader, error) {
return &xmlReader{dec: xml.NewDecoder(r)}, nil
}
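// next returns the next XML start element in the stream, skipping every other
// token type; it propagates the decoder's error (including io.EOF) unchanged.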
func (r *xmlReader) next() (XmlElement, error) {
for {
t, err := r.dec.Token()
if err != nil {
return XmlElement{}, err
}
switch t := t.(type) {
case xml.StartElement:
var attr []XmlAttr
for _, a := range t.Attr {
attr = append(attr, XmlAttr{
Name: a.Name.Local,
Value: a.Value,
})
}
return XmlElement{
Name: t.Name.Local,
Attr: attr,
}, nil
}
}
}
|
[
7
] |
package cmd
import (
"errors"
"fmt"
"os"
"github.com/spf13/cobra"
"k8s.io/helm/pkg/helm"
"github.com/databus23/helm-diff/diff"
"github.com/databus23/helm-diff/manifest"
)
type release struct {
client helm.Interface
detailedExitCode bool
suppressedKinds []string
releases []string
outputContext int
includeTests bool
showSecrets bool
}
const releaseCmdLongUsage = `
This command compares the manifests details of a different releases created from the same chart
It can be used to compare the manifests of
- release1 with release2
$ helm diff release [flags] release1 release2
Example:
$ helm diff release my-prod my-stage
`
func releaseCmd() *cobra.Command {
diff := release{}
releaseCmd := &cobra.Command{
Use: "release [flags] RELEASE release1 [release2]",
Short: "Shows diff between release's manifests",
Long: releaseCmdLongUsage,
PreRun: func(*cobra.Command, []string) {
expandTLSPaths()
},
RunE: func(cmd *cobra.Command, args []string) error {
// Suppress the command usage on error. See #77 for more info
cmd.SilenceUsage = true
if v, _ := cmd.Flags().GetBool("version"); v {
fmt.Println(Version)
return nil
}
switch {
case len(args) < 2:
return errors.New("Too few arguments to Command \"release\".\nMinimum 2 arguments required: release name-1, release name-2")
}
if q, _ := cmd.Flags().GetBool("suppress-secrets"); q {
diff.suppressedKinds = append(diff.suppressedKinds, "Secret")
}
diff.releases = args[0:]
if isHelm3() {
return diff.differentiateHelm3()
}
if diff.client == nil {
diff.client = createHelmClient()
}
return diff.differentiate()
},
}
releaseCmd.Flags().BoolP("suppress-secrets", "q", false, "suppress secrets in the output")
releaseCmd.Flags().BoolVar(&diff.showSecrets, "show-secrets", false, "do not redact secret values in the output")
releaseCmd.Flags().BoolVar(&diff.detailedExitCode, "detailed-exitcode", false, "return a non-zero exit code when there are changes")
releaseCmd.Flags().StringArrayVar(&diff.suppressedKinds, "suppress", []string{}, "allows suppression of the values listed in the diff output")
releaseCmd.Flags().IntVarP(&diff.outputContext, "context", "C", -1, "output NUM lines of context around changes")
releaseCmd.Flags().BoolVar(&diff.includeTests, "include-tests", false, "enable the diffing of the helm test hooks")
releaseCmd.SuggestionsMinimumDistance = 1
if !isHelm3() {
addCommonCmdOptions(releaseCmd.Flags())
}
return releaseCmd
}
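// differentiateHelm3 compares the manifests of the two named releases through
// the Helm 3 backend and writes the diff to stdout; when --detailed-exitcode
// is set it returns an Error with code 2 if any change is found.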
func (d *release) differentiateHelm3() error {
namespace := os.Getenv("HELM_NAMESPACE")
excludes := []string{helm3TestHook, helm2TestSuccessHook}
if d.includeTests {
excludes = []string{}
}
releaseResponse1, err := getRelease(d.releases[0], namespace)
if err != nil {
return err
}
releaseChart1, err := getChart(d.releases[0], namespace)
if err != nil {
return err
}
releaseResponse2, err := getRelease(d.releases[1], namespace)
if err != nil {
return err
}
releaseChart2, err := getChart(d.releases[1], namespace)
if err != nil {
return err
}
if releaseChart1 == releaseChart2 {
seenAnyChanges := diff.Releases(
manifest.Parse(string(releaseResponse1), namespace, excludes...),
manifest.Parse(string(releaseResponse2), namespace, excludes...),
d.suppressedKinds,
d.showSecrets,
d.outputContext,
os.Stdout)
if d.detailedExitCode && seenAnyChanges {
return Error{
error: errors.New("identified at least one change, exiting with non-zero exit code (detailed-exitcode parameter enabled)"),
Code: 2,
}
}
} else {
fmt.Printf("Error : Incomparable Releases \n Unable to compare releases from two different charts \"%s\", \"%s\". \n try helm diff release --help to know more \n", releaseChart1, releaseChart2)
}
return nil
}
func (d *release) differentiate() error {
releaseResponse1, err := d.client.ReleaseContent(d.releases[0])
if err != nil {
return prettyError(err)
}
releaseResponse2, err := d.client.ReleaseContent(d.releases[1])
if err != nil {
return prettyError(err)
}
if releaseResponse1.Release.Chart.Metadata.Name == releaseResponse2.Release.Chart.Metadata.Name {
seenAnyChanges := diff.Releases(
manifest.ParseRelease(releaseResponse1.Release, d.includeTests),
manifest.ParseRelease(releaseResponse2.Release, d.includeTests),
d.suppressedKinds,
d.showSecrets,
d.outputContext,
os.Stdout)
if d.detailedExitCode && seenAnyChanges {
return Error{
error: errors.New("identified at least one change, exiting with non-zero exit code (detailed-exitcode parameter enabled)"),
Code: 2,
}
}
} else {
fmt.Printf("Error : Incomparable Releases \n Unable to compare releases from two different charts \"%s\", \"%s\". \n try helm diff release --help to know more \n", releaseResponse1.Release.Chart.Metadata.Name, releaseResponse2.Release.Chart.Metadata.Name)
}
return nil
}
|
[
7
] |
package main
import (
"math"
"runtime"
"sync"
"time"
"github.com/vova616/chipmunk"
"github.com/vova616/chipmunk/vect"
sf "github.com/zyedidia/sfml/v2.3/sfml"
)
const (
screenWidth = 800
screenHeight = 600
ballRadius = 30
ballMass = 1
)
type Player struct {
*chipmunk.Body
*sf.Sprite
keys []sf.KeyCode
}
func NewPlayer(pos sf.Vector2f, keys []sf.KeyCode, texture *sf.Texture) *Player {
p := new(Player)
sprite := sf.NewSprite(texture)
size := sprite.GetGlobalBounds()
sprite.SetOrigin(sf.Vector2f{size.Width / 2, size.Height / 2})
sprite.SetPosition(pos)
p.Sprite = sprite
p.keys = keys
rect := chipmunk.NewBox(vect.Vector_Zero, vect.Float(size.Width), vect.Float(size.Height))
// rect.SetElasticity(0.95)
body := chipmunk.NewBody(vect.Float(ballMass), rect.Moment(float32(ballMass)))
body.SetPosition(vect.Vect{vect.Float(pos.X), vect.Float(pos.Y)})
// body.SetAngle(vect.Float(rand.Float32() * 2 * math.Pi))
body.AddShape(rect)
space.AddBody(body)
p.Body = body
return p
}
func (p *Player) Update() {
// b.Sprite.SetRotation(0)
p.Body.SetAngle(0)
// p.Shape.Get
// p.SetPosition(body)
pos := p.Body.Position()
p.Sprite.SetPosition(sf.Vector2f{float32(pos.X), float32(-pos.Y)})
v := p.Velocity()
if sf.KeyboardIsKeyPressed(p.keys[0]) {
p.SetVelocity(float32(v.X), 400)
}
if sf.KeyboardIsKeyPressed(p.keys[2]) {
p.AddVelocity(-10, 0)
}
if sf.KeyboardIsKeyPressed(p.keys[3]) {
p.AddVelocity(10, 0)
}
}
type Ball struct {
*chipmunk.Body
*sf.Sprite
}
func NewBall(pos sf.Vector2f, texture *sf.Texture) *Ball {
b := new(Ball)
sprite := sf.NewSprite(texture)
size := sprite.GetGlobalBounds()
sprite.SetOrigin(sf.Vector2f{size.Width / 2, size.Height / 2})
sprite.SetPosition(pos)
b.Sprite = sprite
ball := chipmunk.NewCircle(vect.Vector_Zero, float32(size.Width/2))
ball.SetElasticity(0.95)
body := chipmunk.NewBody(vect.Float(ballMass), ball.Moment(float32(ballMass)))
body.SetPosition(vect.Vect{vect.Float(pos.X), vect.Float(-pos.Y)})
// body.SetAngle(vect.Float(rand.Float32() * 2 * math.Pi))
body.AddShape(ball)
space.AddBody(body)
b.Body = body
// balls = append(balls, b)
return b
}
func (b *Ball) Update() {
pos := b.Body.Position()
b.Sprite.SetPosition(sf.Vector2f{float32(pos.X), float32(-pos.Y)})
angle := b.Body.Angle()
b.Sprite.SetRotation(180.0 / math.Pi * float32(-angle))
}
var (
space *chipmunk.Space
player *Player
balls []*Ball
staticLines []*chipmunk.Shape
)
func step() {
space.Step(vect.Float(1.0 / 60.0))
}
// createBodies sets up the chipmunk space and static bodies
func createWorld() {
space = chipmunk.NewSpace()
space.Gravity = vect.Vect{0, -900}
staticBody := chipmunk.NewBodyStatic()
staticLines = []*chipmunk.Shape{
chipmunk.NewSegment(vect.Vect{0, -600}, vect.Vect{800.0, -600}, 0),
chipmunk.NewSegment(vect.Vect{0, -600}, vect.Vect{0, 0}, 0),
chipmunk.NewSegment(vect.Vect{800, -600}, vect.Vect{800.0, 0}, 0),
}
for _, segment := range staticLines {
// segment.SetElasticity(0.6)
staticBody.AddShape(segment)
}
space.AddBody(staticBody)
}
func main() {
runtime.LockOSThread()
window := sf.NewRenderWindow(sf.VideoMode{screenWidth, screenHeight, 32}, "Space Shooter", sf.StyleDefault, nil)
window.SetFramerateLimit(60)
createWorld()
	// Assign to the package-level player instead of shadowing it with a new local.
	player = NewPlayer(sf.Vector2f{400, 0}, []sf.KeyCode{sf.KeyUp, sf.KeyDown, sf.KeyLeft, sf.KeyRight}, sf.NewTexture("mario.png"))
lock := sync.RWMutex{}
go func() {
for {
lock.Lock()
balls = append(balls, NewBall(sf.Vector2f{400, 0}, sf.NewTexture("smiley.png")))
lock.Unlock()
time.Sleep(500 * time.Millisecond)
}
}()
for window.IsOpen() {
if event := window.PollEvent(); event != nil {
switch event.Type {
case sf.EventClosed:
window.Close()
}
}
step()
player.Update()
lock.RLock()
for _, ball := range balls {
ball.Update()
}
lock.RUnlock()
window.Clear(sf.ColorWhite)
window.Draw(player.Sprite)
for _, ball := range balls {
window.Draw(ball.Sprite)
}
window.Display()
}
}
|
[
7
] |
// +build linux
package facts
import (
"bufio"
"fmt"
"log"
"os"
"strings"
"sync"
"golang.org/x/sys/unix"
)
// Constants
const (
// linuxSysinfoLoadsScale has been described elsewhere as a "magic" number.
// It reverts the calculation of "load << (SI_LOAD_SHIFT - FSHIFT)" done in the original load calculation.
linuxSysinfoLoadsScale float64 = 65536.0
)
func (f *SystemFacts) getSysInfo(wg *sync.WaitGroup) {
defer wg.Done()
var info unix.Sysinfo_t
if err := unix.Sysinfo(&info); err != nil {
if c.Debug {
log.Println(err.Error())
}
return
}
f.mu.Lock()
defer f.mu.Unlock()
f.Memory.Total = info.Totalram
f.Memory.Free = info.Freeram
f.Memory.Shared = info.Sharedram
f.Memory.Buffered = info.Bufferram
f.Swap.Total = info.Totalswap
f.Swap.Free = info.Freeswap
f.Uptime = info.Uptime
f.LoadAverage.One = fmt.Sprintf("%.2f", float64(info.Loads[0])/linuxSysinfoLoadsScale)
f.LoadAverage.Five = fmt.Sprintf("%.2f", float64(info.Loads[1])/linuxSysinfoLoadsScale)
f.LoadAverage.Ten = fmt.Sprintf("%.2f", float64(info.Loads[2])/linuxSysinfoLoadsScale)
return
}
func (f *SystemFacts) getOSRelease(wg *sync.WaitGroup) {
defer wg.Done()
osReleaseFile, err := os.Open("/etc/os-release")
if err != nil {
if c.Debug {
log.Println(err.Error())
}
return
}
defer osReleaseFile.Close()
f.mu.Lock()
defer f.mu.Unlock()
scanner := bufio.NewScanner(osReleaseFile)
for scanner.Scan() {
columns := strings.Split(scanner.Text(), "=")
if len(columns) > 1 {
key := columns[0]
value := strings.Trim(strings.TrimSpace(columns[1]), `"`)
switch key {
case "NAME":
f.OSRelease.Name = value
case "ID":
f.OSRelease.ID = value
case "PRETTY_NAME":
f.OSRelease.PrettyName = value
case "VERSION":
f.OSRelease.Version = value
case "VERSION_ID":
f.OSRelease.VersionID = value
}
}
}
lsbFile, err := os.Open("/etc/lsb-release")
if err != nil {
if c.Debug {
log.Println(err.Error())
}
return
}
defer lsbFile.Close()
scanner = bufio.NewScanner(lsbFile)
for scanner.Scan() {
columns := strings.Split(scanner.Text(), "=")
if len(columns) > 1 {
key := columns[0]
value := strings.Trim(strings.TrimSpace(columns[1]), `"`)
switch key {
case "DISTRIB_CODENAME":
f.OSRelease.CodeName = value
}
}
}
return
}
func (f *SystemFacts) getUname(wg *sync.WaitGroup) {
defer wg.Done()
var buf unix.Utsname
err := unix.Uname(&buf)
if err != nil {
if c.Debug {
log.Println(err.Error())
}
return
}
f.mu.Lock()
defer f.mu.Unlock()
f.Domainname = charsToString(buf.Domainname)
f.Architecture = charsToString(buf.Machine)
f.Hostname = charsToString(buf.Nodename)
f.Kernel.Name = charsToString(buf.Sysname)
f.Kernel.Release = charsToString(buf.Release)
f.Kernel.Version = charsToString(buf.Version)
return
}
|
[
7
] |
package reloader
import (
"code.google.com/p/go.crypto/ssh"
"errors"
"github.com/mduvall/reloader/reloader/protocols"
"log"
)
type Transporter interface {
Publish(string) bool
}
type Transport struct {
Connection *ssh.ClientConn
}
// Publish establishes a session with the ClientConn and starts the netcat
// session for RPC. It returns true so that *Transport satisfies the
// Transporter interface; the actual wire transfer is still a stub.
func (t *Transport) Publish(message string) bool {
	// Some voodoo here to actually send the string over the wire.
	log.Println("Got the message: ", message)
	return true
}
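// CreateTransport builds a Transport for the protocol named in
// transportParameters; currently only "ssh" is supported.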
func CreateTransport(transportParameters map[string]string) (*Transport, error) {
switch transportParameters["protocol"] {
case "ssh":
clientConn, err := reloader.GetSshClientForTransport(transportParameters)
if err != nil {
return nil, err
}
log.Println("established ssh connection: ", clientConn)
return &Transport{Connection: clientConn}, nil
}
return nil, errors.New("not a valid protocol")
}
|
[
7
] |
package httparser
import (
"fmt"
"go/ast"
"go/parser"
"go/token"
"log"
"os"
"text/template"
)
// GenerateTestFile creates a file with the template values inserted into the template
func GenerateTestFile(testFileName string, templateValues *TemplateValues) error {
	outFile, err := os.Create(testFileName)
	if err != nil {
		fmt.Printf("Error creating test file named: %s\n", testFileName)
		return err
	}
tmpl := template.Must(template.New("out").Parse(outputTemplate))
if err := tmpl.Execute(outFile, templateValues); err != nil {
return err
}
if err := outFile.Close(); err != nil {
return err
}
return nil
}
// ParseFunctions parses a file and returns information about its HTTP handlers
func ParseFunctions(filePath string) *TemplateValues {
fset := token.NewFileSet()
f, err := parser.ParseFile(fset, filePath, nil, parser.ParseComments)
if err != nil {
log.Fatal(err)
}
var funcInfos []FunctionInfo
packageName := fmt.Sprint(f.Name)
containsMux := false
for _, decl := range f.Decls {
switch t := decl.(type) {
case *ast.FuncDecl:
responseWriterParamExists := false
requestParamExists := false
for _, param := range t.Type.Params.List {
switch t2 := param.Type.(type) {
case *ast.SelectorExpr:
paramName := fmt.Sprint(t2.Sel.Name)
if paramName == "ResponseWriter" {
responseWriterParamExists = true
}
case *ast.StarExpr:
paramName := fmt.Sprint(t2.X)
if paramName == "&{http Request}" {
requestParamExists = true
}
}
}
if responseWriterParamExists && requestParamExists {
muxVars := getMuxVars(t)
if len(muxVars) > 0 {
containsMux = true
}
funcInfo := FunctionInfo{
Name: fmt.Sprint(t.Name),
MuxVars: muxVars,
}
funcInfos = append(funcInfos, funcInfo)
}
}
}
templateValues := TemplateValues{
FuncInfo: funcInfos,
PackageName: packageName,
ContainsMux: containsMux,
}
return &templateValues
}
|
[
7
] |
package service
import (
"errors"
"math/rand"
"strings"
"time"
"github.com/robfig/cron/v3"
)
const (
// randomly is used to generate random time.
// example:
// '@randomly' means 00:00:00~23:59:59
// '@randomly 8:00:00' means 8:00:00~23:59:59
// '@randomly 8:00:00 23:00:00' means 8:00:00~23:00:00
randomly = "@randomly"
randomLayout = "15:04:05"
randomStart = "00:00:00"
randomEnd = "23:59:59"
)
var (
ErrInvalidRandomly = errors.New("invalid randomly expression")
randomStartTime, _ = time.ParseInLocation(randomLayout, randomStart, time.UTC)
)
func parseRanges(ts []string) ([]time.Time, error) {
result := make([]time.Time, 0, len(ts))
for _, v := range ts {
if v == "" {
result = append(result, randomStartTime)
continue
}
t, err := time.ParseInLocation(randomLayout, v, time.UTC)
if err != nil {
return nil, err
}
result = append(result, t)
}
return result, nil
}
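// newRandomSchedule parses a "@randomly [start [end]]" spec (see the constants
// above) into a schedule whose Next picks a random time between start and end
// on the following day. Example (illustrative): "@randomly 8:00:00 23:00:00"
// fires at a random moment between 08:00:00 and 23:00:00 each day.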
func newRandomSchedule(spec string) (*randomSchedule, error) {
spec = strings.TrimSpace(strings.TrimPrefix(spec, randomly))
ranges := strings.Split(spec, " ")
if len(ranges) == 1 {
ranges = append(ranges, randomEnd)
} else if len(ranges) > 2 {
return nil, ErrInvalidRandomly
}
result, err := parseRanges(ranges)
if err != nil {
return nil, err
}
start, end := result[0], result[1]
if start.After(end) {
return nil, ErrInvalidRandomly
}
return &randomSchedule{
start: start,
end: end,
}, nil
}
type randomSchedule struct {
start time.Time
end time.Time
}
func (rs *randomSchedule) offset() time.Duration {
d1 := rs.end.Sub(rs.start)
if d1.Nanoseconds() == 0 {
return 0
}
d2 := time.Duration(rand.Int63n(d1.Nanoseconds()))
d3 := rs.start.Sub(randomStartTime)
return d2 + d3
}
// Next returns a randomly chosen time between start and end on the day after t.
func (rs *randomSchedule) Next(t time.Time) time.Time {
var (
year = t.Year()
month = t.Month()
day = t.Day()
)
return time.Date(year, month, day, 0, 0, 0, 0, t.Location()).
AddDate(0, 0, 1).
Add(rs.offset())
}
func (s *Service) parse(spec string) (cron.Schedule, error) {
switch {
case strings.HasPrefix(spec, randomly):
return newRandomSchedule(spec)
}
return s.parser.Parse(spec)
}
func (s *Service) addJob(spec string, cmd cron.Job) error {
schedule, err := s.parse(spec)
if err != nil {
return err
}
s.cron.Schedule(schedule, cmd)
return nil
}
|
[
7
] |
// Code generated by thriftrw v1.2.0
// @generated
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package kv
import (
"fmt"
"go.uber.org/thriftrw/wire"
"strings"
)
type KeyValue_SetValue_Args struct {
Key *string `json:"key,omitempty"`
Value *string `json:"value,omitempty"`
}
func (v *KeyValue_SetValue_Args) ToWire() (wire.Value, error) {
var (
fields [2]wire.Field
i int = 0
w wire.Value
err error
)
if v.Key != nil {
w, err = wire.NewValueString(*(v.Key)), error(nil)
if err != nil {
return w, err
}
fields[i] = wire.Field{ID: 1, Value: w}
i++
}
if v.Value != nil {
w, err = wire.NewValueString(*(v.Value)), error(nil)
if err != nil {
return w, err
}
fields[i] = wire.Field{ID: 2, Value: w}
i++
}
return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil
}
func (v *KeyValue_SetValue_Args) FromWire(w wire.Value) error {
var err error
for _, field := range w.GetStruct().Fields {
switch field.ID {
case 1:
if field.Value.Type() == wire.TBinary {
var x string
x, err = field.Value.GetString(), error(nil)
v.Key = &x
if err != nil {
return err
}
}
case 2:
if field.Value.Type() == wire.TBinary {
var x string
x, err = field.Value.GetString(), error(nil)
v.Value = &x
if err != nil {
return err
}
}
}
}
return nil
}
func (v *KeyValue_SetValue_Args) String() string {
if v == nil {
return "<nil>"
}
var fields [2]string
i := 0
if v.Key != nil {
fields[i] = fmt.Sprintf("Key: %v", *(v.Key))
i++
}
if v.Value != nil {
fields[i] = fmt.Sprintf("Value: %v", *(v.Value))
i++
}
return fmt.Sprintf("KeyValue_SetValue_Args{%v}", strings.Join(fields[:i], ", "))
}
func (v *KeyValue_SetValue_Args) Equals(rhs *KeyValue_SetValue_Args) bool {
if !_String_EqualsPtr(v.Key, rhs.Key) {
return false
}
if !_String_EqualsPtr(v.Value, rhs.Value) {
return false
}
return true
}
func (v *KeyValue_SetValue_Args) MethodName() string {
return "setValue"
}
func (v *KeyValue_SetValue_Args) EnvelopeType() wire.EnvelopeType {
return wire.Call
}
var KeyValue_SetValue_Helper = struct {
Args func(key *string, value *string) *KeyValue_SetValue_Args
IsException func(error) bool
WrapResponse func(error) (*KeyValue_SetValue_Result, error)
UnwrapResponse func(*KeyValue_SetValue_Result) error
}{}
func init() {
KeyValue_SetValue_Helper.Args = func(key *string, value *string) *KeyValue_SetValue_Args {
return &KeyValue_SetValue_Args{Key: key, Value: value}
}
KeyValue_SetValue_Helper.IsException = func(err error) bool {
switch err.(type) {
default:
return false
}
}
KeyValue_SetValue_Helper.WrapResponse = func(err error) (*KeyValue_SetValue_Result, error) {
if err == nil {
return &KeyValue_SetValue_Result{}, nil
}
return nil, err
}
KeyValue_SetValue_Helper.UnwrapResponse = func(result *KeyValue_SetValue_Result) (err error) {
return
}
}
type KeyValue_SetValue_Result struct{}
func (v *KeyValue_SetValue_Result) ToWire() (wire.Value, error) {
var (
fields [0]wire.Field
i int = 0
)
return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil
}
func (v *KeyValue_SetValue_Result) FromWire(w wire.Value) error {
for _, field := range w.GetStruct().Fields {
switch field.ID {
}
}
return nil
}
func (v *KeyValue_SetValue_Result) String() string {
if v == nil {
return "<nil>"
}
var fields [0]string
i := 0
return fmt.Sprintf("KeyValue_SetValue_Result{%v}", strings.Join(fields[:i], ", "))
}
func (v *KeyValue_SetValue_Result) Equals(rhs *KeyValue_SetValue_Result) bool {
return true
}
func (v *KeyValue_SetValue_Result) MethodName() string {
return "setValue"
}
func (v *KeyValue_SetValue_Result) EnvelopeType() wire.EnvelopeType {
return wire.Reply
}
|
[
7
] |
// +build tinygo
package events
import "errors"
type receiverType int
const (
receiverTypeUnknown receiverType = iota
receiverTypeFuncNoArgs
)
type receiptHandler struct {
receiver interface{}
rt receiverType
}
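// newReceiptHandler validates that the receiver has a supported signature and
// records its kind so invoke can call it with a plain type assertion, likely
// to avoid reflection, which is only partially supported under tinygo.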
func newReceiptHandler(receiver interface{}) (receiptHandler, error) {
	rt := receiverTypeUnknown
switch receiver.(type) {
case func():
rt = receiverTypeFuncNoArgs
default:
return receiptHandler{}, errors.New("unsupported receiver type")
}
return receiptHandler{receiver: receiver, rt: rt}, nil
}
func (rh *receiptHandler) invoke(values preparedArgs) {
switch rh.rt {
case receiverTypeFuncNoArgs:
rh.receiver.(func())()
}
}
type preparedArgs []interface{}
func prepareArgs(argIfaces []interface{}) preparedArgs {
	return argIfaces
}
|
[
7
] |
package core
import (
"context"
"encoding/json"
"fmt"
"path"
"strings"
"sync"
"github.com/bleenco/abstruse/internal/common"
"github.com/bleenco/abstruse/pkg/lib"
"github.com/bleenco/abstruse/server/db/model"
"go.etcd.io/etcd/clientv3"
recipe "go.etcd.io/etcd/contrib/recipes"
"go.etcd.io/etcd/mvcc/mvccpb"
"go.uber.org/zap"
)
// Scheduler contains logic for etcd backed priority job scheduler.
type Scheduler struct {
mu sync.Mutex
paused bool
client *clientv3.Client
queue *recipe.PriorityQueue
workers map[string]*Worker
pending map[uint]*common.Job
logger *zap.SugaredLogger
app *App
ready chan struct{}
done chan struct{}
ctx context.Context
}
// NewScheduler returns a new Scheduler instance.
func NewScheduler(logger *zap.Logger, app *App) *Scheduler {
	return &Scheduler{
		ready:   make(chan struct{}, 1),
		done:    make(chan struct{}, 1),
		workers: make(map[string]*Worker),
		pending: make(map[uint]*common.Job),
		logger:  logger.With(zap.String("type", "scheduler")).Sugar(),
		app:     app,
		ctx:     context.Background(),
	}
}
// Run starts the scheduler.
func (s *Scheduler) Run(client *clientv3.Client) error {
s.client = client
s.queue = recipe.NewPriorityQueue(client, common.QueuePrefix)
s.logger.Infof("starting main scheduler")
go s.watchDone()
for {
select {
case <-s.ready:
s.process()
case <-s.ctx.Done():
return s.ctx.Err()
case <-s.done:
return nil
}
}
}
// Stop stops the scheduler.
func (s *Scheduler) Stop() {
s.done <- struct{}{}
}
// Schedule adds new job for execution in queue with priority.
func (s *Scheduler) Schedule(job *common.Job) error {
job.Status = common.StatusQueued
job.StartTime = nil
job.EndTime = nil
if err := s.save(job); err != nil {
return err
}
val, err := json.Marshal(&job)
if err != nil {
return err
}
if err = s.queue.Enqueue(string(val), job.Priority); err != nil {
return err
}
s.logger.Debugf("job %d scheduled in the queue with priority %d", job.ID, job.Priority)
s.next()
return nil
}
// Pause pauses jobs in the queue waiting for execution.
func (s *Scheduler) Pause() error {
s.mu.Lock()
defer s.mu.Unlock()
s.paused = true
return nil
}
// Resume unpauses scheduler and continues with jobs
// waiting for execution.
func (s *Scheduler) Resume() error {
s.mu.Lock()
s.paused = false
s.mu.Unlock()
s.next()
return nil
}
// Cancel stops job if pending or removes from queue.
func (s *Scheduler) Cancel(id uint) error {
resp, err := s.client.Get(context.TODO(), common.QueuePrefix, clientv3.WithPrefix())
if err != nil {
return err
}
for i := range resp.Kvs {
key, val, job := string(resp.Kvs[i].Key), resp.Kvs[i].Value, &common.Job{}
if err := json.Unmarshal(val, &job); err == nil {
if job.ID == id {
s.logger.Debugf("removing job %d from queue...", job.ID)
if _, err := s.client.Delete(context.TODO(), key); err != nil {
return err
}
job.StartTime = lib.TimeNow()
job.EndTime = lib.TimeNow()
job.Status = common.StatusFailing
if err := s.save(job); err != nil {
return err
}
return nil
}
} else {
s.logger.Errorf("error unmarshaling job: %v", err)
}
}
s.mu.Lock()
if job, ok := s.pending[id]; ok {
s.mu.Unlock()
s.logger.Debugf("stopping job %d...", id)
job.EndTime = lib.TimeNow()
job.Status = common.StatusFailing
key := path.Join(common.StopPrefix, fmt.Sprintf("%d", job.ID))
val, err := json.Marshal(&job)
if err != nil {
return err
}
if _, err := s.client.Put(context.TODO(), key, string(val)); err != nil {
return err
}
return nil
}
s.mu.Unlock()
return nil
}
// AddWorker adds worker to schedulers worker list.
func (s *Scheduler) AddWorker(w *Worker) {
s.mu.Lock()
defer s.mu.Unlock()
s.workers[w.id] = w
s.logger.Debugf("worker %s added to scheduler list", w.id)
s.next()
}
// DeleteWorker removes worker from schedulers worker list.
func (s *Scheduler) DeleteWorker(id string) {
s.mu.Lock()
defer s.mu.Unlock()
delete(s.workers, id)
s.logger.Debugf("worker %s deleted from scheduler list", id)
}
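// process pops the highest-priority job off the etcd-backed queue and hands it
// to the worker with the most free capacity (largest max minus running).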
func (s *Scheduler) process() error {
s.mu.Lock()
count := len(s.workers)
pause := s.paused
s.mu.Unlock()
if pause || count == 0 {
return nil
}
var worker *Worker
var c int
s.mu.Lock()
for _, w := range s.workers {
w.mu.Lock()
diff := w.max - w.running
if diff > c {
worker, c = w, diff
}
w.mu.Unlock()
}
s.mu.Unlock()
if c == 0 || s.queueLen() == 0 {
return nil
}
data, err := s.queue.Dequeue()
if err != nil {
s.logger.Errorf("error while dequeue: %v", err)
return err
}
job := &common.Job{}
if err := json.Unmarshal([]byte(data), &job); err != nil {
s.logger.Errorf("error while unmarshaling job: %v", err)
return err
}
worker.mu.Lock()
worker.running++
worker.mu.Unlock()
worker.emitUsage()
job.WorkerID = worker.id
s.logger.Debugf("job %d enqueued, sending to worker %s...", job.ID, job.WorkerID)
if err := s.startJob(job); err != nil {
s.logger.Errorf("error starting job %d: %v", job.ID, err)
}
return nil
}
func (s *Scheduler) startJob(job *common.Job) error {
data, err := json.Marshal(&job)
if err != nil {
return err
}
go s.jobLogs(job.WorkerID, job.ID, job.BuildID)
_, err = s.app.client.Put(context.Background(), path.Join(common.PendingPrefix, fmt.Sprintf("%d", job.ID)), string(data))
if err != nil {
return err
}
s.mu.Lock()
s.pending[job.ID] = job
s.mu.Unlock()
job.Status = common.StatusRunning
job.StartTime = lib.TimeNow()
if err := s.save(job); err != nil {
return err
}
s.next()
return nil
}
func (s *Scheduler) watchDone() {
resp, err := s.client.Get(context.Background(), common.DonePrefix, clientv3.WithPrefix())
if err != nil {
s.logger.Errorf("%v", err)
} else {
for i := range resp.Kvs {
key, val, job := string(resp.Kvs[i].Key), resp.Kvs[i].Value, common.Job{}
if err := json.Unmarshal(val, &job); err == nil {
if _, err := s.client.Delete(context.Background(), key); err == nil {
s.logger.Debugf("job %d done with status: %s", job.ID, job.GetStatus())
s.mu.Lock()
delete(s.pending, job.ID)
s.mu.Unlock()
s.next()
} else {
s.logger.Errorf("error deleting job %d from done", job.ID)
}
} else {
s.logger.Errorf("error unmarshaling job: %v", err)
}
}
}
go func() {
wch := s.client.Watch(context.Background(), common.DonePrefix, clientv3.WithPrefix())
for n := range wch {
for _, ev := range n.Events {
switch ev.Type {
case mvccpb.PUT:
key, val, job := string(ev.Kv.Key), ev.Kv.Value, &common.Job{}
if err := json.Unmarshal(val, &job); err == nil {
if _, err := s.client.Delete(context.Background(), key); err == nil {
s.mu.Lock()
if j, ok := s.pending[job.ID]; ok {
job.Log = j.Log
}
s.mu.Unlock()
if err := s.save(job); err == nil {
s.logger.Debugf("job %d done with status: %s", job.ID, job.GetStatus())
								s.mu.Lock()
								delete(s.pending, job.ID)
								if worker, ok := s.workers[job.WorkerID]; ok {
									worker.running--
									worker.emitUsage()
								}
								s.mu.Unlock()
s.next()
} else {
s.logger.Errorf("error saving job %d to db", job.ID)
}
} else {
s.logger.Errorf("error deleting job %d from done", job.ID)
}
} else {
s.logger.Errorf("error unmarshaling job: %v", err)
}
}
}
}
}()
}
func (s *Scheduler) queueLen() int {
	resp, err := s.client.Get(context.Background(), common.QueuePrefix, clientv3.WithPrefix())
	if err != nil {
		s.logger.Errorf("%v", err)
		return 0
	}
	return len(resp.Kvs)
}
func (s *Scheduler) jobLogs(workerID string, jobID, buildID uint) {
	s.mu.Lock()
	worker, ok := s.workers[workerID]
	s.mu.Unlock()
	if !ok {
		return
	}
	if err := worker.logOutput(context.Background(), jobID, buildID); err != nil {
		s.logger.Errorf("error streaming logs for job %d: %v", jobID, err)
	}
}
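// next signals the scheduler loop to run another processing pass; the
// non-blocking send keeps at most one wake-up pending in the buffered channel.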
func (s *Scheduler) next() {
select {
case s.ready <- struct{}{}:
default:
}
}
func (s *Scheduler) save(job *common.Job) error {
jobModel := &model.Job{
ID: job.ID,
Status: job.GetStatus(),
StartTime: job.StartTime,
EndTime: job.EndTime,
Log: strings.Join(job.Log, ""),
}
_, err := s.app.repo.Job.Update(jobModel)
if err != nil {
return err
}
go s.app.broadcastJobStatus(job)
return s.app.updateBuildTime(job.BuildID)
}
|
[
7
] |
package mtl
import (
"bufio"
"fmt"
"io"
"os"
"strings"
)
// Mode represents the illumination model
// of a material.
type Mode int8
func (m Mode) String() string {
switch m {
case 0:
return "ColorOnAmbientOff"
case 1:
return "ColorOnAmbientOn"
case 2:
return "HighlightOn"
case 3:
return "R_Raytrace"
case 4:
return "T_Glass_R_Raytrace"
case 5:
return "R_Fresnel_Raytrace"
case 6:
return "T_Refraction_R_Raytrace"
case 7:
return "T_Reflection_R_Fresnel_Raytrace"
case 8:
return "Reflection_RaytraceOff"
case 9:
return "T_Glass_R_RayTrace_Off"
case 10:
return "Cast_Shadow"
}
return "Unknown"
}
const (
ColorOnAmbientOff = iota
ColorOnAmbientOn
HighlightOn
R_Raytrace
T_Glass_R_Raytrace
R_Fresnel_Raytrace
T_Refraction_R_Raytrace
T_Reflection_R_Fresnel_Raytrace
Reflection_RaytraceOff
T_Glass_R_RayTrace_Off
	Cast_Shadow // casts shadows onto invisible surfaces (illum 10)
)
// Representation of MTL file
type MTL struct {
// ambient reflectance // should be converted to its own type
Ka [3]float64
// diffuse reflectance
Kd [3]float64
// Specular reflectance
Ks [3]float64
Ke [3]float64
// Dissolve factor aka opacity
D *float64
// TODO transmission filter
Tf float64
// inverse of d
Tr float64
// Specular Exponent
Ns float64
// TODO label
Ni float64
// Illumination mode of the MTL
Illum Mode
// name of the material
Name string
// Mappings
Map_Kd *FileReference
/*
Specifies that a color texture file or a color procedural texture
is applied to the ambient reflectivity of the material. During
rendering, the "map_Ka" value is multiplied by the "Ka" value.
*/
Map_Ka *FileReference
Map_Bump *FileReference
fileName string
}
func CoerceMode(i Mode) Mode {
switch {
case i > 10:
return 10
case i < 0:
return 0
}
return i
}
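// Decode reads an MTL file and returns the materials it defines, keyed by the
// name given in each newmtl directive.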
func Decode(file *os.File) (out map[string]*MTL, err error) {
out = make(map[string]*MTL)
scanner := bufio.NewScanner(file)
m := new(MTL)
m.fileName = file.Name()
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
switch parts := strings.SplitN(s, " ", 2); parts[0] {
case "#", "": // Empty response
case "newmtl":
out[m.Name] = m
m = new(MTL)
m.fileName = file.Name()
case "Ka", "ka":
var x, y, z float64
_, err = fmt.Sscanf(strings.TrimSpace(parts[1]), "%f %f %f", &x, &y, &z)
m.Ka = [3]float64{x, y, z}
case "Kd", "kd":
var x, y, z float64
_, err = fmt.Sscanf(strings.TrimSpace(parts[1]), "%f %f %f", &x, &y, &z)
m.Kd = [3]float64{x, y, z}
case "Ks", "ks":
// https://stackoverflow.com/questions/36964747/ke-attribute-in-mtl-files
case "Ke", "ke":
var x, y, z float64
_, err = fmt.Sscanf(strings.TrimSpace(parts[1]), "%f %f %f", &x, &y, &z)
m.Ke = [3]float64{x, y, z}
case "Tf", "tf":
case "illum": // illumination
var illum Mode
_, err = fmt.Sscanf(strings.TrimSpace(parts[1]), "%d", &illum)
m.Illum = CoerceMode(illum)
case "d": // dissolve
dissolve := new(float64)
_, err = fmt.Sscanf(strings.TrimSpace(parts[1]), "%f", dissolve)
m.D = dissolve
case "Ns", "ns":
var ns float64
_, err = fmt.Sscanf(strings.TrimSpace(parts[1]), "%f", &ns)
m.Ns = ns
case "sharpness":
case "Ni", "ni":
var ni float64
_, err = fmt.Sscanf(strings.TrimSpace(parts[1]), "%f", &ni)
m.Ni = ni
case "Material", "material":
case "Tr", "tr":
// TODO
var transparency float64
_, err = fmt.Sscanf(strings.TrimSpace(parts[1]), "%f", &transparency)
m.Tr = transparency
// Mapping
case "map_Kd":
// TODO
switch fields := strings.Fields(parts[1]); len(fields) {
case 1:
// Just got the filename
m.Map_Kd = &FileReference{
FileName: fields[0],
}
default:
// TODO Holy all the todos
}
case "map_Ka":
switch fields := strings.Fields(parts[1]); len(fields) {
case 1:
m.Map_Ka = &FileReference{
FileName: fields[0],
}
}
case "map_bump", "bump":
switch fields := strings.Fields(parts[1]); len(fields) {
case 1:
m.Map_Bump = &FileReference{
FileName: fields[0],
}
}
default:
fmt.Println("Unmatched", s)
}
if err != nil {
return out, err
}
}
	if m.Name != "" {
		out[m.Name] = m
	}
return
}
func (m MTL) Encode(w io.Writer) error {
// TODO
return nil
}
|
[
7
] |
// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
package cdproto
import (
json "encoding/json"
target "github.com/chromedp/cdproto/target"
easyjson "github.com/mailru/easyjson"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
)
// suppress unused package warning
var (
_ *json.RawMessage
_ *jlexer.Lexer
_ *jwriter.Writer
_ easyjson.Marshaler
)
func easyjsonC5a4559bDecodeGithubComChromedpCdproto(in *jlexer.Lexer, out *empty) {
isTopLevel := in.IsStart()
if in.IsNull() {
if isTopLevel {
in.Consumed()
}
in.Skip()
return
}
in.Delim('{')
for !in.IsDelim('}') {
key := in.UnsafeFieldName(false)
in.WantColon()
if in.IsNull() {
in.Skip()
in.WantComma()
continue
}
switch key {
default:
in.SkipRecursive()
}
in.WantComma()
}
in.Delim('}')
if isTopLevel {
in.Consumed()
}
}
func easyjsonC5a4559bEncodeGithubComChromedpCdproto(out *jwriter.Writer, in empty) {
out.RawByte('{')
first := true
_ = first
out.RawByte('}')
}
// MarshalJSON supports json.Marshaler interface
func (v empty) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjsonC5a4559bEncodeGithubComChromedpCdproto(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
// MarshalEasyJSON supports easyjson.Marshaler interface
func (v empty) MarshalEasyJSON(w *jwriter.Writer) {
easyjsonC5a4559bEncodeGithubComChromedpCdproto(w, v)
}
// UnmarshalJSON supports json.Unmarshaler interface
func (v *empty) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjsonC5a4559bDecodeGithubComChromedpCdproto(&r, v)
return r.Error()
}
// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *empty) UnmarshalEasyJSON(l *jlexer.Lexer) {
easyjsonC5a4559bDecodeGithubComChromedpCdproto(l, v)
}
func easyjsonC5a4559bDecodeGithubComChromedpCdproto1(in *jlexer.Lexer, out *Message) {
isTopLevel := in.IsStart()
if in.IsNull() {
if isTopLevel {
in.Consumed()
}
in.Skip()
return
}
in.Delim('{')
for !in.IsDelim('}') {
key := in.UnsafeFieldName(false)
in.WantColon()
if in.IsNull() {
in.Skip()
in.WantComma()
continue
}
switch key {
case "id":
out.ID = int64(in.Int64())
case "sessionId":
out.SessionID = target.SessionID(in.String())
case "method":
out.Method = MethodType(in.String())
case "params":
(out.Params).UnmarshalEasyJSON(in)
case "result":
(out.Result).UnmarshalEasyJSON(in)
case "error":
if in.IsNull() {
in.Skip()
out.Error = nil
} else {
if out.Error == nil {
out.Error = new(Error)
}
(*out.Error).UnmarshalEasyJSON(in)
}
default:
in.SkipRecursive()
}
in.WantComma()
}
in.Delim('}')
if isTopLevel {
in.Consumed()
}
}
func easyjsonC5a4559bEncodeGithubComChromedpCdproto1(out *jwriter.Writer, in Message) {
out.RawByte('{')
first := true
_ = first
if in.ID != 0 {
const prefix string = ",\"id\":"
first = false
out.RawString(prefix[1:])
out.Int64(int64(in.ID))
}
if in.SessionID != "" {
const prefix string = ",\"sessionId\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(in.SessionID))
}
if in.Method != "" {
const prefix string = ",\"method\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(in.Method))
}
if (in.Params).IsDefined() {
const prefix string = ",\"params\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
(in.Params).MarshalEasyJSON(out)
}
if (in.Result).IsDefined() {
const prefix string = ",\"result\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
(in.Result).MarshalEasyJSON(out)
}
if in.Error != nil {
const prefix string = ",\"error\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
(*in.Error).MarshalEasyJSON(out)
}
out.RawByte('}')
}
// MarshalJSON supports json.Marshaler interface
func (v Message) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjsonC5a4559bEncodeGithubComChromedpCdproto1(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
// MarshalEasyJSON supports easyjson.Marshaler interface
func (v Message) MarshalEasyJSON(w *jwriter.Writer) {
easyjsonC5a4559bEncodeGithubComChromedpCdproto1(w, v)
}
// UnmarshalJSON supports json.Unmarshaler interface
func (v *Message) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjsonC5a4559bDecodeGithubComChromedpCdproto1(&r, v)
return r.Error()
}
// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *Message) UnmarshalEasyJSON(l *jlexer.Lexer) {
easyjsonC5a4559bDecodeGithubComChromedpCdproto1(l, v)
}
func easyjsonC5a4559bDecodeGithubComChromedpCdproto2(in *jlexer.Lexer, out *Error) {
isTopLevel := in.IsStart()
if in.IsNull() {
if isTopLevel {
in.Consumed()
}
in.Skip()
return
}
in.Delim('{')
for !in.IsDelim('}') {
key := in.UnsafeFieldName(false)
in.WantColon()
if in.IsNull() {
in.Skip()
in.WantComma()
continue
}
switch key {
case "code":
out.Code = int64(in.Int64())
case "message":
out.Message = string(in.String())
default:
in.SkipRecursive()
}
in.WantComma()
}
in.Delim('}')
if isTopLevel {
in.Consumed()
}
}
func easyjsonC5a4559bEncodeGithubComChromedpCdproto2(out *jwriter.Writer, in Error) {
out.RawByte('{')
first := true
_ = first
{
const prefix string = ",\"code\":"
out.RawString(prefix[1:])
out.Int64(int64(in.Code))
}
{
const prefix string = ",\"message\":"
out.RawString(prefix)
out.String(string(in.Message))
}
out.RawByte('}')
}
// MarshalJSON supports json.Marshaler interface
func (v Error) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjsonC5a4559bEncodeGithubComChromedpCdproto2(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
// MarshalEasyJSON supports easyjson.Marshaler interface
func (v Error) MarshalEasyJSON(w *jwriter.Writer) {
easyjsonC5a4559bEncodeGithubComChromedpCdproto2(w, v)
}
// UnmarshalJSON supports json.Unmarshaler interface
func (v *Error) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjsonC5a4559bDecodeGithubComChromedpCdproto2(&r, v)
return r.Error()
}
// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *Error) UnmarshalEasyJSON(l *jlexer.Lexer) {
easyjsonC5a4559bDecodeGithubComChromedpCdproto2(l, v)
}
|
[
7
] |
// Code generated by protoc-gen-go-form. DO NOT EDIT.
// Source: api_policy.proto
package pb
import (
base64 "encoding/base64"
json "encoding/json"
urlenc "github.com/erda-project/erda-infra/pkg/urlenc"
structpb "google.golang.org/protobuf/types/known/structpb"
url "net/url"
strings "strings"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the "github.com/erda-project/erda-infra/pkg/urlenc" package it is being compiled against.
var _ urlenc.URLValuesUnmarshaler = (*SetPolicyResponse)(nil)
var _ urlenc.URLValuesUnmarshaler = (*SetPolicyRequest)(nil)
var _ urlenc.URLValuesUnmarshaler = (*GetPolicyRequest)(nil)
var _ urlenc.URLValuesUnmarshaler = (*GetPolicyResponse)(nil)
// SetPolicyResponse implement urlenc.URLValuesUnmarshaler.
func (m *SetPolicyResponse) UnmarshalURLValues(prefix string, values url.Values) error {
for key, vals := range values {
if len(vals) > 0 {
switch prefix + key {
case "data":
if len(vals) > 1 {
var list []interface{}
for _, text := range vals {
var v interface{}
err := json.NewDecoder(strings.NewReader(text)).Decode(&v)
						if err == nil {
list = append(list, v)
} else {
list = append(list, text)
}
}
val, _ := structpb.NewList(list)
m.Data = structpb.NewListValue(val)
} else {
var v interface{}
err := json.NewDecoder(strings.NewReader(vals[0])).Decode(&v)
					if err == nil {
val, _ := structpb.NewValue(v)
m.Data = val
} else {
m.Data = structpb.NewStringValue(vals[0])
}
}
}
}
}
return nil
}
// SetPolicyRequest implement urlenc.URLValuesUnmarshaler.
func (m *SetPolicyRequest) UnmarshalURLValues(prefix string, values url.Values) error {
for key, vals := range values {
if len(vals) > 0 {
switch prefix + key {
case "category":
m.Category = vals[0]
case "packageId":
m.PackageId = vals[0]
case "apiId":
m.ApiId = vals[0]
case "body":
val, err := base64.StdEncoding.DecodeString(vals[0])
if err != nil {
return err
}
m.Body = val
}
}
}
return nil
}
// GetPolicyRequest implement urlenc.URLValuesUnmarshaler.
func (m *GetPolicyRequest) UnmarshalURLValues(prefix string, values url.Values) error {
for key, vals := range values {
if len(vals) > 0 {
switch prefix + key {
case "category":
m.Category = vals[0]
case "packageId":
m.PackageId = vals[0]
case "apiId":
m.ApiId = vals[0]
}
}
}
return nil
}
// GetPolicyResponse implement urlenc.URLValuesUnmarshaler.
func (m *GetPolicyResponse) UnmarshalURLValues(prefix string, values url.Values) error {
for key, vals := range values {
if len(vals) > 0 {
switch prefix + key {
case "data":
if len(vals) > 1 {
var list []interface{}
for _, text := range vals {
var v interface{}
err := json.NewDecoder(strings.NewReader(text)).Decode(&v)
						if err == nil {
list = append(list, v)
} else {
list = append(list, text)
}
}
val, _ := structpb.NewList(list)
m.Data = structpb.NewListValue(val)
} else {
var v interface{}
err := json.NewDecoder(strings.NewReader(vals[0])).Decode(&v)
					if err == nil {
val, _ := structpb.NewValue(v)
m.Data = val
} else {
m.Data = structpb.NewStringValue(vals[0])
}
}
}
}
}
return nil
}
|
[
7
] |
package store
import (
"fmt"
"time"
"github.com/garyburd/redigo/redis"
)
const (
server = "0.0.0.0:6379"
password = "admin"
indexName = "name"
)
var (
objectTables = map[string]*ObjectConfig{}
)
func genID(table, id string) string {
return fmt.Sprintf("%s:%s", table, id)
}
func genIndexKey(table, index, key string) string {
return fmt.Sprintf("%s.%s:%s", table, index, key)
}
func genPrefixIndexKey(table, index, key string) string {
return fmt.Sprintf("%s.%s:*%s*", table, index, key)
}
// RedisStore ...
type RedisStore struct {
pool *redis.Pool
}
// NewRedisStore ...
func NewRedisStore() *RedisStore {
pool := &redis.Pool{
MaxIdle: 3,
IdleTimeout: 240 * time.Second,
Dial: func() (redis.Conn, error) {
c, err := redis.Dial("tcp", server)
if err != nil {
return nil, err
}
// if _, err := c.Do("AUTH", password); err != nil {
// c.Close()
// return nil, err
// }
return c, err
},
TestOnBorrow: func(c redis.Conn, t time.Time) error {
if time.Since(t) < time.Minute {
return nil
}
_, err := c.Do("PING")
return err
},
}
return &RedisStore{pool}
}
func registry(objectConfig *ObjectConfig) {
objectTables[objectConfig.Name] = objectConfig
}
func (rs *RedisStore) lookup(table, index, id string) Object {
conn := rs.pool.Get()
defer conn.Close()
return nil
}
func (rs *RedisStore) get(table, id string) Object {
conn := rs.pool.Get()
defer conn.Close()
return nil
}
func (rs *RedisStore) find(table string, appendResult func(Object), bys ...By) error {
conn := rs.pool.Get()
defer conn.Close()
var results []string
var err error
if len(bys) == 0 {
results, err = scanAll(conn, table)
} else {
results, err = scanWithBys(conn, table, bys...)
}
if err != nil {
return err
}
ids := make(map[string]struct{})
for _, result := range results {
if _, exists := ids[result]; exists {
continue
}
jsonData, err := redis.String(conn.Do("GET", result))
if err != nil {
return err
}
if oc, ok := objectTables[table]; ok {
o := oc.Objecter()
o.SetContent(jsonData)
appendResult(o)
}
}
return nil
}
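// scan walks the Redis SCAN cursor until it wraps back to 0, collecting every
// key that matches the given pattern.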
func scan(conn redis.Conn, match string) ([]string, error) {
var (
err error
cursor int64
items []string
results []string
values []interface{}
)
	if match == "" {
		match = ":"
	}
	for {
values, err = redis.Values(conn.Do("SCAN", cursor, "match", match))
if err != nil {
return nil, err
}
_, err = redis.Scan(values, &cursor, &items)
if err != nil {
return nil, err
}
results = append(results, items...)
if cursor == 0 {
break
}
}
return results, nil
}
func scanAll(conn redis.Conn, table string) ([]string, error) {
return scan(conn, fmt.Sprintf("%s:*", table))
}
func scanWithBys(conn redis.Conn, table string, bys ...By) ([]string, error) {
var (
results []string
items []string
err error
)
for _, by := range bys {
switch v := by.(type) {
case byName:
items, err = scan(conn, genPrefixIndexKey(table, indexName, string(v)))
if err != nil {
return nil, err
}
}
		for _, item := range items {
members, err := redis.Strings(conn.Do("SMEMBERS", item))
if err != nil {
return nil, err
}
results = append(results, members...)
}
}
return results, nil
}
func (rs *RedisStore) create(table string, o Object) error {
conn := rs.pool.Get()
defer conn.Close()
return nil
}
func (rs *RedisStore) update(table string, o Object) error {
conn := rs.pool.Get()
defer conn.Close()
return nil
}
func (rs *RedisStore) delete(table, id string) error {
conn := rs.pool.Get()
defer conn.Close()
return nil
}
|
[
7
] |
package alias
import (
"fmt"
"github.com/olekukonko/tablewriter"
"senko/app"
"strings"
"sync"
)
type Alias struct {
mutex sync.RWMutex
aliases map[app.GuildID]Mappings
}
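// Mappings is a guild's table of alias patterns to the command text they expand to.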
type Mappings map[string]string
func (c *Alias) OnRegister(store *app.Store) {
store.Link("alias.aliases", &c.aliases, make(map[app.GuildID]Mappings))
}
func (c *Alias) OnEvent(gateway *app.Gateway, event interface{}) error {
switch e := event.(type) {
case app.EventCommand:
// Adding an alias.
if vars, ok := e.Match("alias add <original> <replacement>"); ok {
c.mutex.Lock()
defer c.mutex.Unlock()
if c.aliases[e.GuildID] == nil {
c.aliases[e.GuildID] = make(Mappings)
}
c.aliases[e.GuildID][vars["original"]] = vars["replacement"]
return gateway.SendMessage(e.ChannelID, "Alias added.")
}
// Removing an alias.
if vars, ok := e.Match("alias remove <alias>"); ok {
c.mutex.Lock()
defer c.mutex.Unlock()
alias := vars["alias"]
aliases := c.aliases[e.GuildID]
if aliases == nil {
return fmt.Errorf("alias not found")
}
_, ok := aliases[alias]
if !ok {
return fmt.Errorf("alias not found")
}
delete(aliases, alias)
return gateway.SendMessage(e.ChannelID, fmt.Sprintf("Alias `%s` removed.", alias))
}
// Listing aliases.
if _, ok := e.Match("alias list"); ok {
c.mutex.RLock()
defer c.mutex.RUnlock()
aliases := c.aliases[e.GuildID]
if aliases == nil {
return gateway.SendMessage(e.ChannelID, "No aliases.")
}
builder := strings.Builder{}
table := tablewriter.NewWriter(&builder)
table.SetHeader([]string{"Alias", "Replacement"})
for k, v := range aliases {
table.Append([]string{k, v})
}
table.Render()
return gateway.SendMessage(e.ChannelID, fmt.Sprintf("```\n%s\n```", builder.String()))
}
// Processing aliases.
c.mutex.RLock()
defer c.mutex.RUnlock()
for original, replacement := range c.aliases[e.GuildID] {
if vars, ok := e.Match(original); ok {
newEvent := e
newEvent.Content = replacement
newEvent.Replace(vars)
gateway.BroadcastEvent(newEvent)
}
}
}
return nil
}
|
[
7
] |
// Copyright (c) 2022 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package care
import (
"context"
"time"
gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
gardencorev1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper"
resourcesv1alpha1 "github.com/gardener/gardener/pkg/apis/resources/v1alpha1"
"github.com/gardener/gardener/pkg/features"
gardenletfeatures "github.com/gardener/gardener/pkg/gardenlet/features"
"github.com/gardener/gardener/pkg/operation/botanist/component/clusterautoscaler"
"github.com/gardener/gardener/pkg/operation/botanist/component/clusteridentity"
"github.com/gardener/gardener/pkg/operation/botanist/component/dependencywatchdog"
"github.com/gardener/gardener/pkg/operation/botanist/component/etcd"
"github.com/gardener/gardener/pkg/operation/botanist/component/hvpa"
"github.com/gardener/gardener/pkg/operation/botanist/component/istio"
"github.com/gardener/gardener/pkg/operation/botanist/component/kubestatemetrics"
"github.com/gardener/gardener/pkg/operation/botanist/component/networkpolicies"
"github.com/gardener/gardener/pkg/operation/botanist/component/nginxingress"
"github.com/gardener/gardener/pkg/operation/botanist/component/seedadmissioncontroller"
"github.com/gardener/gardener/pkg/operation/botanist/component/seedsystem"
"github.com/gardener/gardener/pkg/operation/botanist/component/vpa"
kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var requiredManagedResourcesSeed = sets.NewString(
etcd.Druid,
seedadmissioncontroller.Name,
networkpolicies.ManagedResourceControlName,
clusteridentity.ManagedResourceControlName,
clusterautoscaler.ManagedResourceControlName,
kubestatemetrics.ManagedResourceName,
seedsystem.ManagedResourceName,
vpa.ManagedResourceControlName,
)
// SeedHealth contains information needed to execute health checks for seed.
type SeedHealth struct {
seed *gardencorev1beta1.Seed
seedClient client.Client
}
// NewHealthForSeed creates a new Health instance with the given parameters.
func NewHealthForSeed(seed *gardencorev1beta1.Seed, seedClient client.Client) *SeedHealth {
return &SeedHealth{
seedClient: seedClient,
seed: seed,
}
}
// CheckSeed conducts the health checks on all the given conditions.
func (h *SeedHealth) CheckSeed(ctx context.Context,
conditions []gardencorev1beta1.Condition,
thresholdMappings map[gardencorev1beta1.ConditionType]time.Duration) []gardencorev1beta1.Condition {
var systemComponentsCondition gardencorev1beta1.Condition
for _, cond := range conditions {
switch cond.Type {
case gardencorev1beta1.SeedSystemComponentsHealthy:
systemComponentsCondition = cond
}
}
checker := NewHealthChecker(thresholdMappings, nil, nil, nil, nil)
newSystemComponentsCondition, err := h.checkSeedSystemComponents(ctx, checker, systemComponentsCondition)
return []gardencorev1beta1.Condition{NewConditionOrError(systemComponentsCondition, newSystemComponentsCondition, err)}
}
func (h *SeedHealth) checkSeedSystemComponents(
ctx context.Context,
checker *HealthChecker,
condition gardencorev1beta1.Condition,
) (*gardencorev1beta1.Condition,
error) {
managedResources := requiredManagedResourcesSeed.List()
if gardenletfeatures.FeatureGate.Enabled(features.ManagedIstio) {
managedResources = append(managedResources, istio.ManagedResourceControlName)
}
if gardenletfeatures.FeatureGate.Enabled(features.HVPA) {
managedResources = append(managedResources, hvpa.ManagedResourceName)
}
if gardencorev1beta1helper.SeedSettingDependencyWatchdogEndpointEnabled(h.seed.Spec.Settings) {
managedResources = append(managedResources, dependencywatchdog.ManagedResourceDependencyWatchdogEndpoint)
}
if gardencorev1beta1helper.SeedSettingDependencyWatchdogProbeEnabled(h.seed.Spec.Settings) {
managedResources = append(managedResources, dependencywatchdog.ManagedResourceDependencyWatchdogProbe)
}
if gardencorev1beta1helper.SeedUsesNginxIngressController(h.seed) {
managedResources = append(managedResources, nginxingress.ManagedResourceName)
}
for _, name := range managedResources {
namespace := v1beta1constants.GardenNamespace
if name == istio.ManagedResourceControlName {
namespace = v1beta1constants.IstioSystemNamespace
}
mr := &resourcesv1alpha1.ManagedResource{}
if err := h.seedClient.Get(ctx, kutil.Key(namespace, name), mr); err != nil {
if apierrors.IsNotFound(err) {
exitCondition := checker.FailedCondition(condition, "ResourceNotFound", err.Error())
return &exitCondition, nil
}
return nil, err
}
if exitCondition := checkManagedResourceForSeed(checker, condition, mr); exitCondition != nil {
return exitCondition, nil
}
}
c := gardencorev1beta1helper.UpdatedCondition(condition, gardencorev1beta1.ConditionTrue, "SystemComponentsRunning", "All system components are healthy.")
return &c, nil
}
func checkManagedResourceForSeed(checker *HealthChecker, condition gardencorev1beta1.Condition, managedResource *resourcesv1alpha1.ManagedResource) *gardencorev1beta1.Condition {
conditionsToCheck := map[gardencorev1beta1.ConditionType]func(status gardencorev1beta1.ConditionStatus) bool{
resourcesv1alpha1.ResourcesApplied: defaultSuccessfulCheck(),
resourcesv1alpha1.ResourcesHealthy: defaultSuccessfulCheck(),
resourcesv1alpha1.ResourcesProgressing: resourcesNotProgressingCheck(),
}
return checker.checkManagedResourceConditions(condition, managedResource, conditionsToCheck)
}
|
[
7
] |
package main
import (
"encoding/json"
"flag"
"fmt"
"io"
"log"
"os"
"strconv"
"strings"
"time"
)
type Args struct {
Sample string
Margin float64
MinLength int
MaxDist int
Limit int
Penalty float64
Output string
Ercc bool
LogFilename string
Verbose bool
}
var args = Args{}
var logger *log.Logger
func init() {
log.SetFlags(0)
flag.StringVar(&args.Sample, "sample", "", "BAM file of the sample you want to filter (sorted by name, required)")
flag.Float64Var(&args.Margin, "margin", 1.0, "how much better sample needs to be matched")
flag.IntVar(&args.MinLength, "min-len", 60, "min length for an alignment")
flag.IntVar(&args.MaxDist, "max-edit-dist", 5, "max edit distance for a sample match")
flag.IntVar(&args.Limit, "limit", 0, "limit the number of sample reads considered (0 = no limit)")
flag.Float64Var(&args.Penalty, "edit-penalty", 2.0, "multiple for how to penalize edit distance")
flag.StringVar(&args.Output, "output", "", "output bam file (required)")
flag.StringVar(&args.LogFilename, "log", "", "write parameters and stats to a log file")
flag.BoolVar(&args.Verbose, "verbose", false, "keep a record of what happens to each read in the log (must give -log name)")
flag.BoolVar(&args.Ercc, "ercc", false, "exclude ERCC mappings from sample before filtering")
flag.Usage = func() {
log.Println("usage: contfilter [options] cont1.bam cont2.bam")
flag.PrintDefaults()
}
}
func benchmark(start time.Time, label string) {
elapsed := time.Since(start)
logger.Printf("%s took %s", label, elapsed)
}
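// extract pulls the aligned read length (taken from the sequence in SAM column
// 10) and the edit distance from the nM:i: tag, which this tool expects to
// find in column 15 of the alignment record.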
func extract(row []string) (int, int, error) {
if len(row) < 15 {
return 0, 0, fmt.Errorf("too few fields")
}
match_len := len(row[9])
edit_tag := row[14]
if edit_tag[:5] != "nM:i:" {
return 0, 0, fmt.Errorf("malformed edit distance tag: %s", edit_tag)
}
edit_dist, err := strconv.Atoi(edit_tag[5:])
if err != nil {
return 0, 0, fmt.Errorf("failed to parse edit dist: %s", edit_tag)
}
return match_len, edit_dist, nil
}
func OpenLogger() {
if args.LogFilename == "" {
logger = log.New(os.Stderr, "", 0)
} else {
logfile, err := os.Create(args.LogFilename)
if err != nil {
log.Fatal(err)
}
logger = log.New(logfile, "", 0)
}
}
func LogArguments() {
logger.Println("command:", strings.Join(os.Args, " "))
blob, err := json.MarshalIndent(args, "", " ")
if err != nil {
logger.Fatal("failed to marshal arguments")
}
logger.Println(string(blob))
}
func MatchesErcc(mate1, mate2 []string) bool {
return args.Ercc &&
(strings.Contains(mate1[2], "ERCC") || (mate2 != nil && strings.Contains(mate2[2], "ERCC")))
}
func main() {
	var kept_percent float64
	flag.Parse()
	// Open the logger before any validation messages are written; until then
	// the package-level logger is still nil.
	OpenLogger()
	contamination := flag.Args()
	startedAt := time.Now()
	if len(contamination) == 0 {
		logger.Println("must specify at least one contamination mapping BAM file")
		os.Exit(1)
	}
	if args.Output == "" {
		logger.Println("must specify -output file")
		os.Exit(1)
	}
	LogArguments()
scanner := BamScanner{}
if args.Sample == "" {
scanner.OpenStdin()
} else {
if err := scanner.OpenBam(args.Sample); err != nil {
logger.Fatal(err)
}
}
reads_found := make([]int, len(contamination))
reads_filtered := make([]int, len(contamination))
contScanners := make([]BamScanner, len(contamination))
rejected := make([]bool, len(contamination))
found := make([]bool, len(contamination))
for c := 0; c < len(contamination); c++ {
if err := contScanners[c].OpenBam(contamination[c]); err != nil {
logger.Fatal(err)
}
reads_found[c] = 0
reads_filtered[c] = 0
}
header, err := ReadBamHeader(args.Sample)
if err != nil {
logger.Fatal(err)
}
out := BamWriter{}
outfp, err := out.Open(args.Output)
if err != nil {
logger.Fatal(err)
}
io.WriteString(outfp, header)
reads_kept := 0
read_mates_kept := 0
total_reads := 0
total_read_mates := 0
ercc := 0
considered := 0
too_short := 0
too_diverged := 0
err = func() error {
defer scanner.Done()
defer benchmark(startedAt, "processing")
for {
if total_reads > 0 && total_reads%100000 == 0 {
kept_percent = float64(reads_kept) / float64(considered) * 100
logger.Printf("considered %d out of %d so far, kept %0.1f%%\n", considered, total_reads, kept_percent)
}
if args.Limit > 0 && args.Limit == total_reads {
return nil
}
			// Set up flags for outcomes with respect to each potential source of contamination.
			for c := range contamination {
rejected[c] = false
found[c] = false
}
// Read the first mate in a paired end run.
mate1, err := scanner.Record()
if err != nil {
return fmt.Errorf("failed to read from sample BAM: %v after %d lines", err, total_reads)
}
if scanner.Closed {
return nil
}
scanner.Ratchet()
read := mate1[0]
total_reads++
total_read_mates++
// See if we have the second mate of this pair.
mate2, err := scanner.Find(read)
if err != nil {
return fmt.Errorf("failed to read from sample BAM: %v after %d lines", err, total_reads)
}
if mate2 != nil {
scanner.Ratchet()
total_read_mates++
}
var mate1_len int
var mate1_edit_dist int
var mate2_len int
var mate2_edit_dist int
mate1_len, mate1_edit_dist, err = extract(mate1)
if err != nil {
return err
}
if args.Verbose {
logger.Println("found read", read, "mate 1:")
logger.Println(strings.Join(mate1, "\t"))
}
if mate2 != nil {
mate2_len, mate2_edit_dist, err = extract(mate2)
if err != nil {
return err
}
if args.Verbose {
logger.Println("found read", read, "mate 2:")
logger.Println(strings.Join(mate2, "\t"))
}
}
// Filter for ERCC if either mate is mapped to ERCC.
if MatchesErcc(mate1, mate2) {
ercc++
if args.Verbose {
logger.Println("ERCC, rejecting")
}
continue
}
if mate1_len < args.MinLength {
// If we don't have mate2 or if it's also too short, we mark this pair as too short.
if mate2 == nil || mate2_len < args.MinLength {
if args.Verbose {
logger.Println("too short, rejecting")
}
too_short++
continue
}
if args.Verbose {
logger.Println("promoting mate 2")
}
// Mate2 is okay, so we promote it to mate1, and forget mate2
mate1_len = mate2_len
mate1_edit_dist = mate2_edit_dist
mate1 = mate2
mate2 = nil
}
if mate2 != nil && mate2_len < args.MinLength {
// We have a mate2, but it doesn't meet the min length criteria, just forget it.
mate2 = nil
if args.Verbose {
logger.Println("mate 2 too short, forgetting")
}
}
			// We treat the edit distance filter the same way as the length filter.
if mate1_edit_dist > args.MaxDist {
if mate2 == nil || mate2_edit_dist > args.MaxDist {
too_diverged++
if args.Verbose {
logger.Println("too divergent, rejecting")
}
continue
}
if args.Verbose {
logger.Println("promothing mate 2")
}
// Mate2 is okay, so we promote it to mate1, and forget mate2
mate1_len = mate2_len
mate1_edit_dist = mate2_edit_dist
mate1 = mate2
mate2 = nil
}
if mate2 != nil && mate2_edit_dist > args.MaxDist {
// We have a mate2, but it doesn't meet the max edit distance criteria, just forget it.
mate2 = nil
if args.Verbose {
logger.Println("mate 2, too diverged, forgetting")
}
}
// If we get this far it means the read met the preliminary filtering criteria.
considered++
			// Compare against the best score for the read pair.
mate1_score := float64(mate1_len) - float64(mate1_edit_dist)*args.Penalty
var mate2_score float64
best_score := mate1_score
best_len := mate1_len
best_edit_dist := mate1_edit_dist
if mate2 != nil {
mate2_score = float64(mate2_len) - float64(mate2_edit_dist)*args.Penalty
if mate2_score > mate1_score {
best_score = mate2_score
best_len = mate2_len
best_edit_dist = mate2_edit_dist
if args.Verbose {
logger.Printf("mate 2 has better score (%f) than mate 1 (%f)\n", mate2_score, mate1_score)
}
}
}
// Reads in the sample BAM will be rejected if either mate in any of the
			// contamination BAM files maps better than in the sample BAM file.
was_rejected := false
for c := 0; c < len(contamination); c++ {
m := 0
for {
mate, err := contScanners[c].Find(read)
if err != nil {
logger.Fatal(err)
}
if mate == nil {
// No more alignments for this read in this contamination mapping
break
}
m++
if args.Verbose {
logger.Printf("found mapping %d for %s in %s\n", m, mate[0], contamination[c])
logger.Println(strings.Join(mate, "\t"))
}
if !found[c] {
found[c] = true
reads_found[c]++
}
length, edit_dist, err := extract(mate)
if err != nil {
logger.Fatalf("failed to read from %s: %v", contamination[c], err)
}
if length >= args.MinLength {
score := float64(length) - float64(edit_dist)*args.Penalty
if args.Verbose {
logger.Printf("mapping meets length criteria and has score %f\n", score)
}
if best_score <= score+args.Margin {
if args.Verbose {
logger.Println("mapping has better score")
}
if !rejected[c] {
reads_filtered[c]++
rejected[c] = true
was_rejected = true
if args.Verbose {
logger.Printf("read %s with length %d and edit distance %d was rejected "+
"with score %0.1f because in %s it had a score of %0.1f with length "+
"%d and edit distance %d\n",
read, best_len, best_edit_dist, best_score, contamination[c],
score, length, edit_dist)
}
}
} else {
if args.Verbose {
logger.Println("mapping has worse score")
}
}
}
}
}
if !was_rejected {
// This read is okay, output it to the output BAM file.
_, err := fmt.Fprintf(outfp, "%s\n", strings.Join(mate1, "\t"))
if err != nil {
return err
}
reads_kept++
read_mates_kept++
if mate2 != nil {
_, err := fmt.Fprintf(outfp, "%s\n", strings.Join(mate2, "\t"))
if err != nil {
return err
}
read_mates_kept++
}
if args.Verbose {
logger.Printf("kept read %s with length %d and edit distance %d and score %0.1f\n",
read, best_len, best_edit_dist, best_score)
}
}
}
}()
if err != nil {
logger.Fatal(err)
}
outfp.Close()
out.Wait()
logger.Println("Preliminary filtering:")
if args.Ercc {
erccPerc := float64(ercc) / float64(total_reads) * 100
logger.Printf("filtered out %d ERCC reads (%0.1f%%) before comparing to contamination\n", ercc, erccPerc)
}
shortPerc := float64(too_short) / float64(total_reads) * 100
logger.Printf("filtered out %d reads (%0.1f%%) becase their alignment was too short\n", too_short, shortPerc)
divergedPerc := float64(too_diverged) / float64(total_reads) * 100
logger.Printf("filtered out %d reads (%0.1f%%) becase they were too diverged\n", too_diverged, divergedPerc)
logger.Printf("%d reads remaining after preliminary filtering\n", considered)
logger.Println("Contamination filtering:")
for c, cont := range contamination {
n := reads_filtered[c]
perc := float64(n) / float64(considered) * 100
found_perc := float64(reads_found[c]) / float64(considered) * 100
logger.Printf("found %d of %d reads in %s (%0.1f%%)\n", reads_found[c], considered, cont, found_perc)
logger.Printf("rejected %d of %d reads from %s (%0.1f%%)\n", reads_filtered[c], considered, cont, perc)
}
kept_percent = float64(reads_kept) / float64(considered) * 100
total_percent := float64(reads_kept) / float64(total_reads) * 100
logger.Printf("kept %d of %d reads (%0.1f%%), which is %0.1f%% of the %d reads that met preliminary filtering\n",
reads_kept, total_reads, total_percent, kept_percent, considered)
total_mates_percent := float64(read_mates_kept) / float64(total_read_mates) * 100
logger.Printf("kept %d of %d read mates (%0.1f%%)", read_mates_kept, total_read_mates, total_mates_percent)
input_mates_per_pair := float64(total_read_mates) / float64(total_reads)
output_mates_per_pair := float64(read_mates_kept) / float64(reads_kept)
logger.Printf("observed %0.1f mates/read on the input end and %0.1f mates/read on the output end\n",
input_mates_per_pair, output_mates_per_pair)
logger.Println("machine parsable stats:")
stats := []int{
total_reads,
total_read_mates,
ercc,
too_short,
too_diverged,
considered,
reads_kept,
read_mates_kept,
}
stats = append(stats, reads_found...)
stats = append(stats, reads_filtered...)
statsStr := "stats"
for _, s := range stats {
statsStr += fmt.Sprintf("\t%d", s)
}
logger.Println(statsStr)
}
|
[
4
] |
// Code generated by mockery v1.0.0
package mocks
import common "github.com/uber/aresdb/metastore/common"
import mock "github.com/stretchr/testify/mock"
// TableSchemaReader is an autogenerated mock type for the TableSchemaReader type
type TableSchemaReader struct {
mock.Mock
}
// GetTable provides a mock function with given fields: name
func (_m *TableSchemaReader) GetTable(name string) (*common.Table, error) {
ret := _m.Called(name)
var r0 *common.Table
if rf, ok := ret.Get(0).(func(string) *common.Table); ok {
r0 = rf(name)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*common.Table)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(name)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ListTables provides a mock function with given fields:
func (_m *TableSchemaReader) ListTables() ([]string, error) {
ret := _m.Called()
var r0 []string
if rf, ok := ret.Get(0).(func() []string); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]string)
}
}
var r1 error
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
|
[
4
] |
package core
import (
"context"
"errors"
"fmt"
"net/url"
"strings"
"github.com/iwataka/anaconda"
"github.com/iwataka/mybot/data"
"github.com/iwataka/mybot/models"
"github.com/iwataka/mybot/oauth"
"github.com/iwataka/mybot/utils"
"github.com/slack-go/slack"
)
// TwitterAPI is a wrapper of anaconda.TwitterApi.
type TwitterAPI struct {
api models.TwitterAPI
config Config
cache data.Cache
self *anaconda.User
}
// NewTwitterAPIWithAuth takes a user's authentication, cache and configuration and
// returns TwitterAPI instance for that user
func NewTwitterAPIWithAuth(auth oauth.OAuthCreds, config Config, cache data.Cache) *TwitterAPI {
at, ats := auth.GetCreds()
var api models.TwitterAPI
if len(at) > 0 && len(ats) > 0 {
api = anaconda.NewTwitterApi(at, ats)
}
return NewTwitterAPI(api, config, cache)
}
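// NewTwitterAPI wraps the given Twitter client together with the bot
// configuration and cache.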
func NewTwitterAPI(api models.TwitterAPI, config Config, cache data.Cache) *TwitterAPI {
return &TwitterAPI{api, config, cache, nil}
}
func (a *TwitterAPI) BaseAPI() models.TwitterAPI {
return a.api
}
func (a *TwitterAPI) VerifyCredentials() (bool, error) {
if a.Enabled() {
return a.api.VerifyCredentials()
}
return false, fmt.Errorf("Twitter API is not available")
}
func (a *TwitterAPI) PostSlackMsg(text string, atts []slack.Attachment) (anaconda.Tweet, error) {
return a.api.PostTweet(text, nil)
}
// GetSelf gets the authenticated user's information and stores it as a cache,
// then returns it.
func (a *TwitterAPI) GetSelf() (anaconda.User, error) {
if a.self != nil {
return *a.self, nil
}
if a.Enabled() {
self, err := a.api.GetSelf(nil)
if err != nil {
return anaconda.User{}, utils.WithStack(err)
}
a.self = &self
return self, nil
}
return anaconda.User{}, fmt.Errorf("Twitter API is not available")
}
func (a *TwitterAPI) Enabled() bool {
return a.api != nil
}
// CheckUser checks whether user matches: the authenticated user's screen name
// when allowSelf is true, or any entry in users.
func (a *TwitterAPI) CheckUser(user string, allowSelf bool, users []string) (bool, error) {
if allowSelf {
self, err := a.GetSelf()
if err != nil {
return false, utils.WithStack(err)
}
if user == self.ScreenName {
return true, nil
}
}
for _, u := range users {
if user == u {
return true, nil
}
}
return false, nil
}
// ProcessFavorites gets tweets from the specified user's favorite list and do
// action for tweets filtered by c.
func (a *TwitterAPI) ProcessFavorites(
name string,
v url.Values,
c TweetChecker,
vision VisionMatcher,
lang LanguageMatcher,
slack *SlackAPI,
action data.Action,
) ([]anaconda.Tweet, []data.Action, error) {
latestID := a.cache.GetLatestFavoriteID(name)
v.Set("screen_name", name)
if latestID > 0 {
v.Set("since_id", fmt.Sprintf("%d", latestID))
} else {
// If the latest favorite ID doesn't exist, this fetches just
// the latest tweet and store that ID.
v.Set("count", "1")
}
tweets, err := a.api.GetFavorites(v)
if err != nil {
return nil, nil, utils.WithStack(err)
}
var pp TwitterPostProcessor
if c.ShouldRepeat() {
pp = &TwitterPostProcessorEach{action, a.cache}
} else {
pp = &TwitterPostProcessorTop{action, name, a.cache}
}
processedTweets, processedActions, err := a.processTweets(tweets, c, vision, lang, slack, action, pp)
if err != nil {
return nil, nil, utils.WithStack(err)
}
return processedTweets, processedActions, nil
}
// ProcessSearch gets tweets from search result by the specified query and do
// action for tweets filtered by c.
func (a *TwitterAPI) ProcessSearch(
query string,
v url.Values,
c TweetChecker,
vision VisionMatcher,
lang LanguageMatcher,
slack *SlackAPI,
action data.Action,
) ([]anaconda.Tweet, []data.Action, error) {
res, err := a.GetSearch(query, v)
if err != nil {
return nil, nil, utils.WithStack(err)
}
pp := &TwitterPostProcessorEach{action, a.cache}
processedTweets, processedActions, err := a.processTweets(res.Statuses, c, vision, lang, slack, action, pp)
if err != nil {
return nil, nil, utils.WithStack(err)
}
return processedTweets, processedActions, utils.WithStack(err)
}
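// TwitterPostProcessor records per-tweet bookkeeping after each tweet is
// checked: both variants remember which actions were applied to matched
// tweets, and the Top variant additionally tracks the latest tweet ID seen
// per screen name.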
type (
TwitterPostProcessor interface {
Process(anaconda.Tweet, bool) error
}
TwitterPostProcessorTop struct {
action data.Action
screenName string
cache data.Cache
}
TwitterPostProcessorEach struct {
action data.Action
cache data.Cache
}
)
func (p *TwitterPostProcessorTop) Process(t anaconda.Tweet, match bool) error {
id := p.cache.GetLatestTweetID(p.screenName)
if t.Id > id {
p.cache.SetLatestTweetID(p.screenName, t.Id)
}
if match {
ac := p.cache.GetTweetAction(t.Id)
p.cache.SetTweetAction(t.Id, ac.Add(p.action))
}
return nil
}
func (p *TwitterPostProcessorEach) Process(t anaconda.Tweet, match bool) error {
if match {
ac := p.cache.GetTweetAction(t.Id)
p.cache.SetTweetAction(t.Id, ac.Add(p.action))
}
return nil
}
func (a *TwitterAPI) processTweets(
tweets []anaconda.Tweet,
c TweetChecker,
v VisionMatcher,
l LanguageMatcher,
slack *SlackAPI,
action data.Action,
pp TwitterPostProcessor,
) ([]anaconda.Tweet, []data.Action, error) {
processedTweets := []anaconda.Tweet{}
processedActions := []data.Action{}
// From the oldest to the newest
for i := len(tweets) - 1; i >= 0; i-- {
t := tweets[i]
match, err := c.CheckTweet(t, v, l, a.cache)
if err != nil {
return nil, nil, utils.WithStack(err)
}
if match {
done := a.cache.GetTweetAction(t.Id)
undone := action.Sub(done)
err = a.processTweet(t, undone, slack)
if err != nil {
return nil, nil, utils.WithStack(err)
}
processedTweets = append(processedTweets, t)
processedActions = append(processedActions, undone)
}
err = pp.Process(t, match)
if err != nil {
return nil, nil, utils.WithStack(err)
}
}
return processedTweets, processedActions, nil
}
func (a *TwitterAPI) processTweet(
t anaconda.Tweet,
action data.Action,
slack *SlackAPI,
) error {
if action.Twitter.Retweet && !t.Retweeted {
var id int64
if t.RetweetedStatus == nil {
id = t.Id
} else {
id = t.RetweetedStatus.Id
}
_, err := a.api.Retweet(id, false)
if CheckTwitterError(err) {
return utils.WithStack(err)
}
}
if action.Twitter.Favorite && !t.Favorited {
id := t.Id
_, err := a.api.Favorite(id)
if CheckTwitterError(err) {
return utils.WithStack(err)
}
}
for _, col := range action.Twitter.Collections {
err := a.collectTweet(t, col)
if CheckTwitterError(err) {
return utils.WithStack(err)
}
}
if slack.Enabled() {
for _, ch := range action.Slack.Channels {
err := slack.PostTweet(ch, t)
if CheckSlackError(err) {
return utils.WithStack(err)
}
}
}
return nil
}
func (a *TwitterAPI) collectTweet(tweet anaconda.Tweet, collection string) error {
self, err := a.GetSelf()
if err != nil {
return utils.WithStack(err)
}
list, err := a.api.GetCollectionListByUserId(self.Id, nil)
if err != nil {
return utils.WithStack(err)
}
exists := false
var id string
for i, t := range list.Objects.Timelines {
if collection == t.Name {
exists = true
id = i
break
}
}
if !exists {
col, err := a.api.CreateCollection(collection, nil)
if err != nil {
return utils.WithStack(err)
}
id = col.Response.TimelineId
}
_, err = a.api.AddEntryToCollection(id, tweet.Id, nil)
if err != nil {
return utils.WithStack(err)
}
return nil
}
func (a *TwitterAPI) GetSearch(query string, url url.Values) (anaconda.SearchResponse, error) {
return a.api.GetSearch(query, url)
}
func (a *TwitterAPI) GetUserSearch(searchTerm string, v url.Values) ([]anaconda.User, error) {
return a.api.GetUserSearch(searchTerm, v)
}
func (a *TwitterAPI) GetFavorites(vals url.Values) ([]anaconda.Tweet, error) {
return a.api.GetFavorites(vals)
}
type TwitterUserListener struct {
stream *anaconda.Stream
api *TwitterAPI
vis VisionMatcher
lang LanguageMatcher
slack *SlackAPI
cache data.Cache
}
// ListenUsers listens to the timelines of the configured Twitter users
func (a *TwitterAPI) ListenUsers(
v url.Values,
vis VisionMatcher,
lang LanguageMatcher,
slack *SlackAPI,
cache data.Cache,
) (*TwitterUserListener, error) {
if v == nil {
v = url.Values{}
}
names := a.config.GetTwitterScreenNames()
usernames := strings.Join(names, ",")
if len(usernames) == 0 {
return nil, errors.New("No user specified")
} else {
users, err := a.api.GetUsersLookup(usernames, nil)
if err != nil {
return nil, utils.WithStack(err)
}
userids := []string{}
for _, u := range users {
userids = append(userids, u.IdStr)
}
v.Set("follow", strings.Join(userids, ","))
stream := a.api.PublicStreamFilter(v)
return &TwitterUserListener{stream, a, vis, lang, slack, cache}, nil
}
}
func (l *TwitterUserListener) Listen(ctx context.Context, outChan chan<- interface{}) error {
for {
select {
case msg := <-l.stream.C:
err := l.processMessage(msg, outChan)
if err != nil {
return utils.WithStack(err)
}
case <-ctx.Done():
return nil
}
}
}
func (l *TwitterUserListener) processMessage(msg interface{}, outChan chan<- interface{}) error {
switch m := msg.(type) {
case anaconda.Tweet:
name := m.User.ScreenName
timelines := l.api.config.GetTwitterTimelinesByScreenName(name)
if len(timelines) != 0 {
outChan <- NewReceivedEvent(TwitterEventType, "tweet", m)
}
for _, timeline := range timelines {
if !checkTweetByTimelineConfig(m, timeline) {
continue
}
match, err := timeline.Filter.CheckTweet(m, l.vis, l.lang, l.cache)
if err != nil {
return utils.WithStack(err)
}
if !match {
continue
}
done := l.api.cache.GetTweetAction(m.Id)
undone := timeline.Action.Sub(done)
if err := l.api.processTweet(m, undone, l.slack); err != nil {
return utils.WithStack(err)
}
outChan <- NewActionEvent(undone, m)
l.api.cache.SetLatestTweetID(name, m.Id)
}
err := l.api.cache.Save()
if err != nil {
return utils.WithStack(err)
}
}
return nil
}
func (l *TwitterUserListener) Stop() {
l.stream.Stop()
}
func checkTweetByTimelineConfig(t anaconda.Tweet, c TimelineConfig) bool {
if c.ExcludeReplies && t.InReplyToScreenName != "" {
return false
}
if !c.IncludeRts && t.RetweetedStatus != nil {
return false
}
return true
}
type TwitterDMListener struct {
stream *anaconda.Stream
api *TwitterAPI
}
// ListenMyself listens to the authenticated user by Twitter's User Streaming
// API and reacts with direct messages.
func (a *TwitterAPI) ListenMyself(v url.Values) (*TwitterDMListener, error) {
ok, err := a.VerifyCredentials()
if err != nil {
return nil, utils.WithStack(err)
} else if !ok {
return nil, errors.New("Twitter Account Verification failed")
}
stream := a.api.UserStream(v)
return &TwitterDMListener{stream, a}, nil
}
func (l *TwitterDMListener) Listen(ctx context.Context, outChan chan<- interface{}) error {
// TODO: The Twitter User Streams API has been retired, so this feature is temporarily disabled.
// It will be removed completely later.
// https://developer.twitter.com/en/docs/twitter-api/enterprise/account-activity-api/migration/us-ss-migration-guide
return nil
// for {
// select {
// case msg := <-l.stream.C:
// switch c := msg.(type) {
// case anaconda.DirectMessage:
// outChan <- NewReceivedEvent(TwitterEventType, "DM", c)
// // TODO: Handle direct messages in the same way as the other sources
// id := l.api.cache.GetLatestDMID()
// if id < c.Id {
// l.api.cache.SetLatestDMID(c.Id)
// }
// err := l.api.cache.Save()
// if err != nil {
// return utils.WithStack(err)
// }
// }
// case <-ctx.Done():
// return nil
// }
// }
}
func (l *TwitterDMListener) Stop() {
l.stream.Stop()
}
// TweetChecker checks whether the specified tweet is acceptable, meaning that it
// should be retweeted.
type TweetChecker interface {
CheckTweet(t anaconda.Tweet, v VisionMatcher, l LanguageMatcher, c data.Cache) (bool, error)
ShouldRepeat() bool
}
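// Summary comment added for readability (the behavior is defined by the switch below):
// CheckTwitterError reports whether err should be treated as a real failure.
// It returns false for nil errors, for the benign Twitter error codes listed below
// (over capacity, internal error, already favorited, duplicate status, already retweeted),
// for 5xx API responses, and for API errors whose decoded inner errors all have
// benign codes; anything else returns true.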
func CheckTwitterError(err error) bool {
if err == nil {
return false
}
switch twitterErr := err.(type) {
case *anaconda.TwitterError:
// https://developer.twitter.com/ja/docs/basics/response-codes
// 130: Over capacity
// 131: Internal error
// 139: You have already favorited this status.
// 187: The status text has already been Tweeted by the authenticated account.
// 327: You have already retweeted this tweet.
switch twitterErr.Code {
case 130, 131, 139, 187, 327:
return false
}
case anaconda.TwitterError:
return CheckTwitterError(&twitterErr)
case *anaconda.ApiError:
code := twitterErr.StatusCode
// Status code 5xx means server error
if code >= 500 && code < 600 {
return false
}
for _, e := range twitterErr.Decoded.Errors {
if CheckTwitterError(e) {
return true
}
}
return false
case anaconda.ApiError:
return CheckTwitterError(&twitterErr)
}
return true
}
func TwitterStatusURL(t anaconda.Tweet) string {
srcFmt := "https://twitter.com/%s/status/%s"
return fmt.Sprintf(srcFmt, t.User.IdStr, t.IdStr)
}
|
[
7
] |
package server
import "fmt"
type pubArg struct {
subject []byte
reply []byte
sid []byte
azb []byte
size int
}
type parseState struct {
state int
as int
drop int
pa pubArg
argBuf []byte
msgBuf []byte
scratch [MAX_CONTROL_LINE_SIZE]byte
}
// The whole protocol is driven by this struct, which records the parser state machine.
// The state field holds the current state and works somewhat like a cursor. For example, for the INFO protocol:
// INFO {["option_name":option_value],...}\r\n
// when the letter "I" is read, the parser switches to the OP_I state.
// After the space following "INFO" is read, it enters the INFO_ARG state; from then on everything
// is treated as arguments and recorded into argBuf, until the trailing "\r\n" ends the read of this complete message.
// Subscribe (S) first, then publish (P).
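// Illustrative trace (not part of the original comment): feeding the bytes
// "SUB foo 1\r\n" into parse() below walks OP_START -> OP_S -> OP_SU -> OP_SUB
// -> OP_SUB_SPC, collects "foo 1" while in SUB_ARG, sets drop=1 on '\r', and on
// '\n' calls processSub with the argument slice before resetting to OP_START.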
const (
OP_START = iota
OP_PLUS
OP_PLUS_O
OP_PLUS_OK
OP_MINUS
OP_MINUS_E
OP_MINUS_ER
OP_MINUS_ERR
OP_MINUS_ERR_SPC
MINUS_ERR_ARG
OP_C
OP_CO
OP_CON
OP_CONN
OP_CONNE
OP_CONNEC
OP_CONNECT
CONNECT_ARG
OP_P
OP_PU
OP_PUB
OP_PUB_SPC
PUB_ARG
OP_PI
OP_PIN
OP_PING
OP_PO
OP_PON
OP_PONG
MSG_PAYLOAD
MSG_END
OP_S
OP_SU
OP_SUB
OP_SUB_SPC
SUB_ARG
OP_U
OP_UN
OP_UNS
OP_UNSU
OP_UNSUB
OP_UNSUB_SPC
UNSUB_ARG
OP_M
OP_MS
OP_MSG
OP_MSG_SPC
MSG_ARG
OP_I
OP_IN
OP_INF
OP_INFO
INFO_ARG
)
// buf is the raw message content
func (c *client) parse(buf []byte) error {
var i int
var b byte
// Snapshot this, and reset when we receive a proper CONNECT if needed
authSet := c.isAuthTimerSet()
for i = 0; i < len(buf); i++ {
b = buf[i]
switch c.state {
// In the OP_START state, first check whether the first letter is 'C'/'c'
// (both cases are accepted); the first message a connecting client sends must be "CONNECT".
case OP_START:
if b != 'C' && b != 'c' && authSet {
goto authErr
}
switch b {
case 'C', 'c':
c.state = OP_C
case 'I', 'i':
c.state = OP_I
case 'S', 's':
c.state = OP_S
case 'P', 'p':
c.state = OP_P
case 'U', 'u':
c.state = OP_U
case 'M', 'm':
if c.typ == CLIENT {
goto parseErr
} else {
c.state = OP_M
}
case '+':
c.state = OP_PLUS
case '-':
c.state = OP_MINUS
default:
goto parseErr
}
// Handle the CONNECT command, C -> S
case OP_C:
switch b {
case 'O', 'o':
c.state = OP_CO
default:
goto parseErr
}
case OP_CO:
switch b {
case 'N', 'n':
c.state = OP_CON
default:
goto parseErr
}
case OP_CON:
switch b {
case 'N', 'n':
c.state = OP_CONN
default:
goto parseErr
}
case OP_CONN:
switch b {
case 'E', 'e':
c.state = OP_CONNE
default:
goto parseErr
}
case OP_CONNE:
switch b {
case 'C', 'c':
c.state = OP_CONNEC
default:
goto parseErr
}
case OP_CONNEC:
switch b {
case 'T', 't':
c.state = OP_CONNECT
default:
goto parseErr
}
case OP_CONNECT:
switch b {
case ' ', '\t':
// stay in the OP_CONNECT state and skip the separator
continue
default:
c.state = CONNECT_ARG
c.as = i
}
case CONNECT_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
c.argBuf = nil
} else {
arg = buf[c.as : i-c.drop] // slice out the argument bytes
}
// handle the CONNECT logic
if err := c.processConnect(arg); err != nil {
return err
}
c.drop, c.state = 0, OP_START
// Reset notions on authSet
authSet = c.isAuthTimerSet()
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
// Handle the INFO command, S -> C
case OP_I:
switch b {
case 'N', 'n':
c.state = OP_IN
default:
goto parseErr
}
case OP_IN:
switch b {
case 'F', 'f':
c.state = OP_INF
default:
goto parseErr
}
case OP_INF:
switch b {
case 'O', 'o':
c.state = OP_INFO
default:
goto parseErr
}
case OP_INFO:
switch b {
case ' ', '\t':
continue
default:
c.state = INFO_ARG
c.as = i
}
case INFO_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
c.argBuf = nil
} else {
arg = buf[c.as : i-c.drop]
}
if err := c.processInfo(arg); err != nil {
return err
}
c.drop, c.as, c.state = 0, i+1, OP_START
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
case OP_P:
switch b {
case 'U', 'u':
c.state = OP_PU
case 'I', 'i':
c.state = OP_PI
case 'O', 'o':
c.state = OP_PO
default:
goto parseErr
}
// Handle the SUB command: the client subscribes to a subject on the server, C -> S
case OP_S:
switch b {
case 'U', 'u':
c.state = OP_SU
default:
goto parseErr
}
case OP_SU:
switch b {
case 'B', 'b':
c.state = OP_SUB
default:
goto parseErr
}
case OP_SUB:
switch b {
case ' ', '\t':
c.state = OP_SUB_SPC
default:
goto parseErr
}
case OP_SUB_SPC:
switch b {
case ' ', '\t':
continue
default:
c.state = SUB_ARG
c.as = i
}
case SUB_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
c.argBuf = nil
} else {
arg = buf[c.as : i-c.drop]
}
if err := c.processSub(arg); err != nil {
return err
}
c.drop, c.as, c.state = 0, i+1, OP_START
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
// Handle the UNSUB command: the client cancels a previous subscription, C -> S
case OP_U:
switch b {
case 'N', 'n':
c.state = OP_UN
default:
goto parseErr
}
case OP_UN:
switch b {
case 'S', 's':
c.state = OP_UNS
default:
goto parseErr
}
case OP_UNS:
switch b {
case 'U', 'u':
c.state = OP_UNSU
default:
goto parseErr
}
case OP_UNSU:
switch b {
case 'B', 'b':
c.state = OP_UNSUB
default:
goto parseErr
}
case OP_UNSUB:
switch b {
case ' ', '\t':
c.state = OP_UNSUB_SPC
default:
goto parseErr
}
case OP_UNSUB_SPC:
switch b {
case ' ', '\t':
continue
default:
c.state = UNSUB_ARG
c.as = i
}
case UNSUB_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
c.argBuf = nil
} else {
arg = buf[c.as : i-c.drop]
}
if err := c.processUnsub(arg); err != nil {
return err
}
c.drop, c.as, c.state = 0, i+1, OP_START
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
// Handle the PUB command: the client publishes a message to the server, C -> S
case OP_PU:
switch b {
case 'B', 'b':
c.state = OP_PUB
default:
goto parseErr
}
case OP_PUB:
switch b {
case ' ', '\t':
c.state = OP_PUB_SPC
default:
goto parseErr
}
case OP_PUB_SPC:
switch b {
case ' ', '\t':
// stay in the current state and keep skipping the extra whitespace
continue
default:
c.state = PUB_ARG
c.as = i
}
case PUB_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
} else {
arg = buf[c.as : i-c.drop]
}
if err := c.processPub(arg); err != nil {
return err
}
c.drop, c.as, c.state = 0, i+1, MSG_PAYLOAD
// If we don't have a saved buffer, jump ahead with the index.
// If this overruns what is left, we fall out and process the split buffer.
if c.msgBuf == nil {
// TODO
}
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
// Handle the MSG command: the server delivers subscribed content to the client, S -> C
case OP_M:
switch b {
case 'S', 's':
c.state = OP_MS
default:
goto parseErr
}
case OP_MS:
switch b {
case 'G', 'g':
c.state = OP_MSG
default:
goto parseErr
}
case OP_MSG:
switch b {
case ' ', '\t':
c.state = OP_MSG_SPC
default:
goto parseErr
}
case OP_MSG_SPC:
switch b {
case ' ', '\t':
continue
default:
c.state = MSG_ARG
c.as = i
}
case MSG_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
} else {
arg = buf[c.as : i-c.drop]
}
if err := c.processMsgArgs(arg); err != nil {
return err
}
c.drop, c.as, c.state = 0, i+1, MSG_PAYLOAD
// jump ahead with the index. If this overruns
// what is left we fall out and process split buffer.
// TODO
i = c.as + c.pa.size - 1
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
// Handle the PING command: keep-alive message, S <-> C
case OP_PI:
switch b {
case 'N', 'n':
c.state = OP_PIN
default:
goto parseErr
}
case OP_PIN:
switch b {
case 'G', 'g':
c.state = OP_PING
default:
goto parseErr
}
case OP_PING:
switch b {
case '\n':
c.processPing()
c.drop, c.state = 0, OP_START
}
// Handle the PONG command: keep-alive response, S <-> C
case OP_PO:
switch b {
case 'N', 'n':
c.state = OP_PON
default:
goto parseErr
}
case OP_PON:
switch b {
case 'G', 'g':
c.state = OP_PONG
default:
goto parseErr
}
case OP_PONG:
switch b {
case '\n':
c.processPong()
c.drop, c.state = 0, OP_START
}
case OP_PLUS:
switch b {
case 'O', 'o':
c.state = OP_PLUS_O
default:
goto parseErr
}
case OP_PLUS_O:
switch b {
case 'K', 'k':
c.state = OP_PLUS_OK
default:
goto parseErr
}
case OP_PLUS_OK:
switch b {
case '\n':
c.drop, c.state = 0, OP_START
}
case OP_MINUS:
switch b {
case 'E', 'e':
c.state = OP_MINUS_E
default:
goto parseErr
}
case OP_MINUS_E:
switch b {
case 'R', 'r':
c.state = OP_MINUS_ER
default:
goto parseErr
}
case OP_MINUS_ER:
switch b {
case 'R', 'r':
c.state = OP_MINUS_ERR
default:
goto parseErr
}
case OP_MINUS_ERR:
switch b {
case ' ', '\t':
c.state = OP_MINUS_ERR_SPC
default:
goto parseErr
}
case OP_MINUS_ERR_SPC:
switch b {
case ' ', '\t':
continue
default:
c.state = MINUS_ERR_ARG
c.as = i
}
case MINUS_ERR_ARG:
switch b {
case '\r':
c.drop = 1
case '\n':
var arg []byte
if c.argBuf != nil {
arg = c.argBuf
c.argBuf = nil
} else {
arg = buf[c.as : i-c.drop]
}
c.processErr(string(arg))
c.drop, c.as, c.state = 0, i+1, OP_START
default:
if c.argBuf != nil {
c.argBuf = append(c.argBuf, b)
}
}
default:
goto parseErr
}
}
// end of the parse loop
return nil
authErr:
c.authViolation()
return ErrAuthorization
parseErr:
c.sendErr("Unknown Protocol Operation")
snip := protoSnippet(i, buf)
err := fmt.Errorf("%s Parser ERROR, state=%d, i=%d: proto='%s...'",
c.typeString(), c.state, i, snip)
return err
}
|
[
7
] |
package app
import (
"context"
"errors"
"net/http"
"strings"
"github.com/go-chi/chi/v5"
"github.com/go-chi/render"
validation "github.com/go-ozzo/ozzo-validation/v4"
"github.com/sajib-hassan/warden/pkg/auth/authorize"
"github.com/sajib-hassan/warden/pkg/auth/jwt"
)
// The list of error types returned from user resource.
var (
ErrUserValidation = errors.New("user validation error")
)
// UserResource implements user management handler.
type UserResource struct {
Store UserStore
}
// NewUserResource creates and returns a user resource.
func NewUserResource(store UserStore) *UserResource {
return &UserResource{
Store: store,
}
}
func (rs *UserResource) router() *chi.Mux {
r := chi.NewRouter()
r.Use(rs.userCtx)
r.Get("/", rs.get)
r.Put("/", rs.update)
r.Delete("/", rs.delete)
r.Route("/token/{tokenID}", func(r chi.Router) {
r.Put("/", rs.updateToken)
r.Delete("/", rs.deleteToken)
})
return r
}
func (rs *UserResource) userCtx(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
claims := jwt.ClaimsFromCtx(r.Context())
log().WithField("user_id", claims.ID)
user, err := rs.Store.Get(claims.ID)
if err != nil {
// user deleted while access token still valid
render.Render(w, r, ErrUnauthorized)
return
}
ctx := context.WithValue(r.Context(), ctxUser, user)
next.ServeHTTP(w, r.WithContext(ctx))
})
}
type userRequest struct {
*authorize.User
// override protected data here, although it is not strictly necessary
// as we also limit the updated database columns in the store
ProtectedID int `json:"id"`
ProtectedActive bool `json:"active"`
ProtectedRoles []string `json:"roles"`
}
func (d *userRequest) Bind(r *http.Request) error {
// d.ProtectedActive = true
// d.ProtectedRoles = []string{}
return nil
}
type userResponse struct {
*authorize.User
}
func newUserResponse(a *authorize.User) *userResponse {
resp := &userResponse{User: a}
return resp
}
func (rs *UserResource) get(w http.ResponseWriter, r *http.Request) {
acc := r.Context().Value(ctxUser).(*authorize.User)
render.Respond(w, r, newUserResponse(acc))
}
func (rs *UserResource) update(w http.ResponseWriter, r *http.Request) {
acc := r.Context().Value(ctxUser).(*authorize.User)
data := &userRequest{User: acc}
if err := render.Bind(r, data); err != nil {
render.Render(w, r, ErrInvalidRequest(err))
return
}
if err := rs.Store.Update(acc); err != nil {
switch err.(type) {
case validation.Errors:
render.Render(w, r, ErrValidation(ErrUserValidation, err.(validation.Errors)))
return
}
render.Render(w, r, ErrRender(err))
return
}
render.Respond(w, r, newUserResponse(acc))
}
func (rs *UserResource) delete(w http.ResponseWriter, r *http.Request) {
acc := r.Context().Value(ctxUser).(*authorize.User)
if err := rs.Store.Delete(acc); err != nil {
render.Render(w, r, ErrRender(err))
return
}
render.Respond(w, r, http.NoBody)
}
type tokenRequest struct {
Identifier string
ProtectedID int `json:"id"`
}
func (d *tokenRequest) Bind(r *http.Request) error {
d.Identifier = strings.TrimSpace(d.Identifier)
return nil
}
func (rs *UserResource) updateToken(w http.ResponseWriter, r *http.Request) {
id := chi.URLParam(r, "tokenID")
data := &tokenRequest{}
if err := render.Bind(r, data); err != nil {
render.Render(w, r, ErrInvalidRequest(err))
return
}
acc := r.Context().Value(ctxUser).(*authorize.User)
for _, t := range acc.Token {
if t.ID.Hex() == id {
jt := &jwt.Token{
Identifier: data.Identifier,
}
jt.SetID(t.ID)
if err := rs.Store.UpdateToken(jt); err != nil {
render.Render(w, r, ErrInvalidRequest(err))
return
}
}
}
render.Respond(w, r, http.NoBody)
}
func (rs *UserResource) deleteToken(w http.ResponseWriter, r *http.Request) {
id := chi.URLParam(r, "tokenID")
acc := r.Context().Value(ctxUser).(*authorize.User)
for _, t := range acc.Token {
if t.ID.Hex() == id {
jt := &jwt.Token{}
jt.SetID(t.ID)
rs.Store.DeleteToken(jt)
}
}
render.Respond(w, r, http.NoBody)
}
|
[
7
] |
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
context "context"
mock "github.com/stretchr/testify/mock"
)
// Traceable is an autogenerated mock type for the Traceable type
type Traceable struct {
mock.Mock
}
// SetTraceID provides a mock function with given fields: ctx, traceID
func (_m *Traceable) SetTraceID(ctx context.Context, traceID string) context.Context {
ret := _m.Called(ctx, traceID)
var r0 context.Context
if rf, ok := ret.Get(0).(func(context.Context, string) context.Context); ok {
r0 = rf(ctx, traceID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(context.Context)
}
}
return r0
}
// TraceID provides a mock function with given fields: ctx
func (_m *Traceable) TraceID(ctx context.Context) string {
ret := _m.Called(ctx)
var r0 string
if rf, ok := ret.Get(0).(func(context.Context) string); ok {
r0 = rf(ctx)
} else {
r0 = ret.Get(0).(string)
}
return r0
}
|
[
4
] |
// --------
// fedex.go ::: fedex api
// --------
// Copyright (c) 2013-Present, Scott Cagno. All rights reserved.
// This source code is governed by a BSD-style license.
package fedex
import (
"bytes"
"encoding/xml"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"time"
)
var (
// Production Account Info
Acct = Account{
ApiURI: "https://ws.fedex.com:443/web-services",
DevKey: "BltV0ugnEcMK7KdK",
Password: "0yi3ftgSTxvB4TeAxfFcHJg0w",
AcctNumber: "119710618",
MeterNumber: "104991078",
}
// Testing Account Info
TestAcct = Account{
ApiURI: "https://wsbeta.fedex.com:443/web-services",
DevKey: "isurR8vQXGWe7NdB",
Password: "sLpViYx5Zem7T01fLIHRZyKAQ",
AcctNumber: "510087682",
MeterNumber: "118573690",
}
Zoom = Contact{
PersonName: "",
CompanyName: "Zoom Envlopes",
Phone: "0000000000",
Address: Address{
Street: "52 Industrial Road",
City: "Ephrata",
State: "PA",
Zip: "17522",
},
}
)
type Account struct {
ApiURI string
DevKey string
Password string
AcctNumber string
MeterNumber string
}
type Package struct {
Weight float64
Tag string
SequenceNumber int
Shipment *Shipment
}
type Shipment struct {
Account Account
Shipper Contact
Recipient Contact
Packages []*Package
PackageCount int
TrackingId string
RequestTemplate *template.Template
TagTemplate *template.Template
Timestamp time.Time
}
type Contact struct {
PersonName, CompanyName, Phone string
Address Address
}
type Address struct {
Street, City, State, Zip string
}
func NewShipment(shipper, recipient Contact) *Shipment {
return &Shipment{
Account: Acct,
Shipper: shipper,
Recipient: recipient,
RequestTemplate: template.Must(template.New("xml").Parse(REQUEST_SHIPMENT_XML)),
TagTemplate: template.Must(template.New("tag").Parse(`<img width="380" hspace="25" vspace="50" src="data:image/png;base64,{{ .tag }}">`)),
Timestamp: time.Now(),
}
}
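// ParsePackages splits an order into packages. Reading the code below, env is the
// per-envelope weight (stockWeight appears to be the weight per 1000 envelopes),
// full cartons of maxCartonCnt envelopes are emitted first, and a final partial
// carton holds the remainder.
// Hypothetical example: orderQty=2500, maxCartonCnt=1000, stockWeight=5 yields
// three packages weighing 5, 5 and 2.5 with sequence numbers 1, 2 and 3, and
// PackageCount is set to 3.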
func (self *Shipment) ParsePackages(orderQty, maxCartonCnt, stockWeight int) {
pkgs := make([]*Package, 0)
env := (float64(stockWeight) / float64(1000))
var i, j int
for i = orderQty; i >= maxCartonCnt; i -= maxCartonCnt {
pkgs = append(pkgs, &Package{(float64(maxCartonCnt) * env), "", j + 1, self})
j++
}
if i != 0 {
pkgs = append(pkgs, &Package{(float64(i) * env), "", j + 1, self})
}
self.PackageCount = len(pkgs)
self.Packages = pkgs
}
func (self *Package) MakeRequest(autoParse bool) []byte {
var xml bytes.Buffer
self.Shipment.RequestTemplate.Execute(&xml, map[string]interface{}{"fedex": self.Shipment, "package": self})
response, err := http.Post(self.Shipment.Account.ApiURI, "application/xml", &xml)
if err != nil {
panic(err)
}
defer response.Body.Close()
xmlDat, _ := ioutil.ReadAll(response.Body)
if autoParse {
pngDat := ParseXmlVals(xmlDat, "Image")["Image"]
self.Tag = self.ParseImage(pngDat)
}
return xmlDat
}
func (self *Package) ParseImage(pngDat string) string {
var tag bytes.Buffer
self.Shipment.TagTemplate.Execute(&tag, map[string]interface{}{"tag": pngDat})
return tag.String()
}
func ParseXmlVals(xmldat []byte, s ...string) map[string]string {
vals := make(map[string]string)
for _, val := range s {
dec := xml.NewDecoder(bytes.NewBufferString(string(xmldat)))
for {
t, err := dec.Token()
if err != nil {
break
}
switch e := t.(type) {
case xml.StartElement:
if e.Name.Local == val {
b, _ := dec.Token()
switch b.(type) {
case xml.CharData:
vals[val] = fmt.Sprintf("%s", b)
}
}
}
}
}
return vals
}
/*
func (self *Shipment) MakeShipmentRequests() bool {
if len(self.Packages) <= 0 {
return false
}
self.PackageCount = len(self.Packages)
var xml bytes.Buffer
self.XmlTemplate.Execute(&xml, self)
resp, _ := http.Post(self.Account.ApiURI, "application/xml", &xml)
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
fmt.Printf("%s\n", body)
return true
}
*/
var TAG *template.Template
var REQUEST_SHIPMENT *template.Template
var REQUEST_SHIPMENT_XML = `<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:v12="http://fedex.com/ws/ship/v12">
<soapenv:Header/>
<soapenv:Body>
<v12:ProcessShipmentRequest>
<v12:WebAuthenticationDetail>
<v12:UserCredential>
<v12:Key>{{ .fedex.Account.DevKey }}</v12:Key>
<v12:Password>{{ .fedex.Account.Password }}</v12:Password>
</v12:UserCredential>
</v12:WebAuthenticationDetail>
<v12:ClientDetail>
<v12:AccountNumber>{{ .fedex.Account.AcctNumber }}</v12:AccountNumber>
<v12:MeterNumber>{{ .fedex.Account.MeterNumber }}</v12:MeterNumber>
</v12:ClientDetail>
<!--
<v12:TransactionDetail>
<v12:CustomerTransactionId>** TEST TRANSACTION**</v12:CustomerTransactionId>
</v12:TransactionDetail>
-->
<v12:Version>
<v12:ServiceId>ship</v12:ServiceId>
<v12:Major>12</v12:Major>
<v12:Intermediate>0</v12:Intermediate>
<v12:Minor>0</v12:Minor>
</v12:Version>
<v12:RequestedShipment>
<v12:ShipTimestamp>{{ .fedex.Timestamp }}</v12:ShipTimestamp>
<v12:DropoffType>REGULAR_PICKUP</v12:DropoffType>
<v12:ServiceType>PRIORITY_OVERNIGHT</v12:ServiceType>
<v12:PackagingType>YOUR_PACKAGING</v12:PackagingType>
<v12:Shipper>
<v12:Contact>
<v12:PersonName>{{ .fedex.Shipper.PersonName }}</v12:PersonName>
<v12:CompanyName>{{ .fedex.Shipper.CompanyName }}</v12:CompanyName>
<v12:PhoneNumber>{{ .fedex.Shipper.Phone }}</v12:PhoneNumber>
</v12:Contact>
<v12:Address>
<v12:StreetLines>{{ .fedex.Shipper.Address.Street }}</v12:StreetLines>
<v12:City>{{ .fedex.Shipper.Address.City }}</v12:City>
<v12:StateOrProvinceCode>{{ .fedex.Shipper.Address.State }}</v12:StateOrProvinceCode>
<v12:PostalCode>{{ .fedex.Shipper.Address.Zip }}</v12:PostalCode>
<v12:CountryCode>US</v12:CountryCode>
</v12:Address>
</v12:Shipper>
<v12:Recipient>
<v12:Contact>
<v12:PersonName>{{ .fedex.Recipient.PersonName }}</v12:PersonName>
<v12:CompanyName>{{ .fedex.Recipient.CompanyName }}</v12:CompanyName>
<v12:PhoneNumber>{{ .fedex.Recipient.Phone }}</v12:PhoneNumber>
</v12:Contact>
<v12:Address>
<v12:StreetLines>{{ .fedex.Recipient.Address.Street }}</v12:StreetLines>
<v12:City>{{ .fedex.Recipient.Address.City }}</v12:City>
<v12:StateOrProvinceCode>{{ .fedex.Recipient.Address.State }}</v12:StateOrProvinceCode>
<v12:PostalCode>{{ .fedex.Recipient.Address.Zip }}</v12:PostalCode>
<v12:CountryCode>US</v12:CountryCode>
<!--<v12:Residential>true</v12:Residential>-->
</v12:Address>
</v12:Recipient>
<v12:ShippingChargesPayment>
<v12:PaymentType>SENDER</v12:PaymentType>
<v12:Payor>
<v12:ResponsibleParty>
<v12:AccountNumber>{{ .fedex.Account.AcctNumber }}</v12:AccountNumber>
<v12:Contact/>
</v12:ResponsibleParty>
</v12:Payor>
</v12:ShippingChargesPayment>
<v12:LabelSpecification>
<v12:LabelFormatType>COMMON2D</v12:LabelFormatType>
<v12:ImageType>PNG</v12:ImageType>
<v12:LabelStockType>PAPER_4X6</v12:LabelStockType>
<v12:LabelPrintingOrientation>TOP_EDGE_OF_TEXT_FIRST</v12:LabelPrintingOrientation>
</v12:LabelSpecification>
<v12:RateRequestTypes>ACCOUNT</v12:RateRequestTypes>
{{ if .fedex.TrackingId }}
<v12:MasterTrackingId>
<v12:TrackingIdType>FEDEX</v12:TrackingIdType>
<v12:FormId></v12:FormId>
<v12:TrackingNumber>{{ .fedex.TrackingId }}</v12:TrackingNumber>
</v12:MasterTrackingId>
{{ end }}
<v12:PackageCount>{{ .fedex.PackageCount }}</v12:PackageCount>
<v12:RequestedPackageLineItems>
<v12:SequenceNumber>{{ .package.SequenceNumber }}</v12:SequenceNumber>
<v12:Weight>
<v12:Units>LB</v12:Units>
<v12:Value>{{ .package.Weight }}</v12:Value>
</v12:Weight>
</v12:RequestedPackageLineItems>
</v12:RequestedShipment>
</v12:ProcessShipmentRequest>
</soapenv:Body>
</soapenv:Envelope>`
|
[
7
] |
/*
Copyright 2015 Ian Bishop
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"log"
"net"
"sync"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/google/gopacket/pcap"
"github.com/vishvananda/netlink"
)
type listener struct {
sync.RWMutex
ifname string
extChan, intChan chan ndp
errChan chan error
ruleNet *net.IPNet
started, finished bool
}
type sessionStatus int
// sessions track clients who've previously sent neighbor solicits
type session struct {
upstream *listener
srcIP, dstIP, target net.IP
status sessionStatus
expiry time.Time
}
type ndp struct {
payload gopacket.Payload
icmp layers.ICMPv6
ip6 layers.IPv6
eth layers.Ethernet
}
const (
waiting sessionStatus = iota
valid
invalid
timeout = time.Duration(500 * time.Millisecond)
ttl = time.Duration(30 * time.Second)
routeCheckInterval = 30
// snaplen should be large enough to capture the layers we're interested in
snaplen = 100
)
var IPV6SolicitedNode = net.IP{0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0xff, 0, 0, 0}
func Proxy(wg *sync.WaitGroup, ifname string, rules []string) {
defer wg.Done()
var err error
upstreams := make(map[string]*listener)
// shared channels upstreams send to
errChan := make(chan error)
intChan := make(chan ndp)
mainExtChan := make(chan ndp)
tickRouteChan := time.NewTicker(time.Second * routeCheckInterval).C
tickSessChan := time.NewTicker(time.Millisecond * 100).C
defer func() {
for _, upstream := range upstreams {
close(upstream.extChan)
delete(upstreams, upstream.ifname)
}
}()
var sessions []session
// launch handler for main interface 'ifname'
l := &listener{ifname: ifname, intChan: intChan, extChan: mainExtChan, errChan: errChan}
go l.handler()
err = refreshRoutes(rules, intChan, errChan, upstreams)
if err != nil {
fmt.Printf("%s\n", err)
return
}
for {
select {
case err = <-errChan:
fmt.Printf("%s\n", err)
return
case n := <-intChan:
sessions = proxyPacket(n, mainExtChan, upstreams, sessions)
case <-tickSessChan:
sessions = updateSessions(sessions)
case <-tickRouteChan:
err := refreshRoutes(rules, intChan, errChan, upstreams)
if err != nil {
fmt.Printf("%s\n", err)
return
}
}
}
}
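// proxyPacket is the core proxy step (summary comment added for readability):
// a neighbor advertisement is matched against a waiting session and relayed out
// via extChan with the original solicitor as destination, while a neighbor
// solicitation either reuses an existing session (answering directly when the
// session is already valid, dropping the solicit while it is waiting or invalid)
// or creates a new waiting session on the upstream whose rule network contains
// the target and forwards the solicit to that upstream.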
func proxyPacket(n ndp, extChan chan ndp, upstreams map[string]*listener, sessions []session) []session {
var target net.IP
// IPv6 bounds check
if len(n.payload) >= 16 {
target = net.IP(n.payload[:16])
} else {
return sessions
}
switch n.icmp.TypeCode.Type() {
case layers.ICMPv6TypeNeighborAdvertisement:
for i, s := range sessions {
if s.target.Equal(target) && sessions[i].status == waiting {
vlog.Printf("advert, using existing session for target %s\n", target)
sessions[i].status = valid
sessions[i].expiry = time.Now().Add(ttl)
n.ip6.DstIP = s.srcIP
extChan <- n
return sessions
}
}
case layers.ICMPv6TypeNeighborSolicitation:
if !n.ip6.DstIP.IsMulticast() {
return sessions
}
for _, s := range sessions {
if s.target.Equal(target) {
switch s.status {
case waiting, invalid:
break
case valid:
// swap solicit for advert and send back out main interface
vlog.Printf("solicit, using existing session for target %s\n", target)
n.icmp.TypeCode = layers.CreateICMPv6TypeCode(layers.ICMPv6TypeNeighborAdvertisement, 0)
n.ip6.DstIP = n.ip6.SrcIP
n.ip6.SrcIP = nil
extChan <- n
}
return sessions
}
}
var s *session
// if msg arrived from the main interface, then send to matching upstreams
for _, upstream := range upstreams {
if upstream.ruleNet.Contains(target) {
vlog.Printf("session not found when handling solicit for target %s. Creating new session...\n", net.IP(n.payload[:16]))
s = &session{
upstream: upstream,
srcIP: n.ip6.SrcIP,
dstIP: n.ip6.DstIP,
target: target,
status: waiting,
expiry: time.Now().Add(timeout),
}
}
}
if s != nil {
if !s.upstream.started {
// launch upstream handler
go s.upstream.handler()
}
sessions = append(sessions, *s)
s.upstream.extChan <- n
}
}
return sessions
}
func updateSessions(sessions []session) []session {
for i := len(sessions) - 1; i >= 0; i-- {
if sessions[i].expiry.After(time.Now()) {
continue
}
switch sessions[i].status {
case waiting:
vlog.Printf("set waiting session %d to invalid, target %s", i, sessions[i].target)
sessions[i].status = invalid
sessions[i].expiry = time.Now().Add(ttl)
default:
vlog.Printf("remove session %d, target %s", i, sessions[i].target)
sessions = append(sessions[:i], sessions[i+1:]...)
}
}
return sessions
}
func refreshRoutes(rules []string, intChan chan ndp, errChan chan error, upstreams map[string]*listener) error {
vlog.Println("refreshing routes...")
for _, rule := range rules {
_, ruleNet, err := net.ParseCIDR(rule)
if err != nil {
return fmt.Errorf("invalid rule '%s', %s", rule, err)
}
routes, err := netlink.RouteList(nil, netlink.FAMILY_V6)
if err != nil {
return fmt.Errorf("error enumerating routes, %s", err)
}
var route *netlink.Route
for _, r := range routes {
if r.Dst != nil && r.Dst.Contains(ruleNet.IP) {
route = &r
break
}
}
if route == nil {
// cancel any proxies for removed routes
for _, upstream := range upstreams {
if upstream.ruleNet.IP.Equal(ruleNet.IP) {
log.Printf("route for upstream if %s went away. Removing listener...\n", upstream.ifname)
close(upstream.extChan)
delete(upstreams, upstream.ifname)
}
}
// route not found, skip
continue
}
links, err := netlink.LinkList()
if err != nil {
return fmt.Errorf("error enumerating links, %s", err)
}
for _, link := range links {
if link.Attrs().Index == route.LinkIndex {
if _, ok := upstreams[link.Attrs().Name]; !ok {
log.Printf("new upstream for link '%s', rule '%s', route '%s'\n", link.Attrs().Name, rule, route.Dst)
upstreams[link.Attrs().Name] = &listener{
ifname: link.Attrs().Name,
extChan: make(chan ndp),
intChan: intChan,
errChan: errChan,
ruleNet: ruleNet,
}
}
}
}
}
for name, listener := range upstreams {
listener.RLock()
if listener.finished {
delete(upstreams, name)
}
listener.RUnlock()
}
return nil
}
func (l *listener) handler() {
var err error
var handle *pcap.Handle
log.Printf("spawning listener for if %s\n", l.ifname)
l.Lock()
l.started = true
l.Unlock()
defer func() {
if err != nil {
l.errChan <- err
}
l.Lock()
l.finished = true
l.Unlock()
log.Printf("exiting listener for if %s\n", l.ifname)
}()
// open interface in promiscuous mode in order to pickup solicited-node multicasts
handle, err = pcap.OpenLive(l.ifname, snaplen, true, pcap.BlockForever)
if err != nil {
err = fmt.Errorf("pcap open error: %s", err)
return
}
defer handle.Close()
// limit captured packets to icmp6
err = handle.SetBPFFilter("icmp6")
if err != nil {
return
}
var iface *net.Interface
iface, err = net.InterfaceByName(l.ifname)
if err != nil {
return
}
var addrs []net.Addr
var linklocal net.IP
addrs, err = iface.Addrs()
if err != nil {
return
}
for _, addr := range addrs {
switch v := addr.(type) {
case *net.IPNet:
if v.IP.IsLinkLocalUnicast() {
linklocal = v.IP
break
}
}
}
if linklocal == nil || linklocal.IsUnspecified() {
err = fmt.Errorf("error finding link local unicast address for if %s", l.ifname)
return
}
var eth layers.Ethernet
var ip6 layers.IPv6
var ip6extensions layers.IPv6ExtensionSkipper
var icmp layers.ICMPv6
var payload gopacket.Payload
decoded := []gopacket.LayerType{}
parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, ð, &ip6, &ip6extensions, &icmp, &payload)
packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
packetsChan := packetSource.Packets()
for {
select {
case packet := <-packetsChan:
parser.DecodeLayers(packet.Data(), &decoded)
for _, layerType := range decoded {
switch layerType {
case layers.LayerTypeICMPv6:
var target net.IP
// IPv6 bounds check
if len(payload) >= 16 {
target = net.IP(payload[:16])
} else {
continue
}
switch icmp.TypeCode.Type() {
case layers.ICMPv6TypeNeighborSolicitation, layers.ICMPv6TypeNeighborAdvertisement:
n := ndp{eth: eth, ip6: ip6, icmp: icmp, payload: payload}
vlog.Printf("%s\tread\t%s\tmac_src %s\tip6_src %s\tip6_dst %s\ttarget %s\n", l.ifname, icmp.TypeCode, eth.SrcMAC, ip6.SrcIP, ip6.DstIP, target)
l.intChan <- n
}
}
}
case n, ok := <-l.extChan:
if !ok {
// channel was closed
return
}
n.eth.DstMAC = nil
if n.ip6.DstIP.IsLinkLocalMulticast() {
// The Ethernet MAC is derived from the low-order four octets of the IPv6 address
n.eth.DstMAC = append(net.HardwareAddr{0x33, 0x33}, n.ip6.DstIP[12:]...)
} else {
var neighbors []netlink.Neigh
neighbors, err = netlink.NeighList(iface.Index, netlink.FAMILY_V6)
if err != nil {
return
}
for _, neighbor := range neighbors {
if neighbor.IP.Equal(n.ip6.DstIP) {
n.eth.DstMAC = neighbor.HardwareAddr
break
}
}
}
if n.eth.DstMAC == nil {
vlog.Printf("%s: could not find destination MAC address. %s mac_src %s ip6_dst %s ip6_src %s target %s", l.ifname, n.icmp.TypeCode, n.eth.SrcMAC, n.ip6.DstIP, n.ip6.SrcIP, net.IP(n.payload[:16]))
// Try Solicited-Node multicast address
// the dst IP is derived from the first 13 octets of the solicited-node multicast prefix +
// the last 3 octets of the dst IP
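// Hypothetical example: for ip6_dst 2001:db8::abcd:1234 the rewritten dst becomes
// the solicited-node address ff02::1:ffcd:1234, and the MAC derived below is
// 33:33:ff:cd:12:34.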
n.ip6.DstIP = append(IPV6SolicitedNode[:13], n.ip6.DstIP[13:]...)
n.eth.DstMAC = append(net.HardwareAddr{0x33, 0x33}, n.ip6.DstIP[12:]...)
}
n.eth.SrcMAC = iface.HardwareAddr
n.ip6.SrcIP = linklocal
buf := gopacket.NewSerializeBuffer()
n.icmp.SetNetworkLayerForChecksum(&n.ip6)
opts := gopacket.SerializeOptions{ComputeChecksums: true}
switch n.icmp.TypeCode.Type() {
case layers.ICMPv6TypeNeighborSolicitation:
// source link-layer address opt type, opt length
n.payload = append(n.payload[:16], 0x01, 0x01)
case layers.ICMPv6TypeNeighborAdvertisement:
// target link-layer address opt type, opt length
n.payload = append(n.payload[:16], 0x02, 0x01)
n.icmp.TypeBytes[0] = 0xc0 // router,solicit,override flags
}
n.payload = append(n.payload[:18], iface.HardwareAddr...)
err = gopacket.SerializeLayers(buf, opts, &n.eth, &n.ip6, &n.icmp, &n.payload)
if err != nil {
err = fmt.Errorf("serialize layers error: %s", err)
return
}
err = handle.WritePacketData(buf.Bytes())
if err != nil {
err = fmt.Errorf("pcap write error: %s", err)
return
}
vlog.Printf("%s\twrite\t%s\tmac_dst %s\tip6_src %s\tip6_dst %s\ttarget %s\n", l.ifname, n.icmp.TypeCode, n.eth.DstMAC, n.ip6.SrcIP, n.ip6.DstIP, net.IP(n.payload[:16]))
}
}
}
|
[
7
] |
package controllers
import (
"github.com/revel/revel"
"just_chatting/app/modules/entity"
)
type CUser struct {
*revel.Controller
response entity.Response
}
// GetUser returns the user stored in the session
func (c *CUser) GetUser() revel.Result {
user, err := c.Session.Get("user")
if err != nil {
c.response.Type = entity.ResponseTypeError
c.response.Data = err.Error()
return c.RenderJSON(c.response)
} else {
if c.Session.SessionTimeoutExpiredOrMissing() {
c.response.Type = entity.ResponseTypeError
c.response.Data = "Session expired"
return c.RenderJSON(c.response)
}
}
c.response.Type = entity.ResponseTypeData
c.response.Data = user
return c.RenderJSON(c.response)
}
|
[
4
] |
package parsing
import (
"fmt"
"go/ast"
"go/parser"
"go/token"
"log"
"strings"
"github.com/LinkinStars/baileys/internal/util"
)
const (
InterfaceTypeDef = "interface"
StructTypeDef = "struct"
TimeTypeDef = "time.Time"
)
// StructFlat is a flattened (non-nested) struct representation
type StructFlat struct {
Name string
Comment string
Fields []*StructField
}
// StructField is a struct field
type StructField struct {
Name string
Type string
Comment string
Tag string
}
// GetTag returns the value of the given struct tag
func (s *StructField) GetTag(tagName string) string {
arr := strings.Split(s.Tag, " ")
for _, tag := range arr {
tag = strings.TrimSpace(tag)
if strings.HasPrefix(tag, tagName) {
tag = strings.TrimPrefix(tag, tagName+":")
tag = strings.Trim(tag, "\"")
return tag
}
}
return ""
}
// GetJsonTag returns the json tag value
func (s *StructField) GetJsonTag() string {
tag := s.GetTag("json")
// ignore the field when the json tag is `json:"-"`
if tag == "-" {
return ""
}
if len(tag) == 0 {
return s.Name
}
return tag
}
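// Illustrative behavior (hypothetical tags): for a field tagged
// `json:"user_id" db:"uid"`, GetTag("json") and GetJsonTag() both return
// "user_id"; for `json:"-"` GetJsonTag returns ""; a field without a json tag
// falls back to its Go field name.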
// StructParser parses Go struct definitions from source code
func StructParser(src string) (structList []*StructFlat, err error) {
src = addPackageIfNotExist(src)
fileSet := token.NewFileSet()
f, err := parser.ParseFile(fileSet, "src.go", src, parser.ParseComments)
if err != nil {
return nil, err
}
structList = make([]*StructFlat, 0)
for _, node := range f.Decls {
switch node.(type) {
case *ast.GenDecl:
genDecl := node.(*ast.GenDecl)
var structComment string
if genDecl.Doc != nil {
structComment = strings.TrimSpace(genDecl.Doc.Text())
}
for _, spec := range genDecl.Specs {
switch spec.(type) {
case *ast.TypeSpec:
typeSpec := spec.(*ast.TypeSpec)
// get the struct name
structFlat := &StructFlat{Name: typeSpec.Name.Name, Comment: structComment}
structFlat.Fields = make([]*StructField, 0)
log.Printf("read struct %s %s\n", typeSpec.Name.Name, structComment)
switch typeSpec.Type.(type) {
case *ast.StructType:
structType := typeSpec.Type.(*ast.StructType)
for _, reField := range structType.Fields.List {
structField := &StructField{}
if reField.Tag != nil {
structField.Tag = strings.Trim(reField.Tag.Value, "`")
}
switch reField.Type.(type) {
case *ast.Ident:
iDent := reField.Type.(*ast.Ident)
structField.Type = iDent.Name
case *ast.InterfaceType:
structField.Type = InterfaceTypeDef
case *ast.MapType:
iDent := reField.Type.(*ast.MapType)
structField.Type = fmt.Sprintf("map[%s]%s", iDent.Key, iDent.Value)
case *ast.ArrayType:
iDent := reField.Type.(*ast.ArrayType)
iDentElem := util.ReflectAccess(iDent.Elt)
structField.Type = fmt.Sprintf("[]%s", iDentElem)
case *ast.StructType:
structField.Type = StructTypeDef
case *ast.SelectorExpr:
iDent := reField.Type.(*ast.SelectorExpr)
if iDent.Sel.Name == "Time" {
structField.Type = TimeTypeDef
} else {
log.Printf("undefined reField type %+v", reField.Type)
}
default:
log.Printf("undefined reField type %+v", reField.Type)
}
for _, name := range reField.Names {
structField.Name = name.Name
structField.Comment = strings.TrimSpace(reField.Doc.Text())
structFlat.Fields = append(structFlat.Fields, structField)
log.Printf("name=%s type=%s comment=%s tag=%s\n", name.Name, structField.Type, structField.Comment, structField.Tag)
}
}
}
structList = append(structList, structFlat)
}
}
}
}
return structList, nil
}
func addPackageIfNotExist(src string) string {
if strings.HasPrefix(src, "package") {
return src
}
return "package mypackage\n" + src
}
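// Illustrative input/output (hypothetical source): parsing
//
//	// User is an account
//	type User struct {
//		// user id
//		ID int `json:"id"`
//	}
//
// with StructParser yields one StructFlat named "User" with Comment
// "User is an account" and a single field with Name "ID", Type "int",
// Tag `json:"id"` and Comment "user id".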
|
[
7
] |
package paraphrase
import (
"hash/fnv"
"math"
"regexp"
)
var (
whitespace = regexp.MustCompile(`\s`)
)
type Fingerprint uint64
func normalizeDocument(document []byte) []byte {
return removeWhitespace(document)
}
func removeWhitespace(document []byte) []byte {
// https://github.com/golang/go/wiki/SliceTricks
output := make([]byte, 0, len(document))
for _, x := range document {
switch x {
case '\t', '\n', '\v', '\f', '\r', ' ', 0x85, 0xA0:
continue
default:
output = append(output, x)
}
}
return output
}
func fingerprintDocument(document []byte, size int) []Fingerprint {
fingerprintCount := len(document) - size
var fingerprints []Fingerprint
for i := 0; i <= fingerprintCount; i++ {
hash := fnv.New64()
hash.Write(document[i : i+size])
fingerprints = append(fingerprints, Fingerprint(hash.Sum64()))
}
return fingerprints
}
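// Illustrative example: for the 6-byte document "abcdef" and size 4, the windows
// "abcd", "bcde" and "cdef" are hashed, producing three 64-bit FNV fingerprints.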
func winnow(fingerprints []Fingerprint, window int, robust bool) []Fingerprint {
var recorded []Fingerprint
h := make([]Fingerprint, window)
for i := range h {
h[i] = math.MaxUint64
}
r := 0 // window right end
min := 0 // index of min hash
for _, fingerprint := range fingerprints {
r = (r + 1) % window // shift window by one
h[r] = fingerprint
if min == r {
// previous minimum is no longer in this window.
// scan h leftward starting from r for the rightmost minimal hash.
// Note min starts with the index of the rightmost hash.
for i := (r - 1 + window) % window; i != r; i = (i - 1 + window) % window {
if h[i] < h[min] {
min = i
}
}
// record the new rightmost minimal hash of the window
recorded = append(recorded, h[min])
} else {
// Otherwise, the previous minimum is still in this window. Compare
// against the new value and update min if necessary.
if h[r] < h[min] || (!robust && h[r] == h[min]) {
min = r
recorded = append(recorded, fingerprint)
}
}
}
return recorded
}
func (p *ParaphraseDb) WinnowData(bytes []byte) (TermCountVector, error) {
winnowed := make(TermCountVector)
norm := normalizeDocument(bytes)
prints := fingerprintDocument(norm, p.settings.FingerprintSize)
saved := winnow(prints, p.settings.WindowSize, p.settings.RobustHash)
for _, print := range saved {
curr := winnowed[uint64(print)]
winnowed[uint64(print)] = curr + 1
}
return winnowed, nil
}
|
[
4
] |
package pio
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"os/signal"
"os/user"
"path/filepath"
"strings"
"github.com/atotto/clipboard"
"golang.org/x/crypto/ssh/terminal"
)
const (
PASSGODIR = "PASSGODIR"
// ConfigFileName is the name of the passgo config file.
ConfigFileName = "config"
// SiteFileName is the name of the passgo password store file.
SiteFileName = "sites.json"
// EncryptedFileDir is the name of the passgo encrypted file dir.
EncryptedFileDir = "files"
)
var (
// MasterPassPrompt is the standard prompt string for all passgo
MasterPassPrompt = "Enter master password"
)
// PassFile is an interface for how all passgo files should be saved.
type PassFile interface {
SaveFile() (err error)
}
// ConfigFile represents the passgo config file.
type ConfigFile struct {
MasterKeyPrivSealed []byte
PubKeyHmac []byte
SiteHmac []byte
MasterPubKey [32]byte
MasterPassKeySalt [32]byte
HmacSalt [32]byte
SiteHmacSalt [32]byte
}
// SiteInfo represents a single saved password entry.
type SiteInfo struct {
PubKey [32]byte
PassSealed []byte
Name string
FileName string
IsFile bool
}
// SiteFile represents the entire passgo password store.
type SiteFile []SiteInfo
func PassFileDirExists() (bool, error) {
d, err := GetEncryptedFilesDir()
if err != nil {
return false, err
}
dirInfo, err := os.Stat(d)
if err == nil {
if dirInfo.IsDir() {
return true, nil
}
} else {
if os.IsNotExist(err) {
return false, nil
}
}
return false, err
}
// PassDirExists is used to determine if the passgo
// directory in the user's home directory exists.
func PassDirExists() (bool, error) {
d, err := GetPassDir()
if err != nil {
return false, err
}
dirInfo, err := os.Stat(d)
if err == nil {
if dirInfo.IsDir() {
return true, nil
}
} else {
if os.IsNotExist(err) {
return false, nil
}
}
return false, err
}
// PassConfigExists is used to determine if the passgo config
// file exists in the user's passgo directory.
func PassConfigExists() (bool, error) {
c, err := GetConfigPath()
if err != nil {
return false, err
}
_, err = os.Stat(c)
if err != nil {
return false, err
}
return true, nil
}
// SitesVaultExists is used to determine if the password store
// exists in the user's passgo directory.
func SitesVaultExists() (bool, error) {
c, err := GetConfigPath()
if err != nil {
return false, err
}
sitesFilePath := filepath.Join(c, SiteFileName)
_, err = os.Stat(sitesFilePath)
if err != nil {
return false, err
}
return true, nil
}
func GetHomeDir() (d string, err error) {
usr, err := user.Current()
if err == nil {
d = usr.HomeDir
}
return
}
// GetPassDir is used to return the user's passgo directory.
func GetPassDir() (d string, err error) {
d, ok := os.LookupEnv(PASSGODIR)
if !ok {
home, err := GetHomeDir()
if err == nil {
d = filepath.Join(home, ".passgo")
}
}
return
}
// GetConfigPath is used to get the user's passgo directory.
func GetConfigPath() (p string, err error) {
d, err := GetPassDir()
if err == nil {
p = filepath.Join(d, ConfigFileName)
}
return
}
// GetEncryptedFilesDir is used to get the directory that we store
// encrypted files in.
func GetEncryptedFilesDir() (p string, err error) {
d, err := GetPassDir()
if err == nil {
p = filepath.Join(d, EncryptedFileDir)
}
return
}
// GetSitesFile will return the user's passgo vault.
func GetSitesFile() (d string, err error) {
p, err := GetPassDir()
if err == nil {
d = filepath.Join(p, SiteFileName)
}
return
}
func (s *SiteInfo) AddFile(fileBytes []byte, filename string) error {
encFileDir, err := GetEncryptedFilesDir()
if err != nil {
return err
}
// Make sure that the file directory exists.
fileDirExists, err := PassFileDirExists()
if err != nil {
return err
}
if !fileDirExists {
err = os.Mkdir(encFileDir, 0700)
if err != nil {
log.Fatalf("Could not create passgo encrypted file dir: %s", err.Error())
}
}
encFilePath := filepath.Join(encFileDir, filename)
dir, _ := filepath.Split(encFilePath)
err = os.MkdirAll(dir, 0700)
if err != nil {
log.Fatalf("Could not create subdirectory: %s", err.Error())
}
err = ioutil.WriteFile(encFilePath, fileBytes, 0666)
if err != nil {
return err
}
// We still need to add this site info to the bytes.
return s.AddSite()
}
// AddSite is used by individual password entries to update the vault.
func (s *SiteInfo) AddSite() (err error) {
siteFile := GetVault()
for _, si := range siteFile {
if s.Name == si.Name {
return errors.New("Could not add site with duplicate name")
}
}
siteFile = append(siteFile, *s)
return UpdateVault(siteFile)
}
// GetVault is used to retrieve the password vault for the user.
func GetVault() (s SiteFile) {
si, err := GetSitesFile()
if err != nil {
log.Fatalf("Could not get pass dir: %s", err.Error())
}
siteFileContents, err := ioutil.ReadFile(si)
if err != nil {
if os.IsNotExist(err) {
log.Fatalf("Could not open site file. Run passgo init.: %s", err.Error())
}
log.Fatalf("Could not read site file: %s", err.Error())
}
err = json.Unmarshal(siteFileContents, &s)
if err != nil {
log.Fatalf("Could not unmarshal site info: %s", err.Error())
}
return
}
// GetSiteFileBytes returns the bytes instead of a SiteFile
func GetSiteFileBytes() (b []byte) {
si, err := GetSitesFile()
if err != nil {
log.Fatalf("Could not get pass dir: %s", err.Error())
}
f, err := os.OpenFile(si, os.O_RDWR, 0600)
if err != nil {
log.Fatalf("Could not open site file: %s", err.Error())
}
defer f.Close()
b, err = ioutil.ReadAll(f)
if err != nil {
log.Fatalf("Could not read site file: %s", err.Error())
}
return
}
// UpdateVault is used to replace the current password vault.
func UpdateVault(s SiteFile) (err error) {
si, err := GetSitesFile()
if err != nil {
log.Fatalf("Could not get pass dir: %s", err.Error())
}
siteFileContents, err := json.MarshalIndent(s, "", "\t")
if err != nil {
log.Fatalf("Could not marshal site info: %s", err.Error())
}
// Write the site with the newly appended site to the file.
err = ioutil.WriteFile(si, siteFileContents, 0666)
return
}
// SaveFile is used by ConfigFiles to update the passgo config.
func (c *ConfigFile) SaveFile() (err error) {
if exists, err := PassConfigExists(); err != nil {
log.Fatalf("Could not find config file: %s", err.Error())
} else {
if !exists {
log.Fatalf("pass config could not be found")
}
}
cBytes, err := json.MarshalIndent(c, "", "\t")
if err != nil {
log.Fatalf("Could not marshal config file: %s", err.Error())
}
path, err := GetConfigPath()
if err != nil {
log.Fatalf("Could not get config file path: %s", err.Error())
}
err = ioutil.WriteFile(path, cBytes, 0666)
return
}
// ReadConfig is used to return the passgo ConfigFile.
func ReadConfig() (c ConfigFile, err error) {
config, err := GetConfigPath()
if err != nil {
return
}
configBytes, err := ioutil.ReadFile(config)
if err != nil {
return
}
err = json.Unmarshal(configBytes, &c)
return
}
// GetPassFromFile will get users password from ~/.config/passgo/passgo.pass if present.
func GetPassFromFile() (pass string, err error) {
passwordFile := os.Getenv("HOME") + "/.config/passgo/passgo.pass"
content, err := ioutil.ReadFile(passwordFile)
if err != nil {
return "", err
} else {
fmt.Fprintf(os.Stderr, "Found and using password file, %s\n", passwordFile)
return strings.TrimSuffix(string(content), "\n"), nil
}
}
// PromptPass will prompt user's for a password by terminal.
func PromptPass(prompt string) (pass string, err error) {
// Make a copy of STDIN's state to restore afterward
fd := int(os.Stdin.Fd())
oldState, err := terminal.GetState(fd)
if err != nil {
panic("Could not get state of terminal: " + err.Error())
}
defer terminal.Restore(fd, oldState)
// Restore STDIN in the event of a signal interruption
sigch := make(chan os.Signal, 1)
signal.Notify(sigch, os.Interrupt)
go func() {
for range sigch {
terminal.Restore(fd, oldState)
os.Exit(1)
}
}()
fmt.Fprintf(os.Stderr, "%s: ", prompt)
passBytes, err := terminal.ReadPassword(fd)
fmt.Fprintln(os.Stderr, "")
return string(passBytes), err
}
// Prompt will prompt a user for regular data from stdin.
func Prompt(prompt string) (s string, err error) {
fmt.Printf("%s", prompt)
stdin := bufio.NewReader(os.Stdin)
l, _, err := stdin.ReadLine()
return string(l), err
}
func ToClipboard(s string) {
if err := clipboard.WriteAll(s); err != nil {
log.Fatalf("Could not copy password to clipboard: %s", err.Error())
}
}
|
[
4
] |
package postgres
import (
"database/sql/driver"
"fmt"
"reflect"
"time"
"github.com/lib/pq"
)
// ColumnTyper is an interface to be implemented by a custom type
type ColumnTyper interface {
// ColumnType returns postgres' column type
ColumnType() string
}
// columnType returns a postgres column type for a given value
func columnType(value interface{}) string {
if ct, err := callColumnTyper(value); err != nil && err != ErrColumnTyperNotImplemented {
panic(err)
} else if err == nil {
return ct
}
if implementsScanner(reflect.ValueOf(value)) {
return "text null"
}
switch value.(type) {
case time.Time, *time.Time:
// INFO: we map the zero type to null
return "timestamp (6) without time zone null"
case time.Duration:
// see https://github.com/lib/pq/issues/78 why we can't use postgres' interval type
return "bigint not null default 0"
case *time.Duration:
// see comment above in regards to interval type
return "bigint null"
case []string:
return "text[] null"
}
switch typeOf(value).Kind() {
case reflect.String:
return "text not null default ''"
case reflect.Bool:
return "boolean not null default false"
case reflect.Int:
return "integer not null default 0"
case reflect.Struct:
return "jsonb null"
case reflect.Slice:
return "jsonb null"
case reflect.Map:
return "jsonb null"
}
panic(fmt.Sprintf("columnType: unsupported Go type %v", reflect.TypeOf(value)))
}
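// Example mappings produced by the switch above: string -> "text not null
// default ''", bool -> "boolean not null default false", int -> "integer not
// null default 0", time.Time -> "timestamp (6) without time zone null",
// time.Duration -> "bigint not null default 0", []string -> "text[] null",
// and other structs, slices and maps -> "jsonb null".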
// encodeValue returns driver.Value, which is stored in postgres, for a given value
func encodeValue(value reflect.Value) (driver.Value, error) {
if (value.Kind() == reflect.Slice || value.Kind() == reflect.Map) && value.Len() == 0 {
return nil, nil
}
if value.Kind() == reflect.Struct && isZero(value.Interface()) {
return nil, nil
}
if value.Kind() == reflect.Slice {
switch value.Interface().(type) {
case []string:
return pq.Array(value.Interface()).Value()
}
}
switch v := value.Interface().(type) {
case time.Time:
// please note that postgres only stores microsecond 1e+6 precision
return v.UTC().Truncate(time.Microsecond), nil // always store UTC
case *time.Time:
if v == nil {
return nil, nil
}
return v.UTC().Truncate(time.Microsecond), nil // always store UTC
case time.Duration:
return v.Nanoseconds(), nil
case *time.Duration:
if v == nil {
return nil, nil
}
return v.Nanoseconds(), nil
}
if reflect.PtrTo(value.Type()).Implements(valuerReflectType) {
return value.Addr().Interface().(driver.Valuer).Value()
}
v, err := driver.DefaultParameterConverter.ConvertValue(value.Interface())
if err == nil {
return v, nil
}
return jsonMarshal(value.Interface())
}
// decodeValue stores decoded value in dst
func decodeValue(dst reflect.Value, value interface{}) error {
// if value is nil, we can skip further processing
if value == nil {
return nil
}
if err := callScanner(dst, value); err != nil && err != ErrScannerNotImplemented {
return err
} else if err == nil {
return nil
}
switch x := value.(type) {
case time.Time:
switch dst.Interface().(type) {
case time.Time:
return setValue(dst, x.UTC())
case *time.Time:
n := x.UTC()
return setValue(dst, &n)
}
}
switch dst.Interface().(type) {
case time.Duration:
return setValue(dst, time.Duration(value.(int64)))
case *time.Duration:
x := time.Duration(value.(int64))
return setValue(dst, &x)
}
// reverse pointer if any
dstKind := dst.Type().Kind()
if dstKind == reflect.Ptr {
dstKind = dst.Type().Elem().Kind()
}
switch dstKind {
case reflect.String:
if _, ok := dst.Interface().(string); ok {
return setValue(dst, value)
}
return setValue(dst, reflect.ValueOf(value).Convert(dst.Type()).Interface())
case reflect.Bool:
return setValue(dst, value.(bool))
case reflect.Int:
switch v := value.(type) {
case int64:
return setValue(dst, int(v))
}
case reflect.Map:
n := reflect.New(dst.Type()).Interface()
if err := jsonUnmarshal([]byte(value.([]byte)), n); err != nil {
return err
}
return setValue(dst, n)
case reflect.Slice:
switch dst.Interface().(type) {
case []string:
a := pq.StringArray{}
if err := a.Scan(value.([]byte)); err != nil {
return err
}
return setValue(dst, a)
}
n := reflect.New(dst.Type()).Interface()
if err := jsonUnmarshal([]byte(value.([]byte)), n); err != nil {
return err
}
return setValue(dst, n)
case reflect.Struct:
n := reflect.New(dst.Type())
if err := jsonUnmarshal([]byte(value.([]byte)), n.Interface()); err != nil {
return err
}
return setValue(dst, n.Elem().Interface())
}
return nil
}
|
[
7
] |
// Copyright 2012 Google, Inc. All rights reserved.
// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package layers
import (
"encoding/binary"
"fmt"
"github.com/tsg/gopacket"
"net"
)
// IPv6 is the layer for the IPv6 header.
type IPv6 struct {
// http://www.networksorcery.com/enp/protocol/ipv6.htm
BaseLayer
Version uint8
TrafficClass uint8
FlowLabel uint32
Length uint16
NextHeader IPProtocol
HopLimit uint8
SrcIP net.IP
DstIP net.IP
HopByHop *IPv6HopByHop
// hbh will be pointed to by HopByHop if that layer exists.
hbh IPv6HopByHop
}
// LayerType returns LayerTypeIPv6
func (i *IPv6) LayerType() gopacket.LayerType { return LayerTypeIPv6 }
func (i *IPv6) NetworkFlow() gopacket.Flow {
return gopacket.NewFlow(EndpointIPv6, i.SrcIP, i.DstIP)
}
const (
IPv6HopByHopOptionJumbogram = 0xC2 // RFC 2675
)
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
func (ip6 *IPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
payload := b.Bytes()
if ip6.HopByHop != nil {
return fmt.Errorf("unable to serialize hopbyhop for now")
}
bytes, err := b.PrependBytes(40)
if err != nil {
return err
}
bytes[0] = (ip6.Version << 4) | (ip6.TrafficClass >> 4)
bytes[1] = (ip6.TrafficClass << 4) | uint8(ip6.FlowLabel>>16)
binary.BigEndian.PutUint16(bytes[2:], uint16(ip6.FlowLabel))
if opts.FixLengths {
ip6.Length = uint16(len(payload))
}
binary.BigEndian.PutUint16(bytes[4:], ip6.Length)
bytes[6] = byte(ip6.NextHeader)
bytes[7] = byte(ip6.HopLimit)
if len(ip6.SrcIP) != 16 {
return fmt.Errorf("invalid src ip %v", ip6.SrcIP)
}
if len(ip6.DstIP) != 16 {
return fmt.Errorf("invalid dst ip %v", ip6.DstIP)
}
copy(bytes[8:], ip6.SrcIP)
copy(bytes[24:], ip6.DstIP)
return nil
}
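// Serialization sketch (assumes gopacket's NewSerializeBuffer/SerializeLayers
// helpers and the IPProtocolNoNextHeader constant from this package; addresses
// and field values are illustrative only):
//
//	buf := gopacket.NewSerializeBuffer()
//	opts := gopacket.SerializeOptions{FixLengths: true}
//	ip6 := &IPv6{Version: 6, HopLimit: 64, NextHeader: IPProtocolNoNextHeader,
//		SrcIP: net.ParseIP("2001:db8::1"), DstIP: net.ParseIP("2001:db8::2")}
//	_ = gopacket.SerializeLayers(buf, opts, ip6)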
func (ip6 *IPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	// Guard against truncated input before indexing into the fixed 40-byte header.
	if len(data) < 40 {
		df.SetTruncated()
		return fmt.Errorf("IPv6 header too short: %d bytes", len(data))
	}
	ip6.Version = uint8(data[0]) >> 4
ip6.TrafficClass = uint8((binary.BigEndian.Uint16(data[0:2]) >> 4) & 0x00FF)
ip6.FlowLabel = binary.BigEndian.Uint32(data[0:4]) & 0x000FFFFF
ip6.Length = binary.BigEndian.Uint16(data[4:6])
ip6.NextHeader = IPProtocol(data[6])
ip6.HopLimit = data[7]
ip6.SrcIP = data[8:24]
ip6.DstIP = data[24:40]
ip6.HopByHop = nil
// We initially set the payload to all bytes after 40. ip6.Length or the
// HopByHop jumbogram option can both change this eventually, though.
ip6.BaseLayer = BaseLayer{data[:40], data[40:]}
// We treat a HopByHop IPv6 option as part of the IPv6 packet, since its
// options are crucial for understanding what's actually happening per packet.
if ip6.NextHeader == IPProtocolIPv6HopByHop {
ip6.hbh.DecodeFromBytes(ip6.Payload, df)
hbhLen := len(ip6.hbh.Contents)
// Reset IPv6 contents to include the HopByHop header.
ip6.BaseLayer = BaseLayer{data[:40+hbhLen], data[40+hbhLen:]}
ip6.HopByHop = &ip6.hbh
if ip6.Length == 0 {
for _, o := range ip6.hbh.Options {
if o.OptionType == IPv6HopByHopOptionJumbogram {
if len(o.OptionData) != 4 {
return fmt.Errorf("Invalid jumbo packet option length")
}
payloadLength := binary.BigEndian.Uint32(o.OptionData)
pEnd := int(payloadLength)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
} else {
ip6.Payload = ip6.Payload[:pEnd]
ip6.hbh.Payload = ip6.Payload
}
return nil
}
}
return fmt.Errorf("IPv6 length 0, but HopByHop header does not have jumbogram option")
}
}
if ip6.Length == 0 {
return fmt.Errorf("IPv6 length 0, but next header is %v, not HopByHop", ip6.NextHeader)
} else {
pEnd := int(ip6.Length)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
pEnd = len(ip6.Payload)
}
ip6.Payload = ip6.Payload[:pEnd]
}
return nil
}
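// Decode sketch (assumes gopacket.NilDecodeFeedback for callers that do not
// track truncation; raw is an illustrative byte slice holding a full packet):
//
//	var ip6 IPv6
//	if err := ip6.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
//		// handle the malformed packet
//	}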
func (i *IPv6) CanDecode() gopacket.LayerClass {
return LayerTypeIPv6
}
func (i *IPv6) NextLayerType() gopacket.LayerType {
if i.HopByHop != nil {
return i.HopByHop.NextHeader.LayerType()
}
return i.NextHeader.LayerType()
}
func decodeIPv6(data []byte, p gopacket.PacketBuilder) error {
ip6 := &IPv6{}
err := ip6.DecodeFromBytes(data, p)
p.AddLayer(ip6)
p.SetNetworkLayer(ip6)
if ip6.HopByHop != nil {
// TODO(gconnell): Since HopByHop is now an integral part of the IPv6
// layer, should it actually be added as its own layer? I'm leaning towards
// no.
p.AddLayer(ip6.HopByHop)
}
if err != nil {
return err
}
if ip6.HopByHop != nil {
return p.NextDecoder(ip6.HopByHop.NextHeader)
}
return p.NextDecoder(ip6.NextHeader)
}
func (i *IPv6HopByHop) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
i.ipv6ExtensionBase = decodeIPv6ExtensionBase(data)
i.Options = i.opts[:0]
var opt *IPv6HopByHopOption
for d := i.Contents[2:]; len(d) > 0; d = d[opt.ActualLength:] {
i.Options = append(i.Options, IPv6HopByHopOption(decodeIPv6HeaderTLVOption(d)))
opt = &i.Options[len(i.Options)-1]
}
return nil
}
func decodeIPv6HopByHop(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6HopByHop{}
err := i.DecodeFromBytes(data, p)
p.AddLayer(i)
if err != nil {
return err
}
return p.NextDecoder(i.NextHeader)
}
type ipv6HeaderTLVOption struct {
OptionType, OptionLength uint8
ActualLength int
OptionData []byte
}
func decodeIPv6HeaderTLVOption(data []byte) (h ipv6HeaderTLVOption) {
	if data[0] == 0 {
		// Pad1 option: a single zero byte with no length or data (RFC 2460).
		h.ActualLength = 1
		return
	}
h.OptionType = data[0]
h.OptionLength = data[1]
h.ActualLength = int(h.OptionLength) + 2
h.OptionData = data[2:h.ActualLength]
return
}
func (h *ipv6HeaderTLVOption) serializeTo(b gopacket.SerializeBuffer, fixLengths bool) (int, error) {
if fixLengths {
h.OptionLength = uint8(len(h.OptionData))
}
length := int(h.OptionLength) + 2
data, err := b.PrependBytes(length)
if err != nil {
return 0, err
}
data[0] = h.OptionType
data[1] = h.OptionLength
copy(data[2:], h.OptionData)
return length, nil
}
// IPv6HopByHopOption is a TLV option present in an IPv6 hop-by-hop extension.
type IPv6HopByHopOption ipv6HeaderTLVOption
type ipv6ExtensionBase struct {
BaseLayer
NextHeader IPProtocol
HeaderLength uint8
ActualLength int
}
func decodeIPv6ExtensionBase(data []byte) (i ipv6ExtensionBase) {
i.NextHeader = IPProtocol(data[0])
i.HeaderLength = data[1]
i.ActualLength = int(i.HeaderLength)*8 + 8
i.Contents = data[:i.ActualLength]
i.Payload = data[i.ActualLength:]
return
}
// IPv6ExtensionSkipper is a DecodingLayer which decodes and ignores v6
// extensions. You can use it with a DecodingLayerParser to handle IPv6 stacks
// which may or may not have extensions.
type IPv6ExtensionSkipper struct {
NextHeader IPProtocol
BaseLayer
}
func (i *IPv6ExtensionSkipper) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
extension := decodeIPv6ExtensionBase(data)
i.BaseLayer = BaseLayer{data[:extension.ActualLength], data[extension.ActualLength:]}
i.NextHeader = extension.NextHeader
return nil
}
func (i *IPv6ExtensionSkipper) CanDecode() gopacket.LayerClass {
return LayerClassIPv6Extension
}
func (i *IPv6ExtensionSkipper) NextLayerType() gopacket.LayerType {
return i.NextHeader.LayerType()
}
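// Usage sketch for IPv6ExtensionSkipper with a DecodingLayerParser (assumes
// gopacket's standard parser API and the Ethernet/TCP layers from this
// package; packetData is illustrative only):
//
//	var eth Ethernet
//	var ip6 IPv6
//	var skip IPv6ExtensionSkipper
//	var tcp TCP
//	parser := gopacket.NewDecodingLayerParser(LayerTypeEthernet, &eth, &ip6, &skip, &tcp)
//	decoded := []gopacket.LayerType{}
//	_ = parser.DecodeLayers(packetData, &decoded)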
// IPv6HopByHop is the IPv6 hop-by-hop extension.
type IPv6HopByHop struct {
ipv6ExtensionBase
Options []IPv6HopByHopOption
opts [2]IPv6HopByHopOption
}
// LayerType returns LayerTypeIPv6HopByHop.
func (i *IPv6HopByHop) LayerType() gopacket.LayerType { return LayerTypeIPv6HopByHop }
// IPv6Routing is the IPv6 routing extension.
type IPv6Routing struct {
ipv6ExtensionBase
RoutingType uint8
SegmentsLeft uint8
	// Reserved is the second set of 4 bytes in the extension header; RFC 2460
	// requires it to be zero.
Reserved []byte
// SourceRoutingIPs is the set of IPv6 addresses requested for source routing,
// set only if RoutingType == 0.
SourceRoutingIPs []net.IP
}
// LayerType returns LayerTypeIPv6Routing.
func (i *IPv6Routing) LayerType() gopacket.LayerType { return LayerTypeIPv6Routing }
func decodeIPv6Routing(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6Routing{
ipv6ExtensionBase: decodeIPv6ExtensionBase(data),
RoutingType: data[2],
SegmentsLeft: data[3],
Reserved: data[4:8],
}
switch i.RoutingType {
case 0: // Source routing
if (len(data)-8)%16 != 0 {
return fmt.Errorf("Invalid IPv6 source routing, length of type 0 packet %d", len(data))
}
for d := i.Contents[8:]; len(d) >= 16; d = d[16:] {
i.SourceRoutingIPs = append(i.SourceRoutingIPs, net.IP(d[:16]))
}
}
p.AddLayer(i)
return p.NextDecoder(i.NextHeader)
}
// IPv6Fragment is the IPv6 fragment header, used for packet
// fragmentation/defragmentation.
type IPv6Fragment struct {
BaseLayer
NextHeader IPProtocol
// Reserved1 is bits [8-16), from least to most significant, 0-indexed
Reserved1 uint8
FragmentOffset uint16
// Reserved2 is bits [29-31), from least to most significant, 0-indexed
Reserved2 uint8
MoreFragments bool
Identification uint32
}
// LayerType returns LayerTypeIPv6Fragment.
func (i *IPv6Fragment) LayerType() gopacket.LayerType { return LayerTypeIPv6Fragment }
func decodeIPv6Fragment(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6Fragment{
BaseLayer: BaseLayer{data[:8], data[8:]},
NextHeader: IPProtocol(data[0]),
Reserved1: data[1],
FragmentOffset: binary.BigEndian.Uint16(data[2:4]) >> 3,
Reserved2: data[3] & 0x6 >> 1,
MoreFragments: data[3]&0x1 != 0,
Identification: binary.BigEndian.Uint32(data[4:8]),
}
p.AddLayer(i)
return p.NextDecoder(gopacket.DecodeFragment)
}
// IPv6DestinationOption is a TLV option present in an IPv6 destination options extension.
type IPv6DestinationOption ipv6HeaderTLVOption
func (o *IPv6DestinationOption) serializeTo(b gopacket.SerializeBuffer, fixLengths bool) (int, error) {
return (*ipv6HeaderTLVOption)(o).serializeTo(b, fixLengths)
}
// IPv6Destination is the IPv6 destination options header.
type IPv6Destination struct {
ipv6ExtensionBase
Options []IPv6DestinationOption
}
// LayerType returns LayerTypeIPv6Destination.
func (i *IPv6Destination) LayerType() gopacket.LayerType { return LayerTypeIPv6Destination }
func decodeIPv6Destination(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6Destination{
ipv6ExtensionBase: decodeIPv6ExtensionBase(data),
		// We guess we'll have 1-2 options: at least one regular option, then
		// maybe one padding option.
Options: make([]IPv6DestinationOption, 0, 2),
}
var opt *IPv6DestinationOption
for d := i.Contents[2:]; len(d) > 0; d = d[opt.ActualLength:] {
i.Options = append(i.Options, IPv6DestinationOption(decodeIPv6HeaderTLVOption(d)))
opt = &i.Options[len(i.Options)-1]
}
p.AddLayer(i)
return p.NextDecoder(i.NextHeader)
}
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
func (i *IPv6Destination) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
optionLength := 0
for _, opt := range i.Options {
l, err := opt.serializeTo(b, opts.FixLengths)
if err != nil {
return err
}
optionLength += l
}
bytes, err := b.PrependBytes(2)
if err != nil {
return err
}
bytes[0] = uint8(i.NextHeader)
if opts.FixLengths {
i.HeaderLength = uint8((optionLength + 2) / 8)
}
bytes[1] = i.HeaderLength
return nil
}
|
[
7
] |
package main
import (
. "pb/template"
)
type logic struct {
server *server
}
func newLogic() *logic {
return &logic{}
}
func (l *logic) init(s *server) {
l.server = s
}
// handleMessage handles all logic messages, including client messages and gRPC messages.
func (l *logic) handleMessage(msgName, msg interface{}) (interface{}, error) {
switch msgName {
case "TemplateMsgTest":
pbMsg := msg.(*TemplateMsgTest)
		if pbMsg != nil {
			// TODO: handle TemplateMsgTest here.
		}
}
return nil, nil
}
|
[
7
] |
/*
Copyright (c) 2012 José Carlos Nieto, http://xiam.menteslibres.org/
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package db
import (
"fmt"
"github.com/gosexy/sugar"
"regexp"
"strconv"
"strings"
"time"
)
// GetString returns the item value as a string.
func (item Item) GetString(name string) string {
return fmt.Sprintf("%v", item[name])
}
// GetDate returns the item value as a time.Time.
func (item Item) GetDate(name string) time.Time {
date := time.Date(0, time.January, 0, 0, 0, 0, 0, time.UTC)
switch item[name].(type) {
case time.Time:
date = item[name].(time.Time)
case string:
var matched bool
value := item[name].(string)
matched, _ = regexp.MatchString(`^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$`, value)
if matched {
date, _ = time.Parse("2006-01-02 15:04:05", value)
}
}
return date
}
// GetDuration returns the item value as a time.Duration.
func (item Item) GetDuration(name string) time.Duration {
duration, _ := time.ParseDuration("0h0m0s")
switch item[name].(type) {
case time.Duration:
duration = item[name].(time.Duration)
case string:
var matched bool
var re *regexp.Regexp
value := item[name].(string)
matched, _ = regexp.MatchString(`^\d{2}:\d{2}:\d{2}$`, value)
if matched {
re, _ = regexp.Compile(`^(\d{2}):(\d{2}):(\d{2})$`)
all := re.FindAllStringSubmatch(value, -1)
formatted := fmt.Sprintf("%sh%sm%ss", all[0][1], all[0][2], all[0][3])
duration, _ = time.ParseDuration(formatted)
}
}
return duration
}
// GetTuple returns the item value as a sugar.Tuple.
func (item Item) GetTuple(name string) sugar.Tuple {
tuple := sugar.Tuple{}
switch item[name].(type) {
case map[string]interface{}:
		for k := range item[name].(map[string]interface{}) {
tuple[k] = item[name].(map[string]interface{})[k]
}
case sugar.Tuple:
tuple = item[name].(sugar.Tuple)
}
return tuple
}
// GetList returns the item value as a sugar.List.
func (item Item) GetList(name string) sugar.List {
list := sugar.List{}
switch item[name].(type) {
case []interface{}:
list = make(sugar.List, len(item[name].([]interface{})))
		for k := range item[name].([]interface{}) {
list[k] = item[name].([]interface{})[k]
}
}
return list
}
// GetInt returns the item value as an int64.
func (item Item) GetInt(name string) int64 {
i, _ := strconv.ParseInt(fmt.Sprintf("%v", item[name]), 10, 64)
return i
}
// GetFloat returns the item value as a float64.
func (item Item) GetFloat(name string) float64 {
f, _ := strconv.ParseFloat(fmt.Sprintf("%v", item[name]), 64)
return f
}
// GetBool returns the item value as a boolean.
func (item Item) GetBool(name string) bool {
if item[name] == nil {
return false
}
	b := strings.ToLower(fmt.Sprintf("%v", item[name]))
	if b == "" || b == "0" || b == "false" {
		return false
	}
return true
}
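// Usage sketch (Item is the map-backed row type defined elsewhere in this
// package; the column names below are illustrative only):
//
//	created := item.GetDate("created_at")
//	price := item.GetFloat("price")
//	active := item.GetBool("active")
//	tags := item.GetList("tags")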
|
[
7
] |
// Code generated by mockery v2.3.0. DO NOT EDIT.
package mocks
import (
context "context"
pipeline "github.com/vordev/VOR/core/services/pipeline"
mock "github.com/stretchr/testify/mock"
)
// Task is an autogenerated mock type for the Task type
type Task struct {
mock.Mock
}
// DotID provides a mock function with given fields:
func (_m *Task) DotID() string {
ret := _m.Called()
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// OutputIndex provides a mock function with given fields:
func (_m *Task) OutputIndex() int32 {
ret := _m.Called()
var r0 int32
if rf, ok := ret.Get(0).(func() int32); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(int32)
}
return r0
}
// OutputTask provides a mock function with given fields:
func (_m *Task) OutputTask() pipeline.Task {
ret := _m.Called()
var r0 pipeline.Task
if rf, ok := ret.Get(0).(func() pipeline.Task); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(pipeline.Task)
}
}
return r0
}
// Run provides a mock function with given fields: ctx, taskRun, inputs
func (_m *Task) Run(ctx context.Context, taskRun pipeline.TaskRun, inputs []pipeline.Result) pipeline.Result {
ret := _m.Called(ctx, taskRun, inputs)
var r0 pipeline.Result
if rf, ok := ret.Get(0).(func(context.Context, pipeline.TaskRun, []pipeline.Result) pipeline.Result); ok {
r0 = rf(ctx, taskRun, inputs)
} else {
r0 = ret.Get(0).(pipeline.Result)
}
return r0
}
// SetOutputTask provides a mock function with given fields: task
func (_m *Task) SetOutputTask(task pipeline.Task) {
_m.Called(task)
}
// Type provides a mock function with given fields:
func (_m *Task) Type() pipeline.TaskType {
ret := _m.Called()
var r0 pipeline.TaskType
if rf, ok := ret.Get(0).(func() pipeline.TaskType); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(pipeline.TaskType)
}
return r0
}
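// Usage sketch in a test (assumes testify's mock expectation API; the values
// below are illustrative only):
//
//	task := new(Task)
//	task.On("Type").Return(pipeline.TaskType("http"))
//	task.On("DotID").Return("fetch")
//	// exercise the code under test, then verify:
//	task.AssertExpectations(t)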
|
[
4
] |
// Code generated by smithy-go-codegen DO NOT EDIT.
package transcribestreaming
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/service/transcribestreaming/types"
smithy "github.com/aws/smithy-go"
"github.com/aws/smithy-go/middleware"
)
type validateOpStartCallAnalyticsStreamTranscription struct {
}
func (*validateOpStartCallAnalyticsStreamTranscription) ID() string {
return "OperationInputValidation"
}
func (m *validateOpStartCallAnalyticsStreamTranscription) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*StartCallAnalyticsStreamTranscriptionInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpStartCallAnalyticsStreamTranscriptionInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpStartMedicalStreamTranscription struct {
}
func (*validateOpStartMedicalStreamTranscription) ID() string {
return "OperationInputValidation"
}
func (m *validateOpStartMedicalStreamTranscription) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*StartMedicalStreamTranscriptionInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpStartMedicalStreamTranscriptionInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpStartStreamTranscription struct {
}
func (*validateOpStartStreamTranscription) ID() string {
return "OperationInputValidation"
}
func (m *validateOpStartStreamTranscription) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*StartStreamTranscriptionInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpStartStreamTranscriptionInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
func addOpStartCallAnalyticsStreamTranscriptionValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpStartCallAnalyticsStreamTranscription{}, middleware.After)
}
func addOpStartMedicalStreamTranscriptionValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpStartMedicalStreamTranscription{}, middleware.After)
}
func addOpStartStreamTranscriptionValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpStartStreamTranscription{}, middleware.After)
}
func validateAudioStream(v types.AudioStream) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "AudioStream"}
switch uv := v.(type) {
case *types.AudioStreamMemberConfigurationEvent:
if err := validateConfigurationEvent(&uv.Value); err != nil {
invalidParams.AddNested("[ConfigurationEvent]", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateChannelDefinition(v *types.ChannelDefinition) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "ChannelDefinition"}
if len(v.ParticipantRole) == 0 {
invalidParams.Add(smithy.NewErrParamRequired("ParticipantRole"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateChannelDefinitions(v []types.ChannelDefinition) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "ChannelDefinitions"}
for i := range v {
if err := validateChannelDefinition(&v[i]); err != nil {
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateConfigurationEvent(v *types.ConfigurationEvent) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "ConfigurationEvent"}
if v.ChannelDefinitions != nil {
if err := validateChannelDefinitions(v.ChannelDefinitions); err != nil {
invalidParams.AddNested("ChannelDefinitions", err.(smithy.InvalidParamsError))
}
}
if v.PostCallAnalyticsSettings != nil {
if err := validatePostCallAnalyticsSettings(v.PostCallAnalyticsSettings); err != nil {
invalidParams.AddNested("PostCallAnalyticsSettings", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validatePostCallAnalyticsSettings(v *types.PostCallAnalyticsSettings) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "PostCallAnalyticsSettings"}
if v.OutputLocation == nil {
invalidParams.Add(smithy.NewErrParamRequired("OutputLocation"))
}
if v.DataAccessRoleArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("DataAccessRoleArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpStartCallAnalyticsStreamTranscriptionInput(v *StartCallAnalyticsStreamTranscriptionInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "StartCallAnalyticsStreamTranscriptionInput"}
if len(v.LanguageCode) == 0 {
invalidParams.Add(smithy.NewErrParamRequired("LanguageCode"))
}
if v.MediaSampleRateHertz == nil {
invalidParams.Add(smithy.NewErrParamRequired("MediaSampleRateHertz"))
}
if len(v.MediaEncoding) == 0 {
invalidParams.Add(smithy.NewErrParamRequired("MediaEncoding"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpStartMedicalStreamTranscriptionInput(v *StartMedicalStreamTranscriptionInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "StartMedicalStreamTranscriptionInput"}
if len(v.LanguageCode) == 0 {
invalidParams.Add(smithy.NewErrParamRequired("LanguageCode"))
}
if v.MediaSampleRateHertz == nil {
invalidParams.Add(smithy.NewErrParamRequired("MediaSampleRateHertz"))
}
if len(v.MediaEncoding) == 0 {
invalidParams.Add(smithy.NewErrParamRequired("MediaEncoding"))
}
if len(v.Specialty) == 0 {
invalidParams.Add(smithy.NewErrParamRequired("Specialty"))
}
if len(v.Type) == 0 {
invalidParams.Add(smithy.NewErrParamRequired("Type"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpStartStreamTranscriptionInput(v *StartStreamTranscriptionInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "StartStreamTranscriptionInput"}
if v.MediaSampleRateHertz == nil {
invalidParams.Add(smithy.NewErrParamRequired("MediaSampleRateHertz"))
}
if len(v.MediaEncoding) == 0 {
invalidParams.Add(smithy.NewErrParamRequired("MediaEncoding"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
|
[
7
] |