code (stringlengths 67 to 15.9k) | labels (listlengths 1 to 4)
---|---
/*
* // Copyright 2020 Insolar Network Ltd.
* // All rights reserved.
* // This material is licensed under the Insolar License version 1.0,
* // available at https://github.com/insolar/assured-ledger/blob/master/LICENSE.md.
*/
package loaderbot
import (
"bytes"
"encoding/gob"
"log"
"os"
"os/signal"
"path/filepath"
"runtime"
"syscall"
)
var (
sigs = make(chan os.Signal, 1)
)
func (r *Runner) handleShutdownSignal() {
r.wg.Add(1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
go func() {
defer r.wg.Done()
select {
case <-r.TimeoutCtx.Done():
return
case <-sigs:
r.CancelFunc()
r.L.Infof("exit signal received, exiting")
if r.Cfg.GoroutinesDump {
buf := make([]byte, 1<<20)
stacklen := runtime.Stack(buf, true)
r.L.Infof("=== received SIGTERM ===\n*** goroutine dump...\n%s\n*** End\n", buf[:stacklen])
}
os.Exit(1)
}
}()
}
// CreateFileOrAppend creates the file if it does not exist, or opens it in append mode; used for metrics shared between tests
func CreateFileOrAppend(fname string) *os.File {
var file *os.File
fpath, _ := filepath.Abs(fname)
_, err := os.Stat(fpath)
if err != nil {
file, err = os.Create(fname)
} else {
file, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
}
if err != nil {
log.Fatal(err)
}
return file
}
// CreateFileOrReplace creates a new file every time; used for files with a static name
// whose content must not contain data from different tests
func CreateFileOrReplace(fname string) *os.File {
fpath, _ := filepath.Abs(fname)
_ = os.Remove(fpath)
file, err := os.Create(fpath)
if err != nil {
log.Fatal(err)
}
return file
}
func MaxRPS(array []float64) float64 {
if len(array) == 0 {
return 1
}
var max = array[0]
for _, value := range array {
if max < value {
max = value
}
}
return max
}
// For ease of use the config is currently passed around as raw gob-encoded bytes; proper pb types may be added later
func MarshalConfigGob(cfg interface{}) []byte {
var b bytes.Buffer
enc := gob.NewEncoder(&b)
if err := enc.Encode(cfg); err != nil {
log.Fatal(err)
}
return b.Bytes()
}
func UnmarshalConfigGob(d []byte) RunnerConfig {
b := bytes.NewBuffer(d)
dec := gob.NewDecoder(b)
var cfg RunnerConfig
if err := dec.Decode(&cfg); err != nil {
log.Fatal(err)
}
return cfg
}
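// Illustrative usage sketch (not part of the original file): round-tripping a
// config through the gob helpers above. RunnerConfig is assumed to be defined
// elsewhere in this package, as UnmarshalConfigGob implies.
func exampleConfigGobRoundTrip() {
	cfg := RunnerConfig{} // zero-value config, purely for illustration
	data := MarshalConfigGob(cfg)
	restored := UnmarshalConfigGob(data)
	log.Printf("restored config: %+v", restored)
}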
| [1] |
package paypal
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"time"
)
const (
// APIBaseSandBox points to the sandbox (for testing) version of the API
APIBaseSandBox = "https://api.sandbox.paypal.com/v1"
// APIBaseLive points to the live version of the API
APIBaseLive = "https://api.paypal.com/v1"
)
type (
// Client represents a Paypal REST API Client
Client struct {
client *http.Client
ClientID string
Secret string
APIBase string
Token *TokenResp
}
// ErrorResponse is used when a response contains errors
// maps to error object
ErrorResponse struct {
// HTTP response that caused this error
Response *http.Response `json:"-"`
Name string `json:"name"`
DebugID string `json:"debug_id"`
Message string `json:"message"`
InformationLink string `json:"information_link"`
Details []ErrorDetail `json:"details"`
}
// ErrorDetail maps to the error_details object
ErrorDetail struct {
Field string `json:"field"`
Issue string `json:"issue"`
}
// TokenResp maps to the API response for the /oauth2/token endpoint
TokenResp struct {
Scope string `json:"scope"` // "https://api.paypal.com/v1/payments/.* https://api.paypal.com/v1/vault/credit-card https://api.paypal.com/v1/vault/credit-card/.*",
Token string `json:"access_token"` // "EEwJ6tF9x5WCIZDYzyZGaz6Khbw7raYRIBV_WxVvgmsG",
Type string `json:"token_type"` // "Bearer",
AppID string `json:"app_id"` // "APP-6XR95014BA15863X",
ExpiresIn int `json:"expires_in"` // 28800
ExpiresAt time.Time `json:"expires_at"`
}
)
func (r *ErrorResponse) Error() string {
return fmt.Sprintf("%v %v: %d %v\nDetails: %v",
r.Response.Request.Method, r.Response.Request.URL, r.Response.StatusCode, r.Message, r.Details)
}
// NewClient returns a new Client struct
func NewClient(clientID, secret, APIBase string) *Client {
return &Client{
&http.Client{},
clientID,
secret,
APIBase,
nil,
}
}
// NewRequest constructs a request. If payload is not nil, it will be
// marshalled into JSON
func NewRequest(method, url string, payload interface{}) (*http.Request, error) {
var buf io.Reader
if payload != nil {
var b []byte
b, err := json.Marshal(&payload)
if err != nil {
return nil, err
}
buf = bytes.NewBuffer(b)
}
return http.NewRequest(method, url, buf)
}
// GetAccessToken requests a new access token from PayPal
func (c *Client) GetAccessToken() (*TokenResp, error) {
buf := bytes.NewBuffer([]byte("grant_type=client_credentials"))
req, err := http.NewRequest("POST", fmt.Sprintf("%s%s", c.APIBase, "/oauth2/token"), buf)
if err != nil {
return nil, err
}
req.SetBasicAuth(c.ClientID, c.Secret)
req.Header.Set("Content-type", "application/x-www-form-urlencoded")
t := TokenResp{}
err = c.Send(req, &t)
if err == nil {
t.ExpiresAt = time.Now().Add(time.Duration(t.ExpiresIn/2) * time.Second)
}
return &t, err
}
// Send makes a request to the API; the response body will be
// unmarshaled into v, or, if v is an io.Writer, the response will
// be written to it without decoding
func (c *Client) Send(req *http.Request, v interface{}) error {
// Set default headers
req.Header.Set("Accept", "application/json")
req.Header.Set("Accept-Language", "en_US")
// Default values for headers
if req.Header.Get("Content-type") == "" {
req.Header.Set("Content-type", "application/json")
}
log.Println(req.Method, ": ", req.URL)
resp, err := c.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if c := resp.StatusCode; c < 200 || c > 299 {
errResp := &ErrorResponse{Response: resp}
data, err := ioutil.ReadAll(resp.Body)
if err == nil && len(data) > 0 {
json.Unmarshal(data, errResp)
}
return errResp
}
if v != nil {
if w, ok := v.(io.Writer); ok {
io.Copy(w, resp.Body)
} else {
err = json.NewDecoder(resp.Body).Decode(v)
if err != nil {
return err
}
}
}
return nil
}
// SendWithAuth makes a request to the API and applies the OAuth2 header automatically.
// If the access token is about to expire, it will try to get a new one before
// making the main request
func (c *Client) SendWithAuth(req *http.Request, v interface{}) error {
if (c.Token == nil) || (c.Token.ExpiresAt.Before(time.Now())) {
resp, err := c.GetAccessToken()
if err != nil {
return err
}
c.Token = resp
}
req.Header.Set("Authorization", "Bearer "+c.Token.Token)
return c.Send(req, v)
}
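// Illustrative usage sketch (not part of the original file): construct a sandbox
// client and issue an authenticated GET. The "/payments/payment" path is only an
// example resource; any endpoint under APIBase works the same way.
func exampleClientUsage() {
	c := NewClient("your-client-id", "your-secret", APIBaseSandBox)
	req, err := NewRequest("GET", fmt.Sprintf("%s%s", c.APIBase, "/payments/payment"), nil)
	if err != nil {
		log.Fatal(err)
	}
	var result map[string]interface{}
	if err := c.SendWithAuth(req, &result); err != nil {
		log.Fatal(err)
	}
	log.Printf("response: %v", result)
}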
| [2] |
package emails
import (
"context"
"sync"
"gitlab.com/distributed_lab/notificator-server/client"
"gitlab.com/distributed_lab/running"
)
func (t Task) toPayload() notificator.EmailRequestPayload {
return notificator.EmailRequestPayload{
Destination: t.Destination,
Subject: t.Subject,
Message: t.Message,
}
}
type TaskSyncSet struct {
mu sync.Mutex
data map[Task]struct{}
}
func newSyncSet() TaskSyncSet {
return TaskSyncSet{
mu: sync.Mutex{},
data: make(map[Task]struct{}),
}
}
func (s *TaskSyncSet) put(ctx context.Context, new Task) {
put := func() <-chan struct{} {
c := make(chan struct{})
go func() {
s.mu.Lock()
defer s.mu.Unlock()
s.data[new] = struct{}{}
close(c)
}()
return c
}
select {
case <-ctx.Done():
return
case <-put():
return
}
}
func (s *TaskSyncSet) delete(values []Task) {
s.mu.Lock()
defer s.mu.Unlock()
for _, value := range values {
delete(s.data, value)
}
}
func (s *TaskSyncSet) length() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.data)
}
func (s *TaskSyncSet) rangeThrough(ctx context.Context, f func(task Task)) {
// TODO Listen to ctx along with mutex
s.mu.Lock()
defer s.mu.Unlock()
for key := range s.data {
if running.IsCancelled(ctx) {
return
}
f(key)
}
}
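// Illustrative usage sketch (not part of the original file): put tasks into the
// set, range over them, and delete the ones that were processed. Task is assumed
// to be a comparable struct with the fields used in toPayload above.
func exampleSyncSetUsage(ctx context.Context) {
	set := newSyncSet()
	set.put(ctx, Task{Destination: "user@example.com", Subject: "Hi", Message: "Hello"})
	var processed []Task
	set.rangeThrough(ctx, func(task Task) {
		// send the notification here, then remember the task as processed
		processed = append(processed, task)
	})
	set.delete(processed)
}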
| [1] |
package servergrpc
import (
"flag"
"fmt"
"math/rand"
"net"
"os"
"sync"
"time"
context "golang.org/x/net/context"
pb "github.com/221bytes/osiris/fileguide"
"github.com/221bytes/osiris/fileutils"
"google.golang.org/grpc"
"google.golang.org/grpc/grpclog"
)
var (
port = flag.Int64("port", 10000, "The server port")
)
type fileGuideServer struct {
fileSummariesc chan *pb.FileSummary
}
var progressMap = make(map[string]int64)
func (s *fileGuideServer) SaveFile(stream pb.FileGuide_SaveFileServer) error {
fileSummary, err := fileutils.SaveFileFromStream(stream)
if err != nil {
grpclog.Fatalf("%v.Server SaveFile= %v", stream, err)
}
grpclog.Printf("file download in %v seconds", fileSummary.ElapsedTime)
s.fileSummariesc <- fileSummary
return stream.SendAndClose(fileSummary)
}
func (s *fileGuideServer) GetFile(filename *pb.Filename, stream pb.FileGuide_GetFileServer) error {
file, err := os.Open(filename.Name)
if err != nil {
grpclog.Fatalf("%v.runSaveFileRoute() got error %v, want %v", stream, err, nil)
}
if err := fileutils.SendFileToStream(file, stream); err != nil {
grpclog.Fatalf("%v.RecordRoute(_) = _, %v", stream, err)
}
return nil
}
func random(min, max int) int64 {
rand.Seed(time.Now().Unix())
return int64(rand.Intn(max-min) + min)
}
func (s *fileGuideServer) InitConnection(ctx context.Context, ini *pb.InitConnectionData) (*pb.InitConnectionData, error) {
myrand := random(1024, 65535)
ports := make([]int64, ini.NBPort)
ini.Ports = ports
for i := int64(0); i < ini.NBPort; i++ {
go runServer(myrand + i)
ports[i] = myrand + i
}
return ini, nil
}
func newServer() *fileGuideServer {
s := new(fileGuideServer)
return s
}
func runServer(port int64) {
var mutex = &sync.Mutex{}
grpclog.Printf("Starting ServerGRPC on %v\n", port)
fileSummariesc := make(chan *pb.FileSummary)
lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
if err != nil {
grpclog.Fatalf("failed to listen: %v", err)
}
var opts []grpc.ServerOption
grpcServer := grpc.NewServer(opts...)
fileGuideSvr := newServer()
fileGuideSvr.fileSummariesc = fileSummariesc
pb.RegisterFileGuideServer(grpcServer, fileGuideSvr)
go grpcServer.Serve(lis)
for {
select {
case fileSummary := <-fileSummariesc:
fmt.Println("fileSummary")
fmt.Println(fileSummary)
mutex.Lock()
progressMap[fileSummary.Name]++
if progressMap[fileSummary.Name] == fileSummary.TotalBlock {
fmt.Println("all blocks has been downloaded")
fileutils.MergeFile(fileSummary.Name)
}
mutex.Unlock()
}
}
}
func StartServer() {
flag.Parse()
runServer(*port)
}
| [1] |
package sign
import (
"context"
"crypto/md5"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
)
func Tosign(cxt context.Context, name, BDUSS, tbs string) error {
body := "kw=" + name + "&tbs=" + tbs + "&sign=" + enCodeMd5("kw="+name+"tbs="+tbs+"tiebaclient!!!")
reqs, err := http.NewRequestWithContext(cxt, "POST", SIGNUEL, strings.NewReader(body))
if err != nil {
return fmt.Errorf("Tosign: %w", err)
}
reqs.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8")
reqs.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36")
reqs.Header.Set("Cookie", "BDUSS="+BDUSS)
rep, err := client.Do(reqs)
if rep != nil {
defer rep.Body.Close()
}
if err != nil {
return fmt.Errorf("Tosign: %w", err)
}
if rep.StatusCode != 200 {
return Not200{rep.Status}
}
b, err := ioutil.ReadAll(rep.Body)
if err != nil {
return fmt.Errorf("Tosign: %w", err)
}
var e errcode
err = json.Unmarshal(b, &e)
if err != nil {
return fmt.Errorf("Tosign: %w", err)
}
if e.ErrCode != "0" {
return SignErr
}
return nil
}
var SignErr = errors.New("签到失败")
func enCodeMd5(msg string) string {
h := md5.Sum([]byte(msg))
return hex.EncodeToString(h[:])
}
type errcode struct {
ErrCode string `json:"error_code"`
}
| [2] |
package transformer
import (
"sort"
"strings"
"github.com/bblfsh/sdk/v3/uast"
"github.com/bblfsh/sdk/v3/uast/nodes"
)
const optimizeCheck = true
// Transformer is an interface for transformations that operate on AST trees.
// An implementation is responsible for walking the tree and executing the transformation on each AST node.
type Transformer interface {
Do(root nodes.Node) (nodes.Node, error)
}
// CodeTransformer is a special case of Transformer that needs an original source code to operate.
type CodeTransformer interface {
OnCode(code string) Transformer
}
// Sel is an operation that can verify if a specific node matches a set of constraints or not.
type Sel interface {
// Kinds returns a mask of all nodes kinds that this operation might match.
Kinds() nodes.Kind
// Check will verify constraints for a single node and returns true if an object matches them.
// It can also populate the State with variables that can be used later to Construct a different object from the State.
Check(st *State, n nodes.Node) (bool, error)
}
// Mod is an operation that can reconstruct an AST node from a given State.
type Mod interface {
// Construct will use variables stored in State to reconstruct an AST node.
// Node that is provided as an argument may be used as a base for reconstruction.
Construct(st *State, n nodes.Node) (nodes.Node, error)
}
// Op is a generic AST transformation step that describes a shape of an AST tree.
// It can be used to either check the constraints for a specific node and populate state, or to reconstruct an AST shape
// from the same state (probably produced by another Op).
type Op interface {
Sel
Mod
}
// Transformers appends all provided transformer slices into single one.
func Transformers(arr ...[]Transformer) []Transformer {
var out []Transformer
for _, a := range arr {
out = append(out, a...)
}
return out
}
var _ Transformer = (TransformFunc)(nil)
// TransformFunc is a function that will be applied to each AST node to transform the tree.
// It returns a new AST and true if the tree was changed, or the old node and false if no modifications were made.
// The tree will be traversed automatically and the callback will be called for each node.
type TransformFunc func(n nodes.Node) (nodes.Node, bool, error)
// Do runs a transformation function for each AST node.
func (f TransformFunc) Do(n nodes.Node) (nodes.Node, error) {
var last error
nn, ok := nodes.Apply(n, func(n nodes.Node) (nodes.Node, bool) {
nn, ok, err := f(n)
if err != nil {
last = err
return n, false
} else if !ok {
return n, false
}
return nn, ok
})
if ok {
return nn, last
}
return n, last
}
var _ Transformer = (TransformObjFunc)(nil)
// TransformObjFunc is like TransformFunc, but only matches Object nodes.
type TransformObjFunc func(n nodes.Object) (nodes.Object, bool, error)
// Func converts this TransformObjFunc to a regular TransformFunc by skipping all non-object nodes.
func (f TransformObjFunc) Func() TransformFunc {
return TransformFunc(func(n nodes.Node) (nodes.Node, bool, error) {
obj, ok := n.(nodes.Object)
if !ok {
return n, false, nil
}
nn, ok, err := f(obj)
if err != nil {
return n, false, err
} else if !ok {
return n, false, nil
}
return nn, ok, nil
})
}
// Do runs a transformation function for each AST node.
func (f TransformObjFunc) Do(n nodes.Node) (nodes.Node, error) {
return f.Func().Do(n)
}
// Map creates a two-way mapping between two transform operations.
// The first operation will be used to check constraints for each node and store state, while the second one will use
// the state to construct a new tree.
func Map(src, dst Op) Mapping {
return mapping{src: src, dst: dst}
}
func MapObj(src, dst ObjectOp) ObjMapping {
return objMapping{src: src, dst: dst}
}
func MapPart(vr string, m ObjMapping) ObjMapping {
src, dst := m.ObjMapping()
_, sok := src.Fields()
_, dok := dst.Fields()
if !sok && !dok {
// both contain partial op, ignore current label
return MapObj(src, dst)
} else if sok != dok {
panic("inconsistent use of Part")
}
return MapObj(Part(vr, src), Part(vr, dst))
}
func Identity(op Op) Mapping {
return Map(op, op)
}
type Mapping interface {
Mapping() (src, dst Op)
}
type ObjMapping interface {
Mapping
ObjMapping() (src, dst ObjectOp)
}
type MappingOp interface {
Op
Mapping
}
type mapping struct {
src, dst Op
}
func (m mapping) Mapping() (src, dst Op) {
return m.src, m.dst
}
type objMapping struct {
src, dst ObjectOp
}
func (m objMapping) Mapping() (src, dst Op) {
return m.src, m.dst
}
func (m objMapping) ObjMapping() (src, dst ObjectOp) {
return m.src, m.dst
}
// Reverse changes the transformation direction, allowing the source tree to be constructed.
func Reverse(m Mapping) Mapping {
src, dst := m.Mapping()
return Map(dst, src)
}
func (m mapping) apply(root nodes.Node) (nodes.Node, error) {
src, dst := m.src, m.dst
var errs []error
_, objOp := src.(ObjectOp)
_, arrOp := src.(ArrayOp)
st := NewState()
nn, ok := nodes.Apply(root, func(n nodes.Node) (nodes.Node, bool) {
if n != nil {
if objOp {
if _, ok := n.(nodes.Object); !ok {
return n, false
}
} else if arrOp {
if _, ok := n.(nodes.Array); !ok {
return n, false
}
}
}
st.Reset()
if ok, err := src.Check(st, n); err != nil {
errs = append(errs, errCheck.Wrap(err))
return n, false
} else if !ok {
return n, false
}
nn, err := dst.Construct(st, nil)
if err != nil {
errs = append(errs, errConstruct.Wrap(err))
return n, false
}
return nn, true
})
err := NewMultiError(errs...)
if ok {
return nn, err
}
return root, err
}
// Mappings takes multiple mappings and optimizes the process of applying them as a single transformation.
func Mappings(maps ...Mapping) Transformer {
if len(maps) == 0 {
return mappings{}
}
mp := mappings{
all: maps,
}
if optimizeCheck {
mp.byKind = make(map[nodes.Kind][]Mapping)
mp.index()
}
return mp
}
type mappings struct {
all []Mapping
// indexed mappings
byKind map[nodes.Kind][]Mapping // mappings applied to specific node kind
typedObj map[string][]Mapping // mappings for objects with specific type
typedAny []Mapping // mappings for any typed object (operations that do not mention the type)
}
func (m *mappings) index() {
precompile := func(m Mapping) Mapping {
return Map(m.Mapping())
}
type ordered struct {
ind int
mp Mapping
}
var typedAny []ordered
typed := make(map[string][]ordered)
for i, mp := range m.all {
// pre-compile object operations (sort fields for unordered ops, etc)
mp = precompile(mp)
oop, _ := mp.Mapping()
if chk, ok := oop.(*opCheck); ok {
oop = chk.op
}
// switch by operation type and make a separate list
// the next time we see a node with a matching type, we will apply only the specific ops
for _, k := range oop.Kinds().Split() {
m.byKind[k] = append(m.byKind[k], mp)
}
switch op := oop.(type) {
case ObjectOp:
specific := false
fields, _ := op.Fields()
if f, ok := fields.Get(uast.KeyType); ok && !f.Optional {
if f.Fixed != nil {
typ := *f.Fixed
if typ, ok := typ.(nodes.String); ok {
s := string(typ)
typed[s] = append(typed[s], ordered{ind: i, mp: mp})
specific = true
}
}
}
if !specific {
typedAny = append(typedAny, ordered{ind: i, mp: mp})
}
default:
// the type is unknown, thus we should try to apply it to objects and arrays as well
typedAny = append(typedAny, ordered{ind: i, mp: mp})
}
}
m.typedObj = make(map[string][]Mapping, len(typed))
for typ, ord := range typed {
ord = append(ord, typedAny...)
sort.Slice(ord, func(i, j int) bool {
return ord[i].ind < ord[j].ind
})
maps := make([]Mapping, 0, len(ord))
for _, o := range ord {
maps = append(maps, o.mp)
}
m.typedObj[typ] = maps
}
}
func (m mappings) Do(root nodes.Node) (nodes.Node, error) {
var errs []error
st := NewState()
nn, ok := nodes.Apply(root, func(old nodes.Node) (nodes.Node, bool) {
var maps []Mapping
if !optimizeCheck {
maps = m.all
} else {
maps = m.byKind[nodes.KindOf(old)]
switch old := old.(type) {
case nodes.Object:
if typ, ok := old[uast.KeyType].(nodes.String); ok {
if mp, ok := m.typedObj[string(typ)]; ok {
maps = mp
}
}
}
}
n := old
applied := false
for _, mp := range maps {
src, dst := mp.Mapping()
st.Reset()
if ok, err := src.Check(st, n); err != nil {
errs = append(errs, errCheck.Wrap(err))
continue
} else if !ok {
continue
}
applied = true
nn, err := dst.Construct(st, nil)
if err != nil {
errs = append(errs, errConstruct.Wrap(err))
continue
}
n = nn
}
if !applied {
return old, false
}
return n, true
})
err := NewMultiError(errs...)
if err == nil {
err = st.Validate()
}
if ok {
return nn, err
}
return root, err
}
// NewState creates a new state for Ops to work on.
// It stores variables, flags and anything else necessary
// for transformation steps to persist data.
func NewState() *State {
return &State{}
}
// Vars is a set of variables with their values.
type Vars map[string]nodes.Node
// State stores all variables (placeholder values, flags and any other state) between the Check and Construct steps.
type State struct {
vars Vars
unused map[string]struct{}
states map[string][]*State
}
// Reset clears the state and allows the object to be reused.
func (st *State) Reset() {
st.vars = nil
st.unused = nil
st.states = nil
}
// Validate should be called after a successful transformation to check if there are any errors related to unused state.
func (st *State) Validate() error {
if len(st.unused) == 0 {
return nil
}
names := make([]string, 0, len(st.unused))
for name := range st.unused {
names = append(names, name)
}
sort.Strings(names)
return ErrVariableUnused.New(names)
}
// Clone will return a copy of the State. This can be used to apply Check and throw away any variables produced by it.
// To merge a cloned state back use ApplyFrom on a parent state.
func (st *State) Clone() *State {
st2 := NewState()
if len(st.vars) != 0 {
st2.vars = make(Vars)
st2.unused = make(map[string]struct{})
}
for k, v := range st.vars {
st2.vars[k] = v
}
for k := range st.unused {
st2.unused[k] = struct{}{}
}
if len(st.states) != 0 {
st2.states = make(map[string][]*State)
}
for k, v := range st.states {
st2.states[k] = v
}
return st2
}
// ApplyFrom merges a provided state into this state object.
func (st *State) ApplyFrom(st2 *State) {
if len(st2.vars) != 0 && st.vars == nil {
st.vars = make(Vars)
st.unused = make(map[string]struct{})
}
for k, v := range st2.vars {
if _, ok := st.vars[k]; !ok {
st.vars[k] = v
}
}
for k := range st2.unused {
st.unused[k] = struct{}{}
}
if len(st2.states) != 0 && st.states == nil {
st.states = make(map[string][]*State)
}
for k, v := range st2.states {
if _, ok := st.states[k]; !ok {
st.states[k] = v
}
}
}
// GetVar looks up a named variable.
func (st *State) GetVar(name string) (nodes.Node, bool) {
n, ok := st.vars[name]
if ok {
delete(st.unused, name)
}
return n, ok
}
// MustGetVar looks up a named variable and returns ErrVariableNotDefined in case it does not exist.
func (st *State) MustGetVar(name string) (nodes.Node, error) {
n, ok := st.GetVar(name)
if !ok {
return nil, ErrVariableNotDefined.New(name)
}
return n, nil
}
// VarsPtrs is a set of variable pointers.
type VarsPtrs map[string]nodes.NodePtr
// MustGetVars is like MustGetVar but fetches multiple variables in one operation.
func (st *State) MustGetVars(vars VarsPtrs) error {
for name, dst := range vars {
n, ok := st.GetVar(name)
if !ok {
return ErrVariableNotDefined.New(name)
}
if err := dst.SetNode(n); err != nil {
return err
}
}
return nil
}
// SetVar sets a named variable. It will return ErrVariableRedeclared if a variable with the same name is already set.
// It will ignore the operation if variable already exists and has the same value (nodes.Value).
func (st *State) SetVar(name string, val nodes.Node) error {
cur, ok := st.vars[name]
if !ok {
// not declared
if st.vars == nil {
st.vars = make(Vars)
st.unused = make(map[string]struct{})
}
st.vars[name] = val
st.unused[name] = struct{}{}
return nil
}
if nodes.Equal(cur, val) {
// already declared, and the same value is already in the map
return nil
}
return ErrVariableRedeclared.New(name, cur, val)
}
// SetVars is like SetVar but sets multiple variables in one operation.
func (st *State) SetVars(vars Vars) error {
for k, v := range vars {
if err := st.SetVar(k, v); err != nil {
return err
}
}
return nil
}
// GetStateVar returns a stored sub-state from a named variable.
func (st *State) GetStateVar(name string) ([]*State, bool) {
n, ok := st.states[name]
return n, ok
}
// SetStateVar sets a sub-state variable. It returns ErrVariableRedeclared if the variable with this name already exists.
func (st *State) SetStateVar(name string, sub []*State) error {
cur, ok := st.states[name]
if ok {
return ErrVariableRedeclared.New(name, cur, sub)
}
if st.states == nil {
st.states = make(map[string][]*State)
}
st.states[name] = sub
return nil
}
// DefaultNamespace is a transform that sets a specified namespace for predicates and values that don't have a namespace.
func DefaultNamespace(ns string) Transformer {
return TransformFunc(func(n nodes.Node) (nodes.Node, bool, error) {
obj, ok := n.(nodes.Object)
if !ok {
return n, false, nil
}
tp, ok := obj[uast.KeyType].(nodes.String)
if !ok {
return n, false, nil
}
if strings.Contains(string(tp), ":") {
return n, false, nil
}
obj = obj.CloneObject()
obj[uast.KeyType] = nodes.String(ns + ":" + string(tp))
return obj, true, nil
})
}
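// Illustrative usage sketch (not part of the original file): composing a small
// pipeline from the primitives above and running it over a tree. The "go"
// namespace is an arbitrary example value.
func exampleApplyPipeline(root nodes.Node) (nodes.Node, error) {
	pipeline := Transformers([]Transformer{DefaultNamespace("go")})
	var err error
	for _, t := range pipeline {
		if root, err = t.Do(root); err != nil {
			return nil, err
		}
	}
	return root, nil
}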
| [7] |
package models
import (
"context"
"crypto/md5"
"fmt"
"io"
"satellity/internal/durable"
"satellity/internal/session"
"strings"
"time"
"github.com/gofrs/uuid"
"github.com/jackc/pgx/v4"
)
// SolidStatisticID is used to generate a solid id from name
const SolidStatisticID = "540cbd3c-f4eb-479c-bcd8-b5629af57267"
const (
StatisticTypeUsers = "users"
StatisticTypeTopics = "topics"
StatisticTypeComments = "comments"
)
// Statistic is the body of statistic
type Statistic struct {
StatisticID string
Name string
Count int64
CreatedAt time.Time
UpdatedAt time.Time
}
var statisticColumns = []string{"statistic_id", "name", "count", "created_at", "updated_at"}
func (s *Statistic) values() []interface{} {
return []interface{}{s.StatisticID, s.Name, s.Count, s.CreatedAt, s.UpdatedAt}
}
func statisticFromRows(row durable.Row) (*Statistic, error) {
var s Statistic
err := row.Scan(&s.StatisticID, &s.Name, &s.Count, &s.CreatedAt, &s.UpdatedAt)
return &s, err
}
func UpsertStatistic(ctx context.Context, name string) (*Statistic, error) {
id, err := generateStatisticID(SolidStatisticID, name)
if err != nil {
return nil, session.ServerError(ctx, err)
}
switch name {
case StatisticTypeUsers,
StatisticTypeTopics,
StatisticTypeComments:
default:
return nil, session.BadDataError(ctx)
}
var statistic *Statistic
err = session.Database(ctx).RunInTransaction(ctx, func(tx pgx.Tx) error {
s, err := findStatistic(ctx, tx, id)
if err != nil {
return err
}
var count int64
switch name {
case StatisticTypeUsers:
count, err = usersCount(ctx, tx)
case StatisticTypeTopics:
count, err = fetchTopicsCount(ctx, tx, "ALL")
case StatisticTypeComments:
count, err = fetchCommentsCount(ctx, tx, "ALL")
}
if err != nil {
return err
}
t := time.Now()
if s == nil {
s = &Statistic{
StatisticID: id,
Name: name,
CreatedAt: t,
}
}
s.Count = count
s.UpdatedAt = t
cols, params := durable.PrepareColumnsAndExpressions(statisticColumns, 0)
_, err = tx.Exec(ctx, fmt.Sprintf("INSERT INTO statistics(%s) VALUES (%s) ON CONFLICT (statistic_id) DO UPDATE SET (count,updated_at)=(EXCLUDED.count,EXCLUDED.updated_at)", cols, params), s.values()...)
statistic = s
return err
})
if err != nil {
return nil, session.TransactionError(ctx, err)
}
return statistic, nil
}
func findStatistic(ctx context.Context, tx pgx.Tx, id string) (*Statistic, error) {
if uuid.FromStringOrNil(id).String() != id {
return nil, nil
}
row := tx.QueryRow(ctx, fmt.Sprintf("SELECT %s FROM Statistics WHERE statistic_id=$1", strings.Join(statisticColumns, ",")), id)
s, err := statisticFromRows(row)
if err == pgx.ErrNoRows {
return nil, nil
}
return s, err
}
func generateStatisticID(ID, name string) (string, error) {
h := md5.New()
io.WriteString(h, ID)
io.WriteString(h, name)
sum := h.Sum(nil)
sum[6] = (sum[6] & 0x0f) | 0x30
sum[8] = (sum[8] & 0x3f) | 0x80
id, err := uuid.FromBytes(sum)
return id.String(), err
}
| [2] |
package lru
import "github.com/negz/practice/lru/list"
type Cache struct {
l *list.List
kn map[string]*list.Node
nk map[*list.Node]string
m int
}
func New(max int) *Cache {
return &Cache{
&list.List{},
make(map[string]*list.Node),
make(map[*list.Node]string),
max,
}
}
func (c *Cache) Get(k string) (uint32, bool) {
n := c.kn[k]
if n == nil {
return 0, false
}
c.l.MoveToTail(n)
return n.Value, true
}
func (c *Cache) Insert(k string, v uint32) {
n := c.kn[k]
if n != nil {
c.l.MoveToTail(n)
c.l.Tail.Value = v
return
}
n = c.l.Append(v)
c.kn[k] = n
c.nk[n] = k
if len(c.kn) > c.m {
h := c.l.TrimHead()
delete(c.kn, c.nk[h])
delete(c.nk, h)
}
}
func (c *Cache) ToSlice() []uint32 {
return c.l.ToSlice()
}
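// Illustrative usage sketch (not part of the original file): with a capacity of
// two, inserting a third key evicts the least recently used entry.
func exampleCacheUsage() {
	c := New(2)
	c.Insert("a", 1)
	c.Insert("b", 2)
	c.Get("a")       // "a" becomes the most recently used entry
	c.Insert("c", 3) // evicts "b", the least recently used entry
	_, ok := c.Get("b")
	_ = ok // false: "b" was evicted
}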
| [1] |
package fuzzyargs
import (
"testing"
"github.com/skatteetaten/ao/pkg/auroraconfig"
"github.com/skatteetaten/ao/pkg/configuration"
"github.com/skatteetaten/ao/pkg/serverapi"
)
func TestGetOneFile(t *testing.T) {
fuzzyArgs, err := initiateFuzzyArgs()
const argument = "utv0/afs-import.json"
const expected = "utv0/afs-import.json"
err = fuzzyArgs.PopulateFuzzyFile(getArgs(argument))
if err != nil {
t.Errorf("Error in PopulateFuzzyFile(%v): %v", argument, err.Error())
}
filename, err := fuzzyArgs.GetFile()
if err != nil {
t.Errorf("Error in GetFile(%v): %v", argument, err.Error())
} else {
if filename != expected {
t.Errorf("Eror in GetFile(%v): Expected %v, got %v", argument, expected, filename)
}
}
}
func TestGetOneFileArray(t *testing.T) {
fuzzyArgs, err := initiateFuzzyArgs()
const argument1 = "utv0"
const argument2 = "afs-import.json"
const expected = "utv0/afs-import.json"
err = fuzzyArgs.PopulateFuzzyFile(getArgs(argument1, argument2))
if err != nil {
t.Errorf("Error in PopulateFuzzyFile(%v %v): %v", argument1, argument2, err.Error())
} else {
filename, err := fuzzyArgs.GetFile()
if err != nil {
t.Errorf("Error in GetFile(%v): %v %v", argument1, argument2, err.Error())
} else {
if filename != expected {
t.Errorf("Eror in GetFile(%v %v): Expected %v, got %v", argument1, argument2, expected, filename)
}
}
}
}
func TestGetOneFuzzyFile(t *testing.T) {
fuzzyArgs, err := initiateFuzzyArgs()
const argument = "v0/afs"
const expected = "utv0/afs-import.json"
err = fuzzyArgs.PopulateFuzzyFile(getArgs(argument))
if err != nil {
t.Errorf("Error in PopulateFuzzyFile(%v): %v", argument, err.Error())
} else {
filename, err := fuzzyArgs.GetFile()
if err != nil {
t.Errorf("Error in GetFile(%v): %v", argument, err.Error())
} else {
if filename != expected {
t.Errorf("Error in GetFile(%v): Expected %v, got %v", argument, expected, filename)
}
}
}
}
func TestGetNonUniqueEnvFuzzyFile(t *testing.T) {
fuzzyArgs, err := initiateFuzzyArgs()
const expected = "0/afs-import.json"
err = fuzzyArgs.PopulateFuzzyFile(getArgs(expected))
if err == nil {
t.Errorf("Error in PopulateFuzzyFile(%v): Expected duplicate error", err.Error())
}
}
func TestGetNonUniqueAppFuzzyFile(t *testing.T) {
fuzzyArgs, err := initiateFuzzyArgs()
const expected = "v0/im"
err = fuzzyArgs.PopulateFuzzyFile(getArgs(expected))
if err == nil {
t.Errorf("Error in PopulateFuzzyFile(%v): Expected duplicate error", expected)
}
}
func TestGetUniqueApp(t *testing.T) {
fuzzyArgs, err := initiateFuzzyArgs()
const argument = "bas-dev/console"
err = fuzzyArgs.PopulateFuzzyEnvAppList(getArgs(argument), false)
if err != nil {
t.Errorf("Error in PopulateFuzzyEnvAppList(%v): %v", argument, err.Error())
}
}
func TestGetOneFuzzyApp(t *testing.T) {
fuzzyArgs, err := initiateFuzzyArgs()
const argument = "bas/con"
const expectedApp = "console"
const expectedEnv = "bas-dev"
err = fuzzyArgs.PopulateFuzzyEnvAppList(getArgs(argument), false)
if err != nil {
t.Errorf("Error in PopulateFuzzyEnvAppList(%v): %v", argument, err.Error())
} else {
app, err := fuzzyArgs.GetApp()
if err != nil {
t.Errorf("Error in GetApp: %v", err.Error())
} else {
if app != expectedApp {
t.Errorf("Error in TestGetOneFuzzyApp, Expected app %v, got %v", expectedApp, app)
}
}
env, err := fuzzyArgs.GetEnv()
if err != nil {
t.Errorf("Error in GetEnv: %v", err.Error())
} else {
if env != expectedEnv {
t.Errorf("Error in TestGetOneFuzzyApp, Expected env %v, got %v", expectedEnv, env)
}
}
}
}
func TestGetFuzzyApp(t *testing.T) {
fuzzyArgs, err := initiateFuzzyArgs()
const argument = "con"
const expected = "console"
app, err := fuzzyArgs.GetFuzzyApp(argument)
if err != nil {
t.Errorf("Error in GetFuzzyApp: %v", err.Error())
} else {
if app != expected {
t.Errorf("Error in TestGetFuzzyApp: Expected %v, got %v", expected, app)
}
}
}
func TestGetFuzzyEnv(t *testing.T) {
fuzzyArgs, err := initiateFuzzyArgs()
const argument = "bas-d"
const expected = "bas-dev"
env, err := fuzzyArgs.GetFuzzyEnv(argument)
if err != nil {
t.Errorf("Error in GetFuzzyEnv: %v", err.Error())
} else {
if env != expected {
t.Errorf("Error in TestGetFuzzyEnv: Expected %v, got %v", expected, env)
}
}
}
func TestApp2File(t *testing.T) {
fuzzyArgs, err := initiateFuzzyArgs()
const argument = "s-dev/cons"
const expected = "bas-dev/console.json"
filename, err := fuzzyArgs.App2File(argument)
if err != nil {
t.Errorf("Error in TestApp2File: %v", err.Error())
} else {
if filename != expected {
t.Errorf("Error in TestApp2File: Expected %v, got %v", expected, filename)
}
}
}
func initiateFuzzyArgs() (fuzzyArgs *FuzzyArgs, err error) {
config := configuration.NewTestConfiguration()
request := auroraconfig.GetAuroraConfigRequest(config)
response, err := serverapi.CallApiWithRequest(request, config)
if err != nil {
return fuzzyArgs, err
}
auroraConfig, err := auroraconfig.Response2AuroraConfig(response)
if err != nil {
return fuzzyArgs, err
}
fuzzyArgs = new(FuzzyArgs)
fuzzyArgs.Init(&auroraConfig)
return fuzzyArgs, nil
}
func getArgs(argN ...string) (args []string) {
return argN
}
| [4] |
package networkMend
import (
"encoding/json"
"log"
"net/http"
"net/url"
"strconv"
"strings"
"kvsAccess"
"mainInstance"
"structs"
)
// Sends Key Value Store to newly reconnected node in partition
// Also sends Causal Payload to check if KVS is up to date
// author: Alec
// update: first letter of function to upper case
// purpose: now it can be exported
func SendNetworkMend (Node structs.NodeInfo) {
log.Print("SendNetworkMend")
Ip := Node.Ip
Port := Node.Port
URL := "http://" + Ip + ":" + Port + "/networkMend"
form := url.Values{}
for key, val := range mainInstance.GetKVS().Store {
form.Add("Key", key)
form.Add("Val", val)
}
for _, val := range mainInstance.GetPayload() {
form.Add("Payload", string(val))
}
formJSON := form.Encode()
req, _ := http.NewRequest(http.MethodPut, URL, strings.NewReader(formJSON))
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
client := &http.Client{}
_, err := client.Do(req)
if err != nil {
panic(err)
}
}
// Retrieves new KVS from other node in partition
// Checks the Causal Payload to see if it is newer than current one
func HandleNetworkMend (w http.ResponseWriter, r *http.Request) {
r.ParseForm()
Payload := r.PostForm["Payload"]
newer := true
newPayload := make([]int, len(mainInstance.GetPayload()))
for ind, myNum := range mainInstance.GetPayload() {
newNum, _ := strconv.Atoi(Payload[ind])
if myNum > newNum {
newer = false
break
}
newPayload[ind] = newNum
}
if newer {
newKVS := kvsAccess.NewKVS()
keys := r.PostForm["Key"]
vals := r.PostForm["Val"]
for i, key := range keys {
newKVS.SetValue(key, vals[i])
}
mainInstance.SetKVS(newKVS)
mainInstance.SetPayload(newPayload)
}
respBody := structs.PartitionResp{"success"}
bodyBytes, _ := json.Marshal(respBody)
w.WriteHeader(200)
w.Write(bodyBytes)
}
| [2] |
package database
import (
"go.mongodb.org/mongo-driver/bson/primitive"
"context"
"log"
"time"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo/options"
"github.com/amirkr/graphql-example/graph/model"
)
type DB struct {
client *mongo.Client
}
func Connect() *DB {
client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://mongo:27017"))
if err != nil {
log.Fatal("Mongo NewClient error:", err.Error())
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
err = client.Connect(ctx)
if err != nil {
log.Fatal("Failure to Connect to MongoDB:", err.Error())
}
return &DB {
client: client,
}
}
func (db* DB) Save(input model.NewAuthor) *model.Author {
collection := db.client.Database("my-gqlgen").Collection("author")
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
res, err := collection.InsertOne(ctx, input)
if err != nil {
log.Fatal("MongoDB Author Insertion Failure:", err.Error())
}
return &model.Author {
ID: res.InsertedID.(primitive.ObjectID).Hex(),
Firstname: input.Firstname,
Lastname: input.Lastname,
}
}
func (db *DB) FindByID(ID string) *model.Author {
objectID, err := primitive.ObjectIDFromHex(ID)
if err != nil {
log.Fatal("Invalid author ID:", err.Error())
}
collection := db.client.Database("my-gqlgen").Collection("author")
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
var author *model.Author
err = collection.FindOne(ctx, bson.M{"_id": objectID}).Decode(&author)
if err == mongo.ErrNoDocuments {
return nil
}
if err != nil {
log.Fatal("MongoDB Author Find Failure:", err.Error())
}
return author
}
func (db *DB) All() []*model.Author {
var authors []*model.Author
collection := db.client.Database("my-gqlgen").Collection("author")
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
cursor, err := collection.Find(ctx, bson.D{})
if err != nil {
log.Fatal("MongoDB Author Find Failure:", err.Error())
}
for cursor.Next(ctx){
var author *model.Author
err := cursor.Decode(&author)
if err != nil {
log.Fatal("MongoDB Failure to Decode Author:", err.Error())
}
authors = append(authors, author)
}
return authors
}
| [2] |
/* https://theweeklychallenge.org/blog/perl-weekly-challenge-140/
TASK #1 › Add Binary
Submitted by: Mohammad S Anwar
You are given two decimal-coded binary numbers, $a and $b.
Write a script to simulate the addition of the given binary numbers.
The script should simulate something like $a + $b. (operator overloading)
Example 1
Input: $a = 11; $b = 1;
Output: 100
Example 2
Input: $a = 101; $b = 1;
Output: 110
Example 3
Input: $a = 100; $b = 11;
Output: 111
*/
package main
import (
"bufio"
"errors"
"os"
"strings"
)
func main() {
w := bufio.NewWriter(os.Stdout)
sample := make([][2]string, 1)
if len(os.Args) > 2 {
sample[0] = [2]string{os.Args[1], os.Args[2]}
} else {
sample = [][2]string{
[2]string{"11", "1"},
[2]string{"101", "1"},
[2]string{"100", "11"},
}
}
var a, b string
for _, v := range sample {
a, b = v[0], v[1]
w.WriteString("Input: a = " + a + "; b = " + b + "\nOutput: ")
res, err := Add(a, b)
if err != nil {
w.WriteString(err.Error())
} else {
w.WriteString(res)
}
w.WriteString("\n\n")
}
w.Flush()
}
func Add(a, b string) (r string, err error) {
for _, v := range a + b {
if v != '1' && v != '0' {
return "", errors.New("invalid binary string")
}
}
for _, v := range []*string{&a, &b} {
*v = Reverse(strings.TrimLeft(*v, "0"))
}
max := MaxLen(a, b)
var count1 int
for i := 0; i < max+count1; i++ {
for _, str := range []string{a, b} {
if i < len(str) {
if str[i] == '1' {
count1++
}
}
}
if count1%2 == 0 {
r += "0"
} else {
r += "1"
}
count1 /= 2
}
return Reverse(r), nil
}
func Reverse(str string) string {
s := []byte(str)
// swap bytes from both ends; avoids relying on sort with an invalid comparator
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
return string(s)
}
func MaxLen(s ...string) (max int) {
l := len(s[0])
max = l
for _, v := range s[1:] {
l := len(v)
if max < l {
max = l
}
}
return max
}
| [1] |
package http
import (
"context"
"encoding/json"
"github.com/go-kit/kit/endpoint"
kitLog "github.com/go-kit/kit/log"
"github.com/go-kit/kit/transport"
kitHttp "github.com/go-kit/kit/transport/http"
"github.com/gorilla/mux"
"github.com/togettoyou/go-kit-example/hello/endpoints"
"log"
"net/http"
"os"
)
func NewHttpHandler(eps endpoints.HelloEndPoints) http.Handler {
r := mux.NewRouter()
options := getServerOptions()
r.Methods("GET").Path("/name").Handler(newServer(eps.GetNameEndpoint, options))
r.Methods("GET").Path("/age").Handler(newServer(eps.GetAgeEndpoint, options))
return r
}
func newServer(e endpoint.Endpoint, options []kitHttp.ServerOption) http.Handler {
return kitHttp.NewServer(
e,
decodeRequest,
encodeJSONResponse,
options...,
)
}
func getServerOptions() []kitHttp.ServerOption {
logger := kitLog.NewLogfmtLogger(os.Stderr)
logger = kitLog.With(logger, "ts", kitLog.DefaultTimestampUTC)
logger = kitLog.With(logger, "caller", kitLog.DefaultCaller)
options := []kitHttp.ServerOption{
kitHttp.ServerErrorHandler(transport.NewLogErrorHandler(logger)),
kitHttp.ServerErrorEncoder(encodeError),
}
return options
}
func decodeRequest(ctx context.Context, r *http.Request) (interface{}, error) {
log.Println("Request拦截-decodeRequest")
return r, nil
}
func encodeJSONResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {
log.Println("Response拦截-encodeJSONResponse")
w.Header().Set("Content-Type", "application/json;charset=utf-8")
return json.NewEncoder(w).Encode(response)
}
func encodeError(_ context.Context, err error, w http.ResponseWriter) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
switch err {
default:
w.WriteHeader(http.StatusInternalServerError)
}
json.NewEncoder(w).Encode(map[string]interface{}{
"error": err.Error(),
})
}
| [7] |
package database
import (
"fmt"
"os"
"github.com/jinzhu/gorm"
// the MySQL dialect must be imported so gorm.Open("mysql", ...) can register the driver
_ "github.com/jinzhu/gorm/dialects/mysql"
)
var Conn *gorm.DB
func InitDB() *gorm.DB {
fmt.Println("initialising DB")
dbuser := os.Getenv("DBUSER")
dbpassword := os.Getenv("DBPASSWORD")
dbname := os.Getenv("DBNAME")
dbhost := os.Getenv("DBHOST")
dbport := os.Getenv("DBPORT")
dsn := dbuser + ":" + dbpassword + "@(" + dbhost + ":" + dbport + ")/" + dbname + "?charset=utf8&parseTime=true"
fmt.Println("mysql", dsn)
var err error
// assign the package-level Conn so other packages can reuse the connection
Conn, err = gorm.Open("mysql", dsn)
if err != nil {
panic("failed to connect database")
}
fmt.Println("DB initialised")
Conn.LogMode(true)
return Conn
}
| [2] |
/*
* Copyright 2018 evove.tech
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package git
import (
"os/exec"
"regexp"
"fmt"
"github.com/evovetech/got/util"
)
var reUU = regexp.MustCompile("^\\?\\?\\s+(.*)")
func SymbolicRef(ref string) string {
cmd := Command("symbolic-ref", "--short", ref)
return cmd.OutputString()
}
func StatusCmd(file string) *Cmd {
return Command("status", "-s", "--", file)
}
func AddCmd(file string, options ...string) *Cmd {
options = append(options, "--", file)
return Command("add", options...)
}
func Add(file string, options ...string) error {
return AddCmd(file, options...).Run()
}
func CheckoutCmd(args ...string) *Cmd {
return Command("checkout", args...)
}
func Checkout(args ...string) error {
err := FuncGroup(
CheckStatus,
CheckoutCmd(args...).Run,
Command("reset", "--soft", "HEAD").Run,
CheckStatus,
).Run()
return err
}
func ResolveRmCmd(file string) Runner {
exec.Command("rm", file).Run()
return AddCmd(file, "-A")
}
func ResolveCheckoutCmd(file string, s MergeStrategy) Runner {
return Group(
CheckoutCmd(s.Option(), "--", file),
AddCmd(file, "-A"),
)
}
func AbortMerge() error {
return FuncGroup(
Merge().Abort,
RemoveUntracked,
).Run()
}
func RemoveUntracked() error {
var errors []error
diff := Command("status", "-s", "--untracked-files=all")
for _, status := range diff.OutputLines() {
switch {
case reUU.MatchString(status):
match := reUU.FindStringSubmatch(status)
cmd := exec.Command("rm", match[1])
if err := Run(cmd); err != nil {
errors = append(errors, err)
}
}
}
return util.CompositeError(errors)
}
func CheckStatus() error {
// check git status/diff on HEAD and bail if there are changes
status, err := Command("status", "-s", "--untracked-files=all").Output()
if err != nil {
return err
} else if status != "" {
return fmt.Errorf("please stash or commit changes before merging")
}
return nil
}
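// Illustrative usage sketch (not part of the original file): read the current
// branch and stage a file. The file name is a placeholder; SymbolicRef and Add
// come from this package as shown above.
func exampleStageFile() {
	branch := SymbolicRef("HEAD")
	fmt.Printf("on branch %s\n", branch)
	if err := Add("README.md", "-v"); err != nil {
		fmt.Println("git add failed:", err)
	}
}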
| [7] |
/*-
* Copyright © 2016-2017, Jörg Pernfuß <[email protected]>
* Copyright © 2016, 1&1 Internet SE
* All rights reserved.
*
* Use of this source code is governed by a 2-clause BSD license
* that can be found in the LICENSE file.
*/
package cyclone // import "github.com/mjolnir42/cyclone/lib/cyclone"
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/go-redis/redis"
"github.com/mjolnir42/cyclone/lib/cyclone/cpu"
"github.com/mjolnir42/cyclone/lib/cyclone/disk"
"github.com/mjolnir42/cyclone/lib/cyclone/mem"
"github.com/mjolnir42/erebos"
"github.com/mjolnir42/legacy"
metrics "github.com/rcrowley/go-metrics"
)
// Handlers is the registry of running application handlers
var Handlers map[int]erebos.Handler
// AgeCutOff is the duration after which back-processed alarms are
// ignored and not alerted
var AgeCutOff time.Duration
func init() {
Handlers = make(map[int]erebos.Handler)
}
// Cyclone performs threshold evaluation alarming on metrics
type Cyclone struct {
Num int
Input chan *erebos.Transport
Shutdown chan struct{}
Death chan error
Config *erebos.Config
Metrics *metrics.Registry
CPUData map[int64]cpu.CPU
MemData map[int64]mem.Mem
CTXData map[int64]cpu.CTX
DskData map[int64]map[string]disk.Disk
redis *redis.Client
internalInput chan *legacy.MetricSplit
}
// AlarmEvent is the datatype for sending out alarm notifications
type AlarmEvent struct {
Source string `json:"source"`
EventID string `json:"event_id"`
Version string `json:"version"`
Sourcehost string `json:"sourcehost"`
Oncall string `json:"on_call"`
Targethost string `json:"targethost"`
Message string `json:"message"`
Level int64 `json:"level"`
Timestamp string `json:"timestamp"`
Check string `json:"check"`
Monitoring string `json:"monitoring"`
Team string `json:"team"`
}
// run is the event loop for Cyclone
func (c *Cyclone) run() {
runloop:
for {
select {
case <-c.Shutdown:
// received shutdown, drain input channel which will be
// closed by main
goto drainloop
case msg := <-c.Input:
if msg == nil {
// this can happen if we read the closed Input channel
// before the closed Shutdown channel
continue runloop
}
if err := c.process(msg); err != nil {
c.Death <- err
<-c.Shutdown
break runloop
}
}
}
drainloop:
for {
select {
case msg := <-c.Input:
if msg == nil {
// channel is closed
break drainloop
}
c.process(msg)
}
}
}
// process evaluates a metric and raises alarms as required
func (c *Cyclone) process(msg *erebos.Transport) error {
if msg == nil {
logrus.Warnln("Ignoring nil message")
return nil
}
if msg.Value == nil {
logrus.Warnf("Ignoring empty message from: %d", msg.HostID)
go c.commit(msg)
return nil
}
m := &legacy.MetricSplit{}
if err := json.Unmarshal(msg.Value, m); err != nil {
return err
}
switch m.Path {
case `_internal.cyclone.heartbeat`:
c.heartbeat()
return nil
}
// non-heartbeat metrics count towards processed metrics
metrics.GetOrRegisterMeter(`/metrics/processed.per.second`,
*c.Metrics).Mark(1)
switch m.Path {
case `/sys/cpu/ctx`:
ctx := cpu.CTX{}
id := m.AssetID
if _, ok := c.CTXData[id]; ok {
ctx = c.CTXData[id]
}
m = ctx.Update(m)
c.CTXData[id] = ctx
case `/sys/cpu/count/idle`:
fallthrough
case `/sys/cpu/count/iowait`:
fallthrough
case `/sys/cpu/count/irq`:
fallthrough
case `/sys/cpu/count/nice`:
fallthrough
case `/sys/cpu/count/softirq`:
fallthrough
case `/sys/cpu/count/system`:
fallthrough
case `/sys/cpu/count/user`:
cu := cpu.CPU{}
id := m.AssetID
if _, ok := c.CPUData[id]; ok {
cu = c.CPUData[id]
}
cu.Update(m)
m = cu.Calculate()
c.CPUData[id] = cu
case `/sys/memory/active`:
fallthrough
case `/sys/memory/buffers`:
fallthrough
case `/sys/memory/cached`:
fallthrough
case `/sys/memory/free`:
fallthrough
case `/sys/memory/inactive`:
fallthrough
case `/sys/memory/swapfree`:
fallthrough
case `/sys/memory/swaptotal`:
fallthrough
case `/sys/memory/total`:
mm := mem.Mem{}
id := m.AssetID
if _, ok := c.MemData[id]; ok {
mm = c.MemData[id]
}
mm.Update(m)
m = mm.Calculate()
c.MemData[id] = mm
case `/sys/disk/blk_total`:
fallthrough
case `/sys/disk/blk_used`:
fallthrough
case `/sys/disk/blk_read`:
fallthrough
case `/sys/disk/blk_wrtn`:
if len(m.Tags) == 0 {
m = nil
break
}
d := disk.Disk{}
id := m.AssetID
mpt := m.Tags[0]
if c.DskData[id] == nil {
c.DskData[id] = make(map[string]disk.Disk)
}
if _, ok := c.DskData[id][mpt]; !ok {
c.DskData[id][mpt] = d
}
if _, ok := c.DskData[id][mpt]; ok {
d = c.DskData[id][mpt]
}
d.Update(m)
mArr := d.Calculate()
if mArr != nil {
for _, mPtr := range mArr {
// no deadlock, channel is buffered
c.internalInput <- mPtr
}
}
c.DskData[id][mpt] = d
m = nil
}
if m == nil {
logrus.Debugf("Cyclone[%d], Metric has been consumed", c.Num)
return nil
}
lid := m.LookupID()
thr := c.Lookup(lid)
if thr == nil {
logrus.Errorf("Cyclone[%d], ERROR fetching threshold data. Lookup service available?", c.Num)
return nil
}
if len(thr) == 0 {
logrus.Debugf("Cyclone[%d], No thresholds configured for %s from %d", c.Num, m.Path, m.AssetID)
return nil
}
logrus.Debugf("Cyclone[%d], Forwarding %s from %d for evaluation (%s)", c.Num, m.Path, m.AssetID, lid)
evals := metrics.GetOrRegisterMeter(`/evaluations.per.second`,
*c.Metrics)
evals.Mark(1)
internalMetric := false
switch m.Path {
case
// internal metrics generated by cyclone
`cpu.ctx.per.second`,
`cpu.usage.percent`,
`memory.usage.percent`:
internalMetric = true
case
// internal metrics sent by main daemon
`/sys/cpu/blocked`,
`/sys/cpu/uptime`,
`/sys/load/300s`,
`/sys/load/60s`,
`/sys/load/900s`,
`/sys/load/running_proc`,
`/sys/load/total_proc`:
internalMetric = true
default:
switch {
case
strings.HasPrefix(m.Path, `disk.free:`),
strings.HasPrefix(m.Path, `disk.read.per.second:`),
strings.HasPrefix(m.Path, `disk.usage.percent:`),
strings.HasPrefix(m.Path, `disk.write.per.second:`):
internalMetric = true
}
}
evaluations := 0
thrloop:
for key := range thr {
var alarmLevel = "0"
var brokenThr int64
dispatchAlarm := false
broken := false
fVal := ``
if internalMetric {
dispatchAlarm = true
}
if len(m.Tags) > 0 && m.Tags[0] == thr[key].ID {
dispatchAlarm = true
}
if !dispatchAlarm {
continue thrloop
}
logrus.Debugf("Cyclone[%d], Evaluating metric %s from %d against config %s",
c.Num, m.Path, m.AssetID, thr[key].ID)
evaluations++
lvlloop:
for _, lvl := range []string{`9`, `8`, `7`, `6`, `5`, `4`, `3`, `2`, `1`, `0`} {
thrval, ok := thr[key].Thresholds[lvl]
if !ok {
continue
}
logrus.Debugf("Cyclone[%d], Checking %s alarmlevel %s", c.Num, thr[key].ID, lvl)
switch m.Type {
case `integer`:
fallthrough
case `long`:
broken, fVal = c.cmpInt(thr[key].Predicate,
m.Value().(int64),
thrval)
case `real`:
broken, fVal = c.cmpFlp(thr[key].Predicate,
m.Value().(float64),
thrval)
}
if broken {
alarmLevel = lvl
brokenThr = thrval
break lvlloop
}
}
al := AlarmEvent{
Source: fmt.Sprintf("%s / %s", thr[key].MetaTargethost, thr[key].MetaSource),
EventID: thr[key].ID,
Version: c.Config.Cyclone.APIVersion,
Sourcehost: thr[key].MetaTargethost,
Oncall: thr[key].Oncall,
Targethost: thr[key].MetaTargethost,
Timestamp: time.Now().UTC().Format(time.RFC3339Nano),
Check: fmt.Sprintf("cyclone(%s)", m.Path),
Monitoring: thr[key].MetaMonitoring,
Team: thr[key].MetaTeam,
}
al.Level, _ = strconv.ParseInt(alarmLevel, 10, 64)
if alarmLevel == `0` {
al.Message = `Ok.`
} else {
al.Message = fmt.Sprintf(
"Metric %s has broken threshold. Value %s %s %d",
m.Path,
fVal,
thr[key].Predicate,
brokenThr,
)
}
if al.Oncall == `` {
al.Oncall = `No oncall information available`
}
c.updateEval(thr[key].ID)
if c.Config.Cyclone.TestMode {
// do not send out alarms in testmode
continue thrloop
}
alrms := metrics.GetOrRegisterMeter(`/alarms.per.second`,
*c.Metrics)
alrms.Mark(1)
go func(a AlarmEvent) {
b := new(bytes.Buffer)
aSlice := []AlarmEvent{a}
if err := json.NewEncoder(b).Encode(aSlice); err != nil {
logrus.Errorf("Cyclone[%d], ERROR json encoding alarm for %s: %s", c.Num, a.EventID, err)
return
}
resp, err := http.Post(
c.Config.Cyclone.DestinationURI,
`application/json; charset=utf-8`,
b,
)
if err != nil {
logrus.Errorf("Cyclone[%d], ERROR sending alarm for %s: %s", c.Num, a.EventID, err)
return
}
logrus.Infof("Cyclone[%d], Dispatched alarm for %s at level %d, returncode was %d",
c.Num, a.EventID, a.Level, resp.StatusCode)
if resp.StatusCode >= 209 {
// read response body
bt, _ := ioutil.ReadAll(resp.Body)
logrus.Errorf("Cyclone[%d], ResponseMsg(%d): %s", c.Num, resp.StatusCode, string(bt))
resp.Body.Close()
// reset buffer and encode JSON again so it can be
// logged
b.Reset()
json.NewEncoder(b).Encode(aSlice)
logrus.Errorf("Cyclone[%d], RequestJSON: %s", c.Num, b.String())
return
}
// ensure http.Response.Body is consumed and closed,
// otherwise it leaks filehandles
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}(al)
}
if evaluations == 0 {
logrus.Debugf("Cyclone[%d], metric %s(%d) matched no configurations", c.Num, m.Path, m.AssetID)
}
return nil
}
// commit marks a message as fully processed
func (c *Cyclone) commit(msg *erebos.Transport) {
msg.Commit <- &erebos.Commit{
Topic: msg.Topic,
Partition: msg.Partition,
Offset: msg.Offset,
}
}
// cmpInt compares an integer value against a threshold
func (c *Cyclone) cmpInt(pred string, value, threshold int64) (bool, string) {
fVal := fmt.Sprintf("%d", value)
switch pred {
case `<`:
return value < threshold, fVal
case `<=`:
return value <= threshold, fVal
case `==`:
return value == threshold, fVal
case `>=`:
return value >= threshold, fVal
case `>`:
return value > threshold, fVal
case `!=`:
return value != threshold, fVal
default:
logrus.Errorf("Cyclone[%d], ERROR unknown predicate: %s", c.Num, pred)
return false, ``
}
}
// cmpFlp compares a floating point value against a threshold
func (c *Cyclone) cmpFlp(pred string, value float64, threshold int64) (bool, string) {
fthreshold := float64(threshold)
fVal := fmt.Sprintf("%.3f", value)
switch pred {
case `<`:
return value < fthreshold, fVal
case `<=`:
return value <= fthreshold, fVal
case `==`:
return value == fthreshold, fVal
case `>=`:
return value >= fthreshold, fVal
case `>`:
return value > fthreshold, fVal
case `!=`:
return value != fthreshold, fVal
default:
logrus.Errorf("Cyclone[%d], ERROR unknown predicate: %s", c.Num, pred)
return false, ``
}
}
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
| [7] |
package helpers
import (
"fmt"
"io/fs"
"io/ioutil"
"path/filepath"
"strings"
"gopkg.in/yaml.v2"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"github.com/mwlng/k8s_resources_sync/pkg/k8s_resources"
)
func LoadCronJobYamlFiles(rootDir string) []*batchv1.CronJob {
cronJobs := []*batchv1.CronJob{}
err := filepath.Walk(rootDir, func(path string, info fs.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
ext := strings.ToLower(filepath.Ext(path))
if ext == ".yml" || ext == ".yaml" {
data, err := ioutil.ReadFile(path)
if err != nil {
klog.Errorf("Error while reading YAML file. Err was: %s", err)
return err
}
decode := scheme.Codecs.UniversalDeserializer().Decode
obj, _, err := decode(data, nil, nil)
if err != nil {
klog.Errorf("Error while decoding YAML file: %s. Err was: %s", path, err)
return nil
}
switch obj.(type) {
case *batchv1.CronJob:
cronJobs = append(cronJobs, obj.(*batchv1.CronJob))
//default:
// fmt.Println(reflect.TypeOf(obj))
}
}
}
return nil
})
if err != nil {
klog.Errorf("Error while reading YAML files. Err was: %s", err)
}
return cronJobs
}
func SyncCronJobs(kubeConfig *rest.Config, cronJobs []*batchv1.CronJob) []*batchv1.CronJob {
klog.Infof("Syncing cron jobs from cluster: %s, namespace: %s\n", kubeConfig.Host, corev1.NamespaceDefault)
cronJob, err := k8s_resources.NewCronJob(kubeConfig, corev1.NamespaceDefault)
if err != nil {
panic(err)
}
synced_cronJobs := []*batchv1.CronJob{}
for _, job := range cronJobs {
src_cronJob, err := cronJob.GetCronJob(job.Name)
if err != nil {
klog.Errorf("Failed to get cron job: %s. Err was: %s", job.Name, err)
continue
}
if src_cronJob != nil {
containerImageMap := map[string]string{}
for _, c := range src_cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers {
containerImageMap[c.Name] = c.Image
}
for i, c := range job.Spec.JobTemplate.Spec.Template.Spec.Containers {
job.Spec.JobTemplate.Spec.Template.Spec.Containers[i].Image = containerImageMap[c.Name]
}
job.Spec.Schedule = src_cronJob.Spec.Schedule
synced_cronJobs = append(synced_cronJobs, job)
}
}
return synced_cronJobs
}
func PrintCronJobs(cronJobs []*batchv1.CronJob) {
for _, job := range cronJobs {
result, _ := yaml.Marshal(job)
fmt.Printf("%s\n", string(result))
}
}
func ApplyCronJobs(kubeConfig *rest.Config, cronJobs []*batchv1.CronJob) {
cronJob, err := k8s_resources.NewCronJob(kubeConfig, corev1.NamespaceDefault)
if err != nil {
panic(err)
}
for _, job := range cronJobs {
klog.Infof("Applying cron job: %s ...", job.Name)
err := cronJob.ApplyCronJob(job)
if err != nil {
klog.Errorf("Failed to apply cron job. Err was: %s", err)
continue
}
klog.Infoln("Done.")
}
}
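// Illustrative usage sketch (not part of the original file): wiring the helpers
// together. The manifests directory is a placeholder, and the *rest.Config is
// assumed to be built elsewhere (e.g. from a kubeconfig).
func exampleSyncAndApply(kubeConfig *rest.Config) {
	cronJobs := LoadCronJobYamlFiles("./manifests")
	synced := SyncCronJobs(kubeConfig, cronJobs)
	PrintCronJobs(synced)
	ApplyCronJobs(kubeConfig, synced)
}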
| [7] |
package otp
import "litshop/src/lvm/types"
func GetCode(action types.OtpAction) string {
return ""
}
func VerifyCode(Code string, action types.OtpAction) bool {
return false
}
type OtpDriver interface {
Store()
Load()
Delete()
}
|
[
2
] |
package main
import (
"encoding/json"
"fmt"
"github.com/brincowale/go-telegram-sender"
"github.com/parnurzeal/gorequest"
"megabus-new-tickets/utils"
"net/http"
"os"
"time"
)
type Tickets struct {
Dates []struct {
Price interface{} `json:"price"`
Date string `json:"date"`
Available bool `json:"available"`
} `json:"dates"`
}
func main() {
configs := utils.LoadConfig()
t := telegram.New(configs.TelegramApiKey)
URL := generateURL(configs)
tickets := getTickets(URL)
sendToTelegram(tickets, t, configs)
}
func generateURL(configs utils.Config) string {
return "https://" + configs.Country + ".megabus.com/journey-planner/api/journeys/prices" +
"?originId=" + configs.OriginId + "&destinationId=" + configs.DestinationId +
"&departureDate=" + configs.DepartureDate + "&minDate=" + configs.MinDate +
"&days=" + configs.Days +
"&totalPassengers=1&concessionCount=0&nusCount=0&otherDisabilityCount=0&wheelchairSeated=0&pcaCount=0"
}
func getTickets(URL string) Tickets {
request := gorequest.New().Timeout(30*time.Second).Retry(3, 5*time.Second, http.StatusInternalServerError)
_, body, errs := request.Get(URL).End()
if len(errs) > 0 {
fmt.Println(errs)
os.Exit(1)
}
var tickets Tickets
err := json.Unmarshal([]byte(body), &tickets)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
return tickets
}
func sendToTelegram(tickets Tickets, t telegram.Client, configs utils.Config) {
for _, ticket := range tickets.Dates {
if ticket.Price != nil {
err := telegram.SendMessage(t, telegram.Message{
ChatId: configs.TelegramChannelId,
Text: fmt.Sprintf("Date: %v\nPrice: %v", ticket.Date, ticket.Price),
})
if err != nil {
fmt.Println(err)
}
}
}
}
|
[
2
] |
package utils
type Bitmap struct {
array []byte
len uint32
}
func NewBitmap(max uint32) *Bitmap {
len := max/8 + 1
return &Bitmap{
len: len,
array: make([]byte, len),
}
}
func (bitmap *Bitmap) Set(i uint32) {
idx := i / 8
pos := i % 8
bitmap.array[idx] |= 1 << pos
}
func (bitmap *Bitmap) UnSet(i uint32) {
idx := i / 8
pos := i % 8
bitmap.array[idx] &= ^(1 << pos)
}
func (bitmap *Bitmap) Test(i uint32) bool {
idx := i / 8
pos := i % 8
return (bitmap.array[idx] & (1 << pos)) != 0
}
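// Illustrative usage sketch (not part of the original file): set, test and
// clear individual bits. Indices must stay within the max passed to NewBitmap,
// since Set, UnSet and Test do not bounds-check.
func exampleBitmapUsage() {
b := NewBitmap(64) // room for bit indices 0..64
b.Set(3)
b.Set(10)
_ = b.Test(3) // true
_ = b.Test(4) // false
b.UnSet(3)
_ = b.Test(3) // false
}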
|
[
1
] |
package solution
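// numComponents counts the maximal runs of consecutive list nodes whose
// values all appear in G. Illustrative example (not from the original source):
// for the list 0 -> 1 -> 2 -> 3 and G = [0, 1, 3], the runs {0, 1} and {3} give 2.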
func numComponents(head *ListNode, G []int) int {
set := make(map[int]bool, len(G))
for _, val := range G {
set[val] = true
}
res := 0
for head != nil {
if set[head.Val] && (head.Next == nil || !set[head.Next.Val]) {
res++
}
head = head.Next
}
return res
}
|
[
2
] |
package openstack
import (
"log"
"github.com/pkg/errors"
"github.com/Masterminds/semver"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/apiversions"
)
var maxOctaviaVersion *semver.Version = nil
func getMaxOctaviaAPIVersion(client *gophercloud.ServiceClient) (*semver.Version, error) {
allPages, err := apiversions.List(client).AllPages()
if err != nil {
return nil, err
}
apiVersions, err := apiversions.ExtractAPIVersions(allPages)
if err != nil {
return nil, err
}
var max *semver.Version = nil
for _, apiVersion := range apiVersions {
ver, err := semver.NewVersion(apiVersion.ID)
if err != nil {
// Ignore the parse error; if Octavia returns anything odd we simply skip that version.
log.Printf("Error when parsing Octavia API version %s: %v. Ignoring it", apiVersion.ID, err)
continue
}
if max == nil || ver.GreaterThan(max) {
max = ver
}
}
if max == nil {
// If max is still nil, we couldn't parse any version at all.
// This is how the 2.0 API behaves, so fall back to v2.0.
max = semver.MustParse("v2.0")
}
log.Printf("Detected Octavia API v%s", max)
return max, nil
}
func IsOctaviaVersionSupported(client *gophercloud.ServiceClient, constraint string) (bool, error) {
if maxOctaviaVersion == nil {
var err error
maxOctaviaVersion, err = getMaxOctaviaAPIVersion(client)
if err != nil {
return false, errors.Wrap(err, "cannot get Octavia API versions")
}
}
constraintVer := semver.MustParse(constraint)
return !constraintVer.GreaterThan(maxOctaviaVersion), nil
}
|
[
1
] |
package sensu
import (
"fmt"
"regexp"
"strings"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
v2 "github.com/sensu/core/v2"
"github.com/sensu/sensu-go/types"
)
// Name
var resourceNameSchema = &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validation.StringMatch(
regexp.MustCompile(`\A[\w\.\-]+\z`),
"Invalid name"),
}
var dataSourceNameSchema = &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validation.StringMatch(
regexp.MustCompile(`\A[\w\.\-]+\z`),
"Invalid name"),
}
var resourceAssetNameSchema = &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validation.StringMatch(
regexp.MustCompile(`\A[\w\.\-/]+\z`),
"Invalid name"),
}
var dataSourceAssetNameSchema = &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validation.StringMatch(
regexp.MustCompile(`\A[\w\.\-/]+\z`),
"Invalid name"),
}
// Namespace
var resourceNamespaceSchema = &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
}
// Environment Variables
var resourceEnvVarsSchema = &schema.Schema{
Type: schema.TypeMap,
Optional: true,
}
var dataSourceEnvVarsSchema = &schema.Schema{
Type: schema.TypeMap,
Computed: true,
}
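// expandEnvVars converts the Terraform map form into Sensu's "KEY=VALUE" slice
// form; flattenEnvVars below is its inverse. Illustrative round trip (not from
// the original source): expandEnvVars(map[string]interface{}{"FOO": "bar"})
// yields []string{"FOO=bar"}, and flattenEnvVars([]string{"FOO=bar"}) yields
// map[string]string{"FOO": "bar"}.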
func expandEnvVars(v map[string]interface{}) []string {
var envVars []string
for key, val := range v {
raw := val.(string)
envVar := fmt.Sprintf("%s=%s", key, raw)
envVars = append(envVars, envVar)
}
return envVars
}
func flattenEnvVars(v []string) map[string]string {
envVars := make(map[string]string)
for _, v := range v {
parts := strings.SplitN(v, "=", 2)
if len(parts) == 2 {
envVars[parts[0]] = parts[1]
}
}
return envVars
}
// Time Window
var resourceTimeWindowsSchema = &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"day": &schema.Schema{
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{
"all", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday",
}, false),
},
"begin": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"end": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
},
},
}
var dataSourceTimeWindowsSchema = &schema.Schema{
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"day": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"begin": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"end": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
},
},
}
func expandTimeWindows(v []interface{}) types.TimeWindowWhen {
var timeWindows types.TimeWindowWhen
for _, v := range v {
timeRange := new(types.TimeWindowTimeRange)
subdueData := v.(map[string]interface{})
// subdue day
var day string
if raw, ok := subdueData["day"]; ok {
day = strings.ToLower(raw.(string))
}
// begin and end
if raw, ok := subdueData["begin"]; ok {
timeRange.Begin = raw.(string)
}
if raw, ok := subdueData["end"]; ok {
timeRange.End = raw.(string)
}
switch day {
case "all":
timeWindows.Days.All = append(timeWindows.Days.All, timeRange)
case "monday":
timeWindows.Days.Monday = append(timeWindows.Days.Monday, timeRange)
case "tuesday":
timeWindows.Days.Tuesday = append(timeWindows.Days.Tuesday, timeRange)
case "wednesday":
timeWindows.Days.Wednesday = append(timeWindows.Days.Wednesday, timeRange)
case "thursday":
timeWindows.Days.Thursday = append(timeWindows.Days.Thursday, timeRange)
case "friday":
timeWindows.Days.Friday = append(timeWindows.Days.Friday, timeRange)
case "saturday":
timeWindows.Days.Saturday = append(timeWindows.Days.Saturday, timeRange)
case "sunday":
timeWindows.Days.Sunday = append(timeWindows.Days.Sunday, timeRange)
}
}
return timeWindows
}
func flattenTimeWindows(v *types.TimeWindowWhen) []map[string]interface{} {
var timeWindows []map[string]interface{}
if v == nil {
return timeWindows
}
days := []struct {
name string
ranges []*types.TimeWindowTimeRange
}{
{"all", v.Days.All},
{"monday", v.Days.Monday},
{"tuesday", v.Days.Tuesday},
{"wednesday", v.Days.Wednesday},
{"thursday", v.Days.Thursday},
{"friday", v.Days.Friday},
{"saturday", v.Days.Saturday},
{"sunday", v.Days.Sunday},
}
for _, day := range days {
for _, r := range day.ranges {
timeWindows = append(timeWindows, map[string]interface{}{
"day": day.name,
"begin": r.Begin,
"end": r.End,
})
}
}
return timeWindows
}
// RBAC Rules
var allVerbs = []string{
"get", "list", "create", "update", "delete", "*",
}
var resourceRulesSchema = &schema.Schema{
Type: schema.TypeList,
Required: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"verbs": &schema.Schema{
Type: schema.TypeList,
Required: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"resources": &schema.Schema{
Type: schema.TypeList,
Required: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"resource_names": &schema.Schema{
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
},
}
var dataSourceRulesSchema = &schema.Schema{
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"verbs": &schema.Schema{
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"resources": &schema.Schema{
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"resource_names": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
},
}
func expandRules(v []interface{}) []types.Rule {
var rules []types.Rule
for _, v := range v {
rule := new(types.Rule)
ruleData := v.(map[string]interface{})
if raw, ok := ruleData["verbs"]; ok {
for _, verb := range raw.([]interface{}) {
rule.Verbs = append(rule.Verbs, verb.(string))
}
}
if raw, ok := ruleData["resources"]; ok {
for _, resource := range raw.([]interface{}) {
rule.Resources = append(rule.Resources, resource.(string))
}
}
if raw, ok := ruleData["resource_names"]; ok {
for _, resourceNames := range raw.([]interface{}) {
rule.ResourceNames = append(rule.ResourceNames, resourceNames.(string))
}
}
rules = append(rules, *rule)
}
return rules
}
func flattenRules(v []types.Rule) []map[string]interface{} {
var rules []map[string]interface{}
if v == nil {
return rules
}
for _, v := range v {
rule := make(map[string]interface{})
rule["verbs"] = v.Verbs
rule["resources"] = v.Resources
rule["resource_names"] = v.ResourceNames
rules = append(rules, rule)
}
return rules
}
// StringList to StringSlice
func expandStringList(v []interface{}) []string {
var vs []string
for _, v := range v {
val, ok := v.(string)
if ok && val != "" {
vs = append(vs, val)
}
}
return vs
}
// Map to String Map
func expandStringMap(v map[string]interface{}) map[string]string {
m := make(map[string]string)
for key, val := range v {
m[key] = val.(string)
}
return m
}
// Assets
var resourceAssetBuildsSchema = &schema.Schema{
Type: schema.TypeList,
Optional: true,
ForceNew: true,
ConflictsWith: []string{"url", "sha512", "filters"},
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sha512": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"url": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"filters": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"headers": &schema.Schema{
Type: schema.TypeMap,
Optional: true,
},
},
},
}
var dataSourceAssetBuildsSchema = &schema.Schema{
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sha512": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"url": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"filters": &schema.Schema{
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"headers": &schema.Schema{
Type: schema.TypeMap,
Computed: true,
},
},
},
}
func expandAssetBuilds(v []interface{}) []*v2.AssetBuild {
var builds []*v2.AssetBuild
for _, v := range v {
build := new(v2.AssetBuild)
data := v.(map[string]interface{})
if raw, ok := data["sha512"]; ok {
build.Sha512 = raw.(string)
}
if raw, ok := data["url"]; ok {
build.URL = raw.(string)
}
if raw, ok := data["filters"]; ok {
build.Filters = expandStringList(raw.([]interface{}))
}
if raw, ok := data["headers"]; ok {
build.Headers = expandStringMap(raw.(map[string]interface{}))
}
builds = append(builds, build)
}
return builds
}
func flattenAssetBuilds(v []*v2.AssetBuild) []map[string]interface{} {
var builds []map[string]interface{}
for _, b := range v {
build := make(map[string]interface{})
build["sha512"] = b.Sha512
build["url"] = b.URL
build["filters"] = b.Filters
build["headers"] = b.Headers
builds = append(builds, build)
}
return builds
}
// Entities
func expandEntityDeregistration(v []interface{}) types.Deregistration {
var deregistration types.Deregistration
for _, v := range v {
data := v.(map[string]interface{})
if raw, ok := data["handler"]; ok {
deregistration.Handler = raw.(string)
}
}
return deregistration
}
func flattenEntityDeregistration(v types.Deregistration) []map[string]interface{} {
var dereg []map[string]interface{}
if h := v.Handler; h != "" {
handler := make(map[string]interface{})
handler["handler"] = h
dereg = append(dereg, handler)
}
return dereg
}
func flattenEntitySystem(v types.System) []map[string]interface{} {
var systems []map[string]interface{}
if h := v.Hostname; h != "" {
system := make(map[string]interface{})
system["hostname"] = v.Hostname
system["os"] = v.OS
system["platform"] = v.Platform
system["platform_family"] = v.PlatformFamily
system["platform_version"] = v.PlatformVersion
system["arch"] = v.Arch
var networks []map[string]interface{}
for _, i := range v.Network.Interfaces {
network := make(map[string]interface{})
network["name"] = i.Name
network["mac"] = i.MAC
network["addresses"] = i.Addresses
networks = append(networks, network)
}
system["network_interfaces"] = networks
systems = append(systems, system)
}
return systems
}
// Suppress diffs for REDACTED values.
func suppressDiffRedacted(k, old, new string, d *schema.ResourceData) bool {
return new != "" && old == "REDACTED"
}
// Check Proxy Requests
func expandCheckProxyRequests(v []interface{}) types.ProxyRequests {
var proxyRequests types.ProxyRequests
for _, v := range v {
proxyData := v.(map[string]interface{})
// entity attributes
if raw, ok := proxyData["entity_attributes"]; ok {
list := raw.([]interface{})
proxyRequests.EntityAttributes = expandStringList(list)
}
// splay
if raw, ok := proxyData["splay"]; ok {
proxyRequests.Splay = raw.(bool)
}
// splay coverage
if raw, ok := proxyData["splay_coverage"]; ok {
proxyRequests.SplayCoverage = uint32(raw.(int))
}
}
return proxyRequests
}
func flattenCheckProxyRequests(v *types.ProxyRequests) []map[string]interface{} {
var proxyRequests []map[string]interface{}
if v != nil {
if len(v.EntityAttributes) > 0 {
pr := make(map[string]interface{})
pr["entity_attributes"] = v.EntityAttributes
pr["splay"] = v.Splay
pr["splay_coverage"] = v.SplayCoverage
proxyRequests = append(proxyRequests, pr)
}
}
return proxyRequests
}
// Secret Values
var resourceSecretValuesSchema = &schema.Schema{
Type: schema.TypeMap,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
}
var dataSecretValuesSchema = &schema.Schema{
Type: schema.TypeMap,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
}
func expandSecretValues(v map[string]interface{}) []*v2.Secret {
var secretValues []*v2.Secret
for key, val := range v {
raw := val.(string)
secretValue := new(v2.Secret)
secretValue.Name = key
secretValue.Secret = raw
secretValues = append(secretValues, secretValue)
}
return secretValues
}
func flattenSecretValues(v []*v2.Secret) map[string]string {
secretValues := make(map[string]string)
for _, v := range v {
secretValues[v.Name] = v.Secret
}
return secretValues
}
|
[
1
] |
package main
// FibEvenTotal sums the even Fibonacci numbers strictly below max (Project Euler problem 2).
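// For example, FibEvenTotal(100) = 2 + 8 + 34 = 44.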
func FibEvenTotal(max int) int {
total := 0
previousFib := 0
nextFib := 1
for nextFib < max {
if nextFib%2 == 0 {
total += nextFib
}
previousFib, nextFib = nextFib, previousFib+nextFib
}
return total
}
|
[
1
] |
package controllers
import (
"github.com/makdenis/Golang-Api-project/Models"
"database/sql"
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"net/http"
"strconv"
)
func Vote(Db *sql.DB, respWriter http.ResponseWriter, request *http.Request) {
respWriter.Header().Set("Content-Type", "application/json; charset=utf-8")
slug := mux.Vars(request)["slug"]
vote := Models.Vote{}
if err := json.NewDecoder(request.Body).Decode(&vote); err != nil {
panic(err)
}
id, _ := strconv.Atoi(slug)
user := GetUsersByEmailOrNick(Db, "", vote.NickName)
if len(user) == 0 {
respWriter.WriteHeader(http.StatusNotFound)
writeJSONBody(&respWriter, errr{"Can't find user with nickname: " + vote.NickName})
return
}
threads := GetThreadBySlugorID(Db, slug, id)
if len(threads) == 0 {
respWriter.WriteHeader(http.StatusNotFound)
writeJSONBody(&respWriter, errr{"Can't find thread with slug or id: " + slug})
return
}
var count int
var failed bool
insertVoteQuery := `insert into votes (username, thread, voice) values ($1, $2, $3)`
if _, err := Db.Exec(insertVoteQuery, vote.NickName, threads[0].ID, vote.Voice); err != nil {
fmt.Println(err)
failed = true
}
if !failed {
if vote.Voice > 0 {
upvoteQuery := `update threads set votes=votes+1 where LOWER(slug)=lower($1) or id = $2;`
threads[0].Votes++
if _, err := Db.Exec(upvoteQuery, slug, id); err != nil {
fmt.Println(err)
}
}
if vote.Voice < 0 {
votes := GetVoteByUser(Db, vote.NickName)
if len(votes) > 0 {
count = 2
} else {
count = 1
}
downvoteQuery := `update threads set votes=votes-$3 where LOWER(slug)=lower($1) or id = $2;`
threads[0].Votes -= count
if _, err := Db.Exec(downvoteQuery, slug, id, count); err != nil {
fmt.Println(err)
}
}
}
respWriter.WriteHeader(http.StatusOK)
writeJSONBody(&respWriter, threads[0])
}
func GetVoteByUser(Db *sql.DB, user string) []Models.Vote {
votes := make([]Models.Vote, 0)
query := "SELECT username::text, voice::integer FROM votes WHERE LOWER(username) = LOWER($1)"
resultRows, err := Db.Query(query, user)
if err != nil {
fmt.Println(err)
return votes
}
defer resultRows.Close()
for resultRows.Next() {
vote := new(Models.Vote)
if err := resultRows.Scan(&vote.NickName, &vote.Voice); err != nil {
fmt.Println(err)
continue
}
votes = append(votes, *vote)
}
return votes
}
func GetThreadDetails(Db *sql.DB, respWriter http.ResponseWriter, request *http.Request) {
respWriter.Header().Set("Content-Type", "application/json; charset=utf-8")
thr := make([]Models.Thread, 0)
query := "SELECT author::text, created::timestamp, forum::text, id::integer, message::text, slug::text,title::text, votes::integer FROM threads WHERE LOWER(slug) = LOWER($1) or id=$2 "
slug := mux.Vars(request)["slug"]
id, _ := strconv.Atoi(slug)
resultRows, err := Db.Query(query, slug, id)
if err != nil {
fmt.Println(err)
respWriter.WriteHeader(http.StatusInternalServerError)
return
}
defer resultRows.Close()
for resultRows.Next() {
thread := new(Models.Thread)
if err := resultRows.Scan(&thread.Author, &thread.Created, &thread.Forum, &thread.ID, &thread.Message, &thread.Slug, &thread.Title, &thread.Votes); err != nil {
fmt.Println(err)
continue
}
thr = append(thr, *thread)
}
if len(thr) > 0 {
respWriter.WriteHeader(http.StatusOK)
writeJSONBody(&respWriter, thr[0])
return
}
respWriter.WriteHeader(http.StatusNotFound)
writeJSONBody(&respWriter, errr{"Can't find thread with slug or id: " + slug})
}
|
[
2
] |
package misc
import (
"encoding/json"
"fmt"
stuc "github.com/tomk0/libs/structs"
)
func CheckError(err error){
if err != nil {
panic(err)
}
}
func JSONCompile(cmd string, data stuc.DataOut) string {
cmdOut := stuc.CmdOut{Cmd: cmd, Data: data}
jsonEnc, err := json.Marshal(cmdOut)
CheckError(err)
return string(jsonEnc)
}
func PrintOutAllOrders(Orders []stuc.OrderOut) {
for _, Order := range Orders {
PrintOutAnOrder(Order)
}
}
func PrintOutAnOrder(Order stuc.OrderOut){
fmt.Println("###########################################################################")
fmt.Println("ID: ", Order.ID)
fmt.Println("Time: ", Order.Time)
fmt.Println("Tabel: ", Order.Tabel)
fmt.Println("Total: ", Order.Total)
for i, Items := range Order.Items{
fmt.Println("---------------------------------------------------------------", Order.ID, " ", i)
fmt.Println("Name: ", Items.Name)
fmt.Println("Notes: ", Items.Notes)
fmt.Println("Amount: ", Items.Amount)
}
}
|
[
2
] |
package recordOperation
import (
"encoding/json"
"fmt"
"log"
"net/http"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/services/alidns"
"github.com/sunliang711/aliddns2/types"
)
type UpdateRecordResponse struct {
RequestId string `json:"RequestId"`
RecordId string `json:"RecordId"`
}
func (o *Operator) UpdateRecord(recordId, RR, Type, Value, TTL string) (string, error) {
log.Printf("UpdateRecord(): recordId:%v, RR:%v, Type:%v, Value:%v, TTL:%v", recordId, RR, Type, Value, TTL)
defer func() {
log.Printf("Leave UpdateRecord()")
}()
request := alidns.CreateUpdateDomainRecordRequest()
request.RecordId = recordId
request.RR = RR
request.Type = Type
request.Value = Value
request.TTL = requests.Integer(TTL)
response, err := o.client.UpdateDomainRecord(request)
if err != nil {
log.Printf(">>UpdateDomainRecord error:%v", err)
return "", err
}
if response.GetHttpStatus() != http.StatusOK {
log.Printf(">>%v", types.ErrHttpStatusNotOK)
return "", types.ErrHttpStatusNotOK
}
var res UpdateRecordResponse
err = json.Unmarshal(response.GetHttpContentBytes(), &res)
if err != nil {
log.Printf(">>json.Unmarshal error:%v", err)
return "", err
}
if res.RecordId != recordId {
log.Printf(">>%v", types.ErrResponseIdNotMatchRequestId)
return "", types.ErrResponseIdNotMatchRequestId
}
return response.GetHttpContentString(), nil
}
|
[
2
] |
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package elgamal
import (
"github.com/IBM/mathlib"
"github.com/pkg/errors"
)
type PublicKey struct {
Gen *math.G1
H *math.G1
Curve *math.Curve
}
type Ciphertext struct {
C1 *math.G1
C2 *math.G1
}
type SecretKey struct {
*PublicKey
x *math.Zr
}
func NewSecretKey(sk *math.Zr, gen, pk *math.G1, c *math.Curve) *SecretKey {
return &SecretKey{
x: sk,
PublicKey: &PublicKey{
Gen: gen,
H: pk,
Curve: c,
},
}
}
// Encrypt encrypts the group element M under the ElGamal public key and returns the ciphertext together with the randomness r used.
func (pk *PublicKey) Encrypt(M *math.G1) (*Ciphertext, *math.Zr, error) {
if pk.Gen == nil || pk.H == nil {
return nil, nil, errors.Errorf("Provide a non-nil Elgamal public key")
}
rand, err := pk.Curve.Rand()
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to compute Elgamal ciphertext")
}
r := pk.Curve.NewRandomZr(rand)
c := &Ciphertext{
C1: pk.Gen.Mul(r),
}
c.C2 = pk.H.Mul(r)
c.C2.Add(M)
return c, r, nil
}
// Decrypt recovers the plaintext from an ElGamal ciphertext: since C1 = r*Gen and C2 = r*H + M with H = x*Gen, computing C2 - x*C1 yields M. Note that the ciphertext is modified in place.
func (sk *SecretKey) Decrypt(c *Ciphertext) *math.G1 {
c.C2.Sub(c.C1.Mul(sk.x))
return c.C2
}
// EncryptZr encrypts a scalar m in Zr by encrypting the group element m*Gen.
func (pk *PublicKey) EncryptZr(m *math.Zr) (*Ciphertext, *math.Zr, error) {
rand, err := pk.Curve.Rand()
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to compute Elgamal ciphertext")
}
r := pk.Curve.NewRandomZr(rand)
c := &Ciphertext{
C1: pk.Gen.Mul(r),
}
c.C2 = pk.H.Mul(r)
c.C2.Add(pk.Gen.Mul(m))
return c, r, nil
}
|
[
2
] |
// errorcheck
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Issue 1606.
package main
func main() {
var x interface{}
switch t := x.(type) {
case 0: // ERROR "type"
t.x = 1
x.x = 1 // ERROR "type interface \{\}|reference to undefined field or method|interface with no methods|undefined"
}
}
|
[
7
] |
// Copyright 2020 The Atlas Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kafka
import (
"io"
"github.com/golang/protobuf/proto"
log "github.com/sirupsen/logrus"
"github.com/binarly-io/atlas/pkg/api/atlas"
)
// DataChunkTransport defines transport level interface
// Has the following functions:
// Send(*DataChunk) error
// Recv() (*DataChunk, error)
type DataChunkTransport struct {
Transport
}
// NewDataChunkTransport wraps the given producer and consumer in a DataChunkTransport.
func NewDataChunkTransport(producer *Producer, consumer *Consumer, close bool) *DataChunkTransport {
log.Infof("kafka.NewDataChunkTransport() - start")
defer log.Infof("kafka.NewDataChunkTransport() - end")
return &DataChunkTransport{
Transport{
producer: producer,
consumer: consumer,
close: close,
},
}
}
// Send marshals the DataChunk to protobuf and publishes it via the producer.
func (t *DataChunkTransport) Send(dataChunk *atlas.DataChunk) error {
log.Infof("kafka.DataChunkTransport.Send() - start")
defer log.Infof("kafka.DataChunkTransport.Send() - end")
if buf, err := proto.Marshal(dataChunk); err == nil {
return t.producer.Send(buf)
} else {
return err
}
}
// Recv reads one message from the consumer and unmarshals it into a DataChunk; a nil message is reported as io.EOF.
func (t *DataChunkTransport) Recv() (*atlas.DataChunk, error) {
log.Infof("kafka.DataChunkTransport.Recv() - start")
defer log.Infof("kafka.DataChunkTransport.Recv() - end")
msg := t.consumer.Recv()
if msg == nil {
// TODO not sure
return nil, io.EOF
}
dataChunk := &atlas.DataChunk{}
return dataChunk, proto.Unmarshal(msg.Value, dataChunk)
}
|
[
1
] |
package api
import (
"context"
"github.com/golang/protobuf/ptypes"
"go_grpc_gorm_micro/lib/response"
"go_grpc_gorm_micro/proto/proto"
"go_grpc_gorm_micro/service"
"google.golang.org/protobuf/types/known/anypb"
)
type SysApis struct{}
// Generated CRUD handlers for SysApis.
func (s *SysApis) Create(ctx context.Context, req *proto.SysApis) (*proto.Response, error) {
data, err := service.CreateSysApis(req)
return response.SuccessAny(data), err
}
func (s *SysApis) Delete(ctx context.Context, req *proto.SysApis) (*proto.Response, error) {
data, err := service.DeleteSysApis(req)
return response.SuccessAny(data), err
}
func (s *SysApis) DeleteById(ctx context.Context, req *proto.SysApis) (*proto.Response, error) {
data, err := service.DeleteByIdSysApis(req)
return response.SuccessAny(data), err
}
func (s *SysApis) Update(ctx context.Context, req *proto.SysApis) (*proto.Response, error) {
data, err := service.UpdateSysApis(req)
return response.SuccessAny(data), err
}
func (s *SysApis) Find(ctx context.Context, req *proto.SysApis) (*proto.Response, error) {
data, err := service.FindSysApis(req)
return response.SuccessAny(data), err
}
func (s *SysApis) Lists(ctx context.Context, req *proto.Request) (*proto.Responses, error) {
data, total, err := service.GetListSysApis(req)
var any = make([]*anypb.Any, len(data))
for k, r := range data {
any[k], err = ptypes.MarshalAny(r)
}
return response.SuccesssAny(any, total), err
}
|
[
1
] |
package testLightly
import "fmt"
type Reduction func(x, y interface{}) interface{}
type ValueSet []interface{}
type TestTable struct {
XValues ValueSet
YValues ValueSet
compatibility []ValueSet
test Reduction
}
func NewTestTable(test Reduction) *TestTable {
t := new(TestTable)
t.test = test
return t
}
func (t *TestTable) X(values ...interface{}) *TestTable {
t.XValues = append(t.XValues, values...)
return t
}
func (t *TestTable) Y(value interface{}, compatibility ...interface{}) *TestTable {
t.YValues = append(t.YValues, value)
t.compatibility = append(t.compatibility, append(ValueSet{}, compatibility...))
return t
}
func (t *TestTable) Assess(T *Test) *TestTable {
for row, y := range t.YValues {
for column, x := range t.XValues {
expectation := t.compatibility[row][column]
if result := t.Apply(x, y); result != expectation {
T.Error(fmt.Sprintf("[%v, %v]", row, column), fmt.Sprintf("-> expected %v got %v", expectation, result))
}
}
}
return t
}
func (t *TestTable) Apply(x, y interface{}) (i interface{}) {
defer func() { if recover() != nil { i = nil } }()
return t.test(x, y)
}
|
[
2
] |
package main
import "fmt"
func main(){
nums := scanNums(2)
x := nums[0]
y := nums[1]
fmt.Printf("%d\n", x + y/2)
}
// scanNums reads len whitespace-separated numbers from a single line of standard input.
func scanNums(len int) (nums []int) {
nums = make([]int, len)
for i := 0; i < len; i++ {
fmt.Scan(&nums[i])
}
return
}
|
[
1
] |
package community_api_go
func isEqualError(err error, messages ...string) bool {
if err == nil {
return false
}
for _, msg := range messages {
if err.Error() == msg {
return true
}
}
return false
}
|
[
1
] |
package rpc
import (
"context"
pb "github.com/gagarinchain/common/protobuff"
"time"
)
type CommonClient struct {
*Client
pbc pb.CommonServiceClient
}
func (c *CommonClient) Pbc() pb.CommonServiceClient {
return c.pbc
}
func InitCommonClient(address string) *CommonClient {
client := NewClient(address)
return &CommonClient{
Client: client,
pbc: pb.NewCommonServiceClient(client.conn),
}
}
func (c *CommonClient) PollView(ctx context.Context) chan int32 {
timer := time.NewTimer(10 * time.Microsecond)
res := make(chan int32)
go func() {
currentView := int32(0)
for {
select {
case <-timer.C:
if view, err := c.pbc.GetCurrentView(ctx, &pb.GetCurrentViewRequest{}); err != nil {
log.Error(err)
} else {
if currentView != view.View {
currentView = view.View
res <- view.View
}
}
timer = time.NewTimer(50 * time.Microsecond)
case <-ctx.Done():
close(res)
return
}
}
}()
return res
}
|
[
4
] |
package server
import (
"reflect"
"strconv"
"sync/atomic"
"testing"
"github.com/stretchr/testify/assert"
)
func TestCreateEngine(t *testing.T) {
router := New()
assert.Equal(t, "/", router.basePath)
assert.Equal(t, router.engine, router)
assert.Empty(t, router.Handlers)
}
func TestAddRoute(t *testing.T) {
router := New()
router.addRoute("GET", "/", HandlersChain{func(_ *Context) {}})
assert.Len(t, router.trees, 1)
assert.NotNil(t, router.trees.get("GET"))
assert.Nil(t, router.trees.get("POST"))
router.addRoute("POST", "/", HandlersChain{func(_ *Context) {}})
assert.Len(t, router.trees, 2)
assert.NotNil(t, router.trees.get("GET"))
assert.NotNil(t, router.trees.get("POST"))
router.addRoute("POST", "/post", HandlersChain{func(_ *Context) {}})
assert.Len(t, router.trees, 2)
}
func TestAddRouteFails(t *testing.T) {
router := New()
assert.Panics(t, func() { router.addRoute("", "/", HandlersChain{func(_ *Context) {}}) })
assert.Panics(t, func() { router.addRoute("GET", "a", HandlersChain{func(_ *Context) {}}) })
assert.Panics(t, func() { router.addRoute("GET", "/", HandlersChain{}) })
router.addRoute("POST", "/post", HandlersChain{func(_ *Context) {}})
assert.Panics(t, func() {
router.addRoute("POST", "/post", HandlersChain{func(_ *Context) {}})
})
}
func TestNoRouteWithoutGlobalHandlers(t *testing.T) {
var middleware0 HandlerFn = func(c *Context) {}
var middleware1 HandlerFn = func(c *Context) {}
router := New()
router.NoRoute(middleware0)
assert.Nil(t, router.Handlers)
assert.Len(t, router.noRoute, 1)
assert.Len(t, router.allNoRoute, 1)
compareFunc(t, router.noRoute[0], middleware0)
compareFunc(t, router.allNoRoute[0], middleware0)
router.NoRoute(middleware1, middleware0)
assert.Len(t, router.noRoute, 2)
assert.Len(t, router.allNoRoute, 2)
compareFunc(t, router.noRoute[0], middleware1)
compareFunc(t, router.allNoRoute[0], middleware1)
compareFunc(t, router.noRoute[1], middleware0)
compareFunc(t, router.allNoRoute[1], middleware0)
}
func TestNoRouteWithGlobalHandlers(t *testing.T) {
var middleware0 HandlerFn = func(c *Context) {}
var middleware1 HandlerFn = func(c *Context) {}
var middleware2 HandlerFn = func(c *Context) {}
router := New()
router.Use(middleware2)
router.NoRoute(middleware0)
assert.Len(t, router.allNoRoute, 2)
assert.Len(t, router.Handlers, 1)
assert.Len(t, router.noRoute, 1)
compareFunc(t, router.Handlers[0], middleware2)
compareFunc(t, router.noRoute[0], middleware0)
compareFunc(t, router.allNoRoute[0], middleware2)
compareFunc(t, router.allNoRoute[1], middleware0)
router.Use(middleware1)
assert.Len(t, router.allNoRoute, 3)
assert.Len(t, router.Handlers, 2)
assert.Len(t, router.noRoute, 1)
compareFunc(t, router.Handlers[0], middleware2)
compareFunc(t, router.Handlers[1], middleware1)
compareFunc(t, router.noRoute[0], middleware0)
compareFunc(t, router.allNoRoute[0], middleware2)
compareFunc(t, router.allNoRoute[1], middleware1)
compareFunc(t, router.allNoRoute[2], middleware0)
}
func TestNoMethodWithoutGlobalHandlers(t *testing.T) {
var middleware0 HandlerFn = func(c *Context) {}
var middleware1 HandlerFn = func(c *Context) {}
router := New()
router.NoMethod(middleware0)
assert.Empty(t, router.Handlers)
assert.Len(t, router.noMethod, 1)
assert.Len(t, router.allNoMethod, 1)
compareFunc(t, router.noMethod[0], middleware0)
compareFunc(t, router.allNoMethod[0], middleware0)
router.NoMethod(middleware1, middleware0)
assert.Len(t, router.noMethod, 2)
assert.Len(t, router.allNoMethod, 2)
compareFunc(t, router.noMethod[0], middleware1)
compareFunc(t, router.allNoMethod[0], middleware1)
compareFunc(t, router.noMethod[1], middleware0)
compareFunc(t, router.allNoMethod[1], middleware0)
}
func TestRebuild404Handlers(t *testing.T) {
}
func TestNoMethodWithGlobalHandlers(t *testing.T) {
var middleware0 HandlerFn = func(c *Context) {}
var middleware1 HandlerFn = func(c *Context) {}
var middleware2 HandlerFn = func(c *Context) {}
router := New()
router.Use(middleware2)
router.NoMethod(middleware0)
assert.Len(t, router.allNoMethod, 2)
assert.Len(t, router.Handlers, 1)
assert.Len(t, router.noMethod, 1)
compareFunc(t, router.Handlers[0], middleware2)
compareFunc(t, router.noMethod[0], middleware0)
compareFunc(t, router.allNoMethod[0], middleware2)
compareFunc(t, router.allNoMethod[1], middleware0)
router.Use(middleware1)
assert.Len(t, router.allNoMethod, 3)
assert.Len(t, router.Handlers, 2)
assert.Len(t, router.noMethod, 1)
compareFunc(t, router.Handlers[0], middleware2)
compareFunc(t, router.Handlers[1], middleware1)
compareFunc(t, router.noMethod[0], middleware0)
compareFunc(t, router.allNoMethod[0], middleware2)
compareFunc(t, router.allNoMethod[1], middleware1)
compareFunc(t, router.allNoMethod[2], middleware0)
}
func compareFunc(t *testing.T, a, b interface{}) {
sf1 := reflect.ValueOf(a)
sf2 := reflect.ValueOf(b)
if sf1.Pointer() != sf2.Pointer() {
t.Error("different functions")
}
}
func TestEngineHandleContext(t *testing.T) {
r := New()
r.GET("/", func(c *Context) {
c.Request.URL.Path = "/v2"
r.HandleContext(c)
})
v2 := r.Group("/v2")
{
v2.GET("/", func(c *Context) {})
}
assert.NotPanics(t, func() {
w := performRequest(r, "GET", "/")
assert.Equal(t, 301, w.Code)
})
}
func TestEngineHandleContextManyReEntries(t *testing.T) {
expectValue := 10000
var handlerCounter, middlewareCounter int64
r := New()
r.Use(func(c *Context) {
atomic.AddInt64(&middlewareCounter, 1)
})
r.GET("/:count", func(c *Context) {
countStr := c.Param("count")
count, err := strconv.Atoi(countStr)
assert.NoError(t, err)
n, err := c.Writer.Write([]byte("."))
assert.NoError(t, err)
assert.Equal(t, 1, n)
switch {
case count > 0:
c.Request.URL.Path = "/" + strconv.Itoa(count-1)
r.HandleContext(c)
}
}, func(c *Context) {
atomic.AddInt64(&handlerCounter, 1)
})
assert.NotPanics(t, func() {
w := performRequest(r, "GET", "/"+strconv.Itoa(expectValue-1)) // include 0 value
assert.Equal(t, 200, w.Code)
assert.Equal(t, expectValue, w.Body.Len())
})
assert.Equal(t, int64(expectValue), handlerCounter)
assert.Equal(t, int64(expectValue), middlewareCounter)
}
func assertRoutePresent(t *testing.T, gotRoutes RoutesInfo, wantRoute RouteInfo) {
for _, gotRoute := range gotRoutes {
if gotRoute.Path == wantRoute.Path && gotRoute.Method == wantRoute.Method {
assert.Regexp(t, wantRoute.Handler, gotRoute.Handler)
return
}
}
t.Errorf("route not found: %v", wantRoute)
}
func handlerTest1(c *Context) {}
func handlerTest2(c *Context) {}
|
[
7
] |
package main
import "fmt"
func main() {
var divisor, bound int
fmt.Print("Please Enter divisor: ")
fmt.Scan(&divisor)
fmt.Println()
fmt.Print("Please Enter bound: ")
fmt.Scan(&bound)
fmt.Println("Greatest Divisor is: ", maxDivisor(divisor,bound))
}
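// maxDivisor returns the largest multiple of divisor that does not exceed
// bound (or divisor itself when bound < divisor). For example,
// maxDivisor(7, 50) = 49.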
func maxDivisor(divisor, bound int) int {
var max = divisor
for num := divisor + 1; num <= bound; num++ {
if num % divisor == 0 && num > max {
max = num
}
}
return max
}
|
[
1
] |
package heap
type ArrayHeap struct {
data []int
cap int
len int
}
func NewArrayHeap(cap int) *ArrayHeap {
return &ArrayHeap{
data: make([]int, cap+1), // index 0 is unused
cap: cap,
len: 0,
}
}
func (this *ArrayHeap) Insert(v int) {
if this.len == this.cap {
return
}
this.len++
this.data[this.len] = v
// sift the new element up to restore the heap property
i := this.len
pre := i / 2
for pre > 0 && this.data[pre] < v {
this.data[pre], this.data[i] = this.data[i], this.data[pre]
i = pre
pre = i / 2
}
}
// RemoveTop removes the top (largest) element of the heap
func (this *ArrayHeap) RemoveTop() {
if this.len == 0 {
return
}
// move the last element in the heap to the top, then heapify downwards
this.data[1] = this.data[this.len]
this.data[this.len] = 0
this.len--
this.heapify(1)
}
func (this *ArrayHeap) BuildHeap(data []int) {
k := 1
for _, v := range data {
this.data[k] = v
k++
}
this.len = len(data)
// heapify: it is enough to start from len/2, since all higher indices are leaves
for i := this.len / 2; i >= 1; i-- {
this.heapify(i)
}
}
// max-heap: heapify sifts the element at index i downwards, one parent/child swap at a time
func (this *ArrayHeap) heapify(i int) {
for {
maxPos := i
// 左
if i*2 <= this.len && this.data[i*2] > this.data[maxPos] {
maxPos = i * 2
}
// 右
if i*2+1 <= this.len && this.data[i*2+1] > this.data[maxPos] {
maxPos = i*2 + 1
}
if maxPos == i {
break
}
// 交换
this.data[i], this.data[maxPos] = this.data[maxPos], this.data[i]
i = maxPos
}
}
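// Illustrative usage sketch (not part of the original file): build a max-heap
// from a slice and pop the top element. With the input below, data[1] holds 9
// after BuildHeap and 7 after RemoveTop.
func exampleArrayHeapUsage() {
h := NewArrayHeap(10)
h.BuildHeap([]int{3, 7, 1, 9, 5})
top := h.data[1] // 9: the largest value sits at index 1
h.RemoveTop()
next := h.data[1] // 7: the next largest value
_, _ = top, next
}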
|
[
1
] |
package users
import (
"testing"
"golang.org/x/crypto/bcrypt"
)
func TestValidate(t *testing.T) {
cases := []struct {
name string
user *NewUser
expectError bool
}{
{
"Basic case",
&NewUser{
"[email protected]",
"password",
"password",
"Username",
"firstname",
"lastname",
},
false,
},
{
"Email badly formatted",
&NewUser{
"@@@newuser.com",
"password",
"password",
"Username",
"firstname",
"lastname",
},
true,
},
{
"Non matching password confirmation",
&NewUser{
"[email protected]",
"password",
"password2",
"Username",
"firstname",
"lastname",
},
true,
},
{
"Password less than 6 characters",
&NewUser{
"[email protected]",
"a",
"a",
"Username",
"firstname",
"lastname",
},
true,
},
{
"Username has spaces",
&NewUser{
"[email protected]",
"password",
"password",
" Username",
"firstname",
"lastname",
},
true,
},
{
"No username provided",
&NewUser{
"[email protected]",
"password",
"password",
"",
"firstname",
"lastname",
},
true,
},
{
"Empty user",
&NewUser{},
true,
},
}
for _, c := range cases {
anyErr := c.user.Validate()
if anyErr != nil && !c.expectError {
t.Errorf("Unexpected error occurred. Got \"%v\" for test [%s]", anyErr, c.name)
}
if anyErr == nil && c.expectError {
t.Errorf("Expected error but received none for test [%s]", c.name)
}
}
}
func TestSetPassword(t *testing.T) {
cases := []struct {
name string
password string
}{
{
"Test hashing works correctly",
"password",
},
{
"Test hashing a crazy string",
"adsfjsayu83oi147103985thrwuelijkfdo(*@&^#*(U",
},
{
"Test empty string",
"",
},
{
"Test super long space string",
" ",
},
}
for _, c := range cases {
user := &User{}
anyErr := user.SetPassword(c.password)
if anyErr != nil {
t.Errorf("Unexpected error occurred. Got \"%v\" for test [%s]", anyErr, c.name)
}
if err := bcrypt.CompareHashAndPassword(user.PassHash, []byte(c.password)); err != nil {
t.Errorf("Password and hash matching failed for test [%s]", c.name)
}
}
}
func TestToUser(t *testing.T) {
cases := []struct {
name string
user *NewUser
expectError bool
}{
{
"Working case",
&NewUser{
"[email protected]",
"password",
"password",
"username",
"firstname",
"lastname",
},
false,
},
{
"Case expected to fail validation",
&NewUser{
"[email protected]",
"password",
"passwordconffail",
"username",
"firstname",
"lastname",
},
true,
},
}
for _, c := range cases {
user, err := c.user.ToUser()
if !c.expectError {
if err != nil {
t.Errorf("Unexpected error occurred for test [%s]. Received \"%v\"", c.name, err)
}
if c.user.FirstName != user.FirstName {
t.Errorf("First name does not match for test [%s]. Expected \"%s\" but got \"%s\"", c.name, c.user.FirstName, user.FirstName)
}
if c.user.LastName != user.LastName {
t.Errorf("Last name does not match for test [%s]. Expected \"%s\" but got \"%s\"", c.name, c.user.LastName, user.LastName)
}
if c.user.Email != user.Email {
t.Errorf("Email does not match for test [%s]. Expected \"%s\" but got \"%s\"", c.name, c.user.Email, user.Email)
}
} else {
if err == nil {
t.Errorf("Expected validation error but got %v", err)
}
}
}
}
func TestGetGravitar(t *testing.T) {
baseURL := "https://www.gravatar.com/avatar/"
cases := []struct {
name string
input string
outputHashString string
}{
{
"Simple case",
"[email protected]",
"0bc83cb571cd1c50ba6f3e8a78ef1346",
},
{
"Space case",
" [email protected] ",
"0bc83cb571cd1c50ba6f3e8a78ef1346",
},
{
"Random casing case",
"[email protected]",
"0bc83cb571cd1c50ba6f3e8a78ef1346",
},
}
for _, c := range cases {
tempUser := &User{}
GetGravitar(tempUser, c.input)
if tempUser.PhotoURL != baseURL+c.outputHashString {
t.Errorf("Error, hash doesn't match expected output. Expected [%s] but got [%s]", baseURL+c.outputHashString, tempUser.PhotoURL)
}
}
}
func TestFullName(t *testing.T) {
cases := []struct {
name string
user *User
expected string
}{
{
"Working case",
&User{
FirstName: "ABC",
LastName: "CDE",
},
"ABC CDE",
},
{
"No first name",
&User{
LastName: "CDE",
},
"CDE",
},
{
"No last name",
&User{
FirstName: "ABC",
},
"ABC",
},
{
"No first or last name",
&User{},
"",
},
}
for _, c := range cases {
if c.user.FullName() != c.expected {
t.Errorf("Failure on [%s], expected [%s] but got [%s]", c.name, c.expected, c.user.FullName())
}
}
}
func TestAuthenticate(t *testing.T) {
cases := []struct {
name string
password string
}{
{
"Base case",
"password",
},
{
"Empty password (never happens)",
"",
},
{
"Long password random string",
"asdjkfhslajkhr2uyio3y41o93yr@*^$&@$*^YFBDS",
},
}
for _, c := range cases {
user := &User{}
if err := user.SetPassword(c.password); err != nil {
t.Errorf("Unexpected error occurred on test [%s], got [%v]", c.name, err)
}
if err := user.Authenticate(c.password); err != nil {
t.Errorf("Unexpected error occurred on test [%s], got [%v]", c.name, err)
}
if err := user.Authenticate(c.password + "randomstufftoaddtopassword"); err == nil {
t.Errorf("Expected error, but got [%v]", err)
}
}
}
func TestApplyUpdates(t *testing.T) {
cases := []struct {
name string
firstName string
lastName string
newFirstName string
newLastName string
}{
{
"Base case",
"name",
"Lastname",
"namenew",
"namenewlastname",
},
{
"Empty last name",
"firstname",
"lastname",
"newfirstname",
"",
},
{
"Empty first name",
"firstname",
"lastname",
"",
"newlastname",
},
{
"Empty new name",
"firstname",
"lastname",
"",
"",
},
}
for _, c := range cases {
user := &User{
FirstName: c.firstName,
LastName: c.lastName,
}
if err := user.ApplyUpdates(&Updates{FirstName: c.newFirstName, LastName: c.newLastName}); err != nil {
t.Errorf("Unexpected error occurred in case [%s], got [%v]", c.name, err)
}
if c.newFirstName != "" && user.FirstName != c.newFirstName {
t.Errorf("First name does not match new first name, expected [%s] but got [%s]", c.newFirstName, user.FirstName)
}
if c.newFirstName == "" && user.FirstName != c.firstName {
t.Errorf("First name does not match expected output. Expected [%s], got [%s]", c.firstName, user.FirstName)
}
if c.newLastName != "" && user.LastName != c.newLastName {
t.Errorf("Last name does not match new last name, expected [%s] but got [%s]", c.newLastName, user.LastName)
}
if c.newLastName == "" && user.LastName != c.lastName {
t.Errorf("Last name does not match expected output. Expected [%s], got [%s]", c.lastName, user.LastName)
}
}
}
|
[
4
] |
package jzon
import (
"fmt"
"strings"
)
// state indicates the inner state of the `parsePath` state machine
type state int64
const (
// $.key1[1].big-array[1:4]
_Start state = 2 << iota
_Dollar // $ root
_Dot // . key mark
_LeftSB // [ index mark
_RightSB // ] index end
_Key // .* object key
_Index // [1-9]\d+ array index
_Colon // : slice mark
_Semicolon // ; line tail
)
var stateStrings = map[state]string{
_Start: "_Start",
_Dollar: "_Dollar",
_Dot: "_Dot",
_LeftSB: "_LeftSB",
_RightSB: "_RightSB",
_Key: "_Key",
_Index: "_Index",
_Colon: "_Colon",
_Semicolon: "_Semicolon",
}
func (st state) match(states ...state) bool {
for _, s := range states {
if uint64(st)&uint64(s) > 0 {
return true
}
}
return false
}
// Query searches for a child node in an object or an array; if no node
// exists at the given path, an error is returned
func (jz *Jzon) Query(path string) (g *Jzon, err error) {
// parsePath() does not guard against slice-bounds panics itself,
// so recover from them here and report them as an error
defer func() {
e := recover()
if e != nil {
err = fmt.Errorf("maybe out of bound: %v", e)
}
}()
return parsePath(jz, append([]byte(path), ';'))
}
// Search determines whether there exists the node on the given path
func (jz *Jzon) Search(path string) (exists bool) {
found, _ := jz.Query(path)
return found != nil
}
func expectState(real state, ex []state) error {
var sa []string
for _, s := range ex {
sa = append(sa, stateStrings[s])
}
expectStates := strings.Join(sa, " | ")
return fmt.Errorf("expect state %s, but the real state is %s", expectStates, stateStrings[real])
}
func parsePath(root *Jzon, path []byte) (curr *Jzon, err error) {
var st = _Start
var ex = []state{_Dollar}
var key string
// a typical state machine model
for {
switch {
case path[0] == '$' && st.match(_Start):
ex = []state{_Dot, _LeftSB, _Semicolon}
st = _Dollar
curr = root
path = path[1:]
case path[0] == ';' && st.match(_Dollar, _RightSB, _Key):
ex = []state{}
st = _Semicolon
return
case path[0] == '.' && st.match(_Dollar, _Key, _RightSB):
ex = []state{_Key}
st = _Dot
path = path[1:]
case path[0] == '[' && st.match(_Dollar, _Key):
ex = []state{_Index}
st = _LeftSB
path = path[1:]
case isDigit(path[0]) && st.match(_LeftSB):
ex = []state{_RightSB}
st = _Index
var n int64
var f float64
var isInt bool
n, f, isInt, path, err = parseNumeric(path)
if err != nil {
return
}
if !isInt {
err = fmt.Errorf("expect an integer index, but found float: %v", f)
return
}
curr, err = curr.ValueAt(int(n))
if err != nil {
return
}
case path[0] == ']' && st.match(_Index):
ex = []state{_Dot, _Semicolon}
st = _RightSB
path = path[1:]
case st.match(_Dot):
ex = []state{_Dot, _LeftSB, _Semicolon}
st = _Key
key, path, err = parsePathKey(path)
if err != nil {
return
}
curr, err = curr.ValueOf(key)
if err != nil {
return
}
default:
return nil, expectState(st, ex)
}
}
}
// parsePathKey parses like `parseKey()`, except that the key is not
// surrounded by double quotes and a few extra characters can be escaped
func parsePathKey(path []byte) (k string, rem []byte, err error) {
var parsed = make([]byte, 0, SHORT_STRING_OPTIMIZED_CAP)
var c byte
rem = path
for {
switch {
case rem[0] == '\\' && rem[1] == 'u':
var utf8str []byte
utf8str, rem, err = parseUnicode(rem)
if err != nil {
return
}
parsed = append(parsed, utf8str...)
continue
case rem[0] == '\\' && rem[1] == '.':
parsed = append(parsed, '.')
rem = rem[2:]
continue
case rem[0] == '\\' && rem[1] == '[':
parsed = append(parsed, '[')
rem = rem[2:]
continue
case rem[0] == '\\' && rem[1] == ']':
parsed = append(parsed, ']')
rem = rem[2:]
continue
case rem[0] == '\\' && rem[1] == ';':
parsed = append(parsed, ';')
rem = rem[2:]
continue
case rem[0] == '\\' && rem[1] != 'u':
c, rem, err = parseEscaped(rem)
if err != nil {
return
}
parsed = append(parsed, c)
continue
case rem[0] == '.' || rem[0] == '[' || rem[0] == ';':
goto End
default:
parsed = append(parsed, rem[0])
pos.col += 1
rem = rem[1:]
continue
}
}
End:
return string(parsed), rem, nil
}
|
[
1
] |
package items
import (
"encoding/json"
"fmt"
"github.com/Rohan12152001/Syook_Assignment/managers/items"
"github.com/Rohan12152001/Syook_Assignment/managers/items/data"
"github.com/Rohan12152001/Syook_Assignment/managers/middleware"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"golang.org/x/xerrors"
"io/ioutil"
"strconv"
)
type Items struct {
itemManager items.ItemsManager
authManager middleware.AuthManager
}
var logger = logrus.New()
func New() Items {
return Items{
itemManager: items.New(),
authManager: middleware.New(),
}
}
func getParam(c *gin.Context, paramName string) string {
return c.Params.ByName(paramName)
}
func (I Items) SetRoutes(router *gin.Engine) {
router.GET("/items", I.authManager.AuthMiddleWareWithUser, I.GetAllItems)
router.POST("/item", I.authManager.AuthMiddleWareWithUser, I.CreateItem)
router.GET("/item/:id", I.authManager.AuthMiddleWareWithUser, I.ReadItem)
router.PUT("/item/:id", I.authManager.AuthMiddleWareWithUser, I.UpdateItem)
}
// Handlers
func (I Items) GetAllItems(context *gin.Context) {
// Call manager
items, err := I.itemManager.GetAllItems(context)
if err != nil {
// errors
context.AbortWithStatus(500)
return
}
context.JSON(200, gin.H{
"items": items,
})
}
func (I Items) ReadItem(context *gin.Context) {
IdFromParam := getParam(context, "id")
Id, err := strconv.Atoi(IdFromParam)
if err != nil {
// errors
logrus.Error("err: ", err)
context.AbortWithStatus(500)
return
}
// Manager
item, err := I.itemManager.GetItem(context, Id)
if err != nil {
// errors
if xerrors.Is(err, items.ItemNotFound) {
context.JSON(404, gin.H{
"item": "Not found",
})
return
}
context.AbortWithStatus(500)
return
}
context.JSON(200, gin.H{
"item": item,
})
}
func (I Items) CreateItem(context *gin.Context) {
itemPayload := data.Item{}
b, err := ioutil.ReadAll(context.Request.Body)
if err != nil {
logger.Error("err: ", err)
context.AbortWithStatus(500)
return
}
err = json.Unmarshal(b, &itemPayload)
if err != nil {
logger.Error("err: ", err)
context.AbortWithStatus(500)
return
}
// Manager
itemId, err := I.itemManager.CreateItem(context, itemPayload.Name, itemPayload.Price)
if err != nil {
context.AbortWithStatus(500)
context.Error(err)
return
}
context.JSON(200, gin.H{
"itemId": itemId,
})
}
func (I Items) UpdateItem(context *gin.Context) {
itemPayload := data.Item{}
b, err := ioutil.ReadAll(context.Request.Body)
if err != nil {
logger.Error("err: ", err)
context.AbortWithStatus(500)
return
}
err = json.Unmarshal(b, &itemPayload)
if err != nil {
logger.Error("err: ", err)
context.AbortWithStatus(500)
return
}
// Get ID from pathParam
IdFromParam := getParam(context, "id")
Id, err := strconv.Atoi(IdFromParam)
if err != nil {
// errors
logger.Error("err: ", err)
context.AbortWithStatus(500)
return
}
// Update struct with ItemId
itemPayload.ItemId = Id
// Manager
ok, err := I.itemManager.UpdateItem(context, itemPayload)
if err != nil {
// errors
context.AbortWithStatus(500)
return
}
if !ok {
fmt.Println("Item not found!")
context.JSON(404, gin.H{
"item": "Not found",
})
context.AbortWithStatus(404)
} else {
context.JSON(200, gin.H{
"item": "Updated",
})
}
}
|
[
2
] |
package ttlmap
import (
"fmt"
"testing"
"time"
)
type testItem struct {
key string
item *Item
timestamp time.Time
}
func TestNewMap(t *testing.T) {
opts := &Options{}
m := New(opts)
if m == nil {
t.Fatalf("Expecting map")
}
defer m.Drain()
}
func TestNewMapWithoutOptions(t *testing.T) {
m := New(nil)
if m == nil {
t.Fatalf("Expecting map")
}
defer m.Drain()
}
func TestMapGetEmpty(t *testing.T) {
opts := &Options{}
m := New(opts)
defer m.Drain()
if m.Get("invalid") != nil {
t.Fatalf("Not expecting item")
}
}
func TestMapSetGet(t *testing.T) {
opts := &Options{}
m := New(opts)
defer m.Drain()
foo := NewItemWithTTL("hello", 1*time.Second)
if err := m.Set("foo", foo); err != nil {
t.Fatal(err)
}
if item := m.Get("foo"); item != foo || item.Value() != "hello" {
t.Fatalf("Invalid item")
}
bar := NewItemWithTTL("world", 1*time.Second)
if err := m.Set("bar", bar); err != nil {
t.Fatal(err)
}
if item := m.Get("bar"); item != bar || bar.Value() != "world" {
t.Fatalf("Invalid item")
}
}
func TestMapSetNXGet(t *testing.T) {
opts := &Options{}
m := New(opts)
defer m.Drain()
foo := NewItemWithTTL("hello", 1*time.Second)
if err := m.SetNX("foo", foo); err != nil {
t.Fatal(err)
}
if item := m.Get("foo"); item != foo || item.Value() != "hello" {
t.Fatalf("Invalid item")
}
bar := NewItemWithTTL("world", 1*time.Second)
if err := m.SetNX("bar", bar); err != nil {
t.Fatal(err)
}
if item := m.Get("bar"); item != bar || bar.Value() != "world" {
t.Fatalf("Invalid item")
}
bar2 := NewItemWithTTL("world2", 1*time.Second)
if err := m.SetNX("bar", bar2); err != ErrExists {
t.Fatal(err)
}
if item := m.Get("bar"); item != bar || bar.Value() != "world" {
t.Fatalf("Invalid item")
}
}
func TestMapSetDeleteGet(t *testing.T) {
opts := &Options{}
m := New(opts)
defer m.Drain()
foo := NewItemWithTTL("hello", 1*time.Second)
if err := m.Set("foo", foo); err != nil {
t.Fatal(err)
}
if item := m.Get("foo"); item != foo || item.Value() != "hello" {
t.Fatalf("Invalid item")
}
if m.Len() != 1 {
t.Fatalf("Invalid length")
}
if item := m.Delete("foo"); item != foo {
t.Fatalf("Invalid item")
}
if m.Len() != 0 {
t.Fatalf("Invalid length")
}
if item := m.Get("foo"); item != nil {
t.Fatalf("Not expecting item")
}
if item := m.Delete("foo"); item != nil {
t.Fatalf("Not expecting item")
}
}
func TestMapWaitExpired(t *testing.T) {
var expired []*testItem
opts := &Options{
OnWillExpire: func(key string, item *Item) {
expired = append(expired, &testItem{key, item, time.Now()})
},
}
m := New(opts)
defer m.Drain()
start := time.Now()
n, min := 100, 500
testMapSetNIncreasing(t, m, n, min, start)
time.Sleep(1 * time.Second)
if m.Len() != 0 {
t.Fatalf("Invalid length")
}
m.Drain()
if len(expired) != n {
t.Fatalf("Expecting %d expired items\n", n)
}
for i, eitem := range expired {
diff := eitem.timestamp.Sub(start)
diff -= time.Duration(i+min) * time.Millisecond
key := fmt.Sprintf("%d", i)
if eitem.key != key {
t.Fatalf("Wrong expiration key")
}
value := fmt.Sprintf("value for %s", key)
if eitem.item.Value() != value {
t.Fatalf("Wrong expiration value")
}
if diff < 0 || diff > 10*time.Millisecond {
t.Fatalf("Wrong expiration time")
}
}
}
func testMapSetNIncreasing(t *testing.T, m *Map, n, min int, start time.Time) {
for i := 0; i < n; i++ {
key := fmt.Sprintf("%d", i)
value := fmt.Sprintf("value for %s", key)
ttl := time.Duration(i+min) * time.Millisecond
expiration := start.Add(ttl)
item := NewItem(value, expiration)
if err := m.SetNX(key, item); err != nil {
t.Fatal(err)
}
}
if m.Len() != n {
t.Fatalf("Invalid length")
}
}
func TestMapDrain(t *testing.T) {
opts := &Options{}
m := New(opts)
defer m.Drain()
testMapSetN(t, m, 100, 100*time.Millisecond)
select {
case <-m.Draining():
t.Fatalf("Expecting not draining")
default:
}
m.Drain()
select {
case <-m.Draining():
default:
t.Fatalf("Expecting draining")
}
if m.Len() != 0 {
t.Fatalf("Invalid length")
}
if m.Get("1") != nil {
t.Fatalf("Not expecting item")
}
item := NewItemWithTTL("value", 100*time.Millisecond)
if err := m.Set("1", item); err != ErrDrained {
t.Fatal(err)
}
if err := m.SetNX("1", item); err != ErrDrained {
t.Fatal(err)
}
if item := m.Delete("1"); item != nil {
t.Fatalf("Not expecting item")
}
}
func TestMapSetItemReuseEvict(t *testing.T) {
var evicted []*testItem
opts := &Options{
OnWillEvict: func(key string, item *Item) {
evicted = append(evicted, &testItem{key, item, time.Now()})
},
}
m := New(opts)
value := NewItemWithTTL("bar", 30*time.Minute)
for i := 0; i < 1000; i++ {
if err := m.Set(fmt.Sprintf("%d", i), value); err != nil {
t.Fatal(err)
}
}
if len(evicted) != 0 {
t.Fatalf("Invalid length")
}
m.Drain()
if len(evicted) != 1000 {
t.Fatalf("Invalid length")
}
}
func testMapSetN(t *testing.T, m *Map, n int, d time.Duration) {
for i := 0; i < n; i++ {
item := NewItemWithTTL("value", d)
if err := m.SetNX(fmt.Sprintf("%d", i), item); err != nil {
t.Fatal(err)
}
}
if m.Len() != n {
t.Fatalf("Invalid length")
}
}
func TestMapSetSetEvict(t *testing.T) {
var evicted []*testItem
opts := &Options{
OnWillEvict: func(key string, item *Item) {
evicted = append(evicted, &testItem{key, item, time.Now()})
},
}
m := New(opts)
defer m.Drain()
item := NewItemWithTTL("hello", 1*time.Second)
if err := m.Set("foo", item); err != nil {
t.Fatal(err)
}
if len(evicted) != 0 {
t.Fatalf("Invalid length")
}
item = NewItemWithTTL("world", 2*time.Second)
if err := m.Set("foo", item); err != nil {
t.Fatal(err)
}
if len(evicted) != 1 {
t.Fatalf("Invalid length")
}
}
func TestMapExpireAlreadyExpired(t *testing.T) {
var expired []*testItem
opts := &Options{
OnWillExpire: func(key string, item *Item) {
expired = append(expired, &testItem{key, item, time.Now()})
},
}
m := New(opts)
defer m.Drain()
time.Sleep(100 * time.Millisecond)
start := time.Now()
expiration := start.Add(-1 * time.Second)
item := NewItem("bar", expiration)
if err := m.Set("foo", item); err != nil {
t.Fatal(err)
}
time.Sleep(100 * time.Millisecond)
if m.Len() != 0 {
t.Fatalf("Invalid length")
}
m.Drain()
if len(expired) != 1 {
t.Fatalf("Expecting %d expired items\n", 1)
}
eitem := expired[0]
diff := eitem.timestamp.Sub(start)
if diff < 0 || diff > 10*time.Millisecond {
t.Fatalf("Wrong expiration time")
}
}
func TestMapGetAlreadyExpired(t *testing.T) {
opts := &Options{}
m := New(opts)
defer m.Drain()
time.Sleep(100 * time.Millisecond)
start := time.Now()
expiration := start.Add(-1 * time.Second)
done := false
for i := 0; i < 1000 && !done; i++ {
item := NewItem("bar", expiration)
if err := m.Set("foo", item); err != nil {
t.Fatal(err)
}
if item := m.Get("foo"); item != nil {
done = true
break
}
if m.Len() != 0 {
t.Fatalf("Invalid length")
}
time.Sleep(10 * time.Millisecond)
}
if !done {
t.Fatalf("Expecting get to succeed")
}
}
func BenchmarkMapGet1(b *testing.B) {
b.StopTimer()
m := New(nil)
if err := m.Set("foo", NewItemWithTTL("bar", 30*time.Minute)); err != nil {
b.Fatal(err)
}
b.StartTimer()
for i := 0; i < b.N; i++ {
m.Get("foo")
}
b.StopTimer()
m.Drain()
}
func BenchmarkMapSet1(b *testing.B) {
b.StopTimer()
m := New(nil)
value := NewItemWithTTL("bar", 30*time.Minute)
b.StartTimer()
for i := 0; i < b.N; i++ {
if err := m.Set("foo", value); err != nil {
b.Fatal(err)
}
}
b.StopTimer()
m.Drain()
}
func BenchmarkMapSetNX1(b *testing.B) {
b.StopTimer()
m := New(nil)
value := NewItemWithTTL("bar", 30*time.Minute)
b.StartTimer()
if err := m.SetNX("foo", value); err != nil {
b.Fatal(err)
}
for i := 0; i < b.N; i++ {
if err := m.SetNX("foo", value); err != ErrExists {
b.Fatal("Expecting already exists")
}
}
b.StopTimer()
m.Drain()
}
func BenchmarkMapDelete1(b *testing.B) {
b.StopTimer()
m := New(nil)
b.StartTimer()
for i := 0; i < b.N; i++ {
m.Delete("foo")
}
b.StopTimer()
m.Drain()
}
func BenchmarkMapSetDelete1(b *testing.B) {
b.StopTimer()
m := New(nil)
value := NewItemWithTTL("bar", 30*time.Minute)
b.StartTimer()
for i := 0; i < b.N; i++ {
if err := m.Set("foo", value); err != nil {
b.Fatal(err)
}
m.Delete("foo")
}
b.StopTimer()
m.Drain()
}
func BenchmarkMapSetDrainN(b *testing.B) {
b.StopTimer()
opts := &Options{
InitialCapacity: b.N,
OnWillEvict: func(key string, item *Item) {
// do nothing
},
}
m := New(opts)
value := NewItemWithTTL("bar", 30*time.Minute)
for i := 0; i < b.N; i++ {
if err := m.Set(fmt.Sprintf("%d", i), value); err != nil {
b.Fatal(err)
}
}
b.StartTimer()
for i := 0; i < b.N; i++ {
m.Drain()
}
b.StopTimer()
}
|
[
1
] |
// Code generated by mockery v1.0.0. DO NOT EDIT.
package mocks
import auth "github.com/transcom/mymove/pkg/auth"
import mock "github.com/stretchr/testify/mock"
import models "github.com/transcom/mymove/pkg/models"
import uuid "github.com/gofrs/uuid"
// ShipmentLineItemFetcher is an autogenerated mock type for the ShipmentLineItemFetcher type
type ShipmentLineItemFetcher struct {
mock.Mock
}
// GetShipmentLineItemsByShipmentID provides a mock function with given fields: shipmentID, session
func (_m *ShipmentLineItemFetcher) GetShipmentLineItemsByShipmentID(shipmentID uuid.UUID, session *auth.Session) ([]models.ShipmentLineItem, error) {
ret := _m.Called(shipmentID, session)
var r0 []models.ShipmentLineItem
if rf, ok := ret.Get(0).(func(uuid.UUID, *auth.Session) []models.ShipmentLineItem); ok {
r0 = rf(shipmentID, session)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]models.ShipmentLineItem)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(uuid.UUID, *auth.Session) error); ok {
r1 = rf(shipmentID, session)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
|
[
4
] |
package vpn
import (
"bufio"
"errors"
"fmt"
log "github.com/sirupsen/logrus"
"io"
"net/http"
"net/http/cookiejar"
"net/url"
"regexp"
"strings"
)
const USTBVpnHost = "n.ustb.edu.cn"
const USTBVpnHttpScheme = "http"
const USTBVpnHttpsScheme = "https"
const USTBVpnWSScheme = "ws"
const USTBVpnWSSScheme = "wss"
type AutoLoginInterface interface {
TestAddr() string
LoginAddr() string
LogoutAddr() string
}
type AutoLogin struct {
Host string
ForceLogout bool
SSLEnabled bool // the vpn server supports https
}
func (al *AutoLogin) TestAddr(ssl bool) string {
if ssl {
return USTBVpnHttpsScheme + "://" + al.Host + "/"
}
return USTBVpnHttpScheme + "://" + al.Host + "/"
}
func (al *AutoLogin) LoginAddr(ssl bool) string {
if ssl {
return USTBVpnHttpsScheme + "://" + al.Host + "/do-login"
}
return USTBVpnHttpScheme + "://" + al.Host + "/do-login"
}
func (al *AutoLogin) LogoutAddr(ssl bool) string {
if ssl {
return USTBVpnHttpsScheme + "://" + al.Host + "/do-confirm-login"
}
return USTBVpnHttpScheme + "://" + al.Host + "/do-confirm-login"
}
// vpnLogin logs in to the vpn automatically and returns the session cookies.
func (al *AutoLogin) vpnLogin(uname, passwd string) ([]*http.Cookie, error) {
// send a get request and check whether the server uses the https protocol,
// and save the https enabled/disabled flag
if httpsEnabled, err := testHttpsEnabled(al.Host); err != nil {
return nil, err
} else {
if httpsEnabled {
al.SSLEnabled = true
}
}
var loginAddress = al.LoginAddr(al.SSLEnabled)
form := url.Values{
"auth_type": {"local"},
"sms_code": {""},
"username": {uname},
"password": {passwd},
}
hc := http.Client{
// disable redirection
// If login succeeds, it will be redirected to the index page
// and the cookie would be lost if we enabled redirection.
CheckRedirect: func(req *http.Request, via []*http.Request) error {
// if upgrade http to https
return http.ErrUseLastResponse
},
}
req, err := http.NewRequest("POST", loginAddress, strings.NewReader(form.Encode())) // todo missing http.
if err != nil {
return nil, err
}
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
if resp, err := hc.Do(req); err != nil {
return nil, err
} else {
defer resp.Body.Close()
cookies := resp.Cookies()
// return cookies or error.
if len(cookies) == 0 {
return nil, fmt.Errorf("no cookie while auto login to %s", loginAddress)
} else {
// test the connection and log out the account if it is already signed in.
if err := al.testConnect(uname, cookies); err != nil {
return nil, err
}
return cookies, nil
}
}
}
func testHttpsEnabled(host string) (bool, error) {
testUrl, err := url.Parse(USTBVpnHttpScheme + "://" + host + "/")
if err != nil {
return false, err
}
httpsSupport := false
hc := http.Client{
// disable redirection
// If login succeeds, it will be redirected to the index page
// and the cookie would be lost if we enabled redirection.
CheckRedirect: func(req *http.Request, via []*http.Request) error {
// if upgrade http to https
if testUrl.Scheme != req.URL.Scheme && testUrl.Path == req.URL.Path { // is http -> https redirection
httpsSupport = true
return nil
}
return http.ErrUseLastResponse
},
}
req, err := http.NewRequest("GET", testUrl.String(), nil)
if err != nil {
return false, err
}
if resp, err := hc.Do(req); err != nil {
return false, err
} else {
defer resp.Body.Close()
return httpsSupport, nil
}
}
func (al *AutoLogin) testConnect(uname string, cookies []*http.Cookie) error {
hc := http.Client{}
req, err := http.NewRequest("GET", al.TestAddr(al.SSLEnabled), nil) // // todo missing http.
if err != nil {
return err
}
jar, _ := cookiejar.New(nil)
jar.SetCookies(req.URL, cookies)
hc.Jar = jar
if resp, err := hc.Do(req); err != nil {
return err
} else {
defer resp.Body.Close()
if found, token, err := al.findLogoutToken(resp.Body); err != nil {
return err
} else {
if found {
if !al.ForceLogout { // if force logout is not enabled, just return an error.
return errors.New("your account has been signed in on another device")
}
log.WithField("token", token).Info("found logout token, we will logout account.")
log.Info("sending logout request.")
if err := al.logoutAccount(uname, token, cookies); err != nil {
return err
}
}
// if we did not find a token in the http response body, we do nothing.
}
}
return nil
}
func (al *AutoLogin) findLogoutToken(rd io.Reader) (bool, string, error) {
reader := bufio.NewReader(rd)
for {
// read a line
if line, _, err := reader.ReadLine(); err != nil {
break
} else {
// if matched.
matched, _ := regexp.Match(`logoutOtherToken[\s]+=[\s]+'[\w]+`, line)
if matched { // matched
subString := strings.Split(string(line), `'`)
if len(subString) >= 2 {
return true, subString[1], nil
} else {
return false, "", errors.New("logout token not fount")
}
}
}
}
return false, "", nil
}
func (al *AutoLogin) logoutAccount(uname, token string, cookies []*http.Cookie) error {
form := url.Values{
"logoutOtherToken": {token},
"username": {uname},
}
hc := http.Client{}
req, err := http.NewRequest("POST", al.LogoutAddr(al.SSLEnabled),
strings.NewReader(form.Encode()))
if err != nil {
return err
}
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
jar, _ := cookiejar.New(nil)
jar.SetCookies(req.URL, cookies)
hc.Jar = jar
if resp, err := hc.Do(req); err != nil {
return err
} else {
defer resp.Body.Close()
return nil // ok
}
}
|
[
4
] |
package ecslogs
import (
"fmt"
"reflect"
"syscall"
"time"
"github.com/segmentio/encoding/json"
)
type EventError struct {
Type string `json:"type,omitempty"`
Error string `json:"error,omitempty"`
Errno int `json:"errno,omitempty"`
Stack interface{} `json:"stack,omitempty"`
OriginalError error `json:"origError,omitempty"`
}
func MakeEventError(err error) EventError {
e := EventError{
Type: reflect.TypeOf(err).String(),
Error: err.Error(),
OriginalError: err,
}
if errno, ok := err.(syscall.Errno); ok {
e.Errno = int(errno)
}
return e
}
type EventInfo struct {
Host string `json:"host,omitempty"`
Source string `json:"source,omitempty"`
ID string `json:"id,omitempty"`
PID int `json:"pid,omitempty"`
UID int `json:"uid,omitempty"`
GID int `json:"gid,omitempty"`
Errors []EventError `json:"errors,omitempty"`
}
func (e EventInfo) Bytes() []byte {
b, _ := json.Marshal(e)
return b
}
func (e EventInfo) String() string {
return string(e.Bytes())
}
type EventData map[string]interface{}
func (e EventData) Bytes() []byte {
b, _ := json.Marshal(e)
return b
}
func (e EventData) String() string {
return string(e.Bytes())
}
type Event struct {
Level Level `json:"level"`
Time time.Time `json:"time"`
Info EventInfo `json:"info"`
Data EventData `json:"data"`
Message string `json:"message"`
}
func Eprintf(level Level, format string, args ...interface{}) Event {
return MakeEvent(level, sprintf(format, args...), args...)
}
func Eprint(level Level, args ...interface{}) Event {
return MakeEvent(level, sprint(args...), args...)
}
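// MakeEvent builds an Event at the given level; any error values passed in
// are collected into Info.Errors so they are preserved when the event is
// serialized to JSON.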
func MakeEvent(level Level, message string, values ...interface{}) Event {
var errors []EventError
for _, val := range values {
switch v := val.(type) {
case error:
errors = append(errors, MakeEventError(v))
}
}
return Event{
Info: EventInfo{Errors: errors},
Data: EventData{},
Level: level,
Message: message,
}
}
func (e Event) Bytes() []byte {
b, _ := json.Marshal(e)
return b
}
func (e Event) String() string {
return string(e.Bytes())
}
func copyEventData(data ...EventData) EventData {
merged := EventData{}
for _, d := range data {
for k, v := range d {
merged[k] = v
}
}
return merged
}
func sprintf(format string, args ...interface{}) string {
return fmt.Sprintf(format, args...)
}
func sprint(args ...interface{}) string {
s := fmt.Sprintln(args...)
return s[:len(s)-1]
}
|
[
7
] |
package main
import (
"bytes"
"strings"
)
func boldWords(words []string, S string) string {
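// Mark every character of S covered by some occurrence of a word in a
// boolean mask, then emit <b>...</b> tags around each maximal run of
// marked characters.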
bold := make([]bool, len(S))
for _, word := range words {
n := strings.Index(S, word)
for n != -1 {
for i := n; i < n+len(word); i++ {
bold[i] = true
}
t := strings.Index(S[n+1:], word)
if t == -1 {
break
}
n = t + n + 1
}
}
// Guard against an empty input string: the mask is empty and there is nothing to wrap.
if len(bold) == 0 {
return ""
}
buf := bytes.Buffer{}
if bold[0] {
buf.WriteString("<b>")
}
for i := 0; i < len(bold); i++ {
buf.WriteByte(S[i])
if i == len(bold)-1 {
if bold[i] {
buf.WriteString("</b>")
}
break
}
if !bold[i] && bold[i+1] {
buf.WriteString("<b>")
}
if bold[i] && !bold[i+1] {
buf.WriteString("</b>")
}
}
println(buf.String())
return buf.String()
}
func main() {
boldWords([]string{"ccb", "b", "d", "cba", "dc"},
"eeaadadadc")
}
|
[
2
] |
package hrtime
import (
"sync"
"sync/atomic"
"time"
)
// Span defines a time.Duration span
type Span struct {
Start time.Duration
Finish time.Duration
}
// Duration returns the duration of the time span.
func (span *Span) Duration() time.Duration {
return span.Finish - span.Start
}
// Stopwatch allows concurrent benchmarking using Now
type Stopwatch struct {
nextLap int32
lapsMeasured int32
spans []Span
wait sync.Mutex
}
// NewStopwatch creates a new concurrent benchmark using Now
func NewStopwatch(count int) *Stopwatch {
if count <= 0 {
panic("must have count at least 1")
}
bench := &Stopwatch{
nextLap: 0,
spans: make([]Span, count),
}
// lock mutex to ensure Wait() blocks until finalize is called
bench.wait.Lock()
return bench
}
// mustBeCompleted checks whether measurement has been completed.
func (bench *Stopwatch) mustBeCompleted() {
if int(atomic.LoadInt32(&bench.lapsMeasured)) < len(bench.spans) {
panic("benchmarking incomplete")
}
}
// Start starts measuring a new lap.
// It returns the lap number to pass in for Stop.
// It will return -1, when all measurements have been made.
//
// Call to Stop with -1 is ignored.
func (bench *Stopwatch) Start() int32 {
lap := atomic.AddInt32(&bench.nextLap, 1) - 1
if int(lap) >= len(bench.spans) {
return -1
}
bench.spans[lap].Start = Now()
return lap
}
// Stop stops measuring the specified lap.
//
// Call to Stop with -1 is ignored.
func (bench *Stopwatch) Stop(lap int32) {
if lap < 0 {
return
}
bench.spans[lap].Finish = Now()
lapsMeasured := atomic.AddInt32(&bench.lapsMeasured, 1)
if int(lapsMeasured) == len(bench.spans) {
bench.finalize()
} else if int(lapsMeasured) > len(bench.spans) {
panic("stop called too many times")
}
}
// finalize finalizes the stopwatch
func (bench *Stopwatch) finalize() {
// release the initial lock such that Wait can proceed.
bench.wait.Unlock()
}
// Wait waits for all measurements to be completed.
func (bench *Stopwatch) Wait() {
// lock waits for finalize to be called by the last measurement.
bench.wait.Lock()
_ = 1 // intentionally empty block, suppress staticcheck SA2001 warning
bench.wait.Unlock()
}
// Spans returns measured time-spans.
func (bench *Stopwatch) Spans() []Span {
bench.mustBeCompleted()
return append(bench.spans[:0:0], bench.spans...)
}
// Durations returns measured durations.
func (bench *Stopwatch) Durations() []time.Duration {
bench.mustBeCompleted()
durations := make([]time.Duration, len(bench.spans))
for i, span := range bench.spans {
durations[i] = span.Duration()
}
return durations
}
// Histogram creates an histogram of all the durations.
//
// It creates binCount bins to distribute the data and uses the
// 99.9 percentile as the last bucket range. However, for a nicer output
// it might choose a larger value.
func (bench *Stopwatch) Histogram(binCount int) *Histogram {
bench.mustBeCompleted()
opts := defaultOptions
opts.BinCount = binCount
return NewDurationHistogram(bench.Durations(), &opts)
}
// HistogramClamp creates an historgram of all the durations clamping minimum and maximum time.
//
// It creates binCount bins to distribute the data and uses the
// maximum as the last bucket.
func (bench *Stopwatch) HistogramClamp(binCount int, min, max time.Duration) *Histogram {
bench.mustBeCompleted()
durations := make([]time.Duration, 0, len(bench.spans))
for _, span := range bench.spans {
duration := span.Duration()
if duration < min {
durations = append(durations, min)
} else {
durations = append(durations, duration)
}
}
opts := defaultOptions
opts.BinCount = binCount
opts.ClampMaximum = float64(max.Nanoseconds())
opts.ClampPercentile = 0
return NewDurationHistogram(durations, &opts)
}
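// ExampleStopwatch is a minimal sketch of concurrent use: each worker claims
// a lap with Start, does its work, and reports with Stop; Wait blocks until
// every lap has been measured.
func ExampleStopwatch() {
const workers = 4
bench := NewStopwatch(workers)
for i := 0; i < workers; i++ {
go func() {
lap := bench.Start()
time.Sleep(time.Millisecond)
bench.Stop(lap)
}()
}
bench.Wait()
_ = bench.Durations()
}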
|
[
1
] |
/*
LeetCode 962: https://leetcode.com/problems/maximum-width-ramp/
*/
package leetcode
func maxWidthRamp(A []int) int {
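// Build a decreasing stack of candidate left endpoints, then scan from the
// right and pop every index whose value is <= A[i]; the widest distance
// popped is the maximum ramp width.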
stack := make([]int, 0)
for i, num := range A {
if len(stack) == 0 || A[stack[len(stack)-1]] > num {
stack = append(stack, i)
}
}
max := 0
for i := len(A) - 1; i >= 0; i-- {
for len(stack) > 0 && A[stack[len(stack)-1]] <= A[i] {
index := stack[len(stack)-1]
stack = stack[:len(stack)-1]
if max < i-index {
max = i - index
}
}
}
return max
}
|
[
2
] |
package main
/*
* @lc app=leetcode id=869 lang=golang
*
* [869] Reordered Power of 2
*
* https://leetcode.com/problems/reordered-power-of-2/description/
*
* algorithms
* Medium (53.38%)
* Likes: 209
* Dislikes: 92
* Total Accepted: 14.7K
* Total Submissions: 27.5K
* Testcase Example: '1'
*
* Starting with a positive integer N, we reorder the digits in any order
* (including the original order) such that the leading digit is not zero.
*
* Return true if and only if we can do this in a way such that the resulting
* number is a power of 2.
 *
 * Example 1:
 *
 * Input: 1
 * Output: true
 *
 * Example 2:
 *
 * Input: 10
 * Output: false
 *
 * Example 3:
 *
 * Input: 16
 * Output: true
 *
 * Example 4:
 *
 * Input: 24
 * Output: false
 *
 * Example 5:
 *
 * Input: 46
 * Output: true
 *
 * Note:
 *
 * 1 <= N <= 10^9
 *
*/
// @lc code=start
func reorderedPowerOf2(N int) bool {
// Compare N's decimal digit counts against those of every power of 2 in
// range (1 <= N <= 10^9, so 2^0 .. 2^30 covers all candidates).
count := func(n int) (c [10]int) {
for ; n > 0; n /= 10 {
c[n%10]++
}
return
}
target := count(N)
for i := uint(0); i <= 30; i++ {
if count(1<<i) == target {
return true
}
}
return false
}
// @lc code=end
|
[
2
] |
package minifyurl
import (
"url-at-minimal-api/internal/external_interfaces/randomizer"
"url-at-minimal-api/internal/external_interfaces/repository"
"url-at-minimal-api/internal/domain"
)
// MinifyURL interface
type MinifyURL interface {
Execute(url string, length, tries int) string
}
// Minifier is a feature used to shorten the given url
type Minifier struct {
repository repository.Repository
randomizer randomizer.Randomizer
}
// New returns a valid instance of Minifier
func New(rep repository.Repository, rand randomizer.Randomizer) *Minifier {
return &Minifier{
repository: rep,
randomizer: rand,
}
}
// Execute minifies the given url to a known shorter version
func (m Minifier) Execute(url string, length, tries int) string {
if tries < 1 {
return ""
}
shorten := m.randomizer.RandomString(length)
err := m.repository.Save(url, shorten)
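// Retry with a fresh random key only when the repository reports it could
// not save the entry (e.g. a key collision); any other outcome returns the
// generated short key as-is.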
if err != domain.ErrCouldNotSaveEntry {
return shorten
}
return m.Execute(url, length, tries-1)
}
|
[
1
] |
package repository
import (
"context"
"encoding/base64"
"fmt"
"time"
"github.com/jinzhu/gorm"
"github.com/sirupsen/logrus"
"github.com/naveenpatilm/go-clean-arch/article"
"github.com/naveenpatilm/go-clean-arch/models"
)
const (
timeFormat = "2006-01-02T15:04:05.999Z07:00" // reduce precision from RFC3339Nano as date format
)
type mysqlArticleRepository struct {
DB *gorm.DB
}
// NewMysqlArticleRepository will create an object that represent the article.Repository interface
func NewMysqlArticleRepository(DB *gorm.DB) article.Repository {
return &mysqlArticleRepository{DB}
}
func (m *mysqlArticleRepository) Fetch(ctx context.Context, cursor string, num int64) ([]*models.Article, error) {
decodedCursor, err := DecodeCursor(cursor)
if err != nil && cursor != "" {
return nil, models.ErrBadParamInput
}
var articles []*models.Article
err = m.DB.Where("created_at > ?", decodedCursor).Order("created_at", true).Find(&articles).Error
if err != nil {
return nil, err
}
if len(articles) > 0 {
return articles, nil
} else {
return nil, models.ErrNotFound
}
}
func (m *mysqlArticleRepository) GetByID(ctx context.Context, id int64) (*models.Article, error) {
var article *models.Article
err := m.DB.First(&article, id).Error
if err != nil {
return nil, err
}
if article != nil {
return article, nil
} else {
return nil, models.ErrNotFound
}
}
func (m *mysqlArticleRepository) GetByTitle(ctx context.Context, title string) (*models.Article, error) {
var article *models.Article
err := m.DB.Where("title = ?", title).First(&article).Error
if err != nil {
return nil, err
}
if article != nil {
return article, nil
} else {
return nil, models.ErrNotFound
}
}
func (m *mysqlArticleRepository) Store(ctx context.Context, a *models.Article) error {
err := m.DB.Create(&a).Error
if err != nil {
return err
}
logrus.Debug("Created At: ", a.CreatedAt)
return nil
}
func (m *mysqlArticleRepository) Delete(ctx context.Context, id int64) error {
res := m.DB.Where("id = ?", id).Delete(models.Article{})
err := res.Error
if err != nil {
return err
}
rowsAffected := res.RowsAffected
if rowsAffected != 1 {
err = fmt.Errorf("Weird Behaviour. Total Affected: %d", rowsAffected)
return err
}
return nil
}
func (m *mysqlArticleRepository) Update(ctx context.Context, ar *models.Article) error {
res := m.DB.Save(&ar)
err := res.Error
if err != nil {
return err
}
affected := res.RowsAffected
if affected != 1 {
err = fmt.Errorf("Weird Behaviour. Total Affected: %d", affected)
return err
}
return nil
}
func DecodeCursor(encodedTime string) (time.Time, error) {
byt, err := base64.StdEncoding.DecodeString(encodedTime)
if err != nil {
return time.Time{}, err
}
timeString := string(byt)
t, err := time.Parse(timeFormat, timeString)
return t, err
}
func EncodeCursor(t time.Time) string {
timeString := t.Format(timeFormat)
return base64.StdEncoding.EncodeToString([]byte(timeString))
}
|
[
2
] |
//
// projecteuler.net
//
// problem 29
//
package main
import (
"fmt"
"math/big"
)
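// unique returns the input slice with duplicate strings removed,
// preserving first-seen order.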
func unique(Slice []string) []string {
keys := make(map[string]bool)
list := []string{}
for _, entry := range Slice {
if _, value := keys[entry]; !value {
keys[entry] = true
list = append(list, entry)
}
}
return list
}
func main() {
a := make([]string, 0)
for i := int64(2); i < 101; i++ {
for j := int64(2); j < 101; j++ {
x := big.NewInt(i)
y := big.NewInt(j)
z := big.NewInt(0)
z = z.Exp(x, y, z)
a = append(a, z.String())
}
}
a = unique(a)
fmt.Printf("%v\n", len(a))
}
|
[
2
] |
package dao
import (
"fmt"
"github.com/jinzhu/gorm"
// register the mysql dialect so gorm.Open("mysql", ...) can find its driver
_ "github.com/jinzhu/gorm/dialects/mysql"
"go-learning/log"
)
var db *gorm.DB
func init() {
}
func dbConn(MyUser, Password, Host, Db string, Port int) *gorm.DB {
connArgs := fmt.Sprintf("%s:%s@(%s:%d)/%s?charset=utf8&parseTime=True&loc=Local", MyUser, Password, Host, Port, Db)
db, err := gorm.Open("mysql", connArgs)
if err != nil {
log.Fatal(err)
}
db.SingularTable(true)
return db
}
func GetDb() *gorm.DB {
return dbConn("root", "root", "127.0.0.1", "spring_jpa", 3306)
}
|
[
2
] |
package main
import (
"encoding/json"
"fmt"
"log"
"time"
"github.com/couchbaselabs/sockjs-go/sockjs"
"github.com/dustin/go-broadcast"
)
var changes_broadcaster = broadcast.NewBroadcaster(100)
var recentChanges = newChangeRing(100)
func init() {
go rememberChanges()
}
func rememberChanges() {
changes_broadcaster.Register(recentChanges.chin)
}
type Change struct {
User Email `json:"user"`
Action string `json:"action"`
Bug APIBug `json:"bug"`
BugID string `json:"bugid"`
Time time.Time `json:"time"`
Status string `json:"status"`
Title string `json:"title"`
Private bool `json:"private"`
}
type changeEligible interface {
changeObjectFor(u User) (Change, error)
}
type changeRing struct {
start int
items []interface{}
chin chan interface{}
req chan chan []interface{}
}
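// changeRing keeps the most recent items in a fixed-size ring buffer; all
// access is funneled through the process goroutine via the chin and req
// channels, so Add and Slice are safe for concurrent use.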
func newChangeRing(size int) *changeRing {
rv := &changeRing{
items: make([]interface{}, 0, size),
chin: make(chan interface{}),
req: make(chan chan []interface{}),
}
go rv.process()
return rv
}
func (cr *changeRing) Add(i interface{}) {
cr.chin <- i
}
func (cr *changeRing) Slice() []interface{} {
ch := make(chan []interface{}, 1)
cr.req <- ch
return <-ch
}
func (cr *changeRing) Latest(n int) []interface{} {
r := cr.Slice()
if len(r) > n {
r = r[len(r)-n:]
}
return r
}
func (cr *changeRing) process() {
for {
select {
case i := <-cr.chin:
cr.addItem(i)
case r := <-cr.req:
r <- cr.slice()
}
}
}
func (cr *changeRing) addItem(i interface{}) {
if len(cr.items) < cap(cr.items) {
cr.items = append(cr.items, i)
} else {
if cr.start == cap(cr.items) {
cr.start = 0
}
cr.items[cr.start] = i
cr.start++
}
}
func (cr *changeRing) slice() []interface{} {
rv := make([]interface{}, 0, cap(cr.items))
for i := cr.start; i < len(cr.items); i++ {
rv = append(rv, cr.items[i])
}
for i := 0; i < cr.start; i++ {
rv = append(rv, cr.items[i])
}
return rv
}
type connection struct {
// The websocket connection.
ws sockjs.Conn
// Buffered channel of outbound messages.
send chan interface{}
// Authenticated User
user User
}
func (c *connection) reader() {
for {
if msg, err := c.ws.ReadMessage(); err == nil {
// this section is unfortunately ugly, they seem to have
// double encoded the JSON string in another string
// first parse the message as a JSON string
var parsedString string
err = json.Unmarshal(msg, &parsedString)
if err != nil {
log.Printf("error decoding message string %v", err)
continue
}
// now parse that string as a JSON object
parsedMessage := map[string]interface{}{}
err = json.Unmarshal([]byte(parsedString), &parsedMessage)
if err != nil {
log.Printf("error decoding message json %v", err)
continue
}
// now if this is an auth message, validate the cookie
if parsedMessage["type"] == "auth" {
switch cookie := parsedMessage["cookie"].(type) {
case string:
user, err := userFromCookie(cookie)
if err == nil {
log.Printf("authenticated realtime stream as user %v", user)
c.user = user
}
}
}
} else {
break
}
}
c.ws.Close()
}
func (c *connection) writer() {
for message := range c.send {
changes := convertMessageToChangeNotifications(message, c.user)
for _, change := range changes {
bytes, err := json.Marshal(change)
if err != nil {
log.Print("Failed to marshall notification to JSON, ignoring")
continue
}
_, err = c.ws.WriteMessage(bytes)
if err != nil {
break
}
}
}
c.ws.Close()
}
func convertMessageToChangeNotifications(message interface{},
connUser User) []interface{} {
co, ok := message.(changeEligible)
if ok {
c, err := co.changeObjectFor(connUser)
if err == nil {
return []interface{}{c}
}
} else {
log.Printf("%T isn't changeEligible", message)
}
return nil
}
func ChangesHandler(conn sockjs.Conn) {
c := &connection{send: make(chan interface{}, 256), ws: conn}
for _, change := range recentChanges.Latest(cap(c.send)) {
c.send <- change
}
changes_broadcaster.Register(c.send)
defer changes_broadcaster.Unregister(c.send)
go c.writer()
c.reader()
}
func loadChangeObject(doctype, docid string) (interface{}, error) {
switch doctype {
case "bug", "bughistory":
bug, err := getBug(docid)
if err != nil {
return bug, err
}
return bugChange{bug.Id,
bug.ModBy,
[]string{bug.ModType},
"",
&bug,
}, nil
case "comment":
return getComment(docid)
}
return nil, fmt.Errorf("Unhandled type: %v", doctype)
}
func loadRecent() {
args := map[string]interface{}{
"descending": true,
"limit": 20,
"stale": false,
}
viewRes := struct {
Rows []struct {
ID string
Key string
Value struct {
Type string
}
}
}{}
err := db.ViewCustom("cbugg", "changes", args, &viewRes)
if err != nil {
log.Printf("Error initializing recent changes: %v", err)
return
}
for i := range viewRes.Rows {
r := viewRes.Rows[len(viewRes.Rows)-i-1]
change, err := loadChangeObject(r.Value.Type, r.ID)
if err == nil {
recentChanges.Add(change)
} else {
log.Printf("Error loading %v/%v: %v",
r.Value.Type, r.ID, err)
}
}
}
|
[
7
] |
package Worker
import "DistributedSystem/MapReduce"
type Config struct {
mapper MapReduce.Mapper
reducer MapReduce.Reducer
outputKeyType interface{}
outputRecordType interface{}
}
func (c *Config) SetMapper(function MapReduce.Mapper) {
c.mapper = function
}
func (c *Config) GetMapper() MapReduce.Mapper {
return c.mapper
}
func (c *Config) SetReducer(function MapReduce.Reducer) {
c.reducer = function
}
func (c *Config) SetOutputKeyType(Type interface{}) {
c.outputKeyType = Type
}
func (c *Config) SetOutputRecordType(Type interface{}) {
c.outputRecordType = Type
}
|
[
2
] |
// Code generated by mockery v2.13.1. DO NOT EDIT.
package mocks
import (
types "github.com/aws-controllers-k8s/runtime/pkg/types"
mock "github.com/stretchr/testify/mock"
)
// Logger is an autogenerated mock type for the Logger type
type Logger struct {
mock.Mock
}
// Debug provides a mock function with given fields: msg, additionalValues
func (_m *Logger) Debug(msg string, additionalValues ...interface{}) {
var _ca []interface{}
_ca = append(_ca, msg)
_ca = append(_ca, additionalValues...)
_m.Called(_ca...)
}
// Enter provides a mock function with given fields: name, additionalValues
func (_m *Logger) Enter(name string, additionalValues ...interface{}) {
var _ca []interface{}
_ca = append(_ca, name)
_ca = append(_ca, additionalValues...)
_m.Called(_ca...)
}
// Exit provides a mock function with given fields: name, err, additionalValues
func (_m *Logger) Exit(name string, err error, additionalValues ...interface{}) {
var _ca []interface{}
_ca = append(_ca, name, err)
_ca = append(_ca, additionalValues...)
_m.Called(_ca...)
}
// Info provides a mock function with given fields: msg, additionalValues
func (_m *Logger) Info(msg string, additionalValues ...interface{}) {
var _ca []interface{}
_ca = append(_ca, msg)
_ca = append(_ca, additionalValues...)
_m.Called(_ca...)
}
// IsDebugEnabled provides a mock function with given fields:
func (_m *Logger) IsDebugEnabled() bool {
ret := _m.Called()
var r0 bool
if rf, ok := ret.Get(0).(func() bool); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// Trace provides a mock function with given fields: name, additionalValues
func (_m *Logger) Trace(name string, additionalValues ...interface{}) types.TraceExiter {
var _ca []interface{}
_ca = append(_ca, name)
_ca = append(_ca, additionalValues...)
ret := _m.Called(_ca...)
var r0 types.TraceExiter
if rf, ok := ret.Get(0).(func(string, ...interface{}) types.TraceExiter); ok {
r0 = rf(name, additionalValues...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(types.TraceExiter)
}
}
return r0
}
// WithValues provides a mock function with given fields: _a0
func (_m *Logger) WithValues(_a0 ...interface{}) {
var _ca []interface{}
_ca = append(_ca, _a0...)
_m.Called(_ca...)
}
type mockConstructorTestingTNewLogger interface {
mock.TestingT
Cleanup(func())
}
// NewLogger creates a new instance of Logger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewLogger(t mockConstructorTestingTNewLogger) *Logger {
mock := &Logger{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
|
[
4
] |
package audit
import (
"context"
"github.com/gojektech/proctor/proctord/kubernetes"
"github.com/gojektech/proctor/proctord/logger"
"github.com/gojektech/proctor/proctord/storage"
"github.com/gojektech/proctor/proctord/utility"
)
type Auditor interface {
AuditJobsExecution(context.Context)
}
type auditor struct {
store storage.Store
kubeClient kubernetes.Client
}
func New(store storage.Store, kubeClient kubernetes.Client) Auditor {
return &auditor{
store: store,
kubeClient: kubeClient,
}
}
func (auditor *auditor) AuditJobsExecution(ctx context.Context) {
jobSubmissionStatus := ctx.Value(utility.JobSubmissionStatusContextKey).(string)
userEmail := ctx.Value(utility.UserEmailContextKey).(string)
if jobSubmissionStatus != utility.JobSubmissionSuccess {
err := auditor.store.JobsExecutionAuditLog(jobSubmissionStatus, utility.JobFailed, "", userEmail, "", "", map[string]string{})
if err != nil {
logger.Error("Error auditing jobs execution", err)
}
return
}
jobName := ctx.Value(utility.JobNameContextKey).(string)
JobNameSubmittedForExecution := ctx.Value(utility.JobNameSubmittedForExecutionContextKey).(string)
imageName := ctx.Value(utility.ImageNameContextKey).(string)
jobArgs := ctx.Value(utility.JobArgsContextKey).(map[string]string)
err := auditor.store.JobsExecutionAuditLog(jobSubmissionStatus, utility.JobWaiting, jobName, userEmail, JobNameSubmittedForExecution, imageName, jobArgs)
if err != nil {
logger.Error("Error auditing jobs execution", err)
}
go auditor.auditJobExecutionStatus(JobNameSubmittedForExecution)
}
func (auditor *auditor) auditJobExecutionStatus(JobNameSubmittedForExecution string) {
status, err := auditor.kubeClient.JobExecutionStatus(JobNameSubmittedForExecution)
if err != nil {
logger.Error("Error getting job execution status", err)
}
err = auditor.store.UpdateJobsExecutionAuditLog(JobNameSubmittedForExecution, status)
if err != nil {
logger.Error("Error auditing jobs execution", err)
}
}
|
[
2
] |
package main
import (
"bufio"
"fmt"
"io"
"math"
"os"
"strconv"
"strings"
)
func arrayManipulation(n int64, queries [][]int64) int64 {
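// Difference-array technique: add k at the start index and subtract k just
// past the end index, then a single prefix-sum pass over the table yields
// the value at every position; the answer is the maximum running sum.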
table := make([]int64, n + 1)
var max int64 = -9999999
var sum int64 = 0
for _, data := range queries{
table[data[0]-1] += data[2]
table[data[1]] -= data[2]
}
for _, data := range table{
sum += data
max = int64(math.Max(float64(sum), float64(max)))
}
return max
}
func main() {
reader := bufio.NewReaderSize(os.Stdin, 16 * 1024 * 1024)
stdout, err := os.Create(os.Getenv("OUTPUT_PATH"))
checkError(err)
defer stdout.Close()
writer := bufio.NewWriterSize(stdout, 16 * 1024 * 1024)
firstMultipleInput := strings.Split(strings.TrimSpace(readLine(reader)), " ")
nTemp, err := strconv.ParseInt(firstMultipleInput[0], 10, 64)
checkError(err)
n := int64(nTemp)
mTemp, err := strconv.ParseInt(firstMultipleInput[1], 10, 64)
checkError(err)
m := int64(mTemp)
var queries [][]int64
for i := 0; i < int(m); i++ {
queriesRowTemp := strings.Split(strings.TrimRight(readLine(reader)," \t\r\n"), " ")
var queriesRow []int64
for _, queriesRowItem := range queriesRowTemp {
queriesItemTemp, err := strconv.ParseInt(queriesRowItem, 10, 64)
checkError(err)
queriesItem := int64(queriesItemTemp)
queriesRow = append(queriesRow, queriesItem)
}
if len(queriesRow) != 3 {
panic("Bad input")
}
queries = append(queries, queriesRow)
}
result := arrayManipulation(n, queries)
fmt.Fprintf(writer, "%d\n", result)
writer.Flush()
}
func readLine(reader *bufio.Reader) string {
str, _, err := reader.ReadLine()
if err == io.EOF {
return ""
}
return strings.TrimRight(string(str), "\r\n")
}
func checkError(err error) {
if err != nil {
panic(err)
}
}
|
[
1
] |
/*
Copyright (c) 2019 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package account
import (
"bytes"
"fmt"
"github.com/openshift-online/ocm-sdk-go"
amv1 "github.com/openshift-online/ocm-sdk-go/accountsmgmt/v1"
)
// GetRolesFromUsers gets all roles a specific user possesses.
func GetRolesFromUsers(accounts []*amv1.Account,
conn *sdk.Connection) (results map[*amv1.Account][]string, err error) {
// Prepare the results:
results = map[*amv1.Account][]string{}
// Prepare a map of accounts indexed by identifier:
accountsMap := map[string]*amv1.Account{}
for _, account := range accounts {
accountsMap[account.ID()] = account
}
// Prepare a query to retrieve all the role bindings that correspond to any of the
// accounts:
ids := &bytes.Buffer{}
for i, account := range accounts {
if i > 0 {
fmt.Fprintf(ids, ", ")
}
fmt.Fprintf(ids, "'%s'", account.ID())
}
query := fmt.Sprintf("account_id in (%s)", ids)
index := 1
size := 100
for {
// Prepare the request:
response, err := conn.AccountsMgmt().V1().RoleBindings().List().
Size(size).
Page(index).
Parameter("search", query).
Send()
if err != nil {
return nil, fmt.Errorf("Can't retrieve roles: %v", err)
}
// Loop through the results and save them:
response.Items().Each(func(item *amv1.RoleBinding) bool {
account := accountsMap[item.Account().ID()]
itemID := item.Role().ID()
if _, ok := results[account]; ok {
if !stringInList(results[account], itemID) {
results[account] = append(results[account], itemID)
}
} else {
results[account] = append(results[account], itemID)
}
return true
})
// Break the loop if the page size is smaller than requested, as that indicates
// that this is the last page:
if response.Size() < size {
break
}
index++
}
return
}
// stringInList returns a bool signifying whether
// a string is in a string array.
func stringInList(strArr []string, key string) bool {
for _, str := range strArr {
if str == key {
return true
}
}
return false
}
|
[
1
] |
package wirenet
import (
"crypto/tls"
"io"
"math"
"time"
)
const (
DefaultKeepAliveInterval = 15 * time.Second
DefaultEnableKeepAlive = true
DefaultReadTimeout = 10 * time.Second
DefaultWriteTimeout = 10 * time.Second
DefaultAcceptBacklog = 256
DefaultSessionCloseTimeout = 5 * time.Second
DefaultRetryMax = 100
DefaultRetryWaitMax = 60 * time.Second
DefaultRetryWaitMin = 5 * time.Second
)
type (
Option func(*wire)
Identification []byte
Token []byte
)
func WithConnectHook(hook func(io.Closer)) Option {
return func(w *wire) {
w.onConnect = hook
}
}
func WithErrorHandler(h ErrorHandler) Option {
return func(w *wire) {
w.errorHandler = h
}
}
func WithSessionOpenHook(hook SessionHook) Option {
return func(w *wire) {
w.openSessHook = hook
}
}
func WithSessionCloseHook(hook SessionHook) Option {
return func(w *wire) {
w.closeSessHook = hook
}
}
func WithIdentification(id Identification, token Token) Option {
return func(w *wire) {
w.identification = id
w.token = token
}
}
func WithTokenValidator(v TokenValidator) Option {
return func(w *wire) {
w.verifyToken = v
}
}
func WithTLS(conf *tls.Config) Option {
return func(w *wire) {
w.tlsConfig = conf
}
}
func WithRetryWait(min, max time.Duration) Option {
return func(w *wire) {
w.retryWaitMax = max
w.retryWaitMin = min
}
}
func WithRetryMax(n int) Option {
return func(w *wire) {
w.retryMax = n
}
}
func WithLogWriter(w io.Writer) Option {
return func(wire *wire) {
wire.transportConf.LogOutput = w
}
}
func WithKeepAliveInterval(interval time.Duration) Option {
return func(wire *wire) {
wire.transportConf.KeepAliveInterval = interval
wire.transportConf.EnableKeepAlive = true
}
}
func WithKeepAlive(flag bool) Option {
return func(wire *wire) {
wire.transportConf.EnableKeepAlive = flag
}
}
func WithReadWriteTimeouts(read, write time.Duration) Option {
return func(wire *wire) {
wire.readTimeout = read
wire.writeTimeout = write
wire.transportConf.ConnectionWriteTimeout = write
}
}
func WithSessionCloseTimeout(dur time.Duration) Option {
return func(w *wire) {
w.sessCloseTimeout = dur
}
}
func WithRetryPolicy(rp RetryPolicy) Option {
return func(w *wire) {
w.retryPolicy = rp
}
}
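// DefaultRetryPolicy implements capped exponential backoff: the wait doubles
// with each attempt and is clamped at max. Assuming attemptNum starts at 0,
// the package defaults (min=5s, max=60s) give waits of roughly
// 5s, 10s, 20s, 40s, 60s, 60s, ...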
func DefaultRetryPolicy(min, max time.Duration, attemptNum int) time.Duration {
m := math.Pow(2, float64(attemptNum)) * float64(min)
wait := time.Duration(m)
if float64(wait) != m || wait > max {
wait = max
}
return wait
}
|
[
1
] |
package deb
import (
"fmt"
"sort"
"strings"
"github.com/aptly-dev/aptly/aptly"
"github.com/aptly-dev/aptly/utils"
)
// Dependency options
const (
// DepFollowSource pulls source packages when required
DepFollowSource = 1 << iota
// DepFollowSuggests pulls from suggests
DepFollowSuggests
// DepFollowRecommends pulls from recommends
DepFollowRecommends
// DepFollowAllVariants follows all variants if depends on "a | b"
DepFollowAllVariants
// DepFollowBuild pulls build dependencies
DepFollowBuild
// DepVerboseResolve emits additional logs while dependencies are being resolved
DepVerboseResolve
)
// PackageList is list of unique (by key) packages
//
// It could be seen as repo snapshot, repo contents, result of filtering,
// merge, etc.
//
// If indexed, PackageList starts supporting searching
type PackageList struct {
// Straight list of packages as map
packages map[string]*Package
// Indexed list of packages, sorted by name internally
packagesIndex []*Package
// Map of packages for each virtual package (provides)
providesIndex map[string][]*Package
// Package key generation function
keyFunc func(p *Package) string
// Allow duplicates?
duplicatesAllowed bool
// Has index been prepared?
indexed bool
}
// PackageConflictError means that package can't be added to the list due to error
type PackageConflictError struct {
error
}
// Verify interface
var (
_ sort.Interface = &PackageList{}
_ PackageCatalog = &PackageList{}
)
func packageShortKey(p *Package) string {
return string(p.ShortKey(""))
}
func packageFullKey(p *Package) string {
return string(p.Key(""))
}
// NewPackageList creates empty package list without duplicate package
func NewPackageList() *PackageList {
return NewPackageListWithDuplicates(false, 1000)
}
// NewPackageListWithDuplicates creates empty package list which might allow or block duplicate packages
func NewPackageListWithDuplicates(duplicates bool, capacity int) *PackageList {
if capacity == 0 {
capacity = 1000
}
result := &PackageList{
packages: make(map[string]*Package, capacity),
duplicatesAllowed: duplicates,
keyFunc: packageShortKey,
}
if duplicates {
result.keyFunc = packageFullKey
}
return result
}
// NewPackageListFromRefList loads packages list from PackageRefList
func NewPackageListFromRefList(reflist *PackageRefList, collection *PackageCollection, progress aptly.Progress) (*PackageList, error) {
// empty reflist
if reflist == nil {
return NewPackageList(), nil
}
result := NewPackageListWithDuplicates(false, reflist.Len())
if progress != nil {
progress.InitBar(int64(reflist.Len()), false, aptly.BarGeneralBuildPackageList)
}
err := reflist.ForEach(func(key []byte) error {
p, err2 := collection.ByKey(key)
if err2 != nil {
return fmt.Errorf("unable to load package with key %s: %s", key, err2)
}
if progress != nil {
progress.AddBar(1)
}
return result.Add(p)
})
if progress != nil {
progress.ShutdownBar()
}
if err != nil {
return nil, err
}
return result, nil
}
// Has checks whether package is already in the list
func (l *PackageList) Has(p *Package) bool {
key := l.keyFunc(p)
_, ok := l.packages[key]
return ok
}
// Add appends package to package list, additionally checking for uniqueness
func (l *PackageList) Add(p *Package) error {
key := l.keyFunc(p)
existing, ok := l.packages[key]
if ok {
if !existing.Equals(p) {
return &PackageConflictError{fmt.Errorf("conflict in package %s", p)}
}
return nil
}
l.packages[key] = p
if l.indexed {
for _, provides := range p.Provides {
l.providesIndex[provides] = append(l.providesIndex[provides], p)
}
i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.lessPackages(p, l.packagesIndex[j]) })
// insert p into l.packagesIndex in position i
l.packagesIndex = append(l.packagesIndex, nil)
copy(l.packagesIndex[i+1:], l.packagesIndex[i:])
l.packagesIndex[i] = p
}
return nil
}
// ForEach calls handler for each package in list
func (l *PackageList) ForEach(handler func(*Package) error) error {
var err error
for _, p := range l.packages {
err = handler(p)
if err != nil {
return err
}
}
return err
}
// ForEachIndexed calls handler for each package in list in indexed order
func (l *PackageList) ForEachIndexed(handler func(*Package) error) error {
if !l.indexed {
panic("list not indexed, can't iterate")
}
var err error
for _, p := range l.packagesIndex {
err = handler(p)
if err != nil {
return err
}
}
return err
}
// Len returns number of packages in the list
func (l *PackageList) Len() int {
return len(l.packages)
}
// Append adds content from one package list to another
func (l *PackageList) Append(pl *PackageList) error {
if l.indexed {
panic("Append not supported when indexed")
}
for k, p := range pl.packages {
existing, ok := l.packages[k]
if ok {
if !existing.Equals(p) {
return fmt.Errorf("conflict in package %s", p)
}
} else {
l.packages[k] = p
}
}
return nil
}
// Remove removes package from the list, and updates index when required
func (l *PackageList) Remove(p *Package) {
delete(l.packages, l.keyFunc(p))
if l.indexed {
for _, provides := range p.Provides {
for i, pkg := range l.providesIndex[provides] {
if pkg.Equals(p) {
// remove l.ProvidesIndex[provides][i] w/o preserving order
l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][i], l.providesIndex[provides] =
nil, l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][:len(l.providesIndex[provides])-1]
break
}
}
}
i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.packagesIndex[j].Name >= p.Name })
for i < len(l.packagesIndex) && l.packagesIndex[i].Name == p.Name {
if l.packagesIndex[i].Equals(p) {
// remove l.packagesIndex[i] preserving order
copy(l.packagesIndex[i:], l.packagesIndex[i+1:])
l.packagesIndex[len(l.packagesIndex)-1] = nil
l.packagesIndex = l.packagesIndex[:len(l.packagesIndex)-1]
break
}
i++
}
}
}
// Architectures returns the list of architectures present in packages.
//
// If includeSource is true, meta-architecture "source" would be present in the list
func (l *PackageList) Architectures(includeSource bool) (result []string) {
result = make([]string, 0, 10)
for _, pkg := range l.packages {
if pkg.Architecture != ArchitectureAll && (pkg.Architecture != ArchitectureSource || includeSource) && !utils.StrSliceHasItem(result, pkg.Architecture) {
result = append(result, pkg.Architecture)
}
}
return
}
// Strings builds list of strings with package keys
func (l *PackageList) Strings() []string {
result := make([]string, l.Len())
i := 0
for _, p := range l.packages {
result[i] = string(p.Key(""))
i++
}
return result
}
// FullNames builds a list of package {name}_{version}_{arch}
func (l *PackageList) FullNames() []string {
result := make([]string, l.Len())
i := 0
for _, p := range l.packages {
result[i] = p.GetFullName()
i++
}
return result
}
// depSliceDeduplicate removes dups in slice of Dependencies
func depSliceDeduplicate(s []Dependency) []Dependency {
l := len(s)
if l < 2 {
return s
}
if l == 2 {
if s[0] == s[1] {
return s[0:1]
}
return s
}
found := make(map[string]bool, l)
j := 0
for i, x := range s {
h := x.Hash()
if !found[h] {
found[h] = true
s[j] = s[i]
j++
}
}
return s[:j]
}
// VerifyDependencies looks for missing dependencies in package list.
//
// Analysis would be performed for each architecture, in specified sources
func (l *PackageList) VerifyDependencies(options int, architectures []string, sources *PackageList, progress aptly.Progress) ([]Dependency, error) {
l.PrepareIndex()
missing := make([]Dependency, 0, 128)
if progress != nil {
progress.InitBar(int64(l.Len())*int64(len(architectures)), false, aptly.BarGeneralVerifyDependencies)
}
for _, arch := range architectures {
cache := make(map[string]bool, 2048)
for _, p := range l.packagesIndex {
if progress != nil {
progress.AddBar(1)
}
if !p.MatchesArchitecture(arch) {
continue
}
for _, dep := range p.GetDependencies(options) {
variants, err := ParseDependencyVariants(dep)
if err != nil {
return nil, fmt.Errorf("unable to process package %s: %s", p, err)
}
variants = depSliceDeduplicate(variants)
variantsMissing := make([]Dependency, 0, len(variants))
for _, dep := range variants {
if dep.Architecture == "" {
dep.Architecture = arch
}
hash := dep.Hash()
satisfied, ok := cache[hash]
if !ok {
satisfied = sources.Search(dep, false) != nil
cache[hash] = satisfied
}
if !satisfied && !ok {
variantsMissing = append(variantsMissing, dep)
}
if satisfied && options&DepFollowAllVariants == 0 {
variantsMissing = nil
break
}
}
missing = append(missing, variantsMissing...)
}
}
}
if progress != nil {
progress.ShutdownBar()
}
if options&DepVerboseResolve == DepVerboseResolve && progress != nil {
missingStr := make([]string, len(missing))
for i := range missing {
missingStr[i] = missing[i].String()
}
progress.ColoredPrintf("@{y}Missing dependencies:@| %s", strings.Join(missingStr, ", "))
}
return missing, nil
}
// Swap swaps two packages in index
func (l *PackageList) Swap(i, j int) {
l.packagesIndex[i], l.packagesIndex[j] = l.packagesIndex[j], l.packagesIndex[i]
}
func (l *PackageList) lessPackages(iPkg, jPkg *Package) bool {
if iPkg.Name == jPkg.Name {
cmp := CompareVersions(iPkg.Version, jPkg.Version)
if cmp == 0 {
return iPkg.Architecture < jPkg.Architecture
}
return cmp == 1
}
return iPkg.Name < jPkg.Name
}
// Less compares two packages by name (lexicographical) and version (latest to oldest)
func (l *PackageList) Less(i, j int) bool {
return l.lessPackages(l.packagesIndex[i], l.packagesIndex[j])
}
// PrepareIndex prepares list for indexing
func (l *PackageList) PrepareIndex() {
if l.indexed {
return
}
l.packagesIndex = make([]*Package, l.Len())
l.providesIndex = make(map[string][]*Package, 128)
i := 0
for _, p := range l.packages {
l.packagesIndex[i] = p
i++
for _, provides := range p.Provides {
l.providesIndex[provides] = append(l.providesIndex[provides], p)
}
}
sort.Sort(l)
l.indexed = true
}
// Scan searches package index using full scan
func (l *PackageList) Scan(q PackageQuery) (result *PackageList) {
result = NewPackageListWithDuplicates(l.duplicatesAllowed, 0)
for _, pkg := range l.packages {
if q.Matches(pkg) {
result.Add(pkg)
}
}
return
}
// SearchSupported returns true for PackageList
func (l *PackageList) SearchSupported() bool {
return true
}
// SearchByKey looks up package by exact key reference
func (l *PackageList) SearchByKey(arch, name, version string) (result *PackageList) {
result = NewPackageListWithDuplicates(l.duplicatesAllowed, 0)
pkg := l.packages["P"+arch+" "+name+" "+version]
if pkg != nil {
result.Add(pkg)
}
return
}
// Search searches package index for specified package(s) using optimized queries
func (l *PackageList) Search(dep Dependency, allMatches bool) (searchResults []*Package) {
if !l.indexed {
panic("list not indexed, can't search")
}
i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.packagesIndex[j].Name >= dep.Pkg })
for i < len(l.packagesIndex) && l.packagesIndex[i].Name == dep.Pkg {
p := l.packagesIndex[i]
if p.MatchesDependency(dep) {
searchResults = append(searchResults, p)
if !allMatches {
break
}
}
i++
}
if dep.Relation == VersionDontCare {
for _, p := range l.providesIndex[dep.Pkg] {
if dep.Architecture == "" || p.MatchesArchitecture(dep.Architecture) {
searchResults = append(searchResults, p)
if !allMatches {
break
}
}
}
}
return
}
// Filter filters package index by specified queries (ORed together), possibly pulling dependencies
func (l *PackageList) Filter(queries []PackageQuery, withDependencies bool, source *PackageList, dependencyOptions int, architecturesList []string) (*PackageList, error) {
return l.FilterWithProgress(queries, withDependencies, source, dependencyOptions, architecturesList, nil)
}
// FilterWithProgress filters package index by specified queries (ORed together), possibly pulling dependencies and displays progress
func (l *PackageList) FilterWithProgress(queries []PackageQuery, withDependencies bool, source *PackageList, dependencyOptions int, architecturesList []string, progress aptly.Progress) (*PackageList, error) {
if !l.indexed {
panic("list not indexed, can't filter")
}
result := NewPackageList()
for _, query := range queries {
result.Append(query.Query(l))
}
if withDependencies {
added := result.Len()
result.PrepareIndex()
dependencySource := NewPackageList()
if source != nil {
dependencySource.Append(source)
}
dependencySource.Append(result)
dependencySource.PrepareIndex()
// while some new dependencies were discovered
for added > 0 {
added = 0
// find missing dependencies
missing, err := result.VerifyDependencies(dependencyOptions, architecturesList, dependencySource, progress)
if err != nil {
return nil, err
}
// try to satisfy dependencies
for _, dep := range missing {
if dependencyOptions&DepFollowAllVariants == 0 {
// dependency might have already been satisfied
// with packages already been added
//
// when follow-all-variants is enabled, we need to try to expand anyway,
// as even if dependency is satisfied now, there might be other ways to satisfy dependency
if result.Search(dep, false) != nil {
if dependencyOptions&DepVerboseResolve == DepVerboseResolve && progress != nil {
progress.ColoredPrintf("@{y}Already satisfied dependency@|: %s with %s", &dep, result.Search(dep, true))
}
continue
}
}
searchResults := l.Search(dep, true)
if len(searchResults) > 0 {
for _, p := range searchResults {
if result.Has(p) {
continue
}
if dependencyOptions&DepVerboseResolve == DepVerboseResolve && progress != nil {
progress.ColoredPrintf("@{g}Injecting package@|: %s", p)
}
result.Add(p)
dependencySource.Add(p)
added++
if dependencyOptions&DepFollowAllVariants == 0 {
break
}
}
} else {
if dependencyOptions&DepVerboseResolve == DepVerboseResolve && progress != nil {
progress.ColoredPrintf("@{r}Unsatisfied dependency@|: %s", dep.String())
}
}
}
}
}
return result, nil
}
|
[
4
] |
package types
import (
"io"
"strings"
)
type Param struct {
Direction *Direction `parser:"@@?"`
Type string `parser:"@Ident"`
Const string `parser:"@('const')?"`
Pointer string `parser:"@('*')*"`
Name string `parser:"@Ident ','?"`
// Processed
GoType string
// This is used to generate setup code for the Go inputs
setupTemplate string
cleanupTemplate string
LocalName string
decl *InterfaceMethod
VtableCallInput string
}
func (p *Param) IsOutputParam() bool {
if p.Direction == nil {
return false
}
return p.Direction.Dir == "out"
}
func (p *Param) Process(decl *InterfaceMethod) {
p.decl = decl
p.GoType = IdlTypeToGoType(p.Type)
switch p.GoType {
case "string", "uint", "int", "float64", "bool":
return
}
if p.Pointer != "" {
p.GoType = "*" + p.GoType
}
}
func (p *Param) processSetup() {
p.processSetupInputs()
p.processSetupOutputs()
p.processVtableCallInput()
}
func (p *Param) SetupCode(w io.Writer) {
if p.setupTemplate == "" {
return
}
data := struct {
Param *Param
ErrorValues string
}{
Param: p,
ErrorValues: p.decl.ErrorValues(),
}
mustTemplate("Param Setup: "+p.setupTemplate, p.setupTemplate, &data, w)
}
func (p *Param) CleanupCode(w io.Writer) {
if p.cleanupTemplate == "" {
return
}
mustTemplate("Param Cleanup: "+p.cleanupTemplate, p.cleanupTemplate, p, w)
}
func (p *Param) IsInputParam() bool {
return !p.IsOutputParam()
}
func (p *Param) processVtableCallInput() {
variableName := p.GetVariableName()
if strings.HasPrefix(p.Type, "int") || strings.HasPrefix(p.Type, "uint") || p.Type == "bool" || p.Type == "float32" || p.Type == "float64" {
p.VtableCallInput = "uintptr(" + variableName + ")"
return
}
switch p.Type {
case "LPCWSTR", "LPWSTR":
p.VtableCallInput = "uintptr(unsafe.Pointer(" + variableName + "))"
return
}
if p.Pointer == "**" {
p.VtableCallInput = "uintptr(unsafe.Pointer(&" + variableName + "))"
return
}
if p.Pointer == "*" {
if p.IsOutputParam() {
p.VtableCallInput = "uintptr(unsafe.Pointer(&" + variableName + "))"
} else {
p.VtableCallInput = "uintptr(unsafe.Pointer(" + variableName + "))"
}
return
}
if p.IsEnum() {
p.VtableCallInput = "uintptr(unsafe.Pointer(&" + variableName + "))"
return
}
p.VtableCallInput = "uintptr(unsafe.Pointer(&" + variableName + "))"
}
func (p *Param) ClearLocalName() string {
p.LocalName = ""
return ""
}
func (p *Param) GetVariableName() string {
if p.LocalName != "" {
return p.LocalName
}
return p.Name
}
func (p *Param) IsEnum() bool {
return p.decl.decl.decl.library.enums.Contains(p.Type)
}
func (p *Param) processSetupInputs() {
if !p.IsInputParam() {
return
}
switch p.GoType {
case "string":
// We need to convert to *uint16
p.setupTemplate = "inputStringSetup.tmpl"
p.LocalName = "_" + p.Name
p.decl.decl.includes.AddUnique(`"golang.org/x/sys/windows"`)
}
}
func (p *Param) processSetupOutputs() {
if !p.IsOutputParam() {
return
}
switch p.GoType {
case "string":
p.LocalName = "_" + p.Name
p.setupTemplate = "outputStringSetup.tmpl"
p.cleanupTemplate = "outputStringCleanup.tmpl"
p.decl.decl.includes.AddUnique(`"golang.org/x/sys/windows"`)
default:
p.setupTemplate = "outputDefaultSetup.tmpl"
}
if p.Pointer != "" {
p.decl.decl.includes.AddUnique(`"unsafe"`)
}
}
type Direction struct {
Dir string `parser:"'[' @('out'|'in')"`
Retval string `parser:"(',' @('retval'|'size_is' '(' Ident ')') )? ']'"`
}
|
[
7
] |
package services
import (
"net/http"
"qiniu.com/avaspark/net"
)
type Resp struct {
Code int `json:"code"`
Message string `json:"message"`
Data interface{} `json:"data,omitempty"`
}
func Default(Req *http.Request, Rw http.ResponseWriter) {
resp := Resp{
Code: 100,
Message: "welcome to ava spark",
}
net.WriteResp(Rw, resp, nil)
}
|
[
2
] |
// Copyright 2018 The CUE Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"io/ioutil"
"os"
"strings"
"unicode"
"github.com/spf13/cobra"
"cuelang.org/go/cue/ast"
"cuelang.org/go/cue/ast/astutil"
"cuelang.org/go/cue/format"
"cuelang.org/go/cue/literal"
"cuelang.org/go/cue/load"
"cuelang.org/go/cue/parser"
"cuelang.org/go/cue/token"
"cuelang.org/go/encoding/json"
"cuelang.org/go/internal/third_party/yaml"
)
func newImportCmd(c *Command) *cobra.Command {
cmd := &cobra.Command{
Use: "import",
Short: "convert other data formats to CUE files",
Long: `import converts other data formats, like JSON and YAML to CUE files
The following file formats are currently supported:
Format Extensions
JSON .json .jsonl .ndjson
YAML .yaml .yml
protobuf .proto
Files can either be specified explicitly, or inferred from the
specified packages. In either case, the file extension is
replaced with .cue. By default the command fails if the target
file already exists; the -f flag overrides this.
Examples:
# Convert individual files:
$ cue import foo.json bar.json # create foo.cue and bar.cue
# Convert all json files in the indicated directories:
$ cue import ./... -type=json
The "flags" help topic describes how to assign values to a
specific path within a CUE namespace. Some examples of that
Examples:
$ cat <<EOF > foo.yaml
kind: Service
name: booster
EOF
# include the parsed file as an emit value:
$ cue import foo.yaml
$ cat foo.cue
{
kind: Service
name: booster
}
# include the parsed file at the root of the CUE file:
$ cue import -f foo.yaml
$ cat foo.cue
kind: Service
name: booster
# include the import config at the mystuff path
$ cue import -f -l '"mystuff"' foo.yaml
$ cat foo.cue
mystuff: {
kind: Service
name: booster
}
# append another object to the input file
$ cat <<EOF >> foo.yaml
---
kind: Deployment
name: booster
replicas: 1
EOF
# base the path values on the input
$ cue import -f -l 'strings.ToLower(kind)' -l name foo.yaml
$ cat foo.cue
service: booster: {
kind: "Service"
name: "booster"
}
# base the path values on the input and file name
$ cue import -f --with-context -l 'path.Base(filename)' -l data.kind foo.yaml
$ cat foo.cue
"foo.yaml": Service: {
kind: "Service"
name: "booster"
}
"foo.yaml": Deployment: {
kind: "Deployment"
name: "booster"
replicas: 1
}
# include all files as list elements
$ cue import -f -list foo.yaml
$ cat foo.cue
[{
kind: "Service"
name: "booster"
}, {
kind: "Deployment"
name: "booster"
replicas: 1
}]
# collate files with the same path into a list
$ cue import -f -list -l 'strings.ToLower(kind)' foo.yaml
$ cat foo.cue
service: [{
kind: "Service"
name: "booster"
}]
deployment: [{
kind: "Deployment"
name: "booster"
replicas: 1
}]
Embedded data files
The --recursive or -R flag enables the parsing of fields that are string
representations of data formats themselves. A field that can be parsed is
replaced with a call encoding the data from a structured form that is placed
in a sibling field.
It is also possible to recursively hoist data formats:
Example:
$ cat <<EOF > example.json
"a": {
"data": '{ "foo": 1, "bar": 2 }',
}
EOF
$ cue import -R example.json
$ cat example.cue
import "encoding/json"
a: {
data: json.Encode(_data),
_data = {
foo: 1
bar: 2
}
}
`,
RunE: mkRunE(c, runImport),
}
addOutFlags(cmd.Flags(), false)
addOrphanFlags(cmd.Flags())
cmd.Flags().Bool(string(flagFiles), false, "split multiple entries into different files")
cmd.Flags().String(string(flagType), "", "only apply to files of this type")
cmd.Flags().BoolP(string(flagForce), "f", false, "force overwriting existing files")
cmd.Flags().Bool(string(flagDryrun), false, "only run simulation")
cmd.Flags().BoolP(string(flagRecursive), "R", false, "recursively parse string values")
return cmd
}
const (
flagFiles flagName = "files"
flagProtoPath flagName = "proto_path"
flagWithContext flagName = "with-context"
)
// TODO: factor out rooting of orphaned files.
func runImport(cmd *Command, args []string) (err error) {
b, err := parseArgs(cmd, args, &load.Config{DataFiles: true})
if err != nil {
return err
}
pkgFlag := flagPackage.String(cmd)
for _, pkg := range b.insts {
pkgName := pkgFlag
if pkgName == "" {
pkgName = pkg.PkgName
}
// TODO: allow if there is a unique package name.
if pkgName == "" && len(b.insts) > 1 {
err = fmt.Errorf("must specify package name with the -p flag")
exitOnErr(cmd, err, true)
}
}
for _, f := range b.imported {
err := handleFile(b, f)
if err != nil {
return err
}
}
exitOnErr(cmd, err, true)
return nil
}
func handleFile(b *buildPlan, f *ast.File) (err error) {
cueFile := f.Filename
if out := flagOutFile.String(b.cmd); out != "" {
cueFile = out
}
if cueFile != "-" {
switch _, err := os.Stat(cueFile); {
case os.IsNotExist(err):
case err == nil:
if !flagForce.Bool(b.cmd) {
// TODO: mimic old behavior: write to stderr, but do not exit
// with error code. Consider what is best to do here.
stderr := b.cmd.Command.OutOrStderr()
fmt.Fprintf(stderr, "skipping file %q: already exists\n", cueFile)
return nil
}
default:
return fmt.Errorf("error creating file: %v", err)
}
}
if flagRecursive.Bool(b.cmd) {
h := hoister{fields: map[string]bool{}}
h.hoist(f)
}
return writeFile(b.cmd, f, cueFile)
}
func writeFile(cmd *Command, f *ast.File, cueFile string) error {
b, err := format.Node(f, format.Simplify())
if err != nil {
return fmt.Errorf("error formatting file: %v", err)
}
if cueFile == "-" {
_, err := cmd.OutOrStdout().Write(b)
return err
}
return ioutil.WriteFile(cueFile, b, 0644)
}
type hoister struct {
fields map[string]bool
}
func (h *hoister) hoist(f *ast.File) {
ast.Walk(f, nil, func(n ast.Node) {
name := ""
switch x := n.(type) {
case *ast.Field:
name, _, _ = ast.LabelName(x.Label)
case *ast.Alias:
name = x.Ident.Name
}
if name != "" {
h.fields[name] = true
}
})
_ = astutil.Apply(f, func(c astutil.Cursor) bool {
n := c.Node()
switch n.(type) {
case *ast.Comprehension:
return false
}
return true
}, func(c astutil.Cursor) bool {
switch f := c.Node().(type) {
case *ast.Field:
name, _, _ := ast.LabelName(f.Label)
if name == "" {
return false
}
lit, ok := f.Value.(*ast.BasicLit)
if !ok || lit.Kind != token.STRING {
return false
}
str, err := literal.Unquote(lit.Value)
if err != nil {
return false
}
expr, enc := tryParse(str)
if expr == nil {
return false
}
pkg := c.Import("encoding/" + enc)
if pkg == nil {
return false
}
// found a replaceable string
dataField := h.uniqueName(name, "_", "cue_")
f.Value = ast.NewCall(
ast.NewSel(pkg, "Marshal"),
ast.NewIdent(dataField))
// TODO: use definitions instead
c.InsertAfter(astutil.ApplyRecursively(&ast.Alias{
Ident: ast.NewIdent(dataField),
Expr: expr,
}))
}
return true
})
}
func tryParse(str string) (s ast.Expr, pkg string) {
b := []byte(str)
if json.Valid(b) {
expr, err := parser.ParseExpr("", b)
if err != nil {
// TODO: report error
return nil, ""
}
switch expr.(type) {
case *ast.StructLit, *ast.ListLit:
default:
return nil, ""
}
return expr, "json"
}
if expr, err := yaml.Unmarshal("", b); err == nil {
switch expr.(type) {
case *ast.StructLit, *ast.ListLit:
default:
return nil, ""
}
return expr, "yaml"
}
return nil, ""
}
func (h *hoister) uniqueName(base, prefix, typ string) string {
base = strings.Map(func(r rune) rune {
if unicode.In(r, unicode.L, unicode.N) {
return r
}
return '_'
}, base)
name := prefix + typ + base
for {
if !h.fields[name] {
h.fields[name] = true
return name
}
typ += "x"
name = prefix + typ + base
}
}
|
[
7
] |
package db
import (
"database/sql"
"errors"
"fmt"
"gnusocial/config"
"log"
"math/rand"
"strings"
_ "github.com/lib/pq" // postgres driver
)
func GetDBConn(dbname string) *sql.DB {
// dbConnAddr := "postgresql://%s@%s:%s/%s?sslmode=disable"
psqlInfo := fmt.Sprintf("host=%s port=%s user=%s "+
"password=%s dbname=%s sslmode=disable", config.DB_ADDR, config.DB_PORT, config.DB_USER, config.DB_PASSWORD, dbname)
dbConn, err := sql.Open("postgres", psqlInfo)
// sql.Open("postgres",fmt.Sprintf(dbConnAddr, config.DB_USER, config.DB_ADDR, config.DB_PORT, dbname))
if err != nil {
log.Println("Can't connect to DB:", dbname)
log.Fatal(err)
} else {
log.Println("Connected to DB:", dbname)
}
return dbConn
}
func GetNewRowIDForTable(dbConn *sql.DB, table string) string {
var rowid int32
for {
rowid = rand.Int31n(2147483647)
q := fmt.Sprintf("SELECT id FROM \"%s\" WHERE id = %d", table, rowid)
if v, err := DataCall1(dbConn, q); err != nil {
fmt.Println(q)
log.Println("@db.GetNewRowIDForTable: ", table)
log.Fatal(err)
} else if v == nil {
break
}
}
return fmt.Sprint(rowid)
}
func RunTxWQnArgsReturningId(tx *sql.Tx, query string, args ...interface{}) (int, error) {
lastInsertId := -1
err := tx.QueryRow(query, args...).Scan(&lastInsertId)
if err != nil || lastInsertId == -1 {
log.Println("# Can't insert!", err)
tx.Rollback()
log.Println(query, args)
log.Println("Transaction rolled back!")
return lastInsertId, err
}
return lastInsertId, err
}
func RunTxWQnArgs(tx *sql.Tx, query string, args ...interface{}) error {
if _, err := tx.Exec(query, args...); err != nil {
tx.Rollback()
if !strings.Contains(err.Error(), "row_desc_pk") {
log.Println("# Can't execute!", err)
log.Println(query, args)
log.Println("Transaction rolled back!")
} else {
return errors.New("# ROW_DESC ROWID EXISTS!")
}
return err
}
return nil
}
func DataCall(dbConn *sql.DB, sql string, args ...interface{}) []map[string]string {
var result []map[string]string
rows, err := dbConn.Query(sql, args...)
if err != nil {
log.Fatal(err)
}
cols, err := rows.Columns()
if err != nil {
log.Fatal(err)
}
for rows.Next() {
data := make(map[string]string)
columns := make([]string, len(cols))
columnPointers := make([]interface{}, len(cols))
for i := range columns {
columnPointers[i] = &columns[i]
}
if err := rows.Scan(columnPointers...); err != nil {
log.Fatal(err)
}
for i, col := range cols {
data[col] = columns[i]
}
result = append(result, data)
}
rows.Close()
return result
}
func DataCall1(db *sql.DB, SQL string, args ...interface{}) (map[string]interface{}, error) {
// db := GetDBConn(app)
// log.Println(SQL, args)
if rows, err := db.Query(SQL+" LIMIT 1", args...); err != nil {
// log.Println(SQL, args)
// log.Println("## DB ERROR: ", err)
// log.Fatal("check datacall1 in stencil.db")
return nil, err
} else {
defer rows.Close()
if colNames, err := rows.Columns(); err != nil {
return nil, err
} else {
if rows.Next() {
var data = make(map[string]interface{})
cols := make([]interface{}, len(colNames))
colPtrs := make([]interface{}, len(colNames))
for i := 0; i < len(colNames); i++ {
colPtrs[i] = &cols[i]
}
// for rows.Next() {
err = rows.Scan(colPtrs...)
if err != nil {
log.Fatal(err)
}
for i, col := range cols {
data[colNames[i]] = col
}
// Do something with the map
// for key, val := range data {
// fmt.Println("Key:", key, "Value Type:", reflect.TypeOf(val), fmt.Sprint(val))
// }
return data, nil
} else {
return nil, nil
}
}
}
}
|
[
2
] |
package iirepo_apps
import (
"github.com/reiver/go-iirepo/logger"
"os"
"path/filepath"
"strings"
"syscall"
)
func List(path string) ([][]string, error) {
iirepo_logger.Debugf("iirepo_apps.List(%q): begin", path)
defer iirepo_logger.Debugf("iirepo_apps.List(%q): end", path)
appspath, err := Locate(path)
if nil != err {
return nil, err
}
iirepo_logger.Debugf("iirepo_apps.List(%q): appspath = %q", path, appspath)
{
_, err := os.Stat(appspath)
if nil != err {
switch patherror := err.(type) {
case *os.PathError:
switch errno := patherror.Err.(type) {
case syscall.Errno:
if syscall.ENOENT == errno {
iirepo_logger.Debugf("iirepo_apps.List(%q): repo exists, but %s/ not created yet (therefore no apps) (note: this is not considered an error)", path, Name())
return nil, nil
}
}
}
return nil, err
}
}
var apps [][]string
err = filepath.Walk(appspath, func(apppath string, info os.FileInfo, err error) error {
if nil != err {
return err
}
if appspath == apppath {
return nil
}
if !strings.HasPrefix(apppath, appspath) {
return nil
}
appName := filepath.Base(apppath)
iirepo_logger.Debugf("iirepo_apps.List(%q): app-name = %q", path, appName)
app := []string{appName}
apps = append(apps, app)
return nil
})
if nil != err {
return nil, err
}
return apps, nil
}
|
[
7
] |
package router
import (
"github.com/gin-gonic/gin"
v1 "github.com/zewei1022/lemon-gin-web-framework/api/v1"
)
func InitBookRouter(Router *gin.RouterGroup) {
BookRouter := Router.Group("book")
{
BookRouter.GET("/findBook", v1.FindBook)
BookRouter.GET("/getBookList", v1.GetBookList)
BookRouter.POST("/createBook", v1.CreateBook)
BookRouter.POST("/updateBook", v1.UpdateBook)
BookRouter.POST("/deleteBook", v1.DeleteBook)
}
}
|
[
2
] |
package o3
// In an array of length n whose elements all lie in the range 0..n-1, find any one duplicated number.
// Approach that modifies the original array:
// swap each value into its matching index until a collision is found.
func FindDuplicateNum(nums []int) int {
for i := 0; i < len(nums); i++ {
for nums[i] != i {
if nums[i] == nums[nums[i]] {
return nums[i]
}
nums[i], nums[nums[i]] = nums[nums[i]], nums[i]
}
}
return -1
}
// Approach that does not modify the original array:
// binary search over the value range.
func FindDuplicateNum2(nums []int) int {
i, j := 0, len(nums)-1
for i <= j {
mid := i + (j-i)/2
count := countRangeNumInNums(nums, i, mid)
if i == j && count > 1 {
return i
}
if count > mid-i+1 {
j = mid
} else {
i = mid + 1
}
}
return -1
}
// countRangeNumInNums counts how many numbers in nums fall within [min, max].
func countRangeNumInNums(nums []int, min, max int) int {
count := 0
for _, num := range nums {
if num >= min && num <= max {
count++
}
}
return count
}
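// A minimal usage sketch (added for illustration, not part of the original file):
// both approaches report a duplicated value for the same input.
func exampleFindDuplicateNum() {
nums := []int{2, 3, 1, 0, 2, 5, 3}
// FindDuplicateNum reorders its argument, so pass a copy to keep nums intact.
dup1 := FindDuplicateNum(append([]int(nil), nums...)) // 2
dup2 := FindDuplicateNum2(nums) // 2
_, _ = dup1, dup2
}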
|
[
1
] |
package command_executor
import (
"encoding/json"
"log"
. "meta/error"
pb "meta/msg"
. "meta/processor/command_executor/msg"
. "meta/utils/exec"
)
type CommandExecutor struct {
}
func (c *CommandExecutor) Command(in *pb.Msg) (*pb.Msg, error) {
var content CommandContent
err := json.Unmarshal(in.Content, &content)
if err != nil {
log.Printf("Failed to load")
return GetErrorMsg(err)
}
stdout, stderr, exitCode := RunCommand(content.Command, content.Args...)
result := CommandResult{
Stdout: stdout,
Stderr: stderr,
Status: exitCode,
}
resultBytes, err := json.Marshal(result)
if err != nil {
//https://stackoverflow.com/questions/61949913/why-cant-i-get-a-non-nil-response-and-err-from-grpc
log.Printf("could not marshal: %s", result)
return GetErrorMsg(err)
}
return &pb.Msg{
Type: pb.OK.Id,
Content: resultBytes,
}, nil
}
func (c *CommandExecutor) Dispatch(in *pb.Msg) (*pb.Msg, error) {
switch in.Type {
case pb.COMMAND.Id:
return c.Command(in)
}
return GetErrorMsg(MSGTYPE_NOT_FOUND{})
}
|
[
7
] |
package main
func main() {
}
func transpose(A [][]int) [][]int {
m := len(A)
if m == 0 {
return nil
}
n := len(A[0])
b := make([][]int, n)
for i := 0; i < n; i++ {
b[i] = make([]int, m)
}
for k, v := range A {
for kk, vv := range v {
b[kk][k] = vv
}
}
return b
}
|
[
2
] |
package gen
import (
"context"
"fmt"
"time"
"github.com/ergo-services/ergo/etf"
)
// EnvKey
type EnvKey string
// Process
type Process interface {
Core
// Spawn create a new process with parent
Spawn(name string, opts ProcessOptions, object ProcessBehavior, args ...etf.Term) (Process, error)
// RemoteSpawn creates a new process at a remote node. The object name is a registered
// behavior on the remote node, registered using RegisterBehavior(...). The given options will be stored
// in the process environment using node.EnvKeyRemoteSpawn as a key
RemoteSpawn(node string, object string, opts RemoteSpawnOptions, args ...etf.Term) (etf.Pid, error)
RemoteSpawnWithTimeout(timeout int, node string, object string, opts RemoteSpawnOptions, args ...etf.Term) (etf.Pid, error)
// Name returns process name used on starting.
Name() string
// RegisterName associates the given name with the pid (it does not override the name registered on starting)
RegisterName(name string) error
// UnregisterName unregister named process. Unregistering name is allowed to the owner only
UnregisterName(name string) error
// NodeName returns node name
NodeName() string
// NodeStop stops the node
NodeStop()
// NodeUptime returns node lifespan
NodeUptime() int64
// Info returns process details
Info() ProcessInfo
// Self returns the registered process identifier (Pid) belonging to this process
Self() etf.Pid
// Direct make a direct request to the actor (gen.Application, gen.Supervisor, gen.Server or
// inherited from gen.Server actor) with default timeout 5 seconds
Direct(request interface{}) (interface{}, error)
// DirectWithTimeout make a direct request to the actor with the given timeout (in seconds)
DirectWithTimeout(request interface{}, timeout int) (interface{}, error)
// Send sends a message in fashion of 'erlang:send'. The value of 'to' can be a Pid, registered local name
// or gen.ProcessID{RegisteredName, NodeName}
Send(to interface{}, message etf.Term) error
// SendAfter starts a timer. When the timer expires, the message sends to the process
// identified by 'to'. 'to' can be a Pid, registered local name or
// gen.ProcessID{RegisteredName, NodeName}. Returns cancel function in order to discard
// sending a message. CancelFunc returns a bool value. If it returns false, then the timer has
// already expired and the message has been sent.
SendAfter(to interface{}, message etf.Term, after time.Duration) CancelFunc
// Exit initiates graceful stopping of the process
Exit(reason string) error
// Kill immediately stops process
Kill()
// CreateAlias creates a new alias for the Process
CreateAlias() (etf.Alias, error)
// DeleteAlias deletes the given alias
DeleteAlias(alias etf.Alias) error
// ListEnv returns a map of configured environment variables.
// It also includes environment variables from the GroupLeader, Parent and Node.
// which are overlapped by priority: Process(Parent(GroupLeader(Node)))
ListEnv() map[EnvKey]interface{}
// SetEnv set environment variable with given name. Use nil value to remove variable with given name.
SetEnv(name EnvKey, value interface{})
// Env returns value associated with given environment name.
Env(name EnvKey) interface{}
// Wait waits until process stopped
Wait()
// WaitWithTimeout waits until process stopped. Return ErrTimeout
// if given timeout is exceeded
WaitWithTimeout(d time.Duration) error
// Link creates a link between the calling process and another process.
// Links are bidirectional and there can only be one link between two processes.
// Repeated calls to Process.Link(Pid) have no effect. If one of the participants
// of a link terminates, it will send an exit signal to the other participant, causing
// termination of that participant. If the process has set a trap using Process.SetTrapExit(true), the exit signal transforms into a MessageExit and is delivered as a regular message.
Link(with etf.Pid) error
// Unlink removes the link, if there is one, between the calling process and
// the process referred to by Pid.
Unlink(with etf.Pid) error
// IsAlive returns whether the process is alive
IsAlive() bool
// SetTrapExit enables/disables the trap on terminate process. When a process is trapping exits,
// it will not terminate when an exit signal is received. Instead, the signal is transformed
// into a 'gen.MessageExit' which is put into the mailbox of the process just like a regular message.
SetTrapExit(trap bool)
// TrapExit returns whether the trap was enabled on this process
TrapExit() bool
// Compression returns true if compression is enabled for this process
Compression() bool
// SetCompression enables/disables compression for the messages sent outside of this node
SetCompression(enabled bool)
// CompressionLevel returns the compression level for the process
CompressionLevel() int
// SetCompressionLevel defines compression level. Value must be in range:
// 1 (best speed) ... 9 (best compression), or -1 for the default compression level
SetCompressionLevel(level int) bool
// CompressionThreshold returns compression threshold for the process
CompressionThreshold() int
// SetCompressionThreshold defines the minimal size for the message that must be compressed
// Value must be greater than DefaultCompressionThreshold (1024)
SetCompressionThreshold(threshold int) bool
// MonitorNode creates monitor between the current process and node. If Node fails or does not exist,
// the message MessageNodeDown is delivered to the process.
MonitorNode(name string) etf.Ref
// DemonitorNode removes monitor. Returns false if the given reference wasn't found
DemonitorNode(ref etf.Ref) bool
// MonitorProcess creates monitor between the processes.
// Allowed types for the 'process' value: etf.Pid, gen.ProcessID
// When a process monitor is triggered, a MessageDown sends to the caller.
// Note: The monitor request is an asynchronous signal. That is, it takes
// time before the signal reaches its destination.
MonitorProcess(process interface{}) etf.Ref
// DemonitorProcess removes monitor. Returns false if the given reference wasn't found
DemonitorProcess(ref etf.Ref) bool
// Behavior returns the object this process runs on.
Behavior() ProcessBehavior
// GroupLeader returns group leader process. Usually it points to the application process.
GroupLeader() Process
// Parent returns parent process. It returns nil if this process was spawned using Node.Spawn.
Parent() Process
// Context returns process context.
Context() context.Context
// Children returns list of children pid (Application, Supervisor)
Children() ([]etf.Pid, error)
// Links returns list of the process pids this process has linked to.
Links() []etf.Pid
// Monitors returns the list of monitors created by this process, by pid.
Monitors() []etf.Pid
// MonitorsByName returns the list of monitors created by this process, by name.
MonitorsByName() []ProcessID
// MonitoredBy returns the list of pids of processes that monitor this process.
MonitoredBy() []etf.Pid
// Aliases returns list of aliases of this process.
Aliases() []etf.Alias
// RegisterEvent
RegisterEvent(event Event, messages ...EventMessage) error
UnregisterEvent(event Event) error
MonitorEvent(event Event) error
DemonitorEvent(event Event) error
SendEventMessage(event Event, message EventMessage) error
PutSyncRequest(ref etf.Ref) error
CancelSyncRequest(ref etf.Ref)
WaitSyncReply(ref etf.Ref, timeout int) (etf.Term, error)
PutSyncReply(ref etf.Ref, term etf.Term, err error) error
ProcessChannels() ProcessChannels
}
// ProcessInfo struct with process details
type ProcessInfo struct {
PID etf.Pid
Name string
CurrentFunction string
Status string
MessageQueueLen int
Links []etf.Pid
Monitors []etf.Pid
MonitorsByName []ProcessID
MonitoredBy []etf.Pid
Aliases []etf.Alias
Dictionary etf.Map
TrapExit bool
GroupLeader etf.Pid
Compression bool
}
// ProcessOptions
type ProcessOptions struct {
// Context allows mixing the system context with the custom one. E.g., to limit
// the lifespan using context.WithTimeout. This context MUST be based on the
// other Process' context. Otherwise, you get the error lib.ErrProcessContext
Context context.Context
// MailboxSize defines the length of message queue for the process
MailboxSize uint16
// DirectboxSize defines the length of message queue for the direct requests
DirectboxSize uint16
// GroupLeader
GroupLeader Process
// Env set the process environment variables
Env map[EnvKey]interface{}
// Fallback defines the process to where messages will be forwarded
// if the mailbox is overflowed. The tag value could be used to
// differentiate the source processes. Forwarded messages are wrapped
// into the MessageFallback struct.
Fallback ProcessFallback
}
// ProcessFallback
type ProcessFallback struct {
Name string
Tag string
}
// RemoteSpawnRequest
type RemoteSpawnRequest struct {
From etf.Pid
Ref etf.Ref
Options RemoteSpawnOptions
}
// RemoteSpawnOptions defines options for RemoteSpawn method
type RemoteSpawnOptions struct {
// Name register associated name with spawned process
Name string
// Monitor enables monitor on the spawned process using provided reference
Monitor etf.Ref
// Link enables link between the calling and spawned processes
Link bool
// Function in order to support {M,F,A} request to the Erlang node
Function string
}
// ProcessChannels
type ProcessChannels struct {
Mailbox <-chan ProcessMailboxMessage
Direct <-chan ProcessDirectMessage
GracefulExit <-chan ProcessGracefulExitRequest
}
// ProcessMailboxMessage
type ProcessMailboxMessage struct {
From etf.Pid
Message interface{}
}
// ProcessDirectMessage
type ProcessDirectMessage struct {
Ref etf.Ref
Message interface{}
Err error
}
// ProcessGracefulExitRequest
type ProcessGracefulExitRequest struct {
From etf.Pid
Reason string
}
// ProcessState
type ProcessState struct {
Process
State interface{}
}
// ProcessBehavior interface contains methods you should implement to make your own process behavior
type ProcessBehavior interface {
ProcessInit(Process, ...etf.Term) (ProcessState, error)
ProcessLoop(ProcessState, chan<- bool) string // method which implements control flow of process
}
// Core the common set of methods provided by Process and node.Node interfaces
type Core interface {
// ProcessByName returns Process for the given name.
// Returns nil if it doesn't exist (not found) or terminated.
ProcessByName(name string) Process
// ProcessByPid returns Process for the given Pid.
// Returns nil if it doesn't exist (not found) or terminated.
ProcessByPid(pid etf.Pid) Process
// ProcessByAlias returns Process for the given alias.
// Returns nil if it doesn't exist (not found) or terminated
ProcessByAlias(alias etf.Alias) Process
// ProcessInfo returns the details about given Pid
ProcessInfo(pid etf.Pid) (ProcessInfo, error)
// ProcessList returns the list of running processes
ProcessList() []Process
// MakeRef creates an unique reference within this node
MakeRef() etf.Ref
// IsAlias checks whether the given alias belongs to an alive process on this node.
// If the process died all aliases are cleaned up and this function returns
// false for the given alias. For alias from the remote node always returns false.
IsAlias(etf.Alias) bool
// IsMonitor returns true if the given reference is a monitor
IsMonitor(ref etf.Ref) bool
// RegisterBehavior
RegisterBehavior(group, name string, behavior ProcessBehavior, data interface{}) error
// RegisteredBehavior
RegisteredBehavior(group, name string) (RegisteredBehavior, error)
// RegisteredBehaviorGroup
RegisteredBehaviorGroup(group string) []RegisteredBehavior
// UnregisterBehavior
UnregisterBehavior(group, name string) error
}
// RegisteredBehavior
type RegisteredBehavior struct {
Behavior ProcessBehavior
Data interface{}
}
// ProcessID long notation of registered process {process_name, node_name}
type ProcessID struct {
Name string
Node string
}
// String returns the string representation of a ProcessID value
func (p ProcessID) String() string {
return fmt.Sprintf("<%s:%s>", p.Name, p.Node)
}
// MessageDown delivers as a message to Server's HandleInfo callback of the process
// that created monitor using MonitorProcess.
// Reason values:
// - the exit reason of the process
// - 'noproc' (process did not exist at the time of monitor creation)
// - 'noconnection' (no connection to the node where the monitored process resides)
// - 'noproxy' (no connection to the proxy this node had a connection through; the monitored process could still be alive)
type MessageDown struct {
Ref etf.Ref // a monitor reference
ProcessID ProcessID // if monitor was created by name
Pid etf.Pid
Reason string
}
// MessageNodeDown delivers as a message to Server's HandleInfo callback of the process
// that created monitor using MonitorNode
type MessageNodeDown struct {
Ref etf.Ref
Name string
}
// MessageProxyDown delivers as a message to Server's HandleInfo callback of the process
// that created monitor using MonitorNode if the connection to the node was through the proxy
// nodes and one of them went down.
type MessageProxyDown struct {
Ref etf.Ref
Node string
Proxy string
Reason string
}
// MessageExit delivers to Server's HandleInfo callback when trap exit is enabled using SetTrapExit(true)
// Reason values:
// - the exit reason of the process
// - 'noproc' (process did not exist at the time of link creation)
// - 'noconnection' (no connection to the node where the linked process resides)
// - 'noproxy' (no connection to the proxy this node had a connection through; the linked process could still be alive)
type MessageExit struct {
Pid etf.Pid
Reason string
}
// MessageFallback delivers to the process specified as a fallback process in ProcessOptions.Fallback.Name if the mailbox has been overflowed
type MessageFallback struct {
Process etf.Pid
Tag string
Message etf.Term
}
// MessageDirectChildren type intended to be used in Process.Children which returns []etf.Pid
// You can handle this type of message in your HandleDirect callback to enable Process.Children
// support for your gen.Server actor.
type MessageDirectChildren struct{}
// IsMessageDown
func IsMessageDown(message etf.Term) (MessageDown, bool) {
var md MessageDown
switch m := message.(type) {
case MessageDown:
return m, true
}
return md, false
}
// IsMessageExit
func IsMessageExit(message etf.Term) (MessageExit, bool) {
var me MessageExit
switch m := message.(type) {
case MessageExit:
return m, true
}
return me, false
}
// IsMessageProxyDown
func IsMessageProxyDown(message etf.Term) (MessageProxyDown, bool) {
var mpd MessageProxyDown
switch m := message.(type) {
case MessageProxyDown:
return m, true
}
return mpd, false
}
// IsMessageFallback
func IsMessageFallback(message etf.Term) (MessageFallback, bool) {
var mf MessageFallback
switch m := message.(type) {
case MessageFallback:
return m, true
}
return mf, false
}
type CancelFunc func() bool
type EventMessage interface{}
type Event string
// MessageEventDown delivers to the process which monitored EventType if the owner
// of this EventType has terminated
type MessageEventDown struct {
Event Event
Reason string
}
|
[
7
] |
package actions
import (
"database/sql"
"profira-backend/db/models"
"profira-backend/db/repositories/contracts"
"strings"
)
type BedRepository struct {
DB *sql.DB
}
func NewBedRepository(DB *sql.DB) contracts.IBedRepository {
bedWhereParams = []interface{}{}
return &BedRepository{DB: DB}
}
const (
bedSelectStatement = `select id,bed_code,clinic_id,name,address,pic_name,phone_number,email,is_use_able,treatments,created_at,updated_at,deleted_at`
)
var (
bedWhereParams = []interface{}{}
bedWhereStatement = `where (bed_code like $1 or lower(name) like $1) and deleted_at is null`
)
func (repository BedRepository) scanRows(rows *sql.Rows) (res models.Bed, err error) {
err = rows.Scan(&res.ID, &res.BedCode, &res.Clinic.ID, &res.Clinic.Name, &res.Clinic.Address, &res.Clinic.PICName, &res.Clinic.PhoneNumber, &res.Clinic.Email, &res.IsUseAble, &res.Treatments, &res.CreatedAt, &res.UpdatedAt,
&res.DeletedAt)
if err != nil {
return res, err
}
return res, nil
}
func (repository BedRepository) scanRow(row *sql.Row) (res models.Bed, err error) {
err = row.Scan(&res.ID, &res.BedCode, &res.Clinic.ID, &res.Clinic.Name, &res.Clinic.Address, &res.Clinic.PICName, &res.Clinic.PhoneNumber, &res.Clinic.Email, &res.IsUseAble, &res.Treatments, &res.CreatedAt, &res.UpdatedAt,
&res.DeletedAt)
if err != nil {
return res, err
}
return res, nil
}
func (repository BedRepository) Browse(clinicID, search, order, sort string, limit, offset int) (data []models.Bed, count int, err error) {
// build the where clause locally so repeated calls do not keep appending to the shared statement
whereStatement := bedWhereStatement
bedWhereParams = []interface{}{"%" + strings.ToLower(search) + "%"}
limitPlaceholder := ` limit $2 offset $3`
if clinicID != "" {
whereStatement += ` and clinic_id=$2`
bedWhereParams = append(bedWhereParams, clinicID)
limitPlaceholder = ` limit $3 offset $4`
}
bedWhereParams = append(bedWhereParams, []interface{}{limit, offset}...)
statement := bedSelectStatement + ` from "clinic_beds" ` + whereStatement + ` order by ` + order + ` ` + sort + limitPlaceholder
rows, err := repository.DB.Query(statement, bedWhereParams...)
if err != nil {
return data, count, err
}
for rows.Next() {
temp, err := repository.scanRows(rows)
if err != nil {
return data, count, err
}
data = append(data, temp)
}
// the count query reuses only the where-clause parameters (without limit/offset)
statement = `select count(id) from "clinic_beds" ` + whereStatement
err = repository.DB.QueryRow(statement, bedWhereParams[:len(bedWhereParams)-2]...).Scan(&count)
if err != nil {
return data, count, err
}
return data, count, nil
}
func (repository BedRepository) BrowseAll(clinicID, search string) (data []models.Bed, err error) {
whereStatement := bedWhereStatement
bedWhereParams = []interface{}{"%" + strings.ToLower(search) + "%"}
if clinicID != "" {
whereStatement += ` and clinic_id=$2`
bedWhereParams = append(bedWhereParams, clinicID)
}
statement := bedSelectStatement + ` from "clinic_beds" ` + whereStatement
rows, err := repository.DB.Query(statement, bedWhereParams...)
if err != nil {
return data, err
}
for rows.Next() {
temp, err := repository.scanRows(rows)
if err != nil {
return data, err
}
data = append(data, temp)
}
return data, nil
}
func (repository BedRepository) ReadBy(column, value, operator string) (data models.Bed, err error) {
statement := bedSelectStatement + ` from "clinic_beds" where ` + column + `` + operator + `$1 and "deleted_at" is null`
row := repository.DB.QueryRow(statement, value)
data, err = repository.scanRow(row)
if err != nil {
return data, err
}
return data, nil
}
func (BedRepository) Edit(input models.Bed, tx *sql.Tx) (err error) {
statement := `update "beds" set bed_code=$1, clinic_id=$2, is_use_able=$3, updated_at=$4 where id=$5 returning id`
_, err = tx.Exec(statement, input.BedCode, input.ClinicID, input.IsUseAble, input.UpdatedAt, input.ID)
if err != nil {
return err
}
return nil
}
func (repository BedRepository) Add(input models.Bed, tx *sql.Tx) (res string, err error) {
statement := `insert into "beds" (bed_code,clinic_id,is_use_able,created_at,updated_at) values($1,$2,$3,$4,$5) returning id`
err = repository.DB.QueryRow(statement, input.BedCode, input.ClinicID, input.IsUseAble, input.CreatedAt, input.UpdatedAt).Scan(&res)
if err != nil {
return res, err
}
return res, nil
}
func (BedRepository) DeleteBy(column, value, operator string, input models.Bed, tx *sql.Tx) (err error) {
statement := `update "beds" set updated_at=$1, deleted_at=$2 where ` + column + `` + operator + `$3 returning id`
_, err = tx.Exec(statement, input.UpdatedAt, input.DeletedAt.Time, value)
if err != nil {
return err
}
return nil
}
func (repository BedRepository) CountBy(ID, column, value, operator string) (res int, err error) {
countWhereStatement := `where ` + column + `` + operator + `$1 and deleted_at is null`
countWhereParams := []interface{}{value}
if ID != "" {
countWhereStatement = `where (` + column + `` + operator + `$1 and deleted_at is null) and id <> $2`
countWhereParams = []interface{}{value, ID}
}
statement := `select count(id) from "clinic_beds" ` + countWhereStatement
err = repository.DB.QueryRow(statement, countWhereParams...).Scan(&res)
if err != nil {
return res, err
}
return res, nil
}
|
[
2
] |
package mugglepay
import (
"bytes"
"crypto/md5"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
)
func NewMugglepay(key string) *Mugglepay {
mgp := &Mugglepay{
ApplicationKey: key,
ApiUrl: "https://api.mugglepay.com/v1",
}
return mgp
}
type Mugglepay struct {
ApplicationKey string
ApiUrl string
CallBackUrl string
CancelUrl string
SuccessUrl string
}
type Order struct {
OrderId string `json:"order_id"`
UserId int64 `json:"user_id"`
MerchantOrderId string `json:"merchant_order_id"`
Title string `json:"title"`
Description string `json:"description"`
CallBackUrl string `json:"callback_url"`
CancelUrl string `json:"cancel_url"`
SuccessUrl string `json:"success_url"`
PriceAmount float64 `json:"price_amount"`
PriceCurrency string `json:"price_currency"`
Status string `json:"status"`
Notified string `json:"notified"`
CreatedAt string `json:"created_at"`
UpdatedAt string `json:"updated_at"`
PayAmount float64 `json:"pay_amount"`
PayCurrency string `json:"pay_currency"`
IsSelf bool `json:"is_self"`
Mobile bool `json:"mobile"`
Fast bool `json:"fast"`
Token string `json:"token"`
PaidAt string `json:"paid_at"`
ReceiveCurrency string `json:"receive_currency"`
}
type Invoice struct {
InvoiceId string `json:"invoice_id"`
OrderId string `json:"order_id"`
PayAmount float64 `json:"pay_amount"`
PayCurrency string `json:"pay_currency"`
Status string `json:"status"`
CreatedAt string `json:"created_at"`
CreatedAtT int64 `json:"created_at_t"`
ExpiredAt string `json:"expired_at"`
ExpiredAtT int64 `json:"expired_at_t"`
MerchantOrderId string `json:"merchant_order_id"`
ReceiveAmount float64 `json:"receive_amount"`
ReceiveCurrency string `json:"receive_currency"`
Qrcode string `json:"qrcode"`
QrcodeLg string `json:"qrcodeLg"`
Address string `json:"address"`
Memo string `json:"memo"`
}
type ServerOrder struct {
Status int `json:"status"`
Order Order `json:"order"`
Merchant Merchant `json:"merchant"`
PaymentUrl string `json:"payment_url"`
Invoice Invoice `json:"invoice"`
Permission string `json:"permission"`
}
type Merchant struct {
AcceptBtc bool `json:"accept_btc"`
AcceptUsdt bool `json:"accept_usdt"`
AcceptBch bool `json:"accept_bch"`
AcceptEth bool `json:"accept_eth"`
AcceptEos bool `json:"accept_eos"`
AcceptLtc bool `json:"accept_ltc"`
AcceptBnb bool `json:"accept_bnb"`
AcceptBusd bool `json:"accept_busd"`
AcceptCusd bool `json:"accept_cusd"`
AcceptAlipay bool `json:"accept_alipay"`
AcceptWechat bool `json:"accept_wechat"`
WalletUserHash string `json:"wallet_user_hash"`
WalletUserEnabled bool `json:"wallet_user_enabled"`
EmailVerified bool `json:"email_verified"`
Price map[string]interface{} `json:"price"`
Permission string `json:"permission"`
}
type Callback struct {
MerchantOrderId string `json:"merchant_order_id"`
OrderId string `json:"order_id"`
Status string `json:"status"`
PriceAmount float64 `json:"price_amount"`
PriceCurrency string `json:"price_currency"`
PayAmount float64 `json:"pay_amount"`
PayCurrency string `json:"pay_currency"`
CreatedAt string `json:"created_at"`
CreatedAtT int64 `json:"created_at_t"`
Token string `json:"token"`
Meta Meta `json:"meta"`
}
type Meta struct {
Payment string `json:"payment"`
TotalAmount string `json:"total_amount"`
TradeNo string `json:"trade_no"`
OutTradeNo string `json:"out_trade_no"`
}
// CreateOrder creates an order and returns the resulting ServerOrder
func (mgp *Mugglepay) CreateOrder(order *Order) (ServerOrder, error) {
var sorder ServerOrder
if mgp.ApplicationKey == "" {
return sorder, errors.New("application key cannot be null")
}
if order.MerchantOrderId == "" {
return sorder, errors.New("merchant order id cannot be null")
}
if order.PriceCurrency == "" {
order.PriceCurrency = "CNY"
}
if mgp.CallBackUrl == "" {
// without a callback URL fiat payment is unavailable; only cryptocurrencies can be used by default
order.PayCurrency = ""
}
order.CallBackUrl = mgp.CallBackUrl
if mgp.CancelUrl != "" {
order.CancelUrl = mgp.CancelUrl + order.MerchantOrderId
}
if mgp.SuccessUrl != "" {
order.SuccessUrl = mgp.SuccessUrl + order.MerchantOrderId
}
// sign the order
order.sign(mgp.ApplicationKey)
jsonOrder, _ := json.Marshal(order)
reqest, _ := http.NewRequest("POST", fmt.Sprintf("%s/orders", mgp.ApiUrl), bytes.NewBuffer(jsonOrder))
reqest.Header.Add("content-type", "application/json")
http_unmarshal(reqest, &sorder, mgp.ApplicationKey)
http_unmarshal(reqest, &sorder, mgp.ApplicationKey)
return sorder, nil
}
// sign computes the order token from the merchant order id and the application secret
func (order *Order) sign(secret string) {
q := url.Values{}
q.Set("merchant_order_id", order.MerchantOrderId)
q.Set("secret", secret)
q.Set("type", "FIAT")
order.Token = strings.ToLower(fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%x", md5.Sum([]byte(q.Encode())))+secret))))
}
// VerifyOrder returns true if the order has been paid, false if it is unpaid, cancelled or fraudulent
func (mgp *Mugglepay) VerifyOrder(callback *Callback) bool {
if mgp.ApplicationKey == "" {
return false
}
order := &Order{MerchantOrderId: callback.MerchantOrderId}
order.sign(mgp.ApplicationKey)
// verify the signature
if order.Token != callback.Token {
return false
}
if callback.Status == "PAID" {
return true
}
return false
}
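// A minimal, hypothetical callback-handler sketch (added for illustration, not
// part of the original package): decode the notification body and let
// VerifyOrder check both the signature and the PAID status.
func handlePaymentCallback(mgp *Mugglepay, body []byte) bool {
var cb Callback
if err := json.Unmarshal(body, &cb); err != nil {
return false
}
return mgp.VerifyOrder(&cb)
}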
// GetOrder fetches the ServerOrder for the given gateway order id
func (mgp *Mugglepay) GetOrder(OrderId string) (ServerOrder, error) {
var sorder ServerOrder
if OrderId == "" {
return sorder, errors.New("order id cannot be null")
}
reqest, _ := http.NewRequest("GET", fmt.Sprintf("%s/orders/%s", mgp.ApiUrl, OrderId), nil)
http_unmarshal(reqest, &sorder, mgp.ApplicationKey)
return sorder, nil
}
// http_unmarshal sends the HTTP request and unmarshals the JSON response into sorder
func http_unmarshal(reqest *http.Request, sorder *ServerOrder, key string) {
reqest.Header.Add("token", key)
client := &http.Client{}
response, err := client.Do(reqest)
if err != nil {
// without a response there is nothing to unmarshal
return
}
defer response.Body.Close()
body, _ := ioutil.ReadAll(response.Body)
_ = json.Unmarshal(body, sorder)
}
// GetUrl extracts the payment address from the invoice
func (sorder *ServerOrder) GetUrl() {
getUrl := func(longurl, key string) string {
var res string
if u, err := url.Parse(longurl); err == nil {
if p, err := url.ParseQuery(u.RawQuery); err == nil {
if val, ok := p[key]; ok {
res = val[0]
}
}
}
return res
}
switch sorder.Invoice.PayCurrency {
case "ALIPAY":
if rurl := getUrl(sorder.Invoice.Qrcode, "url"); rurl != "" {
sorder.Invoice.Address = rurl
} else {
sorder.Invoice.Address = getUrl(sorder.Invoice.QrcodeLg, "mpurl")
}
case "WECHAT":
sorder.Invoice.Address = sorder.Invoice.Qrcode
case "EOS":
sorder.Invoice.Address = "mgtestflight"
sorder.Invoice.Memo = fmt.Sprintf("MP:%s", sorder.Invoice.OrderId)
}
}
// CheckOut switches the payment currency of the gateway order
func (mgp *Mugglepay) CheckOut(OrderId, PayCurrency string) (ServerOrder, error) {
var sorder ServerOrder
if OrderId == "" {
return sorder, errors.New("order id cannot be null")
}
me := make(map[string]string)
me["pay_currency"] = PayCurrency
newpatC, _ := json.Marshal(me)
reqest, _ := http.NewRequest("POST", fmt.Sprintf("%s/orders/%s/checkout", mgp.ApiUrl, OrderId), bytes.NewBuffer(newpatC))
reqest.Header.Add("content-type", "application/json")
http_unmarshal(reqest, &sorder, mgp.ApplicationKey)
return sorder, nil
}
// GetStatus queries the status of the order
func (mgp *Mugglepay) GetStatus(OrderId string) (ServerOrder, error) {
var sorder ServerOrder
if OrderId == "" {
return sorder, errors.New("order id cannot be null")
}
reqest, _ := http.NewRequest("GET", fmt.Sprintf("%s/orders/%s/status", mgp.ApiUrl, OrderId), nil)
http_unmarshal(reqest, &sorder, mgp.ApplicationKey)
return sorder, nil
}
// Sent marks a cryptocurrency order as paid ("I have paid")
func (mgp *Mugglepay) Sent(OrderId string) (ServerOrder, error) {
var sorder ServerOrder
if OrderId == "" {
return sorder, errors.New("order id cannot be null")
}
sorder, _ = mgp.GetOrder(OrderId)
if sorder.Invoice.PayCurrency == "ALIPAY" || sorder.Invoice.PayCurrency == "WECHAT" {
// fiat orders must not call this API
return sorder, errors.New("tan 90°")
}
nilmap, _ := json.Marshal(make(map[string]interface{}))
reqest, _ := http.NewRequest("POST", fmt.Sprintf("%s/orders/%s/sent", mgp.ApiUrl, OrderId), bytes.NewBuffer(nilmap))
http_unmarshal(reqest, &sorder, mgp.ApplicationKey)
return sorder, nil
}
|
[
2
] |
package orm
var DeleteTag string = "Delete"
type Delete struct {
table *Table
filter
}
func (delete *Delete) Where(sql string, args ...interface{}) *Delete {
delete.filter = filter{sql: " WHERE " + sql, args: args}
return delete
}
func (delete *Delete) And(sql string, args ...interface{}) *Delete {
delete.and(sql, args...)
return delete
}
func (delete *Delete) Or(sql string, args ...interface{}) *Delete {
delete.or(sql, args...)
return delete
}
func (delete *Delete) Exec() (int64, error) {
sql := "DELETE FROM " + delete.table.name + delete.sql
if delete.table.showSql {
ormLogger.D(DeleteTag, "Exec# %s", sql)
}
res, err := delete.table.db.Exec(sql, delete.args...)
if err != nil {
return 0, err
}
return res.RowsAffected()
}
|
[
1
] |
package main
import (
"encoding/hex"
"sort"
"github.com/bobg/scp"
"github.com/chain/txvm/protocol"
"github.com/chain/txvm/protocol/bc"
)
// The concrete type for scp.Value. This network votes on block
// IDs. When a node needs to know the contents of a block, it can
// inquire via RPC.
type valtype bc.Hash
func (v valtype) Less(otherval scp.Value) bool {
other := otherval.(valtype)
if v.V0 < other.V0 {
return true
}
if v.V0 > other.V0 {
return false
}
if v.V1 < other.V1 {
return true
}
if v.V1 > other.V1 {
return false
}
if v.V2 < other.V2 {
return true
}
if v.V2 > other.V2 {
return false
}
return v.V3 < other.V3
}
func (v valtype) Bytes() []byte {
return bc.Hash(v).Bytes()
}
func (v valtype) String() string {
return hex.EncodeToString(bc.Hash(v).Bytes())
}
func (v valtype) Combine(otherval scp.Value, slotID scp.SlotID) scp.Value {
other := otherval.(valtype)
if other.Less(v) {
return other.Combine(v, slotID)
}
if !v.Less(other) {
// v == other
return v
}
b1, err := getBlock(int(slotID), bc.Hash(v))
if err != nil {
panic(err) // xxx is this OK?
}
b2, err := getBlock(int(slotID), bc.Hash(other))
if err != nil {
panic(err) // xxx
}
txs := b1.Transactions
txs = append(txs, b2.Transactions...)
sort.Slice(txs, func(i, j int) bool {
s := make(map[bc.Hash]struct{})
for _, out := range txs[i].Outputs {
s[out.ID] = struct{}{}
}
for _, in := range txs[j].Inputs {
if _, ok := s[in.ID]; ok {
return true
}
}
s = make(map[bc.Hash]struct{})
for _, out := range txs[j].Outputs {
s[out.ID] = struct{}{}
}
for _, in := range txs[i].Inputs {
if _, ok := s[in.ID]; ok {
return false
}
}
return valtype(txs[i].ID).Less(valtype(txs[j].ID))
})
// Eliminate duplicates. The slice was just sorted, so duplicates of the
// same txid should be adjacent. There should be no more than two of any
// given txid, but this code handles any number of duplicates anyway.
if len(txs) > 0 {
n := 0
for i := 1; i < len(txs); i++ {
if txs[i].ID != txs[n].ID {
n++
txs[n] = txs[i]
}
}
txs = txs[:n+1]
}
cmtxs := make([]*bc.CommitmentsTx, 0, len(txs))
for _, tx := range txs {
cmtxs = append(cmtxs, bc.NewCommitmentsTx(tx))
}
// Use the earlier timestamp.
timestampMS := b1.TimestampMs
if b2.TimestampMs < timestampMS {
timestampMS = b2.TimestampMs
}
dflt := func() valtype {
// Cannot make a block from the combined set of txs. Choose one of
// the input blocks as the winner.
if slotID%2 == 0 {
return v
}
return other
}
// TODO: reuse a builder object
bb := protocol.NewBlockBuilder()
snapshot := chain.State()
err = bb.Start(snapshot, timestampMS)
if err != nil {
return dflt()
}
for _, tx := range cmtxs {
err = bb.AddTx(tx)
if err != nil {
return dflt()
}
}
ublock, _, err := bb.Build()
if err != nil {
return dflt()
}
block, err := bc.SignBlock(ublock, snapshot.Header, nil)
if err != nil {
return dflt()
}
err = storeBlock(block)
if err != nil {
panic(err)
}
return valtype(block.Hash())
}
func (v valtype) IsNil() bool {
h := bc.Hash(v)
return h.IsZero()
}
|
[
4
] |
package lesson
import (
"fmt"
"time"
)
func SwitchTest() {
for i := 0; i < 3; i++ {
switch i {
case 0:
{
fmt.Println(i)
}
case 1:
{
fmt.Println(i)
}
case 2:
{
fmt.Println(i)
}
}
}
switch time.Now().Weekday() {
case time.Wednesday:
fmt.Println("time.Wendsday")
}
switch {
case time.Now().Hour() < 12:
fmt.Println("time.Hour<12")
default:
fmt.Println("time.Now()")
}
}
|
[
7
] |
package util
import "sync"
// Pool implements a simple worker pool
type Pool struct {
nWorkers int
jobfunc func(in interface{})
jobs chan interface{}
sync.WaitGroup
WorkerPool chan chan interface{}
quit chan bool
}
type worker struct {
jobfunc func(in interface{})
WorkerPool chan chan interface{}
jobChannel chan interface{}
*sync.WaitGroup
}
func newWorker(WorkerPool chan chan interface{}, jobfunc func(in interface{}), w *sync.WaitGroup) *worker {
return &worker{
jobfunc: jobfunc,
WaitGroup: w,
WorkerPool: WorkerPool,
jobChannel: make(chan interface{}),
}
}
func (w *worker) Start() {
go func() {
w.WorkerPool <- w.jobChannel
for {
select {
case job, open := <-w.jobChannel:
if !open {
return
}
w.jobfunc(job)
w.WaitGroup.Done()
w.WorkerPool <- w.jobChannel
}
}
}()
}
func NewPool(numWorkers int, jobfunc func(in interface{})) *Pool {
return &Pool{
nWorkers: numWorkers,
jobfunc: jobfunc,
jobs: make(chan interface{}, numWorkers),
WorkerPool: make(chan chan interface{}, numWorkers),
// quit must be initialized here; sending on a nil channel in Quit would block forever
quit: make(chan bool),
}
}
// Start starts the worker pool
// Usage go pool.Start()
func (p *Pool) Start() {
for i := 0; i < p.nWorkers; i++ {
w := newWorker(p.WorkerPool, p.jobfunc, &p.WaitGroup)
w.Start()
}
go func() {
for {
select {
case job := <-p.jobs:
go func(job interface{}) {
jobChan := <-p.WorkerPool
jobChan <- job
}(job)
case <-p.quit:
return
}
}
}()
}
// TODO: make sure Add returns an error after Quit is called
func (p *Pool) Wait() {
p.WaitGroup.Wait()
}
func (p *Pool) Add(job interface{}) {
// register the job with the WaitGroup before queueing it, so Wait cannot
// return before asynchronously added jobs have been picked up
p.WaitGroup.Add(1)
go func() {
p.jobs <- job
}()
}
func (p *Pool) Quit() {
go func() {
p.quit <- true
for {
select {
case wp := <-p.WorkerPool:
close(wp)
default:
return
}
}
}()
}
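// A brief usage sketch (added for illustration, not part of the original file):
// run ten jobs on three workers, then wait for all of them to finish.
func examplePoolUsage() {
p := NewPool(3, func(in interface{}) {
_ = in // process the job here
})
p.Start()
for i := 0; i < 10; i++ {
p.Add(i)
}
p.Wait()
p.Quit()
}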
|
[
2
] |
package vcs
import (
"bytes"
"fmt"
"strings"
"time"
tm "github.com/buger/goterm"
"github.com/cheggaaa/pb"
)
type lastWriteBuffer struct {
buffer *bytes.Buffer
}
func newLastWriteBuffer() *lastWriteBuffer {
return &lastWriteBuffer{&bytes.Buffer{}}
}
type ProgressBarBank struct {
bars map[*pb.ProgressBar]*lastWriteBuffer
barorder []*pb.ProgressBar
names map[*pb.ProgressBar]string
started bool
isFinished bool
}
func NewProgressBarBank() *ProgressBarBank {
return &ProgressBarBank{
bars: make(map[*pb.ProgressBar]*lastWriteBuffer),
barorder: []*pb.ProgressBar{},
names: map[*pb.ProgressBar]string{},
}
}
func (b *lastWriteBuffer) Write(p []byte) (n int, err error) {
b.buffer.Reset()
return b.buffer.Write(p)
}
func (b *lastWriteBuffer) String() string {
return b.buffer.String()
}
func (b *ProgressBarBank) updateWidth() {
var max int
for _, name := range b.names {
l := len(name)
if l > max {
max = l
}
}
w := tm.Width() - max - 2
for bar := range b.bars {
bar.SetWidth(w)
}
}
func (b *ProgressBarBank) StartNew(count int, prefix string) *pb.ProgressBar {
defer b.Start()
bar := pb.New(count)
defer bar.Start()
defer b.updateWidth()
buffer := newLastWriteBuffer()
b.bars[bar] = buffer
b.barorder = append(b.barorder, bar)
b.names[bar] = prefix
bar.ShowCounters = false
bar.Output = buffer
bar.NotPrint = true
return bar
}
func clearScreen() {
tm.Clear()
tm.MoveCursor(1, 1)
}
func (b *ProgressBarBank) Render() {
clearScreen()
table := tm.NewTable(0, 5, 1, ' ', 0)
for _, k := range b.barorder {
fmt.Fprintf(table, "%s\t%s\n", tm.Color(b.names[k], tm.WHITE), strings.Trim(b.bars[k].String(), "\r"))
}
tm.Println(table)
tm.Flush()
}
func (b *ProgressBarBank) writer() {
for {
if b.isFinished {
break
}
b.Render()
time.Sleep(200 * time.Millisecond)
}
}
func (b *ProgressBarBank) Start() {
if !b.started {
go b.writer()
}
b.started = true
}
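// A brief usage sketch (an assumption, not part of the original file; it relies
// on the pb v1 ProgressBar API, i.e. Increment and Finish):
func exampleProgressBarBank() {
bank := NewProgressBarBank()
bar := bank.StartNew(100, "download")
for i := 0; i < 100; i++ {
bar.Increment()
}
bar.Finish()
}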
|
[
1
] |
package translatedassert
import (
"reflect"
)
// UnaryOpADD has nodoc
func UnaryOpADD(x interface{}) interface{} {
switch x.(type) {
case uint8:
return +x.(uint8)
case uint16:
return +x.(uint16)
case uint32:
return +x.(uint32)
case uint64:
return +x.(uint64)
case uint:
return +x.(uint)
case int8:
return +x.(int8)
case int16:
return +x.(int16)
case int32:
return +x.(int32)
case int64:
return +x.(int64)
case int:
return +x.(int)
case float32:
return +x.(float32)
case float64:
return +x.(float64)
case complex64:
return +x.(complex64)
case complex128:
return +x.(complex128)
}
panic("unary(+) can take integers, floats, complex values")
}
// UnaryOpSUB has nodoc
func UnaryOpSUB(x interface{}) interface{} {
switch x.(type) {
case uint8:
return -x.(uint8)
case uint16:
return -x.(uint16)
case uint32:
return -x.(uint32)
case uint64:
return -x.(uint64)
case uint:
return -x.(uint)
case int8:
return -x.(int8)
case int16:
return -x.(int16)
case int32:
return -x.(int32)
case int64:
return -x.(int64)
case int:
return -x.(int)
case float32:
return -x.(float32)
case float64:
return -x.(float64)
case complex64:
return -x.(complex64)
case complex128:
return -x.(complex128)
}
panic("unary(-) can take integers, floats, complex values")
}
// UnaryOpNOT has nodoc
func UnaryOpNOT(x interface{}) interface{} {
switch x.(type) {
case bool:
return !x.(bool)
}
panic("unary(!) can take bool")
}
// UnaryOpXOR has nodoc
func UnaryOpXOR(x interface{}) interface{} {
switch x.(type) {
case uint8:
return ^x.(uint8)
case uint16:
return ^x.(uint16)
case uint32:
return ^x.(uint32)
case uint64:
return ^x.(uint64)
case uint:
return ^x.(uint)
case int8:
return ^x.(int8)
case int16:
return ^x.(int16)
case int32:
return ^x.(int32)
case int64:
return ^x.(int64)
case int:
return ^x.(int)
}
panic("unary(^) can take integers")
}
// UnaryOpARROW has nodoc
func UnaryOpARROW(x interface{}) interface{} {
r, _ := reflect.ValueOf(x).Recv()
return r.Interface()
}
|
[
7
] |
package main
import (
"bufio"
"fmt"
"os"
"sort"
"strconv"
)
func out(x ...interface{}) {
fmt.Println(x...)
}
var sc = bufio.NewScanner(os.Stdin)
func getInt() int {
sc.Scan()
i, e := strconv.Atoi(sc.Text())
if e != nil {
panic(e)
}
return i
}
func getInts(N int) []int {
ret := make([]int, N)
for i := 0; i < N; i++ {
ret[i] = getInt()
}
return ret
}
func getString() string {
sc.Scan()
return sc.Text()
}
// basic helper functions: min, max, asub, abs, etc.
func max(a, b int) int {
if a > b {
return a
}
return b
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func asub(a, b int) int {
if a > b {
return a - b
}
return b - a
}
func abs(a int) int {
if a >= 0 {
return a
}
return -a
}
func lowerBound(a []int, x int) int {
idx := sort.Search(len(a), func(i int) bool {
return a[i] >= x
})
return idx
}
func upperBound(a []int, x int) int {
idx := sort.Search(len(a), func(i int) bool {
return a[i] > x
})
return idx
}
var good = "good"
var problem = "problem"
func solve(s string) {
ans := len(s)
n := len(s) - len(good) - len(problem) + 1
for i := 0; i < n; i++ {
cnt0 := 0
for j := 0; j < len(good); j++ {
if s[i+j] != good[j] {
cnt0++
}
}
// out(cnt0)
m := len(s) - len(problem) + 1
// out(i+4, m)
for j := i + 4; j < m; j++ {
cnt1 := 0
for k := 0; k < len(problem); k++ {
if s[j+k] != problem[k] {
cnt1++
}
}
ans = min(ans, cnt0+cnt1)
}
}
out(ans)
}
func main() {
sc.Split(bufio.ScanWords)
sc.Buffer([]byte{}, 1000000)
T := getInt()
for i := 0; i < T; i++ {
s := getString()
solve(s)
}
}
|
[
2
] |
package marshal
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"encoding/binary"
"encoding/pem"
"errors"
"fmt"
"math/big"
"golang.org/x/crypto/ssh"
)
// An exact copy of the diff from this unmerged change:
// https://go-review.googlesource.com/c/crypto/+/218620/
//
// Once the above is merged or another solution is found,
// this should be removed and hostkeys should adopt.
//
// Kept in /internal/ to avoid api leakage
func MarshalPrivateKey(key crypto.PrivateKey, comment string) (*pem.Block, error) {
return marshalOpenSSHPrivateKey(key, comment, unencryptedOpenSSHMarshaler)
}
func unencryptedOpenSSHMarshaler(PrivKeyBlock []byte) ([]byte, string, string, string, error) {
key := generateOpenSSHPadding(PrivKeyBlock, 8)
return key, "none", "none", "", nil
}
type openSSHEncryptFunc func(PrivKeyBlock []byte) (ProtectedKeyBlock []byte, cipherName, kdfName, kdfOptions string, err error)
func marshalOpenSSHPrivateKey(key crypto.PrivateKey, comment string, encrypt openSSHEncryptFunc) (*pem.Block, error) {
var w struct {
CipherName string
KdfName string
KdfOpts string
NumKeys uint32
PubKey []byte
PrivKeyBlock []byte
}
var pk1 struct {
Check1 uint32
Check2 uint32
Keytype string
Rest []byte `ssh:"rest"`
}
// Random check bytes.
var check uint32
if err := binary.Read(rand.Reader, binary.BigEndian, &check); err != nil {
return nil, err
}
pk1.Check1 = check
pk1.Check2 = check
w.NumKeys = 1
// Use a []byte directly on ed25519 keys.
if k, ok := key.(*ed25519.PrivateKey); ok {
key = *k
}
switch k := key.(type) {
case *rsa.PrivateKey:
E := new(big.Int).SetInt64(int64(k.PublicKey.E))
// Marshal public key:
// E and N are in reversed order in the public and private key.
pubKey := struct {
KeyType string
E *big.Int
N *big.Int
}{
ssh.KeyAlgoRSA,
E, k.PublicKey.N,
}
w.PubKey = ssh.Marshal(pubKey)
// Marshal private key.
key := struct {
N *big.Int
E *big.Int
D *big.Int
Iqmp *big.Int
P *big.Int
Q *big.Int
Comment string
}{
k.PublicKey.N, E,
k.D, k.Precomputed.Qinv, k.Primes[0], k.Primes[1],
comment,
}
pk1.Keytype = ssh.KeyAlgoRSA
pk1.Rest = ssh.Marshal(key)
case ed25519.PrivateKey:
pub := make([]byte, ed25519.PublicKeySize)
priv := make([]byte, ed25519.PrivateKeySize)
copy(pub, k[ed25519.PublicKeySize:])
copy(priv, k)
// Marshal public key.
pubKey := struct {
KeyType string
Pub []byte
}{
ssh.KeyAlgoED25519, pub,
}
w.PubKey = ssh.Marshal(pubKey)
// Marshal private key.
key := struct {
Pub []byte
Priv []byte
Comment string
}{
pub, priv,
comment,
}
pk1.Keytype = ssh.KeyAlgoED25519
pk1.Rest = ssh.Marshal(key)
case *ecdsa.PrivateKey:
var curve, keyType string
switch name := k.Curve.Params().Name; name {
case "P-256":
curve = "nistp256"
keyType = ssh.KeyAlgoECDSA256
case "P-384":
curve = "nistp384"
keyType = ssh.KeyAlgoECDSA384
case "P-521":
curve = "nistp521"
keyType = ssh.KeyAlgoECDSA521
default:
return nil, errors.New("ssh: unhandled elliptic curve " + name)
}
pub := elliptic.Marshal(k.Curve, k.PublicKey.X, k.PublicKey.Y)
// Marshal public key.
pubKey := struct {
KeyType string
Curve string
Pub []byte
}{
keyType, curve, pub,
}
w.PubKey = ssh.Marshal(pubKey)
// Marshal private key.
key := struct {
Curve string
Pub []byte
D *big.Int
Comment string
}{
curve, pub, k.D,
comment,
}
pk1.Keytype = keyType
pk1.Rest = ssh.Marshal(key)
default:
return nil, fmt.Errorf("ssh: unsupported key type %T", k)
}
var err error
// Add padding and encrypt the key if necessary.
w.PrivKeyBlock, w.CipherName, w.KdfName, w.KdfOpts, err = encrypt(ssh.Marshal(pk1))
if err != nil {
return nil, err
}
b := ssh.Marshal(w)
block := &pem.Block{
Type: "OPENSSH PRIVATE KEY",
Bytes: append([]byte(magic), b...),
}
return block, nil
}
const magic = "openssh-key-v1\x00"
func generateOpenSSHPadding(block []byte, blockSize int) []byte {
for i, l := 0, len(block); (l+i)%blockSize != 0; i++ {
block = append(block, byte(i+1))
}
return block
}
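// A minimal usage sketch added for illustration (not part of the copied diff):
// generate an ed25519 key, marshal it with MarshalPrivateKey, and PEM-encode the
// result. marshalEd25519PEMExample is a hypothetical helper name; every package it
// uses is already imported above.
func marshalEd25519PEMExample() ([]byte, error) {
_, priv, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
return nil, err
}
block, err := MarshalPrivateKey(priv, "example@host")
if err != nil {
return nil, err
}
return pem.EncodeToMemory(block), nil
}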
|
[
2
] |
package models
type ServiceCentre struct {
Id string
Address string
IdAuto string
}
func NewSC(Id, Address, IdAuto string) *ServiceCentre {
return &ServiceCentre{Id, Address, IdAuto}
}
|
[
2
] |
package lib
import (
"flag"
"log"
"net/url"
"os"
"strings"
"github.com/circonus-labs/circonus-gometrics/api"
)
// Allocation is a struct containing state of a nomad allocation
type Allocation struct {
// ID of the allocation (UUID)
ID string
// Name is a logical name of the allocation.
Name string
// NodeID is the node this is being placed on
NodeID string
// JobID is the parent job of the task group being allocated.
// This is copied at allocation time to avoid issues if the job
// definition is updated.
JobID string
// ClientStatus of the allocation on the client
ClientStatus string
}
type MetricSearchResult struct {
CheckBundleID string `json:"_check_bundle"`
Name string `json:"_metric_name"`
Type string `json:"_metric_type"`
Tags []string `json:"tags"`
Units string `json:"units"`
}
type CheckBundleMetric struct {
Name string `json:"name"`
Status string `json:"status"`
Tags []string `json:"tags"`
Type string `json:"type"`
Units string `json:"units"`
Result string `json:"result,omitempty"`
}
type CheckBundleMetricList struct {
Metrics []CheckBundleMetric `json:"metrics"`
}
type CheckBundleMetricResult struct {
CID string `json:"_cid"`
Metrics []CheckBundleMetric `json:"metrics"`
}
var (
NomadURL *url.URL
circapi *api.API
)
func setup() *api.Config {
var err error
var apiKey string
var apiApp string
var apiURL string
var nomadAPIURL string
var debug bool
flag.StringVar(&apiKey, "key", "", "Circonus API Token Key [none] (CIRCONUS_API_KEY)")
flag.StringVar(&apiApp, "app", "", "Circonus API Token App [nomad-metric-reaper] (CIRCONUS_API_APP)")
flag.StringVar(&apiURL, "apiurl", "", "Base Circonus API URL [https://api.circonus.com/] (CIRCONUS_API_URL)")
flag.StringVar(&nomadAPIURL, "nomadurl", "", "Base Nomad API URL [http://localhost:4646/] (NOMAD_API_URL)")
flag.BoolVar(&debug, "debug", false, "Enable Circonus API debugging")
flag.Parse()
cfg := &api.Config{}
if apiKey == "" {
apiKey = os.Getenv("CIRCONUS_API_KEY")
if apiKey == "" {
log.Printf("CIRCONUS_API_KEY is not set, exiting.\n")
os.Exit(1)
}
}
cfg.TokenKey = apiKey
if apiApp == "" {
apiApp = os.Getenv("CIRCONUS_API_APP")
if apiApp == "" {
apiApp = "nomad-metrics-reaper"
}
}
cfg.TokenApp = apiApp
if apiURL == "" {
apiURL = os.Getenv("CIRCONUS_API_URL")
if apiURL == "" {
apiURL = "https://api.circonus.com/"
}
}
cfg.URL = apiURL
cfg.Debug = debug
if nomadAPIURL == "" {
nomadAPIURL = os.Getenv("NOMAD_API_URL")
// log.Printf("URL before = %v\n", nomadAPIURL)
if nomadAPIURL == "" {
nomadAPIURL = "http://localhost:4646/v1/jobs"
} else {
if strings.Contains(nomadAPIURL, "allocations") {
nomadAPIURL = strings.Replace(nomadAPIURL, "allocations", "jobs", 1)
}
}
}
// log.Printf("URL after = %v\n", nomadAPIURL)
NomadURL, err = url.Parse(nomadAPIURL)
if err != nil {
log.Printf("ERROR: parsing Nomad API URL %+v\n", err)
os.Exit(1)
}
return cfg
}
|
[
4
] |
package net
import "fmt"
import "strconv"
import "strings"
import "net/url"
import "github.com/jinzhu/gorm"
import "github.com/gedex/inflector"
import "github.com/labstack/gommon/log"
import "github.com/dadleyy/charcoal.api/util"
const BlueprintDefaultLimit = 100
const BlueprintMaxLimit = 500
const BlueprintMinLimit = 1
const BlueprintFilterStart = "filter["
const BlueprintFilterEnd = "]"
type Blueprint struct {
*gorm.DB
*log.Logger
values url.Values
}
type BlueprintForeignReference struct {
reference string
source string
}
func (r *BlueprintForeignReference) JoinString() string {
bits := strings.Split(r.reference, ".")
table := inflector.Pluralize(bits[0])
fk := fmt.Sprintf("%s_id", inflector.Singularize(table))
return fmt.Sprintf("JOIN %s on %s.id = %s.%s", table, table, r.source, fk)
}
func (r *BlueprintForeignReference) WhereField() string {
bits := strings.Split(r.reference, ".")
table := inflector.Pluralize(bits[0])
return fmt.Sprintf("%s.%s", table, bits[1])
}
func (print *Blueprint) Limit() int {
if i, err := strconv.Atoi(print.values.Get("limit")); err == nil {
return util.MaxInt(util.MinInt(BlueprintMaxLimit, i), BlueprintMinLimit)
}
return BlueprintDefaultLimit
}
func (print *Blueprint) Apply(out interface{}) (int, error) {
var total int
limit, page := BlueprintDefaultLimit, 0
cursor := print.DB
if i, err := strconv.Atoi(print.values.Get("limit")); err == nil {
limit = util.MinInt(BlueprintMaxLimit, i)
}
if i, err := strconv.Atoi(print.values.Get("page")); err == nil {
page = i
}
scope := print.NewScope(out)
table := scope.TableName()
for key := range print.values {
filterable := strings.HasPrefix(key, BlueprintFilterStart) && strings.HasSuffix(key, BlueprintFilterEnd)
value := strings.SplitN(print.values.Get(key), "(", 2)
if !filterable || len(value) != 2 || !strings.HasSuffix(value[1], ")") {
continue
}
column := strings.TrimSuffix(strings.TrimPrefix(key, BlueprintFilterStart), BlueprintFilterEnd)
operation, target := value[0], strings.TrimSuffix(value[1], ")")
full := fmt.Sprintf("%s.%s", table, column)
if bits := strings.Split(column, "."); len(bits) == 2 {
print.Debugf("found an association query: %s - %s(%s)", column, operation, target)
reference := BlueprintForeignReference{column, table}
// move the cursor into a join + change the where clause to be our referenced where
cursor = cursor.Joins(reference.JoinString())
full = reference.WhereField()
}
switch operation {
case "in":
values := strings.Split(target, ",")
cursor = cursor.Where(fmt.Sprintf("%s in (?)", full), values)
case "lk":
query, search := fmt.Sprintf("%s LIKE ?", full), fmt.Sprintf("%%%s%%", target)
cursor = cursor.Where(query, search)
case "eq":
cursor = cursor.Where(fmt.Sprintf("%s = ?", full), target)
case "lt":
cursor = cursor.Where(fmt.Sprintf("%s < ?", full), target)
case "gt":
cursor = cursor.Where(fmt.Sprintf("%s > ?", full), target)
}
}
direction := "ASC"
if o := print.values.Get("sort_order"); o == "desc" || o == "DESC" {
direction = "DESC"
}
sort := fmt.Sprintf("id %s", direction)
if on := print.values.Get("sort_on"); len(on) >= 1 {
sort = fmt.Sprintf("%s %s", on, direction)
}
// now that we've chained all our filters, execute the db query and return the error, if any
if e := cursor.Limit(limit).Offset(page * limit).Order(sort).Find(out).Error; e != nil {
return -1, e
}
// also make a `count()` request
cursor.Model(out).Count(&total)
return total, nil
}
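// A rough in-package usage sketch (illustrative only; applyExample is a hypothetical
// helper). The query-string convention Apply understands is
// ?filter[<column>]=<op>(<value>)&limit=<n>&page=<n>&sort_on=<col>&sort_order=<asc|desc>,
// so filter[name]=lk(smith) becomes a "name LIKE '%smith%'" clause on the target table.
func applyExample(db *gorm.DB, logger *log.Logger, out interface{}) (int, error) {
values, err := url.ParseQuery("filter[name]=lk(smith)&limit=25&page=0&sort_on=name&sort_order=desc")
if err != nil {
return -1, err
}
print := &Blueprint{DB: db, Logger: logger, values: values}
return print.Apply(out)
}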
|
[
1
] |
package g8
import (
"e8vm.io/e8vm/g8/ast"
"e8vm.io/e8vm/g8/types"
"e8vm.io/e8vm/sym8"
)
func buildArrayType(b *builder, expr *ast.ArrayTypeExpr) types.T {
t := buildType(b, expr.Type)
if t == nil {
return nil
}
if expr.Len == nil {
// slice
return &types.Slice{t}
}
// array
n := b.buildConstExpr(expr.Len)
if n == nil {
return nil
} else if !n.IsSingle() {
panic("bug")
}
ntype := n.Type()
c, ok := ntype.(*types.Const)
if !ok {
// might be true, false, or other builtin consts
b.Errorf(ast.ExprPos(expr), "array index is not a constant")
return nil
}
if v, ok := types.NumConst(ntype); ok {
if v < 0 {
b.Errorf(ast.ExprPos(expr),
"array index is negative: %d", c.Value,
)
return nil
} else if !types.InRange(v, types.Int) {
b.Errorf(ast.ExprPos(expr), "index out of range of int32")
return nil
}
return &types.Array{T: t, N: int32(v)}
}
// TODO: support typed const
b.Errorf(ast.ExprPos(expr), "typed const not implemented yet")
return nil
}
func buildPkgRef(b *builder, expr ast.Expr) *types.Pkg {
switch expr := expr.(type) {
case *ast.Operand:
ret := buildOperand(b, expr)
if ret == nil {
return nil
}
if !ret.IsPkg() {
b.Errorf(ast.ExprPos(expr), "expect a package, got %s", ret)
return nil
}
return ret.Type().(*types.Pkg)
}
b.Errorf(ast.ExprPos(expr), "expect an imported package")
return nil
}
func buildType(b *builder, expr ast.Expr) types.T {
if expr == nil {
panic("bug")
}
switch expr := expr.(type) {
case *ast.Operand:
ret := buildOperand(b, expr)
if ret == nil {
return nil
} else if !ret.IsType() {
b.Errorf(ast.ExprPos(expr), "expect a type, got %s", ret)
return nil
}
return ret.TypeType()
case *ast.StarExpr:
t := buildType(b, expr.Expr)
if t == nil {
return nil
}
return &types.Pointer{t}
case *ast.ArrayTypeExpr:
return buildArrayType(b, expr)
case *ast.ParenExpr:
return buildType(b, expr.Expr)
case *ast.FuncTypeExpr:
return buildFuncType(b, nil, expr.FuncSig)
case *ast.MemberExpr:
pkg := buildPkgRef(b, expr.Expr)
if pkg == nil {
return nil
}
name := expr.Sub.Lit
s := pkg.Syms.Query(name)
if s == nil {
b.Errorf(expr.Sub.Pos, "symbol %s not found", name)
return nil
}
if !sym8.IsPublic(name) && s.Pkg() != b.symPkg {
b.Errorf(expr.Sub.Pos, "symbol %s is not public", name)
return nil
}
if s.Type != symStruct {
b.Errorf(expr.Sub.Pos, "symbol %s is a %s, not a struct",
name, symStr(s.Type),
)
return nil
}
return s.Item.(*objType).ref.TypeType()
}
b.Errorf(ast.ExprPos(expr), "expect a type")
return nil
}
|
[
7
] |
package main
import (
"bufio"
"encoding/xml"
"flag"
"fmt"
"log"
"os"
"runtime/pprof"
)
var profile = flag.Bool("profile", false, "Set to true to enable profiling to cpuprofile.out")
func main() {
flag.Parse()
filename := "generate.xml"
// Enable profiling.
if *profile {
f, err := os.Create("cpuprofile.out")
if err != nil {
log.Fatal(err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
count := countHouses(filename)
fmt.Printf("Go: Found %v houses\n", count)
}
func countHouses(filename string) int {
houses := 0
file, err := os.Open(filename)
handle(err)
defer file.Close()
buffer := bufio.NewReaderSize(file, 1024*1024*256) // 256 MiB read buffer
decoder := xml.NewDecoder(buffer)
for {
t, _ := decoder.Token()
if t == nil {
break
}
switch se := t.(type) {
case xml.StartElement:
if se.Name.Local == "House" {
houses++
}
}
}
return houses
}
func handle(err error) {
if err != nil {
// Exit on error so callers never continue with a nil *os.File.
log.Fatalf("error: %v", err)
}
}
|
[
7
] |
package font
import (
"github.com/wieku/danser-go/render/texture"
"path/filepath"
"strings"
"github.com/wieku/danser-go/utils"
)
func LoadTextureFont(path, name string, min, max rune, atlas *texture.TextureAtlas) *Font {
font := new(Font)
font.min = min
font.max = max
font.glyphs = make([]*glyphData, font.max-font.min+1)
font.atlas = atlas
extension := filepath.Ext(path)
baseFile := strings.TrimSuffix(path, extension)
for i := min; i <= max; i++ {
region, _ := utils.LoadTextureToAtlas(font.atlas, baseFile+string(i)+extension)
if float64(region.Height) > font.initialSize {
font.initialSize = float64(region.Height)
}
font.glyphs[i-font.min] = &glyphData{*region, float64(region.Width), 0, float64(region.Height) / 2}
}
return font
}
|
[
1
] |
package otgorm
import (
"context"
"fmt"
"testing"
sqlmock "github.com/DATA-DOG/go-sqlmock"
"github.com/jinzhu/gorm"
"github.com/opentracing/opentracing-go/ext"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/mocktracer"
"github.com/stretchr/testify/assert"
)
func TestWrapDB(t *testing.T) {
assert := assert.New(t)
db, _, err := sqlmock.New()
if err != nil {
panic(err)
}
defer db.Close()
gdb, err := gorm.Open("allan-test", db)
if err != nil {
panic(err)
}
tests := map[string]struct {
db *gorm.DB
isErr bool
}{
"fail": {db: nil, isErr: true},
"success": {db: gdb},
}
for k, tc := range tests {
t.Run(k, func(t *testing.T) {
_, err := WrapDB(tc.db)
assert.Equal(tc.isErr, err != nil)
})
}
}
func TestWrapDB_WithContext(t *testing.T) {
assert := assert.New(t)
db, _, err := sqlmock.New()
if err != nil {
panic(err)
}
defer db.Close()
gdb, err := gorm.Open("allan-test", db)
if err != nil {
panic(err)
}
wdb, err := WrapDB(gdb)
if err != nil {
panic(err)
}
tests := map[string]struct {
db DB
ctx context.Context
}{
"check": {db: wdb, ctx: context.TODO()},
}
for k, tc := range tests {
t.Run(k, func(t *testing.T) {
newdb := tc.db.WithContext(tc.ctx)
v, ok := newdb.Get(gormCtx)
assert.True(ok)
assert.Equal(tc.ctx, v)
})
}
}
func TestWrapDB_Callback(t *testing.T) {
assert := assert.New(t)
opentracing.SetGlobalTracer(mocktracer.New())
db, _, err := sqlmock.New()
if err != nil {
panic(err)
}
defer db.Close()
gdb, err := gorm.Open("common", db)
if err != nil {
panic(err)
}
wdb, err := WrapDB(gdb)
if err != nil {
panic(err)
}
tests := map[string]struct {
db *wrapDB
}{
"success": {db: wdb.(*wrapDB)},
}
for k, tc := range tests {
t.Run(k, func(t *testing.T) {
switch k {
case "success":
err := fmt.Errorf("error test")
db := tc.db.WithContext(context.TODO())
scope := db.Table("users").Select("id").NewScope(db.Value)
scope.DB().Error = err
tc.db.beforeQuery(scope)
tc.db.afterCallback(scope)
v, _ := scope.Get(gormCtx)
ctx := v.(context.Context)
span := opentracing.SpanFromContext(ctx).(*mocktracer.MockSpan)
assert.Equal("gorm:common:query", span.OperationName)
for k, v := range span.Tags() {
switch k {
case string(ext.DBType):
assert.Equal("sql", v.(string))
case string(ext.DBInstance):
assert.Equal("common", v.(string))
case string(ext.SpanKind):
assert.Equal("client/server", string(v.(ext.SpanKindEnum)))
case string(ext.DBStatement), "db.method":
// don't check
case "db.table":
assert.Equal("users", v)
case "db.rows_affected":
assert.Equal(int64(0), v.(int64))
case "error":
assert.Equal(true, v.(bool))
default:
panic("unknown tag")
}
}
for _, v := range span.Logs() {
assert.Equal(err.Error(), v.Fields[0].ValueString)
}
}
})
}
}
|
[
7
] |
package main
import (
"encoding/csv"
"flag"
"io"
"log"
"math"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/garyburd/redigo/redis"
)
var (
err error
logStderr *log.Logger
redisHost = flag.String("redis_host", "127.0.0.1", "-redis_host=127.0.0.1")
redisPort = flag.String("redis_port", "6379", "-redis_port=6379")
redisAuth = flag.String("redis_auth", "", "-redis_auth=MyPasswd")
flushall = flag.Bool("flushall", false, "-flushall")
debug = flag.Bool("debug", false, "-debug")
)
func main() {
logStderr = log.New(os.Stderr, "", log.Ldate|log.Lmicroseconds|log.Lshortfile)
flag.Parse()
pool := newRedisPool(*redisHost+":"+*redisPort, *redisAuth)
// Check the Redis connection
if _, err = pool.Dial(); err != nil {
logStderr.Fatalln("Redis Driver Error", err)
}
redispool := pool.Get()
defer redispool.Close()
rcsv := csv.NewReader(os.Stdin)
rcsv.Comma = ';'
rcsv.Comment = '#'
rcsv.LazyQuotes = true
if *flushall {
_, err = redispool.Do("FLUSHALL")
if err != nil {
logStderr.Fatal(err)
}
}
for {
record, err := rcsv.Read()
if err == io.EOF {
break
}
if err != nil {
logStderr.Fatal(err)
}
parsePrefix(redispool, "7"+strings.TrimSpace(record[0])+strings.TrimSpace(record[1]), "7"+strings.TrimSpace(record[0])+strings.TrimSpace(record[2]), strings.TrimSpace(record[4])+";"+strings.TrimSpace(record[5]))
}
err = redispool.Flush()
if err != nil {
logStderr.Fatal(err)
}
}
// loads the data into Redis
func load_csv(w http.ResponseWriter, r *http.Request) {
}
func parsePrefix(redispool redis.Conn, min, max, operator string) {
if *debug {
logStderr.Printf("%s\t%s\t%s\n", min, max, operator)
}
min_len := len(min)
max_len := len(max)
if min_len != max_len {
logStderr.Fatalf("Invalid len min %d != len max %d\n", min_len, max_len)
}
minuint64, err := strconv.ParseUint(min, 10, 64)
if err != nil {
logStderr.Fatalln(err)
}
maxuint64, err := strconv.ParseUint(max, 10, 64)
if err != nil {
logStderr.Fatalln(err)
}
for minuint64 < maxuint64 {
var mask uint64 = 0
var pow uint64 = 0
for i := 0; ; i++ {
pow = uint64(math.Pow10(min_len - i))
mask = minuint64 / pow
if minuint64+pow-1 <= maxuint64 {
mask = minuint64 / pow
if *debug {
logStderr.Printf("%d %s\n", mask, operator)
}
err = redispool.Send("SET", mask, operator)
if err != nil {
logStderr.Fatal(err)
}
break
}
}
minuint64 = (mask + 1) * pow
}
}
func newRedisPool(server, password string) *redis.Pool {
return &redis.Pool{
MaxIdle: 3,
MaxActive: 1024,
IdleTimeout: 180 * time.Second,
Dial: func() (redis.Conn, error) {
c, err := redis.Dial("tcp", server)
if err != nil {
return nil, err
}
if password != "" {
if _, err := c.Do("AUTH", password); err != nil {
c.Close()
return nil, err
}
}
return c, err
},
TestOnBorrow: func(c redis.Conn, t time.Time) error {
_, err := c.Do("PING")
return err
},
}
}
|
[
1
] |
// Program in Go language to find the maximum and the next maximum number in an array
package main
import "fmt"
func main() {
// Declare the array size
var arraySize int
fmt.Print("Enter number of elements in your array: ")
fmt.Scan(&arraySize)
fmt.Println("Enter your array: ")
// Create the array
array := make([]int, arraySize)
for i := 0; i < arraySize; i++ {
fmt.Scan(&array[i])
}
// max is for maximum number and
// nextMax for next(second) maximum number
var max, nextMax int
// Assigning max and nextMax with array[0] and array[1] depending
// on the condition max > nextMax
if array[0] > array[1] {
max = array[0]
nextMax = array[1]
} else {
max = array[1]
nextMax = array[0]
}
// Compare the rest of the elements in the array
for i := 2; i < arraySize; i++ {
if array[i] > nextMax {
if array[i] > max {
nextMax = max
max = array[i]
} else {
nextMax = array[i]
}
}
}
fmt.Printf("Max: %d", max)
fmt.Printf("\nNext max: %d", nextMax)
}
/*
Output after Execution:
Enter number of elements in your array: 5
Enter your array:
1
2
3
4
5
Max: 5
Next max: 4 */
|
[
1
] |
// run
package main
import "fmt"
func main() {
switch i := interface{}(true); {
case i:
fmt.Print("1")
case false:
fmt.Print("2")
default:
fmt.Print("3")
}
switch i := interface{}(true); {
case i:
fmt.Print("i")
}
switch interface{}(true) {
case true:
}
switch true {
case interface{}(true):
}
switch interface{}(true) {
case interface{}(true):
}
switch true {
case true:
}
switch float64(10) {
case 10:
}
}
|
[
7
] |
/*
Bagja 9102 Kurniawan
*/
package main
import "fmt"
var size, input int
var arr []int
func FindMinAndMax(arr []int) string {
var min, max, iMax, iMin int = arr[0], arr[0], 0, 0
for i, num := range arr {
if num > max {
max, iMax = num, i
} else if num < min {
min, iMin = num, i
}
}
return fmt.Sprintf("min: %d index: %d max: %d index: %d", min, iMin, max, iMax)
}
func main() {
fmt.Print("Length array = ")
fmt.Scanf("%d\n", &size)
for i := 0; i < size; i++ {
fmt.Print("Input Element = ")
fmt.Scanf("%d\n", &input)
arr = append(arr, input)
}
fmt.Println(FindMinAndMax(arr))
}
|
[
1
] |
/* Copyright 2019 DevFactory FZ LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// Code generated by mockery v1.0.0. DO NOT EDIT.
package mocks
import mock "github.com/stretchr/testify/mock"
import net "net"
import v1 "k8s.io/api/core/v1"
import v1alpha1 "github.com/DevFactory/smartnat/pkg/apis/smartnat/v1alpha1"
// Scrubber is an autogenerated mock type for the Scrubber type
type Scrubber struct {
mock.Mock
}
// ScrubMapping provides a mock function with given fields: sn
func (_m *Scrubber) ScrubMapping(sn *v1alpha1.Mapping) (bool, bool, *net.IP) {
ret := _m.Called(sn)
var r0 bool
if rf, ok := ret.Get(0).(func(*v1alpha1.Mapping) bool); ok {
r0 = rf(sn)
} else {
r0 = ret.Get(0).(bool)
}
var r1 bool
if rf, ok := ret.Get(1).(func(*v1alpha1.Mapping) bool); ok {
r1 = rf(sn)
} else {
r1 = ret.Get(1).(bool)
}
var r2 *net.IP
if rf, ok := ret.Get(2).(func(*v1alpha1.Mapping) *net.IP); ok {
r2 = rf(sn)
} else {
if ret.Get(2) != nil {
r2 = ret.Get(2).(*net.IP)
}
}
return r0, r1, r2
}
// ValidateEndpoints provides a mock function with given fields: _a0, endpoints
func (_m *Scrubber) ValidateEndpoints(_a0 *v1alpha1.Mapping, endpoints *v1.Endpoints) error {
ret := _m.Called(_a0, endpoints)
var r0 error
if rf, ok := ret.Get(0).(func(*v1alpha1.Mapping, *v1.Endpoints) error); ok {
r0 = rf(_a0, endpoints)
} else {
r0 = ret.Error(0)
}
return r0
}
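// A hedged sketch of how this generated mock is typically wired up with testify in a
// test (added for illustration; exampleScrubber is a hypothetical helper and the
// argument matchers are assumptions about the caller's usage).
func exampleScrubber() *Scrubber {
m := &Scrubber{}
m.On("ScrubMapping", mock.Anything).Return(true, false, (*net.IP)(nil))
m.On("ValidateEndpoints", mock.Anything, mock.Anything).Return(nil)
return m
}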
|
[
4
] |
//go:build integration
// +build integration
// Copyright 2019 The mqtt-go authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mqtt
import (
"bytes"
"context"
"crypto/tls"
"fmt"
"reflect"
"testing"
"time"
)
func ExampleClient() {
done := make(chan struct{})
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
baseCli, err := DialContext(ctx, "mqtt://localhost:1883")
if err != nil {
panic(err)
}
// store as Client to make it easy to enable high level wrapper later
var cli Client = baseCli
cli.Handle(HandlerFunc(func(msg *Message) {
fmt.Printf("%s[%d]: %s", msg.Topic, int(msg.QoS), []byte(msg.Payload))
close(done)
}))
if _, err := cli.Connect(ctx, "TestClient", WithCleanSession(true)); err != nil {
panic(err)
}
if _, err := cli.Subscribe(ctx, Subscription{Topic: "test/topic", QoS: QoS1}); err != nil {
panic(err)
}
if err := cli.Publish(ctx, &Message{
Topic: "test/topic", QoS: QoS1, Payload: []byte("message"),
}); err != nil {
panic(err)
}
<-done
if err := cli.Disconnect(ctx); err != nil {
panic(err)
}
// Output: test/topic[1]: message
}
func TestIntegration_Connect(t *testing.T) {
// Overwrite default port to avoid using privileged port during test.
defaultPorts["ws"] = 9001
defaultPorts["wss"] = 9443
test := func(t *testing.T, urls map[string]string) {
for name, url := range urls {
t.Run(name, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cli, err := DialContext(ctx, url, WithTLSConfig(&tls.Config{InsecureSkipVerify: true}))
if err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
if _, err := cli.Connect(ctx, "Client"); err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
if err := cli.Disconnect(ctx); err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
})
}
}
t.Run("WithPort", func(t *testing.T) {
test(t, urls)
})
t.Run("WithoutPort", func(t *testing.T) {
test(t, urlsWithoutPort)
})
}
func TestIntegration_Publish(t *testing.T) {
for _, size := range []int{0x100, 0x3FF7, 0x3FF8, 0x7FF7, 0x7FF8, 0x20000} {
t.Run(fmt.Sprintf("%dBytes", size), func(t *testing.T) {
for name, url := range urls {
t.Run(name, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cli, err := DialContext(ctx, url, WithTLSConfig(&tls.Config{InsecureSkipVerify: true}))
if err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
if _, err := cli.Connect(ctx, fmt.Sprintf("Client%s%x", name, size)); err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
if err := cli.Publish(ctx, &Message{
Topic: "test",
Payload: make([]byte, size),
}); err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
if err := cli.Publish(ctx, &Message{
Topic: "test",
QoS: QoS1,
Payload: make([]byte, size),
}); err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
if err := cli.Disconnect(ctx); err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
})
}
})
}
}
func TestIntegration_PublishSubscribe(t *testing.T) {
for name, url := range urls {
t.Run(name, func(t *testing.T) {
for _, qos := range []QoS{QoS0, QoS1, QoS2} {
t.Run(fmt.Sprintf("QoS%d", int(qos)), func(t *testing.T) {
chReceived := make(chan *Message, 100)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cli, err := DialContext(ctx, url,
WithTLSConfig(&tls.Config{InsecureSkipVerify: true}),
WithConnStateHandler(func(s ConnState, err error) {
switch s {
case StateClosed:
close(chReceived)
t.Errorf("Connection is expected to be disconnected, but closed.")
}
}),
)
if err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
if _, err := cli.Connect(ctx, "PubSubClient"+name, WithCleanSession(true)); err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
cli.Handle(HandlerFunc(func(msg *Message) {
chReceived <- msg
}))
topic := "test_pubsub_" + name
subs, err := cli.Subscribe(ctx, Subscription{Topic: topic, QoS: qos})
if err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
expectedSubs := []Subscription{{Topic: topic, QoS: qos}}
if !reflect.DeepEqual(expectedSubs, subs) {
t.Fatalf("Expected subscriptions: %v, actual: %v", expectedSubs, subs)
}
if err := cli.Publish(ctx, &Message{
Topic: topic,
QoS: qos,
Payload: []byte("message"),
}); err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
select {
case <-ctx.Done():
t.Fatalf("Unexpected error: '%v'", ctx.Err())
case msg, ok := <-chReceived:
if !ok {
t.Errorf("Connection closed unexpectedly")
break
}
if msg.Topic != topic {
t.Errorf("Expected topic name of '%s', got '%s'", topic, msg.Topic)
}
if !bytes.Equal(msg.Payload, []byte("message")) {
t.Errorf("Expected payload of '%v', got '%v'", []byte("message"), msg.Payload)
}
}
if err := cli.Disconnect(ctx); err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
})
}
})
}
}
func TestIntegration_SubscribeUnsubscribe(t *testing.T) {
for name, url := range urls {
t.Run(name, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cli, err := DialContext(ctx, url, WithTLSConfig(&tls.Config{InsecureSkipVerify: true}))
if err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
if _, err := cli.Connect(ctx, "SubUnsubClient"+name); err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
subs, err := cli.Subscribe(ctx, Subscription{Topic: "test", QoS: QoS2})
if err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
expectedSubs := []Subscription{{Topic: "test", QoS: QoS2}}
if !reflect.DeepEqual(expectedSubs, subs) {
t.Fatalf("Expected subscriptions: %v, actual: %v", expectedSubs, subs)
}
if err := cli.Unsubscribe(ctx, "test"); err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
if err := cli.Disconnect(ctx); err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
})
}
}
func TestIntegration_Ping(t *testing.T) {
for name, url := range urls {
t.Run(name, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cli, err := DialContext(ctx, url, WithTLSConfig(&tls.Config{InsecureSkipVerify: true}))
if err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
if _, err := cli.Connect(ctx, "PingClient"+name); err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
if err := cli.Ping(ctx); err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
if err := cli.Disconnect(ctx); err != nil {
t.Fatalf("Unexpected error: '%v'", err)
}
})
}
}
func BenchmarkPublishSubscribe(b *testing.B) {
for name, url := range urls {
b.Run(name, func(b *testing.B) {
chReceived := make(chan *Message, 100)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
cli, err := DialContext(ctx, url,
WithTLSConfig(&tls.Config{InsecureSkipVerify: true}),
WithConnStateHandler(func(s ConnState, err error) {
switch s {
case StateClosed:
close(chReceived)
}
}),
)
if err != nil {
b.Fatalf("Unexpected error: '%v'", err)
}
if _, err := cli.Connect(ctx, "PubSubBenchClient"+name); err != nil {
b.Fatalf("Unexpected error: '%v'", err)
}
cli.Handle(HandlerFunc(func(msg *Message) {
chReceived <- msg
}))
if _, err := cli.Subscribe(ctx, Subscription{Topic: "test", QoS: QoS2}); err != nil {
b.Fatalf("Unexpected error: '%v'", err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := cli.Publish(ctx, &Message{
Topic: "test",
QoS: QoS2,
Payload: []byte("message"),
}); err != nil {
b.Fatalf("Unexpected error: '%v'", err)
}
if _, ok := <-chReceived; !ok {
b.Fatal("Connection closed unexpectedly")
}
}
b.StopTimer()
if err := cli.Disconnect(ctx); err != nil {
b.Fatalf("Unexpected error: '%v'", err)
}
})
}
}
|
[
7
] |
package main
import (
"bufio"
"fmt"
"os"
)
func addr(r [6]int64, a, b, c int64) [6]int64 {
newR := r
(&newR)[c] = r[a] + r[b]
return newR
}
func addi(r [6]int64, a, b, c int64) [6]int64 {
newR := r
(&newR)[c] = r[a] + b
return newR
}
func mulr(r [6]int64, a, b, c int64) [6]int64 {
newR := r
(&newR)[c] = r[a] * r[b]
return newR
}
func muli(r [6]int64, a, b, c int64) [6]int64 {
newR := r
(&newR)[c] = r[a] * b
return newR
}
func banr(r [6]int64, a, b, c int64) [6]int64 {
newR := r
(&newR)[c] = r[a] & r[b]
return newR
}
func bani(r [6]int64, a, b, c int64) [6]int64 {
newR := r
(&newR)[c] = r[a] & b
return newR
}
func borr(r [6]int64, a, b, c int64) [6]int64 {
newR := r
(&newR)[c] = r[a] | r[b]
return newR
}
func bori(r [6]int64, a, b, c int64) [6]int64 {
newR := r
(&newR)[c] = r[a] | b
return newR
}
func setr(r [6]int64, a, b, c int64) [6]int64 {
newR := r
(&newR)[c] = r[a]
return newR
}
func seti(r [6]int64, a, b, c int64) [6]int64 {
newR := r
(&newR)[c] = a
return newR
}
func gtir(r [6]int64, a, b, c int64) [6]int64 {
newR := r
if a > r[b] {
(&newR)[c] = 1
} else {
(&newR)[c] = 0
}
return newR
}
func gtri(r [6]int64, a, b, c int64) [6]int64 {
newR := r
if r[a] > b {
(&newR)[c] = 1
} else {
(&newR)[c] = 0
}
return newR
}
func gtrr(r [6]int64, a, b, c int64) [6]int64 {
newR := r
if r[a] > r[b] {
(&newR)[c] = 1
} else {
(&newR)[c] = 0
}
return newR
}
func eqir(r [6]int64, a, b, c int64) [6]int64 {
newR := r
if a == r[b] {
(&newR)[c] = 1
} else {
(&newR)[c] = 0
}
return newR
}
func eqri(r [6]int64, a, b, c int64) [6]int64 {
newR := r
if r[a] == b {
(&newR)[c] = 1
} else {
(&newR)[c] = 0
}
return newR
}
func eqrr(r [6]int64, a, b, c int64) [6]int64 {
newR := r
if r[a] == r[b] {
(&newR)[c] = 1
} else {
(&newR)[c] = 0
}
return newR
}
var funcMap = map[string]func(r [6]int64, a, b, c int64) [6]int64{
"addr": addr,
"addi": addi,
"mulr": mulr,
"muli": muli,
"banr": banr,
"bani": bani,
"borr": borr,
"bori": bori,
"setr": setr,
"seti": seti,
"gtir": gtir,
"gtri": gtri,
"gtrr": gtrr,
"eqir": eqir,
"eqri": eqri,
"eqrr": eqrr,
}
type op struct {
name string
a, b, c int64
}
func firstChallenge(IP int, ops []op) {
state, opLen := [6]int64{}, int64(len(ops))
for state[IP] < opLen {
op := ops[state[IP]]
if op.name == "eqrr" {
fmt.Println(state)
return
}
state = funcMap[op.name](state, op.a, op.b, op.c)
(&state)[IP]++
}
}
func secondChallenge(IP int, ops []op) {
state, opLen, max, time := [6]int64{}, int64(len(ops)), int64(0), int64(0)
regFiveMap := map[int64]int64{}
for state[IP] < opLen {
op := ops[state[IP]]
time++
if op.name == "eqrr" {
if state[5] > max {
max = state[5]
}
if regFiveMap[state[5]] > 0 {
timeMax, bestChoice := int64(0), int64(0)
for k, v := range regFiveMap {
if v > timeMax {
timeMax = v
bestChoice = k
}
}
fmt.Println(bestChoice)
os.Exit(0)
}
regFiveMap[state[5]] = time
}
state = funcMap[op.name](state, op.a, op.b, op.c)
(&state)[IP]++
}
}
func parseInput() (int, []op) {
scanner := bufio.NewScanner(os.Stdin)
scanner.Scan()
IP := 0
fmt.Sscanf(scanner.Text(), "#ip %d", &IP)
ops := []op{}
for scanner.Scan() {
name, a, b, c := "", int64(0), int64(0), int64(0)
fmt.Sscanf(scanner.Text(), "%s %d %d %d", &name, &a, &b, &c)
ops = append(ops, op{name, a, b, c})
}
return IP, ops
}
func main() {
IP, ops := parseInput()
fmt.Println("first challenge:")
firstChallenge(IP, ops)
fmt.Println("****************")
fmt.Println("second challenge:")
secondChallenge(IP, ops)
}
|
[
2
] |
package api
import (
"fmt"
stack "github.com/pkg/errors"
v1 "github.com/yametech/logging/pkg/apis/yamecloud/v1"
"github.com/yametech/logging/pkg/service"
"k8s.io/apimachinery/pkg/watch"
)
var _ IReconcile = &Sink{}
type Sink struct {
ns string
service.IService
}
func NewSink(ns string, service service.IService) IReconcile {
return &Sink{ns, service}
}
func (s *Sink) Run(errors chan error) {
var sink *v1.Sink
var err error
sink, err = s.GetSink(s.ns)
if err != nil {
errors <- stack.WithStack(err)
return
}
if sink == nil {
if sink, err = s.CreateSink(s.ns); err != nil {
errors <- stack.WithStack(err)
return
}
}
slackChannel, err := s.WatchSlack(s.ns, sink.GetResourceVersion())
if err != nil {
errors <- stack.WithStack(err)
return
}
for {
slackEvt, ok := <-slackChannel
if !ok {
errors <- fmt.Errorf("failed to watch sink")
return
}
switch slackEvt.Type {
case watch.Deleted:
if sink, err = s.CreateSink(s.ns); err != nil {
errors <- stack.WithStack(err)
return
}
}
}
}
|
[
7
] |
package container
import (
"context"
"time"
)
// ID of runnable
type ID string
// Base instance that can be run.
type Runnable interface {
Label() string
Run(ctx context.Context) error
}
// Factory that creates runnable
type Factory func() (Runnable, error)
// General information about runnable
type RunnableInfo struct {
Instance Runnable
ID ID
}
// Monitor events of state in supervisor
type Monitor interface {
Spawned(runnable Runnable, id ID)
Stopped(runnable Runnable, id ID, err error)
}
// Event when runnable started
type WatchEventStarted RunnableInfo
// Event when runnable stopped
type WatchEventStopped struct {
RunnableInfo
Error error
}
// Stream of events (WatchEventStarted or WatchEventStopped) while process running
type WatchEvents <-chan interface{}
// Supervisor monitors group of processes
type Supervisor interface {
// Watch runnable and restart if needed, but not more than restartLimit times. A negative restartLimit means no limit.
// If the runnable stopped with an error and stopOnError is true, the watch loop exits.
// Events MUST be consumed
Watch(ctx context.Context, factory Factory, restartLimit int, restartDelay time.Duration, stopOnError bool) WatchEvents
// Spawn and monitor one runnable in background. Returns generated ID, done channel (buffered) and stop function
Spawn(runnable Runnable) (ID, <-chan error, func())
// SpawnFunc creates and spawn ClosureWrapper runnable
SpawnFunc(label string, closure func(ctx context.Context) error) (ID, <-chan error, func())
// List all runnables
List() []RunnableInfo
// Get runnable by generated ID
Get(ID) Runnable
// Events emitter of runnable
Events() MonitorEvents
// Close the supervisor and stop all processes
Close()
}
// Wait for finish
func Wait(events WatchEvents) error {
var err error
for event := range events {
switch v := event.(type) {
case WatchEventStopped:
err = v.Error
}
}
return err
}
// Create factory that repeats closure
func RepeatFunc(label string, fn func(ctx context.Context) error) Factory {
return func() (Runnable, error) {
return &closure{instance: fn, label: label}, nil
}
}
//go:generate gbus Monitor supervisor.go
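// A hedged usage sketch (illustrative only; exampleWatch is a hypothetical helper and
// the Supervisor implementation is assumed to be constructed elsewhere in the package):
// watch a restarting worker and block until its watch loop finishes.
func exampleWatch(ctx context.Context, sup Supervisor) error {
factory := RepeatFunc("worker", func(ctx context.Context) error {
// one unit of work; a real runnable should honour ctx cancellation
return nil
})
events := sup.Watch(ctx, factory, 3, time.Second, false)
return Wait(events)
}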
|
[
7
] |
package handlers
import (
"encoding/json"
"net/http"
"github.com/qri-io/apiutil"
"github.com/qri-io/registry"
)
const (
defaultOffset = 0
defaultLimit = 25
)
// NewSearchHandler creates a search handler function that operates on a registry.Searchable
func NewSearchHandler(s registry.Searchable) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
p := &registry.SearchParams{}
switch r.Header.Get("Content-Type") {
case "application/json":
if err := json.NewDecoder(r.Body).Decode(p); err != nil {
apiutil.WriteErrResponse(w, http.StatusBadRequest, err)
return
}
if p.Limit == 0 {
p.Limit = defaultLimit
}
default:
// read form values
var err error
if p.Limit, err = apiutil.ReqParamInt("limit", r); err != nil {
p.Limit = defaultLimit
err = nil
}
if p.Offset, err = apiutil.ReqParamInt("offset", r); err != nil {
p.Offset = defaultOffset
err = nil
}
p.Q = r.FormValue("q")
}
switch r.Method {
case "GET":
results, err := s.Search(*p)
if err != nil {
apiutil.WriteErrResponse(w, http.StatusBadRequest, err)
return
}
apiutil.WriteResponse(w, results)
return
}
}
}
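// A minimal wiring sketch (illustrative; exampleMount is a hypothetical helper and
// the Searchable implementation is assumed to come from elsewhere): mount the
// handler on a standard library mux.
func exampleMount(s registry.Searchable) *http.ServeMux {
mux := http.NewServeMux()
mux.Handle("/search", NewSearchHandler(s))
return mux
}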
|
[
7
] |
package model
import (
"errors"
"github.com/jinzhu/gorm"
"log"
"math/rand"
"time"
)
const (
RoleTeacher = 1
RoleFamily = 2
RoleStudent = 3
)
var letterRunes = []rune("123456789")
func randStringRunes(n int) string {
// reset rand.Seed
rand.Seed(time.Now().Unix())
b := make([]rune, n)
for i := range b {
b[i] = letterRunes[rand.Intn(len(letterRunes))]
}
return string(b)
}
// Try up to 50 times until an unused ID is found
func tryToGetId(len int) (error, string) {
var id string
for sum := 1; sum < 50; sum++ {
id = randStringRunes(len)
var account AccountInfo
// If the ID does not exist in the database, return it; otherwise keep trying
if err := GetAccount(&account, id); err != nil && gorm.IsRecordNotFoundError(err) {
log.Printf("generated account id: %s", id)
return nil, id
}
}
return errors.New("无法生成账号ID"), ""
}
// Teacher IDs are 6 digits, family IDs are 6 digits, student IDs are 8 digits (e.g. 20190526)
func IdGen(accountRole uint) (error, string) {
if accountRole == RoleTeacher || accountRole == RoleFamily {
return tryToGetId(6)
} else if accountRole == RoleStudent {
return tryToGetId(8)
}
return errors.New("角色不正确,无法生成账号"), ""
}
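// A short usage sketch (illustrative only; exampleNewStudentID is a hypothetical
// helper). Note that IdGen returns (error, string) in that order.
func exampleNewStudentID() (string, error) {
err, id := IdGen(RoleStudent)
if err != nil {
return "", err
}
return id, nil
}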
|
[
1
] |
// https://app.codility.com/demo/results/trainingUKF4Q7-WQM/
package solution
// you can also use imports, for example:
// import "fmt"
// import "os"
// import "math";
import "sort";
// you can write to stdout for debugging purposes, e.g.
// fmt.Println("this is a debug message")
func Solution(A []int) int {
// write your code in Go 1.4
var lenA = len(A);
if(lenA > 100000) {
return 0;
}
sort.Ints(A);
for i := 0; i < lenA - 2; i++ {
if(((A[i] + A[i+1]) > A[i+2]) && ((A[i+1] + A[i+2]) > A[i]) && ((A[i+2] + A[i]) > A[i+1])) {
return 1;
}
}
return 0;
}
|
[
2
] |