code
stringlengths 67
15.9k
| labels
listlengths 1
4
|
---|---|
package main
import (
	"context"
	"fmt"
	"strconv"
	"strings"
)
// Part1 solves the first part of the day's puzzle: it sums every value on
// the nearby tickets that is not matched by any rule (the "ticket scanning
// error rate"). The context is accepted for interface consistency and is
// not used.
func Part1(ctx context.Context, input string) (interface{}, error) {
	rules, _, nearbyTickets, err := parseInput(input)
	if err != nil {
		return nil, err
	}
	total := 0
	for _, ticket := range nearbyTickets {
		_, bad := ticketIsValid(ticket, rules)
		for _, v := range bad {
			total += v
		}
	}
	return total, nil
}
// intRange is an inclusive integer interval [min, max].
type intRange struct {
	min int
	max int
}

// rule is a named ticket-field rule made up of one or more valid ranges.
type rule struct {
	name   string
	ranges []intRange
}

// contains reports whether i falls inside any of the rule's ranges.
// Fix: the loop variable was previously named r, shadowing the receiver;
// it is renamed to rng for clarity (behavior unchanged).
func (r *rule) contains(i int) bool {
	for _, rng := range r.ranges {
		if i >= rng.min && i <= rng.max {
			return true
		}
	}
	return false
}
// ticketIsValid partitions the values on a ticket into those matched by at
// least one rule (valid) and those matched by no rule at all (invalid).
func ticketIsValid(ticket []int, rules []rule) (valid, invalid []int) {
	for _, value := range ticket {
		matched := false
		for _, r := range rules {
			if r.contains(value) {
				matched = true
				break
			}
		}
		if matched {
			valid = append(valid, value)
		} else {
			invalid = append(invalid, value)
		}
	}
	return valid, invalid
}
// parseRule parses one rule line of the form "name: a-b or c-d [or ...]".
// It returns an error (instead of panicking, as the previous version did)
// when the line is missing the ": " separator, a range is missing its "-",
// or a bound is not an integer.
func parseRule(s string) (rule, error) {
	r := rule{}
	parts := strings.Split(s, ": ")
	if len(parts) != 2 {
		return r, fmt.Errorf("malformed rule %q: missing name separator", s)
	}
	r.name = parts[0]
	for _, rng := range strings.Split(parts[1], " or ") {
		minmax := strings.Split(rng, "-")
		if len(minmax) != 2 {
			return r, fmt.Errorf("malformed range %q in rule %q", rng, s)
		}
		min, err := strconv.Atoi(minmax[0])
		if err != nil {
			return r, err
		}
		max, err := strconv.Atoi(minmax[1])
		if err != nil {
			return r, err
		}
		r.ranges = append(r.ranges, intRange{min: min, max: max})
	}
	return r, nil
}
// parseInput splits the puzzle input into its three blank-line-separated
// sections — rules, "your ticket", and "nearby tickets" — and parses each.
// It returns an error (instead of panicking, as the previous version did)
// when fewer than three sections are present.
func parseInput(input string) (rules []rule, myTicket []int, nearbyTickets [][]int, err error) {
	sections := strings.Split(input, "\n\n")
	if len(sections) < 3 {
		return nil, nil, nil, fmt.Errorf("malformed input: expected 3 sections, got %d", len(sections))
	}

	// Parse the rules, one per line.
	ruleStrings := strings.Split(sections[0], "\n")
	for _, rule := range ruleStrings {
		r, err := parseRule(rule)
		if err != nil {
			return nil, nil, nil, err
		}
		rules = append(rules, r)
	}

	// Our ticket: the values are on the line after the "your ticket:" header.
	ticket := strings.Split(sections[1], "\n")[1]
	myTicket, err = parseTicket(ticket)
	if err != nil {
		return nil, nil, nil, err
	}

	// Other tickets: skip the "nearby tickets:" header line (i == 0).
	for i, ticket := range strings.Split(sections[2], "\n") {
		if i == 0 {
			continue
		}
		t, err := parseTicket(ticket)
		if err != nil {
			return nil, nil, nil, err
		}
		nearbyTickets = append(nearbyTickets, t)
	}
	return rules, myTicket, nearbyTickets, nil
}
// parseTicket parses a comma-separated list of integers into a slice,
// returning the first conversion error encountered.
func parseTicket(t string) ([]int, error) {
	fields := strings.Split(t, ",")
	ticket := make([]int, 0, len(fields))
	for _, field := range fields {
		n, err := strconv.Atoi(field)
		if err != nil {
			return nil, err
		}
		ticket = append(ticket, n)
	}
	return ticket, nil
}
|
[
0
] |
package main
/*
sorted by misses
tinylfu: 4.075395207s 10000000 total 2205147 misses
clockpro: 3.133172079s 10000000 total 2212461 misses
arc: 5.136880077s 10000000 total 2220016 misses
slru2080: 6.36564715s 10000000 total 2254569 misses
s4lru: 2.571416442s 10000000 total 2259629 misses
lfu: 5.194561433s 10000000 total 2326371 misses
slru5050: 6.266535668s 10000000 total 2360416 misses
clock: 2.497958238s 10000000 total 2587380 misses
lru: 5.020239605s 10000000 total 2643644 misses
random: 2.523026752s 10000000 total 2900727 misses
*/
import (
"bufio"
"flag"
"fmt"
"log"
"os"
"time"
"github.com/calmh/lfucache"
"github.com/dgryski/go-arc"
"github.com/dgryski/go-clockpro"
"github.com/dgryski/go-s4lru"
"github.com/dgryski/go-tinylfu"
"github.com/dgryski/trifles/cachetest/clock"
"github.com/dgryski/trifles/cachetest/random"
"github.com/dgryski/trifles/cachetest/slru"
"github.com/dgryski/trifles/cachetest/tworand"
"github.com/golang/groupcache/lru"
"github.com/pkg/profile"
)
// main benchmarks one cache-replacement algorithm (chosen via -alg) against
// a trace of keys read line-by-line from stdin or the -f file, then prints
// the elapsed time, total lookups, miss count, and hit rate.
func main() {
	n := flag.Int("n", 1000, "cache size")
	alg := flag.String("alg", "", "algorithm")
	file := flag.String("f", "", "input file")
	door := flag.Bool("door", false, "use doorkeeper")
	cpuprofile := flag.Bool("cpuprofile", false, "cpuprofile")
	memprofile := flag.Bool("memprofile", false, "memprofile")
	flag.Parse()

	if *alg == "" {
		log.Fatalln("no algorithm provided (-alg)")
	}

	if *cpuprofile {
		defer profile.Start(profile.CPUProfile).Stop()
	}

	if *memprofile {
		defer profile.Start(profile.MemProfile).Stop()
	}

	count := 0
	miss := 0

	t0 := time.Now()

	// f performs one lookup (inserting on miss) and reports whether the key
	// was a miss.
	var f func(string) bool

	// bouncer stays nil unless -door is set, yet the cases below call
	// bouncer.allow(s) unconditionally. doorkeeper is defined elsewhere in
	// this package; presumably allow tolerates a nil receiver —
	// NOTE(review): confirm, otherwise running without -door would panic.
	var bouncer *doorkeeper

	if *door {
		bouncer = newDoorkeeper(*n)
	}

	switch *alg {
	case "arc":
		// arc's Get takes a fill callback; the doorkeeper is not consulted
		// for this algorithm (nor for tworand below).
		cache := arc.New(*n)
		f = func(s string) bool {
			var miss bool
			cache.Get(s, func() interface{} {
				miss = true
				return s
			})
			return miss
		}
	case "random":
		cache := random.New(*n)
		f = func(s string) bool {
			if i := cache.Get(s); i == nil {
				if bouncer.allow(s) {
					cache.Set(s, s)
				}
				return true
			} else {
				// Sanity check: every cache stores the key as its own value.
				if i.(string) != s {
					panic("key != value")
				}
			}
			return false
		}
	case "tworand":
		cache := tworand.New(*n)
		f = func(s string) bool {
			if i := cache.Get(s); i == nil {
				cache.Set(s, s)
				return true
			} else {
				if i.(string) != s {
					panic("key != value")
				}
			}
			return false
		}
	case "lru":
		cache := lru.New(*n)
		f = func(s string) bool {
			if v, ok := cache.Get(s); !ok {
				if bouncer.allow(s) {
					cache.Add(s, s)
				}
				return true
			} else {
				if v.(string) != s {
					panic("key != value")
				}
			}
			return false
		}
	case "lfu":
		cache := lfucache.New(*n)
		f = func(s string) bool {
			if v, ok := cache.Access(s); !ok {
				if bouncer.allow(s) {
					cache.Insert(s, s)
				}
				return true
			} else {
				if v.(string) != s {
					panic("key != value")
				}
			}
			return false
		}
	case "tinylfu":
		// tinylfu takes both a cache size and a (larger) sample size.
		cache := tinylfu.New(*n, *n*10)
		f = func(s string) bool {
			if v, ok := cache.Get(s); !ok {
				if bouncer.allow(s) {
					cache.Add(s, s)
				}
				return true
			} else {
				if v.(string) != s {
					panic("key != value")
				}
			}
			return false
		}
	case "clock":
		cache := clock.New(*n)
		f = func(s string) bool {
			if i := cache.Get(s); i == nil {
				if bouncer.allow(s) {
					cache.Set(s, s)
				}
				return true
			} else {
				if i.(string) != s {
					panic("key != value")
				}
			}
			return false
		}
	case "clockpro":
		cache := clockpro.New(*n)
		f = func(s string) bool {
			if i := cache.Get(s); i == nil {
				if bouncer.allow(s) {
					cache.Set(s, s)
				}
				return true
			} else {
				if i.(string) != s {
					panic("key != value")
				}
			}
			return false
		}
	case "slru":
		// Segmented LRU with a 20/80 probation/protected split.
		cache := slru.New(int(float64(*n)*0.2), int(float64(*n)*0.8))
		f = func(s string) bool {
			if i := cache.Get(s); i == nil {
				if bouncer.allow(s) {
					cache.Set(s, s)
				}
				return true
			} else {
				if i.(string) != s {
					panic("key != value")
				}
			}
			return false
		}
	case "s4lru":
		cache := s4lru.New(*n)
		f = func(s string) bool {
			if v, ok := cache.Get(s); !ok {
				if bouncer.allow(s) {
					cache.Set(s, s)
				}
				return true
			} else {
				if v.(string) != s {
					panic("key != value")
				}
			}
			return false
		}
	default:
		log.Fatalln("unknown algorithm")
	}

	// Read the trace from -f if given, otherwise from stdin.
	var inputFile = os.Stdin
	if *file != "" {
		var err error
		inputFile, err = os.Open(*file)
		if err != nil {
			log.Fatalln(err)
		}
		defer inputFile.Close()
	}

	// One lookup per input line.
	in := bufio.NewScanner(inputFile)
	for in.Scan() {
		if f(in.Text()) {
			miss++
		}
		count++
	}

	fmt.Printf("%s: %s %d total %d misses (hit rate %d %%)\n", *alg, time.Since(t0), count, miss, int(100*(float64(count-miss)/float64(count))))
}
|
[
4
] |
package generators
import (
"github.com/jpg013/go_stream/types"
)
// NumberGenerator generates an incremented integer each time.
type NumberGenerator struct {
	iter int // next value to emit; NewNumberGenerator starts it at 1
	max  int // inclusive upper bound of the sequence
}

// Next returns the next integer in the sequence (1..max) as a chunk.
// Once the sequence is exhausted it returns a nil chunk with a nil error,
// which presumably signals end-of-stream to the consumer —
// NOTE(review): confirm against the stream implementation.
func (g *NumberGenerator) Next() (types.Chunk, error) {
	if g.iter > g.max {
		return nil, nil
	}
	chunk := g.iter
	g.iter++
	return chunk, nil
}
// NewNumberGenerator builds a generator that yields the integers 1 through
// max, one per call to Next. It never returns an error.
func NewNumberGenerator(max int) (Type, error) {
	gen := &NumberGenerator{}
	gen.iter = 1
	gen.max = max
	return gen, nil
}
|
[
1
] |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// Code generated by mockery v2.32.4. DO NOT EDIT.
package spi
import (
transports "github.com/apache/plc4x/plc4go/spi/transports"
mock "github.com/stretchr/testify/mock"
)
// MockTransportInstanceExposer is an autogenerated mock type for the TransportInstanceExposer type
type MockTransportInstanceExposer struct {
	mock.Mock
}

// MockTransportInstanceExposer_Expecter provides a typed, fluent API for
// registering expectations against the mock.
type MockTransportInstanceExposer_Expecter struct {
	mock *mock.Mock
}

// EXPECT returns an expecter bound to this mock's underlying mock.Mock.
func (_m *MockTransportInstanceExposer) EXPECT() *MockTransportInstanceExposer_Expecter {
	return &MockTransportInstanceExposer_Expecter{mock: &_m.Mock}
}
// GetTransportInstance provides a mock function with given fields:
func (_m *MockTransportInstanceExposer) GetTransportInstance() transports.TransportInstance {
	ret := _m.Called()

	var r0 transports.TransportInstance
	// Prefer a function-valued return (set via RunAndReturn); otherwise fall
	// back to the statically configured value, leaving r0 nil when unset.
	if rf, ok := ret.Get(0).(func() transports.TransportInstance); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(transports.TransportInstance)
		}
	}

	return r0
}
// MockTransportInstanceExposer_GetTransportInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetTransportInstance'
type MockTransportInstanceExposer_GetTransportInstance_Call struct {
	*mock.Call
}

// GetTransportInstance is a helper method to define mock.On call
func (_e *MockTransportInstanceExposer_Expecter) GetTransportInstance() *MockTransportInstanceExposer_GetTransportInstance_Call {
	return &MockTransportInstanceExposer_GetTransportInstance_Call{Call: _e.mock.On("GetTransportInstance")}
}

// Run registers a callback invoked whenever the mocked method is called.
func (_c *MockTransportInstanceExposer_GetTransportInstance_Call) Run(run func()) *MockTransportInstanceExposer_GetTransportInstance_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run()
	})
	return _c
}

// Return sets the value the mocked method returns.
func (_c *MockTransportInstanceExposer_GetTransportInstance_Call) Return(_a0 transports.TransportInstance) *MockTransportInstanceExposer_GetTransportInstance_Call {
	_c.Call.Return(_a0)
	return _c
}

// RunAndReturn makes the mocked method compute its return value via run on
// each call.
func (_c *MockTransportInstanceExposer_GetTransportInstance_Call) RunAndReturn(run func() transports.TransportInstance) *MockTransportInstanceExposer_GetTransportInstance_Call {
	_c.Call.Return(run)
	return _c
}
// NewMockTransportInstanceExposer creates a new instance of MockTransportInstanceExposer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockTransportInstanceExposer(t interface {
	mock.TestingT
	Cleanup(func())
}) *MockTransportInstanceExposer {
	// Note: the local variable `mock` shadows the imported mock package for
	// the remainder of this function.
	mock := &MockTransportInstanceExposer{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
|
[
4
] |
package parallel
import (
"bytes"
"net/http"
"github.com/minight/h2csmuggler/http2"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// res bundles the outcome of one request against a single target: the
// response (if any), its already-consumed body, and any transport error.
type res struct {
	target string
	res    *http.Response // response.Body is already read and closed and stored on body
	body   []byte
	err    error
}

// IsNil reports whether this result carries neither an error nor a response.
func (r *res) IsNil() bool {
	if r.err != nil {
		return false
	}
	return r.res == nil
}
// Log emits this result: the raw details at debug level, then either an
// error entry (distinguishing unexpected-status-code failures from other
// errors) or an info entry on success. source labels which connection kind
// (http2 vs h2c) produced the result.
// Fix: corrected the misspelled debug message "recieved" -> "received".
func (r *res) Log(source string) {
	log.WithFields(log.Fields{
		"body":   string(r.body),
		"target": r.target,
		"res":    r.res,
		"err":    r.err,
	}).Debugf("received")
	if r.err != nil {
		var uscErr http2.UnexpectedStatusCodeError
		if errors.As(r.err, &uscErr) {
			log.WithFields(log.Fields{
				"status": uscErr.Code,
				"target": r.target,
				"source": source,
			}).Errorf("unexpected status code")
		} else {
			log.WithField("target", r.target).WithError(r.err).Errorf("failed")
		}
	} else {
		log.WithFields(log.Fields{
			"status":  r.res.StatusCode,
			"headers": r.res.Header,
			"body":    len(r.body),
			"target":  r.target,
			"source":  source,
		}).Infof("success")
	}
}
// Diff pairs the normal HTTP/2 response with the h2c response for one target.
// Either side may be nil until that variant has been observed.
type Diff struct {
	HTTP2 *res
	H2C   *res
}

// ResponseDiff caches results per target until both the http2 and h2c sides
// have arrived, at which point their differences can be reported.
type ResponseDiff struct {
	cache        map[string]*Diff
	DeleteOnShow bool // if enabled, results will be cleared from the cache once shown
}
// NewDiffer creates a ResponseDiff with an empty cache. DeleteOnShow
// controls whether a target's entry is dropped after its diff is shown.
func NewDiffer(DeleteOnShow bool) *ResponseDiff {
	d := &ResponseDiff{cache: map[string]*Diff{}}
	d.DeleteOnShow = DeleteOnShow
	return d
}
// ShowDiffH2C records http2res as the h2c side of its target's diff and, if
// the http2 side has also been seen, logs the differences between the two.
// If the counterpart response is not cached yet, this does nothing.
func (r *ResponseDiff) ShowDiffH2C(http2res *res) {
	d := r.diffH2C(http2res)
	if d.H2C != nil && d.HTTP2 != nil {
		r.diffHosts(d)
	}
}
// ShowDiffHTTP2 records http2res as the http2 side of its target's diff and,
// if the h2c side has also been seen, logs the differences between the two.
// If the counterpart response is not cached yet, this does nothing.
func (r *ResponseDiff) ShowDiffHTTP2(http2res *res) {
	d := r.diffHTTP2(http2res)
	if d.H2C != nil && d.HTTP2 != nil {
		r.diffHosts(d)
	}
}
// diffHosts compares the http2 and h2c sides of a completed Diff and logs a
// "results differ" entry when errors, status codes, header counts, or body
// lengths disagree. Body contents are only attached at debug level. When
// DeleteOnShow is set, the target's cache entry is removed afterwards.
func (r *ResponseDiff) diffHosts(d *Diff) {
	log.Tracef("got d: %+v", d)
	log.Tracef("r is :%+v", r)
	diff := false
	fields := log.Fields{}
	debugFields := log.Fields{}
	// NOTE(review): this compares the two error interface values directly
	// (identity/equality), not via errors.Is — two distinct errors with the
	// same message still count as different, and two nils count as equal.
	if d.HTTP2.err != d.H2C.err {
		diff = true
		// Only one side is expected to have failed here; the succeeding
		// side's response is dereferenced without a nil check — presumably
		// err and res are mutually exclusive on a res. TODO confirm.
		if d.H2C.err != nil {
			fields["normal-status-code"] = d.HTTP2.res.StatusCode
			fields["normal-response-body-len"] = len(d.HTTP2.body)
			fields["host"] = d.HTTP2.res.Request.Host
			fields["h2c-error"] = d.H2C.err
		}
		if d.HTTP2.err != nil {
			fields["h2c-status-code"] = d.H2C.res.StatusCode
			fields["h2c-response-body-len"] = len(d.H2C.body)
			fields["host"] = d.H2C.res.Request.Host
			fields["normal-error"] = d.HTTP2.err
		}
	}
	if d.HTTP2.res != nil && d.H2C.res != nil {
		fields["host"] = d.H2C.res.Request.Host
		if d.HTTP2.res.StatusCode != d.H2C.res.StatusCode {
			diff = true
			fields["normal-status-code"] = d.HTTP2.res.StatusCode
			fields["h2c-status-code"] = d.H2C.res.StatusCode
		}
		// Header comparison is only triggered when the header COUNTS differ;
		// same-count-but-different-values headers are not flagged.
		if len(d.HTTP2.res.Header) != len(d.H2C.res.Header) {
			diff = true
			// Partition headers into shared (same value count on both sides),
			// http2-only-ish, and h2c-only-ish buckets for the log entry.
			sharedHeaders := http.Header{}
			http2Headers := http.Header{}
			h2cHeaders := http.Header{}
			seen := map[string]struct{}{}
			for k, v := range d.HTTP2.res.Header {
				h2cv := d.H2C.res.Header.Values(k)
				if len(v) != len(h2cv) {
					for _, vv := range v {
						http2Headers.Add(k, vv)
					}
					for _, vv := range h2cv {
						h2cHeaders.Add(k, vv)
					}
				} else {
					for _, vv := range v {
						sharedHeaders.Add(k, vv)
					}
				}
				seen[k] = struct{}{}
			}
			// Headers present only on the h2c side.
			for k, v := range d.H2C.res.Header {
				_, ok := seen[k]
				if ok {
					continue
				}
				for _, vv := range v {
					h2cHeaders.Add(k, vv)
				}
			}
			fields["normal-headers"] = http2Headers
			fields["same-headers"] = sharedHeaders
			fields["h2c-headers"] = h2cHeaders
		}
		if len(d.HTTP2.body) != len(d.H2C.body) {
			diff = true
			fields["normal-response-body-len"] = len(d.HTTP2.body)
			fields["h2c-response-body-len"] = len(d.H2C.body)
		}
		// Note: differing body contents with equal lengths populate the
		// debug fields but do NOT set diff, so alone they won't be reported.
		if bytes.Compare(d.HTTP2.body, d.H2C.body) != 0 {
			debugFields["normal-body"] = string(d.HTTP2.body)
			debugFields["h2c-body"] = string(d.H2C.body)
		}
	}
	if diff {
		switch log.GetLevel() {
		case log.InfoLevel:
			log.WithFields(fields).Infof("results differ")
		default:
			log.WithFields(fields).WithFields(debugFields).Debugf("results differ")
		}
	}
	if r.DeleteOnShow {
		delete(r.cache, d.HTTP2.target)
	}
}
// diffHTTP2 stores http2res as the HTTP2 side of its target's cached diff,
// creating the cache entry if this is the first result for the target, and
// returns that diff.
func (r *ResponseDiff) diffHTTP2(http2res *res) (d *Diff) {
	diff, ok := r.cache[http2res.target]
	if !ok {
		diff = &Diff{}
		r.cache[http2res.target] = diff
	}
	diff.HTTP2 = http2res
	return diff
}
// diffH2C stores h2cres as the H2C side of its target's cached diff,
// creating the cache entry if this is the first result for the target, and
// returns that diff.
func (r *ResponseDiff) diffH2C(h2cres *res) (d *Diff) {
	diff, ok := r.cache[h2cres.target]
	if !ok {
		diff = &Diff{}
		r.cache[h2cres.target] = diff
	}
	diff.H2C = h2cres
	return diff
}
|
[
2
] |
package pdf
import (
"context"
"encoding/json"
"github.com/jung-kurt/gofpdf"
// "github.com/rainycape/unidecode"
"github.com/sfomuseum/go-font-ocra"
"log"
"strings"
"sync"
)
// BookOptions configures page geometry, font, and record formatting for a
// Book. Width/Height are only consulted when Size == "custom".
type BookOptions struct {
	Orientation     string  // gofpdf orientation string, e.g. "P" or "L"
	Size            string  // named page size ("letter", ...) or "custom"
	Width           float64 // custom page width in inches (Size == "custom")
	Height          float64 // custom page height in inches (Size == "custom")
	DPI             float64
	Border          float64
	FontSize        float64
	Debug           bool
	OCRA            bool // use the embedded OCR-A font instead of Courier
	RecordSeparator string
}

// BookBorder holds the page margins, as computed by NewBook.
type BookBorder struct {
	Top    float64
	Bottom float64
	Left   float64
	Right  float64
}

// BookCanvas is the writable page area left inside the borders.
type BookCanvas struct {
	Width  float64
	Height float64
}

// Book wraps a gofpdf document; Mutex serializes AddRecord calls so the
// book can be written from multiple goroutines.
type Book struct {
	PDF     *gofpdf.Fpdf
	Mutex   *sync.Mutex
	Border  BookBorder
	Canvas  BookCanvas
	Options *BookOptions
	pages   int
}
// NewDefaultBookOptions returns options for a portrait US-letter book at
// 150 DPI with a 12pt font and the default record separator.
func NewDefaultBookOptions() *BookOptions {
	opts := new(BookOptions)
	opts.Orientation = "P"
	opts.Size = "letter"
	opts.Width = 0.0
	opts.Height = 0.0
	opts.DPI = 150.0
	opts.Border = 0.01
	opts.Debug = false
	opts.FontSize = 12.0
	opts.RecordSeparator = "RECORDSEPARATOR"
	return opts
}
// NewBook creates a Book from the given options: it builds the underlying
// gofpdf document (custom-sized or named size), selects the OCR-A or Courier
// font, computes borders and the writable canvas, and adds the first page.
// NOTE(review): opts.Border is never consulted here — the margins below are
// hard-coded multiples of one inch at opts.DPI; confirm whether Border is
// dead or consumed elsewhere.
func NewBook(opts *BookOptions) (*Book, error) {
	var pdf *gofpdf.Fpdf
	if opts.Size == "custom" {
		// Custom page geometry in inches.
		sz := gofpdf.SizeType{
			Wd: opts.Width,
			Ht: opts.Height,
		}
		init := gofpdf.InitType{
			OrientationStr: opts.Orientation,
			UnitStr:        "in",
			SizeStr:        "",
			Size:           sz,
			FontDirStr:     "",
		}
		pdf = gofpdf.NewCustom(&init)
	} else {
		pdf = gofpdf.New(opts.Orientation, "in", opts.Size, "")
	}
	if opts.OCRA {
		// Embed the OCR-A font shipped with go-font-ocra.
		font, err := ocra.LoadFPDFFont()
		if err != nil {
			return nil, err
		}
		pdf.AddFontFromBytes(font.Family, font.Style, font.JSON, font.Z)
		pdf.SetFont(font.Family, "", opts.FontSize)
	} else {
		pdf.SetFont("Courier", "", opts.FontSize)
	}
	// Page and border sizes below are in pixels (inches * DPI), not in the
	// document's inch units — presumably consumed by pixel-based layout code.
	w, h, _ := pdf.PageSize(1)
	page_w := w * opts.DPI
	page_h := h * opts.DPI
	// One inch top margin; bottom is 1.5x, left/right equal to the top.
	border_top := 1.0 * opts.DPI
	border_bottom := border_top * 1.5
	border_left := border_top * 1.0
	border_right := border_top * 1.0
	canvas_w := page_w - (border_left + border_right)
	canvas_h := page_h - (border_top + border_bottom)
	pdf.AddPage()
	b := BookBorder{
		Top:    border_top,
		Bottom: border_bottom,
		Left:   border_left,
		Right:  border_right,
	}
	c := BookCanvas{
		Width:  canvas_w,
		Height: canvas_h,
	}
	mu := new(sync.Mutex)
	pb := Book{
		PDF:     pdf,
		Mutex:   mu,
		Border:  b,
		Canvas:  c,
		Options: opts,
		pages:   0,
	}
	return &pb, nil
}
// AddRecord validates body as JSON, re-serializes it to a compact single
// line, and appends it to the PDF followed by the configured record
// separator. Safe for concurrent use; the context is currently unused.
func (bk *Book) AddRecord(ctx context.Context, body []byte) error {
	var stub interface{}
	if err := json.Unmarshal(body, &stub); err != nil {
		return err
	}
	enc, err := json.Marshal(stub)
	if err != nil {
		return err
	}
	text := strings.Replace(string(enc), "\n", "", -1)

	bk.Mutex.Lock()
	defer bk.Mutex.Unlock()

	_, fontHeight := bk.PDF.GetFontSize()
	lineHeight := fontHeight * 1.3
	bk.PDF.MultiCell(0, lineHeight, text, "", "left", false)
	bk.PDF.MultiCell(0, lineHeight, bk.Options.RecordSeparator, "", "", false)
	return nil
}
// Save writes the PDF document to path and closes it. When the Debug option
// is set, the destination path is logged first.
func (bk *Book) Save(path string) error {
	if bk.Options.Debug {
		log.Printf("save %s\n", path)
	}
	return bk.PDF.OutputFileAndClose(path)
}
|
[
0
] |
package main
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
// sc scans standard input line by line; shared by the input helpers below.
var sc = bufio.NewScanner(os.Stdin)

// nextLine returns the next line of standard input (the empty string once
// input is exhausted, since Scan's result is not checked).
func nextLine() string {
	sc.Scan()
	return sc.Text()
}
// splitIntStdin converts a space-separated line of integers into a slice.
// On a conversion failure it returns the values parsed so far together with
// the error.
func splitIntStdin(line string) (intSlices []int, err error) {
	for _, token := range strings.Split(line, " ") {
		var value int
		value, err = strconv.Atoi(token)
		if err != nil {
			return
		}
		intSlices = append(intSlices, value)
	}
	return
}
// splitFloatStdin converts a space-separated line of numbers into a slice
// of float64. On a conversion failure it returns the values parsed so far
// together with the error.
func splitFloatStdin(line string) (floatSlices []float64, err error) {
	for _, token := range strings.Split(line, " ") {
		var value float64
		value, err = strconv.ParseFloat(token, 64)
		if err != nil {
			return
		}
		floatSlices = append(floatSlices, value)
	}
	return
}
// main reads six numbers from one line of stdin and prints the area of the
// triangle they describe. Based on the index pairing below the input is
// presumably x1 x2 y1 y2 z1 z2, i.e. three points (x1,x2), (y1,y2), (z1,z2)
// — TODO confirm against the problem statement.
func main() {
	l1 := nextLine()
	vals, _ := splitFloatStdin(l1)
	var x1 = vals[0]
	var x2 = vals[1]
	var y1 = vals[2]
	var y2 = vals[3]
	var z1 = vals[4]
	var z2 = vals[5]
	// Translate so the first point is the origin, leaving two edge vectors.
	y1 = y1 - x1
	z1 = z1 - x1
	y2 = y2 - x2
	z2 = z2 - x2
	// Half the 2D cross product of the edge vectors is the signed area.
	var result = (y1*z2 - y2*z1) / 2
	// Print the absolute value of the signed area.
	if result > 0 {
		fmt.Println(result)
	} else {
		fmt.Println(result * -1)
	}
}
|
[
0
] |
/*
MIT License
Copyright (c) 2018 Victor Springer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package memory
import (
"errors"
"fmt"
"sync"
"time"
cache "github.com/victorspringer/http-cache"
)
// Algorithm is the string type for caching algorithms labels. It selects
// which entry evict removes when the adapter reaches capacity.
type Algorithm string

const (
	// LRU is the constant for Least Recently Used.
	LRU Algorithm = "LRU"
	// MRU is the constant for Most Recently Used.
	MRU Algorithm = "MRU"
	// LFU is the constant for Least Frequently Used.
	LFU Algorithm = "LFU"
	// MFU is the constant for Most Frequently Used.
	MFU Algorithm = "MFU"
)
// Adapter is the memory adapter data structure: a capacity-bounded
// map of serialized responses guarded by a read-write mutex.
type Adapter struct {
	mutex     sync.RWMutex
	capacity  int       // maximum number of entries before eviction
	algorithm Algorithm // eviction policy used when capacity is reached
	store     map[uint64][]byte
}

// AdapterOptions is used to set Adapter settings.
type AdapterOptions func(a *Adapter) error
// Get implements the cache Adapter interface Get method. It returns the
// cached response bytes for key and whether the key was present.
func (a *Adapter) Get(key uint64) ([]byte, bool) {
	a.mutex.RLock()
	defer a.mutex.RUnlock()

	response, ok := a.store[key]
	if !ok {
		return nil, false
	}
	return response, true
}
// Set implements the cache Adapter interface Set method. The expiration
// argument is accepted for interface compatibility but is not used here;
// eviction is purely capacity-driven.
func (a *Adapter) Set(key uint64, response []byte, expiration time.Time) {
	a.mutex.Lock()
	defer a.mutex.Unlock()
	if _, ok := a.store[key]; ok {
		// Known key, overwrite previous item.
		a.store[key] = response
		return
	}
	// New key, make sure we have the capacity. Using >= (rather than the
	// previous ==) keeps the guard effective even if the store ever ends up
	// above capacity.
	if len(a.store) >= a.capacity {
		a.evict()
	}
	a.store[key] = response
}
// Release implements the Adapter interface Release method, removing key
// from the store if present.
//
// The previous implementation checked for the key under a read lock and then
// re-acquired a write lock to delete, leaving a race window between the two
// locks. Deleting under a single write lock is equivalent (delete on a
// missing key is a no-op) and race-free.
func (a *Adapter) Release(key uint64) {
	a.mutex.Lock()
	defer a.mutex.Unlock()
	delete(a.store, key)
}
// evict removes a single entry from the store. It assumes that the caller holds
// the write lock. The victim is chosen by scanning every entry according to
// the configured algorithm; ties are broken by Go's randomized map iteration
// order.
func (a *Adapter) evict() {
	selectedKey := uint64(0)
	lastAccess := time.Now()
	// 2147483647 == math.MaxInt32: sentinel so any real frequency is lower (LFU).
	frequency := 2147483647

	if a.algorithm == MRU {
		// MRU searches for the most recent access, so start from the zero time.
		lastAccess = time.Time{}
	} else if a.algorithm == MFU {
		// MFU searches for the highest frequency, so start from zero.
		frequency = 0
	}

	for k, v := range a.store {
		r := cache.BytesToResponse(v)
		switch a.algorithm {
		case LRU:
			if r.LastAccess.Before(lastAccess) {
				selectedKey = k
				lastAccess = r.LastAccess
			}
		case MRU:
			if r.LastAccess.After(lastAccess) ||
				r.LastAccess.Equal(lastAccess) {
				selectedKey = k
				lastAccess = r.LastAccess
			}
		case LFU:
			if r.Frequency < frequency {
				selectedKey = k
				frequency = r.Frequency
			}
		case MFU:
			// >= means later-iterated keys win ties.
			if r.Frequency >= frequency {
				selectedKey = k
				frequency = r.Frequency
			}
		}
	}

	delete(a.store, selectedKey)
}
// NewAdapter initializes memory adapter. It applies the provided options and
// validates that both a capacity (greater than 1) and an eviction algorithm
// were configured before allocating the store.
func NewAdapter(opts ...AdapterOptions) (cache.Adapter, error) {
	a := &Adapter{}
	for _, opt := range opts {
		if err := opt(a); err != nil {
			return nil, err
		}
	}
	if a.capacity <= 1 {
		return nil, errors.New("memory adapter capacity is not set")
	}
	if a.algorithm == "" {
		return nil, errors.New("memory adapter caching algorithm is not set")
	}
	// The zero value of sync.RWMutex is ready to use, so the previous
	// explicit `a.mutex = sync.RWMutex{}` reassignment was redundant and has
	// been removed; only the store needs initialization.
	a.store = make(map[uint64][]byte, a.capacity)
	return a, nil
}
// AdapterWithAlgorithm sets the approach used to select a cached
// response to be evicted when the capacity is reached. This option
// never fails.
func AdapterWithAlgorithm(alg Algorithm) AdapterOptions {
	return func(a *Adapter) error {
		a.algorithm = alg
		return nil
	}
}
// AdapterWithCapacity sets the maximum number of cached responses.
// Capacities of 1 or less are rejected when the option is applied.
func AdapterWithCapacity(cap int) AdapterOptions {
	return func(a *Adapter) error {
		if cap > 1 {
			a.capacity = cap
			return nil
		}
		return fmt.Errorf("memory adapter requires a capacity greater than %v", cap)
	}
}
|
[
1
] |
package command
import (
"fmt"
"strings"
"sync"
"time"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/mitchellh/cli"
)
const (
	// updateWait is the amount of time to wait between status
	// updates. Because the monitor is poll-based, we use this
	// delay to avoid overwhelming the API server.
	updateWait = time.Second
)
// evalState is used to store the current "state of the world"
// in the context of monitoring an evaluation.
type evalState struct {
	status string // evaluation status
	desc   string // status description
	node   string // triggering node ID, if any
	job    string // triggering job ID, if any
	allocs map[string]*allocState // allocations keyed by alloc ID
	wait   time.Duration
	index  uint64 // evaluation create index
}
// newEvalState creates and initializes a new monitorState
func newEvalState() *evalState {
	s := &evalState{}
	s.status = structs.EvalStatusPending
	s.allocs = map[string]*allocState{}
	return s
}
// allocState is used to track the state of an allocation
type allocState struct {
	id          string // allocation ID
	group       string // task group name
	node        string // node the alloc is placed on
	desired     string // desired status
	desiredDesc string // desired status description
	client      string // client status
	clientDesc  string // client status description
	index       uint64 // allocation create index

	// full is the allocation struct with full details. This
	// must be queried for explicitly so it is only included
	// if there is important error information inside.
	full *api.Allocation
}
// monitor wraps an evaluation monitor and holds metadata and
// state information. The embedded mutex guards state updates.
type monitor struct {
	ui     cli.Ui
	client *api.Client
	state  *evalState

	// length determines the number of characters for identifiers in the ui.
	length int

	sync.Mutex
}
// newMonitor returns a new monitor. The returned monitor will
// write output information to the provided ui. The length parameter determines
// the number of characters for identifiers in the ui.
func newMonitor(ui cli.Ui, client *api.Client, length int) *monitor {
	prefixed := &cli.PrefixedUi{
		InfoPrefix:   "==> ",
		OutputPrefix: " ",
		ErrorPrefix:  "==> ",
		Ui:           ui,
	}
	return &monitor{
		ui:     prefixed,
		client: client,
		state:  newEvalState(),
		length: length,
	}
}
// update is used to update our monitor with new state. It can be
// called whether the passed information is new or not, and will
// only dump update messages when state changes.
func (m *monitor) update(update *evalState) {
	m.Lock()
	defer m.Unlock()

	existing := m.state

	// Swap in the new state at the end
	defer func() {
		m.state = update
	}()

	// Check if the evaluation was triggered by a node
	if existing.node == "" && update.node != "" {
		m.ui.Output(fmt.Sprintf("Evaluation triggered by node %q",
			limit(update.node, m.length)))
	}

	// Check if the evaluation was triggered by a job
	if existing.job == "" && update.job != "" {
		m.ui.Output(fmt.Sprintf("Evaluation triggered by job %q", update.job))
	}

	// Check the allocations
	for allocID, alloc := range update.allocs {
		// Note: this `existing` intentionally shadows the outer evalState
		// with the previously-known allocState for this ID.
		if existing, ok := existing.allocs[allocID]; !ok {
			switch {
			case alloc.index < update.index:
				// New alloc with create index lower than the eval
				// create index indicates modification
				m.ui.Output(fmt.Sprintf(
					"Allocation %q modified: node %q, group %q",
					limit(alloc.id, m.length), limit(alloc.node, m.length), alloc.group))

			case alloc.desired == structs.AllocDesiredStatusRun:
				// New allocation with desired status running
				m.ui.Output(fmt.Sprintf(
					"Allocation %q created: node %q, group %q",
					limit(alloc.id, m.length), limit(alloc.node, m.length), alloc.group))
			}
		} else {
			switch {
			case existing.client != alloc.client:
				description := ""
				if alloc.clientDesc != "" {
					description = fmt.Sprintf(" (%s)", alloc.clientDesc)
				}
				// Allocation status has changed
				m.ui.Output(fmt.Sprintf(
					"Allocation %q status changed: %q -> %q%s",
					limit(alloc.id, m.length), existing.client, alloc.client, description))
			}
		}
	}

	// Check if the status changed. We skip any transitions to pending status.
	// NOTE(review): the constant compared here is an allocation client status
	// (AllocClientStatusPending) while update.status holds an evaluation
	// status; the string values may coincide, but confirm that
	// structs.EvalStatusPending was not intended.
	if existing.status != "" &&
		update.status != structs.AllocClientStatusPending &&
		existing.status != update.status {
		m.ui.Output(fmt.Sprintf("Evaluation status changed: %q -> %q",
			existing.status, update.status))
	}
}
// monitor is used to start monitoring the given evaluation ID. It
// writes output directly to the monitor's ui, and returns the
// exit code for the command. If allowPrefix is false, monitor will only accept
// exact matching evalIDs.
//
// The return code will be 0 on successful evaluation. If there are
// problems scheduling the job (impossible constraints, resources
// exhausted, etc), then the return code will be 2. For any other
// failures (API connectivity, internal errors, etc), the return code
// will be 1.
func (m *monitor) monitor(evalID string, allowPrefix bool) int {
	// Track if we encounter a scheduling failure. This can only be
	// detected while querying allocations, so we use this bool to
	// carry that status into the return code.
	var schedFailure bool

	// The user may have specified a prefix as eval id. We need to lookup the
	// full id from the database first. Since we do this in a loop we need a
	// variable to keep track if we've already written the header message.
	var headerWritten bool

	// Add the initial pending state
	m.update(newEvalState())

	for {
		// Query the evaluation
		eval, _, err := m.client.Evaluations().Info(evalID, nil)
		if err != nil {
			if !allowPrefix {
				m.ui.Error(fmt.Sprintf("No evaluation with id %q found", evalID))
				return 1
			}
			if len(evalID) == 1 {
				m.ui.Error(fmt.Sprintf("Identifier must contain at least two characters."))
				return 1
			}
			if len(evalID)%2 == 1 {
				// Identifiers must be of even length, so we strip off the last byte
				// to provide a consistent user experience.
				evalID = evalID[:len(evalID)-1]
			}

			evals, _, err := m.client.Evaluations().PrefixList(evalID)
			if err != nil {
				m.ui.Error(fmt.Sprintf("Error reading evaluation: %s", err))
				return 1
			}
			if len(evals) == 0 {
				m.ui.Error(fmt.Sprintf("No evaluation(s) with prefix or id %q found", evalID))
				return 1
			}
			if len(evals) > 1 {
				// Format the evaluations
				out := make([]string, len(evals)+1)
				out[0] = "ID|Priority|Type|Triggered By|Status"
				for i, eval := range evals {
					out[i+1] = fmt.Sprintf("%s|%d|%s|%s|%s",
						limit(eval.ID, m.length),
						eval.Priority,
						eval.Type,
						eval.TriggeredBy,
						eval.Status)
				}
				m.ui.Output(fmt.Sprintf("Prefix matched multiple evaluations\n\n%s", formatList(out)))
				return 0
			}
			// Prefix lookup matched a single evaluation
			eval, _, err = m.client.Evaluations().Info(evals[0].ID, nil)
			if err != nil {
				m.ui.Error(fmt.Sprintf("Error reading evaluation: %s", err))
				// Fix: previously this fell through with a nil eval, causing
				// a nil-pointer dereference at eval.ID below. Bail out like
				// the other API error paths.
				return 1
			}
		}

		if !headerWritten {
			m.ui.Info(fmt.Sprintf("Monitoring evaluation %q", limit(eval.ID, m.length)))
			headerWritten = true
		}

		// Create the new eval state.
		state := newEvalState()
		state.status = eval.Status
		state.desc = eval.StatusDescription
		state.node = eval.NodeID
		state.job = eval.JobID
		state.wait = eval.Wait
		state.index = eval.CreateIndex

		// Query the allocations associated with the evaluation
		allocs, _, err := m.client.Evaluations().Allocations(eval.ID, nil)
		if err != nil {
			m.ui.Error(fmt.Sprintf("Error reading allocations: %s", err))
			return 1
		}

		// Add the allocs to the state
		for _, alloc := range allocs {
			state.allocs[alloc.ID] = &allocState{
				id:          alloc.ID,
				group:       alloc.TaskGroup,
				node:        alloc.NodeID,
				desired:     alloc.DesiredStatus,
				desiredDesc: alloc.DesiredDescription,
				client:      alloc.ClientStatus,
				clientDesc:  alloc.ClientDescription,
				index:       alloc.CreateIndex,
			}
		}

		// Update the state
		m.update(state)

		switch eval.Status {
		case structs.EvalStatusComplete, structs.EvalStatusFailed, structs.EvalStatusCancelled:
			if len(eval.FailedTGAllocs) == 0 {
				m.ui.Info(fmt.Sprintf("Evaluation %q finished with status %q",
					limit(eval.ID, m.length), eval.Status))
			} else {
				// There were failures making the allocations
				schedFailure = true
				m.ui.Info(fmt.Sprintf("Evaluation %q finished with status %q but failed to place all allocations:",
					limit(eval.ID, m.length), eval.Status))

				// Print the failures per task group
				for tg, metrics := range eval.FailedTGAllocs {
					noun := "allocation"
					if metrics.CoalescedFailures > 0 {
						noun += "s"
					}
					m.ui.Output(fmt.Sprintf("Task Group %q (failed to place %d %s):", tg, metrics.CoalescedFailures+1, noun))
					metrics := formatAllocMetrics(metrics, false, "  ")
					for _, line := range strings.Split(metrics, "\n") {
						m.ui.Output(line)
					}
				}

				if eval.BlockedEval != "" {
					m.ui.Output(fmt.Sprintf("Evaluation %q waiting for additional capacity to place remainder",
						limit(eval.BlockedEval, m.length)))
				}
			}
		default:
			// Wait for the next update
			time.Sleep(updateWait)
			continue
		}

		// Monitor the next eval in the chain, if present
		if eval.NextEval != "" {
			if eval.Wait.Nanoseconds() != 0 {
				m.ui.Info(fmt.Sprintf(
					"Monitoring next evaluation %q in %s",
					limit(eval.NextEval, m.length), eval.Wait))

				// Skip some unnecessary polling
				time.Sleep(eval.Wait)
			}

			// Reset the state and monitor the new eval
			m.state = newEvalState()
			return m.monitor(eval.NextEval, allowPrefix)
		}
		break
	}

	// Treat scheduling failures specially using a dedicated exit code.
	// This makes it easier to detect failures from the CLI.
	if schedFailure {
		return 2
	}
	return 0
}
// dumpAllocStatus is a helper to generate a more user-friendly error message
// for scheduling failures, displaying a high level status of why the job
// could not be scheduled out. It prints the allocation's filter statistics
// followed by its placement metrics (including scores).
func dumpAllocStatus(ui cli.Ui, alloc *api.Allocation, length int) {
	// Print filter stats
	ui.Output(fmt.Sprintf("Allocation %q status %q (%d/%d nodes filtered)",
		limit(alloc.ID, length), alloc.ClientStatus,
		alloc.Metrics.NodesFiltered, alloc.Metrics.NodesEvaluated))
	ui.Output(formatAllocMetrics(alloc.Metrics, true, "  "))
}
// formatAllocMetrics renders an AllocationMetric as a newline-separated,
// prefix-indented report covering eligibility, datacenter availability,
// class/constraint filtering, resource exhaustion and (optionally) node
// scores. The trailing newline is trimmed.
//
// Improvement: builds the report with strings.Builder instead of repeated
// string concatenation, which was quadratic in the number of lines.
func formatAllocMetrics(metrics *api.AllocationMetric, scores bool, prefix string) string {
	var b strings.Builder
	// Eligibility problem: nothing was even evaluated.
	if metrics.NodesEvaluated == 0 {
		fmt.Fprintf(&b, "%s* No nodes were eligible for evaluation\n", prefix)
	}
	// Datacenters the user asked for that currently have zero nodes.
	for dc, available := range metrics.NodesAvailable {
		if available == 0 {
			fmt.Fprintf(&b, "%s* No nodes are available in datacenter %q\n", prefix, dc)
		}
	}
	// Nodes removed by class and constraint filters.
	for class, num := range metrics.ClassFiltered {
		fmt.Fprintf(&b, "%s* Class %q filtered %d nodes\n", prefix, class, num)
	}
	for cs, num := range metrics.ConstraintFiltered {
		fmt.Fprintf(&b, "%s* Constraint %q filtered %d nodes\n", prefix, cs, num)
	}
	// Resource exhaustion, overall and broken down by class/dimension.
	if ne := metrics.NodesExhausted; ne > 0 {
		fmt.Fprintf(&b, "%s* Resources exhausted on %d nodes\n", prefix, ne)
	}
	for class, num := range metrics.ClassExhausted {
		fmt.Fprintf(&b, "%s* Class %q exhausted on %d nodes\n", prefix, class, num)
	}
	for dim, num := range metrics.DimensionExhausted {
		fmt.Fprintf(&b, "%s* Dimension %q exhausted on %d nodes\n", prefix, dim, num)
	}
	// Per-node scores are only wanted for single-allocation dumps.
	if scores {
		for name, score := range metrics.Scores {
			fmt.Fprintf(&b, "%s* Score %q = %f\n", prefix, name, score)
		}
	}
	return strings.TrimSuffix(b.String(), "\n")
}
|
[
7
] |
// Code generated by mockery v2.28.1. DO NOT EDIT.
package mocks
import (
common "github.com/ethereum/go-ethereum/common"
assets "github.com/smartcontractkit/chainlink/v2/core/assets"
config "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config"
mock "github.com/stretchr/testify/mock"
)
// GasEstimator is an autogenerated mock type for the GasEstimator type
//
// NOTE(review): this file is generated by mockery (see the header) — do not
// hand-edit; regenerate with mockery if the GasEstimator interface changes.
// Every method below follows the standard mockery pattern: if the
// expectation was registered with a typed return function, call it;
// otherwise fall back to the recorded static return value, guarding nil
// before the type assertion for pointer/interface returns.
type GasEstimator struct {
	mock.Mock
}

// BlockHistory provides a mock function with given fields:
func (_m *GasEstimator) BlockHistory() config.BlockHistory {
	ret := _m.Called()
	var r0 config.BlockHistory
	if rf, ok := ret.Get(0).(func() config.BlockHistory); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(config.BlockHistory)
		}
	}
	return r0
}

// BumpMin provides a mock function with given fields:
func (_m *GasEstimator) BumpMin() *assets.Wei {
	ret := _m.Called()
	var r0 *assets.Wei
	if rf, ok := ret.Get(0).(func() *assets.Wei); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*assets.Wei)
		}
	}
	return r0
}

// BumpPercent provides a mock function with given fields:
func (_m *GasEstimator) BumpPercent() uint16 {
	ret := _m.Called()
	var r0 uint16
	if rf, ok := ret.Get(0).(func() uint16); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(uint16)
	}
	return r0
}

// BumpThreshold provides a mock function with given fields:
func (_m *GasEstimator) BumpThreshold() uint64 {
	ret := _m.Called()
	var r0 uint64
	if rf, ok := ret.Get(0).(func() uint64); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(uint64)
	}
	return r0
}

// BumpTxDepth provides a mock function with given fields:
func (_m *GasEstimator) BumpTxDepth() uint32 {
	ret := _m.Called()
	var r0 uint32
	if rf, ok := ret.Get(0).(func() uint32); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(uint32)
	}
	return r0
}

// EIP1559DynamicFees provides a mock function with given fields:
func (_m *GasEstimator) EIP1559DynamicFees() bool {
	ret := _m.Called()
	var r0 bool
	if rf, ok := ret.Get(0).(func() bool); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(bool)
	}
	return r0
}

// FeeCapDefault provides a mock function with given fields:
func (_m *GasEstimator) FeeCapDefault() *assets.Wei {
	ret := _m.Called()
	var r0 *assets.Wei
	if rf, ok := ret.Get(0).(func() *assets.Wei); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*assets.Wei)
		}
	}
	return r0
}

// LimitDefault provides a mock function with given fields:
func (_m *GasEstimator) LimitDefault() uint32 {
	ret := _m.Called()
	var r0 uint32
	if rf, ok := ret.Get(0).(func() uint32); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(uint32)
	}
	return r0
}

// LimitJobType provides a mock function with given fields:
func (_m *GasEstimator) LimitJobType() config.LimitJobType {
	ret := _m.Called()
	var r0 config.LimitJobType
	if rf, ok := ret.Get(0).(func() config.LimitJobType); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(config.LimitJobType)
		}
	}
	return r0
}

// LimitMax provides a mock function with given fields:
func (_m *GasEstimator) LimitMax() uint32 {
	ret := _m.Called()
	var r0 uint32
	if rf, ok := ret.Get(0).(func() uint32); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(uint32)
	}
	return r0
}

// LimitMultiplier provides a mock function with given fields:
func (_m *GasEstimator) LimitMultiplier() float32 {
	ret := _m.Called()
	var r0 float32
	if rf, ok := ret.Get(0).(func() float32); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(float32)
	}
	return r0
}

// LimitTransfer provides a mock function with given fields:
func (_m *GasEstimator) LimitTransfer() uint32 {
	ret := _m.Called()
	var r0 uint32
	if rf, ok := ret.Get(0).(func() uint32); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(uint32)
	}
	return r0
}

// Mode provides a mock function with given fields:
func (_m *GasEstimator) Mode() string {
	ret := _m.Called()
	var r0 string
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}
	return r0
}

// PriceDefault provides a mock function with given fields:
func (_m *GasEstimator) PriceDefault() *assets.Wei {
	ret := _m.Called()
	var r0 *assets.Wei
	if rf, ok := ret.Get(0).(func() *assets.Wei); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*assets.Wei)
		}
	}
	return r0
}

// PriceMax provides a mock function with given fields:
func (_m *GasEstimator) PriceMax() *assets.Wei {
	ret := _m.Called()
	var r0 *assets.Wei
	if rf, ok := ret.Get(0).(func() *assets.Wei); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*assets.Wei)
		}
	}
	return r0
}

// PriceMaxKey provides a mock function with given fields: _a0
func (_m *GasEstimator) PriceMaxKey(_a0 common.Address) *assets.Wei {
	ret := _m.Called(_a0)
	var r0 *assets.Wei
	if rf, ok := ret.Get(0).(func(common.Address) *assets.Wei); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*assets.Wei)
		}
	}
	return r0
}

// PriceMin provides a mock function with given fields:
func (_m *GasEstimator) PriceMin() *assets.Wei {
	ret := _m.Called()
	var r0 *assets.Wei
	if rf, ok := ret.Get(0).(func() *assets.Wei); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*assets.Wei)
		}
	}
	return r0
}

// TipCapDefault provides a mock function with given fields:
func (_m *GasEstimator) TipCapDefault() *assets.Wei {
	ret := _m.Called()
	var r0 *assets.Wei
	if rf, ok := ret.Get(0).(func() *assets.Wei); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*assets.Wei)
		}
	}
	return r0
}

// TipCapMin provides a mock function with given fields:
func (_m *GasEstimator) TipCapMin() *assets.Wei {
	ret := _m.Called()
	var r0 *assets.Wei
	if rf, ok := ret.Get(0).(func() *assets.Wei); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*assets.Wei)
		}
	}
	return r0
}

// mockConstructorTestingTNewGasEstimator is the subset of *testing.T the
// constructor needs (assertion reporting plus Cleanup registration).
type mockConstructorTestingTNewGasEstimator interface {
	mock.TestingT
	Cleanup(func())
}

// NewGasEstimator creates a new instance of GasEstimator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewGasEstimator(t mockConstructorTestingTNewGasEstimator) *GasEstimator {
	mock := &GasEstimator{}
	mock.Mock.Test(t)
	t.Cleanup(func() { mock.AssertExpectations(t) })
	return mock
}
|
[
4
] |
package errcode
import (
"fmt"
)
// Per-module error-code bases. iota*1000 gives each module a disjoint,
// 1000-wide code range (mempool=0, script=1000, tx=2000, ...), so a global
// code identifies both the module and the specific error.
const (
	MempoolErrorBase = iota * 1000
	ScriptErrorBase
	TxErrorBase
	ChainErrorBase
	RPCErrorBase
	DiskErrorBase
)
// ProjectError is the project-wide error value: a module name, a global
// numeric code, a human-readable description, and the original typed
// error code it was built from.
type ProjectError struct {
	Module    string       // module name, e.g. "rpc", "mempool"
	Code      int          // global error code (module base + offset)
	Desc      string       // human-readable description
	ErrorCode fmt.Stringer // original typed code (RPCErr, TxErr, RejectCode, ...)
}

// Error implements the error interface.
func (e ProjectError) Error() string {
	return fmt.Sprintf("module: %s, global errcode: %v, desc: %s", e.Module, e.Code, e.Desc)
}
// getCode maps a typed, per-module error code onto its integer value and
// the name of the module it belongs to. Unrecognized types yield (0, "").
func getCode(errCode fmt.Stringer) (int, string) {
	switch t := errCode.(type) {
	case RPCErr:
		return int(t), "rpc"
	case MemPoolErr:
		return int(t), "mempool"
	case ChainErr:
		return int(t), "chain"
	case DiskErr:
		return int(t), "disk"
	case ScriptErr:
		return int(t), "script"
	case TxErr:
		return int(t), "transaction"
	case RejectCode:
		return int(t), "tx_validation"
	}
	return 0, ""
}
func IsErrorCode(err error, errCode fmt.Stringer) bool {
e, ok := err.(ProjectError)
code, _ := getCode(errCode)
return ok && code == e.Code
}
// New builds a ProjectError from a typed code, using the code's own
// String() output as the description.
func New(errCode fmt.Stringer) error {
	return NewError(errCode, errCode.String())
}

// NewError builds a ProjectError from a typed code and an explicit
// description, resolving the global code and module name via getCode.
func NewError(errCode fmt.Stringer, desc string) error {
	code, module := getCode(errCode)
	e := ProjectError{ErrorCode: errCode, Desc: desc}
	e.Code = code
	e.Module = module
	return e
}
// MakeError builds a rejection error whose description embeds the short
// form of innerErr via the given printf-style format.
func MakeError(code RejectCode, format string, innerErr error) error {
	desc := fmt.Sprintf(format, shortDesc(innerErr))
	return NewError(code, desc)
}
// IsRejectCode BIP61 reject code; never send internal reject codes over P2P.
// It reports whether err carries a RejectCode and, if so, returns the code
// together with its description.
func IsRejectCode(err error) (RejectCode, string, bool) {
	pe, isProject := err.(ProjectError)
	if !isProject || pe.ErrorCode == nil {
		return 0, "", false
	}
	if rc, isReject := pe.ErrorCode.(RejectCode); isReject {
		return rc, pe.Desc, true
	}
	return 0, "", false
}
// shortDesc returns a compact description of err: the typed code name when
// err is a ProjectError carrying one, otherwise err's own message.
//
// Bug fix: the previous version returned the ZERO ProjectError's message
// ("module: , global errcode: 0, desc: ") for any non-ProjectError error,
// discarding the original error text; it also panicked on nil.
func shortDesc(err error) string {
	if err == nil {
		return ""
	}
	if e, ok := err.(ProjectError); ok && e.ErrorCode != nil {
		return e.ErrorCode.String()
	}
	return err.Error()
}
|
[
7
] |
package list
import (
"context"
"encoding/json"
"fmt"
dtos "restaurant-visualizer/pkg/dtos/out"
"restaurant-visualizer/pkg/models"
"restaurant-visualizer/pkg/storage"
"strconv"
)
// ListRepo is the read-side repository contract for buyers: paginated
// listing, totals, per-buyer dossiers and date lookups.
type ListRepo interface {
	GetAllBuyers(page, size int) ([]models.Buyer, error)
	GetTotalBuyersCount() (int, error)
	GetBuyerInformation(buyerId string) (*dtos.BuyerInfo, error)
	GetBuyersByDate(date string) ([]models.Buyer, error)
}

// DgraphListRepo implements ListRepo against a Dgraph backend.
// NOTE(review): storing a context in a struct is discouraged in Go — it
// pins one lifetime to all queries; consider passing ctx per call.
type DgraphListRepo struct {
	db      storage.Storage
	context context.Context
}

// BuyerInfoResponse mirrors the JSON shape of the GetBuyerInformation query
// (keys match the query's top-level result names).
type BuyerInfoResponse struct {
	BuyerInfo        []dtos.BuyerDto             `json:"buyer,omitempty"`
	Transactions     []dtos.TransactionInfo      `json:"transactions,omitempty"`
	BuyersWithSameIp []dtos.BuyersWithRelatedIps `json:"buyersWithSameIp,omitempty"`
	Products         []models.Product            `json:"top10Products,omitempty"`
}

// BuyersListResponse mirrors the JSON shape of the list queries.
type BuyersListResponse struct {
	Buyers []models.Buyer `json:"buyers,omitempty"`
}

// NewDgraphListRepo wires a repository to a storage handle and the context
// used for all its queries.
func NewDgraphListRepo(Db storage.Storage, context context.Context) *DgraphListRepo {
	return &DgraphListRepo{db: Db, context: context}
}
// GetBuyersByDate returns every Buyer node whose date predicate equals the
// given date string.
func (dgRepo *DgraphListRepo) GetBuyersByDate(date string) ([]models.Buyer, error) {
	const query = `
	query GetBuyersByDate($date: string) {
		buyers(func: eq(date, $date)) @filter(type(Buyer)){
			id
			name
			age
			date
		}
	}
	`
	vars := map[string]string{"$date": date}
	resp, err := dgRepo.db.DbClient.NewReadOnlyTxn().QueryWithVars(dgRepo.context, query, vars)
	if err != nil {
		return nil, err
	}
	var decoded BuyersListResponse
	if err := json.Unmarshal(resp.Json, &decoded); err != nil {
		return nil, err
	}
	return decoded.Buyers, nil
}
// GetAllBuyers returns one page of buyers, newest first, using the given
// offset and page size.
func (dgRepo *DgraphListRepo) GetAllBuyers(offset, size int) ([]models.Buyer, error) {
	const query = `
	query GetAllBuyers($offset: int, $size: int) {
		buyers(func: type(Buyer), offset: $offset, first: $size, orderdesc: date) {
			id
			name
			age
			date
		}
	}
	`
	vars := map[string]string{
		"$offset": fmt.Sprint(offset),
		"$size":   fmt.Sprint(size),
	}
	resp, err := dgRepo.db.DbClient.NewReadOnlyTxn().QueryWithVars(dgRepo.context, query, vars)
	if err != nil {
		return nil, err
	}
	var decoded BuyersListResponse
	if err := json.Unmarshal(resp.Json, &decoded); err != nil {
		return nil, err
	}
	return decoded.Buyers, nil
}
// GetBuyerInformation fetches a buyer dossier in a single Dgraph query:
// the buyer record, their 10 most recent transactions (with per-transaction
// totals), up to 10 other buyers seen on the same IP addresses, and a
// top-10 product recommendation list derived from co-purchases.
// Returns (nil, nil) when the buyer id matches no record.
func (dgRepo *DgraphListRepo) GetBuyerInformation(buyerId string) (*dtos.BuyerInfo, error) {
	// The query chains value variables (ipAddress, price, productsBought,
	// productsToBeRecommended, total) so all four result sets derive from
	// the single $buyerId parameter in one round trip.
	query := `
	query getBuyerInformation($buyerId: string) {
		buyer(func: eq(id, $buyerId)){
			id
			name
			age
			date
		}
		transactions(func: eq(buyerId, $buyerId), first: 10){
			id
			ipAddress as ipAddress
			device
			date
			products: bought {
				id
				name
				price as price
			}
			total: sum(val(price))
		}
		buyersWithSameIp(func: eq(ipAddress, val(ipAddress)), first: 10) @filter(NOT uid(ipAddress))
		{
			device
			ipAddress
			buyer: was_made_by {
				id
				name
				age
			}
		}
		var(func: eq(id, $buyerId)){
			made {
				bought {
					productsBought as id
				}
			}
		}
		var(func: eq(id, val(productsBought))){
			id
			name
			price
			was_bought {
				id
				bought @filter(NOT uid(productsBought)) {
					productsToBeRecommended as id
				}
			}
		}
		var(func: eq(id, val(productsToBeRecommended))){
			id
			total as count(was_bought)
		}
		top10Products(func: uid(total), orderdesc: val(total), first: 10){
			id
			name
			price
		}
	}
	`
	variables := map[string]string{"$buyerId": buyerId}
	resp, err := dgRepo.db.DbClient.NewReadOnlyTxn().QueryWithVars(dgRepo.context, query, variables)
	if err != nil {
		return nil, err
	}
	// Decode all four result sets at once; field names match the query's
	// top-level blocks.
	var dgraphResponse BuyerInfoResponse
	err = json.Unmarshal(resp.Json, &dgraphResponse)
	if err != nil {
		return nil, err
	}
	// No buyer block means the id is unknown — signalled as (nil, nil).
	if len(dgraphResponse.BuyerInfo) == 0 {
		return nil, nil
	}
	result := dgraphResponse.BuyerInfo[0]
	buyerInfo := dtos.NewBuyerInformation(result, dgraphResponse.Transactions, dgraphResponse.BuyersWithSameIp, dgraphResponse.Products)
	return buyerInfo, nil
}
// GetTotalBuyersCount returns the number of nodes of type Buyer.
//
// Bug fix: the previous version extracted the count by slicing the raw
// JSON response at fixed byte offsets (json[19:len(json)-3]), which breaks
// silently the moment Dgraph changes its response formatting. Decode the
// JSON properly instead.
func (dgRepo *DgraphListRepo) GetTotalBuyersCount() (int, error) {
	query := `
	query {
		total(func: type(Buyer)) {
			count: count(uid)
		}
	}
	`
	resp, err := dgRepo.db.DbClient.NewReadOnlyTxn().Query(dgRepo.context, query)
	if err != nil {
		return 0, err
	}
	var out struct {
		Total []struct {
			Count int `json:"count"`
		} `json:"total"`
	}
	if err := json.Unmarshal(resp.Json, &out); err != nil {
		return 0, err
	}
	if len(out.Total) == 0 {
		return 0, nil
	}
	return out.Total[0].Count, nil
}
|
[
2
] |
package main
import "fmt"
// main demonstrates minAddToMakeValid on a sample unbalanced input.
func main() {
	answer := minAddToMakeValid("())(")
	fmt.Println(answer)
}
// minAddToMakeValid returns the minimum number of parentheses that must be
// inserted into S to make it valid. open counts '(' still awaiting a
// partner; unmatched counts ')' that could never be paired.
func minAddToMakeValid(S string) int {
	open, unmatched := 0, 0
	for _, c := range []byte(S) {
		switch {
		case c == '(':
			open++
		case open > 0:
			open--
		default:
			unmatched++
		}
	}
	return open + unmatched
}
|
[
2
] |
package ch5
// BitSwapRequired returns the number of bits that must be flipped to
// convert a into b, i.e. the population count of a XOR b.
//
// Bug fix: the XOR is widened to uint before shifting. With a signed int,
// the right shift is arithmetic and keeps the sign bit set, so whenever a
// and b had opposite signs (negative XOR) the loop never terminated.
func BitSwapRequired(a, b int) int {
	count := 0
	for c := uint(a ^ b); c != 0; c >>= 1 {
		count += int(c & 1)
	}
	return count
}
|
[
0
] |
package utils
import (
"context"
"fmt"
"github.com/Rhymen/go-whatsapp"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"log"
"time"
)
// DB wraps a shared MongoDB client handle; Access must be called before
// any of the other methods.
type DB struct {
	client *mongo.Client
}

// session mirrors the fields of the stored WhatsApp session document.
// NOTE(review): these are json tags, but the document is decoded from BSON —
// the mongo driver ignores json tags and matches by lowercased field name,
// which does not equal keys like "clientId". Confirm whether these should
// be bson tags; as written the decoded fields may come back empty.
type session struct {
	ClientId    string `json:"clientId"`
	ClientToken string `json:"clientToken"`
	EncKey      []byte `json:"encKey"`
	Mackey      []byte `json:"macKey"`
	ServerToken string `json:"serverToken"`
	Wid         string `json:"wid"`
}
// Access connects C to the MongoDB instance at url. A client-construction
// failure is fatal; a connect failure is logged fatally as well, leaving
// C.client unset.
func (C *DB) Access(url string) {
	client, err := mongo.NewClient(options.Client().ApplyURI(url))
	if err != nil {
		log.Fatal(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	if err = client.Connect(ctx); err != nil {
		log.Fatalf("Error while connecting DB : %v", err)
		return
	}
	C.client = client
}
// GetKey loads the stored WhatsApp session from the database. It returns
// false (and a zero session) when no session is stored or the lookup fails.
func (C DB) GetKey() (bool, whatsapp.Session) {
	Ctx, cancel := context.WithTimeout(context.Background(), 40*time.Second)
	var value whatsapp.Session
	defer cancel()
	collection := C.client.Database(GetDbName()).Collection(GetDbCollection())
	var sus *session
	err := collection.FindOne(Ctx, bson.M{"key": "access"}).Decode(&sus)
	if err != nil {
		fmt.Println(err.Error())
		// Bug fix: previously only mongo.ErrNoDocuments returned early; any
		// other decode/lookup error fell through and dereferenced the nil
		// *session below, panicking.
		return false, value
	}
	value = whatsapp.Session{
		ClientId:    sus.ClientId,
		ClientToken: sus.ClientToken,
		ServerToken: sus.ServerToken,
		EncKey:      sus.EncKey,
		MacKey:      sus.Mackey,
		Wid:         sus.Wid,
	}
	return true, value
}
// Addkey persists the WhatsApp session under the fixed key "access" and
// reports whether the insert succeeded.
func (C DB) Addkey(kek whatsapp.Session) bool {
	ctx, cancel := context.WithTimeout(context.Background(), 40*time.Second)
	defer cancel()
	doc := bson.M{
		"key":         "access",
		"clientId":    kek.ClientId,
		"clientToken": kek.ClientToken,
		"encKey":      kek.EncKey,
		"macKey":      kek.MacKey,
		"serverToken": kek.ServerToken,
		"wid":         kek.Wid,
	}
	collection := C.client.Database(GetDbName()).Collection(GetDbCollection())
	_, err := collection.InsertOne(ctx, doc)
	return err == nil
}
// DelKeys removes every document from the session collection and logs how
// many were deleted.
func (C DB) DelKeys() {
	ctx, cancel := context.WithTimeout(context.Background(), 40*time.Second)
	defer cancel()
	collection := C.client.Database(GetDbName()).Collection(GetDbCollection())
	res, err := collection.DeleteMany(ctx, bson.M{})
	if err != nil {
		// Bug fix: return on error — res is nil here and the Printf below
		// would panic dereferencing res.DeletedCount.
		fmt.Println(err)
		return
	}
	fmt.Printf("deleted %v documents\n", res.DeletedCount)
}
|
[
2
] |
package whpetitionsinfo
import (
"appengine"
"appengine/datastore"
"appengine/memcache"
"appengine/urlfetch"
"bytes"
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"math"
"net/http"
"sort"
"strconv"
"strings"
"time"
)
// Stats is the persisted aggregate for one petition category: the average
// wait (deadline-to-now or deadline-to-response) and how many petitions
// were counted.
type Stats struct {
	AverageDuration time.Duration
	Number          int
}

// RenderStats is the template-facing view of both Stats records, with
// durations reduced to whole days and a percentage responded.
type RenderStats struct {
	AverageResponse, AveragePending, NumberResponse, NumberPending, NumberTotal, PercentResponded int
}

// RenderData bundles everything the page templates consume.
type RenderData struct {
	Stats     RenderStats
	Petitions PetitionSet
}

// APIResponse mirrors the White House petitions API payload for petitions
// that already have a response.
type APIResponse struct {
	Results []Petition
}

// UnrespondedAPIResponse mirrors the same payload for pending petitions
// (no Response field in the JSON).
type UnrespondedAPIResponse struct {
	Results []UnrespondedPetition
}

// Petition is one responded petition; DeadlineTime/UpdatedTime/YearAgo are
// computed locally rather than coming from the API.
type Petition struct {
	// Id string
	Title, Url, Status                                                 string
	Body                                                               string `datastore:",noindex"`
	SignatureThreshold, SignatureCount, SignaturesNeeded, Deadline, Created int
	Response                                                           WHResponse
	DeadlineTime, UpdatedTime                                          time.Time
	YearAgo                                                            bool
}

// UnrespondedPetition is the same record without the Response block.
type UnrespondedPetition struct {
	// Id string
	Title, Url, Status                                                 string
	Body                                                               string `datastore:",noindex"`
	SignatureThreshold, SignatureCount, SignaturesNeeded, Deadline, Created int
	// Response WHResponse
	DeadlineTime, UpdatedTime time.Time
	YearAgo                   bool
}

// WHResponse is the White House response metadata attached to a petition;
// AssociationTime is a unix-second timestamp.
type WHResponse struct {
	// Id string
	Url             string
	AssociationTime int
}

// PetitionSet and UnrespondedPetitionSet get sort.Interface methods below.
type PetitionSet []Petition
type UnrespondedPetitionSet []UnrespondedPetition
// init wires the three routes: "/" renders the (cached) petition page and
// the two update endpoints refresh the datastore from the White House API.
// NOTE(review): the update handlers look cron-driven — confirm in cron.yaml.
func init() {
	http.HandleFunc("/", mainHandler)
	http.HandleFunc("/updatePending", pendingHandler)
	http.HandleFunc("/updateResponded", respondedHandler)
}
/*
TEMPLATE STUFF
*/
// templateFuncs exposes helpers to the templates; addCommas renders ints
// with thousands separators (see Comma below).
var templateFuncs = template.FuncMap{
	"addCommas": Comma,
}

// index renders the normal petition listing page.
var index = template.Must(template.New("base.html").Funcs(templateFuncs).ParseFiles(
	"templates/base.html",
	"templates/petitions.html",
))

// page404 renders the not-found page (still with live stats).
var page404 = template.Must(template.ParseFiles(
	"templates/base.html",
	"templates/404.html",
))

// empty renders the page shown when no pending petitions exist.
var empty = template.Must(template.ParseFiles(
	"templates/base.html",
	"templates/empty.html",
))
/*
HANDLERS
*/
// mainHandler serves both "/" (the petition listing) and every other path
// (a rendered 404 that still shows stats). Rendered pages are cached in
// memcache for up to 90 minutes; the update handlers flush memcache, so a
// cached page never outlives fresh data.
func mainHandler(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	// Fast path: serve the cached 404 or cached index if present.
	if r.Method != "GET" || r.URL.Path != "/" {
		w.WriteHeader(http.StatusNotFound)
		if item, err := memcache.Get(c, "cached404"); err == memcache.ErrCacheMiss {
			// Item not in cache; continue.
		} else if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		} else {
			w.Write(item.Value)
			return
		}
	} else {
		if item, err := memcache.Get(c, "cachedIndex"); err == memcache.ErrCacheMiss {
			// Item not in cache; continue.
		} else if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		} else {
			w.Write(item.Value)
			return
		}
	}
	// Cache miss: load the stored petition slice and both stats records
	// from the datastore (written by the update handlers).
	var renderData RenderData
	var stats RenderStats
	var response APIResponse
	if err8 := datastore.Get(c, datastore.NewKey(c, "APIResponse", "pending slice", 0, nil), &response); err8 != nil {
		http.Error(w, err8.Error(), http.StatusInternalServerError)
		return
	}
	petitions := response.Results
	var responseStats Stats
	var pendingStats Stats
	if err6 := datastore.Get(c, datastore.NewKey(c, "Stats", "responded", 0, nil), &responseStats); err6 != nil {
		http.Error(w, err6.Error(), http.StatusInternalServerError)
		return
	}
	if err7 := datastore.Get(c, datastore.NewKey(c, "Stats", "pending response", 0, nil), &pendingStats); err7 != nil {
		http.Error(w, err7.Error(), http.StatusInternalServerError)
		return
	}
	// Reduce the stored durations to whole days and derive the totals and
	// response percentage shown in the header.
	stats.AveragePending = int(math.Floor(pendingStats.AverageDuration.Hours() / 24))
	stats.AverageResponse = int(math.Floor(responseStats.AverageDuration.Hours() / 24))
	stats.NumberResponse = responseStats.Number
	stats.NumberPending = pendingStats.Number
	stats.NumberTotal = stats.NumberResponse + stats.NumberPending
	stats.PercentResponded = int(math.Floor(100 * float64(stats.NumberResponse) / float64(stats.NumberTotal)))
	renderData.Stats = stats
	renderData.Petitions = petitions
	// Render to a buffer first so the result can be cached verbatim.
	b := bytes.NewBufferString("")
	if r.Method != "GET" || r.URL.Path != "/" {
		if err := page404.Execute(b, renderData); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		item := &memcache.Item{
			Key:        "cached404",
			Value:      b.Bytes(),
			Expiration: time.Duration(90) * time.Minute, // 90 minutes, but note that the data updaters flush memcache
		}
		if err9 := memcache.Set(c, item); err9 != nil {
			c.Errorf("error setting item: %v", err9)
		}
		b.WriteTo(w)
		return
	} else {
		// Pick the "no pending petitions" template when the list is empty.
		if len(petitions) == 0 {
			if err := empty.Execute(b, renderData); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		} else {
			if err := index.Execute(b, renderData); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
		item := &memcache.Item{
			Key:        "cachedIndex",
			Value:      b.Bytes(),
			Expiration: time.Duration(90) * time.Minute, // 90 minutes, but note that the data updaters flush memcache
		}
		if err9 := memcache.Set(c, item); err9 != nil {
			c.Errorf("error setting item: %v", err9)
		}
		b.WriteTo(w)
		return
	}
}
// pendingHandler refreshes the "pending response" data: it pulls all
// pending petitions from the White House API, timestamps them, stores the
// raw result set plus aggregate wait-time stats in the datastore, and
// flushes memcache so the next render picks up the new data.
func pendingHandler(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	url := "https://api.whitehouse.gov/v1/petitions.json?status=pending%20response&limit=500"
	response, err := getUnrespondedJSON(url, r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	now := time.Now()
	petitions := response.Results
	var stats Stats
	stats.Number = len(petitions)
	sort.Sort(UnrespondedPetitionSet(petitions))
	// Annotate each petition with its deadline/update times, flag the ones
	// waiting over a year, and accumulate the total wait for the average.
	var total time.Duration
	for i := range petitions {
		petitions[i].DeadlineTime = time.Unix(int64(petitions[i].Deadline), 0)
		petitions[i].UpdatedTime = now
		waited := now.Sub(petitions[i].DeadlineTime)
		total += waited
		if waited.Hours() > 24*365 {
			petitions[i].YearAgo = true
		}
	}
	// petitions shares its backing array with response.Results, so the
	// annotations above are stored along with the slice.
	if _, err := datastore.Put(c, datastore.NewKey(c, "APIResponse", "pending slice", 0, nil), &response); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if stats.Number > 0 {
		stats.AverageDuration = time.Duration(float64(total) / float64(stats.Number))
	} else {
		stats.AverageDuration = 0
	}
	if _, err := datastore.Put(c, datastore.NewKey(c, "Stats", "pending response", 0, nil), &stats); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	memcache.Flush(c)
	fmt.Fprint(w, "OK")
}
// respondedHandler refreshes the "responded" stats: it pulls all responded
// petitions from the White House API, computes the average gap between a
// petition's deadline and the White House response (both unix-second
// timestamps), stores the stats, and flushes memcache.
func respondedHandler(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	url := "https://api.whitehouse.gov/v1/petitions.json?status=responded&limit=500"
	response, err := getJSON(url, r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	petitions := response.Results
	var stats Stats
	stats.Number = len(petitions)
	var total float64
	for i := range petitions {
		total += float64(petitions[i].Response.AssociationTime) - float64(petitions[i].Deadline)
	}
	// Robustness: guard the division — with zero petitions the old code
	// produced NaN and stored a garbage duration.
	if stats.Number > 0 {
		average := total / float64(stats.Number)
		stats.AverageDuration = time.Duration(average * 1e9) // seconds -> nanoseconds
	}
	// Bug fix: the datastore.Put error was silently discarded, so a failed
	// write left stale stats behind with a 200 "OK" response.
	if _, err := datastore.Put(c, datastore.NewKey(c, "Stats", "responded", 0, nil), &stats); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	memcache.Flush(c)
	fmt.Fprint(w, "OK")
}
/*
OTHER FUNCTIONS
*/
// getUnrespondedJSON fetches url via the App Engine urlfetch transport and
// decodes the body as an UnrespondedAPIResponse.
func getUnrespondedJSON(url string, r *http.Request) (UnrespondedAPIResponse, error) {
	c := appengine.NewContext(r)
	transport := urlfetch.Transport{
		Context:                       c,
		Deadline:                      time.Duration(20) * time.Second,
		AllowInvalidServerCertificate: false,
	}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return UnrespondedAPIResponse{}, err
	}
	resp, err := transport.RoundTrip(req)
	if err != nil {
		return UnrespondedAPIResponse{}, err
	}
	// Bug fix: close the body on every path; previously it leaked when
	// ReadAll returned an error.
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return UnrespondedAPIResponse{}, err
	}
	var f UnrespondedAPIResponse
	if err := json.Unmarshal(body, &f); err != nil {
		return UnrespondedAPIResponse{}, err
	}
	return f, nil
}
// getJSON fetches url via the App Engine urlfetch transport and decodes
// the body as an APIResponse.
func getJSON(url string, r *http.Request) (APIResponse, error) {
	c := appengine.NewContext(r)
	transport := urlfetch.Transport{
		Context:                       c,
		Deadline:                      time.Duration(20) * time.Second,
		AllowInvalidServerCertificate: false,
	}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return APIResponse{}, err
	}
	resp, err := transport.RoundTrip(req)
	if err != nil {
		return APIResponse{}, err
	}
	// Bug fix: close the body on every path; previously it leaked when
	// ReadAll returned an error.
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return APIResponse{}, err
	}
	var f APIResponse
	if err := json.Unmarshal(body, &f); err != nil {
		return APIResponse{}, err
	}
	return f, nil
}
// Comma formats v with thousands separators, e.g. 1234567 -> "1,234,567".
// Negative values keep their sign; magnitudes up to 27 digits (nine
// 3-digit groups) are supported.
func Comma(v int) string {
	sign := ""
	if v < 0 {
		sign = "-"
		v = -v
	}
	groups := []string{"", "", "", "", "", "", "", "", ""}
	i := len(groups) - 1
	// Peel off zero-padded 3-digit groups from the right.
	for v > 999 {
		groups[i] = fmt.Sprintf("%03d", v%1000)
		v /= 1000
		i--
	}
	// The leading group is not zero-padded.
	groups[i] = strconv.Itoa(v)
	return sign + strings.Join(groups[i:], ",")
}
// Sorting
// Both petition sets implement sort.Interface, ordering ascending by the
// Deadline unix timestamp (oldest deadline first).
func (s PetitionSet) Len() int {
	return len(s)
}
func (s PetitionSet) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
func (s PetitionSet) Less(i, j int) bool {
	return s[i].Deadline < s[j].Deadline
}
func (s UnrespondedPetitionSet) Len() int {
	return len(s)
}
func (s UnrespondedPetitionSet) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
func (s UnrespondedPetitionSet) Less(i, j int) bool {
	return s[i].Deadline < s[j].Deadline
}
|
[
0
] |
package redis
import (
"strings"
"testing"
)
// Fixture values shared by the round-trip tests below.
const (
	// JobURL is a sample remote image URL queued as a job.
	JobURL string = "http://de59658a8604eeb307ec-0d35c4f15040cfced3f623ba9067988e.r54.cf1.rackcdn.com/photos/2500125/4de27ff0a2093e8ac1a3068d64c7b262.jpg"
	// LocalURL is the local cache path expected to be mapped to JobURL.
	LocalURL string = "/tmp/de59658a8604eeb307ec-0d35c4f15040cfced3f623ba9067988e.r54.cf1.rackcdn.comphotos/2500125/300x400/4de27ff0a2093e8ac1a3068d64c7b262.jpg"
)
// TestQueueJob enqueues the fixture job.
// NOTE(review): these four tests appear order-dependent (queue before get,
// add before get) and to require a reachable Redis instance — confirm the
// package wires one up; consider consolidating into single round-trip tests.
func TestQueueJob(t *testing.T) {
	if false == QueueJob(JobURL) {
		t.Fail()
	}
}

// TestGetJob pops a job and expects exactly the URL queued above.
func TestGetJob(t *testing.T) {
	if jobURL, success := GetJob(); success == false {
		t.Fail()
	} else {
		if strings.EqualFold(jobURL, JobURL) == false {
			t.Fail()
		}
	}
}

// TestAddURL records the remote-to-local mapping.
func TestAddURL(t *testing.T) {
	if false == AddURL(JobURL, LocalURL) {
		t.Fail()
	}
}

// TestGetURL reads the mapping back and expects the local path stored above.
func TestGetURL(t *testing.T) {
	if localURL, success := GetURL(JobURL); success == false {
		t.Fail()
	} else if strings.EqualFold(localURL, LocalURL) == false {
		t.Fail()
	}
}
|
[
4
] |
package main
import (
"bufio"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"regexp"
"strings"
"sync"
"github.com/anikhasibul/queue"
)
const (
	// this is where you can specify how many maxFileDescriptors
	// you want to allow open
	maxFileDescriptors = 1000
)

// wg tracks one Grabber goroutine per input line so main can wait for all.
var wg sync.WaitGroup

// q caps the number of Grabbers running concurrently at 1000.
var q = queue.New(1000)
// main reads IPs line-by-line from ip.txt, runs a bounded set of Grabber
// goroutines against Bing, and writes every discovered site URL to urls.txt.
//
// Bug fixes: the previous version called close(results) INSIDE the consumer
// goroutine after its `range results` loop — unreachable code, since the
// range only ends once the channel is closed. Worse, nothing closed the
// channel or waited for the writer, so main could exit with buffered
// results never flushed to urls.txt. The channel is now closed by main
// (the side that knows when producers are done) and a done channel makes
// main wait for the writer to drain.
func main() {
	file, err := os.Open("ip.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	outfile, err := os.Create("urls.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer outfile.Close()
	results := make(chan []string, maxFileDescriptors)
	done := make(chan struct{})
	// Single writer goroutine: drains result batches into urls.txt.
	go func() {
		defer close(done)
		for output := range results {
			for _, url := range output {
				fmt.Fprintln(outfile, url)
			}
		}
	}()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		wg.Add(1)
		q.Add()
		go Grabber(q, scanner.Text(), results)
	}
	wg.Wait()
	close(results) // all producers finished; let the writer drain
	<-done         // wait until every URL is flushed to urls.txt
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
// stringInArray reports whether a is present in list.
func stringInArray(a string, list []string) bool {
	for _, candidate := range list {
		if candidate == a {
			return true
		}
	}
	return false
}
// hrefRe extracts result links from Bing's HTML. Compiled once at package
// level instead of once per loop iteration.
var hrefRe = regexp.MustCompile(`<h2><a href="(.*?)"`)

// Grabber queries Bing for sites hosted on ip (an "ip:<ip>" search), walks
// the paginated results (50 per page, offsets 1, 51, ... 201), collects
// unique site roots (scheme//host) and sends the batch on results.
//
// Bug fixes: the result offset was hard-coded to first=1, so every
// iteration re-fetched page one five times; a failed NewRequest previously
// fell through and passed a nil request to client.Do (panic); the response
// body was closed with defer inside the loop (held open until return); and
// the regexp was recompiled on every iteration.
func Grabber(q *queue.Q, ip string, results chan []string) {
	defer q.Done()
	defer wg.Done()
	if ip == "" {
		return
	}
	var output []string
	client := &http.Client{}
	for page := 1; page < 251; page += 50 {
		req, err := http.NewRequest(
			http.MethodGet,
			fmt.Sprintf(
				"http://www.bing.com/search?q=ip:%s+&count=50&first=%d",
				url.QueryEscape(ip), page,
			),
			nil,
		)
		if err != nil {
			fmt.Println(err.Error())
			continue
		}
		req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:57.0) Gecko/20100101 Firefox/57.0")
		res, err := client.Do(req)
		if err != nil {
			fmt.Printf("Invalid Request. ERR: %v \n", err)
			return
		}
		body, err := ioutil.ReadAll(res.Body)
		res.Body.Close() // close per-iteration; defer would pile up until return
		if err != nil {
			fmt.Println("Couldn't Read")
		}
		for _, link := range hrefRe.FindAllString(string(body), -1) {
			parts := strings.Split(link, `"`)
			seg := strings.Split(parts[1], "/")
			site := seg[0] + "//" + seg[2]
			if !stringInArray(site, output) {
				output = append(output, site)
			}
		}
	}
	results <- output
	for _, site := range output {
		fmt.Println(site)
	}
}
|
[
0
] |
package fstk
/*
Copyright (C) Philip Schlump, 2016.
MIT Licensed.
*/
import (
"bufio"
"errors"
"fmt"
"os"
)
// FileStackElement records one open file in an include-style file stack:
// saved and current line numbers, the open handle, a display name, and the
// scanner used to read it (attached later via SetScanner).
type FileStackElement struct {
	S_LineNo int            // line number saved when the file was pushed
	C_LineNo int            // current line number within the file
	File     *os.File       // open file handle
	Name     string         // name of the item
	Scanner  *bufio.Scanner // reader for the file; nil until SetScanner
}

// FileStackType is a stack of open files. Top indexes the current element
// (-1 when empty); popped slots are kept in Stack for reuse by Push.
type FileStackType struct {
	Stack []FileStackElement
	Top   int
}
// NewFileStackType returns an empty stack with room for ten files.
func NewFileStackType() *FileStackType {
	return &FileStackType{
		Stack: make([]FileStackElement, 0, 10),
		Top:   -1,
	}
}

// IsEmpty reports whether the stack currently holds no elements.
func (ns *FileStackType) IsEmpty() bool {
	return ns.Top < 0
}
// Push places a new element on top of the stack, reusing a previously
// popped slot when the backing slice is already long enough.
func (ns *FileStackType) Push(S, C int, fp *os.File, name string) {
	ns.Top++
	el := FileStackElement{S_LineNo: S, C_LineNo: C, File: fp, Name: name}
	if ns.Top < len(ns.Stack) {
		ns.Stack[ns.Top] = el
	} else {
		ns.Stack = append(ns.Stack, el)
	}
}
// ErrEmptyStack is returned by Peek when the stack has no elements.
var ErrEmptyStack = errors.New("Empty Stack")

// Peek returns the top element without removing it, or ErrEmptyStack.
func (ns *FileStackType) Peek() (FileStackElement, error) {
	if ns.IsEmpty() {
		return FileStackElement{}, ErrEmptyStack
	}
	return ns.Stack[ns.Top], nil
}
// Pop discards the top element; it is a no-op on an empty stack. The slot
// is left in place so a later Push can reuse it.
func (ns *FileStackType) Pop() {
	if ns.IsEmpty() {
		return
	}
	ns.Top--
}

// Length returns the number of elements currently on the stack.
func (ns *FileStackType) Length() int {
	return ns.Top + 1
}
// Dump1 prints a human-readable listing of the live stack entries.
func (ns *FileStackType) Dump1() {
	fmt.Printf("File Stack Dump 1\n")
	fmt.Printf(" Top = %d\n", ns.Top)
	for i := 0; i <= ns.Top && i < len(ns.Stack); i++ {
		v := ns.Stack[i]
		fmt.Printf(" %d: Name [%s] LineNo: %d\n", i, v.Name, v.C_LineNo)
	}
}

// GetNames returns the Name of every live stack entry, bottom first.
func (ns *FileStackType) GetNames() []string {
	var names []string
	for i := 0; i <= ns.Top && i < len(ns.Stack); i++ {
		names = append(names, ns.Stack[i].Name)
	}
	return names
}
// SetLineNo updates the current line number of the top entry; it is a
// no-op on an empty stack.
func (ns *FileStackType) SetLineNo(n int) {
	if ns.IsEmpty() {
		return
	}
	ns.Stack[ns.Top].C_LineNo = n
}

// SetScanner attaches a scanner to the top entry; it is a no-op on an
// empty stack.
func (ns *FileStackType) SetScanner(ss *bufio.Scanner) {
	if ns.IsEmpty() {
		return
	}
	ns.Stack[ns.Top].Scanner = ss
}
|
[
2
] |
/*
go_ph0n3 is a virtual DTMF phone dialing simulator / tone generator. It
uses Oto as its sound library and is based on Oto's example, which is
licensed under the Apache License Version 2.0.
https://github.com/hajimehoshi/oto/blob/master/example/main.go
*/
package go_ph0n3
import (
"errors"
"fmt"
"github.com/hajimehoshi/oto"
"io"
"log"
"math"
"strings"
"sync"
"time"
)
const (
sampleRate = 44100
bitDepthInBytes = 2
)
// sineWave Because we need to play a sound...
// This is like a "single wave synth": an io.Reader producing PCM samples
// of a single fixed-frequency sine tone.
type sineWave struct {
	freq       float64 // tone frequency in Hz
	length     int64   // total byte length of the tone, a multiple of 4
	pos        int64   // bytes already produced
	remaining  []byte  // leftover bytes from an alignment-padded Read
	channelNum int     // number of interleaved output channels
}
// newSineWave builds a sineWave of the given frequency and duration for
// channelNum interleaved channels. The total byte length is rounded down
// to a multiple of 4 so reads stay frame-aligned.
func newSineWave(freq float64, duration time.Duration, channelNum int) *sineWave {
	total := int64(channelNum) * bitDepthInBytes * sampleRate * int64(duration) / int64(time.Second)
	total -= total % 4
	return &sineWave{
		freq:       freq,
		length:     total,
		channelNum: channelNum,
	}
}
// Read implements io.Reader, filling buf with interleaved PCM samples of
// the sine tone. It serves any leftover bytes from a previous padded read
// first, clamps the final read at s.length (returning io.EOF), and pads
// buf up to a 4-byte multiple internally so sample frames are never split.
func (s *sineWave) Read(buf []byte) (int, error) {
	// Serve bytes left over from a previous alignment-padded read.
	if len(s.remaining) > 0 {
		n := copy(buf, s.remaining)
		s.remaining = s.remaining[n:]
		return n, nil
	}
	if s.pos == s.length {
		return 0, io.EOF
	}
	// Clamp the last read to the remaining tone length.
	eof := false
	if s.pos+int64(len(buf)) > s.length {
		buf = buf[:s.length-s.pos]
		eof = true
	}
	// If the caller's buffer is not 4-byte aligned, synthesize into a
	// padded scratch buffer and keep the overflow in s.remaining.
	var origBuf []byte
	if len(buf)%4 > 0 {
		origBuf = buf
		buf = make([]byte, len(origBuf)+4-len(origBuf)%4)
	}
	// length is the wave period in samples; p is the sample index at which
	// this read starts.
	length := float64(sampleRate) / float64(s.freq)
	num := bitDepthInBytes * s.channelNum
	p := s.pos / int64(num)
	switch bitDepthInBytes {
	case 1:
		// 8-bit unsigned samples, 0.2 amplitude, biased around 128.
		for i := 0; i < len(buf)/num; i++ {
			const max = 127
			b := int(math.Sin(2*math.Pi*float64(p)/length) * 0.2 * max)
			for ch := 0; ch < s.channelNum; ch++ {
				buf[num*i+ch] = byte(b + 128)
			}
			p++
		}
	case 2:
		// 16-bit signed little-endian samples, 0.2 amplitude.
		for i := 0; i < len(buf)/num; i++ {
			const max = 32767
			b := int16(math.Sin(2*math.Pi*float64(p)/length) * 0.2 * max)
			for ch := 0; ch < s.channelNum; ch++ {
				buf[num*i+2*ch] = byte(b)
				buf[num*i+1+2*ch] = byte(b >> 8)
			}
			p++
		}
	}
	s.pos += int64(len(buf))
	n := len(buf)
	if origBuf != nil {
		// Copy what fits back into the caller's buffer; stash the rest.
		n = copy(origBuf, buf)
		s.remaining = buf[n:]
	}
	if eof {
		return n, io.EOF
	}
	return n, nil
}
// Ph0n3Options Defines the behavior of a Ph0n3 instance.
type Ph0n3Options struct {
	// SpaceDuration Time of silence between two dialed tones.
	SpaceDuration time.Duration `json:"space_duration"`
	// ToneDuration Time a single DTMF tone sounds.
	ToneDuration time.Duration `json:"tone_duration"`
	// DialToneDuration Time during the dial tone will sound before dial, 0 = disabled.
	// NOTE(review): Open only checks > 0 and always plays a fixed 2s tone —
	// the actual value is not used as a length; confirm intent.
	DialToneDuration time.Duration `json:"dial_tone_duration"`
	// RingingToneTimes Times the ringing tone will sounds after dialing,
	// 0 for disable.
	RingingToneTimes int
	// BusyTonesTimes Times the busy tone will sounds before the call ends,
	// 0 for disable.
	BusyToneTimes int
	// Channel The sound channel number to be used to play the tones.
	Channel int
	// BuffSizeBytes is the buffer size in bytes
	BuffSizeBytes int
	// Vervose enables debug messages (echoes each digit as it is dialed).
	// (Field name misspelling is kept: it is part of the public API.)
	Vervose bool
}
// DefaultPh0n3Options the default values.
var DefaultPh0n3Options = &Ph0n3Options{
	SpaceDuration: time.Second / 15,
	// Was the bare literal 2 (i.e. 2 nanoseconds); the field is a
	// time.Duration and every sibling field uses time.Second units, so two
	// seconds is clearly the intent. Behavior is unchanged because Open
	// only tests DialToneDuration > 0.
	DialToneDuration: time.Second * 2,
	ToneDuration:     time.Second / 4,
	BuffSizeBytes:    4096,
	Channel:          1,
	RingingToneTimes: 2,
	BusyToneTimes:    4,
	Vervose:          false,
}
// Ph0n3Key are keys of the DMTF System
type Ph0n3Key int

// This constants will gonna be safe indexes, we are just ensuring that only
// defined value will be used; tit is based on the standard 16 key names of the
// DTMF (Dual-Tone Multi-Frequency) System.
// The iota order encodes the 4x4 keypad matrix row-major: row = key/4,
// column = key%4, matching fqMapRows/fqMapCols.
// https://en.wikipedia.org/wiki/Dual-tone_multi-frequency_signaling
const (
	Key1 Ph0n3Key = iota // row 0: 1 2 3 A
	Key2
	Key3
	KeyA
	Key4 // row 1: 4 5 6 B
	Key5
	Key6
	KeyB
	Key7 // row 2: 7 8 9 C
	Key8
	Key9
	KeyC
	KeyStar // row 3: * 0 # D
	Key0
	KeyHash
	KeyD
)
// StandarPad is a map of the standard phone keys and its values to the DTMF
// key that it belongs. This allows you to dial numbers like: 01-800-SOMETHING.
// Letter groups follow the ITU E.161 keypad layout (ABC=2 ... WXYZ=9);
// Q and Z, absent from the original map, are included per that layout.
var StandarPad = map[string]Ph0n3Key{
	"1": Key1,
	"2": Key2,
	"A": Key2,
	"B": Key2,
	"C": Key2,
	"3": Key3,
	"D": Key3,
	"E": Key3,
	"F": Key3,
	"4": Key4,
	"G": Key4,
	"H": Key4,
	"I": Key4,
	"5": Key5,
	"J": Key5,
	"K": Key5,
	"L": Key5,
	"6": Key6,
	"M": Key6,
	"N": Key6,
	"O": Key6,
	"7": Key7,
	"P": Key7,
	"Q": Key7,
	"R": Key7,
	"S": Key7,
	"8": Key8,
	"T": Key8,
	"U": Key8,
	"V": Key8,
	"9": Key9,
	"W": Key9,
	"X": Key9,
	"Y": Key9,
	"Z": Key9,
	"0": Key0,
	"*": KeyStar,
	"#": KeyHash,
}
// Hi freqs map: DTMF column frequencies in Hz, indexed by keypad column.
var fqMapCols = []float64{1209, 1336, 1477, 1633}

// Low freqs map: DTMF row frequencies in Hz, indexed by keypad row.
var fqMapRows = []float64{697, 770, 852, 941}
// Ph0n3 is a phone toy you can use to dial a number; it also could be used as
// dialing tone generator.
type Ph0n3 struct {
	opt           *Ph0n3Options
	ctx           *oto.Context // audio output context, created in NewPh0n3
	isOpen        bool         // true between Open and the end of the call
	lastEventTime time.Time    // time of the last dial activity; read by Open's auto-dial goroutine and written by Dial without synchronization — NOTE(review): data race, confirm
	dialed        string       // digits dialed so far, e.g. "5551234"
	Close         chan bool    // receives true once the call sequence has finished
}
// NewPh0n3 returns a new phone instance ready to use. A nil opt selects
// DefaultPh0n3Options. It panics when the audio context cannot be created.
func NewPh0n3(opt *Ph0n3Options) *Ph0n3 {
	if opt == nil {
		opt = DefaultPh0n3Options
	}
	ctx, err := oto.NewContext(sampleRate, opt.Channel, bitDepthInBytes, opt.BuffSizeBytes)
	if err != nil {
		panic(err)
	}
	return &Ph0n3{
		opt:           opt,
		ctx:           ctx,
		Close:         make(chan bool, 1),
		lastEventTime: time.Now(),
		dialed:        "",
	}
}
// play synthesizes a sine wave of freq Hz for the given duration on a
// fresh player, blocking until the stream has been fully written. If wg is
// non-nil, wg.Done() is called when play returns. Errors are logged, not
// returned.
func (phone *Ph0n3) play(freq float64, duration time.Duration, wg *sync.WaitGroup) {
	defer func() {
		if wg != nil {
			wg.Done()
		}
	}()
	p := phone.ctx.NewPlayer()
	// Close the player on every exit path; the original returned early on
	// a copy error and leaked it.
	defer func() {
		if err := p.Close(); err != nil {
			log.Printf("%v", err)
		}
	}()
	s := newSineWave(freq, duration, phone.opt.Channel)
	if _, err := io.Copy(p, s); err != nil {
		log.Printf("%v", err)
	}
}
// dialing plays the ringing tone (440Hz + 480Hz for 2s, then 4s of
// silence) RingingToneTimes times, then runs the end-of-call sequence.
func (phone *Ph0n3) dialing() {
	for i := 0; i < phone.opt.RingingToneTimes; i++ {
		var wg sync.WaitGroup
		wg.Add(2)
		go phone.play(480, time.Second*2, &wg)
		go phone.play(440, time.Second*2, &wg)
		wg.Wait()
		time.Sleep(time.Second * 4)
	}
	phone.endingCall()
}
// endingCall finishes the call: when the dialed number is "55555" it first
// plays an easter-egg melody, then plays the busy tone (480Hz + 620Hz
// quarter-second pulses) BusyToneTimes times, marks the phone closed and
// signals the Close channel.
func (phone *Ph0n3) endingCall() {
	if phone.opt.BusyToneTimes > 0 {
		if phone.dialed == strings.Repeat("5", 5) {
			// The slice appears to encode a tune: element 0 is the base
			// time unit in seconds; the rest alternate between a frequency
			// in Hz (odd index) and a duration multiplier (even index).
			// NOTE(review): inferred from the index arithmetic below — confirm.
			var f, t float64
			for i, v := range []float64{0.055, 233.8, 4, 311.13, 2, 369.99, 4, 415.3,
				2, 440, 4, 466.6, 2, 440, 4, 415.3, 2, 369.99, 6, 233.8, 6, 277.18, 6, 311.13, 13} {
				if i == 0 {
					t = v // base time unit in seconds
					continue
				}
				if (i+3)%2 == 1 {
					// even index: v is the duration multiplier; play the
					// stored frequency for t*v seconds (1E9 ns per second).
					phone.play(f, time.Duration(t*v*1E9), nil)
				} else {
					f = v // odd index: v is the next frequency
				}
			}
		}
		for i := 0; i < phone.opt.BusyToneTimes; i++ {
			wg := new(sync.WaitGroup)
			wg.Add(2)
			go phone.play(480, time.Second/4, wg)
			go phone.play(620, time.Second/4, wg)
			wg.Wait()
			time.Sleep(time.Second / 4)
		}
	}
	phone.isOpen = false
	phone.Close <- true
}
// Open opens the line with a dial tone
// (480Hz + 620Hz for two seconds when DialToneDuration > 0) and starts a
// background goroutine that begins dialing once no key activity has been
// seen for three seconds. Calling Open on an already-open phone is a no-op.
func (phone *Ph0n3) Open() *Ph0n3 {
	if phone.isOpen {
		return phone
	}
	phone.lastEventTime = time.Now()
	phone.isOpen = true
	if phone.opt.DialToneDuration > 0 {
		wg := new(sync.WaitGroup)
		wg.Add(2)
		go phone.play(480, time.Second*2, wg)
		go phone.play(620, time.Second*2, wg)
		wg.Wait()
		time.Sleep(time.Second / 4)
	}
	go func() {
		// Waiting for no events during 3s to do the call
		// NOTE(review): lastEventTime is also written by Dial with no
		// synchronization — data race; consider a mutex or atomic.
		for time.Since(phone.lastEventTime) < (3 * time.Second) {
			time.Sleep(time.Second / 2)
		}
		fmt.Print("\n")
		phone.dialing()
	}()
	return phone
}
// Dial dials a key sequence: for each key the matching DTMF row/column
// tone pair plays for ToneDuration, the digit (if any) is appended to the
// dialed number, and SpaceDuration of silence follows. Keys outside the
// 4x4 DTMF matrix return an error. The last-event timestamp is refreshed
// on return, postponing the auto-dial timer started by Open.
func (phone *Ph0n3) Dial(keys ...Ph0n3Key) error {
	defer func() {
		phone.lastEventTime = time.Now()
	}()
	var wg *sync.WaitGroup
	for _, k := range keys {
		// Validate before indexing: the original checks used '>' instead
		// of '>=', so e.g. Ph0n3Key(16) slipped past them and panicked on
		// the slice accesses below; negative keys were not caught at all.
		row := int(k) / len(fqMapCols)
		if row < 0 || row >= len(fqMapRows) {
			return errors.New("value out of range")
		}
		col := int(k) % len(fqMapCols)
		if col >= len(fqMapCols) {
			return errors.New("value out of range")
		}
		switch k {
		case Key0:
			phone.dialed += "0"
		case Key1:
			phone.dialed += "1"
		case Key2:
			phone.dialed += "2"
		case Key3:
			phone.dialed += "3"
		case Key4:
			phone.dialed += "4"
		case Key5:
			phone.dialed += "5"
		case Key6:
			phone.dialed += "6"
		case Key7:
			phone.dialed += "7"
		case Key8:
			phone.dialed += "8"
		case Key9:
			phone.dialed += "9"
		case KeyStar:
			phone.dialed += "*"
		case KeyHash:
			phone.dialed += "#"
		}
		// Guard the echo: KeyA-KeyD add no digit, so dialed can be empty
		// here and the original slice expression panicked.
		if phone.opt.Vervose && len(phone.dialed) > 0 {
			fmt.Printf("%v", phone.dialed[len(phone.dialed)-1:])
		}
		wg = new(sync.WaitGroup)
		wg.Add(2)
		go phone.play(fqMapCols[col], phone.opt.ToneDuration, wg)
		go phone.play(fqMapRows[row], phone.opt.ToneDuration, wg)
		wg.Wait()
		time.Sleep(phone.opt.SpaceDuration)
	}
	return nil
}
// DialString dials the keys corresponding to the characters of text;
// characters with no mapping on the standard pad are silently skipped.
func (phone *Ph0n3) DialString(text string) error {
	for _, char := range text {
		if key, ok := StandarPad[strings.ToUpper(string(char))]; ok {
			if err := phone.Dial(key); err != nil {
				return err
			}
		}
	}
	return nil
}
|
[
1
] |
package storage
import (
"database/sql"
"github.com/lib/pq"
"github.com/pkg/errors"
)
// errors — sentinel values returned by the storage layer; callers compare
// against these identities rather than the message text.
var (
	ErrAlreadyExists              = errors.New("object already exists")
	ErrDoesNotExist               = errors.New("object does not exist")
	ErrFrameCounterRetransmission = errors.New("frame-counter did not increment")
	// Fixed typo: "occured" -> "occurred".
	ErrFrameCounterReset          = errors.New("frame-counter reset or rollover occurred")
	ErrInvalidMIC                 = errors.New("invalid MIC")
	ErrInvalidAggregationInterval = errors.New("invalid aggregation interval")
	ErrInvalidName                = errors.New("invalid gateway name")
	ErrInvalidFPort               = errors.New("invalid fPort (must be > 0)")
)
// handlePSQLError maps low-level database errors to the package's sentinel
// errors (no rows, unique and foreign-key violations); anything else is
// wrapped with the given description.
func handlePSQLError(err error, description string) error {
	if err == sql.ErrNoRows {
		return ErrDoesNotExist
	}
	if pqErr, ok := err.(*pq.Error); ok {
		switch pqErr.Code.Name() {
		case "unique_violation":
			return ErrAlreadyExists
		case "foreign_key_violation":
			return ErrDoesNotExist
		}
	}
	return errors.Wrap(err, description)
}
|
[
7
] |
package astx
import (
"strings"
"testing"
)
// TestParseFile checks that ParseFile extracts the package name, paths,
// imports (including doc and side comments) and struct fields (names,
// types, tags, embedded structs) from the checked-in example source file.
func TestParseFile(t *testing.T) {
	parsed, err := ParseFile("./example_src.go.txt")
	if err != nil {
		t.Fatal(err)
	}
	if parsed == nil {
		t.Fatal("parsed file should not be nil")
	}
	if parsed.Package != "astx" {
		t.Error("should parse package name from example.go.txt")
	}
	if parsed.Path == "" {
		t.Error("should include (non-empty) provided file path (./example.go.txt)")
	}
	if parsed.AbsPath == "" {
		t.Error("should resolve (non-empty) absolute path of provided file path")
	}
	// Message fixed: the assertion checks for two imports, but the
	// original text claimed "(1) import".
	if len(parsed.Imports) != 2 {
		t.Error("should parse (2) imports specified in example.go.txt")
	} else {
		imp := parsed.Imports[0]
		if imp.Name != "fmt" {
			t.Error("should parse 'fmt' import specified in example.go.txt")
		}
		if imp.Path != `"fmt"` {
			t.Error("should parse path for 'fmt' import specified in example.go.txt")
		}
		if len(imp.Doc) != 1 {
			t.Error("should parse (1) doc comment above 'fmt' import specified in example.go.txt")
		} else {
			if imp.Doc[0] != "// very useful" {
				t.Error("should parse full doc comment above 'fmt' import specified in example.go.txt")
			}
		}
		// Message fixed: this branch checks the side comment, not the doc
		// comment above the import.
		if len(imp.Comments) != 1 {
			t.Error("should parse (1) comment beside 'fmt' import specified in example.go.txt")
		} else {
			if imp.Comments[0] != "// short for format" {
				t.Error("should parse full comment beside 'fmt' import specified in example.go.txt")
			}
		}
		imp = parsed.Imports[1]
		if imp.Path != `"io"` {
			t.Error("should parse path for 'io' import specified in example.go.txt")
		}
	}
	if len(parsed.Structs) != 1 {
		// t.Fatal stops the test; the unreachable 'return' that followed
		// it in the original has been removed.
		t.Fatal("should parse (1) struct type defined in example.go.txt")
	}
	s := parsed.Structs[0]
	if s.Name != "Point" {
		t.Error("should parse name for struct type defined in example.go.txt")
	}
	if len(s.Comments) != 1 {
		t.Fatal("should parse (1) comment for struct type defined in example.go.txt")
	}
	if s.Comments[0] != "// Point is a type of thing" {
		t.Error("should receive full contents of comment for struct type defined in example.go.txt")
	}
	if len(s.Fields) != 5 {
		t.Fatal("should parse (5) fields for struct type defined in example.go.txt")
	}
	if s.Fields[0].Name != "X" {
		t.Logf("bad field: %#v", s.Fields[0])
		t.Error("should parse names of fields in struct type defined in example.go.txt")
	}
	if s.Fields[0].Type != "int" {
		t.Logf("bad field: %#v", s.Fields[0])
		t.Error("should parse types of fields in struct type defined in example.go.txt")
	}
	if s.Fields[0].Tag.Get("tagz") != "hello" {
		t.Logf("bad field: %#v", s.Fields[0])
		t.Error("should parse tags of fields in struct type defined in example.go.txt")
	}
	if s.Fields[1].Name != "Y" {
		t.Logf("bad field: %#v", s.Fields[1])
		t.Error("should parse names of fields in struct type defined in example.go.txt")
	}
	if s.Fields[1].Type != "io.Reader" {
		t.Logf("bad field: %#v", s.Fields[1])
		t.Error("should parse types of fields in struct type defined in example.go.txt")
	}
	if s.Fields[1].Tag.Get("tagz") != "world" {
		t.Logf("bad field: %#v", s.Fields[1])
		t.Error("should parse tags of fields in struct type defined in example.go.txt")
	}
	if s.Fields[2].Name != "Z" {
		t.Logf("bad field: %#v", s.Fields[2])
		t.Error("should parse names of fields in struct type defined in example.go.txt")
	}
	if s.Fields[2].Type != "[2]******int" {
		t.Logf("bad field: %#v", s.Fields[2])
		t.Error("should parse types (with long pointer chains) of fields in struct type defined in example.go.txt")
	}
	if s.Fields[2].Tag.Get("tagz") != "hello" {
		t.Logf("bad field: %#v", s.Fields[2])
		t.Error("should parse tags of fields in struct type defined in example.go.txt")
	}
	if s.Fields[3].Name != "ZZ" {
		t.Logf("bad field: %#v", s.Fields[3])
		t.Error("should parse names of fields in struct type defined in example.go.txt")
	}
	if s.Fields[3].Type != "map[string]*[SZ]int" {
		t.Logf("bad field: %#v", s.Fields[3])
		t.Error("should parse types of fields in struct type defined in example.go.txt")
	}
	if s.Fields[3].Tag.Get("tagz") != "world" {
		t.Logf("bad field: %#v", s.Fields[3])
		t.Error("should parse tags of fields in struct type defined in example.go.txt")
	}
	if !strings.HasPrefix(s.Fields[4].Type, "*struct {") {
		t.Error("should include type name for embedded struct type defined in example.go.txt -- found:\n" + s.Fields[4].Type)
	}
	if s.Fields[4].StructType == nil {
		t.Fatal("should parse embedded struct type fields in struct type defined in example.go.txt")
	}
	if len(s.Fields[4].StructType.Fields) != 2 {
		t.Fatal("should parse (2) fields in embedded struct type defined in example.go.txt")
	}
	if s.Fields[4].StructType.Fields[0].Name != "A, B" {
		t.Error("should parse names of fields in embedded struct type defined in example.go.txt")
	}
	if s.Fields[4].StructType.Fields[0].Type != "string" {
		t.Error("should parse types of fields in embedded struct type defined in example.go.txt")
	}
	if s.Fields[4].StructType.Fields[1].Name != "C" {
		t.Error("should parse names of fields in embedded struct type defined in example.go.txt")
	}
	if s.Fields[4].StructType.Fields[1].Type != "string" {
		t.Error("should parse types of fields in embedded struct type defined in example.go.txt")
	}
}
|
[
4
] |
package data
import (
"math/big"
"math/bits"
"strconv"
"strings"
"time"
"github.com/olekukonko/tablewriter"
)
//// DATA SLICES /////
// All slice/vector Stringers below render as comma-separated elements in
// square brackets via StringSlice; ErrorVec instead joins on newlines with
// no delimiters and passes the vector itself (not its .Slice()).
func (v DataSlice) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }

//// NATIVE SLICES /////
func (v ByteVec) String() string { return string([]byte(v)) }
func (v NilVec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v BoolVec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v IntVec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v Int8Vec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v Int16Vec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v Int32Vec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v UintVec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v Uint8Vec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v Uint16Vec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v Uint32Vec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v FltVec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v Flt32Vec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v ImagVec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v Imag64Vec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v RuneVec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v BytesVec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v StrVec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v BigIntVec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v BigFltVec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v RatioVec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v TimeVec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v DuraVec) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v FlagSet) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }
func (v ErrorVec) String() string { return StringSlice("\n", "", "", v) }
//// PAIRS ////
// PairVal renders as "(left, right)" using the element Stringers.
func (p PairVal) String() string {
	return "(" + p.Left().String() + ", " + p.Right().String() + ")"
}

//// SETS ////
func (v MapVal) String() string { return StringSlice(", ", "[", "]", v.Slice()...) }

//// NATIVE SETS /////
func (s MapInt) String() string { return StringSlice(", ", "[", "]", s.Slice()...) }
func (s MapUint) String() string { return StringSlice(", ", "[", "]", s.Slice()...) }
func (s MapFloat) String() string { return StringSlice(", ", "[", "]", s.Slice()...) }
func (s MapFlag) String() string { return StringSlice(", ", "[", "]", s.Slice()...) }
func (s MapString) String() string { return StringSlice(", ", "[", "]", s.Slice()...) }

// string nullables — scalar values defer to strconv / the wrapped type.
func (NilVal) String() string { return Nil.String() }
func (v ErrorVal) String() string { return "Error: " + v.E.Error() }
func (v ErrorVal) Error() ErrorVal { return ErrorVal{v.E} }
func (v BoolVal) String() string { return strconv.FormatBool(bool(v)) }
func (v IntVal) String() string { return strconv.Itoa(int(v)) }
func (v Int8Val) String() string { return strconv.Itoa(int(v)) }
func (v Int16Val) String() string { return strconv.Itoa(int(v)) }
func (v Int32Val) String() string { return strconv.Itoa(int(v)) }
func (v UintVal) String() string { return strconv.Itoa(int(v)) }
func (v Uint8Val) String() string { return strconv.Itoa(int(v)) }
func (v Uint16Val) String() string { return strconv.Itoa(int(v)) }
func (v Uint32Val) String() string { return strconv.Itoa(int(v)) }
func (v RuneVal) String() string { return string(v) }
// NOTE(review): StrVal declares Key here, not String — confirm intentional.
func (v StrVal) Key() string { return string(v) }
// String renders the wrapped time via time.Time's default formatting.
// (Dropped the original's no-op `"" +` prefix.)
func (v TimeVal) String() string { return time.Time(v).String() }
// Remaining scalar Stringers: durations, big numbers, floats and complex
// values; Expression evaluates itself and renders the result.
func (v DuraVal) String() string { return time.Duration(v).String() }
func (v BigIntVal) String() string { return ((*big.Int)(&v)).String() }
func (v RatioVal) String() string { return ((*big.Rat)(&v)).String() }
func (v BigFltVal) String() string { return ((*big.Float)(&v)).String() }
func (v FltVal) String() string {
	return strconv.FormatFloat(float64(v), 'G', -1, 64)
}
func (v Flt32Val) String() string {
	return strconv.FormatFloat(float64(v), 'G', -1, 32)
}
// Complex values render as "re + imi".
func (v ImagVal) String() string {
	return strconv.FormatFloat(float64(real(v)), 'G', -1, 64) + " + " +
		strconv.FormatFloat(float64(imag(v)), 'G', -1, 64) + "i"
}
func (v Imag64Val) String() string {
	return strconv.FormatFloat(float64(real(v)), 'G', -1, 32) + " + " +
		strconv.FormatFloat(float64(imag(v)), 'G', -1, 32) + "i"
}
func (v Expression) String() string { return v().String() }
// serializes bitflag to a string representation of the bitwise OR
// operation on a list of principle flags, that yielded this flag
func (v BitFlag) String() string { return StringBitFlag(v) }

// StringBitFlag renders a composite flag as its principle flags joined by
// "∙"; a flag with at most one bit set renders directly as its TyNat name.
func StringBitFlag(v BitFlag) string {
	if bits.OnesCount(uint(v.Uint())) <= 1 {
		return TyNat(v).String()
	}
	// Hoisted: the original called FlagDecompose(v) again on every loop
	// iteration just to read its length, and built the string with +=.
	flags := FlagDecompose(v)
	var b strings.Builder
	for i, f := range flags {
		b.WriteString(f.(TyNat).String())
		if i < len(flags)-1 {
			b.WriteString("∙")
		}
	}
	return b.String()
}
// stringer for ordered chains, without any further specification.
// StringSlice joins the natives with sep inside ldelim/rdelim delimiters;
// elements that are themselves slices (or unboxed vectors) are rendered
// recursively with the same delimiters.
func StringSlice(sep, ldelim, rdelim string, s ...Native) string {
	// strings.Builder replaces the original's quadratic += concatenation.
	var b strings.Builder
	b.WriteString(ldelim)
	for i, d := range s {
		if FlagMatch(d.Type().Flag(), Slice.Type().Flag()) ||
			FlagMatch(d.Type().Flag(), Unboxed.Type().Flag()) {
			b.WriteString(StringSlice(sep, ldelim, rdelim, d.(Sliceable).Slice()...))
		} else {
			b.WriteString(d.String())
		}
		if i < len(s)-1 {
			b.WriteString(sep)
		}
	}
	b.WriteString(rdelim)
	return b.String()
}
// StringChainTable renders rows of natives as a borderless, left-aligned
// text table — one table row per input slice.
func StringChainTable(v ...[]Native) string {
	builder := &strings.Builder{}
	table := tablewriter.NewWriter(builder)
	table.SetBorder(false)
	table.SetColumnSeparator(" ")
	table.SetAlignment(tablewriter.ALIGN_LEFT)
	for _, natives := range v {
		cells := make([]string, 0, len(natives))
		for _, d := range natives {
			cells = append(cells, d.String())
		}
		table.Append(cells)
	}
	table.Render()
	return builder.String()
}
// stringChainTable renders a flat list of natives as a two-column table of
// index and value, using tablewriter's default styling.
func stringChainTable(v ...Native) string {
	builder := &strings.Builder{}
	table := tablewriter.NewWriter(builder)
	for index, native := range v {
		table.Append([]string{strconv.Itoa(index), native.String()})
	}
	table.Render()
	return builder.String()
}
|
[
0
] |
// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validation
import (
extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apivalidation "k8s.io/apimachinery/pkg/api/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// ValidateBackupBucket validates a BackupBucket object.
func ValidateBackupBucket(bb *extensionsv1alpha1.BackupBucket) field.ErrorList {
	allErrs := apivalidation.ValidateObjectMeta(&bb.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath("metadata"))
	return append(allErrs, ValidateBackupBucketSpec(&bb.Spec, field.NewPath("spec"))...)
}
// ValidateBackupBucketUpdate validates a BackupBucket object before an update.
// It combines metadata-update checks, spec-update checks and a full
// re-validation of the new object.
func ValidateBackupBucketUpdate(new, old *extensionsv1alpha1.BackupBucket) field.ErrorList {
	allErrs := apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))
	allErrs = append(allErrs, ValidateBackupBucketSpecUpdate(&new.Spec, &old.Spec, new.DeletionTimestamp != nil, field.NewPath("spec"))...)
	return append(allErrs, ValidateBackupBucket(new)...)
}
// ValidateBackupBucketSpec validates the specification of a BackupBucket
// object: type, region and the secret reference name are all required.
func ValidateBackupBucketSpec(spec *extensionsv1alpha1.BackupBucketSpec, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	required := []struct {
		missing bool
		path    *field.Path
	}{
		{len(spec.Type) == 0, fldPath.Child("type")},
		{len(spec.Region) == 0, fldPath.Child("region")},
		{len(spec.SecretRef.Name) == 0, fldPath.Child("secretRef", "name")},
	}
	for _, r := range required {
		if r.missing {
			allErrs = append(allErrs, field.Required(r.path, "field is required"))
		}
	}
	return allErrs
}
// ValidateBackupBucketSpecUpdate validates the spec of a BackupBucket object before an update.
func ValidateBackupBucketSpecUpdate(new, old *extensionsv1alpha1.BackupBucketSpec, deletionTimestampSet bool, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	// Once deletion has started, the entire spec is frozen: any change at
	// all is reported as an immutable-field violation and the per-field
	// checks below are skipped.
	if deletionTimestampSet && !apiequality.Semantic.DeepEqual(new, old) {
		allErrs = append(allErrs, apivalidation.ValidateImmutableField(new, old, fldPath)...)
		return allErrs
	}
	// Outside of deletion, only type and region are immutable.
	allErrs = append(allErrs, apivalidation.ValidateImmutableField(new.Type, old.Type, fldPath.Child("type"))...)
	allErrs = append(allErrs, apivalidation.ValidateImmutableField(new.Region, old.Region, fldPath.Child("region"))...)
	return allErrs
}
// ValidateBackupBucketStatus validates the status of a BackupBucket object.
// No constraints are enforced yet, so this always returns an empty list.
// (The parameter was renamed from the misleading "spec" to "status".)
func ValidateBackupBucketStatus(status *extensionsv1alpha1.BackupBucketStatus, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	return allErrs
}
// ValidateBackupBucketStatusUpdate validates the status field of a BackupBucket object.
// No constraints are enforced yet, so an empty error list is returned.
func ValidateBackupBucketStatusUpdate(newStatus, oldStatus extensionsv1alpha1.BackupBucketStatus) field.ErrorList {
	return field.ErrorList{}
}
|
[
1
] |
package cmdint
import (
"fmt"
"os"
"sort"
"strings"
"unicode/utf8"
)
//////////////////////////////////////////////////////////////////////////
// Option Spec Set
//////////////////////////////////////////////////////////////////////////

// OptionSpec describes a set of command-line options and parses argument
// lists against it.
type OptionSpec interface {
	// Parse consumes args (with opts as parent context) and returns the
	// resulting option set.
	Parse(opts *Options, args []string) (*Options, error)
	// Mixed / Raw presumably return the spec switched into the matching
	// OPTION_MODE_* parse mode — implementations are outside this chunk;
	// TODO confirm.
	Mixed() OptionSpec
	Raw() OptionSpec
	ArgOption(key string) *OptionConfigHelper
	FlagOption(key string) *OptionConfigHelper
	// GetArgDescription returns the usage placeholder for the option set.
	GetArgDescription() string
	// GetOptionHelp renders the aligned help text for all options.
	GetOptionHelp() string
	GetOptions() map[string]*Option
	Get(key string) *Option
}
// Parse modes controlling how Parse treats non-option and unknown arguments.
const (
	OPTION_MODE_PARSE = "parse" // stop option parsing at the first plain argument
	OPTION_MODE_MIXED = "mixed" // interleave options and plain arguments
	OPTION_MODE_RAW   = "raw"   // pass unknown/short options through untouched
)
// _OptionSpec is the default OptionSpec implementation. The same *Option
// may be reachable via its key, its short rune and its long name.
type _OptionSpec struct {
	options map[string]*Option // all options, keyed by option key
	short   map[rune]*Option   // options addressable by single-rune short name
	long    map[string]*Option // options addressable by long name
	mode    string             // one of the OPTION_MODE_* constants
}

// compile-time assertion that _OptionSpec satisfies OptionSpec
var _ OptionSpec = &_OptionSpec{}
// NewOptionSpec creates an empty option specification in the default
// parse mode.
func NewOptionSpec() OptionSpec {
	return &_OptionSpec{
		options: map[string]*Option{},
		short:   map[rune]*Option{},
		long:    map[string]*Option{},
		mode:    OPTION_MODE_PARSE,
	}
}
// Get returns the option registered under key, or nil when unknown.
func (this *_OptionSpec) Get(key string) *Option {
	return this.options[key]
}

// GetOptions exposes the full option map keyed by option key.
func (this *_OptionSpec) GetOptions() map[string]*Option {
	return this.options
}

// GetArgDescription returns the usage placeholder: "<options>" when at
// least one option is configured, otherwise the empty string.
func (this *_OptionSpec) GetArgDescription() string {
	if len(this.options) > 0 {
		return "<options>"
	}
	return ""
}
// GetOptionHelp renders an alphabetically sorted, column-aligned help text
// for all configured options, one or more lines per option (continuation
// lines from multi-line descriptions are padded to the label width).
func (this *_OptionSpec) GetOptionHelp() string {
	keys := make([]string, 0, len(this.options))
	for k := range this.options { // was `for k, _ :=` (go vet / gofmt nit)
		keys = append(keys, k)
	}
	sort.Strings(keys)
	// First pass: width of the widest "--long -short <arg>" label.
	max := 0
	for _, k := range keys {
		o := this.options[k]
		a := o.GetArgDescription()
		l := utf8.RuneCountInString(strings.TrimSpace(o.longOption()+" "+o.shortOption())) + 1 + utf8.RuneCountInString(a)
		if l > max {
			max = l
		}
	}
	// Second pass: emit the lines; a Builder replaces the original's
	// quadratic string concatenation.
	var desc strings.Builder
	for _, k := range keys {
		o := this.options[k]
		d := DecodeDescription(o)
		a := o.GetArgDescription()
		for i, t := range d {
			if i == 0 {
				fmt.Fprintf(&desc, " %-*s %s\n", max, strings.TrimSpace(o.longOption()+" "+o.shortOption())+" "+a, t)
			} else {
				fmt.Fprintf(&desc, " %-*s %s\n", max, "", t)
			}
		}
	}
	return desc.String()
}
//////////////////////////////////////////////////////////////////////////
// Argument parsing
//////////////////////////////////////////////////////////////////////////

// Parse walks args, consuming "--long[=v]", "-x=v" and grouped "-abc"
// short options according to this.mode, then layers environment-variable
// defaults over anything not explicitly given. ctx becomes the parent of
// the returned Options. A bare "--" terminates option parsing; remaining
// args become plain arguments.
func (this *_OptionSpec) Parse(ctx *Options, args []string) (*Options, error) {
	var err error
	cur := 0
	options := NewOptions(nil)
	for cur < len(args) {
		arg := args[cur]
		if len(arg) > 1 && arg[0] == '-' {
			if arg[1] == '-' {
				if len(arg) == 2 {
					// bare "--": stop option parsing here
					cur++
					break
				}
				cur, err = this.parseLongOption(arg[2:], cur+1, args, options)
				if err != nil {
					return options, err
				}
			} else {
				if this.mode == OPTION_MODE_RAW {
					break
				}
				// check for short option direct argument assignment
				i := strings.Index(arg, "=")
				if i > 0 {
					// must be exactly "-x=..." — one rune before '='
					if i != 2 {
						return options, fmt.Errorf("invalid short argument assignment '%s'", arg)
					}
					cur, err = this.parseLongOption(arg[1:], cur+1, args, options)
					if err != nil {
						return options, err
					}
				} else {
					// parse short options
					cur++
					for _, o := range arg[1:] {
						cur, err = this.parseShortOption(o, cur, args, options)
						if err != nil {
							return options, err
						}
					}
				}
			}
		} else {
			if this.mode != OPTION_MODE_MIXED {
				break
			}
			// mixed mode: plain arguments may appear between options
			options.Arguments = append(options.Arguments, args[cur])
			cur++
		}
	}
	if this.mode == OPTION_MODE_RAW {
		options.Raw = true
	}
	// Everything after the stopping point becomes a plain argument.
	if cur < len(args) {
		options.Arguments = append(options.Arguments, args[cur:]...)
	}
	options.Parent = ctx
	if ctx != nil {
		options.Defaulted(this, ctx)
		options.Context = ctx.Context
	}
	// Environment fallback: an option's Env variable supplies a value only
	// when it was not set on the command line.
	env := os.Environ()
	for _, o := range this.options {
		if _, ok := options.Flags[o.Key]; !ok {
			options.Flags[o.Key] = false
		}
		if o.Env != "" {
			v := lookup(env, o.Env)
			if v != nil {
				switch {
				case o.Args == 0:
					// flags accept "true"/"1" (case-insensitive)
					if !options.Flags[o.Key] {
						switch strings.ToLower(*v) {
						case "true", "1":
							options.Flags[o.Key] = true
						}
					}
				case o.Args == 1:
					if _, ok := options.SingleArgumentOptions[o.Key]; !ok {
						options.SingleArgumentOptions[o.Key] = *v
					}
					if _, ok := options.SingleArgumentArrayOptions[o.Key]; !ok {
						options.SingleArgumentArrayOptions[o.Key] = []string{*v}
					}
				case o.Args > 1:
					// multi-argument options come comma-separated from the
					// environment and must match the expected arity exactly
					args := strings.Split(*v, ",")
					if len(args) == o.Args {
						if _, ok := options.SingleArgumentArrayOptions[o.Key]; !ok {
							options.SingleArgumentArrayOptions[o.Key] = args
						}
						if _, ok := options.MultiArgumentArrayOptions[o.Key]; !ok {
							options.MultiArgumentArrayOptions[o.Key] = [][]string{args}
						}
					}
				}
			}
		}
	}
	return options, nil
}
// lookup returns a pointer to the value of the "KEY=value" entry matching
// key in env, or nil when no entry matches.
func lookup(env []string, key string) *string {
	prefix := key + "="
	for _, entry := range env {
		if !strings.HasPrefix(entry, prefix) {
			continue
		}
		value := entry[len(prefix):]
		return &value
	}
	return nil
}
// parseLongOption handles an argument that started with "--" (Parse also
// routes "-x=v" short assignments here). name is the text after the
// dashes; an "=value" suffix becomes the option's first inline argument.
// Unknown names fall back to a short-option lookup when they are a single
// rune; in raw mode unresolved options pass through as plain arguments.
func (this *_OptionSpec) parseLongOption(name string, cur int, args []string, options *Options) (int, error) {
	optargs := []string{}
	i := strings.Index(name, "=")
	if i > 0 {
		optargs = append(optargs, name[i+1:])
		name = name[0:i]
	}
	option, ok := this.long[name]
	if !ok {
		// a single-rune name may be registered as a short option
		r, size := utf8.DecodeRuneInString(name)
		if len(name) == size {
			option, ok = this.short[r]
		}
		if !ok {
			if this.mode == OPTION_MODE_RAW {
				options.Arguments = append(options.Arguments, args[cur])
				return cur + 1, nil
			}
			return cur, fmt.Errorf("unknown option '%s'", name)
		}
	}
	return this.parseOption(name, option, optargs, cur, args, options)
}
// parseOption consumes the arguments of a recognized option and records it
// in options. optargs may already hold an inline "--opt=value" argument;
// otherwise up to option.Args values are taken from args starting at cur.
// A single value may also carry all arguments comma-separated. The
// returned cursor points behind the last consumed argument.
// (A leftover debug fmt.Printf that wrote every parsed option to stdout
// has been removed.)
func (this *_OptionSpec) parseOption(name string, option *Option, optargs []string,
	cur int, args []string, options *Options) (int, error) {
	if option.Args > 0 {
		if len(optargs) == 0 {
			if len(args) < option.Args+cur {
				optargs = args[cur:]
				cur = len(args)
			} else {
				optargs = args[cur : cur+option.Args]
				cur += option.Args
			}
		}
		// a single inline value may pack all arguments comma-separated
		if option.Args != len(optargs) && len(optargs) == 1 {
			optargs = strings.Split(optargs[0], ",")
		}
		if option.Args != len(optargs) {
			return cur, fmt.Errorf("option '%s' requires %d argument(s) (have %d)", name, option.Args, len(optargs))
		}
	}
	// Record the value in the structure matching the option's arity and
	// whether it may repeat; non-list options reject duplicates.
	switch {
	case option.List && option.Args == 1:
		result, ok := options.SingleArgumentArrayOptions[option.Key]
		if !ok {
			result = []string{}
		}
		result = append(result, optargs[0])
		options.SingleArgumentArrayOptions[option.Key] = result
	case option.List && option.Args > 1:
		result, ok := options.MultiArgumentArrayOptions[option.Key]
		if !ok {
			result = [][]string{}
		}
		result = append(result, optargs)
		options.MultiArgumentArrayOptions[option.Key] = result
	case !option.List && option.Args == 1:
		_, ok := options.SingleArgumentOptions[option.Key]
		if ok {
			return cur, fmt.Errorf("multiple option '%s' given", name)
		}
		options.SingleArgumentOptions[option.Key] = optargs[0]
	case !option.List && option.Args > 1:
		_, ok := options.SingleArgumentArrayOptions[option.Key]
		if ok {
			return cur, fmt.Errorf("multiple option '%s' given", name)
		}
		options.SingleArgumentArrayOptions[option.Key] = optargs
	case option.Args == 0:
		options.Flags[option.Key] = true
	default:
		return cur, fmt.Errorf("unknown option kind %+v", option)
	}
	return cur, nil
}
// parseShortOption resolves a single short-option rune and delegates to
// parseOption with no inline arguments. Unknown runes are ignored in raw
// mode and rejected otherwise.
func (this *_OptionSpec) parseShortOption(name rune, cur int, args []string, options *Options) (int, error) {
	option := this.short[name]
	if option == nil {
		if this.mode == OPTION_MODE_RAW {
			return cur, nil
		}
		return cur, fmt.Errorf("unknown option '%s'", string(name))
	}
	return this.parseOption(string(name), option, []string{}, cur, args, options)
}
|
[
0
] |
package main
import "fmt"
// trailingZeroes returns the number of trailing zeroes of n! using
// Legendre's formula: the exponent of 5 in n!, i.e. n/5 + n/25 + n/125 ...
func trailingZeroes(n int) int {
	zeroes := 0
	for p := 5; n/p >= 1; p *= 5 {
		zeroes += n / p
	}
	return zeroes
}

// main reads a number of test cases from stdin, then for each value n
// prints the number of trailing zeroes of n!.
// (Restyled: Go-idiomatic camelCase names, no stray semicolons, and the
// counting loop extracted into a testable helper.)
func main() {
	var trials int
	fmt.Scanf("%d", &trials)
	for trial := 0; trial < trials; trial++ {
		var n int
		fmt.Scanf("%d", &n)
		fmt.Println(trailingZeroes(n))
	}
}
|
[
0
] |
package main
import "math"
func simpleGreyscale(iterations, iterationCap int, z, c complex) (R, G, B, A float64) {
col := float64(255*iterations) / float64(iterationCap)
return col, col, col, 255
}
func simpleGreyscaleShip(iterations, iterationCap int, z complex) (R, G, B, A float64) {
col := float64(255*iterations) / float64(iterationCap)
return col, col, col, 255
}
func whackyGrayscale(iterations, iterationCap int, z, c complex) (R, G, B, A float64) {
if iterations%2 == 0 {
return 0, 0, 0, 255
}
return 255, 255, 255, 255
}
func whackyGrayscaleShip(iterations, iterationCap int, z complex) (R, G, B, A float64) {
if iterations%2 == 0 {
return 0, 0, 0, 255
}
return 255, 255, 255, 255
}
func zGreyscale(iterations, iterationCap int, z, c complex) (R, G, B, A float64) {
col := 255.0 * (math.Mod(z.abs(), 2.0) / 2.0)
return col, col, col, 255
}
// smoothGreyscale maps the escape-time iteration count to a smoothly
// varying grey value, ping-ponging between black and white on alternating
// integer bands so no banding seams appear.
func smoothGreyscale(iterations, iterationCap int, z, c complex) (R, G, B, A float64) {
	// Two extra iterations shrink the error of the normalized iteration
	// count before the smoothing term is applied.
	z = z.mul(z).add(c)
	iterations++
	z = z.mul(z).add(c)
	iterations++
	i := float64(iterations)
	if iterations < iterationCap {
		// fractional (normalized) iteration count: subtract log2(log|z|)
		i = i - (math.Log(math.Log(z.abs())) / math.Log(2))
	}
	// Alternate ramp direction per integer band so the shade is continuous.
	if int(math.Floor(i))%2 == 0 {
		col := 255 * (math.Mod(i, 1))
		return col, col, col, 255
	}
	col := 255 - (255 * math.Mod(i, 1))
	return col, col, col, 255
}
func smoothColour(iterations, iterationCap int, z, c complex) (R, G, B, A float64) {
z = z.mul(z).add(c)
iterations++
z = z.mul(z).add(c)
iterations++
i := float64(iterations)
if iterations < iterationCap {
i = i - (math.Log(math.Log(z.abs())) / math.Log(2))
}
nu := math.Mod(i, 1)
switch {
case int(math.Floor(i))%3 == 0:
return 255 * nu, 255 * (1 - nu), 255, 255
case int(math.Floor(i))%3 == 1:
return 255, 255 * nu, 255 * (1 - nu), 255
case int(math.Floor(i))%3 == 2:
return 255 * (1 - nu), 255, 255 * nu, 255
}
return 0, 0, 0, 255
}
func smoothColour2(iterations, iterationCap int, z, c complex) (R, G, B, A float64) {
z = z.mul(z).add(c)
iterations++
z = z.mul(z).add(c)
iterations++
i := float64(iterations)
if iterations < iterationCap {
i = i - (math.Log(math.Log(z.abs())) / math.Log(2))
}
nu := math.Mod(i, 1)
switch {
case int(math.Floor(i))%3 == 0:
return 255 * (1 - nu), 255 * nu, 0, 255
case int(math.Floor(i))%3 == 1:
return 0, 255 * (1 - nu), 255 * nu, 255
case int(math.Floor(i))%3 == 2:
return 255 * nu, 0, 255 * (1 - nu), 255
}
return 0, 0, 0, 255
}
|
[
0,
2
] |
package stockrate
import (
"fmt"
"github.com/PuerkitoBio/goquery"
"log"
"net/http"
"regexp"
"strconv"
"strings"
)
var (
stocksURL = make(StocksInfo)
baseURL = "https://www.moneycontrol.com/technical-analysis"
sourceURL = "https://www.moneycontrol.com/india/stockpricequote/"
)
// GetCompanyList returns the list of all the companies tracked via stockrate.
// The order is unspecified because it comes from map iteration.
func GetCompanyList() (list []string) {
	list = make([]string, 0, len(stocksURL))
	for company := range stocksURL {
		list = append(list, company)
	}
	return list
}
// GetPrice returns current price, previous close, open, variation, percentage and volume for a company
// for both the BSE and NSE listings, scraped from the moneycontrol technical
// analysis page. Parse failures of individual fields are ignored, leaving
// the corresponding field at its zero value.
func GetPrice(company string) (StockPrice, error) {
	var stockPrice StockPrice
	url, err := getURL(company)
	if err != nil {
		return stockPrice, err
	}
	doc, err := getStockQuote(url)
	if err != nil {
		return stockPrice, fmt.Errorf("Error in reading stock Price")
	}
	// BSE panel: each numeric cell is parsed best-effort (errors discarded).
	doc.Find(".bsedata_bx").Each(func(i int, s *goquery.Selection) {
		stockPrice.BSE.Price, _ = strconv.ParseFloat(s.Find(".span_price_wrap").Text(), 64)
		stockPrice.BSE.PreviousClose, _ = strconv.ParseFloat(s.Find(".priceprevclose").Text(), 64)
		stockPrice.BSE.Open, _ = strconv.ParseFloat(s.Find(".priceopen").Text(), 64)
		// Text looks like "<change> (<percent>%)": first token is the change,
		// the parenthesised part is the percentage.
		stockPrice.BSE.Variation, _ = strconv.ParseFloat(strings.Split(s.Find(".span_price_change_prcnt").Text(), " ")[0], 64)
		stockPrice.BSE.Percentage, _ = strconv.ParseFloat(strings.Split(strings.Split(s.Find(".span_price_change_prcnt").Text(), "%")[0], "(")[1], 64)
		stockPrice.BSE.Volume, _ = strconv.ParseInt(strings.ReplaceAll(s.Find(".volume_data").Text(), ",", ""), 10, 64)
	})
	// NSE panel: identical extraction from the .nsedata_bx container.
	doc.Find(".nsedata_bx").Each(func(i int, s *goquery.Selection) {
		stockPrice.NSE.Price, _ = strconv.ParseFloat(s.Find(".span_price_wrap").Text(), 64)
		stockPrice.NSE.PreviousClose, _ = strconv.ParseFloat(s.Find(".priceprevclose").Text(), 64)
		stockPrice.NSE.Open, _ = strconv.ParseFloat(s.Find(".priceopen").Text(), 64)
		stockPrice.NSE.Variation, _ = strconv.ParseFloat(strings.Split(s.Find(".span_price_change_prcnt").Text(), " ")[0], 64)
		stockPrice.NSE.Percentage, _ = strconv.ParseFloat(strings.Split(strings.Split(s.Find(".span_price_change_prcnt").Text(), "%")[0], "(")[1], 64)
		stockPrice.NSE.Volume, _ = strconv.ParseInt(strings.ReplaceAll(s.Find(".volume_data").Text(), ",", ""), 10, 64)
	})
	return stockPrice, nil
}
// GetTechnicals returns the technical valuations of a company with indications,
// keyed by indicator name, scraped from the #techindd table.
func GetTechnicals(company string) (StockTechnicals, error) {
	stockTechnicals := make(StockTechnicals)
	url, err := getURL(company)
	if err != nil {
		return nil, err
	}
	doc, err := getStockQuote(url)
	if err != nil {
		return nil, fmt.Errorf("Error in reading stock Technicals %v", err.Error())
	}
	doc.Find("#techindd").Find("tbody").Find("tr").Each(func(i int, s *goquery.Selection) {
		// The raw cell may look like "RSI(14)" or "MACD(12,26,9)"; everything
		// from the first "(" or "%" onward is stripped to get the bare name.
		symbol := strings.Split(strings.Split(s.Find("td").First().Text(), "(")[0], "%")[0]
		level, _ := strconv.ParseFloat(strings.ReplaceAll(s.Find("td").Find("strong").First().Text(), ",", ""), 64)
		indication := s.Find("td").Find("strong").Last().Text()
		// Bug fix: the original compared against "Bollinger Band(20,2)", but
		// symbol can never contain "(" after the split above, so the
		// exclusion was dead code. Compare against the stripped name instead.
		if symbol != "" && symbol != "Bollinger Band" {
			stockTechnicals[symbol] = technicalValue{level, indication}
		}
	})
	return stockTechnicals, nil
}
// GetMovingAverage returns the 5, 10, 20, 50, 100, 200 days moving average respectively,
// keyed by period in days, scraped from the #movingavgd table.
func GetMovingAverage(company string) (StockMovingAverage, error) {
	stockMovingAverage := make(StockMovingAverage)
	url, err := getURL(company)
	if err != nil {
		return nil, err
	}
	doc, err := getStockQuote(url)
	if err != nil {
		return nil, fmt.Errorf("Error in reading stock Moving Averages %v", err.Error())
	}
	doc.Find("#movingavgd").Find("tbody").Find("tr").Each(func(i int, s *goquery.Selection) {
		// First cell is the period in days; rows whose first cell is not a
		// number (headers etc.) parse to 0 and are skipped below.
		period, _ := strconv.Atoi(s.Find("td").First().Text())
		sma, _ := strconv.ParseFloat(strings.ReplaceAll(s.Find("td").Find("strong").First().Text(), ",", ""), 64)
		indication := s.Find("td").Find("strong").Last().Text()
		if period != 0 {
			stockMovingAverage[period] = movingAverageValue{sma, indication}
		}
	})
	return stockMovingAverage, nil
}
// GetPivotLevels returns the important pivot levels of a stock given in order R1, R2, R3, Pivot, S1, S2, S3,
// keyed by pivot type (e.g. "Classic"), scraped from the #pevotld table.
func GetPivotLevels(company string) (StockPivotLevels, error) {
	stockPivotLevels := make(StockPivotLevels)
	url, err := getURL(company)
	if err != nil {
		return nil, err
	}
	doc, err := getStockQuote(url)
	if err != nil {
		return nil, fmt.Errorf("Error in reading stock Pivot Levels %v", err.Error())
	}
	doc.Find("#pevotld").Find("table").First().Find("tbody").Find("tr").Each(func(i int, s *goquery.Selection) {
		pivotType := s.Find("td").First().Text()
		if pivotType == "" {
			return
		}
		var levels []float64
		s.Find("td").Next().Each(func(i int, s *goquery.Selection) {
			level, _ := strconv.ParseFloat(strings.ReplaceAll(s.Text(), ",", ""), 64)
			levels = append(levels, level)
		})
		// Bug fix: the original indexed levels[0..6] unconditionally, which
		// panics inside the callback whenever the page layout yields a row
		// with fewer than 7 value cells. Skip malformed rows instead.
		if len(levels) >= 7 {
			stockPivotLevels[pivotType] = pivotPointsValue{
				levels[0], levels[1], levels[2], levels[3], levels[4], levels[5], levels[6],
			}
		}
	})
	return stockPivotLevels, nil
}
// getStockQuote fetches URL over HTTP and parses the response body into a
// goquery document. It fails on transport errors, non-200 responses, and
// HTML parse errors.
func getStockQuote(URL string) (*goquery.Document, error) {
	res, err := http.Get(URL)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	// Bug fix: the original parsed whatever came back, silently scraping
	// error pages; reject anything but 200 OK.
	if res.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %d fetching %s", res.StatusCode, URL)
	}
	doc, err := goquery.NewDocumentFromReader(res.Body)
	if err != nil {
		return nil, err
	}
	return doc, nil
}
// getURL checks whether we can read data for company and returns its data
// source URL on the technical-analysis site. Lookup is case-insensitive.
func getURL(company string) (URL string, err error) {
	val, found := stocksURL[strings.ToLower(company)]
	if !found {
		return "", fmt.Errorf("Company Not Found")
	}
	return baseURL + "/" + val.Company + "/" + val.Symbol + "/daily", nil
}
// Here stocks information necessary is saved and stored, which is calculated everytime package is imported.
// init scrapes the stock index page for every letter A-Z and populates
// stocksURL with the company/symbol path parts of each stock link.
func init() {
	fmt.Println("Reading stocks")
	// Compile the link filter once instead of once per anchor tag.
	linkRe := regexp.MustCompile(`^(http:\/\/www\.|https:\/\/www\.|http:\/\/|https:\/\/)?[a-z0-9]+([\-\.]{1}[a-z0-9]+)*\.[a-z]{2,5}(:[0-9]{1,5})?(\/.*)?$`)
	// Bug fix: the original hard-coded letter slice listed "E" twice, so
	// that index page was fetched and parsed twice. Iterate A-Z directly.
	for char := 'A'; char <= 'Z'; char++ {
		doc, err := getStockQuote(sourceURL + string(char))
		if err != nil {
			log.Panic("Error in fetching stock URLs ", err.Error())
		}
		doc.Find(".bl_12").Each(func(i int, s *goquery.Selection) {
			link, _ := s.Attr("href")
			stockName := s.Text()
			if linkRe.MatchString(link) {
				// Path segments 5..7 are expected to be the quote path parts;
				// the regexp above ensures a well-formed absolute URL.
				stockURLSplit := strings.Split(link, "/")
				stocksURL[strings.ToLower(stockName)] = stockURLValue{stockURLSplit[5], stockURLSplit[6], stockURLSplit[7]}
			}
		})
	}
	fmt.Println("Stocks Read Succesfull")
}
|
[
2
] |
package master
import (
"github.com/OHopiak/fractal-load-balancer/core"
"github.com/jinzhu/gorm"
"github.com/labstack/echo/v4"
)
type (
	// Master is the load-balancer master node: an embedded core.Host for
	// its listen address, an echo HTTP server, the proxy balancer that
	// fans requests out to workers, and the backing database handle.
	Master struct {
		core.Host
		Server   *echo.Echo
		balancer ProxyBalancer
		db       *gorm.DB
	}
)
// New builds a Master bound to localhost:8000 by default, connects the
// database from dbConfig, and wires templates, middleware and routes.
// Use WithIP / WithPort to override the listen address.
func New(dbConfig core.DatabaseConfig) Master {
	m := Master{
		Host: core.Host{
			IP:   "localhost",
			Port: 8000,
		},
		Server: core.NewServer(),
	}
	m.configureDatabase(dbConfig)
	m.templates()
	m.middleware()
	m.routes()
	return m
}
// WithIP returns a copy of the Master with its listen IP replaced
// (builder-style; the receiver is a value, so the original is unchanged).
func (m Master) WithIP(IP string) Master {
	m.IP = IP
	return m
}
// WithPort returns a copy of the Master with its listen port replaced
// (builder-style; the receiver is a value, so the original is unchanged).
func (m Master) WithPort(port int) Master {
	m.Port = port
	return m
}
// Start launches the HTTP server asynchronously, runs post-start worker
// registration, and then blocks until the server errors out. Any failure
// is fatal via the server logger.
func (m *Master) Start() {
	errChan := core.StartServerAsync(m.Server, m.Host)
	err := m.PostStart()
	if err != nil {
		m.Server.Logger.Fatal(err)
	}
	// Block on the async server's error channel.
	m.Server.Logger.Fatal(<-errChan)
}
// PostStart registers every known worker as a proxy target on the
// balancer. It returns the first error encountered while resolving a
// worker's URL, leaving earlier workers already registered.
func (m *Master) PostStart() error {
	for _, worker := range m.Workers() {
		m.Server.Logger.Infof("Adding worker %s", worker.Host())
		url, err := worker.Host().Url()
		if err != nil {
			return err
		}
		m.balancer.AddTarget(&ProxyTarget{
			Name: url.String(),
			URL:  url,
			Meta: echo.Map{
				"id": worker.ID,
			},
		})
	}
	return nil
}
|
[
2
] |
// MinIO Cloud Storage, (C) 2021 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"bytes"
"crypto/rand"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"github.com/minio/minio/pkg/kms"
"github.com/secure-io/sio-go"
"github.com/secure-io/sio-go/sioutil"
)
// Encrypt encrypts the plaintext with a key managed by KMS.
// The context is bound to the returned ciphertext.
//
// The same context must be provided when decrypting the
// ciphertext.
//
// Wire format of the returned stream:
//   [1 byte version][4 bytes little-endian metadata length][JSON metadata][AEAD ciphertext]
// where the metadata carries the KMS key ID, the sealed data key, the AEAD
// algorithm and the nonce (see encryptedObject).
func Encrypt(KMS kms.KMS, plaintext io.Reader, context kms.Context) (io.Reader, error) {
	// Prefer AES-GCM when the CPU has AES instructions, else ChaCha20-Poly1305.
	var algorithm = sio.AES_256_GCM
	if !sioutil.NativeAES() {
		algorithm = sio.ChaCha20Poly1305
	}
	// Generate a fresh data key, sealed by the KMS and bound to context.
	key, err := KMS.GenerateKey("", context)
	if err != nil {
		return nil, err
	}
	stream, err := algorithm.Stream(key.Plaintext)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, stream.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}

	const (
		MaxMetadataSize = 1 << 20 // max. size of the metadata
		Version         = 1
	)
	var (
		header [5]byte
		buffer bytes.Buffer
	)
	metadata, err := json.Marshal(encryptedObject{
		KeyID:     key.KeyID,
		KMSKey:    key.Ciphertext,
		Algorithm: algorithm,
		Nonce:     nonce,
	})
	if err != nil {
		return nil, err
	}
	if len(metadata) > MaxMetadataSize {
		return nil, errors.New("config: encryption metadata is too large")
	}
	header[0] = Version
	binary.LittleEndian.PutUint32(header[1:], uint32(len(metadata)))
	buffer.Write(header[:])
	buffer.Write(metadata)
	// Header + metadata first, then the encrypted payload streamed lazily.
	return io.MultiReader(
		&buffer,
		stream.EncryptReader(plaintext, nonce, nil),
	), nil
}
// Decrypt decrypts the ciphertext using a key managed by the KMS.
// The same context that have been used during encryption must be
// provided.
//
// It expects the wire format produced by Encrypt:
//   [1 byte version][4 bytes little-endian metadata length][JSON metadata][AEAD ciphertext]
func Decrypt(KMS kms.KMS, ciphertext io.Reader, context kms.Context) (io.Reader, error) {
	const (
		MaxMetadataSize = 1 << 20 // max. size of the metadata
		Version         = 1
	)

	var header [5]byte
	if _, err := io.ReadFull(ciphertext, header[:]); err != nil {
		return nil, err
	}
	if header[0] != Version {
		return nil, fmt.Errorf("config: unknown ciphertext version %d", header[0])
	}
	size := binary.LittleEndian.Uint32(header[1:])
	// Reject oversized metadata before allocating a buffer for it.
	if size > MaxMetadataSize {
		return nil, errors.New("config: encryption metadata is too large")
	}

	var (
		metadataBuffer = make([]byte, size)
		metadata       encryptedObject
	)
	if _, err := io.ReadFull(ciphertext, metadataBuffer); err != nil {
		return nil, err
	}
	if err := json.Unmarshal(metadataBuffer, &metadata); err != nil {
		return nil, err
	}
	// Unseal the data key; the KMS verifies the context binding.
	key, err := KMS.DecryptKey(metadata.KeyID, metadata.KMSKey, context)
	if err != nil {
		return nil, err
	}
	stream, err := metadata.Algorithm.Stream(key)
	if err != nil {
		return nil, err
	}
	// A wrong-length nonce can never authenticate; fail early.
	if stream.NonceSize() != len(metadata.Nonce) {
		return nil, sio.NotAuthentic
	}
	return stream.DecryptReader(ciphertext, metadata.Nonce, nil), nil
}
// encryptedObject is the JSON metadata prefix written by Encrypt and read
// by Decrypt: the KMS key ID, the KMS-sealed data key, the AEAD algorithm
// used for the payload, and the stream nonce.
type encryptedObject struct {
	KeyID     string        `json:"keyid"`
	KMSKey    []byte        `json:"kmskey"`
	Algorithm sio.Algorithm `json:"algorithm"`
	Nonce     []byte        `json:"nonce"`
}
|
[
2
] |
package main
import "fmt"
// main is the program entry point; it just runs the demo function.
func main() {
	my_func()
}
// my_func demonstrates a simple two-value maximum and prints the result.
// (The header comment below translates the original bitwise-operator notes.)
func my_func() {
	// Bitwise operator reference (translated from the original Chinese):
	//  &  AND:  a result bit is 1 only when both operand bits are 1.
	//  |  OR:   a result bit is 1 when at least one operand bit is 1.
	//  ^  XOR:  a result bit is 1 when the two operand bits differ.
	//  << left shift:  shifts bits left, filling with 0; shifting left by
	//     n multiplies by 2^n (high bits are discarded).
	//  >> right shift: shifts bits right, dropping low bits; shifting
	//     right by n divides by 2^n.
	first, second := 10, 20
	largest := second
	if first > second {
		largest = first
	}
	fmt.Println("max: ", largest)
}
|
[
1
] |
package k2tree
import (
"fmt"
)
// max reports the larger of i and j.
func max(i, j int) int {
	if j > i {
		return j
	}
	return i
}
// min reports the smaller of i and j.
func min(i, j int) int {
	if j < i {
		return j
	}
	return i
}
// abs returns the absolute value of i.
func abs(i int) int {
	if i >= 0 {
		return i
	}
	return -i
}
// intPow computes a**b for non-negative b by binary (square-and-multiply)
// exponentiation; b == 0 yields 1.
func intPow(a, b int) int {
	result := 1
	for b != 0 {
		if b&1 != 0 {
			result *= a
		}
		a *= a
		b >>= 1
	}
	return result
}
// assert panics with errstr when test is false; otherwise it is a no-op.
func assert(test bool, errstr string) {
	if test {
		return
	}
	panic(errstr)
}
// twosHistogram tallies integers into buckets by bit length: bucket k
// counts values needing k bits, except that 0 and 1 share bucket 0
// (and non-positive values also land in bucket 0).
type twosHistogram struct {
	buckets [65]int
}

// Add records n in the bucket for its bit length.
func (th *twosHistogram) Add(n int) {
	if n == 0 || n == 1 {
		th.buckets[0]++
		return
	}
	bits := 0
	for ; n > 0; n >>= 1 {
		bits++
	}
	th.buckets[bits]++
}

// String renders one "threshold: count" line per bucket, labelled 1<<i.
// (For the top buckets 1<<i overflows int, so those labels wrap around.)
func (th twosHistogram) String() string {
	out := "\n"
	for size, count := range th.buckets {
		out += fmt.Sprintf("%d: %d\n", 1<<size, count)
	}
	return out
}
|
[
0
] |
package bls12381
import (
"math/big"
)
// nafNumber holds a windowed non-adjacent-form representation: a slice of
// signed digits, least-significant first.
type nafNumber []int

// neg negates the represented number in place by flipping every digit's sign.
func (n nafNumber) neg() {
	for i := range n {
		n[i] = -n[i]
	}
}
// Shared big.Int constants; used read-only by the WNAF conversions below.
var bigZero = big.NewInt(0)
var bigOne = big.NewInt(1)
// toWNAF converts the field element e to width-w non-adjacent form:
// a digit sequence (least-significant first) where each nonzero digit is
// odd and lies in (-2^w, 2^w). Returns an empty form for w == 0.
// e itself is not modified; the work happens on a copy.
func (e *Fr) toWNAF(w uint) nafNumber {
	naf := nafNumber{}
	if w == 0 {
		return naf
	}
	windowSize, halfSize, mask := 1<<(w+1), 1<<w, (1<<(w+1))-1
	ee := new(Fr).Set(e)
	z := new(Fr)
	for !ee.IsZero() {
		if !ee.isEven() {
			// Take the lowest w+1 bits, mapped into the signed window
			// (-halfSize, halfSize], then subtract the digit out of ee.
			nafSign := int(ee[0]) & mask
			if nafSign >= halfSize {
				nafSign = nafSign - windowSize
			}
			naf = append(naf, int(nafSign))
			if nafSign < 0 {
				laddAssignFR(ee, z.setUint64(uint64(-nafSign)))
			} else {
				lsubAssignFR(ee, z.setUint64(uint64(nafSign)))
			}
		} else {
			naf = append(naf, 0)
		}
		ee.div2()
	}
	return naf
}
// fromWNAF reconstructs the field element from a width-w non-adjacent form,
// storing the result in e and returning it. For w == 0 e is returned
// unchanged. Digits are consumed most-significant first with a
// double-and-add loop over a table of odd values 1, 3, 5, ...
func (e *Fr) fromWNAF(naf nafNumber, w uint) *Fr {
	if w == 0 {
		return e
	}
	// table[k] = 2k+1, covering every possible odd |digit| < 2^w.
	l := (1 << (w - 1))
	table := make([]*Fr, l)
	table[0] = new(Fr).One()
	two := new(Fr).setUint64(2)
	for i := 1; i < l; i++ {
		table[i] = new(Fr)
		table[i].Add(table[i-1], two)
	}
	acc := new(Fr).Zero()
	for i := len(naf) - 1; i >= 0; i-- {
		// digit>>1 indexes the odd-value table (digits are always odd).
		if naf[i] < 0 {
			acc.Sub(acc, table[-naf[i]>>1])
		} else if naf[i] > 0 {
			acc.Add(acc, table[naf[i]>>1])
		}
		if i != 0 {
			acc.Double(acc)
		}
	}
	return e.Set(acc)
}
// bigToWNAF converts |e| to width-w non-adjacent form, digits
// least-significant first, each nonzero digit odd and in (-2^w, 2^w).
// caution: does not cover negative case
func bigToWNAF(e *big.Int, w uint) nafNumber {
	naf := nafNumber{}
	if w == 0 {
		return naf
	}
	window := new(big.Int).Lsh(bigOne, w+1)
	half := new(big.Int).Rsh(window, 1)
	remainder := new(big.Int).Abs(e)
	for remainder.Cmp(bigZero) != 0 {
		if remainder.Bit(0) == 1 {
			// Lowest w+1 bits mapped into the signed window, then removed.
			digit := new(big.Int).Mod(remainder, window)
			if digit.Cmp(half) >= 0 {
				digit.Sub(digit, window)
			}
			naf = append(naf, int(digit.Int64()))
			remainder.Sub(remainder, digit)
		} else {
			naf = append(naf, 0)
		}
		remainder.Rsh(remainder, 1)
	}
	return naf
}
// bigFromWNAF evaluates a non-adjacent form back into a big.Int by summing
// digit * 2^position over all nonzero digits.
func bigFromWNAF(naf nafNumber) *big.Int {
	total := new(big.Int)
	weight := new(big.Int).Set(bigOne)
	for _, digit := range naf {
		if digit != 0 {
			term := new(big.Int).Mul(weight, big.NewInt(int64(digit)))
			total.Add(total, term)
		}
		weight.Lsh(weight, 1)
	}
	return total
}
|
[
0
] |
package help
import (
"github.com/astaxie/beego/utils"
"math/rand"
"time"
)
// RandNum returns a pseudo-random integer in [min, max).
// It returns 0 when the range is invalid (min >= max) or when either bound
// is zero — note that min == 0 is rejected even though [0, max) would be a
// valid range, preserving the original contract.
// NOTE(review): reseeding the shared source on every call is unnecessary
// and rand.Seed is not safe to call concurrently with rand.Intn — confirm
// before using this from multiple goroutines.
func RandNum(min, max int) int {
	if min == 0 || max == 0 || min >= max {
		return 0
	}
	rand.Seed(time.Now().UnixNano())
	return min + rand.Intn(max-min)
}
// RandStr returns a random string of length n, delegating generation to
// beego's utils.RandomCreateBytes.
func RandStr(n int) string {
	return string(utils.RandomCreateBytes(n))
}
|
[
1
] |
package p2p
import (
"context"
"fmt"
"math/rand"
pstore "gx/ipfs/QmPgDWmTmuzvP7QE5zwo1TmjbJme9pmZHNujB2453jkCTr/go-libp2p-peerstore"
ipfscore "gx/ipfs/QmViBzgruNUoLNBnXcx8YWbDNwV8MNGEGKkLo6JGetygdw/go-ipfs/core"
math2 "gx/ipfs/QmViBzgruNUoLNBnXcx8YWbDNwV8MNGEGKkLo6JGetygdw/go-ipfs/thirdparty/math2"
ma "gx/ipfs/QmXY77cVe7rVRQXZZQRioukUM7aRW3BTcAgJe12MCtb3Ji/go-multiaddr"
peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer"
)
// DefaultBootstrapAddresses follows the pattern of IPFS bootstrapping off known "gateways".
// This bootstrapping is specific to finding qri peers, which are IPFS peers that also
// support the qri protocol.
// (we also perform standard IPFS bootstrapping when IPFS networking is enabled, and it's almost always enabled).
// These are addresses to public qri nodes hosted by qri, inc.
// One day it would be super nice to bootstrap from a stored history & only
// use these for first-round bootstrapping.
// Each entry is a full multiaddr string: /ip4/<addr>/tcp/<port>/ipfs/<peer-id>.
var DefaultBootstrapAddresses = []string{
	"/ip4/35.224.133.67/tcp/4001/ipfs/QmamJUR83rGtDMEvugcC2gtLDx2nhZUTzpzhH6MA2Pb3Md", // EDGI
}
// Bootstrap samples a subset of peers & requests their peers list
// This is a naive version of IPFS bootstrapping, which we'll add in once
// qri's settled on a shared-state implementation
//
// Each successfully connected and registered peer is sent on boostrapPeers;
// connection attempts run in parallel goroutines (up to 4), and failures
// are logged but not reported to the caller.
func (n *QriNode) Bootstrap(boostrapAddrs []string, boostrapPeers chan pstore.PeerInfo) {
	peers, err := ParseMultiaddrs(boostrapAddrs)
	if err != nil {
		n.log.Info("error parsing bootstrap addresses:", err.Error())
		return
	}

	pinfos := toPeerInfos(peers)
	// Try at most 4 randomly chosen bootstrap peers, concurrently.
	for _, p := range randomSubsetOfPeers(pinfos, 4) {
		go func(p pstore.PeerInfo) {
			n.log.Infof("boostrapping to: %s", p.ID.Pretty())
			if err := n.Host.Connect(context.Background(), p); err == nil {
				if err = n.AddQriPeer(p); err != nil {
					n.log.Infof("error adding peer: %s", err.Error())
				} else {
					boostrapPeers <- p
				}
			} else {
				n.log.Infof("error connecting to host: %s", err.Error())
			}
		}(p)
	}
}
// BootstrapIPFS connects this node to standard ipfs nodes for file exchange.
// Failure to obtain the IPFS node is silently ignored (matching the
// original behaviour); bootstrap failures are logged.
func (n *QriNode) BootstrapIPFS() {
	if node, err := n.IPFSNode(); err == nil {
		if err := node.Bootstrap(ipfscore.DefaultBootstrapConfig); err != nil {
			// Bug fix: the original called fmt.Errorf here and discarded the
			// resulting error (a go vet finding); log it so it is visible.
			n.log.Infof("IPFS bootstrap error: %s", err.Error())
		}
	}
}
// ParseMultiaddrs turns a slice of strings into a slice of Multiaddrs.
// On the first parse failure it returns the partially filled slice along
// with the error.
func ParseMultiaddrs(addrs []string) (maddrs []ma.Multiaddr, err error) {
	maddrs = make([]ma.Multiaddr, len(addrs))
	for i, adr := range addrs {
		var addr ma.Multiaddr
		addr, err = ma.NewMultiaddr(adr)
		if err != nil {
			return maddrs, err
		}
		maddrs[i] = addr
	}
	return maddrs, nil
}
// toPeerInfos turns a slice of multiaddrs into a slice of PeerInfos,
// grouping the transport addresses of addrs that share a peer ID.
// NOTE(review): any address missing an /ipfs component or carrying an
// undecodable peer ID makes the whole function return nil with no error —
// callers cannot distinguish this from an empty input. Confirm this
// swallow-and-nil behaviour is intended.
func toPeerInfos(addrs []ma.Multiaddr) []pstore.PeerInfo {
	pinfos := make(map[peer.ID]*pstore.PeerInfo)
	for _, addr := range addrs {
		// Extract the trailing /ipfs/<peer-id> component.
		pid, err := addr.ValueForProtocol(ma.P_IPFS)
		if err != nil {
			return nil
		}
		peerid, err := peer.IDB58Decode(pid)
		if err != nil {
			return nil
		}

		pinfo, ok := pinfos[peerid]
		if !ok {
			pinfo = new(pstore.PeerInfo)
			pinfos[peerid] = pinfo
			pinfo.ID = peerid
		}

		// TODO - support circuit-relay once it normalizes
		// Strip the final (/ipfs/...) component, keeping the transport part.
		split := ma.Split(addr)
		maddr := ma.Join(split[:len(split)-1]...)
		pinfo.Addrs = append(pinfo.Addrs, maddr)
	}

	var peers []pstore.PeerInfo
	for _, pinfo := range pinfos {
		peers = append(peers, *pinfo)
	}

	return peers
}
// randomSubsetOfPeers samples up to max from a slice of PeerInfos,
// in random order.
// NOTE(review): because the length check happens after the first append,
// a non-positive max on a non-empty input still yields one element —
// preserved from the original; confirm callers never pass max <= 0.
func randomSubsetOfPeers(in []pstore.PeerInfo, max int) []pstore.PeerInfo {
	limit := math2.IntMin(max, len(in))
	var picked []pstore.PeerInfo
	for _, idx := range rand.Perm(len(in)) {
		picked = append(picked, in[idx])
		if len(picked) >= limit {
			break
		}
	}
	return picked
}
|
[
1
] |
package heap
// Comparable is the indexed-collection contract the heap functions operate
// on: report length, three-way compare two elements, and swap them
// (the same shape as sort.Interface, with Compare replacing Less).
type Comparable interface {
	Len() int
	/*
		Compare must return -1 if Comparable[i] < Comparable[j],
		1 if [i] > [j],
		and 0 if equal
	*/
	Compare(i, j int) int
	Swap(i, j int)
}
// HeapParent returns the index of the parent of the node at index in a
// 0-based binary heap, or -1 for the root.
func HeapParent(index int) int {
	if index == 0 {
		return -1
	}
	return (index - 1) / 2
}
// BuildMaxHeap establishes the max-heap property by sifting each element
// up toward the root, left to right.
func BuildMaxHeap(heap Comparable) {
	for i := 0; i < heap.Len(); i++ {
		MaxHeapCheck(heap, i)
	}
}
// BuildMinHeap establishes the min-heap property by sifting each element
// up toward the root, left to right.
func BuildMinHeap(heap Comparable) {
	for i := 0; i < heap.Len(); i++ {
		MinHeapCheck(heap, i)
	}
}
// MaxHeapCheck sifts the element at pos up toward the root: while the
// parent compares smaller, swap with it and recurse from the parent,
// restoring the max-heap property along the root path.
func MaxHeapCheck(MaxHeap Comparable, pos int) {
	if HeapParent(pos) == -1 {
		// pos is the root; nothing to sift.
		return
	} else if MaxHeap.Compare(HeapParent(pos), pos) == -1 {
		MaxHeap.Swap(pos, HeapParent(pos))
		MaxHeapCheck(MaxHeap, HeapParent(pos))
	}
}
// MaxHeapReheapify sifts the element at pos down, restoring the max-heap
// property below pos. Children of pos sit at 2*(pos+1)-1 (left) and
// 2*(pos+1) (right). Unlike the textbook version that swaps with the
// larger child once, this checks the left child first and then re-checks
// the (possibly updated) pos against the right child.
func MaxHeapReheapify(MaxHeap Comparable, pos int) {
	if 2*(pos+1)-1 >= MaxHeap.Len() {
		// No children; heap property holds trivially.
		return
	}
	if MaxHeap.Compare(pos, 2*(pos+1)-1) == -1 {
		MaxHeap.Swap(pos, 2*(pos+1)-1)
		MaxHeapReheapify(MaxHeap, 2*(pos+1)-1)
	}
	if 2*(pos+1) >= MaxHeap.Len() {
		// Left child only.
		return
	}
	if MaxHeap.Compare(pos, 2*(pos+1)) == -1 {
		MaxHeap.Swap(pos, 2*(pos+1))
		MaxHeapReheapify(MaxHeap, 2*(pos+1))
	}
	return
}
// MinHeapReheapify sifts the element at pos down, restoring the min-heap
// property below pos; the mirror of MaxHeapReheapify with the comparison
// direction reversed (swap when the parent compares greater).
func MinHeapReheapify(MinHeap Comparable, pos int) {
	if 2*(pos+1)-1 >= MinHeap.Len() {
		// No children; heap property holds trivially.
		return
	}
	if MinHeap.Compare(pos, 2*(pos+1)-1) == 1 {
		MinHeap.Swap(pos, 2*(pos+1)-1)
		MinHeapReheapify(MinHeap, 2*(pos+1)-1)
	}
	if 2*(pos+1) >= MinHeap.Len() {
		// Left child only.
		return
	}
	if MinHeap.Compare(pos, 2*(pos+1)) == 1 {
		MinHeap.Swap(pos, 2*(pos+1))
		MinHeapReheapify(MinHeap, 2*(pos+1))
	}
	return
}
// MinHeapCheck sifts the element at pos up toward the root: while the
// parent compares greater, swap with it and recurse from the parent,
// restoring the min-heap property along the root path.
func MinHeapCheck(MinHeap Comparable, pos int) {
	parentPos := HeapParent(pos)
	if parentPos == -1 {
		// pos is the root; nothing to sift.
		return
	} else if MinHeap.Compare(parentPos, pos) == 1 {
		MinHeap.Swap(pos, parentPos)
		MinHeapCheck(MinHeap, parentPos)
	}
}
|
[
2
] |
package main
import (
"context"
"fmt"
"io"
"math"
"net"
"os"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/status"
calcpb "grpc-course/calc/calc_proto"
)
// init configures logrus to write to stdout at Info level before main runs.
func init() {
	log.SetOutput(os.Stdout)
	log.SetLevel(log.InfoLevel)
}
// server implements the calcpb Calculator gRPC service handlers below.
type server struct{}
// CalculateSum is the unary RPC returning the sum of the request's X and Y.
func (*server) CalculateSum(ctx context.Context, req *calcpb.CalculateSumRequest) (*calcpb.CalculateSumResponse, error) {
	log.Infof("Received unary call to calculate sum with request: %v", req)
	return &calcpb.CalculateSumResponse{
		Result: req.X + req.Y,
	}, nil
}
// PrimeDecompose is a server-streaming RPC that sends each prime factor of
// the requested number (with multiplicity) by trial division.
func (*server) PrimeDecompose(req *calcpb.PrimeDecomposeRequest, stream calcpb.Calculator_PrimeDecomposeServer) error {
	log.Infof("Received server stream call to decompose to prime numbers with request: %v", req)
	num := req.Number
	var d int64 = 2
	for num > 1 {
		if num%d == 0 {
			// Bug fix: the original ignored Send's error, looping on after a
			// client disconnect; propagate it so the handler stops promptly.
			if err := stream.Send(&calcpb.PrimeDecomposeResponse{
				Number: d,
			}); err != nil {
				return err
			}
			num = num / d
		} else {
			d = d + 1
		}
	}
	return nil
}
// CalculateAverage is a client-streaming RPC: it accumulates every number
// received and replies with the arithmetic mean when the client closes the
// stream. With zero numbers the result is NaN (0/0), preserving the
// original behaviour.
func (*server) CalculateAverage(stream calcpb.Calculator_CalculateAverageServer) error {
	sum := 0.0
	count := 0.0
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&calcpb.CalculateAverageResponse{
				Average: (sum / count),
			})
		}
		if err != nil {
			// Bug fix: the original called log.Fatalf here, which exits the
			// whole server process on one bad stream; return the error to
			// gRPC instead.
			log.Errorf("error reading from stream: %v", err)
			return err
		}
		sum += req.Number
		count++
	}
}
// FindMax is a bidirectional-streaming RPC: it emits a new running maximum
// every time a received number exceeds the largest value seen so far, and
// returns cleanly when the client closes its side.
func (*server) FindMax(stream calcpb.Calculator_FindMaxServer) error {
	log.Info("Processing request for bi dir streaming")
	max := int64(math.MinInt64)
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			// Bug fix: log.Fatalf exited the process and made the following
			// return unreachable; log at error level and return instead.
			log.Errorf("Received error while reading request: %v", err)
			return err
		}
		if n := req.Number; n > max {
			max = n
			if err := stream.Send(&calcpb.FindMaxResponse{
				Number: max,
			}); err != nil {
				// Same fix as above: report the failure without killing the server.
				log.Errorf("Error while sending to stream: %v", err)
				return err
			}
		}
	}
}
// SquareRoot is a unary RPC returning the square root of the requested
// number, or an InvalidArgument status when the input is negative.
func (*server) SquareRoot(ctx context.Context, req *calcpb.SquareRootRequest) (*calcpb.SquareRootResponse, error) {
	number := req.GetNumber()
	if number < 0 {
		// Bug fix: the original passed an already-formatted string to
		// status.Errorf's format parameter (a go vet printf finding, and a
		// misformat risk if the message ever contains '%'); use status.Error
		// with an explicitly built message instead.
		return nil, status.Error(
			codes.InvalidArgument,
			fmt.Sprintf("Received negative number %v", number),
		)
	}
	return &calcpb.SquareRootResponse{
		Result: math.Sqrt(float64(number)),
	}, nil
}
// main starts the Calculator gRPC server on localhost:50051 with server
// reflection enabled, and blocks serving until a fatal error occurs.
func main() {
	log.Info("Setting up server...")
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatalf("error listening: %v", err)
	}
	s := grpc.NewServer()
	calcpb.RegisterCalculatorServer(s, &server{})

	// Register reflection service
	reflection.Register(s)

	if err := s.Serve(lis); err != nil {
		log.Fatalf("error serving: %v", err)
	}
}
|
[
0
] |
package mocks
import context "context"
import dep_radar "github.com/stamm/dep_radar"
import mock "github.com/stretchr/testify/mock"
// IDepTool is an autogenerated mock type for the IDepTool type
// (mockery-style: calls are recorded and answered via the embedded
// mock.Mock; presumably regenerated rather than edited by hand — confirm
// the generator before modifying).
type IDepTool struct {
	mock.Mock
}
// Deps provides a mock function with given fields: _a0, _a1
// It returns whatever was configured via On("Deps", ...): either computed
// by a registered function or taken verbatim from the return values.
func (_m *IDepTool) Deps(_a0 context.Context, _a1 dep_radar.IApp) (dep_radar.AppDeps, error) {
	ret := _m.Called(_a0, _a1)

	var r0 dep_radar.AppDeps
	if rf, ok := ret.Get(0).(func(context.Context, dep_radar.IApp) dep_radar.AppDeps); ok {
		// A function was registered; compute the return value from the args.
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(dep_radar.AppDeps)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, dep_radar.IApp) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}
// Name provides a mock function with given fields:
// It returns the configured string, computed by a registered function if
// one was supplied.
func (_m *IDepTool) Name() string {
	ret := _m.Called()

	var r0 string
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}

	return r0
}
// Compile-time check that the mock satisfies dep_radar.IDepTool.
var _ dep_radar.IDepTool = (*IDepTool)(nil)
|
[
4
] |
/**
Copyright (c) 2016 The ConnectorDB Contributors
Licensed under the MIT license.
**/
package dbutil
/**
postgresify file provides the ability to convert queries done with question
marks into named queries with the proper query placeholders for postgres.
**/
import "strconv"
var (
	// postgresQueryConversions memoizes converted queries.
	// Not safe for concurrent use; guard externally if needed.
	postgresQueryConversions = make(map[string]string)
)

// QueryConvert rewrites a '?'-placeholder query into the placeholder style
// used by dbtype. Only postgres needs rewriting; every other database gets
// the query back unchanged.
func QueryConvert(query, dbtype string) string {
	switch dbtype {
	case "postgres":
		return QueryToPostgres(query)
	}
	return query
}

// QueryToPostgres converts all ? in a query to $n which is the postgres format.
// Results are memoized in postgresQueryConversions.
func QueryToPostgres(query string) string {
	// Bug fix: the original looked the cache up but never stored into it,
	// so the memoization never took effect; populate it below.
	if cached, ok := postgresQueryConversions[query]; ok && cached != "" {
		return cached
	}

	// Build with an append buffer instead of the original quadratic
	// string concatenation.
	buf := make([]byte, 0, len(query)+8)
	position := 1
	for _, runeValue := range query {
		if runeValue == '?' {
			buf = append(buf, '$')
			buf = strconv.AppendInt(buf, int64(position), 10)
			position++
			continue
		}
		buf = append(buf, string(runeValue)...)
	}

	output := string(buf)
	postgresQueryConversions[query] = output
	return output
}
|
[
7
] |
package main
import (
"fmt"
"math"
)
// main computes the smallest positive number evenly divisible by all of
// 1..20 (Project Euler #5): for each prime p <= sqrt(20) use the largest
// exponent a with p^a <= 20, and exponent 1 for the remaining primes.
// Intermediate p/a and p/N values are printed along the way.
func main() {
	const k = 20
	limit := math.Sqrt(float64(k))
	primes := []int{2, 3, 5, 7, 11, 13, 17, 19}
	N := 1
	belowSqrt := true
	for idx := 0; idx < 8; idx++ {
		exp := 1
		if belowSqrt {
			if float64(primes[idx]) <= limit {
				// Largest a with p^a <= k, via a = floor(log(k)/log(p)).
				exp = int(math.Floor(math.Log(float64(k)) / math.Log(float64(primes[idx]))))
				fmt.Printf("p: %d a: %d\n", primes[idx], exp)
			} else {
				// Primes are sorted, so all later primes exceed sqrt(k) too.
				belowSqrt = false
			}
		}
		N = N * int(math.Pow(float64(primes[idx]), float64(exp)))
		fmt.Printf("p: %d N: %d\n", primes[idx], N)
	}
	fmt.Printf("%d\n", N)
}
|
[
0
] |
package main
// Package-level repository handles. Each is an interface declared below and
// wired to a concrete storage implementation in init(); all data access in
// the package goes through these.
var (
	branchProtectionRuleRepository branchProtectionRuleRepositoryInterface
	dastVulnerabilityMessageRepository dastVulnerabilityMessageRepositoryInterface
	meetingRepository meetingRepositoryInterface
	meetingMessageRepository meetingMessageRepositoryInterface
	meetingMessageViewerRepository meetingMessageViewerRepositoryInterface
	meetingUserRepository meetingUserRepositoryInterface
	projectRepository projectRepositoryInterface
	projectUserRepository projectUserRepositoryInterface
	projectUserRoleRepository projectUserRoleRepositoryInterface
	scanRepository sacnRepositoryInterface // NOTE(review): interface name appears misspelled ("sacn" vs "scan"); it is declared below with the same spelling, so renaming must touch both
	teamRepository teamRepositoryInterface
	teamUserRepository teamUserRepositoryInterface
	teamUserInvitationRequestProjectRepository teamUserInvitationRequestProjectRepositoryInterface
	teamUserInvitationRequestRepository teamUserInvitationRequestRepositoryInterface
	teamUserRoleRepository teamUserRoleRepositoryInterface
	testRepository testRepositoryInterface
	testMessageRepository testMessageRepositoryInterface
	testMessageViewerRepository testMessageViewerRepositoryInterface
	testResultRepository testResultRepositoryInterface
	testStatusRepository testStatusRepositoryInterface
	userRepository userRepositoryInterface
	vulnerabilityRepository vulnerabilityRepositoryInterface
)
const (
	// storageTypeGORM selects the GORM-backed repository implementations.
	storageTypeGORM = "storageTypeGORM"
	// storageType is the compile-time backend selection used by init().
	storageType = storageTypeGORM
	// loadAllRelation is a sentinel preload name meaning "load every relation".
	loadAllRelation = "__loadAllRelation"
)
// init wires every package-level repository handle to the implementation
// selected by storageType (currently only GORM) and then initializes the
// GORM backend itself.
func init() {
	switch storageType {
	case storageTypeGORM:
		branchProtectionRuleRepository = &branchProtectionRuleRepositoryGORM{}
		dastVulnerabilityMessageRepository = &dastVulnerabilityMessageRepositoryGORM{}
		meetingRepository = &meetingRepositoryGORM{}
		meetingMessageRepository = &meetingMessageRepositoryGORM{}
		meetingMessageViewerRepository = &meetingMessageViewerRepositoryGORM{}
		meetingUserRepository = &meetingUserRepositoryGORM{}
		projectRepository = &projectRepositoryGORM{}
		projectUserRepository = &projectUserRepositoryGORM{}
		projectUserRoleRepository = &projectUserRoleRepositoryGORM{}
		scanRepository = &scanRepositoryGORM{}
		teamRepository = &teamRepositoryGORM{}
		teamUserRepository = &teamUserRepositoryGORM{}
		teamUserInvitationRequestRepository = &teamUserInvitationRequestRepositoryGORM{}
		teamUserInvitationRequestProjectRepository = &teamUserInvitationRequestProjectRepositoryGORM{}
		teamUserRoleRepository = &teamUserRoleRepositoryGORM{}
		testRepository = &testRepositoryGORM{}
		testMessageRepository = &testMessageRepositoryGORM{}
		testMessageViewerRepository = &testMessageViewerRepositoryGORM{}
		testResultRepository = &testResultRepositoryGORM{}
		testStatusRepository = &testStatusRepositoryGORM{}
		userRepository = &userRepositoryGORM{}
		vulnerabilityRepository = &vulnerabilityRepositoryGORM{}
		// Open the database connection / run migrations for the GORM backend.
		initGORM()
	}
}
// branchProtectionRuleRepositoryInterface provides query/save access to
// branchProtectionRule records; trailing strings are relation preloads.
type branchProtectionRuleRepositoryInterface interface {
	find(map[string]interface{}, ...string) ([]branchProtectionRule, error)
	save(map[string]interface{}) (*branchProtectionRule, error)
	saveWith(branchname string, projectID uint) (*branchProtectionRule, error)
}

// dastVulnerabilityMessageRepositoryInterface provides query/save access to
// dastVulnerabilityMessage records.
type dastVulnerabilityMessageRepositoryInterface interface {
	find(map[string]interface{}, ...string) ([]dastVulnerabilityMessage, error)
	findByID(uint, ...string) (*dastVulnerabilityMessage, error)
	findOrderLimit(query map[string]interface{}, order string, limit interface{}, preloads ...string) ([]dastVulnerabilityMessage, error)
	findWhere([]interface{}, ...string) ([]dastVulnerabilityMessage, error)
	save(map[string]interface{}) (*dastVulnerabilityMessage, error)
}

// meetingRepositoryInterface provides query/save access to meeting records.
type meetingRepositoryInterface interface {
	find(map[string]interface{}, ...string) ([]meeting, error)
	findByID(uint, ...string) (*meeting, error)
	findByIDs([]uint, ...string) ([]meeting, error)
	first(map[string]interface{}, ...string) (*meeting, error)
	save(map[string]interface{}) (*meeting, error)
}

// meetingMessageRepositoryInterface provides query/save access to
// meetingMessage records.
type meetingMessageRepositoryInterface interface {
	find(map[string]interface{}, ...string) ([]meetingMessage, error)
	findByID(uint, ...string) (*meetingMessage, error)
	findOrderLimit(query map[string]interface{}, order string, limit interface{}, preloads ...string) ([]meetingMessage, error)
	findWhere([]interface{}, ...string) ([]meetingMessage, error)
	first(map[string]interface{}, ...string) (*meetingMessage, error)
	save(map[string]interface{}) (*meetingMessage, error)
}

// meetingMessageViewerRepositoryInterface provides query/save access to
// meetingMessageViewer records.
type meetingMessageViewerRepositoryInterface interface {
	find(map[string]interface{}, ...string) ([]meetingMessageViewer, error)
	first(map[string]interface{}, ...string) (*meetingMessageViewer, error)
	save(map[string]interface{}) (*meetingMessageViewer, error)
}

// meetingUserRepositoryInterface provides query/save access to meetingUser records.
type meetingUserRepositoryInterface interface {
	find(map[string]interface{}, ...string) ([]meetingUser, error)
	save(map[string]interface{}) (*meetingUser, error)
}
// projectRepositoryInterface provides query/save access to project records.
type projectRepositoryInterface interface {
	find(map[string]interface{}, ...string) ([]project, error)
	findByID(uint, ...string) (*project, error)
	findByIDs([]uint, ...string) ([]project, error)
	first(map[string]interface{}, ...string) (*project, error)
	save(map[string]interface{}) (*project, error)
}

// projectUserRepositoryInterface provides query/save access to projectUser records.
type projectUserRepositoryInterface interface {
	find(map[string]interface{}, ...string) ([]projectUser, error)
	first(map[string]interface{}, ...string) (*projectUser, error)
	save(map[string]interface{}) (*projectUser, error)
}

// projectUserRoleRepositoryInterface provides query/save access to
// projectUserRole records.
type projectUserRoleRepositoryInterface interface {
	find(map[string]interface{}, ...string) ([]projectUserRole, error)
	findByID(uint, ...string) (*projectUserRole, error)
	findByRole(string, ...string) (*projectUserRole, error)
	save(map[string]interface{}) (*projectUserRole, error)
}

// sacnRepositoryInterface provides query/save access to scan records.
// NOTE(review): the name looks misspelled ("sacn" vs "scan"); renaming would
// also require updating the package var declaration that references it.
type sacnRepositoryInterface interface {
	find(map[string]interface{}, ...string) ([]scan, error)
	findByID(uint, ...string) (*scan, error)
	save(map[string]interface{}) (*scan, error)
	saveWith(commitSHA1 string, projectID, userID uint) (*scan, error)
}

// teamRepositoryInterface provides query/save access to team records.
type teamRepositoryInterface interface {
	find(map[string]interface{}, ...string) ([]team, error)
	findByID(uint, ...string) (*team, error)
	findByIDs([]uint, ...string) ([]team, error)
	findByName(string, ...string) (*team, error)
	first(map[string]interface{}, ...string) (*team, error)
	save(map[string]interface{}) (*team, error)
}

// teamUserRepositoryInterface provides query/save access to teamUser records.
type teamUserRepositoryInterface interface {
	find(map[string]interface{}, ...string) ([]teamUser, error)
	first(map[string]interface{}, ...string) (*teamUser, error)
	save(map[string]interface{}) (*teamUser, error)
}

// teamUserInvitationRequestProjectRepositoryInterface provides query/save/delete
// access to teamUserInvitationRequestProject records.
type teamUserInvitationRequestProjectRepositoryInterface interface {
	delete(map[string]interface{}) error
	find(map[string]interface{}, ...string) ([]teamUserInvitationRequestProject, error)
	first(map[string]interface{}, ...string) (*teamUserInvitationRequestProject, error)
	save(map[string]interface{}) (*teamUserInvitationRequestProject, error)
}

// teamUserInvitationRequestRepositoryInterface provides query/save/delete
// access to teamUserInvitationRequest records.
type teamUserInvitationRequestRepositoryInterface interface {
	delete(map[string]interface{}) error
	deleteByID(uint) error
	find(map[string]interface{}, ...string) ([]teamUserInvitationRequest, error)
	findByID(uint, ...string) (*teamUserInvitationRequest, error)
	first(map[string]interface{}, ...string) (*teamUserInvitationRequest, error)
	save(map[string]interface{}) (*teamUserInvitationRequest, error)
	saveWith(string, uint, uint, uint, uint) (*teamUserInvitationRequest, error)
}

// teamUserRoleRepositoryInterface provides query/save access to teamUserRole records.
type teamUserRoleRepositoryInterface interface {
	findByID(uint, ...string) (*teamUserRole, error)
	findByRole(string, ...string) (*teamUserRole, error)
	saveWith(string) (*teamUserRole, error)
}
type testRepositoryInterface interface {
find(map[string]interface{}, ...string) ([]test, error)
findByID(uint, ...string) (*test, error)
findOrder(map[string]interface{}, string, ...string) ([]test, error)
first(map[string]interface{}, ...string) (*test, error)
save(map[string]interface{}) (*test, error)
}
type testMessageRepositoryInterface interface {
find(map[string]interface{}, ...string) ([]testMessage, error)
findByID(uint, ...string) (*testMessage, error)
findOrderLimit(query map[string]interface{}, order string, limit interface{}, preloads ...string) ([]testMessage, error)
findWhere([]interface{}, ...string) ([]testMessage, error)
first(map[string]interface{}, ...string) (*testMessage, error)
save(map[string]interface{}) (*testMessage, error)
}
type testMessageViewerRepositoryInterface interface {
find(map[string]interface{}, ...string) ([]testMessageViewer, error)
first(map[string]interface{}, ...string) (*testMessageViewer, error)
save(map[string]interface{}) (*testMessageViewer, error)
}
type testResultRepositoryInterface interface {
find(map[string]interface{}, ...string) ([]testResult, error)
findByID(uint, ...string) (*testResult, error)
save(map[string]interface{}) (*testResult, error)
update(map[string]interface{}, map[string]interface{}) error
}
type testStatusRepositoryInterface interface {
find(map[string]interface{}, ...string) ([]testStatus, error)
findByID(uint, ...string) (*testStatus, error)
findByText(string, ...string) (*testStatus, error)
save(map[string]interface{}) (*testStatus, error)
}
type userRepositoryInterface interface {
find(map[string]interface{}, ...string) ([]user, error)
findByID(uint, ...string) (*user, error)
findByIDs([]uint, ...string) ([]user, error)
findByName(string, ...string) (*user, error)
first(map[string]interface{}, ...string) (*user, error)
saveWith(name, password, handlename, email, profileImagePath string) (*user, error)
}
type vulnerabilityRepositoryInterface interface {
find(map[string]interface{}, ...string) ([]vulnerability, error)
findByID(uint, ...string) (*vulnerability, error)
first(map[string]interface{}, ...string) (*vulnerability, error)
save(map[string]interface{}) (*vulnerability, error)
}
|
[
7
] |
package bow
import (
crand "crypto/rand"
"fmt"
"math/big"
"github.com/google/uuid"
)
// genDefaultNumRows is the row count used when an option leaves NumRows unset.
const genDefaultNumRows = 3

// GenSeriesOptions are options to generate random Series:
// - NumRows: number of rows of the resulting Series
// - Name: name of the Series
// - Type: data type of the Series
// - GenStrategy: strategy of data generation
// - MissingData: sets whether the Series includes random nil values
type GenSeriesOptions struct {
	NumRows     int
	Name        string
	Type        Type
	GenStrategy GenStrategy
	MissingData bool
}
// NewGenBow generates a new random Bow with `numRows` rows and eventual
// GenSeriesOptions. Each option's NumRows is overridden by `numRows`, and
// duplicate series names are disambiguated with an index suffix.
func NewGenBow(numRows int, options ...GenSeriesOptions) (Bow, error) {
	series := make([]Series, len(options))
	nameMap := make(map[string]struct{})
	for i, o := range options {
		o.NumRows = numRows
		o.validate()
		// Keep suffixing until the name is unique: a single "name_i" rename
		// could itself collide with another option's name.
		for _, taken := nameMap[o.Name]; taken; _, taken = nameMap[o.Name] {
			o.Name = fmt.Sprintf("%s_%d", o.Name, i)
		}
		nameMap[o.Name] = struct{}{}
		series[i] = o.genSeries()
	}
	return NewBow(series...)
}
// NewGenSeries returns a new randomly generated Series.
func NewGenSeries(o GenSeriesOptions) Series {
	o.validate()
	return o.genSeries()
}

// validate fills in a default for every zero-valued option: 3 rows, name
// "default", type Int64 and incremental generation.
func (o *GenSeriesOptions) validate() {
	if o.NumRows < 1 {
		o.NumRows = genDefaultNumRows
	}
	if len(o.Name) == 0 {
		o.Name = "default"
	}
	if o.Type == Unknown {
		o.Type = Int64
	}
	if o.GenStrategy == nil {
		o.GenStrategy = GenStrategyIncremental
	}
}
// genSeries builds a Series of o.NumRows values produced by o.GenStrategy.
// With MissingData set, some rows are randomly left unset (nil).
func (o *GenSeriesOptions) genSeries() Series {
	buf := NewBuffer(o.NumRows, o.Type)
	for rowIndex := 0; rowIndex < o.NumRows; rowIndex++ {
		if !o.MissingData ||
			// newRandomNumber(Int64) yields a value in [0, 9], so ">2" keeps
			// 7 rows out of 10: roughly 30% of rows end up nil (the original
			// comment claimed 20%).
			(newRandomNumber(Int64).(int64) > 2) {
			buf.SetOrDrop(rowIndex, o.GenStrategy(o.Type, rowIndex))
		}
	}
	return NewSeriesFromBuffer(o.Name, buf)
}
// GenStrategy defines how random values are generated.
// `seed` is the row index, so strategies can derive deterministic sequences.
type GenStrategy func(typ Type, seed int) interface{}

// GenStrategyRandom generates a random number of type `typ`
// (the seed is ignored).
func GenStrategyRandom(typ Type, seed int) interface{} {
	return newRandomNumber(typ)
}

// GenStrategyIncremental generates a number of type `typ` equal to the converted `seed` value.
func GenStrategyIncremental(typ Type, seed int) interface{} {
	return typ.Convert(seed)
}

// GenStrategyDecremental generates a number of type `typ` equal to the opposite of the converted `seed` value.
func GenStrategyDecremental(typ Type, seed int) interface{} {
	return typ.Convert(-seed)
}

// GenStrategyRandomIncremental generates a random number of type `typ` by
// adding a random offset in [0, 10) to 10*seed, so values grow with the row
// index but keep some jitter.
func GenStrategyRandomIncremental(typ Type, seed int) interface{} {
	i := int64(seed) * 10
	switch typ {
	case Float64:
		add, _ := ToFloat64(newRandomNumber(Float64))
		return float64(i) + add
	default:
		add, _ := ToInt64(newRandomNumber(Int64))
		return typ.Convert(i + add)
	}
}
// GenStrategyRandomDecremental generates a random number of type `typ` by
// subtracting a random offset in [0, 10) from -10*seed, so values shrink as
// the row index grows.
//
// NOTE(review): unlike GenStrategyRandomIncremental there is no dedicated
// Float64 branch here; float values go through typ.Convert of an int64 —
// confirm the asymmetry is intended.
func GenStrategyRandomDecremental(typ Type, seed int) interface{} {
	// The original wrapped this single path in a switch with only a default
	// case; the switch was dead syntax and has been removed.
	i := -int64(seed) * 10
	add, _ := ToInt64(newRandomNumber(Int64))
	return typ.Convert(i - add)
}
// newRandomNumber returns a random value for `typ` derived from a uniform
// integer in [0, 9] (crypto/rand):
//   - Int64:   the raw value
//   - Float64: the value + 0.5
//   - Boolean: true for 6..9 (40% of draws)
//   - String:  first 8 chars of a fresh UUID (independent of the draw)
// It panics on an unsupported type or if the random source fails.
func newRandomNumber(typ Type) interface{} {
	n, err := crand.Int(crand.Reader, big.NewInt(10))
	if err != nil {
		panic(err)
	}
	switch typ {
	case Int64:
		return n.Int64()
	case Float64:
		return float64(n.Int64()) + 0.5
	case Boolean:
		return n.Int64() > 5
	case String:
		return uuid.New().String()[:8]
	default:
		panic("unsupported data type")
	}
}
|
[
7
] |
/*
Copyright The ocicrypt Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pkcs11
import (
"errors"
"fmt"
"github.com/containers/ocicrypt/config"
"github.com/containers/ocicrypt/crypto/pkcs11"
"github.com/containers/ocicrypt/keywrap"
"github.com/containers/ocicrypt/utils"
)
// pkcs11KeyWrapper wraps and unwraps layer-encryption keys using PKCS#11
// tokens; it is stateless.
type pkcs11KeyWrapper struct {
}

// GetAnnotationID returns the image-manifest annotation key under which this
// wrapper stores its wrapped keys.
func (kw *pkcs11KeyWrapper) GetAnnotationID() string {
	const annotationID = "org.opencontainers.image.enc.keys.pkcs11"
	return annotationID
}
// NewKeyWrapper returns a new key wrapping interface using pkcs11.
func NewKeyWrapper() keywrap.KeyWrapper {
	return &pkcs11KeyWrapper{}
}
// WrapKeys wraps the session key for recipients and encrypts the optsData,
// which describe the symmetric key used for encrypting the layer.
// Returning (nil, nil) when there are no pkcs11 recipients is deliberate.
func (kw *pkcs11KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
	// Recipients may be plain public keys or pkcs11 yaml key-file descriptors.
	pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, append(ec.Parameters["pkcs11-pubkeys"], ec.Parameters["pkcs11-yamls"]...))
	if err != nil {
		return nil, err
	}
	// no recipients is not an error...
	if len(pkcs11Recipients) == 0 {
		return nil, nil
	}
	jsonString, err := pkcs11.EncryptMultiple(pkcs11Recipients, optsData)
	if err != nil {
		// Fix: the message previously misspelled "EncryptMulitple".
		return nil, fmt.Errorf("PKCS11 EncryptMultiple failed: %w", err)
	}
	return jsonString, nil
}
// UnwrapKey unwraps the symmetric layer key from jsonString using the pkcs11
// private keys found in dc.Parameters. Non-pkcs11 keys among the parsed
// private keys are silently skipped.
func (kw *pkcs11KeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byte) ([]byte, error) {
	var pkcs11PrivKeys []*pkcs11.Pkcs11KeyFileObject
	privKeys := kw.GetPrivateKeys(dc.Parameters)
	if len(privKeys) == 0 {
		return nil, errors.New("No private keys found for PKCS11 decryption")
	}
	// Optional module-path restrictions from the "pkcs11-config" parameter.
	p11conf, err := p11confFromParameters(dc.Parameters)
	if err != nil {
		return nil, err
	}
	for _, privKey := range privKeys {
		key, err := utils.ParsePrivateKey(privKey, nil, "PKCS11")
		if err != nil {
			return nil, err
		}
		switch pkcs11PrivKey := key.(type) {
		case *pkcs11.Pkcs11KeyFileObject:
			if p11conf != nil {
				pkcs11PrivKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories)
				pkcs11PrivKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths)
			}
			pkcs11PrivKeys = append(pkcs11PrivKeys, pkcs11PrivKey)
		default:
			// Not a pkcs11 key file; ignore and try the rest.
			continue
		}
	}
	// Decrypt tries all collected keys; any single success wins.
	plaintext, err := pkcs11.Decrypt(pkcs11PrivKeys, jsonString)
	if err == nil {
		return plaintext, nil
	}
	return nil, fmt.Errorf("PKCS11: No suitable private key found for decryption: %w", err)
}
// NoPossibleKeys reports whether no pkcs11 private keys are available in
// dcparameters, i.e. UnwrapKey cannot possibly succeed.
func (kw *pkcs11KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool {
	return len(kw.GetPrivateKeys(dcparameters)) == 0
}

// GetPrivateKeys returns the pkcs11 yaml key-file blobs from dcparameters.
func (kw *pkcs11KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte {
	return dcparameters["pkcs11-yamls"]
}

// GetKeyIdsFromPacket is not applicable to pkcs11; it always returns nil.
func (kw *pkcs11KeyWrapper) GetKeyIdsFromPacket(_ string) ([]uint64, error) {
	return nil, nil
}

// GetRecipients returns a placeholder recipient list; pkcs11 packets do not
// carry recipient identities.
func (kw *pkcs11KeyWrapper) GetRecipients(_ string) ([]string, error) {
	return []string{"[pkcs11]"}, nil
}
// addPubKeys parses pubKeys (PEM public keys or pkcs11 yaml descriptors) into
// recipient objects for EncryptMultiple. For pkcs11 key-file objects, the
// optional module-path restrictions from dc.Parameters are applied.
func addPubKeys(dc *config.DecryptConfig, pubKeys [][]byte) ([]interface{}, error) {
	var pkcs11Keys []interface{}
	if len(pubKeys) == 0 {
		return pkcs11Keys, nil
	}
	p11conf, err := p11confFromParameters(dc.Parameters)
	if err != nil {
		return nil, err
	}
	for _, pubKey := range pubKeys {
		key, err := utils.ParsePublicKey(pubKey, "PKCS11")
		if err != nil {
			return nil, err
		}
		switch pkcs11PubKey := key.(type) {
		case *pkcs11.Pkcs11KeyFileObject:
			if p11conf != nil {
				pkcs11PubKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories)
				pkcs11PubKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths)
			}
		}
		// Non-pkcs11 keys are appended unchanged.
		pkcs11Keys = append(pkcs11Keys, key)
	}
	return pkcs11Keys, nil
}
// p11confFromParameters parses the optional "pkcs11-config" entry of
// dcparameters into a Pkcs11Config. It returns (nil, nil) when the entry is
// absent or empty.
func p11confFromParameters(dcparameters map[string][][]byte) (*pkcs11.Pkcs11Config, error) {
	// Single map lookup instead of check-then-index, and guard against a
	// present-but-empty value slice, which previously panicked on [0].
	if cfgs, ok := dcparameters["pkcs11-config"]; ok && len(cfgs) > 0 {
		return pkcs11.ParsePkcs11ConfigFile(cfgs[0])
	}
	return nil, nil
}
|
[
7
] |
package resourcewatcher
import (
"context"
"fmt"
"os"
"path"
"sort"
"strings"
"sync"
"time"
"kubectlfzf/pkg/k8sresources"
"kubectlfzf/pkg/util"
"github.com/golang/glog"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
)
// LabelKey identifies a label occurrence: a namespace plus a "key=value"
// label string.
type LabelKey struct {
	Namespace string
	Label     string
}

// LabelPair couples a LabelKey with how many resources carry it.
type LabelPair struct {
	Key         LabelKey
	Occurrences int
}

// LabelPairList implements sort.Interface: descending occurrence count,
// ties broken by ascending namespace, then ascending label.
type LabelPairList []LabelPair

func (p LabelPairList) Len() int      { return len(p) }
func (p LabelPairList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

func (p LabelPairList) Less(i, j int) bool {
	a, b := p[i], p[j]
	if a.Occurrences != b.Occurrences {
		return a.Occurrences > b.Occurrences
	}
	if a.Key.Namespace != b.Key.Namespace {
		return a.Key.Namespace < b.Key.Namespace
	}
	return a.Key.Label < b.Key.Label
}
// K8sStore stores the current state of k8s resources
// data is keyed by "namespace_name"; labelMap counts occurrences per
// namespace/label pair. dataMutex guards data, labelMutex guards labelMap
// plus the label-dump bookkeeping, fileMutex guards currentFile.
type K8sStore struct {
	data         map[string]k8sresources.K8sResource
	labelMap     map[LabelKey]int
	resourceCtor func(obj interface{}, config k8sresources.CtorConfig) k8sresources.K8sResource
	ctorConfig   k8sresources.CtorConfig
	resourceName string
	currentFile  *os.File
	storeConfig  StoreConfig
	firstWrite   bool
	destDir      string
	dataMutex    sync.Mutex
	labelMutex   sync.Mutex
	fileMutex    sync.Mutex
	labelToDump  bool
	lastFullDump time.Time
	lastLabelDump time.Time
}

// StoreConfig defines parameters used for the cache location
// TimeBetweenFullDump throttles DumpFullState.
type StoreConfig struct {
	ClusterDir          string
	CacheDir            string
	TimeBetweenFullDump time.Duration
}
// NewK8sStore creates a new store
// It writes the header cache file immediately and starts the background
// label-dump goroutine, which runs until ctx is cancelled.
func NewK8sStore(ctx context.Context, cfg WatchConfig, storeConfig StoreConfig, ctorConfig k8sresources.CtorConfig) (*K8sStore, error) {
	k := K8sStore{}
	k.destDir = path.Join(storeConfig.CacheDir, storeConfig.ClusterDir)
	k.data = make(map[string]k8sresources.K8sResource, 0)
	k.labelMap = make(map[LabelKey]int, 0)
	k.resourceCtor = cfg.resourceCtor
	k.resourceName = cfg.resourceName
	k.currentFile = nil
	k.storeConfig = storeConfig
	k.firstWrite = true
	k.ctorConfig = ctorConfig
	k.lastLabelDump = time.Time{}
	k.lastFullDump = time.Time{}
	// Background flusher for the label file; stops when ctx is done.
	go k.periodicLabelDump(ctx)
	err := util.WriteStringToFile(cfg.header, k.destDir, k.resourceName, "header")
	if err != nil {
		return &k, err
	}
	return &k, nil
}
// resourceKey derives the store key ("namespace_name"), the namespace and
// the label map from a watched object. Unknown object types yield
// "None_None" with nil labels.
func resourceKey(obj interface{}) (string, string, map[string]string) {
	name := "None"
	namespace := "None"
	var labels map[string]string
	switch v := obj.(type) {
	case metav1.ObjectMetaAccessor:
		o := v.GetObjectMeta()
		namespace = o.GetNamespace()
		name = o.GetName()
		labels = o.GetLabels()
	case *unstructured.Unstructured:
		// Use the typed accessors instead of raw map type assertions:
		// unstructured metadata stores labels as map[string]interface{},
		// so the previous assertion to map[string]string panicked at
		// runtime, and a missing name/namespace also panicked.
		namespace = v.GetNamespace()
		name = v.GetName()
		labels = v.GetLabels()
	default:
		glog.Warningf("Unknown type %v", obj)
	}
	return fmt.Sprintf("%s_%s", namespace, name), namespace, labels
}
// periodicLabelDump flushes the label file every 5 seconds while labelToDump
// is set. It exits when ctx is cancelled.
func (k *K8sStore) periodicLabelDump(ctx context.Context) {
	ticker := time.NewTicker(time.Second * 5)
	// Release the ticker's resources when the goroutine exits.
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			// Read the flag under labelMutex but release the lock before
			// calling dumpLabel: dumpLabel -> generateLabel acquires
			// labelMutex itself and sync.Mutex is not reentrant, so holding
			// it across the call (as the original did) deadlocks.
			k.labelMutex.Lock()
			toDump := k.labelToDump
			k.labelMutex.Unlock()
			if toDump {
				// The original discarded dumpLabel's error; log it instead.
				if err := k.dumpLabel(); err != nil {
					glog.Warningf("Error dumping labels: %v", err)
				}
			}
		}
	}
}
// resetLabelMap discards all accumulated label counts.
func (k *K8sStore) resetLabelMap() {
	k.labelMutex.Lock()
	defer k.labelMutex.Unlock()
	k.labelMap = map[LabelKey]int{}
}

// updateLabelMap adds delta to the occurrence count of every namespace/label
// pair present in labels (delta is negative on deletion).
func (k *K8sStore) updateLabelMap(namespace string, labels map[string]string, delta int) {
	k.labelMutex.Lock()
	defer k.labelMutex.Unlock()
	for name, value := range labels {
		key := LabelKey{Namespace: namespace, Label: fmt.Sprintf("%s=%s", name, value)}
		k.labelMap[key] += delta
	}
}
// AddResourceList clears current state add the objects to the store.
// It will trigger a full dump
// This is used for polled resources, no need for mutex
func (k *K8sStore) AddResourceList(lstRuntime []runtime.Object) {
	k.data = make(map[string]k8sresources.K8sResource, 0)
	k.resetLabelMap()
	for _, runtimeObject := range lstRuntime {
		key, ns, labels := resourceKey(runtimeObject)
		resource := k.resourceCtor(runtimeObject, k.ctorConfig)
		k.data[key] = resource
		k.updateLabelMap(ns, labels, 1)
	}
	err := k.DumpFullState()
	if err != nil {
		glog.Warningf("Error when dumping state: %v", err)
	}
}

// AddResource adds a new k8s object to the store
// The new object is also appended to the on-disk resource file.
func (k *K8sStore) AddResource(obj interface{}) {
	key, ns, labels := resourceKey(obj)
	newObj := k.resourceCtor(obj, k.ctorConfig)
	glog.V(11).Infof("%s added: %s", k.resourceName, key)
	k.dataMutex.Lock()
	k.data[key] = newObj
	k.dataMutex.Unlock()
	k.updateLabelMap(ns, labels, 1)
	err := k.AppendNewObject(newObj)
	if err != nil {
		glog.Warningf("Error when appending new object to current state: %v", err)
	}
}
// DeleteResource removes an existing k8s object to the store
func (k *K8sStore) DeleteResource(obj interface{}) {
	key := "Unknown"
	ns := "Unknown"
	var labels map[string]string
	switch v := obj.(type) {
	case cache.DeletedFinalStateUnknown:
		// Tombstone from a missed delete event; unwrap the inner object.
		key, ns, labels = resourceKey(v.Obj)
	case unstructured.Unstructured:
	// NOTE(review): this case body is empty, so unstructured objects fall
	// through with key "Unknown" and delete nothing useful — confirm whether
	// it should call resourceKey like the accessor case below.
	case metav1.ObjectMetaAccessor:
		key, ns, labels = resourceKey(obj)
	default:
		glog.V(6).Infof("Unknown object type %v", obj)
		return
	}
	glog.V(11).Infof("%s deleted: %s", k.resourceName, key)
	k.dataMutex.Lock()
	delete(k.data, key)
	k.dataMutex.Unlock()
	k.updateLabelMap(ns, labels, -1)
	err := k.DumpFullState()
	if err != nil {
		glog.Warningf("Error when dumping state: %v", err)
	}
}
// UpdateResource update an existing k8s object
// The store is only rewritten when the object actually changed.
func (k *K8sStore) UpdateResource(oldObj, newObj interface{}) {
	key, _, _ := resourceKey(newObj)
	k8sObj := k.resourceCtor(newObj, k.ctorConfig)
	// dataMutex is released on both branches before the (slow) full dump.
	k.dataMutex.Lock()
	if k8sObj.HasChanged(k.data[key]) {
		glog.V(11).Infof("%s changed: %s", k.resourceName, key)
		k.data[key] = k8sObj
		k.dataMutex.Unlock()
		// TODO Handle label diff
		// k.updateLabelMap(ns, labels, 1)
		err := k.DumpFullState()
		if err != nil {
			glog.Warningf("Error when dumping state: %v", err)
		}
	} else {
		k.dataMutex.Unlock()
	}
}
// updateCurrentFile (re)opens the resource cache file in append mode and
// stores the handle in k.currentFile.
// NOTE(review): callers are inconsistent about holding fileMutex around this
// call — confirm locking at the call sites.
func (k *K8sStore) updateCurrentFile() (err error) {
	destFile := path.Join(k.destDir, fmt.Sprintf("%s_%s", k.resourceName, "resource"))
	// Close any previously opened handle first: the original leaked one file
	// descriptor on every call.
	if k.currentFile != nil {
		if cerr := k.currentFile.Close(); cerr != nil {
			glog.Warningf("Error closing %s: %v", k.currentFile.Name(), cerr)
		}
	}
	k.currentFile, err = os.OpenFile(destFile, os.O_APPEND|os.O_WRONLY, 0644)
	return err
}
// AppendNewObject appends a new object to the cache dump
// On first use it creates the resource file, then appends to the open handle.
func (k *K8sStore) AppendNewObject(resource k8sresources.K8sResource) error {
	k.fileMutex.Lock()
	if k.currentFile == nil {
		var err error
		err = util.WriteStringToFile(resource.ToString(), k.destDir, k.resourceName, "resource")
		if err != nil {
			k.fileMutex.Unlock()
			return err
		}
		err = k.updateCurrentFile()
		if err != nil {
			k.fileMutex.Unlock()
			return err
		}
		glog.Infof("Initial write of %s", k.currentFile.Name())
	}
	_, err := k.currentFile.WriteString(resource.ToString())
	k.fileMutex.Unlock()
	if err != nil {
		return err
	}
	now := time.Now()
	k.labelMutex.Lock()
	delta := now.Sub(k.lastLabelDump)
	// NOTE(review): marking labels dirty only when the last dump was LESS
	// than a second ago looks inverted (one would expect to mark dirty when
	// enough time has passed, or unconditionally) — confirm the intent.
	if delta < time.Second {
		k.labelToDump = true
	}
	k.labelMutex.Unlock()
	return nil
}
// dumpLabel writes the current label statistics to the label cache file and
// clears the labelToDump flag.
// Must NOT be called while labelMutex is held: generateLabel acquires that
// mutex itself and sync.Mutex is not reentrant.
// NOTE(review): lastLabelDump and labelToDump are written here without
// holding labelMutex — confirm callers serialize access.
func (k *K8sStore) dumpLabel() error {
	glog.V(8).Infof("Dump of label file %s", k.resourceName)
	k.lastLabelDump = time.Now()
	labelOutput, err := k.generateLabel()
	if err != nil {
		return errors.Wrapf(err, "Error generating label output")
	}
	err = util.WriteStringToFile(labelOutput, k.destDir, k.resourceName, "label")
	if err != nil {
		return errors.Wrapf(err, "Error writing label file")
	}
	k.labelToDump = false
	return nil
}
// generateLabel renders the label statistics sorted by LabelPairList order
// (occurrences desc, then namespace, then label). Lines are
// "cluster [namespace] label count"; the namespace column is omitted when
// empty. Acquires labelMutex only while snapshotting the map.
func (k *K8sStore) generateLabel() (string, error) {
	k.labelMutex.Lock()
	pl := make(LabelPairList, len(k.labelMap))
	i := 0
	for key, occurrences := range k.labelMap {
		pl[i] = LabelPair{key, occurrences}
		i++
	}
	k.labelMutex.Unlock()
	sort.Sort(pl)
	var res strings.Builder
	for _, pair := range pl {
		var str string
		if pair.Key.Namespace == "" {
			str = fmt.Sprintf("%s %s %d\n",
				k.ctorConfig.Cluster, pair.Key.Label, pair.Occurrences)
		} else {
			str = fmt.Sprintf("%s %s %s %d\n",
				k.ctorConfig.Cluster, pair.Key.Namespace, pair.Key.Label, pair.Occurrences)
		}
		_, err := res.WriteString(str)
		if err != nil {
			return "", errors.Wrapf(err, "Error writing string %s",
				str)
		}
	}
	// Drop the trailing newline.
	return strings.Trim(res.String(), "\n"), nil
}
// generateOutput renders the concatenation of all stored resources, sorted
// by key so the dump is deterministic. dataMutex is held for the duration.
func (k *K8sStore) generateOutput() (string, error) {
	k.dataMutex.Lock()
	// defer replaces the duplicated unlocks on the error and success paths.
	defer k.dataMutex.Unlock()
	// Collect and sort keys for a stable iteration order.
	keys := make([]string, 0, len(k.data))
	for key := range k.data {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	var res strings.Builder
	for _, key := range keys {
		v := k.data[key]
		if _, err := res.WriteString(v.ToString()); err != nil {
			return "", errors.Wrapf(err, "Error writing string %s",
				v.ToString())
		}
	}
	return res.String(), nil
}
// DumpFullState writes the full state to the cache file
// Dumps are throttled: calls closer than TimeBetweenFullDump to the previous
// dump are silently skipped.
// NOTE(review): lastFullDump is read and written without a mutex — confirm
// all callers run on the same goroutine.
func (k *K8sStore) DumpFullState() error {
	glog.V(8).Infof("Dump full state of %s", k.resourceName)
	now := time.Now()
	delta := now.Sub(k.lastFullDump)
	if delta < k.storeConfig.TimeBetweenFullDump {
		glog.V(10).Infof("Last full dump for %s happened %s ago, ignoring it", k.resourceName, delta)
		return nil
	}
	k.lastFullDump = now
	glog.V(8).Infof("Doing full dump %d %s", len(k.data), k.resourceName)
	resourceOutput, err := k.generateOutput()
	if err != nil {
		return errors.Wrapf(err, "Error generating output")
	}
	// Rewrite the resource file, then reopen the append handle on it.
	err = util.WriteStringToFile(resourceOutput, k.destDir, k.resourceName, "resource")
	if err != nil {
		return err
	}
	err = k.updateCurrentFile()
	if err != nil {
		return err
	}
	labelOutput, err := k.generateLabel()
	if err != nil {
		return errors.Wrapf(err, "Error generating label output")
	}
	err = util.WriteStringToFile(labelOutput, k.destDir, k.resourceName, "label")
	return err
}
|
[
0
] |
package main
import "fmt"
// prime_factors returns the prime factorization of n in non-decreasing order
// (e.g. 12 -> [2 2 3]). For n < 2 it returns nil.
func prime_factors(n int) (pfs []int) {
	// Guard: the original looped forever for n <= 0 (0%2 == 0).
	if n < 2 {
		return nil
	}
	for n%2 == 0 {
		pfs = append(pfs, 2)
		n /= 2
	}
	// Only odd candidates up to sqrt(n) remain possible.
	for i := 3; i*i <= n; i += 2 {
		for n%i == 0 {
			pfs = append(pfs, i)
			n /= i
		}
	}
	if n > 2 {
		// Whatever is left over is itself prime.
		pfs = append(pfs, n)
	}
	return
}

// power returns p raised to the non-negative exponent i (power(_, 0) == 1).
func power(p, i int) int {
	result := 1
	for j := 0; j < i; j++ {
		result *= p
	}
	return result
}

// sum_of_proper_divisors returns the sum of the divisors of n excluding n
// itself, using the multiplicative identity
// sigma(p^k) = (p^(k+1)-1)/(p-1) over the prime factorization.
func sum_of_proper_divisors(n int) int {
	pfs := prime_factors(n)
	exponents := make(map[int]int)
	for _, prime := range pfs {
		exponents[prime]++ // idiomatic increment; missing keys read as zero
	}
	sumOfAllFactors := 1
	for prime, exp := range exponents {
		sumOfAllFactors *= (power(prime, exp+1) - 1) / (prime - 1)
	}
	return sumOfAllFactors - n
}
// amicable_numbers_under_10000 returns every member of an amicable pair
// found in [3, 10000). Perfect numbers (where the divisor sum equals the
// number itself) are excluded.
func amicable_numbers_under_10000() (amicables []int) {
	for i := 3; i < 10000; i++ {
		partner := sum_of_proper_divisors(i)
		if partner == i {
			// Perfect number, not part of an amicable pair.
			continue
		}
		if sum_of_proper_divisors(partner) == i {
			amicables = append(amicables, i)
		}
	}
	return
}

// main prints the amicable numbers below 10000 followed by their sum.
func main() {
	amicables := amicable_numbers_under_10000()
	fmt.Println(amicables)
	sum := 0
	for _, n := range amicables {
		sum += n
	}
	fmt.Println(sum)
}
|
[
0
] |
// Copyright 2021 Allstar Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package branch implements the Branch Protection security policy.
package branch
import (
"context"
"fmt"
"net/http"
"path"
"github.com/ossf/allstar/pkg/config"
"github.com/ossf/allstar/pkg/config/operator"
"github.com/ossf/allstar/pkg/policydef"
"github.com/google/go-github/v32/github"
"github.com/rs/zerolog/log"
)
// configFile is the policy's config file name inside the org/repo config.
const configFile = "branch_protection.yaml"

// polName is the human-readable policy name used in logs and results.
const polName = "Branch Protection"

// OrgConfig is the org-level config definition for Branch Protection.
// Non-zero defaults are applied in getConfig before yaml unmarshalling.
type OrgConfig struct {
	// OptConfig is the standard org-level opt in/out config, RepoOverride applies to all
	// BP config.
	OptConfig config.OrgOptConfig `yaml:"optConfig"`
	// Action defines which action to take, default log, other: issue...
	Action string `yaml:"action"`
	// EnforceDefault : set to true to enforce policy on default branch, default true.
	EnforceDefault bool `yaml:"enforceDefault"`
	// EnforceBranches is a map of repos and branches. These are other
	// non-default branches to enforce policy on, such as branches which releases
	// are made from.
	EnforceBranches map[string][]string `yaml:"enforceBranches"`
	// RequireApproval : set to true to enforce approval on PRs, default true.
	RequireApproval bool `yaml:"requireApproval"`
	// ApprovalCount is the number of required PR approvals, default 1.
	ApprovalCount int `yaml:"approvalCount"`
	// DismissStale : set to true to require PR approvalse be dismissed when a PR is updated, default true.
	DismissStale bool `yaml:"dismissStale"`
	// BlockForce : set to true to block force pushes, default true.
	BlockForce bool `yaml:"blockForce"`
}
// RepoConfig is the repo-level config for Branch Protection
type RepoConfig struct {
	// OptConfig is the standard repo-level opt in/out config.
	OptConfig config.RepoOptConfig `yaml:"optConfig"`
	// Action overrides the same setting in org-level, only if present.
	Action *string `yaml:"action"`
	// EnforceDefault overrides the same setting in org-level, only if present.
	EnforceDefault *bool `yaml:"enforceDefault"`
	// EnforceBranches adds more branches to the org-level list. Does not
	// override. Always allowed irrespective of DisableRepoOverride setting.
	EnforceBranches []string `yaml:"enforceBranches"`
	// RequireApproval overrides the same setting in org-level, only if present.
	// Fix: the yaml tag previously read "requireAppproval" (triple "p"),
	// which silently broke repo-level overrides of this field.
	// NOTE(review): configs written against the misspelled key stop binding
	// after this change — confirm no deployed configs rely on the typo.
	RequireApproval *bool `yaml:"requireApproval"`
	// ApprovalCount overrides the same setting in org-level, only if present.
	ApprovalCount *int `yaml:"approvalCount"`
	// DismissStale overrides the same setting in org-level, only if present.
	DismissStale *bool `yaml:"dismissStale"`
	// BlockForce overrides the same setting in org-level, only if present.
	BlockForce *bool `yaml:"blockForce"`
}
// mergedConfig is the effective configuration after repo-level overrides
// have been merged into the org-level defaults.
type mergedConfig struct {
	Action          string
	EnforceDefault  bool
	EnforceBranches []string
	RequireApproval bool
	ApprovalCount   int
	DismissStale    bool
	BlockForce      bool
}

// details is the per-branch result detail reported by check.
type details struct {
	PRReviews    bool
	NumReviews   int
	DismissStale bool
	BlockForce   bool
}

// configFetchConfig is indirected through a variable so tests can stub it.
var configFetchConfig func(context.Context, *github.Client, string, string, string, interface{}) error

func init() {
	configFetchConfig = config.FetchConfig
}

// Branch is the Branch Protection policy object, implements policydef.Policy.
type Branch bool

// NewBranch returns a new BranchProtection policy.
func NewBranch() policydef.Policy {
	var b Branch
	return b
}

// Name returns the name of this policy, implementing policydef.Policy.Name()
func (b Branch) Name() string {
	return polName
}

// repositories is the subset of the GitHub Repositories API used by check;
// declared locally so tests can provide a fake.
type repositories interface {
	Get(context.Context, string, string) (*github.Repository,
		*github.Response, error)
	ListBranches(context.Context, string, string, *github.BranchListOptions) (
		[]*github.Branch, *github.Response, error)
	GetBranchProtection(context.Context, string, string, string) (
		*github.Protection, *github.Response, error)
}
// Check performs the policy check for Branch Protection based on the
// configuration stored in the org/repo, implementing policydef.Policy.Check()
func (b Branch) Check(ctx context.Context, c *github.Client, owner,
	repo string) (*policydef.Result, error) {
	return check(ctx, c.Repositories, c, owner, repo)
}
// check evaluates Branch Protection for every enforced branch of owner/repo
// against the merged org/repo configuration, returning per-branch details.
// rep is the repositories API (injectable for tests).
func check(ctx context.Context, rep repositories, c *github.Client, owner,
	repo string) (*policydef.Result, error) {
	oc, rc := getConfig(ctx, c, owner, repo)
	enabled := config.IsEnabled(oc.OptConfig, rc.OptConfig, repo)
	log.Info().
		Str("org", owner).
		Str("repo", repo).
		Str("area", polName).
		Bool("enabled", enabled).
		Msg("Check repo enabled")
	mc := mergeConfig(oc, rc, repo)
	r, _, err := rep.Get(ctx, owner, repo)
	if err != nil {
		return nil, err
	}
	// Page through every branch, 100 at a time.
	opt := &github.BranchListOptions{
		ListOptions: github.ListOptions{
			PerPage: 100,
		},
	}
	var branches []*github.Branch
	for {
		bs, resp, err := rep.ListBranches(ctx, owner, repo, opt)
		if err != nil {
			return nil, err
		}
		branches = append(branches, bs...)
		if resp.NextPage == 0 {
			break
		}
		opt.Page = resp.NextPage
	}
	// Don't really need pagination here, only checking if no branches exist.
	if len(branches) == 0 {
		return &policydef.Result{
			Enabled:    enabled,
			Pass:       true,
			NotifyText: "No branches to protect",
			Details:    nil,
		}, nil
	}
	// NOTE(review): this append may write into the backing array shared with
	// mc.EnforceBranches — harmless here since mc is not reused, but worth
	// confirming if mergeConfig results are ever cached.
	allBranches := mc.EnforceBranches
	if mc.EnforceDefault {
		allBranches = append(mc.EnforceBranches, r.GetDefaultBranch())
	}
	if len(allBranches) == 0 {
		return &policydef.Result{
			Enabled:    enabled,
			Pass:       true,
			NotifyText: "No branches configured for enforcement in policy",
			Details:    nil,
		}, nil
	}
	pass := true
	text := ""
	ds := make(map[string]details)
	for _, b := range allBranches {
		p, rsp, err := rep.GetBranchProtection(ctx, owner, repo, b)
		if err != nil {
			if rsp != nil && rsp.StatusCode == http.StatusNotFound {
				// Branch not protected
				pass = false
				text = text + fmt.Sprintf("No protection found for branch %v\n", b)
				ds[b] = details{}
				continue
			}
			return nil, err
		}
		var d details
		// Review requirements: presence, stale dismissal, approval count.
		rev := p.GetRequiredPullRequestReviews()
		if rev != nil {
			d.PRReviews = true
			d.DismissStale = rev.DismissStaleReviews
			if mc.DismissStale && !rev.DismissStaleReviews {
				text = text +
					fmt.Sprintf("Dismiss stale reviews not configured for branch %v\n", b)
				pass = false
			}
			d.NumReviews = rev.RequiredApprovingReviewCount
			if rev.RequiredApprovingReviewCount < mc.ApprovalCount {
				pass = false
				text = text +
					fmt.Sprintf("PR Approvals below threshold %v : %v for branch %v\n",
						rev.RequiredApprovingReviewCount, mc.ApprovalCount, b)
			}
		} else {
			if mc.RequireApproval {
				pass = false
				text = text +
					fmt.Sprintf("PR Approvals not configured for branch %v\n", b)
			}
		}
		// Force-push blocking: absent AllowForcePushes is treated as blocked.
		afp := p.GetAllowForcePushes()
		d.BlockForce = true
		if afp != nil {
			if mc.BlockForce && afp.Enabled {
				text = text +
					fmt.Sprintf("Block force push not configured for branch %v\n", b)
				pass = false
				d.BlockForce = false
			}
		}
		ds[b] = d
	}
	return &policydef.Result{
		Enabled:    enabled,
		Pass:       pass,
		NotifyText: text,
		Details:    ds,
	}, nil
}
// Fix implementing policydef.Policy.Fix(). Currently not supported. BP plans
// to support this TODO.
func (b Branch) Fix(ctx context.Context, c *github.Client, owner, repo string) error {
	log.Warn().
		Str("org", owner).
		Str("repo", repo).
		Str("area", polName).
		Msg("Action fix is configured, but not implemented.")
	return nil
}

// GetAction returns the configured action from Branch Protection's
// configuration stored in the org-level repo, default log. Implementing
// policydef.Policy.GetAction()
func (b Branch) GetAction(ctx context.Context, c *github.Client, owner, repo string) string {
	oc, rc := getConfig(ctx, c, owner, repo)
	mc := mergeConfig(oc, rc, repo)
	return mc.Action
}
// getConfig fetches the org-level and repo-level Branch Protection configs.
// Fetch failures are logged and the pre-filled defaults are used, so this
// never fails.
func getConfig(ctx context.Context, c *github.Client, owner, repo string) (*OrgConfig, *RepoConfig) {
	oc := &OrgConfig{ // Fill out non-zero defaults
		Action:          "log",
		EnforceDefault:  true,
		RequireApproval: true,
		ApprovalCount:   1,
		DismissStale:    true,
		BlockForce:      true,
	}
	if err := configFetchConfig(ctx, c, owner, operator.OrgConfigRepo, configFile, oc); err != nil {
		log.Error().
			Str("org", owner).
			Str("repo", operator.OrgConfigRepo).
			Str("area", polName).
			Str("file", configFile).
			Err(err).
			Msg("Unexpected config error, using defaults.")
	}
	rc := &RepoConfig{}
	if err := configFetchConfig(ctx, c, owner, repo, path.Join(operator.RepoConfigDir, configFile), rc); err != nil {
		log.Error().
			Str("org", owner).
			Str("repo", repo).
			Str("area", polName).
			Str("file", path.Join(operator.RepoConfigDir, configFile)).
			Err(err).
			Msg("Unexpected config error, using defaults.")
	}
	return oc, rc
}
// mergeConfig merges repo-level overrides into the org-level defaults and
// returns the effective Branch Protection configuration for repo. Extra
// branches from the repo config are always honored; the remaining overrides
// apply only when the org allows repo overrides.
func mergeConfig(oc *OrgConfig, rc *RepoConfig, repo string) *mergedConfig {
	mc := &mergedConfig{
		Action:          oc.Action,
		EnforceDefault:  oc.EnforceDefault,
		EnforceBranches: oc.EnforceBranches[repo],
		RequireApproval: oc.RequireApproval,
		ApprovalCount:   oc.ApprovalCount,
		DismissStale:    oc.DismissStale,
		BlockForce:      oc.BlockForce,
	}
	mc.EnforceBranches = append(mc.EnforceBranches, rc.EnforceBranches...)
	if oc.OptConfig.DisableRepoOverride {
		return mc
	}
	// Apply each override only when the optional field was provided.
	if v := rc.Action; v != nil {
		mc.Action = *v
	}
	if v := rc.EnforceDefault; v != nil {
		mc.EnforceDefault = *v
	}
	if v := rc.RequireApproval; v != nil {
		mc.RequireApproval = *v
	}
	if v := rc.ApprovalCount; v != nil {
		mc.ApprovalCount = *v
	}
	if v := rc.DismissStale; v != nil {
		mc.DismissStale = *v
	}
	if v := rc.BlockForce; v != nil {
		mc.BlockForce = *v
	}
	return mc
}
|
[
0,
4
] |
package eventsourcing
import (
"github.com/caos/logging"
"github.com/caos/zitadel/internal/cache"
"github.com/caos/zitadel/internal/cache/config"
"github.com/caos/zitadel/internal/eventstore/models"
"github.com/caos/zitadel/internal/project/repository/eventsourcing/model"
)
// ProjectCache caches project aggregates by their aggregate ID.
type ProjectCache struct {
	projectCache cache.Cache
}

// StartCache builds a ProjectCache from conf.
// NOTE(review): a cache-creation failure panics (via logging ... Panic)
// rather than being returned; the error return is always nil — confirm this
// is intended.
func StartCache(conf *config.CacheConfig) (*ProjectCache, error) {
	projectCache, err := conf.Config.NewCache()
	logging.Log("EVENT-CsHdo").OnError(err).Panic("unable to create project cache")
	return &ProjectCache{projectCache: projectCache}, nil
}

// getProject returns the cached project for ID. On a cache miss (or error)
// the returned project only carries the aggregate ID; errors are logged at
// debug level and otherwise ignored.
func (c *ProjectCache) getProject(ID string) (project *model.Project) {
	project = &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: ID}}
	if err := c.projectCache.Get(ID, project); err != nil {
		logging.Log("EVENT-tMydV").WithError(err).Debug("error in getting cache")
	}
	return project
}

// cacheProject stores project under its aggregate ID; failures are only
// logged at debug level (the cache is best-effort).
func (c *ProjectCache) cacheProject(project *model.Project) {
	err := c.projectCache.Set(project.AggregateID, project)
	if err != nil {
		logging.Log("EVENT-3wKzj").WithError(err).Debug("error in setting project cache")
	}
}
|
[
2
] |
// Copyright IBM Corp. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package validator
import (
"fmt"
"github.com/bytecodealliance/wasmtime-go"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
validationv1 "github.com/sykesm/batik/pkg/pb/validation/v1"
)
// WASM bundles a compiled wasmtime module with the store it runs in; it is
// the entry point for WASM-backed transaction validation.
type WASM struct {
	store  *wasmtime.Store
	module *wasmtime.Module
}

// NewWASM compiles the web assembly binary `asm` with `engine` and returns a
// ready-to-run WASM validator.
func NewWASM(engine *wasmtime.Engine, asm []byte) (*WASM, error) {
	store := wasmtime.NewStore(engine)
	module, err := wasmtime.NewModule(engine, asm)
	if err != nil {
		return nil, err
	}
	return &WASM{store: store, module: module}, nil
}

// Validate runs the request through a fresh UTXOValidator instance built on
// this module; a new adapter is created per call, so calls do not share
// stream state.
func (w *WASM) Validate(req *validationv1.ValidateRequest) (*validationv1.ValidateResponse, error) {
	v := &UTXOValidator{
		adapter: &adapter{},
		store:   w.store,
		module:  w.module,
	}
	return v.Validate(req)
}
// UTXOValidator implements the validator.Validator interface and provides
// a custom validator for UTXO transaction validation. Currently the web assembly
// module handles transaction signature verification.
type UTXOValidator struct {
adapter *adapter
store *wasmtime.Store
module *wasmtime.Module
}
// Validate instantiates the wasm module, streams the marshalled request to it
// through the adapter, invokes the exported "validate" function, and decodes
// the response bytes the module wrote back.
func (v *UTXOValidator) Validate(req *validationv1.ValidateRequest) (*validationv1.ValidateResponse, error) {
	imports, err := v.newImports(v.module)
	if err != nil {
		return nil, err
	}
	instance, err := wasmtime.NewInstance(v.store, v.module, imports)
	if err != nil {
		return nil, err
	}
	v.adapter.instance = instance
	v.adapter.memory = instance.GetExport("memory").Memory()
	resolved, err := proto.Marshal(req)
	if err != nil {
		return nil, err
	}
	v.adapter.resolved = resolved
	// NOTE(review): the first argument (99) appears to be an arbitrary stream
	// ID expected by the module's "validate" export — confirm against the ABI.
	res, err := instance.GetExport("validate").Func().Call(99, len(resolved))
	if err != nil {
		return nil, err
	}
	code, ok := res.(int32)
	if !ok {
		return nil, errors.Errorf("unrecognized return value: %v", res)
	}
	if code != 0 {
		// Fix: report the decoded int32 code, not the raw interface value.
		return nil, errors.Errorf("validate failed, return code: %d", code)
	}
	var resp validationv1.ValidateResponse
	if err := proto.Unmarshal(v.adapter.response, &resp); err != nil {
		return nil, err
	}
	return &resp, nil
}
// newImports builds the extern list needed to instantiate the module, wiring
// every import declared in the "batik" namespace to the matching adapter host
// function. Any unrecognized import is an error.
func (v *UTXOValidator) newImports(module *wasmtime.Module) ([]*wasmtime.Extern, error) {
	var externs []*wasmtime.Extern
	for _, imp := range module.Imports() {
		var fn *wasmtime.Func
		if imp.Module() == "batik" && imp.Name() != nil {
			switch *imp.Name() {
			case "log":
				fn = wasmtime.WrapFunc(v.store, v.adapter.log)
			case "read":
				fn = wasmtime.WrapFunc(v.store, v.adapter.read)
			case "write":
				fn = wasmtime.WrapFunc(v.store, v.adapter.write)
			}
		}
		if fn == nil {
			name := "*unknown*"
			if imp.Name() != nil {
				name = *imp.Name()
			}
			return nil, errors.Errorf("import %s::%s not found", imp.Module(), name)
		}
		externs = append(externs, fn.AsExtern())
	}
	return externs, nil
}
// adapter bridges host functions into the wasm instance: it feeds the
// marshalled request (resolved) to the module through read, collects the
// module's reply through write, and exposes a log sink.
type adapter struct {
	instance *wasmtime.Instance
	memory   *wasmtime.Memory
	resolved []byte // marshalled request streamed to the module
	idx      int    // read cursor into resolved
	response []byte // bytes the module wrote back
}
// read copies up to buflen bytes of the marshalled request into module memory
// at addr, advances the read cursor, and returns the number of bytes copied.
// The requested length is clamped to the remaining input so a module asking
// for more than is available cannot trigger an out-of-range slice panic.
func (a *adapter) read(streamID, addr, buflen int32) int32 {
	buf := a.memory.UnsafeData()[addr:]
	end := a.idx + int(buflen)
	if end > len(a.resolved) {
		end = len(a.resolved)
	}
	written := copy(buf, a.resolved[a.idx:end])
	a.idx += written
	return int32(written)
}
// write appends buflen bytes of module memory starting at addr to the
// response buffer and reports the number of bytes consumed.
func (a *adapter) write(streamID, addr, buflen int32) int32 {
	data := a.memory.UnsafeData()
	a.response = append(a.response, data[addr:addr+buflen]...)
	return buflen
}
// log prints buflen bytes of module memory at buf, followed by a newline.
func (a *adapter) log(buf, buflen int32) {
	msg := a.memory.UnsafeData()[buf : buf+buflen]
	fmt.Printf("%s\n", msg)
}
|
[
7
] |
package shapes
import (
"fmt"
"log"
"math"
"math/rand"
"strings"
)
// TotalLength returns the sum of the lengths of all strings in a.
func TotalLength(a []string) int {
	total := 0
	for _, s := range a {
		total += len(s)
	}
	return total
}
// Given a string of tokens and a function which maps
// line number -> desired width,
// split tokens into lines which do not exceed desired width,
// unless the desired width is less than the length of one token.
func SplitLines(tokens []string, widthFromLineNo widthFunc) [][]string {
	lines := make([][]string, 0)
	lineNo := 0
	tokenNo := 0
	for tokenNo < len(tokens) {
		lines = append(lines, make([]string, 0))
		width := widthFromLineNo(lineNo)
		if width <= 0 {
			// Fix: this guard also fires for zero, so the message should not
			// claim the width was negative.
			log.Printf("Non-positive width, defaulting to 1 : %d on line %d\n", width, lineNo)
			width = 1
		}
		// Greedily append tokens until the line reaches the desired width.
		for TotalLength(lines[lineNo]) < width {
			lines[lineNo] = append(lines[lineNo], tokens[tokenNo])
			tokenNo++
			if tokenNo == len(tokens) {
				return lines
			}
		}
		// advance line number and take off the last token of previous line
		// since the last token pushed the string over the square width
		// unless the last line was only one token long
		if len(lines[lineNo]) > 1 {
			lines[lineNo] = lines[lineNo][:len(lines[lineNo])-1]
			tokenNo--
		}
		lineNo++
	}
	return lines
}
// Return the maximum of widthFromLineNo over the domain [0, no_lines)
func maxWidth(noLines int, widthFromLineNo widthFunc) int {
	max := 0
	for lineNo := 0; lineNo < noLines; lineNo++ {
		if w := widthFromLineNo(lineNo); w > max {
			max = w
		}
	}
	return max
}
// Given a slice of lines, where each line is a slice of token strings that should
// appear on that line and a function that maps line number -> desired width,
// add spaces to each line to make it reach the desired width if possible.
// If centered is set to true, also center the output.
// Join the justified lines together and return a string.
//
// Note: padding spaces are appended to randomly chosen tokens, so the exact
// spacing of the output is not deterministic.
func JustifyByWidth(lines [][]string, widthFromLineNo func(int) int, centered bool) string {
	maxW := maxWidth(len(lines), widthFromLineNo)
	justifiedLines := make([]string, 0, len(lines))
	for i, line := range lines {
		width := int(math.Max(float64(widthFromLineNo(i)), 1))
		// Fix: guard against empty lines — rand.Intn(0) panics, and an empty
		// line can never reach a positive width anyway.
		for len(line) > 0 && TotalLength(line) < width {
			idx := rand.Intn(len(line))
			line[idx] += " "
		}
		spacing := ""
		// center by prepending spaces such that the center is at maxW/2
		if centered {
			spacing = fmt.Sprintf("%*s", (maxW-width)/2+2, " ")
		}
		justifiedLines = append(justifiedLines, spacing+strings.Join(line, ""))
	}
	return strings.Join(justifiedLines, "\n")
}
|
[
1
] |
package loteria
import (
"math/rand"
"time"
)
type (
	// Board defines a "tabla", which is 4x4 grid of 16 Cards.
	Board struct {
		WinningPattern WinningPattern
		marked         boardIndex // bit set for every card marked so far
		cardsComputed  bool       // set once cards has been derived from cardsMap
		cardsMap       map[Card]boardIndex
		cards          [16]Card // positional view, lazily built by Cards()
		id             BoardID  // lazily computed by ID()
	}
	// BoardID represents the Board ID
	BoardID uint64
	// boardIndex indicates the location (bitwise) of cards on the board.
	boardIndex uint16
)

const (
	// ErrCardNotOnBoard defines the error returned by Mark when the specific
	// Card is not part of the board.
	ErrCardNotOnBoard = Error("card is not on board")
)
// NewBoard returns a new board using concrete cards.
// FIXME validate: cards uniqueness.
func NewBoard(cards [16]Card) Board {
	b := Board{cardsMap: make(map[Card]boardIndex)}
	for i, card := range cards {
		b.cardsMap[card] = boardIndex(1) << uint16(i)
	}
	return b
}
// NewRandomBoard returns a board with random Cards.
func NewRandomBoard() Board {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	cardsMap := make(map[Card]boardIndex)
	for len(cardsMap) < 16 {
		card := Card(rng.Intn(53))
		if _, taken := cardsMap[card]; taken {
			continue
		}
		cardsMap[card] = boardIndex(1) << uint16(len(cardsMap))
	}
	return Board{cardsMap: cardsMap}
}
// Cards returns the cards on the board.
func (b *Board) Cards() [16]Card {
	// Serve the memoized array once it has been derived.
	if b.cardsComputed {
		return b.cards
	}
	b.cardsComputed = true
	// findIndex maps a one-hot bit mask to its 0-based bit position.
	// Assumes n has at least one set bit; cardsMap values always do.
	findIndex := func(n uint16) int {
		var i uint16 = 1
		var pos uint16 = 1
		for i&n == 0 {
			i = i << 1
			pos++
		}
		return int(pos - 1)
	}
	// Invert cardsMap (card -> bit) into the positional array (slot -> card).
	for k, v := range b.cardsMap {
		b.cards[findIndex(uint16(v))] = k
	}
	return b.cards
}
// ID returns the Board Identifier
func (b *Board) ID() BoardID {
	// Return the cached ID when already computed.
	if b.id != 0 {
		return b.id
	}
	// The ID is a bitmask over the card values present on the board.
	var bits uint64
	for card := range b.cardsMap {
		bits |= 1 << uint64(card)
	}
	b.id = BoardID(bits)
	return b.id
}
// Mark marks off the card on the board.
func (b *Board) Mark(c Card) error {
	if index, ok := b.cardsMap[c]; ok {
		b.marked |= index
		return nil
	}
	return ErrCardNotOnBoard
}
// IsWinner indicates whether the marked cards win the game.
func (b *Board) IsWinner() bool {
	marked := uint16(b.marked)
	for _, pattern := range defaultWinningPatterns {
		if marked&uint16(pattern) == uint16(pattern) {
			b.WinningPattern = pattern
			return true
		}
	}
	return false
}
|
[
0
] |
package steps
import (
"fmt"
"net/http"
"strconv"
)
// UserDeletesPreviouslyCreatedTodo issues a DELETE for the todo created
// earlier in the scenario and records the response status code.
func (world *TodoWorld) UserDeletesPreviouslyCreatedTodo() error {
	resp, err := callHttpDelete(world.todoID)
	if err != nil {
		return fmt.Errorf("error on DELETE todo %v", err)
	}
	// Fix: close the response body (previously leaked) so the underlying
	// connection can be reused.
	defer resp.Body.Close()
	world.statusCode = resp.StatusCode
	return nil
}
// UserDeletesTodoWithID issues a DELETE for the todo with the given ID and
// records the response status code.
func (world *TodoWorld) UserDeletesTodoWithID(ID int) error {
	resp, err := callHttpDelete(ID)
	if err != nil {
		return fmt.Errorf("error on DELETE todo %v", err)
	}
	// Fix: close the response body (previously leaked) so the underlying
	// connection can be reused.
	defer resp.Body.Close()
	world.statusCode = resp.StatusCode
	return nil
}
// callHttpDelete sends DELETE <serverURL>/<ID> and returns the raw response;
// the caller owns (and must close) the response body.
func callHttpDelete(ID int) (*http.Response, error) {
	url := serverURL + "/" + strconv.Itoa(ID)
	req, err := http.NewRequest(http.MethodDelete, url, nil)
	if err != nil {
		return nil, err
	}
	return (&http.Client{}).Do(req)
}
|
[
2
] |
package vt
// clamp limits x to the inclusive range [min, max].
func clamp(x, min, max int) int {
	switch {
	case x < min:
		return min
	case x > max:
		return max
	default:
		return x
	}
}
// clampCursor clamps c to [0, Size.X] x [0, Size.Y]; one position past the
// last cell is allowed on each axis.
func (t *Tty) clampCursor(c Pt) Pt {
	c.X = clamp(c.X, 0, t.Size.X)
	c.Y = clamp(c.Y, 0, t.Size.Y)
	return c
}
// Clamps cursor strictly within bounds
func (t *Tty) clampCursorStrict(c Pt) Pt {
	c.X = clamp(c.X, 0, t.Size.X-1)
	c.Y = clamp(c.Y, 0, t.Size.Y-1)
	return c
}
// cursorMove shifts the cursor by delta, clamped to the screen.
func (t *Tty) cursorMove(delta Pt) {
	next := Pt{X: t.Cursor.X + delta.X, Y: t.Cursor.Y + delta.Y}
	t.Cursor = t.clampCursor(next)
}
// backspace moves the cursor one cell left (never past column 0) and fires
// the CursorMoved callback when the cursor actually moved.
func (t *Tty) backspace() {
	if t.Cursor.X <= 0 {
		return
	}
	t.Cursor.X--
	if t.CursorMoved != nil {
		t.CursorMoved(t, t.Cursor)
	}
}
// IsTabStop reports whether column x is a tab stop (every 8 columns).
func (t *Tty) IsTabStop(x int) bool { return (x & 7) == 0 }

// posOffset converts a point to a linear index into Buf.
func (t *Tty) posOffset(p Pt) int { return t.Size.Offset(p) }

// maxOffset returns the total number of cells in the buffer.
func (t *Tty) maxOffset() int { return t.Size.Area() }

// Get returns the cell at p.
func (t *Tty) Get(p Pt) AttrChar { return t.Buf[t.posOffset(p)] }

// Set writes the cell at p.
func (t *Tty) Set(p Pt, ach AttrChar) { t.Buf[t.posOffset(p)] = ach }
// tab writes default-attribute cells from the cursor forward until the next
// tab stop (or the end of the line), leaving the cursor at that position.
func (t *Tty) tab() {
	z := t.DefaultAttrChar()
	for t.Cursor.X < t.Size.X {
		t.Set(t.Cursor, z)
		t.Cursor.X++
		if t.IsTabStop(t.Cursor.X) {
			break
		}
	}
}
// linefeed performs CR+LF: move to column 0, then down one line (scrolling
// when the bottom of the scroll region is reached).
func (t *Tty) linefeed() {
	t.carriageReturn()
	t.verticaltab()
}
// upline moves the cursor up one line. Crossing the top of the scroll region
// scrolls the region down by one instead; outside the region the cursor is
// simply pinned at row 0.
func (t *Tty) upline() {
	t.Cursor.Y--
	if t.Cursor.Y == t.ScrollRange.Low-1 {
		t.Cursor.Y = t.ScrollRange.Low
		t.Scroll(-1)
	} else {
		if t.Cursor.Y < 0 {
			t.Cursor.Y = 0
		}
	}
}
// verticaltab moves the cursor down one line. Hitting the bottom of the
// scroll region scrolls the region up instead; outside the region the cursor
// is pinned to the last row of the screen.
func (t *Tty) verticaltab() {
	t.Cursor.Y++
	if t.Cursor.Y == t.ScrollRange.High {
		// Cursor.Y == ScrollRange.High here, so this is always Scroll(1).
		t.Scroll(t.ScrollRange.High - t.Cursor.Y + 1)
		t.Cursor.Y = t.ScrollRange.High - 1
	} else if t.Cursor.Y >= t.Size.Y {
		t.Cursor.Y = t.Size.Y - 1
	}
}
// formfeed clears the entire screen.
func (t *Tty) formfeed() {
	t.ClearScreen()
}
// carriageReturn moves the cursor to column 0.
func (t *Tty) carriageReturn() {
	t.Cursor.X = 0
}
// clampCursorX handles a cursor that ran off the right edge: with AutoWrap
// it wraps to column 0 of the next line (scrolling if necessary), otherwise
// it pins the cursor to the last column.
func (t *Tty) clampCursorX() {
	if t.Cursor.X >= t.Size.X {
		if t.AutoWrap {
			t.Cursor.X = 0
			t.verticaltab()
		} else {
			t.Cursor.X = t.Size.X - 1
		}
	}
}
|
[
1,
4
] |
package gquic_test
import (
"bytes"
"fmt"
"math/rand"
"os/exec"
"strconv"
"time"
_ "github.com/lucas-clemente/quic-clients" // download clients
"github.com/lucas-clemente/quic-go/integrationtests/tools/proxy"
"github.com/lucas-clemente/quic-go/integrationtests/tools/testserver"
"github.com/lucas-clemente/quic-go/internal/protocol"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
)
// getRandomDuration returns a uniformly random duration in [min, max).
func getRandomDuration(min, max time.Duration) time.Duration {
	span := int64(max - min)
	return min + time.Duration(rand.Int63n(span))
}
var _ = Describe("Random Duration Generator", func() {
rand.Seed(time.Now().UnixNano())
It("gets a random RTT", func() {
var min time.Duration = time.Hour
var max time.Duration
var sum time.Duration
rep := 10000
for i := 0; i < rep; i++ {
val := getRandomDuration(100*time.Millisecond, 500*time.Millisecond)
sum += val
if val < min {
min = val
}
if val > max {
max = val
}
}
avg := sum / time.Duration(rep)
Expect(avg).To(BeNumerically("~", 300*time.Millisecond, 5*time.Millisecond))
Expect(min).To(BeNumerically(">=", 100*time.Millisecond))
Expect(min).To(BeNumerically("<", 105*time.Millisecond))
Expect(max).To(BeNumerically(">", 495*time.Millisecond))
Expect(max).To(BeNumerically("<=", 500*time.Millisecond))
})
})
var _ = Describe("Random RTT", func() {
var proxy *quicproxy.QuicProxy
runRTTTest := func(minRtt, maxRtt time.Duration, version protocol.VersionNumber) {
rand.Seed(time.Now().UnixNano())
var err error
proxy, err = quicproxy.NewQuicProxy("localhost:", version, &quicproxy.Opts{
RemoteAddr: "localhost:" + testserver.Port(),
DelayPacket: func(_ quicproxy.Direction, _ uint64) time.Duration {
return getRandomDuration(minRtt, maxRtt)
},
})
Expect(err).ToNot(HaveOccurred())
command := exec.Command(
clientPath,
"--quic-version="+strconv.Itoa(int(version)),
"--host=127.0.0.1",
"--port="+strconv.Itoa(proxy.LocalPort()),
"https://quic.clemente.io/prdata",
)
session, err := Start(command, nil, GinkgoWriter)
Expect(err).NotTo(HaveOccurred())
defer session.Kill()
Eventually(session, 20).Should(Exit(0))
Expect(bytes.Contains(session.Out.Contents(), testserver.PRData)).To(BeTrue())
}
AfterEach(func() {
err := proxy.Close()
Expect(err).ToNot(HaveOccurred())
time.Sleep(time.Millisecond)
})
for i := range protocol.SupportedVersions {
version := protocol.SupportedVersions[i]
Context(fmt.Sprintf("with QUIC version %s", version), func() {
It("gets a file a random RTT between 10ms and 30ms", func() {
runRTTTest(10*time.Millisecond, 30*time.Millisecond, version)
})
})
}
})
|
[
1
] |
package main
//https://leetcode-cn.com/problems/minimum-increment-to-make-array-unique/
import "sort"
import "fmt"
func main() {
var a = [] int{3, 2, 1, 2, 1, 7}
res := minIncrementForUnique(a)
fmt.Println(res)
}
// minIncrementForUnique returns the minimum number of +1 increments needed
// to make every value in A unique. A is sorted (and mutated) in place.
func minIncrementForUnique(A []int) int {
	sort.Ints(A)
	moves := 0
	for i := 1; i < len(A); i++ {
		if A[i] > A[i-1] {
			continue
		}
		next := A[i-1] + 1
		moves += next - A[i]
		A[i] = next
	}
	return moves
}
|
[
2
] |
/**
leetcode 119. Pascal's Triangle II
Given an integer rowIndex, return the rowIndexth (0-indexed) row of
the Pascal's triangle.
In Pascal's triangle, each number is the sum of the two numbers directly
above it as shown:
*/
package main
// getRow returns row rowIndex (0-indexed) of Pascal's triangle. The row is
// built in place, updating right-to-left so each entry only reads values
// still belonging to the previous row.
func getRow(rowIndex int) []int {
	row := make([]int, rowIndex+1)
	row[0] = 1
	for i := 1; i <= rowIndex; i++ {
		row[i] = 1
		for j := i - 1; j > 0; j-- {
			row[j] += row[j-1]
		}
	}
	return row
}
|
[
0
] |
package rediscache
import (
"encoding/json"
"time"
"github.com/go-redis/redis"
)
// ErrCacheMiss is returned by Get when the key does not exist.
var ErrCacheMiss = redis.Nil

// Cache is a JSON-over-Redis cache with optional transaction-like batching:
// Begin returns a child cache that buffers writes until End flushes them.
type Cache interface {
	Get(id string, res interface{}) error
	Set(id string, res interface{}) error
	SetRaw(id string, data []byte) error
	Delete(id string) error
	Expire(id string, at time.Time) error
	Begin(max time.Duration) Cache
	End() error
}
// New returns a Cache backed by client; prefix is prepended to every key.
func New(client redis.UniversalClient, prefix string) Cache {
	return &rootCache{client, prefix}
}
// rootCache talks directly to Redis; pr is the key prefix.
type rootCache struct {
	r  redis.UniversalClient
	pr string
}
// Get fetches the prefixed key and JSON-decodes it into res.
func (c *rootCache) Get(id string, res interface{}) error {
	b, err := c.r.Get(c.pr + id).Bytes()
	if err != nil {
		return err
	}
	return json.Unmarshal(b, res)
}
// Set JSON-encodes res and stores it under id via SetRaw.
func (c *rootCache) Set(id string, res interface{}) error {
	encoded, err := json.Marshal(res)
	if err != nil {
		return err
	}
	return c.SetRaw(id, encoded)
}
// SetRaw stores raw bytes under the prefixed key with a fixed one-hour TTL.
func (c *rootCache) SetRaw(id string, data []byte) error {
	return c.r.Set(c.pr+id, data, time.Hour).Err()
}
// Expire sets the key to expire at the given time; a missing key (redis.Nil)
// is not treated as an error.
func (c *rootCache) Expire(id string, at time.Time) error {
	err := c.r.ExpireAt(c.pr+id, at).Err()
	if err != nil && err != redis.Nil {
		return err
	}
	return nil
}
// Delete removes the prefixed key.
func (c *rootCache) Delete(id string) error {
	return c.r.Del(c.pr + id).Err()
}
// Begin returns a buffering child cache whose writes are held until End.
// Buffered keys are set to expire at now+max so abandoned transactions
// clean themselves up.
func (c *rootCache) Begin(max time.Duration) Cache {
	child := &txnCache{
		parent:   c,
		mutates:  map[string][]byte{},
		expireAt: time.Now().Add(max),
	}
	return child
}
// End is a no-op on the root cache; there is nothing buffered to flush.
func (c *rootCache) End() error {
	return nil
}
// txnCache buffers Set/Delete operations in mutates (a nil value marks a
// pending delete) and flushes them to parent on End.
type txnCache struct {
	parent   Cache
	mutates  map[string][]byte
	expireAt time.Time // deadline applied to touched parent keys
}
// Get serves buffered values first (a nil entry means the key was deleted
// within this transaction), falling back to the parent cache otherwise.
func (c *txnCache) Get(id string, res interface{}) error {
	buffered, ok := c.mutates[id]
	if !ok {
		return c.parent.Get(id, res)
	}
	if buffered == nil {
		return ErrCacheMiss
	}
	return json.Unmarshal(buffered, res)
}
// Set buffers the encoded value locally and immediately shortens the TTL of
// any existing parent entry to the transaction deadline, so an abandoned
// transaction cannot leave a stale value alive past expireAt.
func (c *txnCache) Set(id string, res interface{}) error {
	b, err := json.Marshal(res)
	if err != nil {
		return err
	}
	c.mutates[id] = b
	return c.parent.Expire(id, c.expireAt)
}
// Expire passes straight through to the parent cache.
func (c *txnCache) Expire(id string, at time.Time) error {
	return c.parent.Expire(id, at)
}
// SetRaw writes through to the parent immediately, bypassing the transaction
// buffer. NOTE(review): this is inconsistent with Set, which buffers until
// End — confirm whether the write-through is intentional.
func (c *txnCache) SetRaw(id string, data []byte) error {
	return c.parent.SetRaw(id, data)
}
// Delete records a pending delete (nil buffer entry) and shortens the TTL of
// the existing parent entry to the transaction deadline.
func (c *txnCache) Delete(id string) error {
	c.mutates[id] = nil
	return c.parent.Expire(id, c.expireAt)
}
// Begin on a transaction is unsupported and returns nil.
// NOTE(review): callers invoking methods on the returned nil Cache will
// panic — consider returning c (join the current transaction) or an error.
func (c *txnCache) Begin(max time.Duration) Cache {
	return nil
}
// End flushes buffered mutations to the parent: nil entries become deletes,
// all others raw sets. Map iteration order is random, so the flush order
// across distinct keys is unspecified; flushing stops at the first error,
// leaving the remaining mutations unflushed.
func (c *txnCache) End() error {
	for k, v := range c.mutates {
		if v == nil {
			err := c.parent.Delete(k)
			if err != nil {
				return err
			}
		} else {
			err := c.parent.SetRaw(k, v)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
|
[
1
] |
package utils
import (
	"errors"
	"math/rand"
	"sync"
	"time"
)
var (
	// ErrIllegalRange means minimum > maximum
	ErrIllegalRange = errors.New("illegal random number range [ minimum > maximum ]")
)

// seedOnce ensures the global PRNG is seeded exactly once. Re-seeding on
// every call (the previous behavior) is slower and can repeat values when
// calls land within the clock's resolution.
var seedOnce sync.Once

// RandInt is to get a rand int
// return min ~ max (inclusive); returns ErrIllegalRange when min > max.
func RandInt(min, max int) (int, error) {
	if min > max {
		return 0, ErrIllegalRange
	}
	seedOnce.Do(func() {
		rand.Seed(time.Now().UnixNano())
	})
	return rand.Intn(max-min+1) + min, nil
}
|
[
1
] |
package zebra
import (
"fmt"
)
// Solution reports who drinks water and who owns the zebra.
type Solution struct {
	DrinksWater string
	OwnsZebra   string
}

// Location starts with 1
type Location int

// LocationRelative describes a positional relationship between two houses.
type LocationRelative int

const (
	right LocationRelative = iota + 1
	nextTo
)

// Color is: red, green, ivory, yellow, blue
type Color int

const (
	red Color = iota + 1
	green
	ivory
	yellow
	blue
)

// Nationality is: Englishman, Spaniard, Ukrainian, Norwegian, Japanese
type Nationality int

const (
	Englishman Nationality = iota + 1
	Spaniard
	Ukrainian
	Norwegian
	Japanese
)

// Pet is: dog, snails, fox, horse, zebra
type Pet int

const (
	dog Pet = iota + 1
	snails
	fox
	horse
	zebra
)

// Beverage is: coffee, tea, milk, orange juice, water
type Beverage int

const (
	coffee Beverage = iota + 1
	tea
	milk
	orangeJuice
	water
)

// CigarBrand is: old gold, kools, chesterfields, lucky strike, parliaments
type CigarBrand int

const (
	oldGold CigarBrand = iota + 1
	kools
	cheserfields // NOTE(review): identifier misspells "chesterfields"
	luckyStrike
	parliaments
)

// House holds the attributes of one house; the zero value of each field
// means "unknown". locationRelative is only meaningful inside a Clue input.
type House struct {
	color            Color
	nationality      Nationality
	pet              Pet
	beverage         Beverage
	cigarBrand       CigarBrand
	location         Location
	locationRelative LocationRelative
}

// Clue pairs a lookup pattern (input) with attributes to assign (output);
// solved records whether the clue has been applied to the houses.
type Clue struct {
	input  House
	output House
	solved bool
}
// SolvePuzzle builds the houses and the clue list for the classic zebra
// puzzle and runs the solver.
// NOTE(review): some clues are still missing (see TODOs) and the Solution
// fields are never filled in from the solved houses.
func SolvePuzzle() Solution {
	s := Solution{}
	var c *Clue
	houses := []*House{}
	// There are five houses.
	for i := 0; i < 5; i++ {
		h := &House{
			location: Location(i + 1),
		}
		houses = append(houses, h)
	}
	clues := []*Clue{}
	// The Englishman lives in the red house.
	c = &Clue{
		input: House{
			color: red,
		},
		output: House{
			nationality: Englishman,
		},
	}
	clues = append(clues, c)
	// The Spaniard owns the dog.
	c = &Clue{
		input: House{
			nationality: Spaniard,
		},
		output: House{
			pet: dog,
		},
	}
	clues = append(clues, c)
	// Coffee is drunk in the green house.
	c = &Clue{
		input: House{
			color: green,
		},
		output: House{
			beverage: coffee,
		},
	}
	clues = append(clues, c)
	// The Ukrainian drinks tea.
	c = &Clue{
		input: House{
			nationality: Ukrainian,
		},
		output: House{
			beverage: tea,
		},
	}
	clues = append(clues, c)
	// The green house is immediately to the right of the ivory house.
	c = &Clue{
		input: House{
			color:            ivory,
			locationRelative: right, // to the right of the input house
		},
		output: House{
			color: green,
		},
	}
	clues = append(clues, c)
	// The Old Gold smoker owns snails.
	c = &Clue{
		input: House{
			cigarBrand: oldGold,
		},
		output: House{
			pet: snails,
		},
	}
	clues = append(clues, c)
	// Kools are smoked in the yellow house.
	c = &Clue{
		input: House{
			color: yellow,
		},
		output: House{
			cigarBrand: kools,
		},
	}
	clues = append(clues, c)
	// Milk is drunk in the middle house (start with 1).
	c = &Clue{
		input: House{
			location: 3,
		},
		output: House{
			beverage: milk,
		},
	}
	clues = append(clues, c)
	// The Norwegian lives in the first house.
	c = &Clue{
		input: House{
			location: 1,
		},
		output: House{
			nationality: Norwegian,
		},
	}
	clues = append(clues, c)
	// The man who smokes Chesterfields lives in the house next to the man with the fox.
	c = &Clue{
		input: House{
			pet:              fox,
			locationRelative: nextTo, // next to the input house
		},
		output: House{
			cigarBrand: cheserfields,
		},
	}
	clues = append(clues, c)
	// Kools are smoked in the house next to the house where the horse is kept.
	// TODO
	// The Lucky Strike smoker drinks orange juice.
	c = &Clue{
		input: House{
			cigarBrand: luckyStrike,
		},
		output: House{
			beverage: orangeJuice,
		},
	}
	clues = append(clues, c)
	// The Japanese smokes Parliaments.
	c = &Clue{
		input: House{
			nationality: Japanese,
		},
		output: House{
			cigarBrand: parliaments,
		},
	}
	clues = append(clues, c)
	// The Norwegian lives next to the blue house.
	// TODO: clue not yet encoded.
	solve(clues, houses)
	return s
}
// solve repeatedly applies all clues to the houses until none remain
// unsolved, printing progress after each pass.
// NOTE(review): if some clue can never be satisfied this loop will not
// terminate — confirm the clue set guarantees progress each pass.
func solve(clues []*Clue, houses []*House) {
	fmt.Println("Solving...")
	fmt.Println()
	fmt.Printf("Houses (start):\n")
	printHouses(houses)
	cluesUnsolved := unsolvedClues(clues)
	for cluesUnsolved > 0 {
		for i := 0; i < len(clues); i++ {
			processClue(clues[i], houses)
		}
		cluesUnsolved = unsolvedClues(clues)
		fmt.Println()
		fmt.Printf("Clues :\n")
		printClues(clues)
	}
	fmt.Println()
	fmt.Printf("Houses (end):\n")
	printHouses(houses)
}
// unsolvedClues counts the clues that have not been applied yet.
func unsolvedClues(clues []*Clue) int {
	count := 0
	for _, clue := range clues {
		if !clue.solved {
			count++
		}
	}
	return count
}
// processClue applies a single clue: it looks up the house matching the
// clue's input attributes, optionally shifts to a relative house, and copies
// the clue's output attribute onto it. Clues whose input house cannot be
// found yet are left unsolved for a later pass.
func processClue(c *Clue, houses []*House) {
	var solved bool
	var h *House
	var err error
	relative := false
	// get the house from input (first non-zero attribute wins)
	switch {
	case c.input.pet != 0:
		h, err = houseByPet(c.input.pet, houses)
	case c.input.cigarBrand != 0:
		h, err = houseByCigarBrand(c.input.cigarBrand, houses)
	case c.input.beverage != 0:
		h, err = houseByBeverage(c.input.beverage, houses)
	case c.input.nationality != 0:
		h, err = houseByNationality(c.input.nationality, houses)
	case c.input.location != 0:
		h, err = houseByLocation(c.input.location, houses)
	case c.input.color != 0:
		h, err = houseByColor(c.input.color, houses)
	}
	if c.input.locationRelative != 0 {
		relative = true
	}
	// failed to find house
	if err != nil {
		return
	}
	solved = true
	if relative {
		var offset Location
		switch c.input.locationRelative {
		case right:
			offset = 1
		}
		// NOTE(review): nextTo has no case above, so it falls through with
		// offset 0 (i.e. the same house) — confirm intended handling.
		h, err = houseByLocation(h.location+offset, houses)
		if err != nil {
			panic(err)
		}
	}
	// set attribute in output (first non-zero attribute wins)
	switch {
	case c.output.pet != 0:
		h.pet = c.output.pet
	case c.output.cigarBrand != 0:
		h.cigarBrand = c.output.cigarBrand
	case c.output.beverage != 0:
		h.beverage = c.output.beverage
	case c.output.nationality != 0:
		h.nationality = c.output.nationality
	case c.output.location != 0:
		h.location = c.output.location
	case c.output.color != 0:
		h.color = c.output.color
	default:
		panic("no output set!")
	}
	// set solved to true if success
	c.solved = solved
}
// printHouses dumps each house in Go syntax, one per line.
func printHouses(houses []*House) {
	for i := range houses {
		fmt.Printf("%#v\n", houses[i])
	}
}
// printClues dumps each clue in Go syntax, one per line.
func printClues(clues []*Clue) {
	for i := range clues {
		fmt.Printf("%#v\n", clues[i])
	}
}
// houseByAttribute dispatches to the attribute-specific lookup by name.
func houseByAttribute(attr string, value interface{}, houses []*House) (*House, error) {
	switch attr {
	case "color":
		return houseByColor(value.(Color), houses)
	case "nationality":
		return houseByNationality(value.(Nationality), houses)
	case "beverage":
		return houseByBeverage(value.(Beverage), houses)
	case "cigarbrand":
		return houseByCigarBrand(value.(CigarBrand), houses)
	case "pet":
		return houseByPet(value.(Pet), houses)
	case "location":
		// Fix: this dispatch case was missing even though houseByLocation exists.
		return houseByLocation(value.(Location), houses)
	}
	return nil, fmt.Errorf("invalid attribute")
}
// houseByColor returns the house painted c, or an error if none matches.
func houseByColor(c Color, houses []*House) (*House, error) {
	for i := range houses {
		if houses[i].color == c {
			return houses[i], nil
		}
	}
	return nil, fmt.Errorf("unknown by color")
}
// houseByNationality returns the house occupied by nationality n, or an
// error if none matches.
func houseByNationality(n Nationality, houses []*House) (*House, error) {
	for i := range houses {
		if houses[i].nationality == n {
			return houses[i], nil
		}
	}
	return nil, fmt.Errorf("unknown by nationality")
}
// houseByPet returns the house keeping pet p, or an error if none matches.
func houseByPet(p Pet, houses []*House) (*House, error) {
	for i := range houses {
		if houses[i].pet == p {
			return houses[i], nil
		}
	}
	return nil, fmt.Errorf("unknown by pet")
}
// houseByBeverage returns the house where beverage b is drunk, or an error
// if none matches.
func houseByBeverage(b Beverage, houses []*House) (*House, error) {
	for i := range houses {
		if houses[i].beverage == b {
			return houses[i], nil
		}
	}
	return nil, fmt.Errorf("unknown by beverage")
}
// houseByCigarBrand returns the house where brand c is smoked, or an error
// if none matches.
func houseByCigarBrand(c CigarBrand, houses []*House) (*House, error) {
	for i := range houses {
		if houses[i].cigarBrand == c {
			return houses[i], nil
		}
	}
	return nil, fmt.Errorf("unknown by cigar brand")
}
// houseByLocation returns the house at location l, or an error if none
// matches.
func houseByLocation(l Location, houses []*House) (*House, error) {
	for i := range houses {
		if houses[i].location == l {
			return houses[i], nil
		}
	}
	return nil, fmt.Errorf("unknown by location")
}
|
[
7
] |
// The timestamp command annotates lines read from standard input
// with the time that they were read. This is useful for seeing
// timing information on running commands from the shell.
//
// With no file arguments, timestamp prints lines read
// from standard input prefixed with a timestamp,
// the time since the timestamp command started.
// The first line is of the form:
//
// start 2006-01-02 15:04:05.000 -0700
//
// giving the absolute start time.
//
// If files are provided, they are read and the timestamp output in
// the named files is merged into one time sequence.
//
// With a single file, file names are omitted from
// the output.
package main
import (
"bufio"
"flag"
"fmt"
"io"
"log"
"os"
"strings"
"time"
)
// Command-line flags controlling the output format.
var (
	printMilliseconds = flag.Bool("ms", false, "print milliseconds instead of mm:ss.000")
	suppressFilenames = flag.Bool("n", false, "suppress printing of file names")
)

var usage = `usage: timestamp [flags] [file...]
With no file arguments, timestamp prints lines read
from standard input prefixed with a timestamp,
the time since the timestamp command started.
The first line is of the form:
start 2006-01-02 15:04:05.000 -0700
giving the absolute start time.
If files are provided, they are read and the timestamp output in
the named files is merged into one time sequence.
With a single file, file names are omitted from
the output.
`

// headerTimeFormat is the reference layout of the absolute "start" line.
const headerTimeFormat = "2006-01-02 15:04:05.000 -0700"
// main reads stdin and prefixes each line with the elapsed time since start,
// or, when file arguments are given, merges already-timestamped files into a
// single time-ordered stream.
func main() {
	flag.Usage = func() {
		fmt.Fprint(os.Stderr, usage)
		flag.PrintDefaults()
		os.Exit(2)
	}
	flag.Parse()
	if args := flag.Args(); len(args) > 0 {
		if len(args) == 1 {
			// A single input needs no file-name prefixes.
			*suppressFilenames = true
		}
		mergeFiles(flag.Args())
		return
	}
	t0 := time.Now()
	b := bufio.NewReader(os.Stdin)
	out := bufio.NewWriter(os.Stdout)
	wasPrefix := false
	fmt.Fprintf(out, "start %s\n", time.Now().Format(headerTimeFormat))
	out.Flush()
	for {
		line, isPrefix, err := b.ReadLine()
		if err != nil {
			break
		}
		// Stamp only the first chunk of an over-long line (ReadLine may
		// split lines exceeding the buffer).
		if !wasPrefix {
			// Idiom fix: time.Since(t0) over time.Now().Sub(t0).
			printStamp(out, time.Since(t0))
		}
		out.Write(line)
		if !isPrefix {
			out.WriteByte('\n')
			out.Flush()
		}
		wasPrefix = isPrefix
	}
	out.Flush()
}
// mergeFiles opens each timestamped file, merges their line streams into a
// single time-ordered sequence, and prints it relative to the merged
// stream's start time.
// NOTE(review): the opened files are never closed — acceptable for this
// short-lived command, but worth confirming.
func mergeFiles(files []string) {
	fs := make([]*bufio.Reader, len(files))
	for i, file := range files {
		f, err := os.Open(file)
		if err != nil {
			log.Fatalf("timestamp: cannot open file: %v", err)
		}
		fs[i] = bufio.NewReader(f)
	}
	// Fold all streams pairwise into one ordered channel.
	out := readLines(fs[0], files[0])
	for i, f := range fs[1:] {
		out = merge(readLines(f, files[i+1]), out)
	}
	// The merged stream must begin with a synthetic "start" line.
	startLine := <-out
	if startLine.line != "start\n" {
		panic("no start")
	}
	stdout := bufio.NewWriter(os.Stdout)
	t0 := startLine.t
	fmt.Fprintf(stdout, "start %s\n", t0.Format(headerTimeFormat))
	stdout.Flush()
	for line := range out {
		printStamp(stdout, line.t.Sub(t0))
		if !*suppressFilenames {
			fmt.Fprintf(stdout, "%s: ", line.name)
		}
		stdout.WriteString(line.line)
		stdout.Flush()
	}
}
// merge combines two time-ordered line channels into one, always forwarding
// whichever pending line has the earlier timestamp. r0/r1 act as "needs
// refill" markers: a channel is only received from again after its previous
// line has been forwarded.
func merge(c0, c1 <-chan line) <-chan line {
	out := make(chan line)
	go func() {
		defer close(out)
		var line0, line1 line
		r0, r1 := c0, c1
		ok0, ok1 := true, true
		for {
			if r0 != nil {
				line0, ok0 = <-r0
				r0 = nil
			}
			if r1 != nil {
				line1, ok1 = <-r1
				r1 = nil
			}
			switch {
			case !ok0 && !ok1:
				// Both inputs exhausted.
				return
			case !ok0:
				out <- line1
				r1 = c1
			case !ok1:
				out <- line0
				r0 = c0
			default:
				if line0.t.Before(line1.t) {
					out <- line0
					r0 = c0
				} else {
					out <- line1
					r1 = c1
				}
			}
		}
	}()
	return out
}
// line is one timestamped line read from an input file.
type line struct {
	t    time.Time // absolute time derived from the file's start line
	line string    // line content with the timestamp prefix stripped
	name string    // source file name
}

// readLines parses one timestamp-annotated file and emits its lines, with
// timestamps converted to absolute times, on the returned channel. The first
// emitted line is the synthetic "start" line carrying the base time. Lines
// whose timestamp cannot be parsed are emitted with the previous timestamp.
func readLines(r *bufio.Reader, name string) <-chan line {
	out := make(chan line)
	go func() {
		defer close(out)
		startLine, err := r.ReadString('\n')
		if err != nil || !strings.HasPrefix(startLine, "start ") {
			log.Printf("timestamp: cannot read start line")
			return
		}
		start, err := time.Parse(headerTimeFormat, startLine[len("start "):len(startLine)-1])
		if err != nil {
			log.Printf("timestamp: cannot parse start line %q: %v", startLine, err)
		}
		out <- line{start, "start\n", name}
		prev := start
		for {
			s, err := r.ReadString('\n')
			if err == io.EOF {
				break
			}
			if err != nil {
				log.Printf("timestamp: read error: %v", err)
				break
			}
			i := strings.Index(s, " ")
			if i == -1 {
				log.Printf("timestamp: line has no timestamp: %q", s)
				out <- line{prev, s, name}
				continue
			}
			var d time.Duration
			if strings.Index(s[0:i], ":") >= 0 {
				// mm:ss.mmm timestamp form.
				var min, sec, millisec int
				if _, err := fmt.Sscanf(s[0:i], "%d:%d.%d", &min, &sec, &millisec); err != nil {
					log.Printf("timestamp: cannot parse timestamp on line %q", s)
					out <- line{prev, s, name}
					continue
				}
				d = time.Duration(min)*time.Minute +
					time.Duration(sec)*time.Second +
					time.Duration(millisec)*time.Millisecond
			} else {
				// Raw-millisecond timestamp form (-ms output).
				var millisec int64
				// Fix: this used fmt.Scanf, which reads from STDIN and treats
				// s[0:i] as the format string; Sscanf parses the text itself.
				if _, err := fmt.Sscanf(s[0:i], "%d", &millisec); err != nil {
					log.Printf("timestamp: cannot parse timestamp on line %q", s)
					out <- line{prev, s, name}
					continue
				}
				d = time.Duration(millisec) * time.Millisecond
			}
			t := start.Add(d)
			out <- line{t, s[i+1:], name}
			prev = t
		}
	}()
	return out
}
// printStamp writes the duration either as zero-padded raw milliseconds
// (with the -ms flag) or in m:ss.mmm form, followed by a trailing space.
func printStamp(w io.Writer, d time.Duration) {
	if *printMilliseconds {
		fmt.Fprintf(w, "%010d ", d/1e6)
		return
	}
	min := d / time.Minute
	sec := (d / time.Second) % 60
	msec := (d / time.Millisecond) % 1000
	fmt.Fprintf(w, "%d:%02d.%03d ", min, sec, msec)
}
|
[
1
] |
package main
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"regexp"
"sort"
"strconv"
"strings"
)
// DebugLevel is an enum that defines log level groups
type DebugLevel int

// These are the debug groups. Each is a distinct bit so several groups can
// be OR-ed together into the global debug mask consulted by dmsg.
const (
	DbgScan   DebugLevel = 1 << iota // 1
	DbgDisk                          // 2
	DbgPart                          // 4
	DbgSwRaid                        // 8
	DbgPv                            // 16
	DbgVg                            // 32
	DbgLv                            // 64
	DbgFs                            // 128
	DbgImage                         // 256
	DbgConfig                        // 512
	DbgAction                        // 1024
	DbgCmd                           // 2048
)
// dmsg writes a formatted debug message to stderr when the given level bit
// is enabled in the global debug mask.
func dmsg(level DebugLevel, f string, args ...interface{}) {
	if debug&level == 0 {
		return
	}
	fmt.Fprintf(os.Stderr, f, args...)
}
// Comparator is an interface that allows for
// common routines to manipulate the objects without
// regard to their type.
type Comparator interface {
	// Equal reports whether the argument refers to the same logical object.
	Equal(Comparator) bool
	// Merge folds the (newer) argument's state into the receiver.
	Merge(Comparator) error
}
// MergeList - This is actual merge over time.
// The merge function assumes that nd is the new object and
// its actual should replace the current d.
//
// Elements of merge matching an element of base (per Equal) are merged into
// it; unmatched ones are appended; elements of base with no match in merge
// are removed. Removal swaps with the last element, so base may be reordered.
func MergeList(base, merge []Comparator) ([]Comparator, error) {
	if len(base) == 0 {
		base = merge
	} else {
		// Merge matching elements, append new ones.
		for _, nd := range merge {
			found := false
			for _, d := range base {
				if d.Equal(nd) {
					if err := d.Merge(nd); err != nil {
						return nil, err
					}
					found = true
					break
				}
			}
			if !found {
				base = append(base, nd)
			}
		}
		// Collect base elements that no longer appear in merge.
		remove := []Comparator{}
		for _, d := range base {
			found := false
			for _, nd := range merge {
				if nd.Equal(d) {
					found = true
					break
				}
			}
			if !found {
				remove = append(remove, d)
			}
		}
		// Swap-delete each element slated for removal.
		for _, rd := range remove {
			for i, d := range base {
				if d.Equal(rd) {
					s := base
					s[len(s)-1], s[i] = s[i], s[len(s)-1]
					base = s[:len(s)-1]
					break
				}
			}
		}
	}
	return base, nil
}
// SizeParseError is used to indicate a bad size
type SizeParseError error

// sizeParser parses a string size and returns a number.
// It accepts an optional decimal fraction and an optional binary suffix
// (B, KB, MB, GB, TB, PB or the single letters K/M/G/T/P); suffixes are
// powers of 1024.
func sizeParser(v string) (uint64, error) {
	// Fix: the suffix class must include P, otherwise "1PB" silently
	// matched with an empty suffix and parsed as 1.
	sizeRE := regexp.MustCompile(`([0-9.]+) *([KMGTP]?[B]?)`)
	parts := sizeRE.FindStringSubmatch(v)
	if len(parts) < 2 {
		return 0, SizeParseError(fmt.Errorf("%s cannot be parsed as a Size", v))
	}
	// Fix: ParseFloat's bitSize must be 32 or 64; 10 was invalid.
	f, err := strconv.ParseFloat(parts[1], 64)
	if err != nil {
		return 0, SizeParseError(err)
	}
	if len(parts) == 3 {
		switch parts[2] {
		case "PB", "P":
			f = f * 1024
			// Fix: previously missing, so PB only multiplied once.
			fallthrough
		case "TB", "T":
			f = f * 1024
			fallthrough
		case "GB", "G":
			f = f * 1024
			fallthrough
		case "MB", "M":
			f = f * 1024
			fallthrough
		case "KB", "K":
			f = f * 1024
		case "B":
		default:
			return 0, SizeParseError(fmt.Errorf("%s is not a valid size suffix", parts[2]))
		}
	}
	return uint64(f), nil
}
// sizeStringer takes a number and returns a string size.
// If unit names a suffix (B..PB) the value is rendered in that unit;
// otherwise the largest suffix that keeps the value >= 1 is chosen.
func sizeStringer(s uint64, unit string) string {
	suffixes := []string{"B", "KB", "MB", "GB", "TB", "PB"}
	var i int
	var suffix string
	for i, suffix = range suffixes {
		if unit == suffix {
			break
		}
		if s < uint64(1)<<((uint64(i)+1)*10) {
			break
		}
	}
	if i == 0 {
		return fmt.Sprintf("%dB", s)
	}
	value := float64(s) / float64(uint64(1)<<(uint64(i)*10))
	return fmt.Sprintf("%s%s", strconv.FormatFloat(value, 'g', -1, 64), suffix)
}
// runCommand executes command with args, logging invocation and result when
// DbgCmd is enabled, and returns the combined stdout+stderr output.
func runCommand(command string, args ...string) (string, error) {
	dmsg(DbgCmd, "Run Command: %s %v\n", command, args)
	out, err := exec.Command(command, args...).CombinedOutput()
	dmsg(DbgCmd, "Returned: %s\n%v\n", string(out), err)
	if err == nil {
		return string(out), nil
	}
	return string(out), fmt.Errorf("Command: %s failed\n%v\n%s", command, err, string(out))
}
// runCommandNoStdErr executes command with args like runCommand but captures
// stdout only (stderr is discarded).
func runCommandNoStdErr(command string, args ...string) (string, error) {
	dmsg(DbgCmd, "Run Command: %s %v\n", command, args)
	out, err := exec.Command(command, args...).Output()
	dmsg(DbgCmd, "Returned: %s\n%v\n", string(out), err)
	if err == nil {
		return string(out), nil
	}
	return string(out), fmt.Errorf("Command: %s failed\n%v\n%s", command, err, string(out))
}
// runParted runs parted with the provided options.
// It always passes "-a opt -s" (optimal alignment, script mode).
func runParted(options ...string) error {
	dmsg(DbgCmd, "Parted Command: parted -a opt -s %s\n", strings.Join(options, " "))
	args := append([]string{"-a", "opt", "-s"}, options...)
	out, err := exec.Command("parted", args...).CombinedOutput()
	dmsg(DbgCmd, "Parted Returned: %s\n%v\n", string(out), err)
	if err != nil {
		err = fmt.Errorf("Parted Failed: %v\n%s", err, string(out))
	}
	return err
}
// udevUpdate causes the system to rescan devices: trigger re-fires udev
// events, settle waits for the event queue to drain so new device nodes
// exist before we continue.
func udevUpdate() error {
	for _, action := range []string{"trigger", "settle"} {
		if out, err := exec.Command("udevadm", action).CombinedOutput(); err != nil {
			return fmt.Errorf("udevadm %s Failed: %v\n%s", action, err, string(out))
		}
	}
	return nil
}
// mountTmpFS creates a temp directory and mounts path onto it, returning
// the mount point. The directory is removed again if the mount fails.
func mountTmpFS(path string) (string, error) {
	dir, err := ioutil.TempDir("/tmp", "example")
	if err != nil {
		dmsg(DbgAction, "Failed to create tempdir: %v\n", err)
		return "", err
	}
	if _, err = exec.Command("mount", path, dir).CombinedOutput(); err != nil {
		dmsg(DbgAction, "Failed to mount tempdir: %s on %s: %v\n", path, dir, err)
		os.RemoveAll(dir)
		return "", err
	}
	return dir, nil
}
// unmountTmpFS unmounts the temporary mount at path and removes the
// directory. The directory is removed even when the unmount fails, and
// the unmount error (if any) is returned.
func unmountTmpFS(path string) error {
	_, err := exec.Command("umount", path).CombinedOutput()
	if err != nil {
		// Fixed copy-paste in the debug message: this is the unmount path.
		dmsg(DbgAction, "Failed to unmount tempdir: %s: %v\n", path, err)
	}
	os.RemoveAll(path)
	return err
}
// byLen implements sort.Interface over a string slice, ordering by
// string length (shortest first). Used to mount parent paths before
// their children.
type byLen []string

// Len returns the number of strings.
func (s byLen) Len() int { return len(s) }

// Less orders by string length, ascending.
func (s byLen) Less(i, j int) bool { return len(s[i]) < len(s[j]) }

// Swap exchanges two elements.
func (s byLen) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// getAllFileSystems collects every configured filesystem on the given
// disks (whole-disk and per-partition) and LVM logical volumes. It
// returns the mount paths sorted shortest-first (so parent mounts sort
// before children) and a map from mount path to the providing device.
func getAllFileSystems(disks Disks, vgs VolumeGroups) ([]string, map[string]string) {
	fsPathMap := map[string]string{}
	fsPathList := []string{}
	for _, d := range disks {
		if d.FileSystem != nil && d.FileSystem.Mount != "" {
			fsPathMap[d.FileSystem.Mount] = d.Path
			fsPathList = append(fsPathList, d.FileSystem.Mount)
		}
		for _, p := range d.Partitions {
			if p.FileSystem != nil && p.FileSystem.Mount != "" {
				// Partition device name is the parent disk path plus the
				// partition number, e.g. /dev/sda + 1.
				fsPathMap[p.FileSystem.Mount] = fmt.Sprintf("%s%d", p.parent.Path, p.ID)
				fsPathList = append(fsPathList, p.FileSystem.Mount)
			}
		}
	}
	// GREG: SwRaid — software RAID members are not handled here yet.
	for _, vg := range vgs {
		for _, lv := range vg.LogicalVolumes {
			if lv.FileSystem != nil && lv.FileSystem.Mount != "" {
				fsPathMap[lv.FileSystem.Mount] = lv.Path
				fsPathList = append(fsPathList, lv.FileSystem.Mount)
			}
		}
	}
	// Shortest path first so "/" precedes "/var", "/var" precedes
	// "/var/log", and so on.
	sort.Sort(byLen(fsPathList))
	return fsPathList, fsPathMap
}
// mountAll mounts every configured filesystem from disks and vgs under a
// fresh temp directory, creating mount points as needed. Filesystems are
// mounted shortest-path-first (see getAllFileSystems) so parents are in
// place before their children. On any failure everything mounted so far
// is torn down and the error returned.
func mountAll(disks Disks, vgs VolumeGroups) (string, error) {
	fsPathList, fsPathMap := getAllFileSystems(disks, vgs)
	dir, err := ioutil.TempDir("/tmp", "example")
	if err != nil {
		dmsg(DbgAction, "Failed to create tempdir: %v\n", err)
		return "", err
	}
	for _, fs := range fsPathList {
		// Mount point is the filesystem's mount path re-rooted under dir.
		path := fmt.Sprintf("%s%s", dir, fs)
		if err := os.MkdirAll(path, 0755); err != nil {
			unmountAll(dir, disks, vgs)
			return "", err
		}
		if _, err := runCommand("mount", fsPathMap[fs], path); err != nil {
			unmountAll(dir, disks, vgs)
			return "", err
		}
	}
	return dir, nil
}
// unmountAll unmounts every filesystem previously mounted under dir by
// mountAll, in reverse (deepest-first) order, then removes dir.
// NOTE(review): an unmount failure aborts the loop, leaving earlier
// filesystems mounted and dir in place — confirm that is intended.
func unmountAll(dir string, disks Disks, vgs VolumeGroups) error {
	fsPathList, _ := getAllFileSystems(disks, vgs)
	for i := len(fsPathList) - 1; i >= 0; i-- {
		fs := fsPathList[i]
		path := fmt.Sprintf("%s%s", dir, fs)
		if _, err := runCommand("umount", path); err != nil {
			return err
		}
	}
	return os.RemoveAll(dir)
}
|
[
0
] |
package main
import (
"context"
"fmt"
"os"
"github.com/go-redis/redis/v8"
)
var ctx = context.Background()
// consume blocks forever, printing every message delivered on the given
// subscription. It panics if the subscription cannot be confirmed.
func consume(pubsub *redis.PubSub) {
	// Wait for confirmation that subscription is created before publishing anything.
	if _, err := pubsub.Receive(ctx); err != nil {
		panic(err)
	}
	// Messages arrive via a Go channel; the range only ends when the
	// PubSub is closed.
	for msg := range pubsub.Channel() {
		fmt.Println(msg.Channel, msg.Payload)
	}
}
// main either subscribes ("sub" argument: print messages forever) or
// publishes a single "hello" message to mychannel1 on a local Redis.
func main() {
	var err error
	rdb := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Password: "", // no password set
		DB:       1,  // use DB 1 (not the default DB 0)
	})
	// Ping result is printed for diagnostics; a failure here is not fatal.
	pong, err := rdb.Ping(ctx).Result()
	fmt.Println(pong, err)
	pubsub := rdb.Subscribe(ctx, "mychannel1")
	args := os.Args
	// fmt.Println(args[1])
	if len(args) >= 2 {
		switch args[1] {
		case "sub":
			fmt.Println("Sub")
			consume(pubsub)
			return
		}
	}
	fmt.Println("Pub")
	// Publish a message.
	err = rdb.Publish(ctx, "mychannel1", "hello").Err()
	if err != nil {
		panic(err)
	}
}
|
[
7
] |
package main
import (
"fmt"
)
// Sqrt returns an approximation of the square root of x computed with
// Newton's method, starting from z = 1. Iteration stops once z*z is
// within a relative tolerance of x, with a safety cap of 100 rounds; the
// original ran a fixed four iterations (n += n), which does not converge
// for large inputs. x is assumed to be non-negative.
func Sqrt(x float64) float64 {
	z := 1.0
	for i := 0; i < 100; i++ {
		delta := z*z - x
		if delta < 0 {
			delta = -delta
		}
		// Converged: further refinement is below double-precision noise.
		if delta < 1e-12*(1+x) {
			break
		}
		z = z - (z*z-x)/(2*z)
	}
	return z
}
// main prints the Newton-method approximation of sqrt(2).
func main() {
	root := Sqrt(2)
	fmt.Println(root)
}
|
[
0
] |
package helpers
import (
"github.com/gin-gonic/gin"
"github.com/restuwahyu13/gin-rest-api/schemas"
)
// APIResponse writes a uniform JSON envelope for the request. Statuses of
// 400 and above abort the handler chain so no later middleware can write.
func APIResponse(ctx *gin.Context, Message string, StatusCode int, Method string, Data interface{}) {
	payload := schemas.SchemaResponses{
		StatusCode: StatusCode,
		Method:     Method,
		Message:    Message,
		Data:       Data,
	}
	if StatusCode < 400 {
		ctx.JSON(StatusCode, payload)
		return
	}
	ctx.AbortWithStatusJSON(StatusCode, payload)
}
// ValidatorErrorResponse aborts the request with a validation-error
// envelope carrying the given error payload.
func ValidatorErrorResponse(ctx *gin.Context, StatusCode int, Method string, Error interface{}) {
	ctx.AbortWithStatusJSON(StatusCode, schemas.SchemaErrorResponse{
		StatusCode: StatusCode,
		Method:     Method,
		Error:      Error,
	})
}
|
[
2
] |
package main
import (
"flag"
"fmt"
)
// main is intentionally empty; this file exists to demonstrate the
// celsiusFlag implementation of flag.Value.
func main() {
}
// celsiusFlag satisfies flag.Value by embedding a Celsius temperature.
type celsiusFlag struct {
	Celsius
}

// Celsius is a temperature in degrees Celsius.
type Celsius float64

// Fahrenheit is a temperature in degrees Fahrenheit.
type Fahrenheit float64

// CToF converts a Celsius temperature to Fahrenheit.
func CToF(c Celsius) Fahrenheit {
	return Fahrenheit(c*9/5 + 32)
}

// FToC converts a Fahrenheit temperature to Celsius.
func FToC(f Fahrenheit) Celsius {
	return Celsius((f - 32) * 5 / 9)
}
// Set implements flag.Value. It parses a temperature such as "100C" or
// "212F" (also "oC"/"oF" suffixes) and stores the Celsius equivalent.
func (f *celsiusFlag) Set(s string) error {
	var unit string // was named "uint", shadowing the builtin type
	var value float64
	// Sscanf parses from the string s; the original called fmt.Scanf,
	// which reads from stdin and treats s as the format string.
	fmt.Sscanf(s, "%f%s", &value, &unit)
	switch unit {
	case "C", "oC":
		f.Celsius = Celsius(value)
		return nil
	case "F", "oF":
		f.Celsius = FToC(Fahrenheit(value))
		return nil
	}
	return fmt.Errorf("invalid temperature %q", s)
}
// CelsiusFlag defines a Celsius flag with the given name, default value
// and usage string, and returns a pointer to the flag's value.
func CelsiusFlag(name string, value Celsius, usage string) *Celsius {
	cf := celsiusFlag{value}
	flag.CommandLine.Var(&cf, name, usage)
	return &cf.Celsius
}
|
[
1
] |
package main
import (
"fmt"
)
// is reports whether a is an ASCII letter.
func is(a byte) bool {
	return (a >= 'A' && a <= 'Z') || (a >= 'a' && a <= 'z')
}

// reverseOnlyLetters reverses the letters of S while leaving every
// non-letter character in its original position (LeetCode 917).
func reverseOnlyLetters(S string) string {
	b := []byte(S)
	lo, hi := 0, len(b)-1
	for lo < hi {
		// Skip past non-letters on either side, one step per pass.
		if !is(b[lo]) {
			lo++
		}
		if !is(b[hi]) {
			hi--
		}
		// Swap only when both cursors sit on letters.
		if is(b[lo]) && is(b[hi]) {
			b[lo], b[hi] = b[hi], b[lo]
			lo++
			hi--
		}
	}
	return string(b)
}
// main demonstrates reverseOnlyLetters on an input with a single letter.
func main() {
	result := reverseOnlyLetters("-S2,_")
	fmt.Println(result)
}
|
[
2
] |
package variables
// START OMIT
// isValid reports whether v lies within the inclusive range [0, 100].
// The "// HL" markers highlight the var block on the presentation slide.
func isValid(v int) bool {
	var ( // HL
		min = 0 // HL
		max = 100 // HL
	) // HL
	if v < min {
		return false
	}
	if v > max {
		return false
	}
	return true
}
// END OMIT
|
[
1
] |
/*
App: JJService
Author: Landers
Github: https://github.com/landers1037
Date: 2020-09-23
*/
package service
import (
"encoding/json"
"io/ioutil"
"net/http"
"os"
"path"
"github.com/gin-gonic/gin"
"jjservice/src/util"
)
// initDefault wires up the default route group: home pages, an about
// fallback, plus the logo, login and changelog API endpoints.
func initDefault(r *gin.Engine) {
	about := func(c *gin.Context) {
		util.JJResponse(c, http.StatusOK, "default page", c.FullPath())
	}
	group := r.Group("")
	{
		group.GET("/", home)
		group.GET("/api", home)
		group.GET("/about", about)
		group.GET("/api/logo", logo)
		group.POST("/api/login", login)
		group.POST("/api/changelog", changelog)
	}
}
// Home page handler, served for both "/" and "/api".
// home serves the landing response for both "/" and "/api".
func home(c *gin.Context) {
	util.JJResponse(c, http.StatusOK, "welcome to JJ Service", "Powered by app.renj.io")
}
// logo returns the site logo image.
// logo serves the site logo image from the static file directory.
func logo(c *gin.Context) {
	// Renamed the local (was "path") so it no longer shadows the imported
	// "path" package, and build the location with path.Join instead of
	// string concatenation.
	cwd, _ := os.Getwd()
	logoFile := path.Join(cwd, "staticfile", "logo.png")
	util.JJFile(
		c,
		logoFile,
	)
}
// login validates the CSRF token from the request header; on a match it
// responds with a freshly encrypted token, otherwise with "".
func login(c *gin.Context) {
	// Validate the token supplied via the request header.
	inToken := c.Request.Header.Get("tokenstring")
	token := ""
	if inToken != "" && inToken == util.Conf.CrsfToken {
		token = util.TokenEncrypt()
	}
	util.JJResponse(c, http.StatusOK, "Login to JJ Service", token)
}
// changelog serves the parsed contents of conf/update.json. Read and
// decode failures both fall back to an empty payload; the original
// ignored json.Unmarshal's error and could serve a partial result.
func changelog(c *gin.Context) {
	cwd, _ := os.Getwd()
	var logE []interface{}
	data, err := ioutil.ReadFile(path.Join(cwd, "conf", "update.json"))
	if err == nil {
		err = json.Unmarshal(data, &logE)
	}
	if err != nil {
		util.JJResponse(
			c,
			http.StatusOK,
			"This is jjservice's change logs.",
			"",
		)
		return
	}
	util.JJResponse(
		c,
		http.StatusOK,
		"This is jjservice's change logs.",
		logE,
	)
}
|
[
0
] |
package models
import (
"encoding/base64"
"encoding/json"
"fmt"
"github.com/jmoiron/sqlx"
)
// User models a row of the users table.
type User struct {
	Id int `json:"id"`
	Email string `json:"email"`
	Fans int `json:"fans"`
	Videos int `json:"videos"`
	Flavor string `json:"flavor"`
	CreatedAt int64 `json:"created_at"`
}

// USER_SELECT is the base projection shared by all user queries; the
// created_at alias matches sqlx's lowercase field mapping.
const USER_SELECT = "SELECT id, fans, videos, email, UNIX_TIMESTAMP(created_at) as createdat from users"

// Encode serializes the user to JSON wrapped in base64.
func (u *User) Encode() string {
	payload, _ := json.Marshal(u)
	return base64.StdEncoding.EncodeToString(payload)
}

// DecodeUser reverses Encode, returning nil when the input cannot be
// decoded back into a User.
func DecodeUser(s string) *User {
	raw, _ := base64.StdEncoding.DecodeString(s)
	var u User
	if err := json.Unmarshal(raw, &u); err != nil {
		return nil
	}
	return &u
}
// SelectUsers returns all users, newest first. The second return value
// is an error message, "" on success.
func SelectUsers(db *sqlx.DB) ([]User, string) {
	users := []User{}
	query := fmt.Sprintf("%s order by created_at desc", USER_SELECT)
	if err := db.Select(&users, query); err != nil {
		return users, err.Error()
	}
	return users, ""
}
// SelectUser fetches a single user by id. The second return value is an
// error message, "" on success; a missing row yields a zero-valued User.
func SelectUser(db *sqlx.DB, id int) (*User, string) {
	user := User{}
	sql := fmt.Sprintf("%s where id=:id", USER_SELECT)
	rows, err := db.NamedQuery(sql, map[string]interface{}{"id": id})
	if err != nil {
		return &user, err.Error()
	}
	// The original leaked the rows (and its connection) by never closing.
	defer rows.Close()
	if rows.Next() {
		// The original discarded StructScan's error, silently returning a
		// zero-valued user on scan failure.
		if err := rows.StructScan(&user); err != nil {
			return &user, err.Error()
		}
	}
	return &user, ""
}
// IncrementUserCount bumps the named counter column for a user.
// NOTE(review): field is interpolated directly into the SQL text —
// callers must only pass trusted, hard-coded column names, never user
// input.
func IncrementUserCount(db *sqlx.DB, field string, id int) string {
	query := fmt.Sprintf("UPDATE users set %s=%s+1 where id=:id", field, field)
	if _, err := db.NamedExec(query, map[string]interface{}{"id": id}); err != nil {
		return err.Error()
	}
	return ""
}
// UpdateUser stores the pass phrase (hashed by MySQL's SHA1) for the
// user with the given email. Returns an error message, "" on success.
// NOTE(review): SHA1 is a weak password hash — consider bcrypt/argon2.
func UpdateUser(db *sqlx.DB, phrase, email string) string {
	params := map[string]interface{}{"phrase": phrase, "email": email}
	if _, err := db.NamedExec("UPDATE users set phrase=SHA1(:phrase) where email=:email", params); err != nil {
		return err.Error()
	}
	return ""
}
|
[
4
] |
package random
import (
"math/rand"
"strconv"
"time"
"unsafe"
)
// Int generates a pseudo random integer in the half-open interval
// [min, max). When max <= min it returns min instead of panicking in
// rand.Intn, which the original did.
// NOTE(review): reseeding the global source on every call is unnecessary
// and weakens randomness under rapid calls; consider seeding once.
func Int(min, max int) int {
	if max <= min {
		return min
	}
	rand.Seed(time.Now().UnixNano())
	return rand.Intn(max-min) + min
}

// IntAsString generates a pseudo random integer, in string format, in
// the range between min and max.
func IntAsString(min, max int) string {
	return strconv.Itoa(Int(min, max))
}
// String generates a pseudo random string of n letters.
//
// It uses the letterIdxBits trick: each rand.Int63 call yields 63 random
// bits, consumed 6 at a time (enough to index the 52-letter alphabet),
// so one draw serves up to 10 letters. Indices >= len(letterBytes) are
// rejected and retried to keep the distribution uniform.
func String(n int) string {
	const (
		letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
		letterIdxBits = 6
		letterIdxMask = 1<<letterIdxBits - 1
		letterIdxMax = 63 / letterIdxBits
	)
	// NOTE(review): reseeding the global source on every call is
	// unnecessary and hurts randomness under rapid calls.
	rand.Seed(time.Now().UnixNano())
	b := make([]byte, n)
	for i, cache, remain := n-1, rand.Int63(), letterIdxMax; i >= 0; {
		if remain == 0 {
			// Out of random bits; draw a fresh 63-bit word.
			cache, remain = rand.Int63(), letterIdxMax
		}
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			b[i] = letterBytes[idx]
			i--
		}
		cache >>= letterIdxBits
		remain--
	}
	// Zero-copy []byte -> string conversion; relies on b never being
	// mutated after this point.
	return *(*string)(unsafe.Pointer(&b))
}
|
[
1
] |
package main
import (
"errors"
"fmt"
"time"
)
// CircuitBreaker is the intended behavioral contract for a circuit breaker.
//
// NOTE(review): CircuitBreakerImpl does NOT satisfy this interface as
// written — its Execute takes func() (interface{}, error), and its
// IsClosed/LastError return values, unlike the signatures below. Nothing
// asserts satisfaction, so this compiles; align the signatures before
// relying on the interface.
type CircuitBreaker interface {
	State() string
	Execute(func()) (interface{}, error)
	Reset()
	HalfOpen()
	IsClosed()
	Trip(err error)
	LastError()
}
// CircuitBreakerImpl is a simple three-state circuit breaker. The zero
// value's empty state string behaves as tripped (Execute rejects calls)
// until Reset is called.
type CircuitBreakerImpl struct {
	openStateTimeout time.Time // set by Trip (now + 30s); not read anywhere in this file
	lastError error            // most recent error passed to Trip
	state string               // "closed", "open" or "half-open"
	failureCounter int         // consecutive failures while closed
	failureTreshold int        // failures tolerated before tripping (sic)
}
// Execute runs f under circuit-breaker control. Calls are rejected while
// the breaker is open; failures increment the counter (or re-trip a
// half-open breaker), successes reset it.
func (cb *CircuitBreakerImpl) Execute(f func() (interface{}, error)) (interface{}, error) {
	if !cb.IsClosed() && cb.State() != "half-open" {
		return nil, errors.New("Circuit breaker tripped!")
	}
	result, err := f()
	switch {
	case err == nil:
		cb.failureCounter = 0
		if cb.State() == "half-open" {
			cb.Reset()
		}
	case cb.State() == "half-open":
		// A failed probe re-opens the circuit immediately.
		cb.Trip(err)
	default:
		cb.failureCounter = cb.failureCounter + 1
		if cb.failureCounter > cb.failureTreshold {
			cb.Trip(err)
		}
	}
	if err != nil {
		fmt.Println(err.Error())
	}
	return result, err
}
// State returns the current state ("closed", "open" or "half-open").
func (cb *CircuitBreakerImpl) State() string {
	return cb.state
}

// Reset closes the circuit and clears the failure counter.
func (cb *CircuitBreakerImpl) Reset() {
	cb.state = "closed"
	cb.failureCounter = 0
	fmt.Println("Circuit Reset")
}

// HalfOpen moves the breaker into the probing state, letting one call
// through to test the downstream.
func (cb *CircuitBreakerImpl) HalfOpen() {
	cb.state = "half-open"
	fmt.Println("Circuit state changed to half-open")
}

// Trip opens the circuit after a failure and schedules a half-open probe.
func (cb *CircuitBreakerImpl) Trip(err error) {
	fmt.Println("circuit tripped to open")
	fmt.Println(err.Error())
	cb.lastError = err
	cb.state = "open"
	cb.failureCounter = 0
	cb.openStateTimeout = time.Now().Add(30 * time.Second)
	// After a cool-down, allow a single probe request through.
	go func() {
		time.Sleep(5 * time.Second)
		cb.HalfOpen()
	}()
}

// IsClosed reports whether requests may flow normally.
func (cb *CircuitBreakerImpl) IsClosed() bool {
	return cb.state == "closed"
}

// LastError returns the error that most recently tripped the breaker.
func (cb *CircuitBreakerImpl) LastError() error {
	return cb.lastError
}
|
[
0
] |
package main
import (
"bufio"
"flag"
"fmt"
"os"
"regexp"
"strconv"
"strings"
)
// validBetween reports whether val parses as an integer within the
// inclusive range [min, max]. Unparsable input is treated as 0.
func validBetween(val string, min, max int) bool {
	n, _ := strconv.Atoi(val)
	return n >= min && n <= max
}

// validByr validates a birth year (1920-2002).
func validByr(input string) bool {
	return validBetween(input, 1920, 2002)
}

// validIyr validates an issue year (2010-2020).
func validIyr(input string) bool {
	return validBetween(input, 2010, 2020)
}

// validEyr validates an expiration year (2020-2030).
func validEyr(input string) bool {
	return validBetween(input, 2020, 2030)
}

// validHgt validates a height: 150-193 cm or 59-76 in.
func validHgt(input string) bool {
	switch {
	case strings.Contains(input, "cm"):
		return validBetween(input[:len(input)-2], 150, 193)
	case strings.Contains(input, "in"):
		return validBetween(input[:len(input)-2], 59, 76)
	default:
		return false
	}
}
// validHcl reports whether input is '#' followed by exactly six
// lowercase hex digits.
func validHcl(input string) bool {
	ok, _ := regexp.MatchString("^#[a-f0-9]{6}$", input)
	return ok
}
// validEye is the set of allowed eye colors.
var validEye = map[string]int{"amb": 1, "blu": 1, "brn": 1, "gry": 1, "grn": 1, "hzl": 1, "oth": 1}

// validEcl reports whether input is one of the allowed eye colors.
func validEcl(input string) bool {
	_, found := validEye[input]
	return found
}
// validPID reports whether input is exactly nine digits.
func validPID(input string) bool {
	ok, _ := regexp.MatchString("^[0-9]{9}$", input)
	return ok
}
// main solves Advent of Code 2020 day 4: count passports that have all
// seven required fields (part A) and those whose fields also validate
// (part B). "cid" is optional and ignored.
func main() {
	fmt.Println("Day4")
	// Field name -> validator for each required field.
	validators := map[string]func(string) bool{
		"byr": validByr,
		"iyr": validIyr,
		"eyr": validEyr,
		"hgt": validHgt,
		"hcl": validHcl,
		"ecl": validEcl,
		"pid": validPID,
	}
	var fileName string
	flag.StringVar(&fileName, "input", "inputs/day01", "Filename to read")
	flag.Parse()
	fh, err := os.Open(fileName)
	if err != nil {
		panic(err)
	}
	scanner := bufio.NewScanner(fh)
	// Passports span multiple lines separated by blank lines; accumulate
	// lines into partialID and flush on each blank line.
	completeIds := []string{}
	var partialID string
	for scanner.Scan() {
		line := scanner.Text()
		if line == "" {
			partialID = strings.TrimSpace(partialID)
			completeIds = append(completeIds, partialID)
			partialID = ""
			continue
		}
		partialID = partialID + line + " "
	}
	// Flush the final passport (input may not end with a blank line).
	partialID = strings.TrimSpace(partialID)
	completeIds = append(completeIds, partialID)
	validA := 0
	validB := 0
	for _, id := range completeIds {
		parts := strings.Split(id, " ")
		validParts := true
		numValidFields := 0
		for _, part := range parts {
			// Each part is "field:value".
			index := strings.Index(part, ":")
			field, value := part[0:index], part[index+1:len(part)]
			if fieldValidator, ok := validators[field]; ok {
				numValidFields++
				if !fieldValidator(value) {
					validParts = false
				}
			}
		}
		// Part A: all seven required fields present, content ignored.
		if numValidFields == 7 {
			validA++
		}
		// Part B: present and individually valid.
		if validParts && numValidFields == 7 {
			validB++
		}
	}
	fmt.Printf("ValidA: %d ValidB: %d\n", validA, validB)
}
|
[
1
] |
/*
Copyright 2019 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"context"
"testing"
"google.golang.org/protobuf/proto"
"vitess.io/vitess/go/vt/topo"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
// checkShardReplication tests ShardReplication objects against a
// topo.Server implementation: missing-node reads, no-op and real
// updates, appends, and deletion at shard and keyspace level.
func checkShardReplication(t *testing.T, ctx context.Context, ts *topo.Server) {
	// Reading a record that was never created must yield NoNode.
	if _, err := ts.GetShardReplication(ctx, LocalCellName, "test_keyspace", "-10"); !topo.IsErrType(err, topo.NoNode) {
		t.Errorf("GetShardReplication(not there): %v", err)
	}
	sr := &topodatapb.ShardReplication{
		Nodes: []*topodatapb.ShardReplication_Node{
			{
				TabletAlias: &topodatapb.TabletAlias{
					Cell: "c1",
					Uid: 1,
				},
			},
		},
	}
	// A NoUpdateNeeded result from the update callback is not a failure.
	if err := ts.UpdateShardReplicationFields(ctx, LocalCellName, "test_keyspace", "-10", func(oldSr *topodatapb.ShardReplication) error {
		return topo.NewError(topo.NoUpdateNeeded, LocalCellName)
	}); err != nil {
		t.Fatalf("UpdateShardReplicationFields() failed: %v", err)
	}
	// Overwrite the record with sr's single c1 node.
	if err := ts.UpdateShardReplicationFields(ctx, LocalCellName, "test_keyspace", "-10", func(oldSr *topodatapb.ShardReplication) error {
		proto.Reset(oldSr)
		proto.Merge(oldSr, sr)
		return nil
	}); err != nil {
		t.Fatalf("UpdateShardReplicationFields() failed: %v", err)
	}
	if sri, err := ts.GetShardReplication(ctx, LocalCellName, "test_keyspace", "-10"); err != nil {
		t.Errorf("GetShardReplication(new guy) failed: %v", err)
	} else {
		if len(sri.Nodes) != 1 ||
			sri.Nodes[0].TabletAlias.Cell != "c1" ||
			sri.Nodes[0].TabletAlias.Uid != 1 {
			t.Errorf("GetShardReplication(new guy) returned wrong value: %v", *sri)
		}
	}
	// Append a second node (c3) through the update callback.
	if err := ts.UpdateShardReplicationFields(ctx, LocalCellName, "test_keyspace", "-10", func(sr *topodatapb.ShardReplication) error {
		sr.Nodes = append(sr.Nodes, &topodatapb.ShardReplication_Node{
			TabletAlias: &topodatapb.TabletAlias{
				Cell: "c3",
				Uid: 3,
			},
		})
		return nil
	}); err != nil {
		t.Errorf("UpdateShardReplicationFields() failed: %v", err)
	}
	if sri, err := ts.GetShardReplication(ctx, LocalCellName, "test_keyspace", "-10"); err != nil {
		t.Errorf("GetShardReplication(after append) failed: %v", err)
	} else {
		if len(sri.Nodes) != 2 ||
			sri.Nodes[0].TabletAlias.Cell != "c1" ||
			sri.Nodes[0].TabletAlias.Uid != 1 ||
			sri.Nodes[1].TabletAlias.Cell != "c3" ||
			sri.Nodes[1].TabletAlias.Uid != 3 {
			t.Errorf("GetShardReplication(new guy) returned wrong value: %v", *sri)
		}
	}
	if err := ts.DeleteShardReplication(ctx, LocalCellName, "test_keyspace", "-10"); err != nil {
		t.Errorf("DeleteShardReplication(existing) failed: %v", err)
	}
	// Deleting again must report NoNode.
	if err := ts.DeleteShardReplication(ctx, LocalCellName, "test_keyspace", "-10"); !topo.IsErrType(err, topo.NoNode) {
		t.Errorf("DeleteShardReplication(again) returned: %v", err)
	}
	// Some implementations may already remove the directory if no data is in there, so we ignore topo.ErrNoNode.
	if err := ts.DeleteKeyspaceReplication(ctx, LocalCellName, "test_keyspace"); err != nil && !topo.IsErrType(err, topo.NoNode) {
		t.Errorf("DeleteKeyspaceReplication(existing) failed: %v", err)
	}
	// The second time though, it should be gone.
	if err := ts.DeleteKeyspaceReplication(ctx, LocalCellName, "test_keyspace"); !topo.IsErrType(err, topo.NoNode) {
		t.Errorf("DeleteKeyspaceReplication(again) returned: %v", err)
	}
}
|
[
4
] |
package main
import (
"context"
"flag"
"fmt"
"log"
"strings"
"time"
pb "github.com/youngbupark/raft-membership/pkg/proto"
epb "google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var (
name = flag.String("name", "membersip", "membership")
serverAddr = flag.String("server_addr", "localhost:10000", "The server address in the format of host:port")
)
// newWatchStream dials addr and opens a Watch stream on the membership
// service. Both return values are nil on any failure.
func newWatchStream(addr string) (*grpc.ClientConn, pb.RaftMemberShip_WatchClient) {
	// The original built an opts slice it never passed to Dial; pass the
	// dial option directly instead.
	conn, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		log.Printf("fail to dial: %v\n", err)
		return nil, nil
	}
	client := pb.NewRaftMemberShipClient(conn)
	stream, err := client.Watch(context.Background())
	if err != nil {
		log.Printf("%v.Watch(_) = _, %v\n", client, err)
		conn.Close()
		return nil, nil
	}
	return conn, stream
}
// runMessageLoop sends a watch request every two seconds while a
// background goroutine receives responses. When the server answers
// PermissionDenied with leader metadata, the new leader's address is
// returned; otherwise the terminal receive/send error is returned.
//
// NOTE(review): leaderAddr and returnError are written by the receiver
// goroutine and read here without synchronization — a data race; guard
// with a channel or mutex and confirm the intended hand-off.
func runMessageLoop(stream pb.RaftMemberShip_WatchClient) (string, error) {
	leaderAddr := ""
	var returnError error
	go func() {
		for {
			in, err := stream.Recv()
			returnError = err
			if err != nil {
				errStatus, ok := status.FromError(err)
				if !ok {
					log.Printf("Failed to send a note 1: %v \n", err)
					return
				}
				log.Printf("Failed to send a note 2: %v\n", err)
				switch errStatus.Code() {
				case codes.PermissionDenied:
					// The server embeds the current leader's address in the
					// error details; surface it to the caller.
					errStatus, _ := status.FromError(err)
					errInfo := errStatus.Details()[0].(*epb.ErrorInfo)
					leaderAddr = errInfo.Metadata["leader_addr"]
					returnError = nil
					stream.CloseSend()
					log.Printf("new leader elected:%s", leaderAddr)
				}
				return
			}
			log.Printf("Got response: %s", in.Data)
		}
	}()
	for i := 0; i <= 10000; i++ {
		req := &pb.WatchRequest{
			Version: "0",
			Name: fmt.Sprintf("membership-%d", i),
		}
		fmt.Printf("Sending request - %v\n", req)
		err := stream.Send(req)
		if err != nil {
			returnError = err
			log.Printf("Failed to send a note 2: %v\n", err)
			break
		}
		time.Sleep(time.Second * 2)
	}
	return leaderAddr, returnError
}
// main cycles through the configured servers, opening a watch stream
// against one at a time and rotating to the next on failure or on a
// leader redirect.
func main() {
	flag.Parse()
	servers := strings.Split(*serverAddr, ",")
	currentServerIndex := 0
	for {
		log.Printf("\n\nTry to connect server: %s", servers[currentServerIndex])
		conn, stream := newWatchStream(servers[currentServerIndex])
		if stream != nil {
			leader, err := runMessageLoop(stream)
			// NOTE(review): the reported leader address is not used to pick
			// the next server — rotation is plain round-robin. Confirm intent.
			if leader != "" || err != nil {
				currentServerIndex = (currentServerIndex + 1) % len(servers)
			}
			conn.Close()
		}
		time.Sleep(time.Second * 1)
	}
}
|
[
7
] |
package interpreter
import (
"errors"
"log"
"strings"
)
// Canonical singleton objects shared by the evaluator so type and value
// comparisons against nil/true/false are cheap.
var (
	NilObj = &Object{}
	TrueObj = &Object{Type: BOOL, Value: true}
	FalseObj = &Object{Type: BOOL, Value: false}
)
// Eval evaluates expr within scope s, dispatching on the expression's
// concrete type. Uses the idiomatic type-switch binding (switch e :=
// expr.(type)) instead of the original's repeated type assertions.
func Eval(s *Scope, expr Expression) (*Object, error) {
	switch e := expr.(type) {
	case *Object:
		return e, nil
	case *Concat:
		return evalConcat(s, e)
	case *Assign:
		return evalAssign(s, e)
	case *If:
		return evalIfStmt(s, e)
	case *BlockExpr:
		return evalBlockExpr(s, e)
	case *Lookup:
		return evalLookup(s, e)
	case *Return:
		return Eval(s, e.Expr)
	case *Lambda:
		return evalLambda(s, e)
	case *Infix:
		return evalInfixExpr(s, e)
	default:
		// log.Fatal exits the process; the return below only satisfies
		// the compiler.
		log.Fatal("Unknown expression ", expr.Info())
	}
	return nil, nil
}
// evalConcat evaluates each source expression and joins their string
// representations into a single STRING object.
func evalConcat(s *Scope, c *Concat) (*Object, error) {
	parts := make([]string, 0, len(c.Source))
	for _, src := range c.Source {
		obj, err := NilErrCheck(Eval(s, src))
		if err != nil {
			return nil, err
		}
		parts = append(parts, obj.String())
	}
	return &Object{Type: STRING, Value: strings.Join(parts, "")}, nil
}
// evalAssign evaluates the right-hand side and stores it into the target
// named by the left-hand side. Only simple Object targets are assignable.
func evalAssign(s *Scope, a *Assign) (*Object, error) {
	// Eval RHS
	rhs, err := Eval(s, a.RHS)
	if err != nil {
		return nil, err
	}
	// Find the object and check if it exists in scope.
	// Burst if it is not an object or child field of a struct.
	// Idiomatic type-switch binding replaces the original's assertion.
	switch lhs := a.LHS.(type) {
	case *Object:
		obj, err := s.Update(lhs.Name, rhs)
		if err != nil {
			return nil, err
		}
		return obj, nil
	default:
		return nil, errors.New("Can not assign to LHS")
	}
}
// evalIfStmt evaluates the condition and then exactly one branch.
func evalIfStmt(s *Scope, i *If) (*Object, error) {
	cond, err := Eval(s, i.Condition)
	if err != nil {
		return nil, err
	}
	if !isTruthy(cond) {
		return Eval(s, i.Fail)
	}
	return Eval(s, i.Success)
}
// evalBlockExpr evaluates each expression in order. A Return expression
// short-circuits with its value; otherwise the block yields Nil.
func evalBlockExpr(s *Scope, b *BlockExpr) (*Object, error) {
	for _, expr := range b.Expressions {
		res, err := Eval(s, expr)
		if err != nil {
			return nil, err
		}
		if _, isReturn := expr.(*Return); isReturn {
			return res, nil
		}
	}
	return NilObj, nil
}
// evalLookup resolves a name in scope; unknown names are an error.
func evalLookup(s *Scope, l *Lookup) (*Object, error) {
	obj := s.Lookup(l.Name)
	if obj.Type != NilObj.Type {
		return obj, nil
	}
	return nil, errors.New(l.Name + " not found")
}
// evalLambda evaluates each argument in the calling scope, binds it in a
// fresh child scope, then runs the body there.
func evalLambda(s *Scope, l *Lambda) (*Object, error) {
	child := NewScope("child", s)
	for name, arg := range l.Params {
		val, err := Eval(s, arg)
		if err != nil {
			return nil, err
		}
		// NOTE(review): Update's result is discarded here — confirm it
		// cannot fail when binding a fresh parameter.
		child.Update(name, val)
	}
	return evalBlockExpr(child, l.Body)
}
// evalInfixExpr evaluates a binary expression. Arithmetic and ordering
// operators require integer operands; equality, AND and OR accept any.
// NOTE(review): DIVIDE and MOD panic on a zero right-hand side —
// consider returning an error instead.
func evalInfixExpr(s *Scope, i *Infix) (*Object, error) {
	// Operators in this set only accept integer operands.
	var IntInput = map[InfixOperation]bool{
		ADD: true,
		SUBTRACT: true,
		MULTIPLY: true,
		DIVIDE: true,
		MOD: true,
		GREATER: true,
		LESSER: true,
	}
	l, err := NilErrCheck(Eval(s, i.Left))
	if err != nil {
		return nil, err
	}
	r, err := NilErrCheck(Eval(s, i.Right))
	if err != nil {
		return nil, err
	}
	var returnObj = &Object{}
	if IntInput[i.Operator] {
		if l.Type != INT {
			return nil, errors.New("LHS is not an integer")
		}
		if r.Type != INT {
			return nil, errors.New("RHS is not an integer")
		}
	}
	switch i.Operator {
	case ADD:
		returnObj.Type = INT
		returnObj.Value = l.Value.(int) + r.Value.(int)
	case SUBTRACT:
		returnObj.Type = INT
		returnObj.Value = l.Value.(int) - r.Value.(int)
	case MULTIPLY:
		returnObj.Type = INT
		returnObj.Value = l.Value.(int) * r.Value.(int)
	case DIVIDE:
		returnObj.Type = INT
		returnObj.Value = l.Value.(int) / r.Value.(int)
	case MOD:
		returnObj.Type = INT
		returnObj.Value = l.Value.(int) % r.Value.(int)
	case GREATER:
		returnObj.Type = BOOL
		returnObj.Value = l.Value.(int) > r.Value.(int)
	case LESSER:
		returnObj.Type = BOOL
		returnObj.Value = l.Value.(int) < r.Value.(int)
	case EQUAL:
		// Equality requires both the same type and the same value.
		returnObj.Type = BOOL
		returnObj.Value = false
		if l.Type == r.Type && l.Value == r.Value {
			returnObj.Value = true
		}
	case NOTEQUAL:
		returnObj.Type = BOOL
		returnObj.Value = true
		if l.Type == r.Type && l.Value == r.Value {
			returnObj.Value = false
		}
	case AND:
		returnObj.Type = BOOL
		returnObj.Value = isTruthy(l) && isTruthy(r)
	case OR:
		returnObj.Type = BOOL
		returnObj.Value = isTruthy(l) || isTruthy(r)
	}
	return returnObj, nil
}
// NilErrCheck adapts an (obj, err) pair so callers can treat a nil
// object as an error as well.
func NilErrCheck(obj *Object, err error) (*Object, error) {
	switch {
	case err != nil:
		return nil, err
	case obj == nil:
		return nil, errors.New("Got nil from eval")
	}
	return obj, nil
}
|
[
7
] |
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"database/sql"
"fmt"
"strings"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/util/charset"
)
// checker validates that a MySQL schema is compatible with TiDB by
// parsing each table's CREATE TABLE statement.
type checker struct {
	db *sql.DB       // lazily opened connection; see connectDB
	dbName string    // database to connect to and check
	tbls []string    // tables to check; discovered via getTables when empty
	warnings int     // count of non-fatal compatibility warnings
	errs int         // count of tables that failed the check
}
// close releases the checker's database connection.
func (c *checker) close() error {
	return closeDB(c.db)
}
// connectDB opens the connection to the target database.
// The original called log.Fatal on failure, which exits the process and
// made the error return unreachable; report the error to the caller
// instead so check() can wrap and handle it.
func (c *checker) connectDB() (err error) {
	c.db, err = openDB(c.dbName)
	if err != nil {
		log.Errorf("Open database connection failed: %v", err)
	}
	return
}
// check validates every requested table in c.dbName, connecting and
// discovering tables on demand. Per-table failures are logged and
// counted rather than aborting the run.
func (c *checker) check() error {
	log.Infof("Checking database %s", c.dbName)
	if c.db == nil {
		if err := c.connectDB(); err != nil {
			return errors.Trace(err)
		}
	}
	if len(c.tbls) == 0 {
		c.tbls = make([]string, 0)
		if err := c.getTables(); err != nil {
			return errors.Trace(err)
		}
	}
	for _, t := range c.tbls {
		log.Infof("Checking table %s", t)
		if err := c.checkTable(t); err != nil {
			c.errs++
			log.Errorf("Check table %s failed with err: %s", t, errors.ErrorStack(err))
			continue
		}
		log.Infof("Check table %s succ", t)
	}
	return nil
}
// getTables populates c.tbls with every table name in the database.
func (c *checker) getTables() error {
	rs, err := querySQL(c.db, "show tables;")
	if err != nil {
		return errors.Trace(err)
	}
	defer rs.Close()
	for rs.Next() {
		var name string
		// The original ignored Scan's error, silently dropping tables.
		if err := rs.Scan(&name); err != nil {
			return errors.Trace(err)
		}
		c.tbls = append(c.tbls, name)
	}
	// Surface any iteration error; the original never checked rs.Err().
	return errors.Trace(rs.Err())
}
// getCreateTable returns the CREATE TABLE statement for table tn.
func (c *checker) getCreateTable(tn string) (string, error) {
	stmt := fmt.Sprintf("show create table `%s`;", tn)
	rs, err := querySQL(c.db, stmt)
	if err != nil {
		return "", errors.Trace(err)
	}
	defer rs.Close()
	if rs.Next() {
		var (
			name string
			cs   string
		)
		// The original ignored Scan's error, potentially returning "".
		if err := rs.Scan(&name, &cs); err != nil {
			return "", errors.Trace(err)
		}
		return cs, nil
	}
	return "", errors.Errorf("Can not find table %s", tn)
}
// checkTable fetches the table's DDL and validates it.
func (c *checker) checkTable(tableName string) error {
	createSQL, err := c.getCreateTable(tableName)
	if err != nil {
		return errors.Trace(err)
	}
	return errors.Trace(c.checkCreateSQL(createSQL))
}
// checkCreateSQL parses a CREATE TABLE statement and validates its AST.
func (c *checker) checkCreateSQL(createSQL string) error {
	stmt, err := parser.New().ParseOneStmt(createSQL, "", "")
	if err != nil {
		return errors.Annotatef(err, " parse %s error", createSQL)
	}
	// Analyze ast
	if err = c.checkAST(stmt); err != nil {
		log.Errorf("checkAST error: %s", err)
		return errors.Trace(err)
	}
	return nil
}
// checkAST validates the columns, constraints and table options of a
// CREATE TABLE statement.
func (c *checker) checkAST(stmt ast.StmtNode) error {
	st, ok := stmt.(*ast.CreateTableStmt)
	if !ok {
		return errors.Errorf("Expect CreateTableStmt but got %T", stmt)
	}
	// check columns
	for _, def := range st.Cols {
		if err := c.checkColumnDef(def); err != nil {
			return errors.Trace(err)
		}
	}
	// check constrains
	for _, cst := range st.Constraints {
		if err := c.checkConstraint(cst); err != nil {
			return errors.Trace(err)
		}
	}
	// check options
	for _, opt := range st.Options {
		if err := c.checkTableOption(opt); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// checkColumnDef validates a single column definition.
// Currently a placeholder: every column definition is accepted.
func (c *checker) checkColumnDef(def *ast.ColumnDef) error {
	return nil
}
// checkConstraint warns about constraints TiDB parses but ignores.
func (c *checker) checkConstraint(cst *ast.Constraint) error {
	if cst.Tp == ast.ConstraintForeignKey {
		log.Errorf("Foreign Key is parsed but ignored by TiDB.")
		c.warnings++
	}
	return nil
}
// checkTableOption rejects table charsets TiDB does not support.
func (c *checker) checkTableOption(opt *ast.TableOption) error {
	if opt.Tp != ast.TableOptionCharset {
		return nil
	}
	// Check charset
	cs := strings.ToLower(opt.StrValue)
	if cs != "binary" && !charset.ValidCharsetAndCollation(cs, "") {
		return errors.Errorf("Unsupported charset %s", opt.StrValue)
	}
	return nil
}
// querySQL runs query against db and returns the rows; the caller is
// responsible for closing them.
func querySQL(db *sql.DB, query string) (*sql.Rows, error) {
	log.Debugf("[query][sql]%s", query)
	rows, err := db.Query(query)
	if err != nil {
		log.Errorf("query sql[%s] failed %v", query, errors.ErrorStack(err))
		return nil, errors.Trace(err)
	}
	return rows, nil
}
|
[
7
] |
package scoredb
import (
"fmt"
"math"
"math/rand"
)
// ShardedDb fans indexing and queries out across several underlying
// databases.
type ShardedDb struct {
	Shards []StreamingDb
}

// reservedShardBits is how many high bits of an external id encode the
// shard number.
var reservedShardBits = uint(14)

// NewShardedDb wraps the given shards, failing when there are more
// shards than the id encoding can address.
func NewShardedDb(shards []StreamingDb) (*ShardedDb, error) {
	maxShards := (1 << reservedShardBits) - 1
	if len(shards) > maxShards {
		return nil, fmt.Errorf("Too many shards (%d); maximum number of shards is %d", len(shards), maxShards)
	}
	return &ShardedDb{Shards: shards}, nil
}

// ShardIdToExt combines a shard-local id with its shard number into a
// single external id.
func ShardIdToExt(idInShard int64, shardNum int) int64 {
	return (int64(shardNum) << uint(64-reservedShardBits)) | idInShard
}
// BulkIndex indexes all records into one randomly chosen shard and
// returns their external (shard-encoded) ids.
func (db ShardedDb) BulkIndex(records []map[string]float32) ([]int64, error) {
	// TODO do something more complex some day? Parallelize it like the query side?
	shard := rand.Intn(len(db.Shards))
	ids, err := db.Shards[shard].BulkIndex(records)
	if err != nil {
		return nil, err
	}
	for i, id := range ids {
		ids[i] = ShardIdToExt(id, shard)
	}
	return ids, nil
}
// QueryItr builds one iterator per shard and merges them in parallel.
func (db ShardedDb) QueryItr(scorer []interface{}) (DocItr, error) {
	shardItrs := make([]DocItr, len(db.Shards))
	for i, shard := range db.Shards {
		itr, err := shard.QueryItr(scorer)
		if err != nil {
			return nil, err
		}
		shardItrs[i] = itr
	}
	return NewParallelDocItr(shardItrs), nil
}
// CandidateResult is one scored document sent from a shard worker to the
// merging ParallelDocItr; WorkerNum identifies the sending shard, and a
// DocId of -1 is the worker's "done" sentinel.
type CandidateResult struct {
	DocId int64
	Score float32
	WorkerNum int
}

// Bounds is the exclusive score window results must fall inside.
type Bounds struct {
	min, max float32
}

// ParallelDocItr merges the outputs of several shard iterators, each
// running in its own goroutine (see RunItr).
type ParallelDocItr struct {
	score float32 // score of the current document
	docId int64   // external (shard-encoded) id of the current document
	NumAlive int  // workers that have not yet sent their done sentinel
	Bounds Bounds
	ResultChannel chan CandidateResult // workers -> merger
	Comms []chan Bounds               // merger -> each worker (bounds acks)
}
// RunItr drains itr on behalf of a ParallelDocItr: every in-bounds
// result is sent to resultChannel, after which the worker blocks until
// the merger replies on boundsChannel with (possibly tightened) bounds.
// A final CandidateResult with DocId == -1 signals this worker is done.
func RunItr(itr DocItr, myWorkerNum int, resultChannel chan CandidateResult, boundsChannel chan Bounds) {
	bounds := Bounds{min: float32(math.Inf(-1)), max: float32(math.Inf(1))}
	docId := int64(-1)
	var score float32
	for {
		if !itr.Next(docId + 1) {
			break
		}
		docId, score = itr.Cur()
		// Skip results already outside the current score window.
		if score <= bounds.min || score >= bounds.max {
			continue
		}
		resultChannel <- CandidateResult{DocId: docId, Score: score, WorkerNum: myWorkerNum}
		/*
			select {
			case newBounds, ok := <- boundsChannel:
				if ok {
					if bounds != newBounds {
						bounds = newBounds
						itr.SetBounds(bounds.min, bounds.max)
					}
				}
			}
		*/
		// Wait for the merger's acknowledgement; narrow our iterator's
		// bounds if the window changed.
		newBounds := <-boundsChannel
		if bounds != newBounds {
			bounds = newBounds
			itr.SetBounds(bounds.min, bounds.max)
		}
	}
	itr.Close()
	resultChannel <- CandidateResult{DocId: -1}
}
// NewParallelDocItr starts one RunItr goroutine per shard iterator and
// returns a merger that consumes their results over ResultChannel.
func NewParallelDocItr(parts []DocItr) *ParallelDocItr {
	op := ParallelDocItr{
		score: 0.0,
		docId: -1,
		NumAlive: len(parts),
		Bounds: Bounds{min: float32(math.Inf(-1)), max: float32(math.Inf(1))},
		ResultChannel: make(chan CandidateResult),
		Comms: make([](chan Bounds), len(parts)),
	}
	for idx, part := range parts {
		part := part // per-iteration copy for the goroutine below
		// Fold each part's bounds into the overall window.
		curMin, curMax := part.GetBounds()
		op.Bounds.min = Min(op.Bounds.min, curMin)
		op.Bounds.max = Max(op.Bounds.max, curMax)
		// Each worker gets its own bounds channel for acknowledgements.
		boundsChannel := make(chan Bounds)
		op.Comms[idx] = boundsChannel
		go RunItr(part, idx, op.ResultChannel, boundsChannel)
	}
	return &op
}
// Name identifies this iterator type.
func (op *ParallelDocItr) Name() string { return "ParallelDocItr" }

// SetBounds records a new score window; workers receive it with their
// next acknowledgement.
func (op *ParallelDocItr) SetBounds(min, max float32) bool {
	op.Bounds.min, op.Bounds.max = min, max
	return true
}

// GetBounds returns the current score window.
func (op *ParallelDocItr) GetBounds() (min, max float32) {
	return op.Bounds.min, op.Bounds.max
}
// Next advances to the next candidate whose score lies strictly inside the
// current bounds, draining worker results until one qualifies or every
// worker has sent its -1 sentinel. Note minId is unused here: ordering is
// driven by the workers themselves.
func (op *ParallelDocItr) Next(minId int64) bool {
	for {
		result := <-op.ResultChannel
		if result.DocId == -1 {
			// Sentinel: one worker finished.
			op.NumAlive -= 1
			if op.NumAlive <= 0 {
				return false
			}
		} else {
			workerNum := result.WorkerNum
			if result.Score > op.Bounds.min && result.Score < op.Bounds.max {
				// Translate the shard-local doc id to the external id space.
				op.docId = ShardIdToExt(result.DocId, workerNum)
				op.score = result.Score
				op.Comms[workerNum] <- op.Bounds // ack with current (possibly tightened) bounds
				return true
			} else {
				op.Comms[workerNum] <- op.Bounds // reject, but still unblock the worker
			}
		}
	}
}
// Close is currently a no-op (original author's comment: "unsure...").
// NOTE(review): workers still blocked sending on ResultChannel are not
// released here, so abandoning iteration early may leak goroutines — confirm.
func (op *ParallelDocItr) Close() {}

// Cur returns the doc id and score selected by the last successful Next.
func (op *ParallelDocItr) Cur() (int64, float32) {
	return op.docId, op.score
}
|
[
1
] |
package builtin
import (
	"fmt"
	"strings"

	"github.com/dhl1402/covidscript/internal/core"
)
// Join returns the builtin "join" function, which concatenates the string
// representations of an array's elements with a separator.
//
// Arguments:
//   array     — must evaluate to an array expression.
//   separator — must be a string; when undefined, "," is used.
//
// Fix: the original appended "elem + sep" for every element and then sliced
// off the trailing separator, doing quadratic string concatenation. This
// version uses a strings.Builder and writes the separator only between
// elements.
func Join() *core.FunctionExpression {
	return &core.FunctionExpression{
		Params: []core.Identifier{{Name: "array"}, {Name: "separator"}},
		NativeFunction: func(ec *core.ExecutionContext) (core.Expression, error) {
			arg1, _ := ec.Get("array")
			arexp, ok := arg1.(*core.ArrayExpression)
			if !ok {
				return nil, fmt.Errorf("Runtime error: first argument must be array.")
			}
			arg2, _ := ec.Get("separator")
			lexp, ok := arg2.(*core.LiteralExpression)
			if !ok || (lexp.Type != core.LiteralTypeString && lexp.Type != core.LiteralTypeUndefined) {
				return nil, fmt.Errorf("Runtime error: second argument must be string.")
			}
			// Default separator when the argument is left undefined.
			sep := ","
			if lexp.Type == core.LiteralTypeString {
				sep = lexp.Value
			}
			var b strings.Builder
			for i, elem := range arexp.Elements {
				if i > 0 {
					b.WriteString(sep)
				}
				b.WriteString(elem.ToString())
			}
			return &core.LiteralExpression{
				Type:  core.LiteralTypeString,
				Value: b.String(),
			}, nil
		},
	}
}
|
[
0
] |
package dao
// ContentItemContentSubType joins a content item with one of its sub-types;
// the gorm tags map the embedded structs to their respective columns.
type ContentItemContentSubType struct {
	ContentItem    `gorm:"column:ContentItem"`
	ContentSubType `gorm:"column:ContentSubType"`
}

// MenusSub is one sub-menu node: its own item plus nested child sub-menus.
type MenusSub struct {
	Item    ContentSubType
	SubType []MenusSub
}
// Get returns the direct child whose item ID matches ID, or a zero MenusSub
// when no child matches.
func (m MenusSub) Get(ID uint64) MenusSub {
	for _, sub := range m.SubType {
		if sub.Item.ID == ID {
			return sub
		}
	}
	return MenusSub{}
}
// Menus is a top-level menu: its own item plus first-level sub-menus.
type Menus struct {
	Item    ContentItem
	SubType []MenusSub
}
// Get returns the sub-menu whose item ID matches ID, or a zero MenusSub
// when no sub-menu matches.
func (m Menus) Get(ID uint64) MenusSub {
	for _, sub := range m.SubType {
		if sub.Item.ID == ID {
			return sub
		}
	}
	return MenusSub{}
}
// MenusData is the full menu tree plus the currently selected path
// (top -> sub -> sub-child), populated by SetCurrentMenus.
type MenusData struct {
	List       []Menus
	ID         uint64 // selected top-level menu ID
	SubID      uint64 // selected sub-menu ID
	SubChildID uint64 // selected sub-child menu ID
	Top        Menus
	Sub        MenusSub
	SubChild   MenusSub
}
// SetCurrentMenus records the active top/sub/sub-child selection, resolving
// each level by ID. When ID matches no top-level menu, nothing changes.
func (m *MenusData) SetCurrentMenus(ID, SubID, SubChildID uint64) {
	for i := range m.List {
		if m.List[i].Item.ID != ID {
			continue
		}
		m.Top = m.List[i]
		m.Sub = m.Top.Get(SubID)
		m.SubChild = m.Sub.Get(SubChildID)
		m.ID, m.SubID, m.SubChildID = ID, SubID, SubChildID
		return
	}
}
// Get returns the top-level menu whose item ID matches ID, or a zero Menus
// when no menu matches.
func (m MenusData) Get(ID uint64) Menus {
	for _, menu := range m.List {
		if menu.Item.ID == ID {
			return menu
		}
	}
	return Menus{}
}
|
[
2
] |
// generated by genORMModelDB.go
package orm
import (
"errors"
"fmt"
"github.com/jinzhu/gorm"
"github.com/thomaspeugeot/sandbox01/b/go/models"
)
// BclassAPI is the input in POST API
//
// for POST, API, one needs the fields of the model as well as the fields
// from associations ("Has One" and "Has Many") that are generated to
// fullfill the ORM requirements for associations
//
// (Generated code — regenerate with genORMModelDB.go rather than editing.)
//
// swagger:model bclassAPI
type BclassAPI struct {
	models.Bclass

	// association fields
}

// BclassDB describes a bclass in the database
//
// It incorporates all fields : from the model, from the generated field for the API and the GORM ID
//
// swagger:model bclassDB
type BclassDB struct {
	gorm.Model // supplies the uint ID used as key in the translation maps

	BclassAPI
}

// BclassDBs arrays bclassDBs
// swagger:response bclassDBsResponse
type BclassDBs []BclassDB

// BclassDBResponse provides response
// swagger:response bclassDBResponse
type BclassDBResponse struct {
	BclassDB
}
// ModelToORMBclassTranslate is a translation function from models object to ORM objects
// ModelToORMBclassTranslate is a translation function from models object to ORM objects.
//
// In CreateMode it requires the three package-level translation maps to be
// nil, initializes them, and inserts one BclassDB row per model Bclass.
// In UpdateMode it refreshes the existing DB rows from the models and saves
// them.
//
// Fix: the UpdateMode error path used errors.New(fmt.Sprintf(...)), which
// staticcheck flags (S1028); it now uses fmt.Errorf directly.
func ModelToORMBclassTranslate(
	translationImpact TranslationImpact,
	db *gorm.DB) (Error error) {

	if translationImpact == CreateMode {
		// check that bclassStore is nil as well as bclassDBs
		if map_BclassDBID_BclassPtr != nil {
			err := errors.New("In CreateMode translation, map_BclassDBID_BclassPtr should be nil")
			return err
		}
		if map_BclassDBID_BclassDB != nil {
			err := errors.New("In CreateMode translation, map_BclassDBID_BclassDB should be nil")
			return err
		}
		if map_BclassPtr_BclassDBID != nil {
			err := errors.New("In CreateMode translation, map_BclassPtr_BclassDBID should be nil")
			return err
		}
		// Initialize the three translation maps.
		tmp := make(map[uint]*models.Bclass, 0)
		map_BclassDBID_BclassPtr = &tmp

		tmpDB := make(map[uint]*BclassDB, 0)
		map_BclassDBID_BclassDB = &tmpDB

		tmpID := make(map[*models.Bclass]uint, 0)
		map_BclassPtr_BclassDBID = &tmpID

		for _, bclass := range models.AllModelStore.Bclasss {
			// initiate bclass
			var bclassDB BclassDB
			bclassDB.Bclass = *bclass
			query := db.Create(&bclassDB)
			if query.Error != nil {
				return query.Error
			}
			// update stores with the freshly assigned gorm ID
			(*map_BclassPtr_BclassDBID)[bclass] = bclassDB.ID
			(*map_BclassDBID_BclassPtr)[bclassDB.ID] = bclass
			(*map_BclassDBID_BclassDB)[bclassDB.ID] = &bclassDB
		}
	} else { // UpdateMode, update IDs of Pointer Fields of ORM object
		// check that bclassStore is not nil
		if map_BclassDBID_BclassPtr == nil {
			err := errors.New("In UpdateMode translation, bclassStore should not be nil")
			return err
		}
		if map_BclassDBID_BclassDB == nil {
			err := errors.New("In UpdateMode translation, bclassStore should not be nil")
			return err
		}
		// update fields of bclassDB with fields of bclass
		for _, bclass := range models.AllModelStore.Bclasss {
			bclassDBID := (*map_BclassPtr_BclassDBID)[bclass]
			bclassDB := (*map_BclassDBID_BclassDB)[bclassDBID]
			bclassDB.Bclass = *bclass
		}
		// parse model objects ot update associations
		for idx, bclass := range *map_BclassDBID_BclassPtr {
			// fetch matching bclassDB
			if bclassDB, ok := (*map_BclassDBID_BclassDB)[idx]; ok {
				// set {{Fieldname}}ID
				query := db.Save(&bclassDB)
				if query.Error != nil {
					return query.Error
				}
			} else {
				err := fmt.Errorf("In UpdateMode translation, bclassStore should not be nil %v %v",
					bclassDB, bclass)
				return err
			}
		}
	}
	return nil
}
// Package-level translation state, built by the *Translate functions in
// CreateMode and reused in UpdateMode. A nil pointer means "not yet created".

// stores BclassDB according to their gorm ID
var map_BclassDBID_BclassDB *map[uint]*BclassDB

// stores BclassDB ID according to Bclass address
var map_BclassPtr_BclassDBID *map[*models.Bclass]uint

// stores Bclass according to their gorm ID
var map_BclassDBID_BclassPtr *map[uint]*models.Bclass
// ORMToModelBclassTranslate is a translation function from ORM object to models objects
// This function used the uint ID of the ORM object to create or update (according to translationImpact)
// maps of respectively ORM and models objects
//
// In create mode, the three package-level maps are (re)built from a full DB
// scan and the model store is repopulated from scratch. In update mode,
// existing model objects are refreshed from their DB counterparts.
func ORMToModelBclassTranslate(
	translationImpact TranslationImpact,
	db *gorm.DB) (Error error) {

	if translationImpact == CreateMode {
		// check that bclassStores are nil
		if map_BclassDBID_BclassPtr != nil {
			err := errors.New("In CreateMode translation, Parameters bclassStore should be nil")
			return err
		}
		if map_BclassDBID_BclassDB != nil {
			err := errors.New("In CreateMode translation, parameters BclassDBStore should be nil")
			return err
		}
		// init stores
		tmp := make(map[uint]*models.Bclass, 0)
		map_BclassDBID_BclassPtr = &tmp

		tmpDB := make(map[uint]*BclassDB, 0)
		map_BclassDBID_BclassDB = &tmpDB

		tmpID := make(map[*models.Bclass]uint, 0)
		map_BclassPtr_BclassDBID = &tmpID

		models.AllModelStore.Bclasss = make([]*models.Bclass, 0)
		bclassDBArray := make([]BclassDB, 0)
		query := db.Find(&bclassDBArray)
		if query.Error != nil {
			return query.Error
		}
		// copy orm objects to the two stores
		for _, bclassDB := range bclassDBArray {
			// create entries in the tree maps.
			// Note: bclass and bclassDBCopy are per-iteration copies, so the
			// stored pointers are stable across loop iterations.
			bclass := bclassDB.Bclass
			(*map_BclassDBID_BclassPtr)[bclassDB.ID] = &bclass
			(*map_BclassPtr_BclassDBID)[&bclass] = bclassDB.ID
			bclassDBCopy := bclassDB
			(*map_BclassDBID_BclassDB)[bclassDB.ID] = &bclassDBCopy
			// append model store with the new element
			models.AllModelStore.Bclasss = append(models.AllModelStore.Bclasss, &bclass)
		}
	} else { // UpdateMode
		// for later, update of the data field

		// check that bclassStore is not nil
		if map_BclassDBID_BclassPtr == nil {
			err := errors.New("In UpdateMode translation, bclassStore should not be nil")
			return err
		}
		if map_BclassDBID_BclassDB == nil {
			err := errors.New("In UpdateMode translation, bclassStore should not be nil")
			return err
		}
		// update fields of bclassDB with fields of bclass
		for _, bclass := range models.AllModelStore.Bclasss {
			bclassDBID := (*map_BclassPtr_BclassDBID)[bclass]
			bclassDB := (*map_BclassDBID_BclassDB)[bclassDBID]
			*bclass = bclassDB.Bclass
		}
		// parse all DB instance and update all pointer fields of the translated models instance
		// NOTE(review): for Bclass this loop only verifies that each DB row
		// has a translated model — no pointer fields are actually updated;
		// presumably the generator emits field updates for richer models.
		for _, bclassDB := range *map_BclassDBID_BclassDB {
			bclass := (*map_BclassDBID_BclassPtr)[bclassDB.ID]
			if bclass == nil {
				err := errors.New("cannot find translated instance in models store")
				return err
			}
		}
	}
	return nil
}
// CreateORMBclass is the store-bound convenience wrapper around the
// package-level CreateORMBclass, using the store's own DB handle.
func (allORMStoreStruct *AllORMStoreStruct) CreateORMBclass(bclass *models.Bclass) {
	CreateORMBclass(allORMStoreStruct.db, bclass)
}

// CreateORMBclass creates the BclassDB row for bclass in the database and
// registers the pair in the three package-level translation maps.
// (The original comment contained unexpanded template text "ORM{{Strucname}}".)
func CreateORMBclass(
	db *gorm.DB,
	bclass *models.Bclass) (Error error) {

	// initiate bclass
	var bclassDB BclassDB
	bclassDB.Bclass = *bclass
	query := db.Create(&bclassDB)
	if query.Error != nil {
		return query.Error
	}
	// update stores with the freshly assigned gorm ID
	(*map_BclassPtr_BclassDBID)[bclass] = bclassDB.ID
	(*map_BclassDBID_BclassPtr)[bclassDB.ID] = bclass
	(*map_BclassDBID_BclassDB)[bclassDB.ID] = &bclassDB
	return
}
// DeleteORMBclass is the store-bound convenience wrapper around the
// package-level DeleteORMBclass, using the store's own DB handle.
func (allORMStoreStruct *AllORMStoreStruct) DeleteORMBclass(bclass *models.Bclass) {
	DeleteORMBclass(allORMStoreStruct.db, bclass)
}

// DeleteORMBclass hard-deletes (Unscoped) the BclassDB row matching bclass
// and removes the pair from all three translation maps.
func DeleteORMBclass(
	db *gorm.DB,
	bclass *models.Bclass) (Error error) {

	bclassDBID := (*map_BclassPtr_BclassDBID)[bclass]
	bclassDB := (*map_BclassDBID_BclassDB)[bclassDBID]

	query := db.Unscoped().Delete(&bclassDB)
	if query.Error != nil {
		return query.Error
	}
	delete(*map_BclassPtr_BclassDBID, bclass)
	delete(*map_BclassDBID_BclassPtr, bclassDB.ID)
	delete(*map_BclassDBID_BclassDB, bclassDBID)
	return
}
|
[
2
] |
package card
import (
"errors"
"strconv"
"strings"
)
// Card represents a bank card.
type Card struct {
	id       int
	Owner    string
	issuer   string // issuing bank; set via SetBankName when the card is added
	Balance  int
	Currency string
	Number   string
	Icon     string
}

var (
	// ErrFromCardNotEnoughMoney is returned by Transfer when the source card
	// balance does not cover the amount.
	ErrFromCardNotEnoughMoney = errors.New("Source card: not enough money")
)

// Service manages the cards of a single bank.
type Service struct {
	BankName string
	Cards    []*Card
}
// NewService constructs a card Service for the given bank.
func NewService(bankName string) *Service {
	return &Service{BankName: bankName}
}
// Owners groups every known card by its owner's name.
func (s *Service) Owners() (result map[string][]*Card) {
	result = map[string][]*Card{}
	for i := range s.Cards {
		c := s.Cards[i]
		result[c.Owner] = append(result[c.Owner], c)
	}
	return result
}
// OwnerCards returns the cards belonging to the given owner, in insertion
// order. The result is never nil: an unknown owner yields an empty slice.
//
// Fix: the original built the entire owner->cards map via Owners() only to
// read a single key; scanning s.Cards directly avoids that intermediate
// allocation while producing the same slice.
func (s *Service) OwnerCards(owner string) (result []*Card) {
	result = make([]*Card, 0)
	for _, c := range s.Cards {
		if c.Owner == owner {
			result = append(result, c)
		}
	}
	return result
}
// AddCard stamps the card with this service's bank name and registers it.
func (s *Service) AddCard(card *Card) {
	SetBankName(card, s.BankName)
	s.Cards = append(s.Cards, card)
}
// FindCardByNumber returns the card with the given number, or nil when the
// number is not internal to the bank or is not registered. When several
// cards share a number, the most recently added one wins (as before).
func (s *Service) FindCardByNumber(number string) (card *Card) {
	if !isCardInternal(number) {
		return nil
	}
	var found *Card
	for _, c := range s.Cards {
		if c.Number == number {
			found = c
		}
	}
	return found
}
// Transfer moves amount on the card: with transferFrom == true it withdraws
// from the card (failing with ErrFromCardNotEnoughMoney when the balance is
// insufficient); with transferFrom == false it deposits onto the card.
func (s *Service) Transfer(card *Card, amount int, transferFrom bool) (err error) {
	if !transferFrom {
		card.Balance += amount
		return nil
	}
	if card.Balance < amount {
		return ErrFromCardNotEnoughMoney
	}
	card.Balance -= amount
	return nil
}
// SetBankName records the issuing bank on the card.
func SetBankName(card *Card, bankName string) {
	card.issuer = bankName
}
// isCardInternal reports whether the card number belongs to our bank, i.e.
// starts with the bank's BIN prefix "510621".
//
// Bug fix: the original returned true on BOTH paths, classifying every card
// as internal; numbers without the prefix must return false.
func isCardInternal(number string) bool {
	return strings.HasPrefix(number, "510621")
}
// CheckCardNumberByLuna reports whether the card number passes a simplified
// Luhn check: digits at odd positions counting from the left (1st, 3rd, ...)
// are doubled, results above 9 are reduced by 9, and the total must end in
// zero. Spaces are ignored; any other non-digit makes the number invalid.
//
// Fix: the original final test was the obfuscated ((sum%10)-10)*-1 == 10,
// which is exactly sum%10 == 0 — written plainly now (same behavior,
// including the empty-string case, which still yields true).
func (s *Service) CheckCardNumberByLuna(number string) bool {
	number = strings.ReplaceAll(number, " ", "")
	digits := make([]int, 0, len(number))
	for _, d := range strings.Split(number, "") {
		n, err := strconv.Atoi(d)
		if err != nil {
			return false
		}
		digits = append(digits, n)
	}
	sum := 0
	for i, d := range digits {
		if (i+1)%2 > 0 { // odd position from the left: double and fold
			d *= 2
			if d > 9 {
				d -= 9
			}
		}
		sum += d
	}
	return sum%10 == 0
}
|
[
0
] |
package sled_test
import (
"testing"
"github.com/cheekybits/is"
"github.com/Avalanche-io/sled"
)
// TestNewGetSet round-trips values of several concrete types (value and
// pointer forms) through a fresh sled and checks Get returns what Set stored.
//
// Cleanup: the original wrapped the Set call in a type switch whose non-default
// cases were all commented out, leaving only a default branch — dead code,
// removed here in favor of a direct Set call.
func TestNewGetSet(t *testing.T) {
	is := is.New(t)
	sl := sled.New()
	type TestStruct struct {
		Foo string
	}
	tests := []struct {
		Key   string
		Value interface{}
	}{
		{Key: "foo", Value: "bar"},
		{Key: "bar", Value: 12},
		{Key: "baz", Value: TestStruct{"bat"}},
		{Key: "foo2", Value: strptr("bar")},
		{Key: "bar2", Value: intptr(12)},
		{Key: "baz2", Value: &TestStruct{"bat"}},
	}
	for _, tt := range tests {
		sl.Set(tt.Key, tt.Value)
		// Retrieve into a destination of the matching concrete type.
		switch tt.Value.(type) {
		case string:
			var output string
			err := sl.Get(tt.Key, &output)
			is.NoErr(err)
			is.Equal(output, tt.Value)
		case int:
			var output int
			err := sl.Get(tt.Key, &output)
			is.NoErr(err)
			is.Equal(output, tt.Value)
		case *string:
			var output *string
			err := sl.Get(tt.Key, &output)
			is.NoErr(err)
			is.Equal(output, tt.Value)
		case *int:
			var output *int
			err := sl.Get(tt.Key, &output)
			is.NoErr(err)
			is.Equal(output, tt.Value)
		case TestStruct:
			var output TestStruct
			err := sl.Get(tt.Key, &output)
			is.NoErr(err)
			is.Equal(output, tt.Value)
		case *TestStruct:
			var output *TestStruct
			err := sl.Get(tt.Key, &output)
			is.NoErr(err)
			is.Equal(output, tt.Value)
		}
	}
	err := sl.Close()
	is.NoErr(err)
}
// TestInvalidTypeError verifies Get rejects a non-pointer destination and a
// nil typed pointer with the documented error messages.
func TestInvalidTypeError(t *testing.T) {
	is := is.New(t)
	sl := sled.New()
	err := sl.Set("foo", "bar")
	is.NoErr(err)
	var not_ptr string
	err = sl.Get("foo", not_ptr) // passed by value, not by address
	is.Err(err)
	is.Equal(err.Error(), "argument must be a pointer")
	var Nil *string
	err = sl.Get("foo", Nil) // typed nil pointer
	is.Err(err)
	is.Equal(err.Error(), "argument is nil")
	err = sl.Close()
	is.NoErr(err)
}
// strptr returns a pointer to a copy of the given string.
func strptr(value string) *string {
	v := value
	return &v
}

// intptr returns a pointer to a copy of the given int.
func intptr(value int) *int {
	v := value
	return &v
}
// indexOf returns the position of key in list, or -1 when it is absent.
func indexOf(list []string, key string) int {
	for i := range list {
		if list[i] == key {
			return i
		}
	}
	return -1
}
// TestIterate checks full iteration over all keys and early termination via
// the cancel channel after the second element.
func TestIterate(t *testing.T) {
	is := is.New(t)
	sl := sled.New()
	keys := []string{"foo", "bar", "baz"}
	values := []string{"value 1", "value 2", "value 3"}
	for i := 0; i < len(keys); i++ {
		sl.Set(keys[i], values[i])
	}
	// Full pass: every stored key/value must appear exactly once.
	cnt := 0
	for elem := range sl.Iterate(nil) {
		i := indexOf(keys, elem.Key())
		is.NotEqual(i, -1)
		is.Equal(elem.Value(), values[i])
		elem.Close()
		cnt++
	}
	is.Equal(cnt, 3)

	// cancel: closing the channel after the second element must stop
	// iteration before a third is delivered.
	cancel := make(chan struct{})
	cnt = 0
	for elem := range sl.Iterate(cancel) {
		if cnt == 1 {
			close(cancel)
		}
		is.True(cnt < 2)
		i := indexOf(keys, elem.Key())
		is.NotEqual(i, -1)
		is.Equal(elem.Value(), values[i])
		elem.Close()
		cnt++
	}
	is.Equal(cnt, 2)
	err := sl.Close()
	is.NoErr(err)
}
// TestSetIfNil verifies SetIfNil only stores when the key is absent:
// false for an existing key, true for a new one.
func TestSetIfNil(t *testing.T) {
	is := is.New(t)
	sl := sled.New()
	sl.Set("foo", "bar")
	is.False(sl.SetIfNil("foo", "bar"))
	is.True(sl.SetIfNil("baz", "bat"))
	err := sl.Close()
	is.NoErr(err)
}
// TestDelete verifies Delete returns (nil, false) for a missing key,
// (value, true) for an existing one, and that a deleted key is gone.
func TestDelete(t *testing.T) {
	is := is.New(t)
	sl := sled.New()
	sl.Set("foo", "bar")
	baz, baz_not_ok := sl.Delete("baz") // never stored
	foo, foo_ok := sl.Delete("foo")     // stored above
	is.OK(!baz_not_ok)
	is.Nil(baz)
	is.OK(foo_ok)
	is.NotNil(foo)
	is.Equal(foo.(string), "bar")
	// The deleted key must no longer resolve.
	var nil_value interface{}
	err := sl.Get("foo", nil_value)
	is.Nil(nil_value)
	is.Equal(err.Error(), "key does not exist")
	err = sl.Close()
	is.NoErr(err)
}
// TestSnapshot verifies a snapshot is isolated from writes made to the
// source sled after the snapshot was taken.
func TestSnapshot(t *testing.T) {
	is := is.New(t)
	sl := sled.New()
	sl.Set("foo", "bar")
	snap := sl.Snapshot(sled.ReadWrite)
	sl.Set("bat", "baz") // written after the snapshot
	// snap should have "foo"
	var bar_value string
	err := snap.Get("foo", &bar_value)
	is.NoErr(err)
	is.Equal(bar_value, "bar")
	// but not have "bat"
	var nil_value interface{}
	err = snap.Get("bat", nil_value)
	is.Err(err)
	is.Nil(nil_value)
	err = sl.Close()
	is.NoErr(err)
}
|
[
7
] |
// Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.
package bos
import (
"context"
"fmt"
"io"
"math"
"math/rand"
"net/http"
"os"
"strings"
"testing"
"time"
"github.com/baidubce/bce-sdk-go/bce"
"github.com/baidubce/bce-sdk-go/services/bos"
"github.com/baidubce/bce-sdk-go/services/bos/api"
"github.com/go-kit/log"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
"github.com/thanos-io/objstore"
)
// partSize 128MB — chunk size used by multipart uploads in Upload.
const partSize = 1024 * 1024 * 128

// Bucket implements the store.Bucket interface against bos-compatible(Baidu Object Storage) APIs.
type Bucket struct {
	logger log.Logger
	client *bos.Client
	name   string // bucket name in BOS
}

// Config encapsulates the necessary config values to instantiate an bos client.
// All four fields are mandatory (see validate).
type Config struct {
	Bucket    string `yaml:"bucket"`
	Endpoint  string `yaml:"endpoint"`
	AccessKey string `yaml:"access_key"`
	SecretKey string `yaml:"secret_key"`
}
// validate ensures every required BOS configuration field is non-empty.
func (conf *Config) validate() error {
	for _, field := range []string{conf.Bucket, conf.Endpoint, conf.AccessKey, conf.SecretKey} {
		if field == "" {
			return errors.New("insufficient BOS configuration information")
		}
	}
	return nil
}
// parseConfig unmarshal a buffer into a Config with default HTTPConfig values.
func parseConfig(conf []byte) (Config, error) {
	var config Config
	err := yaml.Unmarshal(conf, &config)
	if err != nil {
		return Config{}, err
	}
	return config, nil
}
// NewBucket new bos bucket: parses the serialized YAML config and delegates
// to NewBucketWithConfig. A nil logger is replaced with a no-op logger.
func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	cfg, err := parseConfig(conf)
	if err != nil {
		return nil, errors.Wrap(err, "parsing BOS configuration")
	}
	return NewBucketWithConfig(logger, cfg, component)
}
// NewBucketWithConfig returns a new Bucket using the provided bos config struct.
func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) {
	if err := config.validate(); err != nil {
		return nil, errors.Wrap(err, "validating BOS configuration")
	}

	client, err := bos.NewClient(config.AccessKey, config.SecretKey, config.Endpoint)
	if err != nil {
		return nil, errors.Wrap(err, "creating BOS client")
	}

	// Tag every request with the calling component for server-side diagnostics.
	client.Config.UserAgent = fmt.Sprintf("thanos-%s", component)

	bkt := &Bucket{
		logger: logger,
		client: client,
		name:   config.Bucket,
	}
	return bkt, nil
}
// Name returns the bucket name for the provider.
func (b *Bucket) Name() string {
	return b.name
}

// Delete removes the object with the given name.
// The context is not consumed by the underlying BOS SDK call.
func (b *Bucket) Delete(_ context.Context, name string) error {
	return b.client.DeleteObject(b.name, name)
}
// Upload the contents of the reader as an object into the bucket.
//
// Objects smaller than partSize are sent with a single PutObject; larger
// objects use the multipart API: partNums full parts of partSize bytes,
// followed by the remainder (lastSlice) if any.
//
// Bug fix: on an UploadPart failure the original returned the error from
// AbortMultipartUpload (when the abort itself failed), discarding the upload
// error that caused the abort. The root-cause error is now preserved.
func (b *Bucket) Upload(_ context.Context, name string, r io.Reader) error {
	size, err := objstore.TryToGetSize(r)
	if err != nil {
		return errors.Wrapf(err, "getting size of %s", name)
	}

	partNums, lastSlice := int(math.Floor(float64(size)/partSize)), size%partSize
	if partNums == 0 {
		// Small object: single-shot upload.
		body, err := bce.NewBodyFromSizedReader(r, lastSlice)
		if err != nil {
			return errors.Wrapf(err, "failed to create SizedReader for %s", name)
		}
		if _, err := b.client.PutObject(b.name, name, body, nil); err != nil {
			return errors.Wrapf(err, "failed to upload %s", name)
		}
		return nil
	}

	result, err := b.client.BasicInitiateMultipartUpload(b.name, name)
	if err != nil {
		return errors.Wrapf(err, "failed to initiate MultipartUpload for %s", name)
	}

	// uploadEveryPart reads partSize bytes from r and uploads them as the
	// given part number, aborting the whole multipart upload on failure.
	uploadEveryPart := func(partSize int64, part int, uploadId string) (string, error) {
		body, err := bce.NewBodyFromSizedReader(r, partSize)
		if err != nil {
			return "", err
		}
		etag, err := b.client.UploadPart(b.name, name, uploadId, part, body, nil)
		if err != nil {
			// Best-effort abort; keep the upload error as the root cause and
			// attach the abort failure when there is one.
			if abortErr := b.client.AbortMultipartUpload(b.name, name, uploadId); abortErr != nil {
				return etag, errors.Wrapf(err, "aborting multipart upload also failed: %v", abortErr)
			}
			return etag, err
		}
		return etag, nil
	}

	var parts []api.UploadInfoType
	for part := 1; part <= partNums; part++ {
		etag, err := uploadEveryPart(partSize, part, result.UploadId)
		if err != nil {
			return errors.Wrapf(err, "failed to upload part %d for %s", part, name)
		}
		parts = append(parts, api.UploadInfoType{PartNumber: part, ETag: etag})
	}
	if lastSlice != 0 {
		etag, err := uploadEveryPart(lastSlice, partNums+1, result.UploadId)
		if err != nil {
			return errors.Wrapf(err, "failed to upload the last part for %s", name)
		}
		parts = append(parts, api.UploadInfoType{PartNumber: partNums + 1, ETag: etag})
	}
	if _, err := b.client.CompleteMultipartUploadFromStruct(b.name, name, result.UploadId, &api.CompleteMultipartUploadArgs{Parts: parts}); err != nil {
		return errors.Wrapf(err, "failed to set %s upload completed", name)
	}
	return nil
}
// Iter calls f for each entry in the given directory (not recursive). The argument to f is the full
// object name including the prefix of the inspected directory.
func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, opt ...objstore.IterOption) error {
	// Normalize the prefix so it ends in exactly one delimiter; an empty dir
	// means the whole bucket.
	if dir != "" {
		dir = strings.TrimSuffix(dir, objstore.DirDelim) + objstore.DirDelim
	}

	// An empty delimiter makes ListObjects return a flat, recursive listing.
	delimiter := objstore.DirDelim

	if objstore.ApplyIterOptions(opt...).Recursive {
		delimiter = ""
	}

	var marker string
	for {
		// Honor caller cancellation between result pages.
		if err := ctx.Err(); err != nil {
			return err
		}

		objects, err := b.client.ListObjects(b.name, &api.ListObjectsArgs{
			Delimiter: delimiter,
			Marker:    marker,
			MaxKeys:   1000,
			Prefix:    dir,
		})
		if err != nil {
			return err
		}

		marker = objects.NextMarker // resume point for the next page
		// Regular objects directly under the prefix.
		for _, object := range objects.Contents {
			if err := f(object.Key); err != nil {
				return err
			}
		}
		// "Sub-directories" (common prefixes) when not recursive.
		for _, object := range objects.CommonPrefixes {
			if err := f(object.Prefix); err != nil {
				return err
			}
		}
		if !objects.IsTruncated {
			break
		}
	}
	return nil
}
// Get returns a reader for the given object name.
func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
	// length -1 means "read through the end of the object" (see getRange).
	return b.getRange(ctx, b.name, name, 0, -1)
}

// GetRange returns a new range reader for the given object name and range.
func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
	return b.getRange(ctx, b.name, name, off, length)
}
// Exists checks if the given object exists in the bucket. A not-found error
// from BOS is reported as (false, nil); any other error is wrapped.
func (b *Bucket) Exists(_ context.Context, name string) (bool, error) {
	if _, err := b.client.GetObjectMeta(b.name, name); err != nil {
		if b.IsObjNotFoundErr(err) {
			return false, nil
		}
		return false, errors.Wrapf(err, "getting object metadata of %s", name)
	}
	return true, nil
}
// Close implements the bucket interface; there is nothing to release here.
func (b *Bucket) Close() error {
	return nil
}
// ObjectSize returns the size of the specified object in bytes.
func (b *Bucket) ObjectSize(_ context.Context, name string) (uint64, error) {
	meta, err := b.client.GetObjectMeta(b.name, name)
	if err != nil {
		return 0, err
	}
	return uint64(meta.ContentLength), nil
}
// Attributes returns information about the specified object (size and last
// modification time).
//
// Fix: corrected the error message typo "gettting objectmeta".
func (b *Bucket) Attributes(_ context.Context, name string) (objstore.ObjectAttributes, error) {
	objMeta, err := b.client.GetObjectMeta(b.name, name)
	if err != nil {
		return objstore.ObjectAttributes{}, errors.Wrapf(err, "getting object meta of %s", name)
	}

	// BOS reports LastModified as an RFC1123 timestamp string.
	lastModified, err := time.Parse(time.RFC1123, objMeta.LastModified)
	if err != nil {
		return objstore.ObjectAttributes{}, err
	}

	return objstore.ObjectAttributes{
		Size:         objMeta.ContentLength,
		LastModified: lastModified,
	}, nil
}
// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations.
func (b *Bucket) IsObjNotFoundErr(err error) bool {
	bosErr, ok := errors.Cause(err).(*bce.BceServiceError)
	if !ok {
		return false
	}
	return bosErr.StatusCode == http.StatusNotFound || bosErr.Code == "NoSuchKey"
}
// getRange opens a reader over [off, off+length) of the object; a length of
// -1 means "through the end of the object" (a one-element range slice).
//
// Fix: corrected the ungrammatical error message "should not empty".
func (b *Bucket) getRange(_ context.Context, bucketName, objectKey string, off, length int64) (io.ReadCloser, error) {
	if len(objectKey) == 0 {
		return nil, errors.Errorf("given object name should not be empty")
	}

	ranges := []int64{off}
	if length != -1 {
		ranges = append(ranges, off+length-1)
	}

	obj, err := b.client.GetObject(bucketName, objectKey, map[string]string{}, ranges...)
	if err != nil {
		return nil, err
	}
	return obj.Body, nil
}
// configFromEnv assembles a Config from the BOS_* environment variables.
func configFromEnv() Config {
	return Config{
		Bucket:    os.Getenv("BOS_BUCKET"),
		Endpoint:  os.Getenv("BOS_ENDPOINT"),
		AccessKey: os.Getenv("BOS_ACCESS_KEY"),
		SecretKey: os.Getenv("BOS_SECRET_KEY"),
	}
}
// NewTestBucket creates test bkt client that before returning creates temporary bucket.
// In a close function it empties and deletes the bucket.
func NewTestBucket(t testing.TB) (objstore.Bucket, func(), error) {
	c := configFromEnv()
	if err := validateForTest(c); err != nil {
		return nil, nil, err
	}

	// An explicitly named bucket is only reused when the caller opts in; it
	// must already be empty and is NOT cleaned up afterwards.
	if c.Bucket != "" {
		if os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" {
			return nil, nil, errors.New("BOS_BUCKET is defined. Normally this tests will create temporary bucket " +
				"and delete it after test. Unset BOS_BUCKET env variable to use default logic. If you really want to run " +
				"tests against provided (NOT USED!) bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: That bucket " +
				"needs to be manually cleared. This means that it is only useful to run one test in a time. This is due " +
				"to safety (accidentally pointing prod bucket for test) as well as BOS not being fully strong consistent.")
		}

		bc, err := yaml.Marshal(c)
		if err != nil {
			return nil, nil, err
		}

		b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test")
		if err != nil {
			return nil, nil, err
		}

		// Refuse a non-empty bucket: any listed object aborts the test setup.
		if err := b.Iter(context.Background(), "", func(f string) error {
			return errors.Errorf("bucket %s is not empty", c.Bucket)
		}); err != nil {
			return nil, nil, errors.Wrapf(err, "checking bucket %s", c.Bucket)
		}

		t.Log("WARNING. Reusing", c.Bucket, "BOS bucket for BOS tests. Manual cleanup afterwards is required")
		return b, func() {}, nil
	}

	// Otherwise create a uniquely named temporary bucket (BOS names are
	// limited in length, hence the 31-character cap).
	src := rand.NewSource(time.Now().UnixNano())
	tmpBucketName := strings.Replace(fmt.Sprintf("test_%x", src.Int63()), "_", "-", -1)

	if len(tmpBucketName) >= 31 {
		tmpBucketName = tmpBucketName[:31]
	}

	c.Bucket = tmpBucketName
	bc, err := yaml.Marshal(c)
	if err != nil {
		return nil, nil, err
	}

	b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test")
	if err != nil {
		return nil, nil, err
	}

	if _, err := b.client.PutBucket(b.name); err != nil {
		return nil, nil, err
	}

	t.Log("created temporary BOS bucket for BOS tests with name", tmpBucketName)
	// The returned cleanup empties and then deletes the temporary bucket.
	return b, func() {
		objstore.EmptyBucket(t, context.Background(), b)
		if err := b.client.DeleteBucket(b.name); err != nil {
			t.Logf("deleting bucket %s failed: %s", tmpBucketName, err)
		}
	}, nil
}
// validateForTest checks the subset of config the tests require; the bucket
// name itself may be generated later, so it is not required here.
func validateForTest(conf Config) error {
	for _, field := range []string{conf.Endpoint, conf.AccessKey, conf.SecretKey} {
		if field == "" {
			return errors.New("insufficient BOS configuration information")
		}
	}
	return nil
}
|
[
7
] |
/*
We have a collection of stones, each stone has a positive integer weight.
Each turn, we choose the two heaviest stones and smash them together. Suppose the stones have weights x and y with x <= y. The result of this smash is:
If x == y, both stones are totally destroyed;
If x != y, the stone of weight x is totally destroyed, and the stone of weight y has new weight y-x.
At the end, there is at most 1 stone left. Return the weight of this stone (or 0 if there are no stones left.)
Example 1:
Input: [2,7,4,1,8,1]
Output: 1
Explanation:
We combine 7 and 8 to get 1 so the array converts to [2,4,1,1,1] then,
we combine 2 and 4 to get 2 so the array converts to [2,1,1,1] then,
we combine 2 and 1 to get 1 so the array converts to [1,1,1] then,
we combine 1 and 1 to get 0 so the array converts to [1] then that's the value of last stone.
Note:
1 <= stones.length <= 30
1 <= stones[i] <= 1000
*/
package main
import (
"errors"
"log"
"sort"
)
// main runs lastStoneWeight2 against a few sample inputs and logs results.
func main() {
	tests := [][]int{{2, 7, 4, 1, 8, 1}, {1,3}, {9,3,2,10}}
	// tests := [][]int{{9,3,2,10}}
	for _, test := range tests {
		log.Printf("lastStoneWeight2(%v) == %d\n", test, lastStoneWeight2(test))
	}
}
// lastStoneWeight repeatedly smashes the two heaviest stones together and
// returns the weight of the final remaining stone (0 when the input is
// empty — every non-empty input converges to exactly one stone because the
// smash result, possibly 0, is always re-inserted).
//
// Fixes: the original called log.Fatal on an empty slice (now returns 0),
// and rebuilt the slice element-by-element each round (now replaced by a
// single append over the retained prefix).
func lastStoneWeight(stones []int) int {
	if len(stones) == 0 {
		return 0
	}
	sort.Ints(stones)
	for len(stones) > 1 {
		// Difference of the two heaviest stones (0 when they are equal).
		smash := stones[len(stones)-1] - stones[len(stones)-2]
		// Drop the two heaviest and insert the smash result.
		stones = append(stones[:len(stones)-2], smash)
		sort.Ints(stones)
	}
	return stones[0]
}
// lastStoneWeight2 solves the same problem as lastStoneWeight without
// sorting: each round it locates the largest and second-largest stones via
// linear scans, replaces them with their difference, and repeats until one
// stone remains. The log.Printf calls are debug tracing.
func lastStoneWeight2(stones []int) int {
	for len(stones) > 1 {
		log.Printf("==============================")
		log.Printf("Stones: %v\n", stones)
		// find largest value
		largestIndex, largestVal, err := largest(stones)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("Largest/index: %d/%d\n", largestVal, largestIndex)
		// find second largest value (excluding the largest's index)
		secLargestIndex, secLargestVal, err := secLargest(stones, largestIndex)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("Sec. largest/index: %d/%d\n", secLargestVal, secLargestIndex)
		// smash result
		smashRes := largestVal - secLargestVal
		log.Printf("SmashRes: %d\n", smashRes)
		// create new stones array and copy all elements in except largest and second largest: copy smash result instead
		newStones := []int{}
		for k, v := range stones {
			if k != largestIndex && k != secLargestIndex {
				newStones = append(newStones, v)
			}
		}
		newStones = append(newStones, smashRes)
		log.Printf("New Stones: %v\n", newStones)
		stones = newStones
		log.Printf("STONES: %v\n\n", stones)
	}
	// Unreachable for non-empty input: the smash result is always appended.
	if len(stones) <= 0 {
		log.Fatalf("len(stones) <= 0")
	}
	return stones[0]
}
// secLargest returns the index and value of the largest element of x other
// than the one at largestValIndex (i.e. the runner-up when largestValIndex
// points at the maximum). It errors on an empty slice. The log.Printf calls
// are debug tracing.
//
// Bug fix: the "competing second largest value" Printf had two %d verbs but
// no arguments (a `go vet` error, printing "%!d(MISSING)"); it now receives
// k and v.
func secLargest(x []int, largestValIndex int) (int, int, error) {
	var secLargestVal int
	var secLargestIndex int
	log.Printf("\tsecLargest(): largestValIndex: %d\n", largestValIndex)
	if len(x) <= 0 {
		return secLargestIndex, secLargestVal, errors.New("secLargest(): empty input array.")
	}
	first := true
	for k, v := range x {
		if k != largestValIndex {
			// second largest value candidate
			log.Printf("\tsecLargest(): considering x[%d] == %d as second largest candidate..\n", k, v)
			if first {
				log.Printf("\tsecLargest(): initializing x[%d] == %d to sec largest val\n", k, v)
				secLargestVal = v
				secLargestIndex = k
				first = false
			} else if v > secLargestVal {
				log.Printf("\tsecLargest(): competing second largest value found at x[%d] == %d, resetting prev val..\n", k, v)
				secLargestVal = v
				secLargestIndex = k
			}
		}
	}
	return secLargestIndex, secLargestVal, nil
}
// largest returns the index and value of the maximum element of x, or an
// error when x is empty. Ties keep the earliest index.
func largest(x []int) (int, int, error) {
	if len(x) <= 0 {
		return 0, 0, errors.New("largest(): empty input array.")
	}
	bestIdx, bestVal := 0, x[0]
	for i, v := range x {
		if v > bestVal {
			bestIdx, bestVal = i, v
		}
	}
	return bestIdx, bestVal, nil
}
|
[
4
] |
// 149. Max Points on a Line
// https://leetcode.com/problems/max-points-on-a-line/description/
package main
import (
"fmt"
)
/*
Input: [[1,1],[2,2],[3,3]]
Output: 3
^
|
| o
| o
| o
+------------->
0 1 2 3 4
Input: [[-1,0],[3,2],[5,3],[4,1],[2,3],[7,4]]
Output: 4
^
|
| o
| o o
| o
| o
---o--+----------------------->
-1 0 1 2 3 4 5 6 7
*/
// Point is an integer 2-D coordinate; it doubles as the encoding of a
// normalized slope in getSlope.
type Point struct {
	X int
	Y int
}

// getSlope returns the direction from p to p2 reduced to canonical form:
// horizontal lines become {1, 0}, vertical lines {0, 1}, and every other
// slope has dy kept positive and both components divided by their GCD.
func getSlope(p, p2 Point) Point {
	dx, dy := p2.X-p.X, p2.Y-p.Y
	if dy == 0 {
		return Point{1, 0}
	}
	if dx == 0 {
		return Point{0, 1}
	}
	if dy < 0 {
		dx, dy = -dx, -dy
	}
	// Euclid's algorithm on |dx| and dy (dy > 0 at this point).
	g := dx
	if g < 0 {
		g = -g
	}
	for r := dy; r > 0; {
		g, r = r, g%r
	}
	return Point{dx / g, dy / g}
}
// maxPoints returns the size of the largest set of collinear points.
//
// Strategy: collapse duplicate points into multiplicities, then for every
// ordered pair (anchor, other) of distinct points accumulate — per
// (anchor, normalized slope) key — how many points share that line beyond
// the anchor. A line's total is that accumulation plus the anchor's own
// multiplicity. Inputs consisting only of duplicates are covered by the
// initial max over the multiplicities.
func maxPoints(points []Point) (max int) {
	counts := make(map[Point]int)
	uniq := make([]Point, 0, len(points))
	for _, p := range points {
		counts[p] += 1
		if counts[p] == 1 {
			uniq = append(uniq, p)
		}
		if counts[p] > max {
			max = counts[p]
		}
	}
	// Key: {anchor, canonical slope}; value: points collinear with the
	// anchor at that slope, excluding the anchor itself.
	slopes := make(map[[2]Point]int, len(points)*len(points)/2)
	for i, p := range uniq {
		for _, p2 := range uniq[i+1:] {
			slopes[[2]Point{p, getSlope(p, p2)}] += counts[p2]
		}
	}
	for pointSlope, count := range slopes {
		c := count + counts[pointSlope[0]]
		if c > max {
			max = c
		}
	}
	return max
}
func main() {
fmt.Println("149. Max Points on a Line")
fmt.Println(maxPoints([]Point{{-1, 0}, {3, 2}, {5, 3}, {4, 1}, {2, 3}, {7, 4}}))
fmt.Println(maxPoints([]Point{{3, 2}, {5, 3}, {4, 1}, {2, 3}, {7, 4}}))
fmt.Println(maxPoints([]Point{{1, 2}, {4, 2}}))
fmt.Println(maxPoints([]Point{{1, 2}, {1, 2}}))
fmt.Println(maxPoints([]Point{{1, 2}, {0, 0}, {1, 2}}))
fmt.Println(maxPoints([]Point{{1, 2}}))
fmt.Println(maxPoints([]Point{}))
fmt.Println(maxPoints([]Point{{0, 0}, {94911151, 94911150}, {94911152, 94911151}}))
fmt.Println(maxPoints([]Point{{0, 9}, {138, 429}, {115, 359}, {115, 359}, {-30, -102}, {230, 709}, {-150, -686}, {-135, -613}, {-60, -248}, {-161, -481}, {207, 639}, {23, 79}, {-230, -691}, {-115, -341}, {92, 289}, {60, 336}, {-105, -467}, {135, 701}, {-90, -394}, {-184, -551}, {150, 774}}))
fmt.Println(maxPoints([]Point{{-4, 1}, {-7, 7}, {-1, 5}, {9, -25}}))
fmt.Println(maxPoints([]Point{{2, 3}, {3, 3}, {-5, 3}}))
}
|
[
1
] |
package main
/*
#include <pwd.h>
#include <sys/types.h>
*/
import "C"
// http://www.gnu.org/software/libc/manual/html_node/NSS-Modules-Interface.html
// NSS lookup status codes, mirroring glibc's enum nss_status from the
// NSS Modules Interface (see the URL above).
const (
	NssStatusSuccess = 1 // entry found and filled in
	NssStatusNotFound = 0 // no such entry
	NssStatusUnavailable = -1 // this backend cannot answer
	NssStatusTryAgain = -2 // transient failure; NSS may retry
)
// main is intentionally empty: package main is required for cgo
// //export, and this code is presumably built as a shared NSS module
// rather than run directly — TODO confirm build mode.
func main() {}
// initialize returns false if startup failed. The result of the first
// LoadConfig attempt is cached in initSuccessful/initError, so later
// calls are cheap and a failed load is not retried.
func initialize() bool {
	switch {
	case initSuccessful:
		return true
	case initError != nil:
		// A previous attempt already failed; don't retry.
		return false
	}
	initError = LoadConfig(configPath)
	if initError != nil {
		fatal("init", initError)
		return false
	}
	initSuccessful = true
	return true
}
// set copies u's fields into the C passwd struct, substituting
// "/home/<username>" for an empty home directory and "/bin/bash" for
// an empty shell. The C strings are allocated with C.CString.
func set(pwd *C.struct_passwd, u user) {
	pwd.pw_name = C.CString(u.Username)
	pwd.pw_passwd = C.CString("x")
	pwd.pw_uid = C.__uid_t(u.UID)
	pwd.pw_gid = C.__gid_t(u.GID)
	pwd.pw_gecos = C.CString(u.Gecos)
	dir := u.Directory
	if dir == "" {
		dir = "/home/" + u.Username
	}
	pwd.pw_dir = C.CString(dir)
	shell := u.Shell
	if shell == "" {
		shell = "/bin/bash"
	}
	pwd.pw_shell = C.CString(shell)
}
// _nss_tls_getpwnam_r looks up a passwd entry by name, fills pwd and
// reports it to NSS through *result.
//export _nss_tls_getpwnam_r
func _nss_tls_getpwnam_r(name *C.char, pwd *C.struct_passwd, buffer *C.char, bufsize C.size_t, result **C.struct_passwd) C.int {
	if !initialize() {
		return NssStatusUnavailable
	}
	resp, err := getUserByName(C.GoString(name), configuration.Token)
	if err != nil {
		fatal("GETPWNAM_R", err)
		return NssStatusTryAgain
	}
	set(pwd, resp.User)
	// Bug fix: the previous `result = &pwd` only reassigned the local
	// parameter and was lost on return; the NSS caller inspects
	// *result to learn whether an entry was produced.
	*result = pwd
	return NssStatusSuccess
}
// _nss_tls_getpwuid_r looks up a passwd entry by UID, fills pwd and
// reports it to NSS through *result.
//export _nss_tls_getpwuid_r
func _nss_tls_getpwuid_r(uid C.__uid_t, pwd *C.struct_passwd, buffer *C.char, bufsize C.size_t, result **C.struct_passwd) C.int {
	if !initialize() {
		return NssStatusUnavailable
	}
	resp, err := getUserByUID(int(uid), configuration.Token)
	if err != nil {
		// Bug fix: log tag previously said GETPWNAM_R (copy/paste).
		fatal("GETPWUID_R", err)
		return NssStatusTryAgain
	}
	set(pwd, resp.User)
	// Bug fix: `result = &pwd` only reassigned the local parameter;
	// the caller's *result must point at the populated struct.
	*result = pwd
	return NssStatusSuccess
}
// _nss_tls_setpwent begins passwd enumeration; this module does not
// support enumeration, so it reports "not found".
//export _nss_tls_setpwent
func _nss_tls_setpwent() C.int {
	return NssStatusNotFound
}
// _nss_tls_endpwent ends passwd enumeration; nothing to clean up.
//export _nss_tls_endpwent
func _nss_tls_endpwent() {
}
// _nss_tls_getpwent_r returns the next enumerated entry; enumeration
// is unsupported, so every call reports "not found".
//export _nss_tls_getpwent_r
func _nss_tls_getpwent_r(pwd *C.struct_passwd, buffer *C.char, bufsize C.size_t, result **C.struct_passwd) C.int {
	return NssStatusNotFound
}
|
[
2
] |
package tokens
import (
"testing"
"time"
"github.com/rancher/rancher/pkg/auth/tokens/hashers"
"github.com/rancher/rancher/pkg/features"
v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// TestVerifyToken exercises VerifyToken with plaintext and hashed
// tokens, wrong keys/names, expired tokens, a nil token, and a token
// whose hash prefix no hasher recognizes. Expected response codes:
// 200 OK, 422 mismatch/invalid, 410 expired, 500 hasher lookup error.
func TestVerifyToken(t *testing.T) {
	tokenName := "test-token"
	hashedTokenName := "hashed-test-token"
	tokenKey := "dddddddddddddddddddddddddddddddddddddddddddddddddddddd"
	badTokenKey := "cccccccccccccccccccccccccccccccccccccccccccccccccccccc"
	// SHA3 hash of tokenKey
	hashedTokenKey := "$3:1:uFrxm43ggfw:zsN1zEFC7SvABTdR58o7yjIqfrI4cQ/HSYz3jBwwVnx5X+/ph4etGDIU9dvIYuy1IvnYUVe6a/Ar95xE+gfjhA"
	// Leading "$-1" is not a registered hasher version, used to force
	// the hasher-lookup failure path.
	invalidHashToken := "$-1:111:111"
	// TTLMillis == 0 means the token does not expire on its own.
	unhashedToken := v3.Token{
		ObjectMeta: metav1.ObjectMeta{
			Name: tokenName,
		},
		Token: tokenKey,
		TTLMillis: 0,
	}
	// Same key, but stored hashed and annotated as such.
	hashedToken := v3.Token{
		ObjectMeta: metav1.ObjectMeta{
			Name: hashedTokenName,
			Annotations: map[string]string{
				TokenHashed: "true",
			},
		},
		Token: hashedTokenKey,
		TTLMillis: 0,
	}
	invalidHashedToken := *hashedToken.DeepCopy()
	invalidHashedToken.Token = invalidHashToken
	tests := []struct {
		name string
		token *v3.Token
		tokenName string
		tokenKey string
		wantResponseCode int
		wantErr bool
	}{
		{
			name: "valid non-hashed token",
			token: &unhashedToken,
			tokenName: tokenName,
			tokenKey: tokenKey,
			wantResponseCode: 200,
		},
		{
			name: "valid hashed token",
			token: &hashedToken,
			tokenName: hashedTokenName,
			tokenKey: tokenKey,
			wantResponseCode: 200,
		},
		{
			name: "valid hashed token, incorrect key",
			token: &hashedToken,
			tokenName: hashedTokenName,
			tokenKey: badTokenKey,
			wantResponseCode: 422,
			wantErr: true,
		},
		{
			name: "wrong token",
			token: &unhashedToken,
			tokenName: hashedTokenName,
			tokenKey: tokenKey,
			wantResponseCode: 422,
			wantErr: true,
		},
		{
			name: "incorrect token key",
			token: &unhashedToken,
			tokenName: tokenName,
			tokenKey: badTokenKey,
			wantResponseCode: 422,
			wantErr: true,
		},
		{
			name: "expired token",
			token: expireToken(&unhashedToken),
			tokenName: tokenName,
			tokenKey: tokenKey,
			wantResponseCode: 410,
			wantErr: true,
		},
		{
			name: "expired hashed token",
			token: expireToken(&hashedToken),
			tokenName: hashedTokenName,
			tokenKey: tokenKey,
			wantResponseCode: 410,
			wantErr: true,
		},
		{
			name: "nil token",
			token: nil,
			tokenName: tokenName,
			tokenKey: tokenKey,
			wantResponseCode: 422,
			wantErr: true,
		},
		{
			name: "unable to retrieve hasher",
			token: &invalidHashedToken,
			tokenName: hashedTokenName,
			tokenKey: tokenKey,
			wantResponseCode: 500,
			wantErr: true,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			responseCode, err := VerifyToken(test.token, test.tokenName, test.tokenKey)
			if test.wantErr {
				require.Error(t, err)
			}
			require.Equal(t, test.wantResponseCode, responseCode)
		})
	}
}
// TestConvertTokenKeyToHash verifies that ConvertTokenKeyToHash hashes
// the token key and sets the TokenHashed annotation only when the
// TokenHashing feature flag is on, leaves the token untouched when it
// is off, and tolerates a nil token.
func TestConvertTokenKeyToHash(t *testing.T) {
	plaintextToken := "cccccccccccccccccccccccccccccccccccccccccccccccccccccc"
	token := v3.Token{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-token",
		},
		Token: plaintextToken,
		TTLMillis: 0,
	}
	tests := []struct {
		name string
		tokenHashingEnabled bool
		token *v3.Token
		wantError bool
		wantHashedAnnotation bool
		wantHashedVal bool
	}{
		{
			name: "token hashing enabled",
			tokenHashingEnabled: true,
			token: &token,
			wantHashedAnnotation: true,
			wantHashedVal: true,
		},
		{
			name: "token hashing disabled",
			tokenHashingEnabled: false,
			token: &token,
			wantHashedAnnotation: false,
			wantHashedVal: false,
		},
		{
			name: "nil token",
			tokenHashingEnabled: false,
			token: nil,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// token will be modified by the consuming function, deep copy to avoid changing the original token
			// NOTE: the feature flag is global state shared across subtests.
			features.TokenHashing.Set(test.tokenHashingEnabled)
			var testToken *v3.Token
			if test.token != nil {
				testToken = test.token.DeepCopy()
			}
			err := ConvertTokenKeyToHash(testToken)
			if test.wantError {
				require.Error(t, err)
			}
			if test.wantHashedAnnotation {
				require.Contains(t, testToken.Annotations, TokenHashed)
				require.Equal(t, "true", testToken.Annotations[TokenHashed])
			} else {
				if test.token != nil {
					require.NotContains(t, testToken.Annotations, TokenHashed)
				}
			}
			if test.wantHashedVal {
				// Round-trip: the stored value must verify against the
				// original plaintext with the hasher it names.
				hasher, err := hashers.GetHasherForHash(testToken.Token)
				require.NoError(t, err)
				err = hasher.VerifyHash(testToken.Token, plaintextToken)
				require.NoError(t, err)
			} else {
				if test.token != nil {
					require.Equal(t, plaintextToken, testToken.Token)
				}
			}
		})
	}
}
// expireToken returns a copy of token that is already expired: its
// creation time is pushed 10s into the past while its TTL is 1ms.
// The input token is not modified.
func expireToken(token *v3.Token) *v3.Token {
	expired := token.DeepCopy()
	expired.TTLMillis = 1
	expired.CreationTimestamp = metav1.NewTime(time.Now().Add(-10 * time.Second))
	return expired
}
|
[
4
] |
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package redis
import (
"time"
"github.com/garyburd/redigo/redis"
b "github.com/elastic/beats/libbeat/common/backoff"
"github.com/elastic/beats/libbeat/publisher"
)
// backoffClient wraps a redis client and spaces out reconnect and
// republish attempts using a shared backoff timer.
type backoffClient struct {
	client *client // underlying redis output client
	reason failReason // cause of the most recent failure
	done chan struct{} // closed on Close to release the backoff timer
	backoff b.Backoff // wait/reset state shared by Connect and Publish
}
// failReason is used to track the cause of an error.
// The redis client forces us to reconnect on any error (even for redis
// internal errors). The backoff timer must not be reset on a successful
// reconnect after publishing failed with a redis internal
// error (e.g. OutOfMemory), so we can still guarantee the backoff duration
// increases exponentially.
type failReason uint8

const (
	failNone failReason = iota // no failure recorded
	failRedis // redis replied with an internal error (redis.Error)
	failOther // network / IO-level failure
)
// newBackoffClient wraps client with an equal-jitter backoff whose
// wait grows from init up to max; the done channel stops the timer.
func newBackoffClient(client *client, init, max time.Duration) *backoffClient {
	done := make(chan struct{})
	return &backoffClient{
		client:  client,
		done:    done,
		backoff: b.NewEqualJitterBackoff(done, init, max),
	}
}
// Connect dials the client, waiting on the backoff timer after a
// failure. The backoff is reset on success only if the previous
// failure was not a redis-internal one, so such failures keep
// growing the wait across reconnects.
func (c *backoffClient) Connect() error {
	err := c.client.Connect()
	if err == nil {
		if c.reason != failRedis {
			c.resetFail()
		}
		return nil
	}
	// Give the client a chance to promote an internal error to a
	// network error before classifying it.
	c.updateFailReason(err)
	c.backoff.Wait()
	return err
}
// Close shuts down the wrapped client and releases the backoff timer
// by closing the done channel.
func (c *backoffClient) Close() error {
	defer close(c.done)
	return c.client.Close()
}
// Publish forwards the batch to the wrapped client. On failure the
// connection is closed (forcing a reconnect), the failure cause is
// recorded, and the backoff wait is served; on success the backoff
// state is cleared.
func (c *backoffClient) Publish(batch publisher.Batch) error {
	err := c.client.Publish(batch)
	if err == nil {
		c.resetFail()
		return nil
	}
	c.client.Close()
	c.updateFailReason(err)
	c.backoff.Wait()
	return err
}
// updateFailReason records the cause of err. An existing failRedis is
// sticky: only a successful Publish (via resetFail) may clear it.
func (c *backoffClient) updateFailReason(err error) {
	switch {
	case c.reason == failRedis:
		// We only allow Publish to recover from a redis internal error.
	case err == nil:
		c.reason = failNone
	default:
		if _, isRedis := err.(redis.Error); isRedis {
			c.reason = failRedis
		} else {
			c.reason = failOther
		}
	}
}
// resetFail clears the recorded failure cause and the backoff duration.
func (c *backoffClient) resetFail() {
	c.reason = failNone
	c.backoff.Reset()
}
// String identifies the wrapped client (used in logs).
func (c *backoffClient) String() string {
	return c.client.String()
}
|
[
1
] |
package leetcode
// reverseBits returns num with its 32 bits in reverse order.
func reverseBits(num uint32) uint32 {
	var out uint32
	for bit := 0; bit < 32; bit++ {
		out = out<<1 | num&1
		num >>= 1
	}
	return out
}
// reverseBits2 is an equivalent bit reversal using a parallel
// assignment in the loop body. (Originally noted as beating 100% of
// submissions on execution time.)
func reverseBits2(num uint32) uint32 {
	var out uint32
	for i := 0; i < 32; i++ {
		// Both right-hand sides use the pre-assignment value of num.
		num, out = num>>1, out<<1|num&1
	}
	return out
}
|
[
0
] |
package middleware
import (
"strconv"
)
/*
type W interface {
Balance() int
}
type realWallet_Id struct{
}
type fakeWallet_Id struct{
balance int
}
func (r realWallet_Id) Balance(i int) int {
return getDbBal()
}
func (f fakeWallet_Id) Balance(i int) int {
return f.balance
}
type WalletAccount struct {
W
}
*/
/*
func DebitAccount (currentBalance int,TrnAmt int) string{
var endBal int
var txnResult string
var statusCode int
var errMsg string
errMsg=""
statusCode=0
endBal=currentBalance-TrnAmt
txnResult=strconv.Itoa(statusCode)+"|"+strconv.Itoa(endBal)+"|"+errMsg
// txn_status 0 success 1 fail|endBal|error desc (if error)
return txnResult
}
*/
// WalletAccount holds a wallet identifier and its current balance.
type WalletAccount struct {
	walletId string
	balance  int
}

// DebitAccount withdraws TrnAmt from the wallet when funds allow and
// returns "<status>|<error>": status 0 on success (empty error text),
// status 1 with "Not enough money" when the balance is insufficient.
func (w *WalletAccount) DebitAccount(TrnAmt int) string {
	status, message := 0, ""
	if w.balance < TrnAmt {
		status, message = 1, "Not enough money"
	} else {
		w.balance -= TrnAmt
	}
	return strconv.Itoa(status) + "|" + message
}
|
[
0,
2
] |
package solution
import "math"
// reverse returns x with its decimal digits reversed, preserving the
// sign, or 0 when the reversed value does not fit in a signed 32-bit
// integer.
func reverse(x int) int {
	result := 0
	for ; x != 0; x /= 10 {
		result = result*10 + x%10
	}
	if result > math.MaxInt32 || result < math.MinInt32 {
		return 0
	}
	return result
}
|
[
0
] |
package lsh;
import (
"math/rand"
"github.com/wenkesj/rphash/types"
);
// LSH combines a hash, a decoder and a projector to implement
// locality-sensitive hashing of float vectors.
type LSH struct {
	hash types.Hash; // final hash over decoded codes
	decoder types.Decoder; // maps projected vectors to discrete codes
	projector types.Projector; // dimensionality reduction of input vectors
	distance float64; // NOTE(review): initialized to 0 and never written here — presumably updated elsewhere; confirm
};
// NewLSH builds an LSH over the given hash, decoder and projector,
// with the distance field starting at zero.
func NewLSH(hash types.Hash,
	decoder types.Decoder,
	projector types.Projector) *LSH {
	lsh := &LSH{distance: 0.0}
	lsh.hash = hash
	lsh.decoder = decoder
	lsh.projector = projector
	return lsh
}
// LSHHashStream decodes r once without noise and then n-1 more times
// with Gaussian noise (scaled by radius) added to the projected
// vector, concatenating all decoded codes into one result slice.
// It returns the concatenated codes and the number of elements written
// by the final copy.
//
// NOTE(review): the result is sized n*len(first decode); if the
// decoder returns a different length for a later input, the slicing
// below mis-addresses — confirm Decode output length is constant.
func (this *LSH) LSHHashStream(r []float64, radius float64, randomseed int64, n int) ([]int32, int) {
	var noise [][]float64;
	/* Generate a new source of random numbers */
	random := rand.New(rand.NewSource(randomseed));
	/* Project a vector into a smaller dimension
	 * Decode the vector to determine its counterpart
	 * Calculate lengths */
	projectedVector := this.projector.Project(r);
	noNoise := this.decoder.Decode(projectedVector);
	nLength, rLength, pLength := len(noNoise), len(r), len(projectedVector);
	/* Create a matrix of random vectors which will
	 * symbolize a noise matrix. */
	// n-1 noise vectors, one per additional decode round.
	// NOTE(review): noise vectors have rLength entries but only the
	// first pLength are consumed below.
	for h := 1; h < n; h++ {
		tempVector := make([]float64, rLength);
		for i := 0; i < rLength; i++ {
			tempVector[i] = random.NormFloat64() * radius;
		}
		noise = append(noise, tempVector);
	}
	/* Formulate a result. */
	result := make([]int32, n * nLength);
	count := copy(result, noNoise);
	rTempVector := make([]float64, pLength);
	for j := 1; j < n; j++ {
		// Perturb the projected vector with the j-th noise row and
		// decode again into the j-th segment of result.
		count = copy(rTempVector, projectedVector);
		tempVector := noise[j - 1];
		for k := 0; k < pLength; k++ {
			rTempVector[k] = rTempVector[k] + tempVector[k];
		}
		noNoise = this.decoder.Decode(rTempVector);
		nLength = len(noNoise);
		count = copy(result[j * nLength : j * nLength + nLength], noNoise);
	}
	return result, count;
};
// LSHHashSimple projects r, decodes the projection and hashes the
// resulting codes.
func (lsh *LSH) LSHHashSimple(r []float64) int32 {
	projected := lsh.projector.Project(r)
	decoded := lsh.decoder.Decode(projected)
	return lsh.hash.Hash(decoded)
}
// Distance returns the stored distance value.
func (lsh *LSH) Distance() float64 {
	return lsh.distance
}
// UpdateDecoderVariance forwards the variance setting to the decoder.
func (lsh *LSH) UpdateDecoderVariance(vari float64) {
	lsh.decoder.SetVariance(vari)
}
|
[
0
] |
package main
import (
"github.com/as27/gop5js"
)
// Unit acceleration impulses used by Mover.update to translate DNA
// opcodes (1-4) into movement directions.
var (
	vdown = Vector{0, 0.1}
	vup = Vector{0, -0.1}
	vleft = Vector{-0.1, 0}
	vright = Vector{0.1, 0}
)
// Vector is a 2D vector with x/y components.
type Vector struct {
	x, y float64
}

// add accumulates v2 into v in place.
func (v *Vector) add(v2 *Vector) {
	v.x += v2.x
	v.y += v2.y
}
// Mover is a DNA-driven particle: each update consumes one DNA opcode
// as an acceleration impulse and integrates velocity and position.
type Mover struct {
	location *Vector // current position on the canvas
	velocity *Vector // accumulated per-tick displacement
	acceleration *Vector // accumulated impulses (never reset here)
	dna []byte // opcode strand, read cyclically
	pointer int // index of the next DNA opcode
	size float64 // drawing diameter
}
// newMover creates a Mover at (x, y) with the given DNA, zero
// velocity and acceleration, size 10 and the DNA pointer at 0.
func newMover(x, y float64, dna []byte) *Mover {
	return &Mover{
		dna:          dna,
		pointer:      0,
		size:         10,
		location:     &Vector{x, y},
		velocity:     &Vector{0, 0},
		acceleration: &Vector{0, 0},
	}
}
// accelerate adds the impulse to the mover's current acceleration.
func (m *Mover) accelerate(impulse *Vector) {
	m.acceleration.add(impulse)
}
// update advances the simulation one tick: the current DNA opcode is
// mapped to an acceleration impulse, then velocity and location are
// integrated. The DNA pointer wraps at the end of the strand; opcodes
// outside 1-4 contribute no impulse.
func (m *Mover) update() {
	if m.pointer >= len(m.dna) {
		m.pointer = 0
	}
	var impulse *Vector
	switch m.dna[m.pointer] {
	case 1:
		impulse = &vup
	case 2:
		impulse = &vdown
	case 3:
		impulse = &vleft
	case 4:
		impulse = &vright
	default:
		impulse = &Vector{0, 0}
	}
	m.accelerate(impulse)
	m.pointer++
	m.velocity.add(m.acceleration)
	m.location.add(m.velocity)
}
// draw renders the mover as a circle of diameter size at its current
// location on the gop5js canvas.
func (m *Mover) draw() {
	gop5js.Ellipse(m.location.x, m.location.y, m.size, m.size)
}
|
[
0
] |
package model
import (
"fmt"
"net/url"
"os"
"strconv"
"github.com/tarkov-database/website/model/api"
"github.com/tarkov-database/website/model/item"
"github.com/tarkov-database/website/version"
"github.com/google/logger"
)
// host is the public host name used when building pages, read from
// the HOST environment variable at startup.
var host string

// init loads HOST; pages still render without it, but a warning is
// logged. NOTE(review): init-time env reads complicate testing —
// consider passing the host explicitly.
func init() {
	if env := os.Getenv("HOST"); len(env) > 0 {
		host = env
	} else {
		logger.Warning("Host is not set!")
	}
}
// Page is the base view model shared by all rendered pages.
type Page struct {
	App *version.Application // application version info
	API *api.API // API status data, set by CreatePageWithAPI
	Host string // public host name (package var host)
	Path string // request path
	URI string // full request URI including query
}
// CreatePage builds the base page model for the given request URL.
func CreatePage(u *url.URL) *Page {
	page := &Page{
		App:  version.App,
		Host: host,
		Path: u.Path,
		URI:  u.RequestURI(),
	}
	return page
}
// CreatePageWithAPI builds the base page model and attaches API
// status data. On failure the partially built page is returned along
// with the error.
func CreatePageWithAPI(u *url.URL) (*Page, error) {
	page := CreatePage(u)
	var err error
	if page.API, err = api.GetAPI(); err != nil {
		return page, err
	}
	return page, nil
}
// IndexPage is the view model for the start page.
type IndexPage struct {
	*Page
}

// GetIndex wraps the page as an IndexPage view model.
func (p *Page) GetIndex() *IndexPage {
	return &IndexPage{p}
}
// EntityPage is the view model base for single-entity pages.
type EntityPage struct {
	*Page
}

// ItemPage is the view model for a single item page.
type ItemPage struct {
	*EntityPage
	Item item.Entity // the item being displayed
}

// Item wraps the page and entity as an ItemPage view model.
func (p *Page) Item(e item.Entity) *ItemPage {
	return &ItemPage{EntityPage: &EntityPage{p}, Item: e}
}
// EntityList is the view model base for paginated entity listings,
// optionally produced by a search.
type EntityList struct {
	*Page
	IsSearch bool // true when the list is a search result
	Keyword string // search keyword, if any
	TotalCount int64 // total matching entities across all pages
	PageCount int64 // number of entities on the current page
	PageNumber int64 // current page number (1-based)
	PageNext *Pagination // link data for the next page (see GetPagination)
	PagePrev *Pagination // link data for the previous page
}

// Pagination carries a page number and the URL that requests it.
type Pagination struct {
	Number int64
	URL string
}
// itemLimit is the number of items shown per page.
const itemLimit = 100
// GetPagination fills PageNumber, PageNext and PagePrev from the
// list's URI and TotalCount. It does nothing unless there is more
// than one page and a URI is set. Parse errors are logged and leave
// the pagination fields unset.
func (l *EntityList) GetPagination() {
	if l.TotalCount > itemLimit && l.URI != "" {
		u, err := url.Parse(l.URI)
		if err != nil {
			logger.Error(err)
			return
		}
		query := u.Query()
		// Ensure query["p"] has an element so the pointer below is valid.
		if len(query.Get("p")) == 0 {
			query.Set("p", "")
		}
		// page points into the url.Values slice: writing *page rewrites
		// the "p" parameter in place before each query.Encode() below.
		page := &query["p"][0]
		var p int64 = 1
		if len(*page) != 0 {
			p, err = strconv.ParseInt(*page, 10, 0)
			if err != nil {
				logger.Error(err)
				return
			}
		}
		// Clamp to the first page.
		if p < 1 {
			p = 1
		}
		// Total number of pages, rounding up.
		total := l.TotalCount / itemLimit
		if (l.TotalCount % itemLimit) != 0 {
			total = total + 1
		}
		// next/prev stay 0 at the ends of the range.
		// NOTE(review): on the last page this emits a PageNext URL with
		// p=0 — presumably the template checks Number > 0; confirm.
		var next int64
		if total > p {
			next = p + 1
		}
		var prev int64
		if p > 1 {
			prev = p - 1
		}
		l.PageNumber = p
		*page = strconv.FormatInt(next, 10)
		l.PageNext = &Pagination{
			Number: next,
			URL: fmt.Sprintf("%s?%s", u.Path, query.Encode()),
		}
		*page = strconv.FormatInt(prev, 10)
		l.PagePrev = &Pagination{
			Number: prev,
			URL: fmt.Sprintf("%s?%s", u.Path, query.Encode()),
		}
	}
}
// ItemList is the view model for a paginated list of items.
type ItemList struct {
	*EntityList
	List []item.Entity // the items on the current page
}

// ItemResult wraps an item query result (optionally from a search
// with keyword kw) as an ItemList view model, with pagination links
// computed from the request URI.
func (p *Page) ItemResult(res item.EntityResult, kw string, search bool) *ItemList {
	l := &ItemList{
		EntityList: &EntityList{
			Page: p,
			IsSearch: search,
			Keyword: kw,
			TotalCount: res.GetCount(),
			PageCount: int64(len(res.GetEntities())),
		},
		List: res.GetEntities(),
	}
	l.GetPagination()
	return l
}
|
[
0
] |
package cluster
import (
"context"
"fmt"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
vsv1alpha1 "github.com/ryo-watanabe/k8s-volume-snap/pkg/apis/volumesnapshot/v1alpha1"
"github.com/ryo-watanabe/k8s-volume-snap/pkg/objectstore"
)
// Interface for taking and restoring snapshots of k8s clusters.
type Interface interface {
	// Snapshot uploads the cluster's volume data to the bucket.
	Snapshot(snapshot *vsv1alpha1.VolumeSnapshot,
		bucket objectstore.Objectstore,
		localKubeClient kubernetes.Interface) error
	// Restore writes snapshot data back into the cluster.
	Restore(restore *vsv1alpha1.VolumeRestore,
		snapshot *vsv1alpha1.VolumeSnapshot,
		bucket objectstore.Objectstore,
		localKubeClient kubernetes.Interface) error
	// DeleteSnapshot removes the snapshot's data from the bucket.
	DeleteSnapshot(snapshot *vsv1alpha1.VolumeSnapshot,
		bucket objectstore.Objectstore,
		localKubeClient kubernetes.Interface) error
}
// Cluster for execute cluster commands; it embeds Interface so a
// concrete implementation can be plugged in.
type Cluster struct {
	Interface
}

// NewCluster returns a new Cluster with no implementation attached.
func NewCluster() *Cluster {
	return &Cluster{}
}
// buildKubeClient constructs a Kubernetes clientset for the target
// cluster from a raw kubeconfig string.
func buildKubeClient(kubeconfig string) (*kubernetes.Clientset, error) {
	if kubeconfig == "" {
		return nil, fmt.Errorf("Cannot create Kubeconfig : Kubeconfig not given")
	}
	// Build a REST config from the (e.g. Rancher-provided) kubeconfig.
	restCfg, err := clientcmd.RESTConfigFromKubeConfig([]byte(kubeconfig))
	if err != nil {
		return nil, fmt.Errorf("Error building kubeconfig: %s", err.Error())
	}
	clientset, err := kubernetes.NewForConfig(restCfg)
	if err != nil {
		return nil, fmt.Errorf("Error building kubernetes clientset: %s", err.Error())
	}
	return clientset, nil
}
// getNamespaceUID returns the UID of the named namespace as a string.
func getNamespaceUID(ctx context.Context, name string, kubeClient kubernetes.Interface) (string, error) {
	namespace, err := kubeClient.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("Error getting namespace UID : %s", err.Error())
	}
	return string(namespace.ObjectMeta.GetUID()), nil
}
// k8s api errors not to retry
var apiPermErrors = []string{
	"Unauthorized",
	"the server has asked for the client to provide credentials",
}

// apiPermError reports whether the error text matches a permanent
// (non-retryable) Kubernetes API failure.
func apiPermError(msg string) bool {
	for _, marker := range apiPermErrors {
		if strings.Contains(msg, marker) {
			return true
		}
	}
	return false
}
// Object store errors not to retry
var obstPermErrors = []string{
	"SignatureDoesNotMatch",
	"InvalidAccessKeyId",
	"NoSuchBucket",
}

// objectstorePermError reports whether the error text matches a
// permanent (non-retryable) object store failure.
func objectstorePermError(msg string) bool {
	for _, marker := range obstPermErrors {
		if strings.Contains(msg, marker) {
			return true
		}
	}
	return false
}
|
[
1
] |
package main
import "fmt"
/*
给定一个字符串,请你找出其中不含有重复字符的 最长子串 的长度。
示例 1:
输入: "abcabcbb"
输出: 3
解释: 因为无重复字符的最长子串是 "abc",所以其长度为 3。
示例 2:
输入: "bbbbb"
输出: 1
解释: 因为无重复字符的最长子串是 "b",所以其长度为 1。
示例 3:
输入: "pwwkew"
输出: 3
解释: 因为无重复字符的最长子串是 "wke",所以其长度为 3。
请注意,你的答案必须是 子串 的长度,"pwke" 是一个子序列,不是子串。
*/
// main prints the longest-unique-substring length for sample inputs.
func main() {
	for _, s := range []string{"abcabcbb", "abcdeaaaabcdaa", "pwwkew"} {
		fmt.Println(lengthOfLongestSubstring(s))
	}
}
// lengthOfLongestSubstring returns the length of the longest substring
// of s with no repeated bytes, advancing a window start past the
// previous occurrence whenever the current byte repeats.
func lengthOfLongestSubstring(s string) int {
	best, windowStart := 0, 0
	for end := 0; end < len(s); end++ {
		for k := windowStart; k < end; k++ {
			if s[k] == s[end] {
				windowStart = k + 1
				break
			}
		}
		if length := end - windowStart + 1; length > best {
			best = length
		}
	}
	return best
}
|
[
1
] |
package main
import (
"fmt"
)
// findCircleNum returns the number of friend circles (connected
// components) in the symmetric adjacency matrix M, where M[i][j] == 1
// means students i and j are direct friends.
//
// The original inlined union-find root-search and relink loops are
// factored into root/unionSets helpers; behavior is unchanged.
func findCircleNum(M [][]int) int {
	// parent[i] == i marks a set representative.
	parent := make([]int, len(M))
	for i := range parent {
		parent[i] = i
	}
	for i := 0; i < len(M); i++ {
		for j := i; j < len(M); j++ {
			if M[i][j] == 1 {
				unionSets(parent, i, j)
			}
		}
	}
	// Each self-rooted entry is one circle.
	count := 0
	for i, p := range parent {
		if i == p {
			count++
		}
	}
	return count
}

// root follows parent links from i to its set representative.
func root(parent []int, i int) int {
	for parent[i] != i {
		i = parent[i]
	}
	return i
}

// unionSets merges the sets containing i and j, re-pointing every node
// on j's chain (including j's root) at i's root.
func unionSets(parent []int, i, j int) {
	ri, rj := root(parent, i), root(parent, j)
	if ri == rj {
		return
	}
	for j != rj {
		next := parent[j]
		parent[j] = ri
		j = next
	}
	parent[rj] = ri
}
// main only prints a greeting; it does not exercise findCircleNum.
func main() {
	fmt.Println("hello world")
}
|
[
2
] |
// Copyright 2013 Sonia Keys
// License: MIT
// Semidiameter: Chapter 55, Semidiameters of the Sun, Moon, and Planets.
package semidiameter
import (
"math"
"github.com/mooncaker816/learnmeeus/v3/base"
"github.com/mooncaker816/learnmeeus/v3/parallax"
"github.com/soniakeys/unit"
)
// Standard semidiameters at unit distance of 1 AU.
// NOTE(review): Moon is computed from a radius divided by base.AU —
// the implied units (and the resulting arcsecond value) depend on
// base.AU's unit; confirm against base package docs.
var (
	Sun = unit.AngleFromSec(959.63)
	Mercury = unit.AngleFromSec(3.36)
	VenusSurface = unit.AngleFromSec(8.34)
	VenusCloud = unit.AngleFromSec(8.41)
	Mars = unit.AngleFromSec(4.68)
	JupiterEquatorial = unit.AngleFromSec(98.44)
	JupiterPolar = unit.AngleFromSec(92.06)
	SaturnEquatorial = unit.AngleFromSec(82.73)
	SaturnPolar = unit.AngleFromSec(73.82)
	Uranus = unit.AngleFromSec(35.02)
	Neptune = unit.AngleFromSec(33.50)
	Pluto = unit.AngleFromSec(2.07)
	Moon = unit.AngleFromSec(358473400 / base.AU)
)
// Semidiameter returns semidiameter at specified distance.
// (Apparent semidiameter of a body whose semidiameter at unit
// distance is s0.)
//
// Δ must be observer-body distance in AU.
func Semidiameter(s0 unit.Angle, Δ float64) unit.Angle {
	return s0.Div(Δ)
}
// SaturnApparentPolar returns the apparent polar semidiameter of
// Saturn at the specified distance, foreshortened by the observer's
// Saturnicentric latitude.
//
// Δ must be the observer-Saturn distance in AU. B is the
// Saturnicentric latitude of the observer, e.g. from
// saturnring.UB().
func SaturnApparentPolar(Δ float64, B unit.Angle) unit.Angle {
	// Squared flattening term from the polar/equatorial ratio.
	ratio := SaturnPolar.Rad() / SaturnEquatorial.Rad()
	e2 := 1 - ratio*ratio
	cosB := B.Cos()
	return SaturnEquatorial.Mul(math.Sqrt(1-e2*cosB*cosB) / Δ)
}
// MoonTopocentric returns observed topocentric semidiameter of the Moon
// (the apparent lunar semidiameter as seen from a point on Earth's
// surface), as a sine-like dimensionless value.
//
// Δ is distance to Moon in AU.
// δ is declination of Moon.
// H is hour angle of Moon.
// ρsφʹ, ρcφʹ are parallax constants as returned by
// globe.Ellipsoid.ParallaxConstants, for example.
func MoonTopocentric(Δ float64, δ unit.Angle, H unit.HourAngle, ρsφʹ, ρcφʹ float64) float64 {
	// k is the adopted ratio of lunar to terrestrial radius (Meeus).
	const k = .272481
	sπ := parallax.Horizontal(Δ).Sin()
	// q computed by (40.6, 40.7) p. 280, ch 40.
	sδ, cδ := δ.Sincos()
	sH, cH := H.Sincos()
	A := cδ * sH
	B := cδ*cH - ρcφʹ*sπ
	C := sδ - ρsφʹ*sπ
	q := math.Sqrt(A*A + B*B + C*C)
	return k / q * sπ
}
// MoonTopocentric2 returns observed topocentric semidiameter of the Moon
// (topocentric lunar semidiameter, simple formula) by a less rigorous
// method.
//
// Δ is distance to Moon in AU, h is altitude of the Moon above the
// observer's horizon.
func MoonTopocentric2(Δ float64, h unit.Angle) unit.Angle {
	return Moon.Mul((1 + h.Sin()*parallax.Horizontal(Δ).Sin()) / Δ)
}
// AsteroidDiameter returns approximate diameter given absolute magnitude H
// and albedo A.
//
// Result is in km.
func AsteroidDiameter(H, A float64) float64 {
return math.Pow(10, 3.12-.2*H-.5*math.Log10(A))
}
// Asteroid returns the semidiameter of an asteroid of diameter d km
// observed from a distance of Δ AU.
func Asteroid(d, Δ float64) unit.Angle {
	// Constant from Meeus, chapter 55 (arcseconds per km per AU).
	const scale = .0013788
	return unit.AngleFromSec(scale).Mul(d / Δ)
}
|
[
2
] |
package main
import (
"fmt"
"math"
"strconv"
"strings"
)
// bounds describes the legal numeric range of one crontab time field,
// plus an optional map of lowercase names (e.g. "jan") to values.
type bounds struct {
	min, max uint
	names map[string]uint
}
// Legal ranges (and month / day-of-week names) for the five crontab
// time fields.
var (
	minuteBound = bounds{0, 59, nil}
	hourBound = bounds{0, 23, nil}
	domBound = bounds{1, 31, nil}
	monthBound = bounds{1, 12, map[string]uint{
		"jan": 1,
		"feb": 2,
		"mar": 3,
		"apr": 4,
		"may": 5,
		"jun": 6,
		"jul": 7,
		"aug": 8,
		"sep": 9,
		"oct": 10,
		"nov": 11,
		"dec": 12,
	}}
	dowBound = bounds{0, 6, map[string]uint{
		"sun": 0,
		"mon": 1,
		"tue": 2,
		"wed": 3,
		"thu": 4,
		"fri": 5,
		"sat": 6,
	}}
)
const (
	// Set the top bit if a star was included in the expression.
	// (Lets consumers distinguish "*" from an explicit full range;
	// cleared again when a step > 1 is supplied.)
	starBit = 1 << 63
)
// parseTimeField parses a comma-separated list of range expressions
// within bounds r and ORs their bit masks together. Empty list items
// are dropped by strings.FieldsFunc.
func parseTimeField(field string, r bounds) (uint64, error) {
	var mask uint64
	for _, expr := range strings.FieldsFunc(field, func(c rune) bool { return c == ',' }) {
		rangeBits, err := parseTimeRange(expr, r)
		if err != nil {
			return mask, err
		}
		mask |= rangeBits
	}
	return mask, nil
}
// parseTimeRange parses a single cron range expression of the form
// "*", "N", "N-M", or any of those followed by "/step", returning a
// bit mask of the selected values. A "*" sets starBit unless a step
// greater than 1 is given.
func parseTimeRange(expr string, r bounds) (uint64, error) {
	var (
		start, end, step uint
		rangeAndStep = strings.Split(expr, "/")
		lowAndHigh = strings.Split(rangeAndStep[0], "-")
		singleDigit = len(lowAndHigh) == 1
		err error
	)
	var extra uint64
	if lowAndHigh[0] == "*" {
		// "*" spans the full field range and is flagged with starBit.
		start = r.min
		end = r.max
		extra = starBit
	} else {
		start, err = parseIntOrName(lowAndHigh[0], r.names)
		if err != nil {
			return 0, err
		}
		switch len(lowAndHigh) {
		case 1:
			// Bare "N" selects a single value.
			end = start
		case 2:
			end, err = parseIntOrName(lowAndHigh[1], r.names)
			if err != nil {
				return 0, err
			}
		default:
			return 0, fmt.Errorf("too many hyphens: %s", expr)
		}
	}
	switch len(rangeAndStep) {
	case 1:
		step = 1
	case 2:
		step, err = mustParseInt(rangeAndStep[1])
		if err != nil {
			return 0, err
		}
		// Special handling: "N/step" means "N-max/step".
		if singleDigit {
			end = r.max
		}
		// A real step demotes "*" to an ordinary range.
		if step > 1 {
			extra = 0
		}
	default:
		return 0, fmt.Errorf("too many slashes: %s", expr)
	}
	// Validate bounds and step before building the mask.
	if start < r.min {
		return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
	}
	if end > r.max {
		return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr)
	}
	if start > end {
		return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
	}
	if step == 0 {
		return 0, fmt.Errorf("step of range should be a positive number: %s", expr)
	}
	return getBits(start, end, step) | extra, nil
}
// parseIntOrName resolves expr through the names map
// (case-insensitively) or, failing that, as a non-negative integer.
// A nil names map is safe: lookups on a nil map simply miss.
func parseIntOrName(expr string, names map[string]uint) (uint, error) {
	if named, ok := names[strings.ToLower(expr)]; ok {
		return named, nil
	}
	return mustParseInt(expr)
}
// mustParseInt parses expr as a non-negative integer, returning
// cron-user-friendly error messages on failure.
func mustParseInt(expr string) (uint, error) {
	n, err := strconv.Atoi(expr)
	switch {
	case err != nil:
		return 0, fmt.Errorf("failed to parse int from %s", expr)
	case n < 0:
		return 0, fmt.Errorf("negative number (%d) not allowed: %s", n, expr)
	}
	return uint(n), nil
}
// getBits returns a mask with every step'th bit set in [min, max].
func getBits(min, max, step uint) uint64 {
	// Contiguous ranges need only two shifts instead of a loop.
	// (Shifts of 64 or more yield 0 in Go, so max == 63 is safe.)
	if step == 1 {
		return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
	}
	var mask uint64
	for bit := min; bit <= max; bit += step {
		mask |= 1 << bit
	}
	return mask
}
// ParseJobs parses a crontab-like document tab (fname is used only in
// error messages). Each non-blank, non-comment line has exactly seven
// whitespace-separated fields: label, minute, hour, day-of-month,
// month, day-of-week, command — where the command keeps its internal
// whitespace.
func ParseJobs(fname, tab string) ([]*Job, error) {
	jobs := []*Job{}
	lines := strings.Split(tab, "\n")
	for lno, l := range lines {
		parseError := func(err error) error {
			return fmt.Errorf("parse error %s:%d %s", fname, lno, err)
		}
		// Skip blank lines and comments.
		if strings.TrimSpace(l) == "" || l[0] == '#' {
			continue
		}
		// Split out our 7 fields
		// Small state machine: once six fields are collected, all
		// remaining characters (including spaces) belong to field 7.
		curField := &strings.Builder{}
		fields := []string{}
		const ST_FIELD = 0
		const ST_WS = 1
		state := ST_WS
		for _, r := range l {
			switch state {
			case ST_FIELD:
				if len(fields) != 6 && (r == ' ' || r == '\t') {
					state = ST_WS
					fields = append(fields, curField.String())
					curField.Reset()
				} else {
					curField.WriteRune(r)
				}
			case ST_WS:
				if r != ' ' && r != '\t' {
					state = ST_FIELD
					curField.WriteRune(r)
				}
			}
		}
		fields = append(fields, curField.String())
		// NOTE(review): fields always has at least one element after
		// the append above, so this branch looks unreachable.
		if len(fields) == 0 {
			continue
		}
		if len(fields) != 7 {
			return nil, parseError(fmt.Errorf("expected a label, timespec and a command"))
		}
		name := fields[0]
		minute, err := parseTimeField(fields[1], minuteBound)
		if err != nil {
			return nil, parseError(fmt.Errorf("invalid minute spec: %s", err))
		}
		hour, err := parseTimeField(fields[2], hourBound)
		if err != nil {
			return nil, parseError(fmt.Errorf("invalid hour spec: %s", err))
		}
		dom, err := parseTimeField(fields[3], domBound)
		if err != nil {
			return nil, parseError(fmt.Errorf("invalid day of month spec: %s", err))
		}
		month, err := parseTimeField(fields[4], monthBound)
		if err != nil {
			return nil, parseError(fmt.Errorf("invalid month spec: %s", err))
		}
		dow, err := parseTimeField(fields[5], dowBound)
		if err != nil {
			return nil, parseError(fmt.Errorf("invalid day of week spec: %s", err))
		}
		command := fields[6]
		jobs = append(jobs, &Job{
			Name: name,
			Minute: minute,
			Hour: hour,
			Dom: dom,
			Month: month,
			Dow: dow,
			Command: command,
		})
	}
	return jobs, nil
}
|
[
1
] |
package models
import (
"github.com/hzwy23/dbobj"
"errors"
"github.com/hzwy23/asofdate/hauth/hrpc"
"github.com/astaxie/beego/logs"
)
// PasswdModels groups the password-change database operations.
type PasswdModels struct {
}
// UpdateMyPasswd changes the caller's own password after verifying
// the old one. It returns a message key plus an error on failure, or
// ("success", nil).
// NOTE(review): the secondary return values of hrpc.CheckPasswd are
// deliberately ignored here.
func (PasswdModels) UpdateMyPasswd(newPd, User_id, oriEn string) (string, error) {
	ok, _, _, _ := hrpc.CheckPasswd(User_id, oriEn)
	if !ok {
		return "error_old_passwd", errors.New("error_old_passwd")
	}
	if _, err := dbobj.Exec(sys_rdbms_014, newPd, User_id, oriEn); err != nil {
		logs.Error(err)
		return "error_passwd_modify", err
	}
	return "success", nil
}
// UpdateUserPasswd sets a user's password directly (administrative
// path, no old-password check).
func (PasswdModels) UpdateUserPasswd(newPd, userid string) error {
	_,err :=dbobj.Exec(sys_rdbms_015, newPd, userid)
	return err
}
|
[
2
] |
package twitter
import (
"encoding/base64"
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"strconv"
)
// User is a Twitter user.
type User struct {
	ID string // numeric user ID (as a string)
	Name string // display name
	Username string // handle without the @
}

// Tweet represents a single tweet belonging to a user.
type Tweet struct {
	ID string
	Text string
}

// Client is a Twitter API client.
type Client struct {
	BaseURL string // API root, e.g. https://api.twitter.com/2
	Token string // bearer token for authorization
	Cache *CacheClient // on-disk response cache keyed by endpoint
}
// NewClient instantiates a new Twitter client.
func NewClient(token string) *Client {
return &Client{
BaseURL: "https://api.twitter.com/2",
Token: token,
Cache: newCache("./data/cache"),
}
}
// GetTweetsForUserID fetches tweets for the given user ID, following
// pagination until more than limit tweets have been collected or the
// pages run out. The final page is not truncated, so more than limit
// tweets may be returned.
func (c *Client) GetTweetsForUserID(userID string, limit int) ([]Tweet, error) {
	// First page.
	tweets, next, err := c.getTweets(userID, "")
	if err != nil {
		// Bug fix: this error was previously discarded, letting the
		// loop below spin forever on an empty result.
		return []Tweet{}, err
	}
	// Remaining pages.
	for len(tweets) <= limit {
		if next == "" {
			// Bug fix: no further pages — stop instead of refetching
			// page one and accumulating duplicates.
			break
		}
		ts, n, err := c.getTweets(userID, next)
		if err != nil {
			return []Tweet{}, err
		}
		next = n
		tweets = append(tweets, ts...)
	}
	return tweets, nil
}
// GetTweetsForUsername fetches up to limit tweets for the given
// username, resolving it to a user ID first and truncating the final
// page so at most limit tweets are returned.
func (c *Client) GetTweetsForUsername(username string, limit int) ([]Tweet, error) {
	user, err := c.GetUserByUsername(username)
	if err != nil {
		return []Tweet{}, err
	}
	// First page.
	tweets, next, err := c.getTweets(user.ID, "")
	if err != nil {
		// Bug fix: this error was previously discarded, letting the
		// loop below spin forever on an empty result.
		return []Tweet{}, err
	}
	// Remaining pages.
	for len(tweets) < limit {
		if next == "" {
			// Bug fix: stop when pagination is exhausted instead of
			// refetching page one and accumulating duplicates.
			break
		}
		ts, n, err := c.getTweets(user.ID, next)
		if err != nil {
			return []Tweet{}, err
		}
		next = n
		for _, t := range ts {
			if len(tweets) >= limit {
				break
			}
			tweets = append(tweets, t)
		}
	}
	return tweets, nil
}
// getTweets fetches one page (up to 100) of tweets for the numeric
// userID, optionally continuing from a pagination token. It returns
// the page plus the token for the next page.
func (c *Client) getTweets(userID, token string) ([]Tweet, string, error) {
	// Guard against path injection: the ID must be numeric.
	if _, err := strconv.Atoi(userID); err != nil {
		return []Tweet{}, "", errors.New("userID must be numeric")
	}
	endpoint := "/users/" + userID + "/tweets" + "?max_results=100"
	if token != "" {
		endpoint += "&pagination_token=" + token
	}
	body, err := c.get(endpoint)
	if err != nil {
		return []Tweet{}, "", err
	}
	var res TweetsResponse
	if err := json.Unmarshal(body, &res); err != nil {
		return []Tweet{}, "", err
	}
	return res.Tweets, res.Meta.Next, nil
}
// GetUserByUsername fetches a user record from the Twitter API by
// username.
func (c *Client) GetUserByUsername(username string) (User, error) {
	body, err := c.get("/users/by/username/" + username)
	if err != nil {
		return User{}, err
	}
	var res UserByUsernameResponse
	if err := json.Unmarshal(body, &res); err != nil {
		return User{}, err
	}
	return res.User, nil
}
// get runs an actual request against the Twitter API, serving from
// and populating the on-disk cache keyed by endpoint.
// NOTE(review): the per-request http.Client has no timeout — a stuck
// request blocks indefinitely; consider reusing a client with a
// Timeout set.
func (c *Client) get(endpoint string) ([]byte, error) {
	// If it exists in cache, return the cached one.
	val, found := c.Cache.Get(endpoint)
	if found {
		// []byte is json.Marshalled to a base64-encoded string, so we need
		// to decode it here to make it useful.
		return base64.StdEncoding.DecodeString(val.(string))
	}
	req, err := http.NewRequest("GET", c.BaseURL+endpoint, nil)
	if err != nil {
		return []byte{}, err
	}
	req.Header.Add("Authorization", "Bearer "+c.Token)
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return []byte{}, err
	}
	defer resp.Body.Close()
	// NOTE(review): non-2xx statuses are cached and returned as
	// success bodies — confirm this is intended.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return []byte{}, err
	}
	c.Cache.Set(endpoint, body)
	return body, nil
}
|
[
0
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.