code
stringlengths 67
15.9k
| labels
listlengths 1
4
|
---|---|
package lib
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"runtime"
"strings"
"time"
)
// checkForErrors logs the caller's location plus the error and terminates
// the process via log.Fatal (note: it exits, it does not panic, despite the
// original comment). No-op when err is nil.
func checkForErrors(err error) {
	if err != nil {
		// runtime.Caller(1) reports the file/line of the function that
		// invoked checkForErrors, not of this helper itself.
		pc, fn, line, _ := runtime.Caller(1)
		msg := fmt.Sprintf("[Error] in %s[%s:%d] %v",
			runtime.FuncForPC(pc).Name(), fn, line, err)
		log.Fatal(msg)
	}
}
// Get the IP address of the docker host as this is run from within container
func getDockerHostIP() string {
cmd := fmt.Sprintf("netstat -nr | grep '^0\\.0\\.0\\.0' | awk '{print $2}'")
out, err := exec.Command("sh", "-c", cmd).Output()
checkForErrors(err)
ip := string(out)
ip = strings.Replace(ip, "\n", "", -1)
return ip
}
// getEtcdAPI builds the base URL for the etcd HTTP API at host:port.
func getEtcdAPI(host string, port string) string {
	return "http://" + host + ":" + port
}
// httpGetRequest performs an HTTP GET against url and returns the raw
// response body. Any transport or read error terminates the process via
// checkForErrors.
func httpGetRequest(url string) []byte {
	response, err := http.Get(url)
	checkForErrors(err)
	defer response.Body.Close()

	payload, readErr := ioutil.ReadAll(response.Body)
	checkForErrors(readErr)
	return payload
}
// httpPutRequest PUTs json_data to url with a JSON content type and returns
// the response. NOTE: the response body is closed before returning, so
// callers may inspect status and headers only, not read the body.
func httpPutRequest(url string, json_data []byte) *http.Response {
	req, err := http.NewRequest("PUT", url, bytes.NewBuffer(json_data))
	// The original discarded this error; a malformed URL would have caused
	// a nil-pointer dereference on req.Header below instead of a clear exit.
	checkForErrors(err)
	req.Header.Set("Content-Type", "application/json")

	client := &http.Client{}
	resp, err := client.Do(req)
	checkForErrors(err)
	defer resp.Body.Close()
	return resp
}
// getFullAPIURL joins the etcd endpoint on the docker host with an API path.
func getFullAPIURL(port, etcdAPIPath string) string {
	base := getEtcdAPI(getDockerHostIP(), port)
	return fmt.Sprintf("%s/%s", base, etcdAPIPath)
}
// WaitForFleetMachines polls the fleet machines key in etcd until at least
// expectedMachineCount machines are registered, unmarshalling the latest
// listing into fleetMachines each iteration. Blocks indefinitely if the
// cluster never reaches the expected size.
func WaitForFleetMachines(
	fleetMachines *FleetMachines, expectedMachineCount int) {
	url := getFullAPIURL("4001", "v2/keys/_coreos.com/fleet/machines")
	for {
		// Single fetch+parse path (the original duplicated this before
		// and inside the loop).
		jsonResponse := httpGetRequest(url)
		err := json.Unmarshal(jsonResponse, fleetMachines)
		checkForErrors(err)

		totalMachines := len(fleetMachines.Node.Nodes)
		if totalMachines >= expectedMachineCount {
			return
		}
		log.Printf("Waiting for all (%d) machines to be available "+
			"in fleet. Currently at: (%d)",
			expectedMachineCount, totalMachines)
		time.Sleep(1 * time.Second)
	}
}
// WaitForFleetMachineMetadata polls the fleet machine object for the given
// machine until its metadata includes a "kubernetes_role" entry, leaving the
// parsed object in fleetMachineObjectNodeValue.
// (expectedMachineCount is unused; kept for interface compatibility.)
func WaitForFleetMachineMetadata(
	fleetMachinesNodeNodesValue *FleetMachinesNodeNodesValue,
	fleetMachineObjectNodeValue *FleetMachineObjectNodeValue,
	expectedMachineCount int) {
	id := strings.Split(fleetMachinesNodeNodesValue.Key, "fleet/machines/")[1]
	path := fmt.Sprintf("v2/keys/_coreos.com/fleet/machines/%s/object", id)
	url := getFullAPIURL("4001", path)
	for {
		// BUG FIX: the original fetched the URL once and re-unmarshalled
		// the same stale bytes inside the wait loop, so a machine whose
		// metadata was not yet published would spin forever. Re-fetch on
		// every iteration instead.
		jsonResponse := httpGetRequest(url)
		var fleetMachineObject FleetMachineObject
		err := json.Unmarshal(jsonResponse, &fleetMachineObject)
		checkForErrors(err)
		// The object's Value field is itself a JSON document.
		err = json.Unmarshal(
			[]byte(fleetMachineObject.Node.Value), &fleetMachineObjectNodeValue)
		checkForErrors(err)

		if len(fleetMachineObjectNodeValue.Metadata) > 0 &&
			fleetMachineObjectNodeValue.Metadata["kubernetes_role"] != nil {
			return
		}
		log.Printf("Waiting for machine (%s) metadata to be available "+
			"in fleet...", fleetMachineObjectNodeValue.ID)
		time.Sleep(1 * time.Second)
	}
}
// createMasterUnits renders and writes the systemd unit files for a
// kubernetes master (apiserver, controller, scheduler, download) from the
// templates under /templates, substituting the machine ID and — for the
// apiserver unit — the comma-separated minion IP list.
func createMasterUnits(
	entity *FleetMachineObjectNodeValue,
	minionIPAddrs string,
	unitPathInfo []map[string]string,
) {
	files := map[string]string{
		"api":        "[email protected]",
		"controller": "[email protected]",
		"scheduler":  "[email protected]",
		"download":   "[email protected]",
	}

	// render loads the template for the given role, applies the marker
	// substitutions, and writes the per-machine unit file into destDir.
	render := func(role string, destDir string, subs map[string]string) {
		raw, err := ioutil.ReadFile(fmt.Sprintf("/templates/%s", files[role]))
		checkForErrors(err)

		content := string(raw)
		for marker, value := range subs {
			content = strings.Replace(content, marker, value, -1)
		}

		unitName := strings.Replace(files[role], "@", "@"+entity.ID, -1)
		target := fmt.Sprintf("%s/%s", destDir, unitName)
		checkForErrors(ioutil.WriteFile(target, []byte(content), 0644))
	}

	// Same write order and destinations as before: api/controller/scheduler
	// into unitPathInfo[1], download into unitPathInfo[0].
	render("api", unitPathInfo[1]["path"], map[string]string{
		"<ID>":              entity.ID,
		"<MINION_IP_ADDRS>": minionIPAddrs,
	})
	render("controller", unitPathInfo[1]["path"], map[string]string{"<ID>": entity.ID})
	render("scheduler", unitPathInfo[1]["path"], map[string]string{"<ID>": entity.ID})
	render("download", unitPathInfo[0]["path"], map[string]string{"<ID>": entity.ID})
}
// createMinionUnits renders and writes the systemd unit files for a
// kubernetes minion (kubelet, proxy, download) from the templates under
// /templates, substituting the machine ID and — for the kubelet unit —
// the machine's public IP.
func createMinionUnits(entity *FleetMachineObjectNodeValue,
	unitPathInfo []map[string]string,
) {
	files := map[string]string{
		"kubelet":  "[email protected]",
		"proxy":    "[email protected]",
		"download": "[email protected]",
	}

	// render loads the template for the given role, applies the marker
	// substitutions, and writes the per-machine unit file into destDir.
	render := func(role string, destDir string, subs map[string]string) {
		raw, err := ioutil.ReadFile(fmt.Sprintf("/templates/%s", files[role]))
		checkForErrors(err)

		content := string(raw)
		for marker, value := range subs {
			content = strings.Replace(content, marker, value, -1)
		}

		unitName := strings.Replace(files[role], "@", "@"+entity.ID, -1)
		target := fmt.Sprintf("%s/%s", destDir, unitName)
		checkForErrors(ioutil.WriteFile(target, []byte(content), 0644))
	}

	// Same write order and destinations as before: kubelet/proxy into
	// unitPathInfo[1], download into unitPathInfo[0].
	render("kubelet", unitPathInfo[1]["path"], map[string]string{
		"<ID>":      entity.ID,
		"<IP_ADDR>": entity.PublicIP,
	})
	render("proxy", unitPathInfo[1]["path"], map[string]string{"<ID>": entity.ID})
	render("download", unitPathInfo[0]["path"], map[string]string{"<ID>": entity.ID})
}
// getMinionIPAddrs returns the comma-separated public IPs of every machine
// whose fleet metadata marks it as a kubernetes minion. Returns "" when no
// minions are present.
//
// BUG FIX: the original built "ip1,ip2," and sliced at strings.LastIndex
// of ",", which is -1 for an empty result — output[:-1] panics with a
// slice-bounds error when the cluster has no minions. strings.Join handles
// both the trailing separator and the empty case.
func getMinionIPAddrs(
	fleetMachineEntities *[]FleetMachineObjectNodeValue) string {
	var addrs []string
	for _, entity := range *fleetMachineEntities {
		if entity.Metadata["kubernetes_role"] == "minion" {
			addrs = append(addrs, entity.PublicIP)
		}
	}
	return strings.Join(addrs, ",")
}
// CreateUnitFiles regenerates the systemd unit files for every machine in
// the fleet: masters get apiserver/controller/scheduler units, minions get
// kubelet/proxy units, and both get a download unit. Each target directory
// is wiped and recreated first.
func CreateUnitFiles(
	fleetMachineEntities *[]FleetMachineObjectNodeValue,
	unitPathInfo []map[string]string,
) {
	// BUG FIX: the original passed os.FileMode(os.ModeDir), whose
	// permission bits are all zero, creating unreadable d--------- dirs.
	perm := os.FileMode(0755)
	for _, v := range unitPathInfo {
		err := os.RemoveAll(v["path"])
		checkForErrors(err)
		// MkdirAll's error was previously discarded.
		checkForErrors(os.MkdirAll(v["path"], perm))
	}

	for _, entity := range *fleetMachineEntities {
		switch entity.Metadata["kubernetes_role"] {
		case "master":
			minionIPAddrs := getMinionIPAddrs(fleetMachineEntities)
			createMasterUnits(&entity, minionIPAddrs, unitPathInfo)
		case "minion":
			createMinionUnits(&entity, unitPathInfo)
		}
	}
	log.Printf("Created systemd unit files for kubernetes deployment")
}
// Usage prints the program's invocation line followed by the registered
// flag defaults; suitable as a flag.Usage replacement.
func Usage() {
	fmt.Printf("Usage: %s\n", os.Args[0])
	flag.PrintDefaults()
}
// SetupFlags registers and parses the cluster-sizing command-line flags,
// returning the expected master and minion counts (defaults: 1 master,
// 2 minions).
func SetupFlags() (int, int) {
	var (
		masterCount = flag.Int("master_count", 1,
			"Expected number of kubernetes masters in cluster")
		minionCount = flag.Int("minion_count", 2,
			"Expected number of kubernetes minions in cluster")
	)
	flag.Parse()
	return *masterCount, *minionCount
}
|
[
6,
7
] |
package main
import (
"bufio"
"fmt"
"os"
"strings"
)
// cipher shifts every ASCII letter in text by shft positions, wrapping
// within its own case. direction -1 encodes (shift forward), +1 decodes
// (shift backward); any other direction leaves the text untouched.
// Non-letter characters always pass through unchanged.
func cipher(text string, shft int, direction int) string {
	delta, alphabet := rune(shft), rune(26)
	out := []rune(text)
	for i, r := range out {
		if direction == -1 {
			// Encoding: shift forward, wrapping letters that would
			// pass 'z'/'Z' back to the start of the alphabet.
			inRange := r >= 'a' && r+delta <= 'z' ||
				r >= 'A' && r+delta <= 'Z'
			wraps := r > 'z'-delta && r <= 'z' ||
				r > 'Z'-delta && r <= 'Z'
			if inRange {
				r += delta
			} else if wraps {
				r += delta - alphabet
			}
		} else if direction == 1 {
			// Decoding: shift backward, wrapping letters that would
			// pass 'a'/'A' to the end of the alphabet.
			inRange := r >= 'a'+delta && r <= 'z' ||
				r >= 'A'+delta && r <= 'Z'
			wraps := r >= 'a' && r < 'a'+delta ||
				r >= 'A' && r < 'A'+delta
			if inRange {
				r -= delta
			} else if wraps {
				r -= delta - alphabet
			}
		}
		out[i] = r
	}
	return string(out)
}
// encode applies a forward Caesar shift of `shift` positions to text.
func encode(text string, shift int) string { return cipher(text, shift, -1) }

// decode reverses a Caesar shift of `shift` positions on text.
func decode(text string, shift int) string { return cipher(text, shift, +1) }
// main reads a ciphertext line and five comma-separated plaintext words
// from stdin, brute-forces the Caesar shift (0..25) under which every
// encoded word appears in the text, and prints the shift and decoded text,
// or a failure message if no shift matches.
func main() {
	reader := bufio.NewReader(os.Stdin)

	fmt.Println("Enter your text:")
	text, _ := reader.ReadString('\n')
	text = strings.Replace(text, "\n", "", -1)

	fmt.Println("Enter 5 source words (comma-separated):")
	words, _ := reader.ReadString('\n')
	words = strings.Replace(words, "\n", "", -1)
	wordsList := strings.Split(words, ",")

	// Try each shift until every encoded source word occurs in the text.
	found := false
	shift := -1
	for shift < 25 && !found {
		shift++
		found = true
		for _, candidate := range wordsList {
			candidate = strings.TrimSpace(candidate)
			if !strings.Contains(text, encode(candidate, shift)) {
				found = false
			}
		}
	}

	if found {
		fmt.Println("Shift:", shift)
		fmt.Println(decode(text, shift))
	} else {
		fmt.Println("Decoding failed!")
	}
}
|
[
0,
6
] |
package main
import (
"flag"
"fmt"
oi "github.com/reiver/go-oi"
telnet "github.com/reiver/go-telnet"
termbox "github.com/nsf/termbox-go"
)
// main parses -host/-port flags, initializes termbox so individual
// keypresses can be read without line buffering, and dials the telnet
// server, driving the session through caller.CallTELNET.
func main() {
	host := flag.String("host", "localhost", "host")
	port := flag.String("port", "23", "port")
	flag.Parse()
	// Init termbox to use the getchar-style PollEvent keyboard input.
	if err := termbox.Init(); err != nil {
		panic(err)
	}
	defer termbox.Close()
	termbox.SetCursor(0, 0)
	termbox.HideCursor()
	// Blocks for the whole session; a dial/session error is printed
	// rather than treated as fatal.
	if err := telnet.DialToAndCall(*host+":"+*port, caller{}); err != nil {
		fmt.Println(err)
	}
}
// caller implements the go-telnet caller interface; all session logic
// lives in CallTELNET.
type caller struct{}

// CallTELNET drives the interactive session: one goroutine forwards
// keystrokes to the server, another prints server output, and the method
// itself blocks until Ctrl-C is pressed.
func (c caller) CallTELNET(ctx telnet.Context, w telnet.Writer, r telnet.Reader) {
	quit := make(chan struct{}, 1)
	defer close(quit)
	// Write to telnet server
	readBlocker := make(chan struct{}, 1)
	defer close(readBlocker)
	go func() {
		for {
			switch ev := termbox.PollEvent(); ev.Type {
			case termbox.EventKey:
				if ev.Key == termbox.KeyCtrlC {
					quit <- struct{}{}
					return
				}
				if isASCII(ev.Ch) {
					// Local echo, then send the single byte to the server.
					fmt.Printf("%c", ev.Ch)
					oi.LongWrite(w, []byte{byte(ev.Ch)})
					// Permit the reader goroutine to print exactly one
					// byte of server output per keystroke sent.
					readBlocker <- struct{}{}
				}
			}
		}
	}()
	// Read from telnet server
	go func() {
		var buffer [1]byte
		p := buffer[:]
		for {
			<-readBlocker
			// NOTE(review): the Read error is ignored — on a closed
			// connection this loop would keep printing the stale byte;
			// confirm whether go-telnet guarantees otherwise.
			r.Read(p)
			fmt.Printf("%c", p[0])
		}
	}()
	// Both goroutines outlive this function (no cancellation path); main
	// exits right after this call, so the leak is bounded in practice.
	<-quit
}
// isASCII reports whether r is a printable ASCII character (space through
// '~').
//
// BUG FIX: the original only checked the upper bound (r <= '~'), so it
// returned true for control characters and for the zero rune that termbox
// reports in ev.Ch on special-key events — causing stray NUL bytes to be
// echoed locally and sent to the server on every arrow/function keypress.
func isASCII(r rune) bool {
	return r >= ' ' && r <= '~'
}
|
[
7
] |
// Code generated by mockery v2.24.0. DO NOT EDIT.
package mocks
import (
taskfile "github.com/go-task/task/v3/taskfile"
mock "github.com/stretchr/testify/mock"
)
// SourcesCheckable is an autogenerated mock type for the SourcesCheckable type
type SourcesCheckable struct {
mock.Mock
}
type SourcesCheckable_Expecter struct {
mock *mock.Mock
}
func (_m *SourcesCheckable) EXPECT() *SourcesCheckable_Expecter {
return &SourcesCheckable_Expecter{mock: &_m.Mock}
}
// IsUpToDate provides a mock function with given fields: t
func (_m *SourcesCheckable) IsUpToDate(t *taskfile.Task) (bool, error) {
ret := _m.Called(t)
var r0 bool
var r1 error
if rf, ok := ret.Get(0).(func(*taskfile.Task) (bool, error)); ok {
return rf(t)
}
if rf, ok := ret.Get(0).(func(*taskfile.Task) bool); ok {
r0 = rf(t)
} else {
r0 = ret.Get(0).(bool)
}
if rf, ok := ret.Get(1).(func(*taskfile.Task) error); ok {
r1 = rf(t)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// SourcesCheckable_IsUpToDate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsUpToDate'
type SourcesCheckable_IsUpToDate_Call struct {
*mock.Call
}
// IsUpToDate is a helper method to define mock.On call
// - t *taskfile.Task
func (_e *SourcesCheckable_Expecter) IsUpToDate(t interface{}) *SourcesCheckable_IsUpToDate_Call {
return &SourcesCheckable_IsUpToDate_Call{Call: _e.mock.On("IsUpToDate", t)}
}
func (_c *SourcesCheckable_IsUpToDate_Call) Run(run func(t *taskfile.Task)) *SourcesCheckable_IsUpToDate_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(*taskfile.Task))
})
return _c
}
func (_c *SourcesCheckable_IsUpToDate_Call) Return(_a0 bool, _a1 error) *SourcesCheckable_IsUpToDate_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *SourcesCheckable_IsUpToDate_Call) RunAndReturn(run func(*taskfile.Task) (bool, error)) *SourcesCheckable_IsUpToDate_Call {
_c.Call.Return(run)
return _c
}
// Kind provides a mock function with given fields:
func (_m *SourcesCheckable) Kind() string {
ret := _m.Called()
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// SourcesCheckable_Kind_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Kind'
type SourcesCheckable_Kind_Call struct {
*mock.Call
}
// Kind is a helper method to define mock.On call
func (_e *SourcesCheckable_Expecter) Kind() *SourcesCheckable_Kind_Call {
return &SourcesCheckable_Kind_Call{Call: _e.mock.On("Kind")}
}
func (_c *SourcesCheckable_Kind_Call) Run(run func()) *SourcesCheckable_Kind_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *SourcesCheckable_Kind_Call) Return(_a0 string) *SourcesCheckable_Kind_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *SourcesCheckable_Kind_Call) RunAndReturn(run func() string) *SourcesCheckable_Kind_Call {
_c.Call.Return(run)
return _c
}
// OnError provides a mock function with given fields: t
func (_m *SourcesCheckable) OnError(t *taskfile.Task) error {
ret := _m.Called(t)
var r0 error
if rf, ok := ret.Get(0).(func(*taskfile.Task) error); ok {
r0 = rf(t)
} else {
r0 = ret.Error(0)
}
return r0
}
// SourcesCheckable_OnError_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnError'
type SourcesCheckable_OnError_Call struct {
*mock.Call
}
// OnError is a helper method to define mock.On call
// - t *taskfile.Task
func (_e *SourcesCheckable_Expecter) OnError(t interface{}) *SourcesCheckable_OnError_Call {
return &SourcesCheckable_OnError_Call{Call: _e.mock.On("OnError", t)}
}
func (_c *SourcesCheckable_OnError_Call) Run(run func(t *taskfile.Task)) *SourcesCheckable_OnError_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(*taskfile.Task))
})
return _c
}
func (_c *SourcesCheckable_OnError_Call) Return(_a0 error) *SourcesCheckable_OnError_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *SourcesCheckable_OnError_Call) RunAndReturn(run func(*taskfile.Task) error) *SourcesCheckable_OnError_Call {
_c.Call.Return(run)
return _c
}
// Value provides a mock function with given fields: t
func (_m *SourcesCheckable) Value(t *taskfile.Task) (interface{}, error) {
ret := _m.Called(t)
var r0 interface{}
var r1 error
if rf, ok := ret.Get(0).(func(*taskfile.Task) (interface{}, error)); ok {
return rf(t)
}
if rf, ok := ret.Get(0).(func(*taskfile.Task) interface{}); ok {
r0 = rf(t)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(interface{})
}
}
if rf, ok := ret.Get(1).(func(*taskfile.Task) error); ok {
r1 = rf(t)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// SourcesCheckable_Value_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Value'
type SourcesCheckable_Value_Call struct {
*mock.Call
}
// Value is a helper method to define mock.On call
// - t *taskfile.Task
func (_e *SourcesCheckable_Expecter) Value(t interface{}) *SourcesCheckable_Value_Call {
return &SourcesCheckable_Value_Call{Call: _e.mock.On("Value", t)}
}
func (_c *SourcesCheckable_Value_Call) Run(run func(t *taskfile.Task)) *SourcesCheckable_Value_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(*taskfile.Task))
})
return _c
}
func (_c *SourcesCheckable_Value_Call) Return(_a0 interface{}, _a1 error) *SourcesCheckable_Value_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *SourcesCheckable_Value_Call) RunAndReturn(run func(*taskfile.Task) (interface{}, error)) *SourcesCheckable_Value_Call {
_c.Call.Return(run)
return _c
}
// mockConstructorTestingTNewSourcesCheckable is the minimal subset of
// *testing.T the constructor needs: mock bookkeeping plus Cleanup.
type mockConstructorTestingTNewSourcesCheckable interface {
	mock.TestingT
	Cleanup(func())
}

// NewSourcesCheckable creates a new instance of SourcesCheckable. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewSourcesCheckable(t mockConstructorTestingTNewSourcesCheckable) *SourcesCheckable {
	// NOTE: the local variable shadows the imported `mock` package;
	// harmless here because the package is not referenced below.
	mock := &SourcesCheckable{}
	mock.Mock.Test(t)
	t.Cleanup(func() { mock.AssertExpectations(t) })
	return mock
}
|
[
4
] |
package view
import (
"github.com/caos/zitadel/internal/iam/repository/view"
"github.com/caos/zitadel/internal/iam/repository/view/model"
)
// MailTemplateByAggregateID loads the mail template view stored for the
// given aggregate from the named view table. Note the argument order to
// the repository call is (db, table, aggregateID) — reversed relative to
// this method's parameters.
func (v *View) MailTemplateByAggregateID(aggregateID string, mailTemplateTableVar string) (*model.MailTemplateView, error) {
	return view.GetMailTemplateByAggregateID(v.Db, mailTemplateTableVar, aggregateID)
}
|
[
6
] |
// Code generated by mockery v1.0.0. DO NOT EDIT.
package mocks
import (
libcnb "github.com/buildpacks/libcnb"
libpak "github.com/paketo-buildpacks/libpak"
mock "github.com/stretchr/testify/mock"
)
// KeyProvider is an autogenerated mock type for the KeyProvider type
// (mockery v1.0.0 — regenerate rather than hand-edit).
type KeyProvider struct {
	mock.Mock
}
// Detect provides a mock function with given fields: context, result
func (_m *KeyProvider) Detect(context libcnb.DetectContext, result *libcnb.DetectResult) error {
ret := _m.Called(context, result)
var r0 error
if rf, ok := ret.Get(0).(func(libcnb.DetectContext, *libcnb.DetectResult) error); ok {
r0 = rf(context, result)
} else {
r0 = ret.Error(0)
}
return r0
}
// Key provides a mock function with given fields: context
func (_m *KeyProvider) Key(context libcnb.BuildContext) ([]byte, error) {
ret := _m.Called(context)
var r0 []byte
if rf, ok := ret.Get(0).(func(libcnb.BuildContext) []byte); ok {
r0 = rf(context)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]byte)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(libcnb.BuildContext) error); ok {
r1 = rf(context)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Participate provides a mock function with given fields: resolver
func (_m *KeyProvider) Participate(resolver libpak.PlanEntryResolver) (bool, error) {
ret := _m.Called(resolver)
var r0 bool
if rf, ok := ret.Get(0).(func(libpak.PlanEntryResolver) bool); ok {
r0 = rf(resolver)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(libpak.PlanEntryResolver) error); ok {
r1 = rf(resolver)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
|
[
4
] |
/**
Copyright 2019 Dayou Du
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
**/
package main
import (
"bytes"
"encoding/gob"
"log"
context "golang.org/x/net/context"
"github.com/nyu-distributed-systems-fa18/raft-extension/pb"
)
// InputChannelType is the message sent to the KV store's command loop:
// a command plus the channel on which its single result is delivered.
type InputChannelType struct {
	command  pb.Command     // operation to execute
	response chan pb.Result // receives exactly one result per command
}

// KVStore is the key-value store service. Commands arrive on C; the
// *Internal helpers assume single-threaded access to store (see their
// comments), so no locking is used here.
type KVStore struct {
	C     chan InputChannelType
	store map[string]string
}
// PeerJoin submits a PeerJoin command to the store's command loop and
// blocks until the result arrives.
func (s *KVStore) PeerJoin(ctx context.Context, peer *pb.Peer) (*pb.Result, error) {
	respCh := make(chan pb.Result)
	cmd := pb.Command{Operation: pb.Op_PeerJoin, Arg: &pb.Command_PeerJoinLeave{PeerJoinLeave: peer}}
	s.C <- InputChannelType{command: cmd, response: respCh}
	log.Printf("Waiting for peer join response")
	res := <-respCh
	return &res, nil
}
// PeerLeave submits a PeerLeave command to the store's command loop and
// blocks until the result arrives.
func (s *KVStore) PeerLeave(ctx context.Context, peer *pb.Peer) (*pb.Result, error) {
	c := make(chan pb.Result)
	r := pb.Command{Operation: pb.Op_PeerLeave, Arg: &pb.Command_PeerJoinLeave{PeerJoinLeave: peer}}
	s.C <- InputChannelType{command: r, response: c}
	// Fixed copy-paste: this previously logged "peer join".
	log.Printf("Waiting for peer leave response")
	result := <-c
	return &result, nil
}
// Get enqueues a GET command on the command loop and blocks for the result.
func (s *KVStore) Get(ctx context.Context, key *pb.Key) (*pb.Result, error) {
	// Create a channel to receive this request's single result.
	c := make(chan pb.Result)
	// Create a request
	r := pb.Command{Operation: pb.Op_GET, Arg: &pb.Command_Get{Get: key}}
	// Send request over the channel
	s.C <- InputChannelType{command: r, response: c}
	log.Printf("Waiting for get response")
	result := <-c
	// A missing key still yields a result: GetInternal reads the map's
	// zero value ("") for non-existent keys.
	return &result, nil
}

// Set enqueues a SET command on the command loop and blocks for the result.
func (s *KVStore) Set(ctx context.Context, in *pb.KeyValue) (*pb.Result, error) {
	// Create a channel to receive this request's single result.
	c := make(chan pb.Result)
	// Create a request
	r := pb.Command{Operation: pb.Op_SET, Arg: &pb.Command_Set{Set: in}}
	// Send request over the channel
	s.C <- InputChannelType{command: r, response: c}
	log.Printf("Waiting for set response")
	result := <-c
	// (The map-zero-value remark that was copy-pasted here applied only
	// to Get; removed.)
	return &result, nil
}

// Clear enqueues a CLEAR command on the command loop and blocks for the
// result; the store map is replaced wholesale by ClearInternal.
func (s *KVStore) Clear(ctx context.Context, in *pb.Empty) (*pb.Result, error) {
	// Create a channel to receive this request's single result.
	c := make(chan pb.Result)
	// Create a request
	r := pb.Command{Operation: pb.Op_CLEAR, Arg: &pb.Command_Clear{Clear: in}}
	// Send request over the channel
	s.C <- InputChannelType{command: r, response: c}
	log.Printf("Waiting for clear response")
	result := <-c
	return &result, nil
}

// CAS enqueues a compare-and-swap command on the command loop and blocks
// for the result; see CasInternal for the comparison semantics.
func (s *KVStore) CAS(ctx context.Context, in *pb.CASArg) (*pb.Result, error) {
	// Create a channel to receive this request's single result.
	c := make(chan pb.Result)
	// Create a request
	r := pb.Command{Operation: pb.Op_CAS, Arg: &pb.Command_Cas{Cas: in}}
	// Send request over the channel
	s.C <- InputChannelType{command: r, response: c}
	log.Printf("Waiting for CAS response")
	result := <-c
	return &result, nil
}
// GetInternal generates the result for a get request. This function assumes
// it is called from a single thread of execution and hence does not handle
// races. Non-existent keys yield the map's zero value ("").
func (s *KVStore) GetInternal(k string) pb.Result {
	log.Printf("Executing GetInternal, with key: %v", k)
	v := s.store[k]
	return pb.Result{Result: &pb.Result_Kv{Kv: &pb.KeyValue{Key: k, Value: v}}}
}

// SetInternal stores v under k and echoes the pair back as the result.
// Assumes single-threaded access; does not handle race conditions.
func (s *KVStore) SetInternal(k string, v string) pb.Result {
	log.Printf("Executing SetInternal, with key: %v, value: %v", k, v)
	s.store[k] = v
	return pb.Result{Result: &pb.Result_Kv{Kv: &pb.KeyValue{Key: k, Value: v}}}
}

// ClearInternal empties the store by replacing the map. Assumes no racing
// calls.
func (s *KVStore) ClearInternal() pb.Result {
	log.Printf("Executing ClearInternal")
	s.store = make(map[string]string)
	return pb.Result{Result: &pb.Result_S{S: &pb.Success{}}}
}
// CasInternal performs a compare-and-swap, assuming no races: if the value
// stored under k equals v, it is replaced with vn and the new pair is
// returned; otherwise the store is untouched and the current pair is
// returned so the caller can see what it lost to.
func (s *KVStore) CasInternal(k string, v string, vn string) pb.Result {
	log.Printf("Executing CasInternal, with key: %v, value: %v, v_new: %v", k, v, vn)
	current := s.store[k]
	if current != v {
		// Mismatch: report the value actually stored; no mutation.
		return pb.Result{Result: &pb.Result_Kv{Kv: &pb.KeyValue{Key: k, Value: current}}}
	}
	// Match: install the new value and echo it back.
	s.store[k] = vn
	return pb.Result{Result: &pb.Result_Kv{Kv: &pb.KeyValue{Key: k, Value: vn}}}
}
// CreateSnapshot serializes the entire store with gob and returns the
// bytes. Used for creating snapshots and shard migration.
func (s *KVStore) CreateSnapshot() []byte {
	log.Printf("Dump out all the values from KV store")
	var buf bytes.Buffer
	// The original discarded the Encode error, silently producing a
	// truncated/empty snapshot on failure; a bad snapshot would corrupt
	// state on restore, so fail loudly instead.
	if err := gob.NewEncoder(&buf).Encode(s.store); err != nil {
		log.Fatalf("Failed to encode KV store snapshot: %v", err)
	}
	return buf.Bytes()
}
// RestoreSnapshot replaces the store's contents with the given gob-encoded
// snapshot.
// WARNING: this force-cleans the store.
func (s *KVStore) RestoreSnapshot(snapshot []byte) {
	log.Printf("Restore all value from a snapshot")
	// BUG FIX: gob merges decoded entries into a non-nil map, so the
	// original never actually "force cleaned" — keys absent from the
	// snapshot survived. Reset the map first so the documented semantics
	// hold, and fail loudly on a corrupt snapshot instead of ignoring it.
	s.store = make(map[string]string)
	if err := gob.NewDecoder(bytes.NewBuffer(snapshot)).Decode(&s.store); err != nil {
		log.Fatalf("Failed to decode KV store snapshot: %v", err)
	}
}
// HandleCommand dispatches a single queued command to the matching
// *Internal helper and sends the result back on the command's response
// channel. Intended to be called from the one goroutine that owns the
// store, preserving the single-threaded assumption of the helpers.
func (s *KVStore) HandleCommand(op InputChannelType) {
	switch c := op.command; c.Operation {
	case pb.Op_GET:
		arg := c.GetGet()
		result := s.GetInternal(arg.Key)
		op.response <- result
	case pb.Op_SET:
		arg := c.GetSet()
		result := s.SetInternal(arg.Key, arg.Value)
		op.response <- result
	case pb.Op_CLEAR:
		result := s.ClearInternal()
		op.response <- result
	case pb.Op_CAS:
		arg := c.GetCas()
		// CASArg carries the expected pair in Kv and the replacement in Value.
		result := s.CasInternal(arg.Kv.Key, arg.Kv.Value, arg.Value.Value)
		op.response <- result
	case pb.Op_PeerJoin:
		// Membership changes are acknowledged here; the store itself has
		// nothing to mutate for them.
		result := pb.Result{Result: &pb.Result_S{S: &pb.Success{}}}
		op.response <- result
	case pb.Op_PeerLeave:
		result := pb.Result{Result: &pb.Result_S{S: &pb.Success{}}}
		op.response <- result
	default:
		// Sending a blank response to just free things up, but we don't know how to make progress here.
		op.response <- pb.Result{}
		log.Fatalf("Unrecognized operation %v", c)
	}
}
|
[
6
] |
// Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"fmt"
kubeApiCore "k8s.io/api/core/v1"
kubeExtClient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
kubeApiMeta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/dynamic"
kubeClient "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth" // Needed for auth
"k8s.io/client-go/rest"
)
// Accessor is a helper for accessing Kubernetes programmatically. It bundles some of the high-level
// operations that is frequently used by the test framework.
type Accessor struct {
	restConfig *rest.Config             // rest client configuration shared by all clients below
	ctl        *kubectl                 // shells out for logs/describe/dump operations
	set        *kubeClient.Clientset    // typed clientset for built-in resources
	extSet     *kubeExtClient.Clientset // clientset for apiextensions (CRD) resources
	dynClient  dynamic.Interface        // dynamic client for arbitrary resources
}
// NewAccessor returns a new instance of an accessor, building the rest
// config from kubeConfig and wiring up the typed, extension, and dynamic
// clients. baseWorkDir is handed to the kubectl wrapper for its output.
func NewAccessor(kubeConfig string, baseWorkDir string) (*Accessor, error) {
	restConfig, err := BuildClientConfig(kubeConfig, "")
	if err != nil {
		return nil, fmt.Errorf("failed to create rest config. %v", err)
	}
	// Target the core API group with the standard non-converting codecs.
	restConfig.APIPath = "/api"
	restConfig.GroupVersion = &kubeApiCore.SchemeGroupVersion
	restConfig.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: scheme.Codecs}
	// Typed clientset for built-in resources.
	set, err := kubeClient.NewForConfig(restConfig)
	if err != nil {
		return nil, err
	}
	// Extension clientset (CRD management).
	extSet, err := kubeExtClient.NewForConfig(restConfig)
	if err != nil {
		return nil, err
	}
	// Dynamic client for arbitrary/custom resources.
	dynClient, err := dynamic.NewForConfig(restConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create dynamic client: %v", err)
	}
	return &Accessor{
		restConfig: restConfig,
		ctl: &kubectl{
			kubeConfig: kubeConfig,
			baseDir:    baseWorkDir,
		},
		set:       set,
		extSet:    extSet,
		dynClient: dynClient,
	}, nil
}
// GetPods returns kubectl's listing of the named pod(s) in namespace ns.
func (a *Accessor) GetPods(pod, ns string) (string, error) {
	return a.ctl.pods(pod, ns)
}

// Logs calls the logs command for the specified pod, with -c, if container is specified.
func (a *Accessor) Logs(namespace string, pod string, container string, previousLog bool) (string, error) {
	return a.ctl.logs(namespace, pod, container, previousLog)
}

// DumpInfo writes kubectl cluster-debug output for namespace ns under outputDir.
func (a *Accessor) DumpInfo(outputDir, ns string) (string, error) {
	return a.ctl.dumpInfo(outputDir, ns)
}

// GetNamespaces lists all namespaces in the cluster via the typed clientset.
func (a *Accessor) GetNamespaces() ([]kubeApiCore.Namespace, error) {
	var opts kubeApiMeta.ListOptions
	n, err := a.set.CoreV1().Namespaces().List(opts)
	if err != nil {
		return nil, err
	}
	return n.Items, nil
}

// DescribePod returns `kubectl describe` output for a pod.
func (a *Accessor) DescribePod(pod, ns string) (string, error) {
	return a.ctl.describePod(pod, ns)
}

// DescribeCM returns `kubectl describe` output for a config map.
func (a *Accessor) DescribeCM(cm, ns string) (string, error) {
	return a.ctl.describeCm(cm, ns)
}

// DescribeSVC returns `kubectl describe` output for a service.
func (a *Accessor) DescribeSVC(svc, ns string) (string, error) {
	return a.ctl.describeSVC(svc, ns)
}

// DescribeCRD returns `kubectl describe` output for a custom resource definition.
func (a *Accessor) DescribeCRD(crd, ns string) (string, error) {
	return a.ctl.describeCRD(crd, ns)
}

// DescribeCR returns `kubectl describe` output for an instance of a custom resource.
func (a *Accessor) DescribeCR(cr, crd, ns string) (string, error) {
	return a.ctl.describeCR(cr, crd, ns)
}
|
[
6
] |
package mnemosynetest
import "github.com/piotrkowalczuk/mnemosyne/mnemosynerpc"
import "github.com/stretchr/testify/mock"
import google_protobuf1 "github.com/golang/protobuf/ptypes/empty"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
// SessionManagerClient is a testify-based mock of the
// mnemosynerpc.SessionManagerClient gRPC client interface
// (mockery-generated style; keep in sync with the interface).
type SessionManagerClient struct {
	mock.Mock
}
// Context provides a mock function with given fields: ctx, in, opts
func (_m *SessionManagerClient) Context(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*mnemosynerpc.ContextResponse, error) {
ret := _m.Called(ctx, in, opts)
var r0 *mnemosynerpc.ContextResponse
if rf, ok := ret.Get(0).(func(context.Context, *google_protobuf1.Empty, ...grpc.CallOption) *mnemosynerpc.ContextResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*mnemosynerpc.ContextResponse)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *google_protobuf1.Empty, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Get provides a mock function with given fields: ctx, in, opts
func (_m *SessionManagerClient) Get(ctx context.Context, in *mnemosynerpc.GetRequest, opts ...grpc.CallOption) (*mnemosynerpc.GetResponse, error) {
ret := _m.Called(ctx, in, opts)
var r0 *mnemosynerpc.GetResponse
if rf, ok := ret.Get(0).(func(context.Context, *mnemosynerpc.GetRequest, ...grpc.CallOption) *mnemosynerpc.GetResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*mnemosynerpc.GetResponse)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *mnemosynerpc.GetRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// List provides a mock function with given fields: ctx, in, opts
func (_m *SessionManagerClient) List(ctx context.Context, in *mnemosynerpc.ListRequest, opts ...grpc.CallOption) (*mnemosynerpc.ListResponse, error) {
ret := _m.Called(ctx, in, opts)
var r0 *mnemosynerpc.ListResponse
if rf, ok := ret.Get(0).(func(context.Context, *mnemosynerpc.ListRequest, ...grpc.CallOption) *mnemosynerpc.ListResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*mnemosynerpc.ListResponse)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *mnemosynerpc.ListRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Exists provides a mock function with given fields: ctx, in, opts
func (_m *SessionManagerClient) Exists(ctx context.Context, in *mnemosynerpc.ExistsRequest, opts ...grpc.CallOption) (*mnemosynerpc.ExistsResponse, error) {
	// opts is recorded as a single []grpc.CallOption argument.
	ret := _m.Called(ctx, in, opts)
	var r0 *mnemosynerpc.ExistsResponse
	// Prefer a registered factory func; otherwise use the static (possibly nil) value.
	if rf, ok := ret.Get(0).(func(context.Context, *mnemosynerpc.ExistsRequest, ...grpc.CallOption) *mnemosynerpc.ExistsResponse); ok {
		r0 = rf(ctx, in, opts...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*mnemosynerpc.ExistsResponse)
		}
	}
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *mnemosynerpc.ExistsRequest, ...grpc.CallOption) error); ok {
		r1 = rf(ctx, in, opts...)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}
// Start provides a mock function with given fields: ctx, in, opts
func (_m *SessionManagerClient) Start(ctx context.Context, in *mnemosynerpc.StartRequest, opts ...grpc.CallOption) (*mnemosynerpc.StartResponse, error) {
	// opts is recorded as a single []grpc.CallOption argument.
	ret := _m.Called(ctx, in, opts)
	var r0 *mnemosynerpc.StartResponse
	// Prefer a registered factory func; otherwise use the static (possibly nil) value.
	if rf, ok := ret.Get(0).(func(context.Context, *mnemosynerpc.StartRequest, ...grpc.CallOption) *mnemosynerpc.StartResponse); ok {
		r0 = rf(ctx, in, opts...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*mnemosynerpc.StartResponse)
		}
	}
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *mnemosynerpc.StartRequest, ...grpc.CallOption) error); ok {
		r1 = rf(ctx, in, opts...)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}
// Abandon provides a mock function with given fields: ctx, in, opts
func (_m *SessionManagerClient) Abandon(ctx context.Context, in *mnemosynerpc.AbandonRequest, opts ...grpc.CallOption) (*mnemosynerpc.AbandonResponse, error) {
	// opts is recorded as a single []grpc.CallOption argument.
	ret := _m.Called(ctx, in, opts)
	var r0 *mnemosynerpc.AbandonResponse
	// Prefer a registered factory func; otherwise use the static (possibly nil) value.
	if rf, ok := ret.Get(0).(func(context.Context, *mnemosynerpc.AbandonRequest, ...grpc.CallOption) *mnemosynerpc.AbandonResponse); ok {
		r0 = rf(ctx, in, opts...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*mnemosynerpc.AbandonResponse)
		}
	}
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *mnemosynerpc.AbandonRequest, ...grpc.CallOption) error); ok {
		r1 = rf(ctx, in, opts...)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}
// SetValue provides a mock function with given fields: ctx, in, opts
func (_m *SessionManagerClient) SetValue(ctx context.Context, in *mnemosynerpc.SetValueRequest, opts ...grpc.CallOption) (*mnemosynerpc.SetValueResponse, error) {
	// opts is recorded as a single []grpc.CallOption argument.
	ret := _m.Called(ctx, in, opts)
	var r0 *mnemosynerpc.SetValueResponse
	// Prefer a registered factory func; otherwise use the static (possibly nil) value.
	if rf, ok := ret.Get(0).(func(context.Context, *mnemosynerpc.SetValueRequest, ...grpc.CallOption) *mnemosynerpc.SetValueResponse); ok {
		r0 = rf(ctx, in, opts...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*mnemosynerpc.SetValueResponse)
		}
	}
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *mnemosynerpc.SetValueRequest, ...grpc.CallOption) error); ok {
		r1 = rf(ctx, in, opts...)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}
// Delete provides a mock function with given fields: ctx, in, opts
func (_m *SessionManagerClient) Delete(ctx context.Context, in *mnemosynerpc.DeleteRequest, opts ...grpc.CallOption) (*mnemosynerpc.DeleteResponse, error) {
	// opts is recorded as a single []grpc.CallOption argument.
	ret := _m.Called(ctx, in, opts)
	var r0 *mnemosynerpc.DeleteResponse
	// Prefer a registered factory func; otherwise use the static (possibly nil) value.
	if rf, ok := ret.Get(0).(func(context.Context, *mnemosynerpc.DeleteRequest, ...grpc.CallOption) *mnemosynerpc.DeleteResponse); ok {
		r0 = rf(ctx, in, opts...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*mnemosynerpc.DeleteResponse)
		}
	}
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *mnemosynerpc.DeleteRequest, ...grpc.CallOption) error); ok {
		r1 = rf(ctx, in, opts...)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}
|
[
4
] |
package main
import (
"flag"
"fmt"
"net"
"os"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/olekukonko/tablewriter"
)
func main() {
var region string
var defaultVPC bool
flag.BoolVar(&defaultVPC, "default-vpc", false, "Include Default VPC(s).")
flag.StringVar(®ion, "region", "us-west-2", "AWS Region.")
flag.Parse()
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Name", "Subnet", "Used", "Available", "Size", "Sum Used", "Sum Max"})
var sumUsed uint64
var sumMax uint64
sess, err := session.NewSession(&aws.Config{
Region: ®ion},
)
svc := ec2.New(sess)
vpcInput := ec2.DescribeVpcsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("isDefault"),
Values: []*string{
aws.String(strconv.FormatBool(defaultVPC)),
},
},
}}
result, err := svc.DescribeVpcs(&vpcInput)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
for _, v := range result.Vpcs {
input := &ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("vpc-id"),
Values: []*string{
v.VpcId,
},
},
}}
result, err := svc.DescribeSubnets(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
for _, v := range result.Subnets {
used, total := parseResults(*v.CidrBlock, *v.AvailableIpAddressCount)
sumUsed += used
sumMax += total
var tag string
for _, v := range v.Tags {
if *v.Key == "Name" {
tag = *v.Value
}
}
table.Append([]string{tag, *v.CidrBlock, strconv.FormatUint(used, 10), strconv.FormatInt(*v.AvailableIpAddressCount, 10), strconv.FormatUint(total, 10), strconv.FormatUint(sumUsed, 10), strconv.FormatUint(sumMax, 10)})
}
}
table.Render()
}
// parseResults converts a subnet CIDR plus its advertised available-address
// count into (used, total) address counts.
//
// On an unparseable CIDR it reports the problem and returns (0, 0).
func parseResults(CidrBlock string, AvailableIpAddressCount int64) (uint64, uint64) {
	_, netCIDR, err := net.ParseCIDR(CidrBlock)
	if err != nil {
		// BUG FIX: the original built an error with fmt.Errorf, discarded it,
		// and then dereferenced the nil netCIDR in AddressCount — a panic.
		fmt.Fprintf(os.Stderr, "failed to parse CIDR %q: %v\n", CidrBlock, err)
		return 0, 0
	}
	count := AddressCount(netCIDR)
	return count - uint64(AvailableIpAddressCount), count
}

// AddressCount returns the number of distinct addresses within the given
// CIDR range (including the network and broadcast addresses).
//
// Since the result is a uint64, this function returns meaningful information
// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65.
func AddressCount(network *net.IPNet) uint64 {
	prefixLen, bits := network.Mask.Size()
	return 1 << (uint64(bits) - uint64(prefixLen))
}
|
[
2,
7
] |
package main
import (
"fmt"
"sort"
)
// main demonstrates maximumRemovals on the LeetCode 1898 sample input;
// the expected output is 2.
func main() {
	fmt.Println(maximumRemovals("abcacb", "ab", []int{3, 1, 0}))
}
// maximumRemovals returns the maximum k such that after removing the
// characters of s at positions removable[0..k-1], p is still a subsequence
// of s (LeetCode 1898 "Maximum Number of Removable Characters").
//
// It binary-searches with sort.Search: the predicate reports whether removing
// the first index+1 positions breaks the subsequence property; the smallest
// such index is exactly the answer (and n when p always survives).
func maximumRemovals(s string, p string, removable []int) int {
	n := len(removable)
	return sort.Search(n, func(index int) bool {
		// Positions removed so far; struct{} values keep the set allocation-free
		// per entry, and the capacity hint avoids rehashing.
		// (The original sized the loop with len(removable[:index+1]) — that is
		// just index+1 — and used a heavier map[int]bool.)
		removed := make(map[int]struct{}, index+1)
		for i := 0; i <= index; i++ {
			removed[removable[i]] = struct{}{}
		}
		// Greedy two-pointer subsequence check that skips removed positions.
		i, j := 0, 0
		for i < len(p) && j < len(s) {
			if _, gone := removed[j]; !gone && p[i] == s[j] {
				i++
			}
			j++
		}
		return i != len(p)
	})
}
|
[
6
] |
package gin
import (
"fmt"
"reflect"
"github.com/gin-gonic/gin/binding"
"github.com/go-playground/locales/en"
universalTranslator "github.com/go-playground/universal-translator"
validator "github.com/go-playground/validator/v10"
enTranslations "github.com/go-playground/validator/v10/translations/en"
"github.com/spf13/cast"
)
// ValidateFuncs bundles a custom validation: the struct-tag name it is
// registered under, the validator implementation, and the translated error
// message template shown to API clients.
type ValidateFuncs struct {
	TagName string
	Fn validator.Func
	Message string
}

// Trans is the shared English translator used to render validation errors.
var Trans universalTranslator.Translator
// InitValidator registers the supplied custom validation functions on gin's
// underlying validator engine and then wires up their English translations
// via initTrans. It returns the first registration error encountered.
func InitValidator(validateFuncs []*ValidateFuncs) error {
	// gofmt cleanup: the original had `range validateFuncs{` / `err!=nil{`
	// and three needlessly pre-declared variables.
	if v, ok := binding.Validator.Engine().(*validator.Validate); ok {
		for _, f := range validateFuncs {
			fmt.Printf("RegisterValidation func:%s\n", f.TagName)
			if err := v.RegisterValidation(f.TagName, f.Fn); err != nil {
				return err
			}
		}
	}
	return initTrans(validateFuncs)
}
// initTrans creates the English universal translator, registers the default
// validator.v10 translations plus one translation per custom validation, and
// makes reported field names come from each struct field's `json` tag.
func initTrans(validateFuns []*ValidateFuncs) (err error) {
	local := "en"
	var ok bool
	var v *validator.Validate
	if v, ok = binding.Validator.Engine().(*validator.Validate); ok {
		enT := en.New()
		// Fallback locale and supported locale are both English.
		uni := universalTranslator.New(enT, enT)
		Trans, ok = uni.GetTranslator(local)
		if !ok {
			return fmt.Errorf("uni.GetTranslator(%s) failed", local)
		}
		err = enTranslations.RegisterDefaultTranslations(v, Trans)
		if err != nil {
			return err
		}
		// gofmt cleanup: original wrote `range validateFuns{`.
		for _, f := range validateFuns {
			fmt.Printf("RegisterTranslation func:%s message:%s\n", f.TagName, f.Message)
			err = v.RegisterTranslation(
				f.TagName,
				Trans,
				registerTranslator(f.TagName, f.Message),
				translate,
			)
			if err != nil {
				return err
			}
		}
		// Report field names using the `json` tag so error messages match the
		// wire format clients actually see.
		v.RegisterTagNameFunc(func(fld reflect.StructField) string {
			return fld.Tag.Get("json")
		})
	}
	return nil
}
// registerTranslator builds a RegisterTranslationsFunc that adds a single
// tag -> message template pair to the given translator.
func registerTranslator(tag string, msg string) validator.RegisterTranslationsFunc {
	return func(trans universalTranslator.Translator) error {
		return trans.Add(tag, msg, false)
	}
}
// translate renders a validator.FieldError through the translator, filling
// the registered message template with the field name and the validation
// parameter.
// NOTE(review): it panics when no translation is registered for fe.Tag() —
// presumably initTrans always runs first; confirm before reusing elsewhere.
func translate(trans universalTranslator.Translator, fe validator.FieldError) string {
	msg, err := trans.T(fe.Tag(), fe.Field(), fe.Param())
	if err != nil {
		panic(fe.(error).Error())
	}
	return msg
}
// Refer to here to write a custom check function
// https://github.com/go-playground/validator/blob/f6584a41c8acc5dfc0b62f7962811f5231c11530/baked_in.go
// https://github.com/go-playground/validator/issues/524

// IsLess reports whether the field's integer value is strictly less than the
// integer encoded in the tag parameter (e.g. `validate:"is-less=10"`).
func IsLess(fl validator.FieldLevel) bool {
	value := fl.Field().Int()
	// gofmt cleanup: original wrote `param:= fl.Param()`.
	param := fl.Param()
	return value < cast.ToInt64(param)
}
|
[
6
] |
package mcts
import "github.com/BattlesnakeOfficial/rules"
// RoyaleRuleset wraps the standard Battlesnake ruleset and additionally
// applies hazard ("out of bounds") damage after every turn.
type RoyaleRuleset struct {
	rules.StandardRuleset
	Hazards []rules.Point
}
// CreateNextBoardState advances the board using the embedded standard rules,
// then applies hazard damage to snakes whose heads sit on hazard points.
func (r *RoyaleRuleset) CreateNextBoardState(prevState *rules.BoardState,
	moves []rules.SnakeMove) (*rules.BoardState, error) {
	next, err := r.StandardRuleset.CreateNextBoardState(prevState, moves)
	if err != nil {
		return nil, err
	}
	r.damageOutOfBounds(next)
	return next, nil
}
// damageOutOfBounds subtracts 15 health from every still-alive snake whose
// head occupies a hazard point, eliminating it when health reaches zero.
// A hazard point listed more than once deducts once per listing, matching
// the original behavior.
func (r *RoyaleRuleset) damageOutOfBounds(b *rules.BoardState) {
	for i := range b.Snakes {
		snake := &b.Snakes[i]
		if snake.EliminatedCause != "" {
			continue
		}
		head := snake.Body[0]
		for _, hazard := range r.Hazards {
			if head != hazard {
				continue
			}
			// Head is inside a hazard: apply damage and possibly eliminate.
			snake.Health -= 15
			if snake.Health <= 0 {
				snake.Health = 0
				snake.EliminatedCause = "out-of-health"
			}
		}
	}
}
|
[
0
] |
package auth
import (
"fmt"
"github.com/sauerbraten/waiter/pkg/protocol/role"
)
// callbacks holds the per-request continuations that resolve an in-flight
// authentication attempt.
type callbacks struct {
	onSuccess func(role.ID)
	onFailure func(error)
}

// Manager routes auth challenges and answers to per-domain providers and
// keeps the pending callbacks keyed by provider request ID.
// NOTE(review): callbacksByRequest is mutated from provider callbacks —
// verify providers call back on the same goroutine or add locking.
type Manager struct {
	providersByDomain map[string]Provider
	callbacksByRequest map[uint32]callbacks
}

// NewManager returns a Manager serving the given domain -> provider mapping.
func NewManager(providers map[string]Provider) *Manager {
	return &Manager{
		providersByDomain: providers,
		callbacksByRequest: map[uint32]callbacks{},
	}
}
// TryAuthentication starts authenticating name against the provider for
// domain. On successful challenge generation the request's continuations are
// remembered and onChal is invoked; onSuccess/onFailure fire later when
// CheckAnswer resolves the request (or immediately when no provider exists
// or challenge generation fails).
func (m *Manager) TryAuthentication(domain, name string, onChal func(reqID uint32, chal string), onSuccess func(role.ID), onFailure func(error)) {
	p, ok := m.providersByDomain[domain]
	if !ok {
		onFailure(fmt.Errorf("auth: no provider for domain '%s'", domain))
		return
	}
	p.GenerateChallenge(name, func(reqID uint32, chal string, err error) {
		if err != nil {
			onFailure(err)
			return
		}
		// Remember the continuations so CheckAnswer can resolve this request.
		m.callbacksByRequest[reqID] = callbacks{
			onSuccess: onSuccess,
			onFailure: onFailure,
		}
		onChal(reqID, chal)
	})
	// (Removed the redundant bare `return` that ended the void function.)
}
// CheckAnswer verifies the answer for a pending auth request. The stored
// callbacks for reqID are always dropped afterwards, and the outcome is
// delivered asynchronously through the remembered onSuccess/onFailure.
func (m *Manager) CheckAnswer(reqID uint32, domain string, answ string) (err error) {
	defer delete(m.callbacksByRequest, reqID)
	p, ok := m.providersByDomain[domain]
	if !ok {
		err = fmt.Errorf("auth: no provider for domain '%s'", domain)
		return
	}
	callbacks, ok := m.callbacksByRequest[reqID]
	if !ok {
		// BUG FIX: the error message previously read "unkown".
		err = fmt.Errorf("auth: unknown request '%d'", reqID)
		return
	}
	p.ConfirmAnswer(reqID, answ, func(rol role.ID, err error) {
		if err != nil {
			go callbacks.onFailure(err)
			return
		}
		go callbacks.onSuccess(rol)
	})
	return
}
|
[
6
] |
package main
import (
"bytes"
"encoding/json"
"github.com/gocolly/colly"
"io/ioutil"
"log"
"net/http"
"os"
"regexp"
"strings"
)
// Data is one crawled result set: the cursor (profile URL) it came from, the
// suggested-page links found there, and the profile's instagram link and
// category names scraped from its about page.
type Data struct {
	Cursor string `json:"cursor"`
	Links []string `json:"links"`
	Instagram string `json:"instagram"`
	Categories []string `json:"categories"`
}

// CSS selectors for the Facebook page markup being scraped.
const SuggestedPagesSelector = "._5ay5[data-id='10']"
const ProfileLinkSelector = "a._8o._8t._ohe"
const ProfileInstagramSelector = "._4bl9 > a[href*='instagram']"
const ProfileCategorySelector = "._4bl9 > a[href*='/pages/category/']"

// HeadhunterURL is the backend that supplies cursors and receives results.
var HeadhunterURL = os.Getenv("HEADHUNTER_URL")

// CatchYCursors caps how many cursors are processed per invocation.
var CatchYCursors = 2

//TODO: make it better, we should keep proxies as map for better crawler results
// for example map of geolocation and proxy url and make shake with roundrobin algorithm on colly
var CrawlerProxies []string
// Handler is the HTTP entry point: it loads proxy config from the
// environment, fetches up to CatchYCursors crawling cursors, scrapes each
// one, ships the results to the headhunter backend, and echoes the processed
// cursors as JSON.
func Handler(w http.ResponseWriter, r *http.Request) {
	if err := json.Unmarshal([]byte(os.Getenv("CRAWLER_PROXIES")), &CrawlerProxies); err != nil {
		panic(err)
	}
	cursors := getCrawlingCursor()
	// BUG FIX: slice only when there are more cursors than the cap. The old
	// `len(cursors) > 1` guard would panic with an out-of-range slice if
	// CatchYCursors were ever raised above the number of available cursors.
	if len(cursors) > CatchYCursors {
		cursors = cursors[:CatchYCursors]
	}
	crawlerData := make([]Data, len(cursors))
	for i, cursor := range cursors {
		crawlerData[i] = Data{
			Cursor: cursor,
		}
		scrapePage(cursor, func(links []string) {
			crawlerData[i].Links = links
		})
		scrapeAbout(
			cursor,
			func(category string) {
				crawlerData[i].Categories = append(crawlerData[i].Categories, category)
			},
			func(instagram string) {
				crawlerData[i].Instagram = instagram
			})
	}
	if err := sendCrawlingData(crawlerData); err != nil {
		// The panic aborts the handler; the 500 status is already committed.
		// (Removed the unreachable `return` that followed the panic.)
		w.WriteHeader(500)
		panic(err)
	}
	b, _ := json.Marshal(cursors)
	w.Write(b)
}
// scrapePage visits the cursor URL through the first configured proxy and,
// when the suggested-pages widget is present, passes the profile links found
// in it (with query strings stripped) to cb.
func scrapePage(cursor string, cb func([]string)) {
	c := colly.NewCollector()
	if err := c.SetProxy(CrawlerProxies[0]); err != nil {
		log.Fatal(err)
	}
	c.OnRequest(func(r *colly.Request) {
		log.Println("Visiting", r.URL)
	})
	c.OnHTML(
		SuggestedPagesSelector,
		func(e *colly.HTMLElement) {
			if cursor == "" {
				return
			}
			var links []string
			e.ForEach(ProfileLinkSelector, func(i int, element *colly.HTMLElement) {
				href := element.Attr("href")
				// Keep everything before the first '?' (drops tracking params).
				re := regexp.MustCompile(`^[^?]+`)
				link := re.FindString(href)
				links = append(links, link)
			})
			cb(links)
		},
	)
	if err := c.Visit(cursor); err != nil {
		panic(err)
	}
	c.Wait()
}
// scrapeAbout visits the profile's about page and invokes cb once per
// category name found and instagramCb with the instagram link, if any.
// NOTE(review): the URL is cursor + "about" with no separator — presumably
// cursors always end in '/'; confirm against the cursor producer.
func scrapeAbout(cursor string, cb func(string), instagramCb func(string)) {
	onProfileCategory := func(e *colly.HTMLElement) {
		categoryLink := e.Attr("href")
		// Category name is the path segment after "/category", slashes removed.
		category := strings.Replace(
			strings.Split(categoryLink, "/category")[1],
			"/",
			"",
			-1,
		)
		cb(category)
	}
	c := colly.NewCollector()
	if err := c.SetProxy(CrawlerProxies[0]); err != nil {
		log.Fatal(err)
	}
	c.OnRequest(func(r *colly.Request) {
		log.Println("Visiting", r.URL)
	})
	c.OnHTML(
		ProfileInstagramSelector,
		func(e *colly.HTMLElement) {
			instagramCb(e.Attr("href"))
		},
	)
	c.OnHTML(
		ProfileCategorySelector,
		onProfileCategory,
	)
	if err := c.Visit(cursor + "about"); err != nil {
		panic(err)
	}
	c.Wait()
}
// getCrawlingCursor fetches the list of cursor URLs to crawl from the
// headhunter backend, panicking (consistent with the rest of this handler)
// on any request, read, or decode failure.
func getCrawlingCursor() []string {
	response, err := http.Get(
		HeadhunterURL + "/api/server/crawling/cursor",
	)
	// BUG FIX: the request error was discarded with `_`, so a failed request
	// caused a nil-pointer dereference on response.Body below.
	if err != nil {
		panic(err)
	}
	defer response.Body.Close()
	cursor, err := ioutil.ReadAll(response.Body)
	if err != nil {
		panic(err)
	}
	var links []string
	if err := json.Unmarshal(cursor, &links); err != nil {
		panic(err)
	}
	return links
}
// sendCrawlingData POSTs the scraped results to the headhunter backend and
// returns any marshalling, request, or body-read error.
func sendCrawlingData(crawlerData []Data) error {
	jsonData, err := json.Marshal(crawlerData)
	if err != nil {
		// BUG FIX: the marshal error was silently overwritten below.
		return err
	}
	response, err := http.Post(
		HeadhunterURL+"/api/server/crawling",
		"application/json",
		bytes.NewBuffer(jsonData),
	)
	// BUG FIX: the request error was discarded with `_`, so a failed POST
	// caused a nil-pointer dereference on response.Body.
	if err != nil {
		return err
	}
	defer response.Body.Close()
	// Drain the body so the underlying connection can be reused.
	_, err = ioutil.ReadAll(response.Body)
	return err
}
|
[
6
] |
package stdsql
import (
	"fmt"
	"strings"

	"github.com/amortaza/bsn/flux"
	"github.com/amortaza/bsn/flux/normalization"
	"github.com/amortaza/bsn/flux/utils"
)
// RowInserter builds and executes INSERT statements through a SQLRunner.
type RowInserter struct {
	sqlRunner *SQLRunner
}

// NewRowInserter returns a RowInserter that executes through sqlRunner.
func NewRowInserter(sqlRunner *SQLRunner) *RowInserter {
	return &RowInserter{
		sqlRunner,
	}
}
// Insert writes values as a new row in table, generating a fresh UUID as the
// primary key and returning it along with any execution error.
func (inserter *RowInserter) Insert(table string, values *flux.RecordMap) (string, error) {
	newId := utils.NewUUID()
	sql := inserter.generateSQL(table, newId, values)
	return newId, inserter.sqlRunner.Run(sql)
}
// generateSQL renders an INSERT statement for table, with the primary-key
// column first followed by every column/value pair in values.Data.
// NOTE(review): table and column names are interpolated inside backticks and
// values through valueToSQL — injection-prone if any of these can come from
// untrusted input; prefer prepared statements with parameter binding.
// NOTE(review): Go map iteration order is random, so column order varies per
// call (harmless for execution, but generated SQL is non-deterministic).
func (inserter *RowInserter) generateSQL(table string, newId string, values *flux.RecordMap) string {
	columnsSQL := "`" + normalization.PrimaryKeyFieldname + "`"
	valuesSQL := fmt.Sprintf("'%s'", newId)
	for column, value := range values.Data {
		sqlValue := inserter.valueToSQL(value)
		columnsSQL = fmt.Sprintf("%s, `%s`", columnsSQL, column)
		valuesSQL = fmt.Sprintf("%s, %s", valuesSQL, sqlValue)
	}
	return fmt.Sprintf("INSERT INTO `%s` (%s) VALUES(%s);", table, columnsSQL, valuesSQL)
}
// valueToSQL renders a single value as a SQL literal: strings are quoted
// with embedded single quotes doubled; everything else uses %v formatting.
// NOTE(review): this is minimal hardening only — parameterized queries
// should replace string-built SQL entirely.
func (inserter *RowInserter) valueToSQL(value interface{}) string {
	if stringValue, ok := value.(string); ok {
		// BUG FIX: single quotes were previously not escaped, so any value
		// containing "'" produced broken — and injectable — SQL.
		return fmt.Sprintf("'%s'", strings.ReplaceAll(stringValue, "'", "''"))
	}
	return fmt.Sprintf("%v", value)
}
|
[
6
] |
package main
import "fmt"
// main demonstrates Go's for-loop forms: while-style (condition only),
// classic three-clause, infinite-with-break, and continue.
func main() {
	// condition-only loop, written three-clause here; prints 1..3
	for i := 1; i <= 3; i = i + 1 {
		fmt.Println(i)
	}
	// classic three-clause loop; prints 7..9
	for j := 7; j <= 9; j++ {
		fmt.Println(j)
	}
	// infinite loop terminated immediately by break
	for {
		fmt.Println("Loop")
		fmt.Println("break jumps out of the for loop")
		break
	}
	// continue inverted into a guard: only odd n produce output
	for n := 0; n <= 5; n++ {
		if n%2 != 0 {
			fmt.Println("n%2 = 0 => continue jumps to next iteration")
			fmt.Println(n)
		}
	}
}
|
[
0
] |
package resolver
// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
import (
"context"
"github.com/99designs/gqlgen/graphql"
"github.com/acelot/articles/internal/feature/article"
"github.com/acelot/articles/internal/gql/model"
"github.com/acelot/articles/internal/gql/runtime"
"go.uber.org/zap"
)
// TotalCount resolves ArticleFindList.totalCount by re-reading the parent
// field's "filter" argument and counting matching articles (optionally via a
// cheaper estimate). Service failures are logged and surfaced to the client
// as an internal-error problem value rather than a GraphQL error.
func (r *articleFindListResolver) TotalCount(ctx context.Context, obj *model.ArticleFindList, estimate uint) (model.TotalCountResolvingResult, error) {
	// NOTE(review): the type assertion panics if the parent has no "filter"
	// argument of this exact type — presumably guaranteed by the schema.
	filter := graphql.GetFieldContext(ctx).Parent.Args["filter"].(*model.ArticleFindFilterInput)
	count, err := r.env.Services.ArticleService.CountArticles(filter, estimate)
	if err != nil {
		r.env.Logger.Error("article.Service.CountArticles", zap.Error(err))
		return NewInternalErrorProblem(), nil
	}
	return model.TotalCount{
		Value: count,
	}, nil
}
// Find resolves ArticleQuery.find: filtered, sorted, paged article lookup.
// Service failures are logged and returned as an internal-error problem
// value rather than a GraphQL error.
func (r *articleQueryResolver) Find(ctx context.Context, obj *model.ArticleQuery, filter *model.ArticleFindFilterInput, sort model.ArticleFindSortEnum, pageSize uint, pageNumber uint) (model.ArticleFindResult, error) {
	articles, err := r.env.Services.ArticleService.FindArticles(filter, sort, pageSize, pageNumber)
	if err != nil {
		r.env.Logger.Error("article.Service.FindArticles", zap.Error(err))
		return NewInternalErrorProblem(), nil
	}
	return model.ArticleFindList{
		Items: article.MapManyToGqlModels(articles),
	}, nil
}
// ArticleFindList returns runtime.ArticleFindListResolver implementation.
func (r *Resolver) ArticleFindList() runtime.ArticleFindListResolver {
	return &articleFindListResolver{r}
}

// articleFindListResolver embeds the root Resolver to share its environment.
type articleFindListResolver struct{ *Resolver }
|
[
6
] |
package set1
import "errors"
// Hex - each entry is really a 4-bit "nibble"
type Hex []byte

// b16 maps a nibble value (0-15) to its uppercase base-16 digit.
const b16 = "0123456789ABCDEF"

// GetHexFromString - return a valid Hexidecimal number
// Each input character becomes one nibble; both letter cases are accepted.
func GetHexFromString(in string) (Hex, error) {
	if len(in)&1 != 0 {
		return nil, errors.New("Invalid Hex: Length must be even")
	}
	hex := make(Hex, len(in))
	for i, cur := range in {
		// Subtract the ASCII offset of whichever digit range the character
		// falls in; anything outside the three ranges is not a hex digit.
		var offset int
		switch {
		case cur >= '0' && cur <= '9':
			offset = '0'
		case cur >= 'a' && cur <= 'f':
			offset = 'a' - 10
		case cur >= 'A' && cur <= 'F':
			offset = 'A' - 10
		default:
			return nil, errors.New("Invalid Hex: Character " + string(cur))
		}
		hex[i] = byte(int(cur) - offset)
	}
	return hex, nil
}
// HeXOr - return input Hex values XOr'd together
// The shorter input is treated as left-padded with zero nibbles, so the
// result always has the length of the longer input.
func HeXOr(h1, h2 Hex) Hex {
	if len(h1) > len(h2) {
		h1, h2 = h2, h1 // force h1 shorter than h2
	}
	diff := len(h2) - len(h1)
	xor := make(Hex, 0, len(h2))
	// Leading nibbles of the longer value pass through unchanged (x ^ 0 == x).
	xor = append(xor, h2[:diff]...)
	for i, nib := range h1 {
		xor = append(xor, nib^h2[i+diff])
	}
	return xor
}
// TextToHex - return hex version of plain text
// Each input byte expands to two nibbles, high nibble first.
func TextToHex(text []byte) Hex {
	var hex Hex
	for _, char := range text {
		hex = append(hex, DecToHexString(char)...)
	}
	return hex
}
// HexToBytes - return plain text of given Hex value
// Consecutive nibble pairs (high, low) are packed back into single bytes.
// Assumes an even-length input, as produced by GetHexFromString/TextToHex.
func HexToBytes(hex Hex) []byte {
	out := make([]byte, len(hex)/2)
	for i := range out {
		out[i] = hex[2*i]<<4 + hex[2*i+1]
	}
	return out
}
// DecToHexString - translate decimal number into hex representation
// (high nibble first, then low nibble).
func DecToHexString(dec byte) Hex {
	hi, lo := dec>>4, dec&0x0F
	return Hex{hi, lo}
}
// String renders the nibble slice as an uppercase base-16 string.
func (hex Hex) String() string {
	out := make([]byte, len(hex))
	for i := range hex {
		out[i] = b16[hex[i]]
	}
	return string(out)
}
|
[
5
] |
package medium
const (
	int32Max = 1<<31 - 1
	int32Min = -1 << 31
)

// myAtoi implements LeetCode 8 ("String to Integer (atoi)"): skip leading
// spaces, read an optional single sign, then consume consecutive digits,
// clamping the result to the signed 32-bit range. Any other leading
// character — or an empty/blank string — yields 0.
//
// Examples: "42" -> 42, "   -42" -> -42, "4193 with words" -> 4193,
// "words and 987" -> 0, "-91283472332" -> -2147483648 (clamped to INT_MIN).
func myAtoi(str string) int {
	n := len(str)
	neg := false

	// Scan past spaces; stop at the first digit or sign. Any other
	// character means no valid conversion is possible.
	i := 0
	for ; i < n; i++ {
		c := str[i]
		if c >= '0' && c <= '9' {
			break
		}
		if c == '+' || c == '-' {
			neg = c == '-'
			i++
			break
		}
		if c != ' ' {
			return 0
		}
	}

	// j marks one past the end of the digit run starting at i.
	j := i
	for j < n && str[j] >= '0' && str[j] <= '9' {
		j++
	}

	// Accumulate in the (64-bit) native int, clamping as soon as the 32-bit
	// range is exceeded. Accumulating negatively for negative inputs avoids
	// the asymmetric |INT_MIN| edge case.
	result := 0
	for k := i; k < j; k++ {
		digit := int(str[k] - '0')
		if neg {
			result = result*10 - digit
			if result < int32Min {
				return int32Min
			}
		} else {
			result = result*10 + digit
			if result > int32Max {
				return int32Max
			}
		}
	}
	return result
}
|
[
5
] |
package main
import (
"crypto/tls"
"fmt"
"io"
"net/http"
"os"
"path"
"github.com/jteeuwen/go-pkg-optarg"
)
// main is a minimal wget clone: it downloads the single URL given on the
// command line to a file named by -o/--output-document, defaulting to the
// last path segment of the URL.
func main() {
	optarg.Add("o", "output-document", "output filename", "")
	var (
		filename string
	)
	for opt := range optarg.Parse() {
		switch opt.ShortName {
		case "o":
			filename = opt.String()
		}
	}
	if len(optarg.Remainder) == 1 {
		url := optarg.Remainder[0]
		if len(filename) == 0 {
			// Default the output name to the URL's final path component.
			_, filename = path.Split(url)
		}
		// NOTE(review): InsecureSkipVerify disables TLS certificate checks,
		// exposing downloads to man-in-the-middle tampering — confirm this is
		// intentional (wget --no-check-certificate behavior).
		transport := &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
		client := &http.Client{
			Transport: transport,
		}
		res, err := client.Get(url)
		if err != nil {
			panic(err)
		}
		defer res.Body.Close()
		file, err := os.Create(filename)
		if err != nil {
			panic(err)
		}
		defer file.Close()
		// Stream the body straight to disk; n is the byte count reported below.
		n, err := io.Copy(file, res.Body)
		if err != nil {
			panic(err)
		}
		fmt.Println(n, "bytes downloaded.")
	} else {
		// Exactly one URL argument is required.
		optarg.Usage()
	}
}
|
[
7
] |
package main
import (
"fmt"
"math"
"sort"
"strconv"
"strings"
"github.com/seiffert/advent-of-code/lib"
)
var (
	// seatLocationBinarizer rewrites a boarding pass into a binary string:
	// F/L (front/left halves) become 0, B/R (back/right halves) become 1.
	seatLocationBinarizer = strings.NewReplacer("F", "0", "B", "1", "L", "0", "R", "1")
)
// main solves Advent of Code 2020 day 5: the highest seat ID in the input,
// then the single missing seat ID (ours).
func main() {
	list := NewSeatList(lib.MustReadFile("input.txt"))
	fmt.Printf("the highest set ID is %d\n", list.HighestSeatID())
	fmt.Printf("my seat is %d\n", list.MySeatID())
}
// SeatList is the collection of boarding passes from the puzzle input.
type SeatList []Seat

// NewSeatList parses one Seat per non-blank input line.
func NewSeatList(input string) SeatList {
	lines := strings.Split(input, "\n")
	seats := make(SeatList, 0, len(lines))
	for _, l := range lines {
		if strings.TrimSpace(l) == "" {
			continue
		}
		seats = append(seats, NewSeat(l))
	}
	return seats
}
// HighestSeatID returns the largest seat ID in the list (0 when empty).
func (sl SeatList) HighestSeatID() int {
	var max int
	for _, s := range sl {
		// Plain integer comparison: the previous math.Max call round-tripped
		// ints through float64 for no benefit (and is lossy for huge ints).
		if id := s.ID(); id > max {
			max = id
		}
	}
	return max
}
// MySeatID sorts the list by seat ID (in place!) and returns the ID missing
// between two consecutive occupied seats, or -1 when no gap exists.
func (sl SeatList) MySeatID() int {
	sort.Slice(sl, func(i, j int) bool { return sl[i].ID() < sl[j].ID() })
	var prev int
	for i := 0; i < len(sl); i++ {
		// A gap of exactly one seat after any seen seat is ours. prev == 0
		// also skips the comparison for the very first element.
		if prev != 0 && sl[i].ID() != prev+1 {
			return prev + 1
		}
		prev = sl[i].ID()
	}
	return -1
}
// Seat is a plane seat located by row (0-127) and column (0-7).
type Seat struct{ row, column int }

// NewSeat decodes a 10-character boarding pass: the first 7 characters are a
// binary row number, the remaining 3 a binary column number.
func NewSeat(input string) Seat {
	bin := seatLocationBinarizer.Replace(input)
	row, _ := strconv.ParseInt(bin[:7], 2, 64)
	col, _ := strconv.ParseInt(bin[7:], 2, 64)
	return Seat{row: int(row), column: int(col)}
}

// ID returns the puzzle's seat ID: row*8 + column.
func (s Seat) ID() int {
	return s.row*8 + s.column
}
|
[
1
] |
package store
// scopeTrees returns a list of commit IDs that are matched by the
// filters. If potentially all commits could match, or if enough
// commits could potentially match that it would probably be cheaper
// to iterate through all of them, then a nil slice is returned. If
// none match, an empty slice is returned.
//
// scopeTrees is used to select which TreeStores to query.
//
// TODO(sqs): return an error if the filters are mutually exclusive?
func scopeTrees(filters []interface{}) ([]string, error) {
	commitIDs := map[string]struct{}{}
	everHadAny := false // whether commitIDs ever contained any commit IDs (comment fixed: previously said "unitIDs")
	for _, f := range filters {
		switch f := f.(type) {
		case ByCommitIDsFilter:
			if len(commitIDs) == 0 && !everHadAny {
				// First commit filter seen: seed the scope set.
				everHadAny = true
				for _, c := range f.ByCommitIDs() {
					commitIDs[c] = struct{}{}
				}
			} else {
				// Intersect.
				newCommitIDs := make(map[string]struct{}, (len(commitIDs)+len(f.ByCommitIDs()))/2)
				for _, c := range f.ByCommitIDs() {
					if _, present := commitIDs[c]; present {
						newCommitIDs[c] = struct{}{}
					}
				}
				commitIDs = newCommitIDs
			}
		}
	}
	if len(commitIDs) == 0 && !everHadAny {
		// No commit scoping filters were present, so scope includes
		// potentially all commitIDs. (comment fixed: previously said "unit")
		return nil, nil
	}
	ids := make([]string, 0, len(commitIDs))
	for commitID := range commitIDs {
		ids = append(ids, commitID)
	}
	return ids, nil
}
// A treeStoreOpener opens the TreeStore for the specified tree.
type treeStoreOpener interface {
	// openTreeStore opens the store for a single commit.
	openTreeStore(commitID string) TreeStore
	// openAllTreeStores opens stores for every commit, keyed by commit ID.
	openAllTreeStores() (map[string]TreeStore, error)
}
// openTreeStores is a helper func that calls o.openTreeStore for
// each tree returned by scopeTrees(filters...).
// (comment fixed: previously named the function "openCommitstores")
func openTreeStores(o treeStoreOpener, filters interface{}) (map[string]TreeStore, error) {
	commitIDs, err := scopeTrees(storeFilters(filters))
	if err != nil {
		return nil, err
	}
	// nil means "unscoped": fall back to opening every tree store.
	if commitIDs == nil {
		return o.openAllTreeStores()
	}
	tss := make(map[string]TreeStore, len(commitIDs))
	for _, commitID := range commitIDs {
		tss[commitID] = o.openTreeStore(commitID)
	}
	return tss, nil
}
|
[
7
] |
package routes
import (
"errors"
"fmt"
"net/http"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/gin-gonic/gin"
"github.com/ovh/tat/models"
"github.com/ovh/tat/utils"
"github.com/spf13/viper"
)
// private
var (
	// Accepted spellings of the password header: canonical, lowercase, and
	// dash-separated (some clients/proxies rewrite underscores to dashes).
	tatHeaderPassword = "Tat_password"
	tatHeaderPasswordLower = "tat_password"
	tatHeaderPasswordLowerDash = "tat-password"
)

// tatHeaders carries the credentials extracted from a request's headers:
// either username+password, or a trusted username asserted by a fronting
// proxy.
type tatHeaders struct {
	username string
	password string
	trustUsername string
}
// CheckAdmin is a middleware, abort request if user is not admin
func CheckAdmin() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		if !utils.IsTatAdmin(ctx) {
			// Abort stops the remaining handler chain with a 403; no explicit
			// return is needed since this is the last statement.
			ctx.AbortWithError(http.StatusForbidden, errors.New("user is not admin"))
		}
	}
}
// CheckPassword is a middleware, check username / password in Request Header and validate
// them in DB. If username/password is invalid, abort request
// Responses: 400 when headers are malformed, 401 when credentials do not
// validate, 500 when the user cannot be stored on the context.
func CheckPassword() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		// refresh store to avoid lost connection on mongo
		models.RefreshStore()
		tatHeaders, err := extractTatHeaders(ctx)
		if err != nil {
			ctx.AbortWithError(http.StatusBadRequest, err)
			return
		}
		user, err := validateTatHeaders(tatHeaders)
		if err != nil {
			log.Errorf("Error, send 401, err : %s", err.Error())
			ctx.AbortWithError(http.StatusUnauthorized, err)
			return
		}
		err = storeInContext(ctx, user)
		if err != nil {
			ctx.AbortWithError(http.StatusInternalServerError, err)
			return
		}
	}
}
// extractTatHeaders extracts Tat_username and Tat_password from the request
// headers, matching tat_username/tat_password and their dash variants
// (tat-username/tat-password, thanks to perl lib...), plus the configured
// trusted-username header. It fails when neither username+password nor a
// usable trusted username is present.
// (Also fixes the old comment typo "extractTatHeadesr".)
func extractTatHeaders(ctx *gin.Context) (tatHeaders, error) {
	var headers tatHeaders
	for k, v := range ctx.Request.Header {
		// Hoisted: the original recomputed strings.ToLower(k) for up to four
		// comparisons per header.
		lower := strings.ToLower(k)
		switch {
		case lower == utils.TatHeaderUsernameLower:
			headers.username = v[0]
		case lower == tatHeaderPasswordLower:
			headers.password = v[0]
		case lower == utils.TatHeaderUsernameLowerDash:
			headers.username = v[0]
		case lower == tatHeaderPasswordLowerDash:
			headers.password = v[0]
		case k == viper.GetString("header_trust_username"):
			headers.trustUsername = v[0]
		}
	}
	if headers.password != "" && headers.username != "" {
		return headers, nil
	}
	if headers.trustUsername != "" && headers.trustUsername != "null" {
		return headers, nil
	}
	return headers, errors.New("Invalid Tat Headers")
}
// validateTatHeaders fetch user in db and check Password
// A trusted username (asserted by a fronting proxy) bypasses the password
// check; otherwise the username/password pair is verified against the store.
func validateTatHeaders(tatHeaders tatHeaders) (models.User, error) {
	user := models.User{}
	if tatHeaders.trustUsername != "" && tatHeaders.trustUsername != "null" {
		err := user.TrustUsername(tatHeaders.trustUsername)
		if err != nil {
			return user, fmt.Errorf("User %s does not exist. Please register before. Err:%s", tatHeaders.trustUsername, err.Error())
		}
	} else {
		err := user.FindByUsernameAndPassword(tatHeaders.username, tatHeaders.password)
		if err != nil {
			return user, fmt.Errorf("Invalid Tat credentials for username %s, err:%s", tatHeaders.username, err.Error())
		}
	}
	return user, nil
}
// storeInContext stores username and isAdmin flag only
// (plus the isSystem flag) on the gin context for downstream handlers.
// It always returns nil; the error return exists for call-site symmetry.
func storeInContext(ctx *gin.Context, user models.User) error {
	ctx.Set(utils.TatHeaderUsername, user.Username)
	ctx.Set(utils.TatCtxIsAdmin, user.IsAdmin)
	ctx.Set(utils.TatCtxIsSystem, user.IsSystem)
	if user.IsAdmin {
		log.Debugf("user %s isAdmin", user.Username)
	}
	if user.IsSystem {
		log.Debugf("user %s isSystem", user.Username)
	}
	return nil
}
|
[
5
] |
package in
import (
"fmt"
"io/ioutil"
"net/http"
"strings"
"github.com/apundir/wsbalancer/common"
)
// admin exposes the balancer's administrative HTTP endpoints. Backend
// operations are delegated to backendAdmin (optional: endpoints answer 501
// when it is nil); session/frontend operations go through manager.
type admin struct {
	manager *Manager
	backendAdmin common.BackendAdministrator
}

// backendAction applies an administrative operation to the backend with the
// given ID and returns a common.Result* code.
type backendAction func(string) (int, error)
// registerHTTPHandlers - registers actionable endpoints to supplied mux
func (adm *admin) registerHTTPHandlers(http *http.ServeMux) {
	// backend management
	http.HandleFunc("/backend/list", adm.listBackendHandler)
	http.HandleFunc("/backend/add", adm.addBackendHandler)
	http.HandleFunc("/backend/config/", adm.backendConfigHandler)
	http.HandleFunc("/backend/disable/", adm.disableBackendHandler)
	http.HandleFunc("/backend/enable/", adm.enableBackendHandler)
	http.HandleFunc("/backend/failover/", adm.failoverBackendHandler)
	http.HandleFunc("/backend/reset/", adm.resetBackendHandler)
	http.HandleFunc("/backend/delete/", adm.deleteBackendHandler)
	// session management
	http.HandleFunc("/session/list", adm.listSessionHandler)
	http.HandleFunc("/session/abort/", adm.abortSessionHandler)
	http.HandleFunc("/session/failover/", adm.failoverSessionHandler)
	// frontend management
	http.HandleFunc("/frontend/list", adm.listFrontendHandler)
	http.HandleFunc("/frontend/pause/", adm.pauseFrontendHandler)
	http.HandleFunc("/frontend/resume/", adm.resumeFrontendHandler)
}
// isHTTPPost reports whether the request is a POST. When it is not, a 400
// JSON failure body is written and false is returned, so callers can simply
// return immediately.
func (adm *admin) isHTTPPost(w http.ResponseWriter, r *http.Request) bool {
	// Idiom: compare against the stdlib constant rather than a bare literal.
	if r.Method == http.MethodPost {
		return true
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusBadRequest)
	fmt.Fprint(w, "{\"result\":\"failed\", \"reason\":\"OnlyPostAllowed\"}")
	return false
}
// sendNotImplementedResponse writes the 501 JSON failure body used whenever
// no backend administrator is configured.
func (adm *admin) sendNotImplementedResponse(w http.ResponseWriter) {
	header := w.Header()
	header.Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusNotImplemented)
	fmt.Fprint(w, "{\"result\":\"failed\", \"reason\":\"NotImplemented\"}")
}
// listBackendHandler - list all existing backends
// Any method is accepted; answers 501 when no backend administrator exists.
func (adm *admin) listBackendHandler(w http.ResponseWriter, r *http.Request) {
	if adm.backendAdmin == nil {
		adm.sendNotImplementedResponse(w)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	fmt.Fprint(w, adm.backendAdmin.ListBackends())
}
// processBkActionRequest extracts the backend ID from the request URI (third
// capture group of beIDRe), applies actor to it, and writes a JSON result.
// It returns the backend ID and the actor's result code so callers can do
// follow-up work (e.g. aborting connections after a delete).
func processBkActionRequest(w http.ResponseWriter, r *http.Request, actor backendAction) (string, int) {
	bid := beIDRe.FindStringSubmatch(r.URL.RequestURI())[3]
	w.Header().Set("Content-Type", "application/json")
	resp, err := actor(bid)
	if err != nil {
		// BUG FIX: execution previously fell through after writing this error
		// body and appended a second JSON object to the same response.
		// (Also: Fprintf replaces the redundant Fprint(Sprintf(...)).)
		fmt.Fprintf(w, "{\"result\":\"failed\", \"reason\":\"%v\"}", err.Error())
		return bid, resp
	}
	if resp == common.ResultSuccess {
		fmt.Fprint(w, "{\"result\":\"success\"}")
	} else if resp == common.ResultNoActionReqd {
		fmt.Fprint(w, "{\"result\":\"success\", \"reason\":\"NoActionRequired\"}")
	} else {
		fmt.Fprint(w, "{\"result\":\"failed\"}")
	}
	return bid, resp
}
// addBackendHandler - adds new backend
// POST only; the raw JSON request body is handed to the backend
// administrator, whose JSON result is echoed back to the client.
func (adm *admin) addBackendHandler(w http.ResponseWriter, r *http.Request) {
	if adm.backendAdmin == nil {
		adm.sendNotImplementedResponse(w)
		return
	}
	if !adm.isHTTPPost(w, r) {
		return
	}
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		alog.Warnf("Error reading body: %v", err)
		http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
		return
	}
	resp := adm.backendAdmin.AddBackend(body)
	w.Header().Set("Content-Type", "application/json")
	fmt.Fprint(w, resp)
}
// backendConfigHandler dumps the configuration of the backend whose id is
// embedded in the request URI.
// NOTE(review): FindStringSubmatch returns nil when the URI does not match
// beIDRe, which would panic on bid[3] — confirm routing guarantees a match.
func (adm *admin) backendConfigHandler(w http.ResponseWriter, r *http.Request) {
	if adm.backendAdmin == nil {
		adm.sendNotImplementedResponse(w)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	bid := beIDRe.FindStringSubmatch(r.URL.RequestURI())
	fmt.Fprint(w, adm.backendAdmin.BackendConfig(bid[3]))
}
// disableBackendHandler disables the backend identified in the URI.
// POST only; 501 when no backend admin is configured.
func (adm *admin) disableBackendHandler(w http.ResponseWriter, r *http.Request) {
	if adm.backendAdmin == nil {
		adm.sendNotImplementedResponse(w)
		return
	}
	if !adm.isHTTPPost(w, r) {
		return
	}
	processBkActionRequest(w, r, adm.backendAdmin.DisableBackend)
}
// enableBackendHandler enables the backend identified in the URI.
// POST only; 501 when no backend admin is configured.
func (adm *admin) enableBackendHandler(w http.ResponseWriter, r *http.Request) {
	if adm.backendAdmin == nil {
		adm.sendNotImplementedResponse(w)
		return
	}
	if !adm.isHTTPPost(w, r) {
		return
	}
	processBkActionRequest(w, r, adm.backendAdmin.EnableBackend)
}
// resetBackendHandler resets the backend identified in the URI.
// POST only; 501 when no backend admin is configured.
func (adm *admin) resetBackendHandler(w http.ResponseWriter, r *http.Request) {
	if adm.backendAdmin == nil {
		adm.sendNotImplementedResponse(w)
		return
	}
	if !adm.isHTTPPost(w, r) {
		return
	}
	processBkActionRequest(w, r, adm.backendAdmin.ResetBackend)
}
// deleteBackendHandler removes the backend identified in the URI and, on
// success, asynchronously closes all of its current connections.
func (adm *admin) deleteBackendHandler(w http.ResponseWriter, r *http.Request) {
	if adm.backendAdmin == nil {
		adm.sendNotImplementedResponse(w)
		return
	}
	if !adm.isHTTPPost(w, r) {
		return
	}
	bid, res := processBkActionRequest(w, r, adm.backendAdmin.DeleteBackend)
	if bid != "" && res == common.ResultSuccess {
		// Connection teardown may be slow; do it off the request path.
		go adm.manager.abortAllBackendConnections(bid)
	}
}
// failoverBackendHandler closes all backend connections, thus forcing
// failover for all the sessions communicating with this backend right now.
// POST only.
func (adm *admin) failoverBackendHandler(w http.ResponseWriter, r *http.Request) {
	if !adm.isHTTPPost(w, r) {
		return
	}
	processBkActionRequest(w, r, adm.manager.abortAllBackendConnections)
}
// listSessionHandler lists every active client session as a JSON array of
// {id, uri, backendId, reconnects, msgsReceived, msgsSent} objects.
// The JSON is assembled under the read lock, then written after release.
// NOTE(review): RUnlock is not deferred — a panic while building the list
// would leak the read lock; confirm acceptable.
func (adm *admin) listSessionHandler(w http.ResponseWriter, r *http.Request) {
	adm.manager.ctsLock.RLock()
	var b strings.Builder
	b.Write([]byte{'['})
	isFirst := true
	for fc, bc := range adm.manager.clients {
		if !isFirst {
			b.Write([]byte{','})
		}
		fmt.Fprintf(&b, "{\"id\":\"%v\",\"uri\":\"%v\",\"backendId\":\"%v\""+
			",\"reconnects\":%v,\"msgsReceived\":%v,\"msgsSent\":%v}",
			fc.id, jsonEscape(fc.RequestURI()), bc.BackendID(), fc.totalReconnects,
			fc.totalReceived, fc.totalSent)
		isFirst = false
	}
	b.Write([]byte{']'})
	adm.manager.ctsLock.RUnlock()
	w.Header().Set("Content-Type", "application/json")
	fmt.Fprint(w, b.String())
}
// abortSessionHandler aborts the frontend connection of the session
// identified in the URI. POST only.
func (adm *admin) abortSessionHandler(w http.ResponseWriter, r *http.Request) {
	if !adm.isHTTPPost(w, r) {
		return
	}
	processBkActionRequest(w, r, adm.manager.abortFrontendConnection)
}
// failoverSessionHandler aborts the backend connection of the session
// identified in the URI, forcing it to fail over. POST only.
func (adm *admin) failoverSessionHandler(w http.ResponseWriter, r *http.Request) {
	if !adm.isHTTPPost(w, r) {
		return
	}
	processBkActionRequest(w, r, adm.manager.abortBackendConnection)
}
// pauseFrontendHandler pauses the frontend identified in the URI. POST only.
func (adm *admin) pauseFrontendHandler(w http.ResponseWriter, r *http.Request) {
	if !adm.isHTTPPost(w, r) {
		return
	}
	processBkActionRequest(w, r, adm.manager.pauseFrontend)
}
// resumeFrontendHandler resumes a previously paused frontend. POST only.
func (adm *admin) resumeFrontendHandler(w http.ResponseWriter, r *http.Request) {
	if !adm.isHTTPPost(w, r) {
		return
	}
	processBkActionRequest(w, r, adm.manager.resumeFrontend)
}
// listFrontendHandler lists the (single) frontend of this manager as a
// one-element JSON array.
func (adm *admin) listFrontendHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	fmt.Fprintf(w, "[{\"id\":\"%v\"}]", adm.manager.id)
}
|
[
5
] |
package rbtree
// helper functions

// isRed reports whether n is a non-nil red node.
func isRed(n *Node) bool { return n != nil && n.color == RED }

// isBlack reports whether n is black; nil leaves count as black.
func isBlack(n *Node) bool { return n == nil || n.color == BLACK }
// insertFix restores the red-black invariants after inserting the red node
// x. It walks upward while the parent is red, handling the standard CLRS
// cases: a red uncle recolors and continues from the grandparent; a black
// uncle is resolved with one or two rotations. The root is forced black.
func (t *Tree) insertFix(x *Node) {
	var y *Node // y is x's uncle in each iteration
	for x.p != nil && x.p.color == RED {
		if x.p == x.p.p.left {
			y = x.p.p.right
			if isRed(y) {
				/*
				 * [BLACK] RED (ANY) -> x <-
				 *
				 * 1.
				 *       [g]
				 *      /   \
				 *     p     y
				 *    /
				 * -> x
				 * --->
				 *        g  <-
				 *      /   \
				 *    [p]    [y]
				 *    /
				 *   x
				 */
				x.p.color = BLACK
				y.color = BLACK
				x.p.p.color = RED
				x = x.p.p
			} else {
				if x == x.p.right {
					/*
					 * 2.
					 *     [g]
					 *    /   \
					 *   p     [y]
					 *    \
					 *  -> x
					 * --->
					 *       [g]
					 *      /   \
					 *  -> x     [y]
					 *    /
					 *   p
					 */
					x = x.p
					t.leftRotate(x)
				}
				/*
				 * 3.
				 *      [g]
				 *     /   \
				 *    p     [y]
				 *   /
				 * -> x
				 * --->
				 *      [p]
				 *     /   \
				 * -> x     g
				 *           \
				 *           [y]
				 */
				x.p.color = BLACK
				x.p.p.color = RED
				t.rightRotate(x.p.p)
			}
		} else {
			// Mirror image of the three cases above.
			y = x.p.p.left
			if isRed(y) {
				x.p.color = BLACK
				y.color = BLACK
				x.p.p.color = RED
				x = x.p.p
			} else {
				if x == x.p.left {
					x = x.p
					t.rightRotate(x)
				}
				x.p.color = BLACK
				x.p.p.color = RED
				t.leftRotate(x.p.p)
			}
		}
	}
	t.root.color = BLACK
}
// x can be nil, but it should be treated as a leaf.
// deleteFix restores the red-black invariants after a deletion left x
// carrying an extra black. p is x's parent (needed because x may be nil).
// The four CLRS cases are applied, mirrored for the right-child side,
// until the extra black is absorbed or pushed to the root.
func (t *Tree) deleteFix(p, x *Node) {
	var y *Node // y is x's sibling in each iteration
	for x != t.root && isBlack(x) {
		if x == p.left {
			y = p.right
			if isRed(y) {
				/*
				 * 1.
				 *    [p]
				 *   /   \
				 * [x]    y
				 *       / \
				 *     [a] [b]
				 * --->
				 *      [y]
				 *     /   \
				 *    p    [b]
				 *   / \
				 * [x] [a] <- y
				 */
				y.color = BLACK
				p.color = RED
				t.leftRotate(p)
				y = p.right
			}
			if isBlack(y.right) && isBlack(y.left) {
				/*
				 * 2.
				 *    (p)
				 *   /   \
				 * [x]    [y]
				 *       /  \
				 *     [a]  [b]
				 * --->
				 *    (p) <- x
				 *   /   \
				 * [x]    y
				 *       /  \
				 *     [a]  [b]
				 */
				y.color = RED
				x, p = p, p.p
				// Don't worry :), if p is red, loop ends and it's set to black.
			} else {
				if isBlack(y.right) {
					/*
					 * 3.
					 *    (p)
					 *   /   \
					 * [x]    [y]
					 *       /  \
					 *      a   [b]
					 *     /
					 *   [c]
					 * --->
					 *    (p)
					 *   /   \
					 * [x]    [a] <- y
					 *       /  \
					 *     [c]   y
					 *            \
					 *            [b]
					 */
					y.left.color = BLACK
					y.color = RED
					t.rightRotate(y)
					y = p.right
				}
				/*
				 * 4.
				 *    (p)
				 *   /   \
				 * [x]    [y]
				 *       /  \
				 *     (a)   b
				 * --->
				 *      (y)
				 *     /   \
				 *   [p]   [b]
				 *   /  \
				 * [x]  (a)
				 */
				y.color = p.color
				p.color = BLACK
				y.right.color = BLACK
				t.leftRotate(p)
				x, p = t.root, nil
			}
		} else {
			// Mirror image of the four cases above.
			y = p.left
			if isRed(y) {
				y.color = BLACK
				p.color = RED
				t.rightRotate(p)
				y = p.left
			}
			if isBlack(y.left) && isBlack(y.right) {
				y.color = RED
				x, p = p, p.p
			} else {
				if isBlack(y.left) {
					y.right.color = BLACK
					y.color = RED
					t.leftRotate(y)
					y = p.left
				}
				y.color = p.color
				p.color = BLACK
				y.left.color = BLACK
				t.rightRotate(p)
				x, p = t.root, nil
			}
		}
	}
	if x != nil {
		x.color = BLACK
	}
}
// transplant replaces the subtree rooted at pos with the subtree rooted at
// n (n may be nil), updating the parent links on both sides. pos's own
// child pointers are left untouched.
func (t *Tree) transplant(pos, n *Node) {
	if pos.p == nil {
		t.root = n
	} else if pos == pos.p.left {
		pos.p.left = n
	} else {
		pos.p.right = n
	}
	if n != nil {
		n.p = pos.p
	}
}
// newNode allocates a detached red node holding v. New nodes are red so
// insertion never changes black height; insertFix repairs any red-red
// violation afterwards.
func (t *Tree) newNode(v interface{}) *Node {
	return &Node{
		left:  nil,
		right: nil,
		p:     nil,
		v:     v,
		color: RED,
	}
}
/*
 *    x
 *   / \
 *  a   y
 *     / \
 *    b   c
 * ->
 *      y
 *     / \
 *    x   c
 *   / \
 *  a   b
 */
// leftRotate rotates the subtree rooted at x to the left.
// x.right must be non-nil.
func (t *Tree) leftRotate(x *Node) {
	y := x.right
	x.right = y.left
	if y.left != nil {
		y.left.p = x
	}
	t.transplant(x, y)
	y.left = x
	x.p = y
}
/*
 *      x
 *     / \
 *    y   c
 *   / \
 *  a   b
 * ->
 *    y
 *   / \
 *  a   x
 *     / \
 *    b   c
 */
// rightRotate rotates the subtree rooted at x to the right.
// x.left must be non-nil.
func (t *Tree) rightRotate(x *Node) {
	y := x.left
	x.left = y.right
	if y.right != nil {
		y.right.p = x
	}
	t.transplant(x, y)
	y.right = x
	x.p = y
}
|
[
5
] |
package main
import (
"fmt"
"log"
"strconv"
"strings"
"uk.co.lewis-od.aoc2020/common"
)
// GamesConsole models the AoC 2020 day 8 handheld console: a program of
// instructions, an accumulator, the current instruction counter, and the
// history of counters already executed (used for loop detection).
type GamesConsole struct {
	instructions       []Instruction
	accumulator        int
	instructionCounter int
	counterHistory     []int
}
// execute runs the loaded program until it either terminates (the counter
// walks one past the last instruction, returns true) or is about to execute
// an instruction a second time (infinite loop, returns false). The
// accumulator and counter history are updated as side effects.
func (gc *GamesConsole) execute() bool {
	// Iterative rather than recursive (as the original was) so that long
	// programs cannot grow the call stack without bound; behaviour is
	// otherwise unchanged.
	for {
		if gc.instructionCounter == len(gc.instructions) {
			return true
		}
		if common.ArrayContains(gc.counterHistory, gc.instructionCounter) {
			return false
		}
		currentInstruction := gc.instructions[gc.instructionCounter]
		counterDelta := 1
		switch currentInstruction.operation {
		case "jmp":
			counterDelta = currentInstruction.argument
		case "acc":
			gc.accumulator += currentInstruction.argument
		case "nop":
			// no operation; fall through to the counter advance
		default:
			log.Fatal("Encountered unknown operation", currentInstruction.operation)
		}
		gc.counterHistory = append(gc.counterHistory, gc.instructionCounter)
		gc.instructionCounter += counterDelta
	}
}
// Instruction is a single program line: an operation ("acc", "jmp" or
// "nop") and its signed integer argument.
type Instruction struct {
	operation string
	argument  int
}
// main reads the puzzle input, parses it into instructions and prints the
// answers for both parts of AoC 2020 day 8.
func main() {
	inputRows := common.ReadAndSanitiseRows("input.txt")
	instructions := make([]Instruction, len(inputRows))
	for i, inputRow := range inputRows {
		instructions[i] = parseInstruction(inputRow)
	}
	fmt.Println("Part 1:")
	fmt.Println("acc =", part1(instructions))
	fmt.Println()
	fmt.Println("Part 2:")
	accumulator, err := part2(instructions)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acc =", accumulator)
}
// parseInstruction parses a single "op ±N" program line (e.g. "jmp +4")
// into an Instruction. Malformed lines abort the program via log.Fatal
// instead of panicking with an index error as the original did.
func parseInstruction(text string) Instruction {
	parts := strings.Split(text, " ")
	if len(parts) != 2 {
		log.Fatal("Malformed instruction line: ", text)
	}
	argument, err := strconv.Atoi(parts[1])
	if err != nil {
		// Include the underlying error so the cause is visible.
		log.Fatal("Error converting ", parts[1], " to int: ", err)
	}
	return Instruction{
		operation: parts[0],
		argument:  argument,
	}
}
// part1 returns the accumulator value just before any instruction is
// executed a second time.
func part1(instructions []Instruction) int {
	accumulator, _ := runInstructions(instructions)
	return accumulator
}
// part2 flips one "jmp" to "nop" at a time (on a copy of the program) and
// returns the accumulator of the first variant that terminates. An error
// is returned when no single flip makes the program terminate.
// Note: the puzzle also allows nop->jmp flips; this input evidently does
// not need them.
func part2(instructions []Instruction) (int, error) {
	for index, instruction := range instructions {
		if instruction.operation == "jmp" {
			flippedInstruction := instruction
			flippedInstruction.operation = "nop"
			// Work on a copy so the original program stays intact.
			modifiedInstructions := make([]Instruction, len(instructions))
			copy(modifiedInstructions, instructions)
			modifiedInstructions[index] = flippedInstruction
			accumulator, didTerminate := runInstructions(modifiedInstructions)
			if didTerminate {
				return accumulator, nil
			}
		}
	}
	return 0, common.SolutionNotFoundError("Unable to find solution")
}
// runInstructions executes the program on a fresh console and returns the
// final accumulator plus whether the program terminated normally (true)
// or was stopped on a repeated instruction (false).
func runInstructions(instructions []Instruction) (int, bool) {
	console := GamesConsole{
		instructions:       instructions,
		accumulator:        0,
		instructionCounter: 0,
		counterHistory:     make([]int, 0),
	}
	didTerminate := console.execute()
	return console.accumulator, didTerminate
}
|
[
5
] |
package models
// Action is the interface for the operations a filter can execute; only
// the Exec method needs to be implemented.
type Action interface {
	Exec(email *Email, args ...interface{}) error
}
// LabelAction tags an email with a label.
type LabelAction struct{}

// MarkAsReadAction marks an email as read.
type MarkAsReadAction struct{}

// MarkAsDeleteAction marks an email as deleted.
type MarkAsDeleteAction struct{}

// ForwardAction forwards an email; not implemented yet.
type ForwardAction struct{}

// ReplyAction auto-replies to an email; not implemented yet.
type ReplyAction struct{}

// MoveMessaeAction moves an email; not implemented yet.
type MoveMessaeAction struct{}

// CopyMessageAction copies an email; not implemented yet.
type CopyMessageAction struct{}

// ChangeStatusAction changes an email's status; not implemented yet.
type ChangeStatusAction struct{}
// Exec tags the email with the label passed as the first argument.
// Missing or non-string arguments are ignored. The tag is only attached in
// memory; persisting it is the Receiver's responsibility.
func (e LabelAction) Exec(email *Email, args ...interface{}) error {
	if len(args) == 0 {
		// Guard: the original indexed args[0] unconditionally and would
		// panic when called without arguments.
		return nil
	}
	if label, ok := args[0].(string); ok {
		if email.Tags == nil {
			email.Tags = make([]*Tag, 0)
		}
		// No database access here; storage happens in the Receiver.
		email.Tags = append(email.Tags, &Tag{Name: label})
	}
	return nil
}
// Exec forwards the email; not implemented yet.
func (e ForwardAction) Exec(email *Email, args ...interface{}) error {
	return nil
}

// Exec replies to the email; not implemented yet.
func (e ReplyAction) Exec(email *Email, args ...interface{}) error {
	return nil
}

// Exec moves the email; not implemented yet.
func (e MoveMessaeAction) Exec(email *Email, args ...interface{}) error {
	return nil
}

// Exec copies the email; not implemented yet.
func (e CopyMessageAction) Exec(email *Email, args ...interface{}) error {
	return nil
}

// Exec implements ChangeStatusAction; not implemented yet.
func (e ChangeStatusAction) Exec(email *Email, args ...interface{}) error {
	return nil
}
// Exec marks the email as read (in memory only; the original comment
// claimed "not implemented", but it is).
func (e MarkAsReadAction) Exec(email *Email, args ...interface{}) error {
	email.IsRead = 1
	return nil
}

// Exec marks the email as deleted (in memory only).
func (e MarkAsDeleteAction) Exec(email *Email, args ...interface{}) error {
	email.IsDelete = 1
	return nil
}
// NewAction creates the filter action for the given type name. Only
// "Label", "MarkAsRead" and "MarkAsDelete" are supported; any other name
// (including the not-yet-implemented actions) yields nil, which callers
// must check.
func NewAction(t string) Action {
	switch t {
	case "Label":
		return LabelAction{}
	case "MarkAsRead":
		return MarkAsReadAction{}
	case "MarkAsDelete":
		return MarkAsDeleteAction{}
	}
	return nil
}
|
[
7
] |
package problem003
// MylengthOfLongestSubstring returns the length of the longest substring
// of s without repeating characters, using a sliding window over byte
// positions. The input is assumed to contain only single-byte (ASCII)
// characters. Runs in O(len(s)) time and O(1) space.
func MylengthOfLongestSubstring(s string) int {
	// last[c] holds the index at which byte c was most recently seen,
	// or -1 if it has not appeared yet.
	var last [256]int
	for i := range last {
		last[i] = -1
	}
	best, start := 0, 0
	for end := 0; end < len(s); end++ {
		if prev := last[s[end]]; prev >= start {
			// The repeated byte lies inside the current window: the window
			// ending at its previous occurrence was already measured, so
			// just slide the start past that occurrence.
			start = prev + 1
		} else if length := end - start + 1; length > best {
			// Any previous occurrence is outside the window, so the window
			// grew; record it if it is the longest seen so far.
			best = length
		}
		// Whether or not it repeated, remember where this byte was seen.
		last[s[end]] = end
	}
	return best
}
|
[
4
] |
package autoscaling
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/pkg/errors"
)
// GetAutoScallingGroupsByCluster returns every autoscaling group that
// carries a "clusterid" tag equal to clusterName. (The misspelled name is
// kept: renaming would break existing callers.)
func (s *Service) GetAutoScallingGroupsByCluster(clusterName string) ([]*autoscaling.Group, error) {
	asGroups := []*autoscaling.Group{}
	// input := &autoscaling.DescribeAutoScalingGroupsInput{}
	out, err := s.AutoScaling.DescribeAutoScalingGroups(nil)
	switch {
	// case IsNotFound(err):
	// 	return nil, nil
	case err != nil:
		return nil, errors.Wrap(err, "failed to describe instances by tags")
	}
	for _, asg := range out.AutoScalingGroups {
		for _, tag := range asg.Tags {
			// NOTE(review): assumes tag.Key and tag.Value are non-nil —
			// confirm the SDK guarantees this for returned tags.
			if (*tag.Key == "clusterid") && (*tag.Value == clusterName) {
				asGroups = append(asGroups, asg)
				break
			}
		}
	}
	// fmt.Printf("ASGs: %s", out)
	return asGroups, nil
}
// DeleteAutoScalingGroupsByCluster deletes every autoscaling group that
// belongs to the named cluster and returns the launch configuration names
// those groups were using.
func (s *Service) DeleteAutoScalingGroupsByCluster(clusterName string) ([]string, error) {
	asgs, err := s.GetAutoScallingGroupsByCluster(clusterName)
	if err != nil {
		return nil, err
	}
	return s.DeleteAutoScalingGroupsAndWait(asgs)
}
// DeleteAutoScalingGroupsAndWait force-deletes the given autoscaling
// groups, waits until they no longer exist, and returns the launch
// configuration names they referenced (so callers can clean those up too).
// NOTE(review): dereferencing asg.LaunchConfigurationName assumes every
// group uses a launch configuration; groups created from launch templates
// may have a nil pointer here — confirm.
func (s *Service) DeleteAutoScalingGroupsAndWait(asgs []*autoscaling.Group) ([]string, error) {
	forceDelete := true
	var asgsNames []string
	var launchConfigList []string
	for _, asg := range asgs {
		// Keep list of deleted ASGs for the waiting step
		asgsNames = append(asgsNames, *asg.AutoScalingGroupName)
		launchConfigList = append(launchConfigList, *asg.LaunchConfigurationName)
		fmt.Printf("Deleting autoscaling group: %s\n", *asg.AutoScalingGroupName)
		input := &autoscaling.DeleteAutoScalingGroupInput{
			AutoScalingGroupName: asg.AutoScalingGroupName,
			ForceDelete:          &forceDelete,
		}
		_, err := s.AutoScaling.DeleteAutoScalingGroup(input)
		if err != nil {
			return nil, err
		}
	}
	fmt.Printf("Waiting for deletion to be completed on: %s\n", asgsNames)
	inputWait := &autoscaling.DescribeAutoScalingGroupsInput{
		AutoScalingGroupNames: aws.StringSlice(asgsNames),
	}
	err := s.AutoScaling.WaitUntilGroupNotExists(inputWait)
	if err != nil {
		return nil, err
	}
	return launchConfigList, nil
}
|
[
7
] |
/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package schema
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strings"
"camlistore.org/pkg/blobref"
)
// Keep the log import referenced even when no logging call remains.
var _ = log.Printf

// closedIndex marks a reader position as closed.
const closedIndex = -1

// errClosed signals use of a filereader after it was closed.
var errClosed = errors.New("filereader is closed")
// A DirReader reads the entries of a "directory" schema blob's
// referenced "static-set" blob.
type DirReader struct {
	fetcher blobref.SeekFetcher
	ss      *Superset

	staticSet []*blobref.BlobRef // lazily populated by StaticSet
	current   int                // read position within staticSet
}
// NewDirReader creates a new directory reader and prepares to
// fetch the static-set entries. dirBlobRef must reference a schema blob
// of type "directory".
func NewDirReader(fetcher blobref.SeekFetcher, dirBlobRef *blobref.BlobRef) (*DirReader, error) {
	ss := new(Superset)
	err := ss.setFromBlobRef(fetcher, dirBlobRef)
	if err != nil {
		return nil, err
	}
	if ss.Type != "directory" {
		return nil, fmt.Errorf("schema/filereader: expected \"directory\" schema blob for %s, got %q", dirBlobRef, ss.Type)
	}
	dr, err := ss.NewDirReader(fetcher)
	if err != nil {
		return nil, fmt.Errorf("schema/filereader: creating DirReader for %s: %v", dirBlobRef, err)
	}
	dr.current = 0
	return dr, nil
}
// NewDirReader returns a DirReader over this superset, which must be of
// type "directory".
func (ss *Superset) NewDirReader(fetcher blobref.SeekFetcher) (*DirReader, error) {
	if ss.Type != "directory" {
		return nil, fmt.Errorf("Superset not of type \"directory\"")
	}
	return &DirReader{fetcher: fetcher, ss: ss}, nil
}
// setFromBlobRef fetches the schema blob identified by blobRef via fetcher
// and JSON-decodes it into ss. ss.BlobRef is set to blobRef.
func (ss *Superset) setFromBlobRef(fetcher blobref.SeekFetcher, blobRef *blobref.BlobRef) error {
	if blobRef == nil {
		return errors.New("schema/filereader: blobref was nil")
	}
	ss.BlobRef = blobRef
	rsc, _, err := fetcher.Fetch(blobRef)
	if err != nil {
		return fmt.Errorf("schema/filereader: fetching schema blob %s: %v", blobRef, err)
	}
	// Close the fetched blob when done; the original never closed it,
	// leaking the reader (NewFileReader in this file does close its fetch).
	defer rsc.Close()
	if err = json.NewDecoder(rsc).Decode(ss); err != nil {
		return fmt.Errorf("schema/filereader: decoding schema blob %s: %v", blobRef, err)
	}
	return nil
}
// StaticSet returns the whole of the static set members of that directory,
// fetching and decoding the referenced "static-set" blob on first use and
// caching the result on the receiver.
func (dr *DirReader) StaticSet() ([]*blobref.BlobRef, error) {
	if dr.staticSet != nil {
		return dr.staticSet, nil
	}
	staticSetBlobref := blobref.Parse(dr.ss.Entries)
	if staticSetBlobref == nil {
		return nil, fmt.Errorf("schema/filereader: Invalid blobref\n")
	}
	rsc, _, err := dr.fetcher.Fetch(staticSetBlobref)
	if err != nil {
		return nil, fmt.Errorf("schema/filereader: fetching schema blob %s: %v", staticSetBlobref, err)
	}
	// Close the fetched blob once decoded; the original leaked it.
	defer rsc.Close()
	ss := new(Superset)
	if err = json.NewDecoder(rsc).Decode(ss); err != nil {
		return nil, fmt.Errorf("schema/filereader: decoding schema blob %s: %v", staticSetBlobref, err)
	}
	if ss.Type != "static-set" {
		return nil, fmt.Errorf("schema/filereader: expected \"static-set\" schema blob for %s, got %q", staticSetBlobref, ss.Type)
	}
	for _, s := range ss.Members {
		member := blobref.Parse(s)
		if member == nil {
			return nil, fmt.Errorf("schema/filereader: invalid (static-set member) blobref\n")
		}
		dr.staticSet = append(dr.staticSet, member)
	}
	return dr.staticSet, nil
}
// Readdir implements the Directory interface.
// n <= 0 returns all entries; a positive n returns at most n entries
// starting at dr.current, with io.EOF when fewer than n remain.
// NOTE(review): dr.current is never advanced after a successful read, so
// consecutive calls with n > 0 return the same entries — confirm whether
// that is intended.
func (dr *DirReader) Readdir(n int) (entries []DirectoryEntry, err error) {
	sts, err := dr.StaticSet()
	if err != nil {
		return nil, fmt.Errorf("schema/filereader: can't get StaticSet: %v\n", err)
	}
	up := dr.current + n
	if n <= 0 {
		dr.current = 0
		up = len(sts)
	} else {
		if n > (len(sts) - dr.current) {
			err = io.EOF
			up = len(sts)
		}
	}
	for _, entryBref := range sts[dr.current:up] {
		entry, err := NewDirectoryEntryFromBlobRef(dr.fetcher, entryBref)
		if err != nil {
			return nil, fmt.Errorf("schema/filereader: can't create dirEntry: %v\n", err)
		}
		entries = append(entries, entry)
	}
	return entries, err
}
// A FileReader reads the bytes of "file" and "bytes" schema blobrefs.
// The embedded SectionReader provides Read/Seek/ReadAt over the logical
// byte range [0, size).
type FileReader struct {
	*io.SectionReader
	fetcher blobref.SeekFetcher
	ss      *Superset
	size    int64 // total number of bytes
}
// NewFileReader returns a new FileReader reading the contents of fileBlobRef,
// fetching blobs from fetcher. The fileBlobRef must be of a "bytes" or "file"
// schema blob.
func NewFileReader(fetcher blobref.SeekFetcher, fileBlobRef *blobref.BlobRef) (*FileReader, error) {
	// TODO(bradfitz): make this take a blobref.FetcherAt instead?
	// TODO(bradfitz): rename this into bytes reader? but for now it's still
	// named FileReader, but can also read a "bytes" schema.
	if fileBlobRef == nil {
		return nil, errors.New("schema/filereader: NewFileReader blobref was nil")
	}
	rsc, _, err := fetcher.Fetch(fileBlobRef)
	if err != nil {
		return nil, fmt.Errorf("schema/filereader: fetching file schema blob: %v", err)
	}
	defer rsc.Close()
	ss := new(Superset)
	if err = json.NewDecoder(rsc).Decode(ss); err != nil {
		return nil, fmt.Errorf("schema/filereader: decoding file schema blob: %v", err)
	}
	if ss.Type != "file" && ss.Type != "bytes" {
		return nil, fmt.Errorf("schema/filereader: expected \"file\" or \"bytes\" schema blob, got %q", ss.Type)
	}
	fr, err := ss.NewFileReader(fetcher)
	if err != nil {
		return nil, fmt.Errorf("schema/filereader: creating FileReader for %s: %v", fileBlobRef, err)
	}
	return fr, nil
}
// NewFileReader builds a FileReader over this superset, which must be of
// type "file" or "bytes". The total size is the sum of the part sizes.
func (ss *Superset) NewFileReader(fetcher blobref.SeekFetcher) (*FileReader, error) {
	if ss.Type != "file" && ss.Type != "bytes" {
		return nil, fmt.Errorf("schema/filereader: Superset not of type \"file\" or \"bytes\"")
	}
	size := int64(ss.SumPartsSize())
	fr := &FileReader{
		fetcher: fetcher,
		ss:      ss,
		size:    size,
	}
	// The FileReader itself implements ReadAt; SectionReader layers
	// Read/Seek on top of it.
	fr.SectionReader = io.NewSectionReader(fr, 0, size)
	return fr, nil
}
// FileSchema returns the reader's schema superset. Don't mutate it.
func (fr *FileReader) FileSchema() *Superset {
	return fr.ss
}

// Close implements io.Closer. Currently a no-op.
func (fr *FileReader) Close() error {
	// TODO: close cached blobs?
	return nil
}
// Skip skips past skipBytes of the file.
// It is equivalent to but more efficient than:
//
//	io.CopyN(ioutil.Discard, fr, skipBytes)
//
// It returns the number of bytes skipped.
//
// TODO(bradfitz): delete this. Legacy interface; callers should just Seek now.
func (fr *FileReader) Skip(skipBytes uint64) uint64 {
	oldOff, err := fr.Seek(0, os.SEEK_CUR)
	if err != nil {
		panic("Failed to seek")
	}
	// Clamp to the remaining bytes so we never seek past EOF.
	remain := fr.size - oldOff
	if int64(skipBytes) > remain {
		skipBytes = uint64(remain)
	}
	newOff, err := fr.Seek(int64(skipBytes), os.SEEK_CUR)
	if err != nil {
		panic("Failed to seek")
	}
	skipped := newOff - oldOff
	if skipped < 0 {
		panic("")
	}
	return uint64(skipped)
}
// Compile-time assertion that *FileReader satisfies the reader interfaces
// it advertises.
var _ interface {
	io.ReaderAt
	io.Reader
	io.Closer
	Size() int64
} = (*FileReader)(nil)
// ReadAt implements io.ReaderAt, filling p from the logical file offset by
// repeatedly reading chunk readers positioned at the current offset until
// p is full, EOF, or an error.
func (fr *FileReader) ReadAt(p []byte, offset int64) (n int, err error) {
	if offset < 0 {
		return 0, errors.New("schema/filereader: negative offset")
	}
	if offset >= fr.Size() {
		return 0, io.EOF
	}
	want := len(p)
	for len(p) > 0 && err == nil {
		var rc io.ReadCloser
		rc, err = fr.readerForOffset(offset)
		if err != nil {
			return
		}
		var n1 int64 // never bigger than an int
		n1, err = io.CopyN(&sliceWriter{p}, rc, int64(len(p)))
		rc.Close()
		if err == io.EOF {
			// EOF here only ends the current chunk, not the whole file.
			err = nil
		}
		if n1 == 0 {
			break
		}
		p = p[n1:]
		offset += int64(n1)
		n += int(n1)
	}
	if n < want && err == nil {
		// ReaderAt contract: a short read must carry an error.
		err = io.ErrUnexpectedEOF
	}
	return n, err
}
// sliceWriter is an io.Writer that fills a fixed destination slice,
// advancing past the bytes written on each call.
type sliceWriter struct {
	dst []byte
}

// Write copies as much of p as fits into the remaining destination space.
// It never reports an error; a short write simply returns n < len(p).
func (sw *sliceWriter) Write(p []byte) (int, error) {
	copied := copy(sw.dst, p)
	sw.dst = sw.dst[copied:]
	return copied, nil
}
// eofReader is a shared, already-exhausted ReadCloser returned for
// offsets at or beyond the end of the file.
var eofReader io.ReadCloser = ioutil.NopCloser(strings.NewReader(""))

// readerForOffset returns a ReadCloser that reads some number of bytes and then EOF
// from the provided offset. Seeing EOF doesn't mean the end of the whole file; just the
// chunk at that offset. The caller must close the ReadCloser when done reading.
func (fr *FileReader) readerForOffset(off int64) (io.ReadCloser, error) {
	if off < 0 {
		panic("negative offset")
	}
	if off >= fr.size {
		return eofReader, nil
	}
	// Walk the parts list, discarding whole parts that lie before off;
	// offRemain becomes the offset within the first relevant part.
	offRemain := off
	parts := fr.ss.Parts
	for len(parts) > 0 && parts[0].Size <= uint64(offRemain) {
		offRemain -= int64(parts[0].Size)
		parts = parts[1:]
	}
	if len(parts) == 0 {
		return eofReader, nil
	}
	p0 := parts[0]
	var rsc blobref.ReadSeekCloser
	var err error
	switch {
	case p0.BlobRef != nil && p0.BytesRef != nil:
		return nil, fmt.Errorf("part illegally contained both a blobRef and bytesRef")
	case p0.BlobRef == nil && p0.BytesRef == nil:
		// A part with no reference is a run of zeros (sparse hole).
		return &nZeros{int(p0.Size - uint64(offRemain))}, nil
	case p0.BlobRef != nil:
		rsc, _, err = fr.fetcher.Fetch(p0.BlobRef)
	case p0.BytesRef != nil:
		rsc, err = NewFileReader(fr.fetcher, p0.BytesRef)
	}
	if err != nil {
		return nil, err
	}
	offRemain += int64(p0.Offset)
	if offRemain > 0 {
		newPos, err := rsc.Seek(offRemain, os.SEEK_SET)
		if err != nil {
			return nil, err
		}
		if newPos != offRemain {
			panic("Seek didn't work")
		}
	}
	// Limit the reader to this part's size so EOF marks the chunk end.
	return struct {
		io.Reader
		io.Closer
	}{
		io.LimitReader(rsc, int64(p0.Size)),
		rsc,
	}, nil
}
// nZeros is a ReadCloser that reads remain zeros before EOF.
type nZeros struct {
	remain int
}

// Read fills p with up to z.remain zero bytes and decrements the budget.
// io.EOF is returned only once the budget is exhausted and nothing was
// written. The original wrote p[0] repeatedly without advancing, so bytes
// past index 0 were never zeroed even though n counted them.
func (z *nZeros) Read(p []byte) (n int, err error) {
	for ; n < len(p) && z.remain > 0; n++ {
		p[n] = 0
		z.remain--
	}
	if n == 0 && z.remain == 0 {
		err = io.EOF
	}
	return
}

// Close implements io.Closer as a no-op.
func (*nZeros) Close() error { return nil }
|
[
4
] |
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/4316fc1aa18bb04678b156f23b22c9d3f996f9c9
package types
// TrackHits holds the union for the following types:
//
//	bool
//	int
//
// https://github.com/elastic/elasticsearch-specification/blob/4316fc1aa18bb04678b156f23b22c9d3f996f9c9/specification/_global/search/_types/hits.ts#L122-L130
type TrackHits interface{}

// TrackHitsBuilder holds TrackHits struct and provides a builder API.
// (Generated code: the union value is stored as a pointer to whichever
// variant was last set.)
type TrackHitsBuilder struct {
	v TrackHits
}

// NewTrackHitsBuilder provides a builder for the TrackHits struct.
func NewTrackHitsBuilder() *TrackHitsBuilder {
	return &TrackHitsBuilder{}
}

// Build finalize the chain and returns the TrackHits struct
func (u *TrackHitsBuilder) Build() TrackHits {
	return u.v
}

// Bool sets the union to the given bool variant.
func (u *TrackHitsBuilder) Bool(bool bool) *TrackHitsBuilder {
	u.v = &bool
	return u
}

// Int sets the union to the given int variant.
func (u *TrackHitsBuilder) Int(int int) *TrackHitsBuilder {
	u.v = &int
	return u
}
|
[
1
] |
package dao
import (
"context"
"fmt"
"time"
"github.com/IsaiasMorochi/twitter-clone-backend/config"
"github.com/IsaiasMorochi/twitter-clone-backend/models"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// SearchProfile looks up the user document with the given hex ObjectID in
// the clone-twitter "users" collection and returns it. An invalid ID or a
// missing document is reported as an error.
func SearchProfile(ID string) (models.Users, error) {
	var profile models.Users

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*15)
	defer cancel()

	// Validate the ID up front. The original discarded this error and
	// silently queried with the zero ObjectID on malformed input.
	objectID, err := primitive.ObjectIDFromHex(ID)
	if err != nil {
		return profile, err
	}

	db := config.MongoCnx.Database("clone-twitter")
	collection := db.Collection("users")

	condition := bson.M{
		"_id": objectID,
	}
	if err := collection.FindOne(ctx, condition).Decode(&profile); err != nil {
		fmt.Println("Registro no encontrado " + err.Error())
		return profile, err
	}
	return profile, nil
}
|
[
2
] |
package suffixer
import (
"log"
"regexp"
"strconv"
)
// Patterns are compiled once at package scope instead of on every Extract
// call, as regexp compilation is comparatively expensive.
var (
	// ofRe matches "<base> (<nb> of <max>)<ext>" file names.
	ofRe = regexp.MustCompile(`(.*)? \((\d+) of \d+\)(\..+)?`)
	// extRe splits a plain file name into base and extension.
	extRe = regexp.MustCompile(`(.*)(\..+)`)
)

// OfSuffixer extracts fileName suffix (XX of XX)
type OfSuffixer struct {
	Max int
}

// NewOfSuffixer returns the suffixer with the pattern (X of X)
func NewOfSuffixer(max int) *OfSuffixer {
	return &OfSuffixer{max}
}

// Extract splits fileName into its base name, extension and the numeric
// "(nb of max)" counter. nb is 0 and ext may be empty when the
// corresponding part is absent from the name.
func (s *OfSuffixer) Extract(fileName string) (base, ext string, nb int) {
	if m := ofRe.FindStringSubmatch(fileName); m != nil {
		base = m[1]
		var err error
		// Cannot fail for input matched by \d+ unless the number overflows.
		if nb, err = strconv.Atoi(m[2]); err != nil {
			log.Fatal(err)
		}
		ext = m[3]
		return
	}
	if m := extRe.FindStringSubmatch(fileName); m != nil {
		base = m[1]
		ext = m[2]
		return
	}
	base = fileName
	return
}
|
[
1
] |
package main
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
// main solves Advent of Code 2020 day 2: count the passwords in input.txt
// that satisfy each of the two policies. Every line has the shape
// "1-3 a: abcde".
func main() {
	// Part 1: the letter's occurrence count must lie within [lo, hi].
	file, _ := os.Open("input.txt")
	scanner := bufio.NewScanner(file)
	valid := 0
	for scanner.Scan() {
		lo, hi, letter, password := parsePolicyLine(scanner.Text())
		if c := strings.Count(password, letter); c >= lo && c <= hi {
			valid++
		}
	}
	fmt.Print("Part 1: ")
	fmt.Println(valid)
	file.Close()

	// Part 2: the letter must appear at exactly one of the two
	// (1-based) positions.
	valid = 0
	file, _ = os.Open("input.txt")
	scanner = bufio.NewScanner(file)
	for scanner.Scan() {
		pos1, pos2, letter, password := parsePolicyLine(scanner.Text())
		runes := []rune(password)
		atFirst := string(runes[pos1-1]) == letter
		atSecond := string(runes[pos2-1]) == letter
		if atFirst != atSecond { // exactly one position matches
			valid++
		}
	}
	fmt.Print("Part 2: ")
	fmt.Println(valid)
}

// parsePolicyLine splits "N-M x: password" into its two numbers, the
// policy letter, and the password (whitespace-trimmed).
func parsePolicyLine(line string) (first, second int, letter, password string) {
	policyAndPassword := strings.Split(line, ":")
	password = strings.TrimSpace(policyAndPassword[1])
	fields := strings.Split(policyAndPassword[0], " ")
	letter = fields[1]
	bounds := strings.Split(fields[0], "-")
	first, _ = strconv.Atoi(bounds[0])
	second, _ = strconv.Atoi(bounds[1])
	return
}
|
[
5
] |
package main
import (
"fmt"
"log"
)
func CheckAllFiles() {
tx, err := DB.Begin()
if err != nil {
log.Fatal("Unable to begin transaction: ", err)
}
var files []FileList = getAllFileNamesDB(tx)
fmt.Println("Checking:", len(files), "files")
var onepercent int = len(files) / 100
for i, file := range files {
if i != 0 && (i%onepercent) == 0 {
fmt.Println(".")
}
if !file.Ignored {
var hashes []FileHashes = getAllHashesDB(tx, file.FileName)
var issue = false
for i, nhash := range hashes {
if i+1 < len(hashes) {
var ohash FileHashes = hashes[i+1]
if nhash.Size != ohash.Size {
fmt.Println(nhash.FileName, ":::: changed sizes between", ohash.ScanDate, "and", nhash.ScanDate)
issue = true
break
} else if nhash.MD5 != "" && ohash.MD5 != "" && nhash.MD5 != ohash.MD5 {
fmt.Println(nhash.FileName, ":::: md5 changed between", ohash.ScanDate, "and", nhash.ScanDate)
issue = true
break
} else if nhash.SHA1 != "" && ohash.SHA1 != "" && nhash.SHA1 != ohash.SHA1 {
fmt.Println(nhash.FileName, ":::: sha1 changed between", ohash.ScanDate, "and", nhash.ScanDate)
issue = true
break
} else if nhash.SHA256 != "" && ohash.SHA256 != "" && nhash.SHA256 != ohash.SHA256 {
fmt.Println(nhash.FileName, ":::: sha256 changed between", ohash.ScanDate, "and", nhash.ScanDate)
issue = true
break
} else if nhash.Tiger != "" && ohash.Tiger != "" && nhash.Tiger != ohash.Tiger {
fmt.Println(nhash.FileName, ":::: tiger changed between", ohash.ScanDate, "and", nhash.ScanDate)
issue = true
break
} else if nhash.Whirlpool != "" && ohash.Whirlpool != "" && nhash.Whirlpool != ohash.Whirlpool {
fmt.Println(nhash.FileName, ":::: whirlpool changed between", ohash.ScanDate, "and", nhash.ScanDate)
issue = true
break
}
}
}
if !issue {
issue = true
}
}
}
tx.Commit()
}
|
[
5
] |
/**
inspired by https://www.geeksforgeeks.org/closest-pair-of-points-using-divide-and-conquer-algorithm/
*/
package closest_pair_problem
import (
"math"
"sort"
)
// closestInStrip scans a vertical strip of candidate points for a pair
// closer than min. Points are examined in ascending y order so the inner
// loop can stop as soon as the vertical gap alone reaches the best
// distance found so far. Returns the best distance (at most min) and the
// matching pair (nil when no pair beats min).
func closestInStrip(data []point, min float64) (length float64, closestPair []point) {
	length = min
	sorted := make([]point, len(data))
	copy(sorted, data)
	sort.Slice(sorted, func(i, j int) bool {
		return sorted[i].y < sorted[j].y
	})
	for i := 0; i < len(sorted); i++ {
		// Fix: the original sorted a copy but then indexed the UNSORTED
		// input slice, which invalidated the y-gap early exit and could
		// miss the true closest pair.
		for j := i + 1; j < len(sorted) && sorted[j].y-sorted[i].y < length; j++ {
			if d := sorted[i].distance(sorted[j]); d < length {
				length = d
				closestPair = []point{sorted[i], sorted[j]}
			}
		}
	}
	return
}
// divide is the recursive step of the divide-and-conquer closest-pair
// algorithm. data must be sorted by x (planarCase guarantees this).
// Small inputs fall back to brute force; otherwise the halves are solved
// recursively and a strip around the dividing line is checked for
// cross-boundary pairs.
func divide(data []point) (length float64, closestPair []point) {
	if len(data) <= 3 {
		return bruteForce(data)
	}
	middle := len(data) / 2
	middlePoint := data[middle]
	dl, inLeft := divide(data[:middle])
	dr, inRight := divide(data[middle:])
	d := math.Min(dl, dr)
	// Collect points whose x distance to the dividing line is below d.
	strip := make([]point, 0)
	for _, el := range data {
		if math.Abs(el.x-middlePoint.x) < d {
			strip = append(strip, el)
		}
	}
	ds, inStrip := closestInStrip(strip, d)
	// Pick the minimum of the three candidates.
	length = ds
	closestPair = inStrip
	if dl < dr && dl < ds {
		length = dl
		closestPair = inLeft
	} else if dr < ds {
		length = dr
		closestPair = inRight
	}
	return
}
// planarCase solves the planar closest-pair problem: it sorts a copy of
// the input by x coordinate (leaving the caller's slice untouched) and
// hands it to the divide-and-conquer recursion.
func planarCase(data []point) (float64, []point) {
	points := append([]point(nil), data...)
	sort.Slice(points, func(a, b int) bool {
		return points[a].x < points[b].x
	})
	return divide(points)
}
|
[
1
] |
package stake
import (
"errors"
"fmt"
"runtime/debug"
"github.com/QOSGroup/qbase/context"
btypes "github.com/QOSGroup/qbase/types"
ecomapper "github.com/QOSGroup/qos/module/eco/mapper"
ecotypes "github.com/QOSGroup/qos/module/eco/types"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto"
)
/*
custom path:
	/custom/stake/$query path

query path:
	/delegation/:delegatorAddr/:ownerAddr  : look up one delegation by delegator and owner (first: delegator)
	/delegations/owner/:ownerAddr          : list all delegations under an owner
	/delegations/delegator/:delegatorAddr  : list all delegations made by a delegator

return:
	JSON byte slice
*/
// Query dispatches ABCI custom queries for the stake module; route holds
// the path segments after /custom/stake/. Panics are converted into an
// internal error carrying the stack trace.
func Query(ctx context.Context, route []string, req abci.RequestQuery) (res []byte, err btypes.Error) {
	defer func() {
		if r := recover(); r != nil {
			err = btypes.ErrInternal(string(debug.Stack()))
			return
		}
	}()

	if len(route) < 3 {
		return nil, btypes.ErrInternal("custom query miss parameters")
	}

	// parseAddr checks the bech32 parse error; the original discarded it
	// and proceeded with a nil address.
	parseAddr := func(s string) (btypes.Address, btypes.Error) {
		addr, e := btypes.GetAddrFromBech32(s)
		if e != nil {
			return nil, btypes.ErrInternal("invalid bech32 address in query path")
		}
		return addr, nil
	}

	var data []byte
	var e error

	switch {
	case route[0] == ecotypes.Delegation:
		deleAddr, perr := parseAddr(route[1])
		if perr != nil {
			return nil, perr
		}
		ownerAddr, perr := parseAddr(route[2])
		if perr != nil {
			return nil, perr
		}
		data, e = getDelegationByOwnerAndDelegator(ctx, ownerAddr, deleAddr)
	case route[0] == ecotypes.Delegations && route[1] == ecotypes.Owner:
		ownerAddr, perr := parseAddr(route[2])
		if perr != nil {
			return nil, perr
		}
		data, e = getDelegationsByOwner(ctx, ownerAddr)
	case route[0] == ecotypes.Delegations && route[1] == ecotypes.Delegator:
		deleAddr, perr := parseAddr(route[2])
		if perr != nil {
			return nil, perr
		}
		data, e = getDelegationsByDelegator(ctx, deleAddr)
	default:
		e = errors.New("not found match path")
	}

	if e != nil {
		return nil, btypes.ErrInternal(e.Error())
	}
	return data, nil
}
// getDelegationByOwnerAndDelegator returns, JSON-encoded, the delegation made
// by `delegator` to the validator owned by `owner`. Errors when either the
// validator or the delegation does not exist.
// Fixes the "exsits" typo in variable names and error messages.
func getDelegationByOwnerAndDelegator(ctx context.Context, owner, delegator btypes.Address) ([]byte, error) {
	validatorMapper := ecomapper.GetValidatorMapper(ctx)
	delegationMapper := ecomapper.GetDelegationMapper(ctx)
	validator, exists := validatorMapper.GetValidatorByOwner(owner)
	if !exists {
		return nil, fmt.Errorf("validator does not exist. owner: %s", owner.String())
	}
	info, exists := delegationMapper.GetDelegationInfo(delegator, validator.GetValidatorAddress())
	if !exists {
		return nil, fmt.Errorf("delegationInfo does not exist. owner: %s , deleAddr: %s", owner.String(), delegator.String())
	}
	result := infoToDelegationQueryResult(validator, info)
	return validatorMapper.GetCodec().MarshalJSON(result)
}
// getDelegationsByOwner returns, JSON-encoded, every delegation made to the
// validator owned by `owner`. Errors when the validator does not exist.
// Fixes the "exsits" typo in variable names and the error message.
func getDelegationsByOwner(ctx context.Context, owner btypes.Address) ([]byte, error) {
	validatorMapper := ecomapper.GetValidatorMapper(ctx)
	delegationMapper := ecomapper.GetDelegationMapper(ctx)
	validator, exists := validatorMapper.GetValidatorByOwner(owner)
	if !exists {
		return nil, fmt.Errorf("validator does not exist. owner: %s", owner.String())
	}
	// result stays nil when there are no delegations so it marshals as JSON null,
	// matching the original behavior.
	var result []DelegationQueryResult
	delegationMapper.IterateDelegationsValDeleAddr(validator.GetValidatorAddress(), func(valAddr, deleAddr btypes.Address) {
		info, _ := delegationMapper.GetDelegationInfo(deleAddr, valAddr)
		result = append(result, infoToDelegationQueryResult(validator, info))
	})
	return validatorMapper.GetCodec().MarshalJSON(result)
}
// getDelegationsByDelegator collects every delegation made by `delegator`,
// resolving each delegation's validator, and returns the list JSON-encoded.
func getDelegationsByDelegator(ctx context.Context, delegator btypes.Address) ([]byte, error) {
	valMapper := ecomapper.GetValidatorMapper(ctx)
	delMapper := ecomapper.GetDelegationMapper(ctx)
	// Keep results nil when empty so JSON output (null) matches the original.
	var results []DelegationQueryResult
	delMapper.IterateDelegationsInfo(delegator, func(info ecotypes.DelegationInfo) {
		validator, _ := valMapper.GetValidator(info.ValidatorAddr)
		results = append(results, infoToDelegationQueryResult(validator, info))
	})
	return valMapper.GetCodec().MarshalJSON(results)
}
// infoToDelegationQueryResult adapts a raw DelegationInfo plus its validator
// into the JSON-friendly DelegationQueryResult shape.
func infoToDelegationQueryResult(validator ecotypes.Validator, info ecotypes.DelegationInfo) DelegationQueryResult {
	return NewDelegationQueryResult(
		info.DelegatorAddr,
		validator.Owner,
		validator.ValidatorPubKey,
		info.Amount,
		info.IsCompound,
	)
}
// DelegationQueryResult is the JSON shape returned by the delegation
// query endpoints: who delegated, to which validator (by owner address
// and pubkey), how much, and whether rewards are compounded.
type DelegationQueryResult struct {
	DelegatorAddr btypes.Address `json:"delegator_address"`
	OwnerAddr btypes.Address `json:"owner_address"`
	ValidatorPubKey crypto.PubKey `json:"validator_pub_key"`
	Amount uint64 `json:"delegate_amount"`
	IsCompound bool `json:"is_compound"`
}
// NewDelegationQueryResult assembles a DelegationQueryResult from its parts.
func NewDelegationQueryResult(deleAddr, ownerAddr btypes.Address, valPubkey crypto.PubKey, amount uint64, compound bool) DelegationQueryResult {
	var result DelegationQueryResult
	result.DelegatorAddr = deleAddr
	result.OwnerAddr = ownerAddr
	result.ValidatorPubKey = valPubkey
	result.Amount = amount
	result.IsCompound = compound
	return result
}
|
[
5
] |
package build
import (
"archive/tar"
"bytes"
"context"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
"github.com/windmilleng/tilt/internal/dockerfile"
"github.com/windmilleng/tilt/internal/model"
opentracing "github.com/opentracing/opentracing-go"
)
// ArchiveBuilder incrementally builds an in-memory tar archive.
// Files matching `filter` are excluded from the archive.
type ArchiveBuilder struct {
	tw *tar.Writer
	buf *bytes.Buffer
	filter model.PathMatcher
}
// NewArchiveBuilder returns an ArchiveBuilder writing into a fresh
// in-memory buffer. A nil filter is replaced with the empty matcher
// so nothing is excluded.
func NewArchiveBuilder(filter model.PathMatcher) *ArchiveBuilder {
	if filter == nil {
		filter = model.EmptyMatcher
	}
	buffer := new(bytes.Buffer)
	return &ArchiveBuilder{
		tw:     tar.NewWriter(buffer),
		buf:    buffer,
		filter: filter,
	}
}
// close finalizes the tar stream (writes the trailing blocks); the
// archive must not be written to afterwards.
func (a *ArchiveBuilder) close() error {
	return a.tw.Close()
}
// archiveDf writes the Dockerfile contents into the archive as a regular
// file named "Dockerfile".
func (a *ArchiveBuilder) archiveDf(ctx context.Context, df dockerfile.Dockerfile) error {
	span, _ := opentracing.StartSpanFromContext(ctx, "daemon-archiveDf")
	defer span.Finish()
	// Take a single timestamp so Mod/Access/Change times agree; the original
	// called time.Now() three times and could record slightly different values.
	now := time.Now()
	tarHeader := &tar.Header{
		Name:       "Dockerfile",
		Typeflag:   tar.TypeReg,
		Size:       int64(len(df)),
		ModTime:    now,
		AccessTime: now,
		ChangeTime: now,
	}
	if err := a.tw.WriteHeader(tarHeader); err != nil {
		return err
	}
	if _, err := a.tw.Write([]byte(df)); err != nil {
		return err
	}
	return nil
}
// ArchivePathsIfExist creates a tar archive of all local files in `paths`. It quietly skips any paths that don't exist.
func (a *ArchiveBuilder) ArchivePathsIfExist(ctx context.Context, paths []pathMapping) error {
	span, ctx := opentracing.StartSpanFromContext(ctx, "daemon-ArchivePathsIfExist")
	defer span.Finish()
	for _, mapping := range paths {
		if err := a.tarPath(ctx, mapping.LocalPath, mapping.ContainerPath); err != nil {
			return errors.Wrapf(err, "tarPath '%s'", mapping.LocalPath)
		}
	}
	return nil
}
// BytesBuffer finalizes the archive and returns the buffer holding it.
// The builder must not be written to after this call.
func (a *ArchiveBuilder) BytesBuffer() (*bytes.Buffer, error) {
	if err := a.close(); err != nil {
		return nil, err
	}
	return a.buf, nil
}
// tarPath writes the given source path into tarWriter at the given dest (recursively for directories).
// e.g. tarring my_dir --> dest d: d/file_a, d/file_b
// If source path does not exist, quietly skips it and returns no err
func (a *ArchiveBuilder) tarPath(ctx context.Context, source, dest string) error {
	span, ctx := opentracing.StartSpanFromContext(ctx, fmt.Sprintf("daemon-tarPath-%s", source))
	span.SetTag("source", source)
	span.SetTag("dest", dest)
	defer span.Finish()
	sourceInfo, err := os.Stat(source)
	if err != nil {
		// Missing sources are skipped by design (see doc comment above).
		if os.IsNotExist(err) {
			return nil
		}
		return errors.Wrapf(err, "%s: stat", source)
	}
	sourceIsDir := sourceInfo.IsDir()
	if sourceIsDir {
		// Make sure we can trim this off filenames to get valid relative filepaths
		if !strings.HasSuffix(source, "/") {
			source += "/"
		}
	}
	// Tar entry names are always relative (no leading slash).
	dest = strings.TrimPrefix(dest, "/")
	err = filepath.Walk(source, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return errors.Wrapf(err, "error walking to %s", path)
		}
		// The filter EXCLUDES matches: a matching path is skipped, not kept.
		matches, err := a.filter.Matches(path, info.IsDir())
		if err != nil {
			return err
		} else if matches {
			return nil
		}
		header, err := tar.FileInfoHeader(info, path)
		if err != nil {
			return errors.Wrapf(err, "%s: making header", path)
		}
		if sourceIsDir {
			// Name of file in tar should be relative to source directory...
			header.Name = strings.TrimPrefix(path, source)
			// ...and live inside `dest`
			header.Name = filepath.Join(dest, header.Name)
		} else if strings.HasSuffix(dest, string(filepath.Separator)) {
			// A trailing separator means dest is a directory: place the file inside it.
			header.Name = filepath.Join(dest, filepath.Base(source))
		} else {
			// Otherwise dest is the exact in-archive name for this single file.
			header.Name = dest
		}
		header.Name = filepath.Clean(header.Name)
		err = a.tw.WriteHeader(header)
		if err != nil {
			return errors.Wrapf(err, "%s: writing header", path)
		}
		if info.IsDir() {
			return nil
		}
		// Only regular files have contents to copy; other types (symlinks,
		// devices) are represented by their header alone.
		if header.Typeflag == tar.TypeReg {
			file, err := os.Open(path)
			if err != nil {
				// In case the file has been deleted since we last looked at it.
				if os.IsNotExist(err) {
					return nil
				}
				return errors.Wrapf(err, "%s: open", path)
			}
			defer func() {
				_ = file.Close()
			}()
			// Copy exactly the size recorded in the header; a short read (EOF)
			// is tolerated rather than failing the whole archive.
			_, err = io.CopyN(a.tw, file, info.Size())
			if err != nil && err != io.EOF {
				return errors.Wrapf(err, "%s: copying Contents", path)
			}
		}
		return nil
	})
	return err
}
// len reports the number of bytes currently buffered in the archive.
func (a *ArchiveBuilder) len() int {
	return a.buf.Len()
}
// tarContextAndUpdateDf archives all of `paths` (honoring `filter`) plus
// the Dockerfile, returning the buffer holding the finished tar stream.
func tarContextAndUpdateDf(ctx context.Context, df dockerfile.Dockerfile, paths []pathMapping, filter model.PathMatcher) (*bytes.Buffer, error) {
	span, ctx := opentracing.StartSpanFromContext(ctx, "daemon-tarContextAndUpdateDf")
	defer span.Finish()
	builder := NewArchiveBuilder(filter)
	if err := builder.ArchivePathsIfExist(ctx, paths); err != nil {
		return nil, errors.Wrap(err, "archivePaths")
	}
	if err := builder.archiveDf(ctx, df); err != nil {
		return nil, errors.Wrap(err, "archiveDf")
	}
	return builder.BytesBuffer()
}
// tarDfOnly builds a tar archive containing only the Dockerfile.
func tarDfOnly(ctx context.Context, df dockerfile.Dockerfile) (*bytes.Buffer, error) {
	builder := NewArchiveBuilder(model.EmptyMatcher)
	if err := builder.archiveDf(ctx, df); err != nil {
		return nil, errors.Wrap(err, "tarDfOnly")
	}
	return builder.BytesBuffer()
}
|
[
5
] |
package robots
import (
"bufio"
"errors"
"fmt"
"log"
"os"
"strconv"
)
// UserInputRobot handles user's input: it prompts on stdin for a search
// term and a prefix, and stores both into the shared pipeline State.
type UserInputRobot struct{}
// Start UserInputRobot: prompts the user for a search term and a prefix,
// records both in the pipeline state, and logs what was collected.
func (robot *UserInputRobot) Start(state *State) {
	state.SearchTerm = robot.askForSearchTerm()
	state.Prefix = robot.askForPrefix()
	fmt.Println("[user_input] => Successfully requested user's input")
	fmt.Printf("[user_input] => Search term: %s\n", state.SearchTerm)
	fmt.Printf("[user_input] => Prefix: %s\n", state.Prefix)
	fmt.Println("[user_input] => Done, adiós xD")
}
// askForSearchTerm prompts for and returns a Wikipedia search term;
// a read failure is fatal.
func (robot *UserInputRobot) askForSearchTerm() string {
	fmt.Println("Type a Wikipedia search term:")
	term, err := robot.readline()
	if err != nil {
		log.Fatalf("\n[user_input] => Error asking for search term %v", err)
	}
	return term
}
// askForPrefix shows a numbered menu of prefixes and returns the one the
// user picks (1-based input). Invalid input re-prompts until a valid
// option is chosen.
func (robot *UserInputRobot) askForPrefix() string {
	fmt.Println("Select one option:")
	prefixes := [3]string{"Who is", "What is", "The history of"}
	for i, prefix := range prefixes {
		fmt.Printf("[%d] - %s\n", i+1, prefix)
	}
	selected, err := robot.readline()
	if err != nil {
		log.Fatalf("\n[user_input] => Error asking for prefix %v", err)
	}
	prefixIndex, err := strconv.Atoi(selected)
	prefixIndex = prefixIndex - 1
	// Bug fixes vs. original:
	//  - `>` allowed prefixIndex == len(prefixes), indexing out of range
	//    when the user typed 4; use `>=`.
	//  - the recursive retry's result was discarded and the invalid index
	//    was used anyway; return the retry's result instead.
	if prefixIndex >= len(prefixes) || prefixIndex < 0 || err != nil {
		fmt.Println("✖ Invalid option, please try again.")
		return robot.askForPrefix()
	}
	return prefixes[prefixIndex]
}
// readline prints a prompt and returns the next line typed on stdin.
// An error is returned when stdin yields no line (EOF or read failure).
func (robot *UserInputRobot) readline() (string, error) {
	fmt.Print("> ")
	scanner := bufio.NewScanner(os.Stdin)
	if scanner.Scan() {
		return scanner.Text(), nil
	}
	return "", errors.New("✖ Error reading user's input")
}
|
[
0
] |
package postgres
import (
"log"
"time"
"strconv"
"colsys-backend/pkg/domain"
sq "github.com/Masterminds/squirrel"
pgx "github.com/jackc/pgx"
)
// sensorQuery builds the base SELECT for non-deleted sensors,
// newest-updated first.
func sensorQuery() *sq.SelectBuilder {
	builder := psql.
		Select("id, connection, name, type, status").
		From("sensor").
		Where("isDeleted=?", false).
		OrderBy("updatedAt DESC")
	return &builder
}
// updateSensor builds the base UPDATE for the sensor table, always
// bumping updatedAt to the current time.
func updateSensor() *sq.UpdateBuilder {
	builder := psql.Update("sensor").Set("updatedAt", time.Now())
	return &builder
}
// scanSensor reads one sensor row (id, connection, name, type, status)
// into s, returning any scan error.
func scanSensor(row *pgx.Row, s *domain.Sensor) error {
	return row.Scan(&s.ID, &s.Connection, &s.Name, &s.Type, &s.Status)
}
// sensorReturnField is the RETURNING clause shared by sensor writes so
// every mutation hands back the same projection the reads use.
func sensorReturnField() string {
	const returning = "RETURNING id, connection, name, type, status"
	return returning
}
// buildSensorMap maps sensor fields to their column names for INSERT/UPDATE.
// Column keys are now consistently lowercase ("type", "status" were "Type",
// "Status"); Postgres folds unquoted identifiers to lowercase, so generated
// SQL resolves to the same columns, but the keys now match the SELECT list
// and the rest of the file. The parameter is also renamed to avoid the
// exported-looking `Sensor` shadowing the type name.
func buildSensorMap(s *domain.Sensor) map[string]interface{} {
	return sq.Eq{
		"connection": s.Connection,
		"name":       s.Name,
		"type":       s.Type,
		"status":     s.Status,
	}
}
// Sensors returns all non-deleted sensors, most recently updated first.
// Errors are logged (matching the package style) and a nil slice is
// returned on failure. The original never checked ToSql's error and
// ignored rows.Err(); both are handled now.
func Sensors() []*domain.Sensor {
	query, params, err := sensorQuery().ToSql()
	if err != nil {
		log.Print(err)
		return nil
	}
	rows, err := conn.Query(query, params...)
	if err != nil {
		log.Print(err)
		return nil
	}
	defer rows.Close()
	var sensors []*domain.Sensor
	for rows.Next() {
		var s domain.Sensor
		if err = rows.Scan(&s.ID, &s.Connection, &s.Name, &s.Type, &s.Status); err != nil {
			log.Print(err)
		}
		sensors = append(sensors, &s)
	}
	// Surface any error that terminated iteration early.
	if err = rows.Err(); err != nil {
		log.Print(err)
	}
	return sensors
}
// Sensor fetches a single non-deleted sensor by ID. Scan errors are
// logged and a zero-valued sensor is returned.
func Sensor(ID int) *domain.Sensor {
	var sensor domain.Sensor
	sql, args, _ := sensorQuery().Where("id=?", ID).ToSql()
	if err := scanSensor(conn.QueryRow(sql, args...), &sensor); err != nil {
		log.Print(err)
	}
	return &sensor
}
// CreateSensor inserts a sensor and returns the stored row as reported
// by the RETURNING clause. Errors are logged.
func CreateSensor(newSensor *domain.Sensor) *domain.Sensor {
	var created domain.Sensor
	sql, args, _ := psql.Insert("sensor").
		SetMap(buildSensorMap(newSensor)).
		Suffix(sensorReturnField()).
		ToSql()
	if err := scanSensor(conn.QueryRow(sql, args...), &created); err != nil {
		log.Print(err)
	}
	return &created
}
// UpdateSensor overwrites the sensor's fields (and updatedAt) and returns
// the stored row from the RETURNING clause. Errors are logged.
func UpdateSensor(ID int, newSensor *domain.Sensor) *domain.Sensor {
	var updated domain.Sensor
	sql, args, _ := updateSensor().
		SetMap(buildSensorMap(newSensor)).
		Where("id=?", ID).
		Suffix(sensorReturnField()).
		ToSql()
	if err := scanSensor(conn.QueryRow(sql, args...), &updated); err != nil {
		log.Print(err)
	}
	return &updated
}
// DeleteSensor soft-deletes a sensor (sets isDeleted) and returns the
// affected row from the RETURNING clause. Errors are logged.
func DeleteSensor(ID int) *domain.Sensor {
	var deleted domain.Sensor
	sql, args, _ := updateSensor().
		SetMap(sq.Eq{"isDeleted": true}).
		Where("id=?", ID).
		Suffix(sensorReturnField()).
		ToSql()
	if err := scanSensor(conn.QueryRow(sql, args...), &deleted); err != nil {
		log.Print(err)
	}
	return &deleted
}
// RecordSensorData inserts one sensor reading into sensorData.
// The original swallowed both the Atoi error and the (commented-out)
// Exec error; both are now logged, matching the package's style of
// logging rather than returning errors.
func RecordSensorData(sensorData *domain.SensorData) {
	sID, err := strconv.Atoi(sensorData.SensorID)
	if err != nil {
		log.Print(err)
		return
	}
	_, err = conn.Exec("INSERT INTO sensorData (sensorID, val, time) VALUES ($1, $2, $3)", sID, sensorData.Val, sensorData.Time)
	if err != nil {
		log.Print(err)
	}
}
// GetSensorData returns up to `limit` most recent readings for the sensor.
// Errors are logged and a nil slice is returned on failure. The original
// discarded ToSql's error and never checked rows.Err().
func GetSensorData(sensorID int, limit int32) []*domain.SensorData {
	query, params, err := psql.Select("val, time").
		From("sensorData").
		Where("sensorID=?", sensorID).
		OrderBy("time DESC").
		Limit(uint64(limit)).ToSql()
	if err != nil {
		log.Print(err)
		return nil
	}
	rows, err := conn.Query(query, params...)
	if err != nil {
		log.Print(err)
		return nil
	}
	defer rows.Close()
	var datas []*domain.SensorData
	for rows.Next() {
		var sd domain.SensorData
		if err = rows.Scan(&sd.Val, &sd.Time); err != nil {
			log.Print(err)
		}
		datas = append(datas, &sd)
	}
	// Surface any error that ended iteration early.
	if err = rows.Err(); err != nil {
		log.Print(err)
	}
	return datas
}
|
[
2
] |
// Copyright 2016 The Neural Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package neural
import (
"fmt"
"math"
"math/rand"
)
// Function32 defines a function that takes a float32 and returns a float32
type Function32 func(x float32) float32
// FunctionPair32 represents a function, a derivative of the function, and a
// transform used for inference during training
type FunctionPair32 struct {
	// F is the activation, T the training-time transform (identity unless
	// dropout is enabled), DF the derivative used by backpropagation.
	F, T, DF Function32
}
// Neural32 is a 32 bit neural network
type Neural32 struct {
	// Layers holds the width of each layer (bias units included for all
	// but the output layer after Init runs).
	Layers []int
	// Weights[l][j][i] connects neuron i of layer l to neuron j of layer l+1.
	Weights [][][]float32
	// Changes stores the previous weight deltas for momentum
	// (transposed layout relative to Weights: Changes[l][i][j]).
	Changes [][][]float32
	// Functions holds one activation/transform/derivative triple per layer
	// transition.
	Functions []FunctionPair32
}
// WeightInitializer32 is a function that initializes the neural network weights
// See: http://stats.stackexchange.com/questions/47590/what-are-good-initial-weights-in-a-neural-network
type WeightInitializer32 func(in, out int) float32
// WeightInitializer32Basic basic weight initialization: uniform in (-1, 1),
// ignoring the layer fan-in/fan-out.
func WeightInitializer32Basic(in, out int) float32 {
	return random32(-1, 1)
}
// WeightInitializer32FanIn fan in weight initialization: uniform in (-1, 1)
// scaled down by sqrt(fan-in) to keep activations from saturating.
func WeightInitializer32FanIn(in, out int) float32 {
	return random32(-1, 1) / float32(math.Sqrt(float64(in)))
}
// WeightInitializer32FanInFanOut fan in/fan out weight initialization:
// uniform in (-1, 1) scaled by 4*sqrt(6/(fan-in+fan-out)) (Glorot-style range).
func WeightInitializer32FanInFanOut(in, out int) float32 {
	return random32(-1, 1) * float32(4*math.Sqrt(6/float64(in+out)))
}
// Init initializes the neural network with the given layer widths.
// NOTE: the variadic `layers` slice is mutated in place — every layer
// except the output gains one bias unit (layers[l]++) — and the mutated
// slice is stored as n.Layers. Panics unless at least 2 layers are given.
func (n *Neural32) Init(initializer WeightInitializer32, layers ...int) {
	depth := len(layers) - 1
	if depth < 1 {
		panic("there should be at least 2 layers")
	}
	n.Layers = layers
	// Add a bias unit to every layer except the output layer.
	for l := range layers[:depth] {
		layers[l]++
	}
	// Weights[l][j][i]: row j is the incoming weight vector of neuron j
	// in layer l+1.
	n.Weights = make([][][]float32, depth)
	for l := range layers[:depth] {
		weights := matrix32(layers[l+1], layers[l])
		for i := 0; i < layers[l]; i++ {
			for j := 0; j < layers[l+1]; j++ {
				weights[j][i] = initializer(layers[l], layers[l+1])
			}
		}
		n.Weights[l] = weights
	}
	// Changes uses the transposed layout (source-major); BackPropagate
	// indexes it as Changes[l][i][j].
	n.Changes = make([][][]float32, depth)
	for l := range layers[:depth] {
		n.Changes[l] = matrix32(layers[l], layers[l+1])
	}
	// Default: sigmoid activation, identity training transform,
	// sigmoid derivative.
	n.Functions = make([]FunctionPair32, depth)
	for f := range n.Functions {
		n.Functions[f] = FunctionPair32{
			F: sigmoid32,
			T: identity,
			DF: dsigmoid32,
		}
	}
}
// UseTanh switches every layer's activation (and its derivative) from
// the default sigmoid to tanh; the training transform T is unchanged.
func (n *Neural32) UseTanh() {
	for i := range n.Functions {
		n.Functions[i].F, n.Functions[i].DF = tanh32, dtanh32
	}
}
// EnableRegression removes the activation function from the last layer so
// that regression is performed: the output activation becomes identity
// and its derivative the constant one.
func (n *Neural32) EnableRegression() {
	last := &n.Functions[len(n.Functions)-1]
	last.F, last.DF = identity, one
}
// EnableDropout enables dropout based regularization
// See: http://iamtrask.github.io/2015/07/28/dropout/
// The training transform T of every layer except the last two function
// pairs' final one (n.Functions[:depth-1]) is replaced: each activation is
// zeroed with the given probability, and survivors are rescaled by
// 1/(1-probability) (inverted dropout). The output layer keeps identity.
func (n *Neural32) EnableDropout(probability float32) {
	depth := len(n.Layers) - 1
	for i := range n.Functions[:depth-1] {
		// All layers share closures over the same `probability` value.
		n.Functions[i].T = func(x float32) float32 {
			if rand.Float32() > 1-probability {
				x = 0
			} else {
				x *= 1 / (1 - probability)
			}
			return x
		}
	}
}
// NewNeural32 creates a neural network, handing a zero-valued Neural32 to
// the supplied configuration callback (which typically calls Init).
func NewNeural32(config func(neural *Neural32)) *Neural32 {
	n := new(Neural32)
	config(n)
	return n
}
// Context32 is an inference context: per-layer activation buffers bound
// to a shared network. Multiple contexts can run inference over the same
// Neural32 without sharing activation state.
type Context32 struct {
	*Neural32
	// Activations[i] holds layer i's outputs; index 0 is the input layer.
	Activations [][]float32
}
// SetInput sets the input to the neural network by copying `input` into
// the first activation buffer (extra buffer slots — e.g. the bias unit —
// keep their existing values).
func (c *Context32) SetInput(input []float32) {
	copy(c.Activations[0], input)
}
// GetOutput gets the output of the neural network: the last layer's
// activation buffer (a live slice, not a copy).
func (c *Context32) GetOutput() []float32 {
	return c.Activations[len(c.Activations)-1]
}
// NewContext creates a new inference context over this network, with every
// activation buffer initialized to 1.0 (so bias units start at 1).
func (n *Neural32) NewContext() *Context32 {
	activations := make([][]float32, len(n.Layers))
	for i, width := range n.Layers {
		activations[i] = vector32(width, 1.0)
	}
	return &Context32{Neural32: n, Activations: activations}
}
// Infer runs inference: propagates c.Activations[0] forward through all
// layers using each layer's activation F.
func (c *Context32) Infer() {
	depth := len(c.Layers) - 1
	if depth > 1 {
		for i := range c.Activations[:depth-1] {
			activations, weights := c.Activations[i], c.Weights[i]
			// Skip the final weight row: it targets the next layer's bias
			// unit, whose activation stays at the 1.0 set by NewContext.
			for j := range weights[:len(weights)-1] {
				sum := dot32(activations, weights[j])
				c.Activations[i+1][j] = c.Functions[i].F(sum)
			}
		}
	}
	// Output layer: no bias unit, so every weight row is used.
	i := depth - 1
	activations, weights := c.Activations[i], c.Weights[i]
	for j := range weights[:len(weights)] {
		sum := dot32(activations, weights[j])
		c.Activations[i+1][j] = c.Functions[i].F(sum)
	}
}
// InferWithT runs inference using a transform in between layers: identical
// to Infer except each activation is additionally passed through the
// layer's training transform T (identity unless dropout is enabled).
func (c *Context32) InferWithT() {
	depth := len(c.Layers) - 1
	if depth > 1 {
		for i := range c.Activations[:depth-1] {
			activations, weights := c.Activations[i], c.Weights[i]
			// Skip the final row: it targets the next layer's bias unit,
			// which stays at 1.0.
			for j := range weights[:len(weights)-1] {
				sum := dot32(activations, weights[j])
				c.Activations[i+1][j] = c.Functions[i].T(c.Functions[i].F(sum))
			}
		}
	}
	// Output layer uses every weight row.
	i := depth - 1
	activations, weights := c.Activations[i], c.Weights[i]
	for j := range weights[:len(weights)] {
		sum := dot32(activations, weights[j])
		c.Activations[i+1][j] = c.Functions[i].T(c.Functions[i].F(sum))
	}
}
// BackPropagate run the backpropagation algorithm
// Given the target outputs for the activations currently in the context,
// it computes per-layer error deltas, updates the weights using learning
// rate lRate and momentum factor mFactor, and returns the sum of squared
// output errors for this pattern.
func (c *Context32) BackPropagate(targets []float32, lRate, mFactor float32) float32 {
	depth, layers := len(c.Layers), c.Layers
	deltas := make([][]float32, depth-1)
	for i := range deltas {
		deltas[i] = vector32(layers[i+1], 0)
	}
	// Output layer deltas: derivative of the activation times the error.
	l := depth - 2
	for i := 0; i < layers[l+1]; i++ {
		activation := c.Activations[l+1][i]
		e := targets[i] - activation
		deltas[l][i] = c.Functions[l].DF(activation) * e
	}
	l--
	// Hidden layer deltas: error back-propagated through the next layer's
	// weights, scaled by the activation derivative.
	for l >= 0 {
		for i := 0; i < layers[l+1]; i++ {
			var e float32
			for j := 0; j < layers[l+2]; j++ {
				e += deltas[l+1][j] * c.Weights[l+1][j][i]
			}
			deltas[l][i] = c.Functions[l].DF(c.Activations[l+1][i]) * e
		}
		l--
	}
	// Weight update with momentum: new change = lRate*delta*activation,
	// applied weight delta = new change + mFactor*previous change.
	// Changes[l][i] is then overwritten with the raw (un-momentum) change.
	for l := 0; l < depth-1; l++ {
		change := make([]float32, layers[l+1])
		for i := 0; i < layers[l]; i++ {
			copy(change, deltas[l])
			scal32(c.Activations[l][i], change)
			scal32(mFactor, c.Changes[l][i])
			axpy32(lRate, change, c.Changes[l][i])
			for j := 0; j < layers[l+1]; j++ {
				c.Weights[l][j][i] = c.Weights[l][j][i] + c.Changes[l][i][j]
			}
			copy(c.Changes[l][i], change)
		}
	}
	// Report the squared error of the output layer against the targets.
	var e float32
	for i := 0; i < len(targets); i++ {
		f := targets[i] - c.Activations[depth-1][i]
		e += f * f
	}
	return e
}
// Train trains a neural network using data from source
// For each of `iterations` passes it fetches a batch of
// (input, target) patterns, backpropagates every pattern, and records the
// average squared error per target element.
func (n *Neural32) Train(source func(iteration int) [][][]float32, iterations int, lRate, mFactor float32) []float32 {
	context := n.NewContext()
	errs := make([]float32, iterations)
	for i := range errs {
		// Renamed locals: the original shadowed the receiver with `n int`.
		var errSum float32
		var targetCount int
		for _, pattern := range source(i) {
			context.SetInput(pattern[0])
			context.InferWithT()
			errSum += context.BackPropagate(pattern[1], lRate, mFactor)
			targetCount += len(pattern[1])
		}
		errs[i] = errSum / float32(targetCount)
	}
	return errs
}
// test prints, for every pattern, the input, the network's inferred output,
// and the expected target — a quick visual sanity check.
func (n *Neural32) test(patterns [][][]float32) {
	context := n.NewContext()
	for _, pattern := range patterns {
		context.SetInput(pattern[0])
		context.Infer()
		fmt.Println(pattern[0], "->", context.GetOutput(), " : ", pattern[1])
	}
}
|
[
0
] |
package main
import (
"../../../lib/liboct"
"flag"
"fmt"
)
// The case now could be like this:
// in this case type, we will send all the files to all the hostOS
// casegroup
// |____ casedir
// | |___ casename.json
// | |___ `source` (must be `source`)
// | |____ file1
// | |____ ...
// | |____ fileN
// | |____ dir1
// | |____ ...
// | |____ dirN
// |
// |____ caselibdir
// |_____ libfile1
// |_____ ....
// |_____ libfile2
//
//
// The ideal case should be like this:
//
// casedir
// |___ `config.json` (must be `config.json`
// |___ `source` (must be `source` dir)
// |____ file1
// |____ ...
// |____ fileN
// |____ dir1 with files
// |____ ...
// |____ dirN with files
//
// main validates a test case given by server ID (-id), archive file (-f),
// or directory (-d, optionally scoped with -n), then reports errors and
// warnings. Fixes: Printf lines lacked trailing newlines so the following
// Println ran onto the same line; locals renamed from snake_case to Go style.
func main() {
	var caseDir = flag.String("d", "", "input the case dir")
	var caseFile = flag.String("f", "", "input the file url, case.tar.gz")
	var caseName = flag.String("n", "", "input the 'case name' in the case dir, if there were multiply cases in the case dir. You can use this with -d and -f.")
	var caseID = flag.String("id", "", "input the 'case id' provided by 'Test Case server', please make sure the the tcserver is running.")
	flag.Parse()
	var warningMsgs []liboct.ValidatorMessage
	var errMsgs []liboct.ValidatorMessage
	switch {
	case len(*caseID) > 0:
		// TODO(review): the -id branch was empty in the original; confirm
		// whether a tcserver lookup/validation was meant to happen here.
	case len(*caseFile) > 0:
		// NOTE(review): ValidateByFile's results were discarded in the
		// original, unlike ValidateByDir below — confirm its signature and
		// capture them if it returns messages.
		liboct.ValidateByFile(*caseFile)
	case len(*caseDir) > 0:
		warningMsgs, errMsgs = liboct.ValidateByDir(*caseDir, *caseName)
	default:
		fmt.Println("Please input the test case")
		return
	}
	switch {
	case len(errMsgs) > 0:
		fmt.Printf("The case is invalid, there are %d error(errors) and %d warning(warnings)\n", len(errMsgs), len(warningMsgs))
		fmt.Println("Please see the details:")
		fmt.Println(errMsgs)
		fmt.Println(warningMsgs)
	case len(warningMsgs) > 0:
		fmt.Printf("The case is OK, but there are %d warning(warnings)\n", len(warningMsgs))
		fmt.Println("Please see the details:")
		fmt.Println(warningMsgs)
	default:
		fmt.Println("Good case.")
	}
}
|
[
5
] |
package handler
import (
. "RevokeBot/model"
"RevokeBot/util"
"fmt"
"strconv"
"strings"
)
// FriendMsgHandler processes private-chat commands controlling revoke
// prompting and the grant list, then persists the configuration.
// Fix: the original passed strconv.Atoi's (int, error) pair directly into
// RevokeGroupList.Add/Remove, silently propagating parse failures; the
// group number is now parsed and validated first.
func FriendMsgHandler(msg Message) {
	data := msg.CurrentPacket.Data
	fmt.Println(data.Content)
	if data.Content == "!grant clean" {
		GrantList.Clear()
		Send(int(data.FromUin), 1, "Grant: "+GrantList.String())
	} else if data.Content == "!status" {
		// NOTE(review): FlashStatus is never assigned, so it is always
		// empty — confirm whether a flash-picture status was meant here.
		var FlashStatus string
		if RevokeGroupList.IsEmpty() {
			Send(int(data.FromUin), 1, "Revoke is "+strconv.Itoa(EnableRevokePrompt)+"\nRevoke list is empty"+FlashStatus+
				"\nGrant: "+GrantList.String()+"\nisGrantAll:"+strconv.FormatBool(GrantAll))
		} else {
			Send(int(data.FromUin), 1, "Revoke is "+strconv.Itoa(EnableRevokePrompt)+"\nRevoke list is "+RevokeGroupList.String()+FlashStatus+
				"\nGrant: "+GrantList.String()+"\nisGrantAll:"+strconv.FormatBool(GrantAll))
		}
	} else if data.Content == "!revoke on" {
		EnableRevokePrompt = 1
		Send(int(data.FromUin), 1, "RevokePrompt is on")
	} else if data.Content == "!revoke off" {
		EnableRevokePrompt = 0
		Send(int(data.FromUin), 1, "Revoke is off")
	} else {
		arg := strings.Split(data.Content, " ")
		if len(arg) < 3 {
			return
		}
		if arg[0] == "!revoke" && (arg[1] == "add" || arg[1] == "sub") {
			groupID, err := strconv.Atoi(arg[2])
			if err != nil {
				Send(int(data.FromUin), 1, "Invalid group number: "+arg[2])
				return
			}
			if arg[1] == "add" {
				RevokeGroupList.Add(groupID)
				Send(int(data.FromUin), 1, "Add successfully\nUse !status to check status")
			} else {
				RevokeGroupList.Remove(groupID)
				Send(int(data.FromUin), 1, "Remove successfully\nUse !status to check status")
			}
		}
	}
	util.WriteConfig()
}
|
[
5
] |
package main
import "fmt"
// findNumbers counts how many values in nums have an even number of
// decimal digits.
func findNumbers(nums []int) int {
	var res int
	for _, v := range nums {
		if findDigit(v) {
			res++
		}
	}
	return res
}
// findDigit reports whether num has an even number of decimal digits.
// Fix: the original returned true for 0 (loop never ran, so the count
// stayed 0, and 0%2==0), but 0 has exactly one digit.
func findDigit(num int) bool {
	if num == 0 {
		return false // one digit: odd
	}
	var n int
	for num != 0 {
		num /= 10
		n++
	}
	return n%2 == 0
}
// main prints the even-digit-count results for the two sample inputs.
func main() {
	first := findNumbers([]int{12, 345, 2, 6, 7896})
	second := findNumbers([]int{555, 901, 482, 1771})
	fmt.Println(first)
	fmt.Println(second)
}
|
[
0
] |
package task
import (
"context"
"log"
"github.com/letitbeat/selection-process/pkg/db"
"go.mongodb.org/mongo-driver/bson"
)
// Repository provides task persistence backed by the MongoDB instance
// at DBURI.
type Repository struct {
	// DBURI is the MongoDB connection string used for every query.
	DBURI string
}
// GetByID fetches the task with the given _id from main.tasks.
// Fix: the original only logged a connection error and then used the
// client anyway, which would panic on a nil/unusable client; the error
// is now returned to the caller.
func (r *Repository) GetByID(ID string) (Task, error) {
	var task Task
	ctx := context.Background()
	client, err := db.Client(ctx, r.DBURI)
	if err != nil {
		log.Printf("error connecting to DB %s", r.DBURI)
		return task, err
	}
	collection := client.Database("main").Collection("tasks")
	filter := bson.D{{"_id", ID}}
	if err = collection.FindOne(ctx, filter).Decode(&task); err != nil {
		log.Printf("error fetching task ID: %s from DB", ID)
		return task, err
	}
	return task, nil
}
|
[
2
] |
package nats
import (
"github.com/gocontrib/pubsub"
nats "github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
)
// init registers this NATS driver with the pubsub registry under the
// "nats" and "natsio" scheme names.
func init() {
	pubsub.RegisterDriver(&driver{}, "nats", "natsio")
}
// driver implements the pubsub driver interface for NATS.
type driver struct{}
// Create opens a hub using the config's "url" entry when it is a string;
// otherwise an empty URL is passed through (Open then connects to it as-is,
// since a one-element slice bypasses the default-URL fallback).
func (d *driver) Create(config pubsub.HubConfig) (pubsub.Hub, error) {
	if url, ok := config["url"].(string); ok {
		return Open(url)
	}
	return Open("")
}
// Open creates pubsub hub connected to nats server.
// Only the first URL is used; with no arguments it falls back to
// nats.DefaultURL.
// Fix: logrus's log.Info is Sprint-style and does not interpret the %v
// verb — the message was logged literally with "%v" in it; use Infof.
func Open(URL ...string) (pubsub.Hub, error) {
	if len(URL) == 0 {
		URL = []string{nats.DefaultURL}
	}
	log.Infof("connecting to nats hub: %v", URL)
	conn, err := nats.Connect(URL[0], func(options *nats.Options) error {
		options.Name = "pandora-pubsub"
		options.AllowReconnect = true
		options.Verbose = true
		return nil
	})
	if err != nil {
		return nil, err
	}
	return &hub{
		conn: conn,
		subs: make(map[*sub]struct{}),
	}, nil
}
|
[
2
] |
package sdk
import (
"errors"
"github.com/33cn/chain33-sdk-go/crypto"
"github.com/33cn/chain33-sdk-go/crypto/ed25519"
"github.com/33cn/chain33-sdk-go/crypto/gm"
)
// Account bundles a keypair, its derived address, and the signature
// scheme (SECP256K1, SM2, or ED25519) it was generated for.
type Account struct {
	PrivateKey []byte
	PublicKey []byte
	Address string
	SignType string
}
// NewAccount generates a fresh keypair and address for the requested
// signature scheme; an empty signType defaults to SECP256K1.
// Fix: the ED25519 branch used copy() into nil PrivateKey/PublicKey
// slices, which copies zero bytes and left both keys empty; the slices
// are now assigned directly.
func NewAccount(signType string) (*Account, error) {
	if signType == "" {
		signType = crypto.SECP256K1
	}
	account := Account{}
	account.SignType = signType
	switch signType {
	case crypto.SECP256K1:
		account.PrivateKey = crypto.GeneratePrivateKey()
		account.PublicKey = crypto.PubKeyFromPrivate(account.PrivateKey)
	case crypto.SM2:
		account.PrivateKey, account.PublicKey = gm.GenerateKey()
	case crypto.ED25519:
		priv, pub, err := ed25519.GenerateKey()
		if err != nil {
			return nil, err
		}
		account.PrivateKey = priv
		account.PublicKey = pub
	default:
		return nil, errors.New("sign type not support")
	}
	addr, err := crypto.PubKeyToAddress(account.PublicKey)
	if err != nil {
		return nil, err
	}
	account.Address = addr
	return &account, nil
}
|
[
5
] |
// Copyright 2022-2023 The Parca Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package debuginfo
import (
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"path"
"time"
"github.com/go-kit/log"
"github.com/google/uuid"
"github.com/thanos-io/objstore"
"github.com/thanos-io/objstore/client"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
debuginfopb "github.com/parca-dev/parca/gen/proto/go/parca/debuginfo/v1alpha1"
)
// ErrDebuginfoNotFound is returned when no debuginfo is stored for a build ID.
var ErrDebuginfoNotFound = errors.New("debuginfo not found")
// CacheProvider names a cache backend implementation.
type CacheProvider string
const (
	// FILESYSTEM caches debuginfo on local disk.
	FILESYSTEM CacheProvider = "FILESYSTEM"
)
// Config wires the object-storage bucket and cache used by the store.
type Config struct {
	Bucket *client.BucketConfig `yaml:"bucket"`
	Cache *CacheConfig `yaml:"cache"`
}
// FilesystemCacheConfig configures the FILESYSTEM cache provider.
type FilesystemCacheConfig struct {
	Directory string `yaml:"directory"`
}
// CacheConfig selects a cache provider and carries its provider-specific
// configuration (decoded according to Type).
type CacheConfig struct {
	Type CacheProvider `yaml:"type"`
	Config interface{} `yaml:"config"`
}
// MetadataManager tracks per-build-ID debuginfo state: where it came from
// (debuginfod vs upload), upload progress, and fetching the current record.
type MetadataManager interface {
	MarkAsDebuginfodSource(ctx context.Context, servers []string, buildID string, typ debuginfopb.DebuginfoType) error
	MarkAsUploading(ctx context.Context, buildID, uploadID, hash string, typ debuginfopb.DebuginfoType, startedAt *timestamppb.Timestamp) error
	MarkAsUploaded(ctx context.Context, buildID, uploadID string, typ debuginfopb.DebuginfoType, finishedAt *timestamppb.Timestamp) error
	Fetch(ctx context.Context, buildID string, typ debuginfopb.DebuginfoType) (*debuginfopb.Debuginfo, error)
}
// Store implements the debuginfo gRPC service: it coordinates uploads into
// the bucket, metadata bookkeeping, and lookups against upstream debuginfod
// servers.
type Store struct {
	debuginfopb.UnimplementedDebuginfoServiceServer
	tracer trace.Tracer
	logger log.Logger
	bucket objstore.Bucket
	metadata MetadataManager
	debuginfodClients DebuginfodClients
	signedUpload SignedUpload
	// maxUploadDuration bounds how long an upload may stay in-progress.
	maxUploadDuration time.Duration
	maxUploadSize int64
	// timeNow is injectable for tests; defaults to time.Now.
	timeNow func() time.Time
}
// SignedUploadClient produces pre-signed PUT URLs so agents can upload
// debuginfo directly to object storage.
type SignedUploadClient interface {
	SignedPUT(ctx context.Context, objectKey string, size int64, expiry time.Time) (signedURL string, err error)
}
// SignedUpload toggles signed-URL uploads and carries the client used to
// create them when enabled.
type SignedUpload struct {
	Enabled bool
	Client SignedUploadClient
}
// NewStore returns a new debug info store.
// The logger is scoped with component=debuginfo and the clock defaults to
// time.Now (overridable in tests via the timeNow field).
func NewStore(
	tracer trace.Tracer,
	logger log.Logger,
	metadata MetadataManager,
	bucket objstore.Bucket,
	debuginfodClients DebuginfodClients,
	signedUpload SignedUpload,
	maxUploadDuration time.Duration,
	maxUploadSize int64,
) (*Store, error) {
	return &Store{
		tracer: tracer,
		logger: log.With(logger, "component", "debuginfo"),
		bucket: bucket,
		metadata: metadata,
		debuginfodClients: debuginfodClients,
		signedUpload: signedUpload,
		maxUploadDuration: maxUploadDuration,
		maxUploadSize: maxUploadSize,
		timeNow: time.Now,
	}, nil
}
// Human-readable reasons returned by ShouldInitiateUpload explaining why an
// upload was requested or declined.
const (
	ReasonDebuginfoInDebuginfod = "Debuginfo exists in debuginfod, therefore no upload is necessary."
	ReasonFirstTimeSeen = "First time we see this Build ID, and it does not exist in debuginfod, therefore please upload!"
	ReasonUploadStale = "A previous upload was started but not finished and is now stale, so it can be retried."
	ReasonUploadInProgress = "A previous upload is still in-progress and not stale yet (only stale uploads can be retried)."
	ReasonDebuginfoAlreadyExists = "Debuginfo already exists and is not marked as invalid, therefore no new upload is needed."
	ReasonDebuginfoAlreadyExistsButForced = "Debuginfo already exists and is not marked as invalid, therefore wouldn't have accepted a new upload, but accepting it because it's requested to be forced."
	ReasonDebuginfoInvalid = "Debuginfo already exists but is marked as invalid, therefore a new upload is needed. Hash the debuginfo and initiate the upload."
	ReasonDebuginfoEqual = "Debuginfo already exists and is marked as invalid, but the proposed hash is the same as the one already available, therefore the upload is not accepted as it would result in the same invalid debuginfos."
	ReasonDebuginfoNotEqual = "Debuginfo already exists but is marked as invalid, therefore a new upload will be accepted."
	ReasonDebuginfodSource = "Debuginfo is available from debuginfod already and not marked as invalid, therefore no new upload is needed."
	ReasonDebuginfodInvalid = "Debuginfo is available from debuginfod already but is marked as invalid, therefore a new upload is needed."
)
// ShouldInitiateUpload returns whether an upload should be initiated for the
// given build ID. Checking if an upload should even be initiated allows the
// parca-agent to avoid extracting debuginfos unnecessarily from a binary.
func (s *Store) ShouldInitiateUpload(ctx context.Context, req *debuginfopb.ShouldInitiateUploadRequest) (*debuginfopb.ShouldInitiateUploadResponse, error) {
	ctx, span := s.tracer.Start(ctx, "ShouldInitiateUpload")
	defer span.End()
	span.SetAttributes(attribute.String("build_id", req.BuildId))

	buildID := req.BuildId
	if err := validateInput(buildID); err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	dbginfo, err := s.metadata.Fetch(ctx, buildID, req.Type)
	if err != nil && !errors.Is(err, ErrMetadataNotFound) {
		return nil, status.Error(codes.Internal, err.Error())
	} else if errors.Is(err, ErrMetadataNotFound) {
		// First time we see this Build ID. Before asking the agent to
		// upload, check whether an upstream debuginfod server already has it.
		existsInDebuginfods, err := s.debuginfodClients.Exists(ctx, buildID)
		if err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}

		if len(existsInDebuginfods) > 0 {
			// Remember which debuginfod servers have it so later fetches
			// know where to go; no upload is needed in that case.
			if err := s.metadata.MarkAsDebuginfodSource(ctx, existsInDebuginfods, buildID, req.Type); err != nil {
				return nil, status.Error(codes.Internal, fmt.Errorf("mark Build ID to be available from debuginfod: %w", err).Error())
			}

			return &debuginfopb.ShouldInitiateUploadResponse{
				ShouldInitiateUpload: false,
				Reason:               ReasonDebuginfoInDebuginfod,
			}, nil
		}

		return &debuginfopb.ShouldInitiateUploadResponse{
			ShouldInitiateUpload: true,
			Reason:               ReasonFirstTimeSeen,
		}, nil
	} else {
		// We have seen this Build ID before and there is metadata for it.
		switch dbginfo.Source {
		case debuginfopb.Debuginfo_SOURCE_UPLOAD:
			if dbginfo.Upload == nil {
				return nil, status.Error(codes.Internal, "metadata inconsistency: upload is nil")
			}
			switch dbginfo.Upload.State {
			case debuginfopb.DebuginfoUpload_STATE_UPLOADING:
				// An upload is in flight; allow a retry only once it has
				// outlived its deadline (see uploadIsStale).
				if s.uploadIsStale(dbginfo.Upload) {
					return &debuginfopb.ShouldInitiateUploadResponse{
						ShouldInitiateUpload: true,
						Reason:               ReasonUploadStale,
					}, nil
				}

				return &debuginfopb.ShouldInitiateUploadResponse{
					ShouldInitiateUpload: false,
					Reason:               ReasonUploadInProgress,
				}, nil
			case debuginfopb.DebuginfoUpload_STATE_UPLOADED:
				// Quality == nil or a valid ELF means the stored debuginfo
				// is usable; only a forced request replaces it.
				if dbginfo.Quality == nil || !dbginfo.Quality.NotValidElf {
					if req.Force {
						return &debuginfopb.ShouldInitiateUploadResponse{
							ShouldInitiateUpload: true,
							Reason:               ReasonDebuginfoAlreadyExistsButForced,
						}, nil
					}
					return &debuginfopb.ShouldInitiateUploadResponse{
						ShouldInitiateUpload: false,
						Reason:               ReasonDebuginfoAlreadyExists,
					}, nil
				}
				// The stored debuginfo is marked invalid. Without a proposed
				// hash from the client, ask it to hash and upload.
				if req.Hash == "" {
					return &debuginfopb.ShouldInitiateUploadResponse{
						ShouldInitiateUpload: true,
						Reason:               ReasonDebuginfoInvalid,
					}, nil
				}
				// An identical hash would just reproduce the same invalid
				// debuginfo, so reject it.
				if dbginfo.Upload.Hash == req.Hash {
					return &debuginfopb.ShouldInitiateUploadResponse{
						ShouldInitiateUpload: false,
						Reason:               ReasonDebuginfoEqual,
					}, nil
				}
				return &debuginfopb.ShouldInitiateUploadResponse{
					ShouldInitiateUpload: true,
					Reason:               ReasonDebuginfoNotEqual,
				}, nil
			default:
				return nil, status.Error(codes.Internal, "metadata inconsistency: unknown upload state")
			}
		case debuginfopb.Debuginfo_SOURCE_DEBUGINFOD:
			if dbginfo.Quality == nil || !dbginfo.Quality.NotValidElf {
				// We already have debuginfo that's also not marked to be
				// invalid, so we don't need to upload it again.
				return &debuginfopb.ShouldInitiateUploadResponse{
					ShouldInitiateUpload: false,
					Reason:               ReasonDebuginfodSource,
				}, nil
			}
			return &debuginfopb.ShouldInitiateUploadResponse{
				ShouldInitiateUpload: true,
				Reason:               ReasonDebuginfodInvalid,
			}, nil
		default:
			return nil, status.Errorf(codes.Internal, "unknown debuginfo source %q", dbginfo.Source)
		}
	}
}
// InitiateUpload validates the request, re-checks via ShouldInitiateUpload
// that an upload is actually warranted (so blind initiation requests are
// rejected), marks the upload as in progress, and returns instructions for
// either a gRPC streaming upload or a signed-URL upload, depending on
// configuration.
func (s *Store) InitiateUpload(ctx context.Context, req *debuginfopb.InitiateUploadRequest) (*debuginfopb.InitiateUploadResponse, error) {
	ctx, span := s.tracer.Start(ctx, "InitiateUpload")
	defer span.End()
	span.SetAttributes(attribute.String("build_id", req.BuildId))

	if req.Hash == "" {
		return nil, status.Error(codes.InvalidArgument, "hash must be set")
	}
	if req.Size == 0 {
		return nil, status.Error(codes.InvalidArgument, "size must be set")
	}

	// We don't want to blindly accept upload initiation requests that
	// shouldn't have happened.
	shouldInitiateResp, err := s.ShouldInitiateUpload(ctx, &debuginfopb.ShouldInitiateUploadRequest{
		BuildId: req.BuildId,
		Hash:    req.Hash,
		Force:   req.Force,
		Type:    req.Type,
	})
	if err != nil {
		return nil, err
	}

	if !shouldInitiateResp.ShouldInitiateUpload {
		// AlreadyExists lets clients distinguish "same content already
		// stored" from other precondition failures.
		if shouldInitiateResp.Reason == ReasonDebuginfoEqual {
			return nil, status.Error(codes.AlreadyExists, ReasonDebuginfoEqual)
		}
		return nil, status.Errorf(codes.FailedPrecondition, "upload should not have been attempted to be initiated, a previous check should have failed with: %s", shouldInitiateResp.Reason)
	}

	if req.Size > s.maxUploadSize {
		return nil, status.Errorf(codes.InvalidArgument, "upload size %d exceeds maximum allowed size %d", req.Size, s.maxUploadSize)
	}

	uploadID := uuid.New().String()
	uploadStarted := s.timeNow()
	uploadExpiry := uploadStarted.Add(s.maxUploadDuration)

	if !s.signedUpload.Enabled {
		// Wrap in a gRPC status so clients see codes.Internal rather than
		// codes.Unknown, consistent with the rest of this service.
		if err := s.metadata.MarkAsUploading(ctx, req.BuildId, uploadID, req.Hash, req.Type, timestamppb.New(uploadStarted)); err != nil {
			return nil, status.Error(codes.Internal, fmt.Errorf("mark debuginfo upload as uploading via gRPC: %w", err).Error())
		}

		return &debuginfopb.InitiateUploadResponse{
			UploadInstructions: &debuginfopb.UploadInstructions{
				BuildId:        req.BuildId,
				UploadId:       uploadID,
				UploadStrategy: debuginfopb.UploadInstructions_UPLOAD_STRATEGY_GRPC,
				Type:           req.Type,
			},
		}, nil
	}

	signedURL, err := s.signedUpload.Client.SignedPUT(ctx, objectPath(req.BuildId, req.Type), req.Size, uploadExpiry)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	// Same wrapping as above, for the signed-URL path.
	if err := s.metadata.MarkAsUploading(ctx, req.BuildId, uploadID, req.Hash, req.Type, timestamppb.New(uploadStarted)); err != nil {
		return nil, status.Error(codes.Internal, fmt.Errorf("mark debuginfo upload as uploading via signed URL: %w", err).Error())
	}

	return &debuginfopb.InitiateUploadResponse{
		UploadInstructions: &debuginfopb.UploadInstructions{
			BuildId:        req.BuildId,
			UploadId:       uploadID,
			UploadStrategy: debuginfopb.UploadInstructions_UPLOAD_STRATEGY_SIGNED_URL,
			SignedUrl:      signedURL,
			Type:           req.Type,
		},
	}, nil
}
// MarkUploadFinished records that the upload identified by the given build ID
// and upload ID has completed, stamping it with the current time. Metadata
// errors are translated into the appropriate gRPC status codes.
func (s *Store) MarkUploadFinished(ctx context.Context, req *debuginfopb.MarkUploadFinishedRequest) (*debuginfopb.MarkUploadFinishedResponse, error) {
	ctx, span := s.tracer.Start(ctx, "MarkUploadFinished")
	defer span.End()
	span.SetAttributes(attribute.String("build_id", req.BuildId))
	span.SetAttributes(attribute.String("upload_id", req.UploadId))

	buildID := req.BuildId
	if err := validateInput(buildID); err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	err := s.metadata.MarkAsUploaded(ctx, buildID, req.UploadId, req.Type, timestamppb.New(s.timeNow()))
	switch {
	case errors.Is(err, ErrDebuginfoNotFound):
		return nil, status.Error(codes.NotFound, "no debuginfo metadata found for build id")
	case errors.Is(err, ErrUploadMetadataNotFound):
		return nil, status.Error(codes.NotFound, "no debuginfo upload metadata found for build id")
	case errors.Is(err, ErrUploadIDMismatch):
		return nil, status.Error(codes.InvalidArgument, "upload id mismatch")
	case err != nil:
		return nil, status.Error(codes.Internal, err.Error())
	}

	return &debuginfopb.MarkUploadFinishedResponse{}, nil
}
// Upload receives debuginfo via gRPC client streaming: the first message
// carries the upload info (build ID, upload ID, type); subsequent chunks are
// consumed through UploadReader. The method is rejected when signed-URL
// uploads are enabled, since uploads must then go directly to object storage.
func (s *Store) Upload(stream debuginfopb.DebuginfoService_UploadServer) error {
	if s.signedUpload.Enabled {
		// NOTE(review): this message wording looks inverted relative to the
		// condition (it is returned precisely when signed-URL uploads ARE
		// enabled) — confirm intended text.
		return status.Error(codes.Unimplemented, "signed URL uploads are the only supported upload strategy for this service")
	}

	req, err := stream.Recv()
	if err != nil {
		return status.Errorf(codes.Unknown, "failed to receive upload info: %q", err)
	}

	var (
		buildID  = req.GetInfo().BuildId
		uploadID = req.GetInfo().UploadId
		r        = &UploadReader{stream: stream}
		typ      = req.GetInfo().Type
	)

	ctx, span := s.tracer.Start(stream.Context(), "Upload")
	defer span.End()
	span.SetAttributes(attribute.String("build_id", buildID))
	span.SetAttributes(attribute.String("upload_id", uploadID))

	if err := s.upload(ctx, buildID, uploadID, typ, r); err != nil {
		return err
	}

	// r.size is the number of payload bytes consumed while uploading.
	return stream.SendAndClose(&debuginfopb.UploadResponse{
		BuildId: buildID,
		Size:    r.size,
	})
}
// upload validates that an upload was properly initiated — metadata exists,
// an upload record exists, and the caller's upload ID matches it — and then
// streams r into object storage at the path derived from buildID and typ.
func (s *Store) upload(ctx context.Context, buildID, uploadID string, typ debuginfopb.DebuginfoType, r io.Reader) error {
	if err := validateInput(buildID); err != nil {
		return status.Errorf(codes.InvalidArgument, "invalid build ID: %q", err)
	}

	dbginfo, err := s.metadata.Fetch(ctx, buildID, typ)
	if err != nil {
		if errors.Is(err, ErrMetadataNotFound) {
			return status.Error(codes.FailedPrecondition, "metadata not found, this indicates that the upload was not previously initiated")
		}
		return status.Error(codes.Internal, err.Error())
	}

	if dbginfo.Upload == nil {
		return status.Error(codes.FailedPrecondition, "upload metadata not found, this indicates that the upload was not previously initiated")
	}
	// Guard against stale or mismatched clients writing over another upload.
	if dbginfo.Upload.Id != uploadID {
		return status.Error(codes.InvalidArgument, "the upload ID does not match the one returned by the InitiateUpload call")
	}

	if err := s.bucket.Upload(ctx, objectPath(buildID, typ), r); err != nil {
		return status.Error(codes.Internal, fmt.Errorf("upload debuginfo: %w", err).Error())
	}

	return nil
}
// uploadIsStale reports whether an in-progress upload has outlived the
// maximum upload duration plus a two-minute grace period and may therefore
// be retried by a new client.
func (s *Store) uploadIsStale(upload *debuginfopb.DebuginfoUpload) bool {
	deadline := upload.StartedAt.AsTime().Add(s.maxUploadDuration + 2*time.Minute)
	return s.timeNow().After(deadline)
}
// validateInput checks that id is usable as a build ID: it must decode as
// hexadecimal and be longer than two characters. A descriptive error is
// returned otherwise.
func validateInput(id string) error {
	if _, err := hex.DecodeString(id); err != nil {
		return fmt.Errorf("failed to validate input: %w", err)
	}
	if len(id) <= 2 {
		return errors.New("unexpectedly short input")
	}
	return nil
}
// objectPath maps a build ID and debuginfo type to the object-storage key
// under which the corresponding artifact lives. Unknown types fall back to
// the plain "debuginfo" suffix.
func objectPath(buildID string, typ debuginfopb.DebuginfoType) string {
	suffix := "debuginfo"
	switch typ {
	case debuginfopb.DebuginfoType_DEBUGINFO_TYPE_EXECUTABLE:
		suffix = "executable"
	case debuginfopb.DebuginfoType_DEBUGINFO_TYPE_SOURCES:
		suffix = "sources"
	}
	return path.Join(buildID, suffix)
}
|
[
5
] |
package main
import (
"encoding/xml"
"reflect"
"strings"
)
// BaseInfo carries the attributes and doc child elements shared by GIR
// (GObject Introspection Repository) XML elements; it is embedded by the
// concrete element types below. Unrecognized child elements are collected
// into NotHandled for later inspection.
type BaseInfo struct {
	XMLName           xml.Name
	Version           string        `xml:"version,attr"`
	Deprecated        string        `xml:"deprecated,attr"`
	DeprecatedVersion string        `xml:"deprecated-version,attr"`
	Introspectable    string        `xml:"introspectable,attr"`
	Stability         string        `xml:"stability,attr"`
	Doc               *Doc          `xml:"doc"`
	DocDeprecated     *Doc          `xml:"doc-deprecated"`
	DocVersion        *Doc          `xml:"doc-version"`
	DocStability      *DocStability `xml:"doc-stability"`
	NotHandled        []*NotHandled `xml:",any"`
}

// Doc holds the text of a <doc>-style documentation element.
type Doc struct {
	Space      string `xml:"space,attr"`
	Whitespace string `xml:"whitespace,attr"`
	Text       string `xml:",chardata"`
}

// DocStability maps the <doc-stability> element's attributes.
type DocStability struct {
	Space string `xml:"space,attr"`
}

// NotHandled captures any element this schema does not model, keeping its
// raw inner XML so it can be reported by dumpNotHandled.
type NotHandled struct {
	XMLName xml.Name
	Xml     string `xml:",innerxml"`
}
// Repository is the root <repository> element of a GIR file.
type Repository struct {
	BaseInfo
	Glib                string      `xml:"glib,attr"`
	CSymbolPrefixes     string      `xml:"symbol-prefixes,attr"`
	CIdentifierPrefixes string      `xml:"identifier-prefixes,attr"`
	C                   string      `xml:"c,attr"`
	Xmlns               string      `xml:"xmlns,attr"`
	Package             *Package    `xml:"package"`
	CInclude            *CInclude   `xml:"include"`
	Namespace           *Namespace  `xml:"namespace"`
	Constants           []*Constant `xml:"constant"`
}

// Package maps the <package> element (pkg-config package name).
type Package struct {
	BaseInfo
	Name  string `xml:"name,attr"`
	CType string `xml:"type"`
}

// CInclude maps the <include> element (a C header to include).
type CInclude struct {
	BaseInfo
	Name string `xml:"name,attr"`
}

// Namespace maps the <namespace> element, grouping all top-level
// functions, types, constants, and aliases of a GIR namespace.
type Namespace struct {
	BaseInfo
	Name                string         `xml:"name,attr"`
	CSymbolPrefixes     string         `xml:"symbol-prefixes,attr"`
	CIdentifierPrefixes string         `xml:"identifier-prefixes,attr"`
	SharedLibrary       string         `xml:"shared-library,attr"`
	CPrefix             string         `xml:"prefix,attr"`
	CSymbolPrefix       string         `xml:"symbol-prefix,attr"`
	Functions           []*Function    `xml:"function"`
	Callbacks           []*Function    `xml:"callback"`
	Classes             []*Class       `xml:"class"`
	Interfaces          []*Class       `xml:"interface"`
	Records             []*Class       `xml:"record"`
	Bitfields           []*Enum        `xml:"bitfield"`
	Enums               []*Enum        `xml:"enumeration"`
	Unions              []*Union       `xml:"union"`
	Constants           []*Constant    `xml:"constant"`
	Boxeds              []*Boxed       `xml:"boxed"`
	ErrorDomains        []*ErrorDomain `xml:"errordomain"`
	Aliases             []*Alias       `xml:"alias"`
}

// ErrorDomain maps the <errordomain> element.
type ErrorDomain struct {
	BaseInfo
	Name       string      `xml:"name,attr"`
	GetQuark   string      `xml:"get-quark,attr"`
	Codes      string      `xml:"codes,attr"`
	Annotation *Annotation `xml:"annotation"`
}

// Annotation maps a key/value <annotation> element.
type Annotation struct {
	BaseInfo
	Key   string `xml:"key,attr"`
	Value string `xml:"value,attr"`
}

// Alias maps the <alias> element: Left is the aliased C type attribute,
// Right the target <type> child.
type Alias struct {
	BaseInfo
	Name  string `xml:"name,attr"`
	Left  string `xml:"type,attr"`
	Right *Type  `xml:"type"`
}

// Constant maps the <constant> element. Ignored is set by later processing
// stages, not by XML decoding.
type Constant struct {
	BaseInfo
	Name        string `xml:"name,attr"`
	Value       string `xml:"value,attr"`
	CName       string `xml:"type,attr"`
	CIdentifier string `xml:"identifier,attr"`
	Type        *Type  `xml:"type"`
	Ignored     bool
}
// Type maps a <type> element (a type reference, possibly nested in arrays).
type Type struct {
	BaseInfo
	Name  string `xml:"name,attr"`
	CType string `xml:"type,attr"`
	Type  string `xml:"type"`
	Array *Array `xml:"array"`
}

// Class maps <class>, <interface>, and <record> elements (see the Namespace
// fields using it for all three). Namespace is filled in by later processing,
// not by XML decoding.
type Class struct {
	BaseInfo
	Name             string        `xml:"name,attr"`
	CSymbolPrefix    string        `xml:"symbol-prefix,attr"` // for class
	CType            string        `xml:"type,attr"`
	Parent           string        `xml:"parent,attr"`
	GlibTypeName     string        `xml:"type-name,attr"`
	GlibGetType      string        `xml:"get-type,attr"`
	GlibTypeStruct   string        `xml:"type-struct,attr"`
	GlibFundamental  string        `xml:"fundamental,attr"`
	GlibGetValueFunc string        `xml:"get-value-func,attr"` // for Gst.MiniObject
	GlibSetValueFunc string        `xml:"set-value-func,attr"` // for Gst.MiniObject
	GlibRefFunc      string        `xml:"ref-func,attr"`       // for Gst.MiniObject
	GlibUnrefFunc    string        `xml:"unref-func,attr"`     // for Gst.MiniObject
	Disguised        string        `xml:"disguised,attr"`
	Foreign          string        `xml:"foreign,attr"`
	IsGTypeStruct    string        `xml:"is-gtype-struct-for,attr"`
	Abstract         string        `xml:"abstract,attr"`
	Prerequisite     *Prerequisite `xml:"prerequisite"` // for interface
	Implements       []*Implement  `xml:"implements"`
	Constructors     []*Function   `xml:"constructor"`
	VirtualMethods   []*Function   `xml:"virtual-method"`
	Methods          []*Function   `xml:"method"`
	Functions        []*Function   `xml:"function"`
	Properties       []*Property   `xml:"property"`
	Union            *Union        `xml:"union"` // for record
	Fields           []*Field      `xml:"field"`
	Signals          []*Function   `xml:"signal"`
	Namespace        string
}

// Implement maps an <implements> element naming an implemented interface.
type Implement struct {
	BaseInfo
	Interface string `xml:"name,attr"`
}

// Prerequisite maps an interface's <prerequisite> element.
type Prerequisite struct {
	BaseInfo
	Name string `xml:"name,attr"`
}

// Property maps a GObject <property> element.
type Property struct {
	BaseInfo
	Name              string `xml:"name,attr"`
	Writable          string `xml:"writable,attr"`
	TransferOwnership string `xml:"transfer-ownership,attr"`
	Construct         string `xml:"construct,attr"`
	ConstructOnly     string `xml:"construct-only,attr"`
	Readable          string `xml:"readable,attr"`
	Type              *Type  `xml:"type"`
	Array             *Array `xml:"array"`
}

// Field maps a struct/union <field> element.
type Field struct {
	BaseInfo
	Name     string    `xml:"name,attr"`
	Writable string    `xml:"writable,attr"`
	Private  string    `xml:"private,attr"`
	Readable string    `xml:"readable,attr"`
	Bits     string    `xml:"bits,attr"`
	Type     *Type     `xml:"type"`
	Array    *Array    `xml:"array"`
	Callback *Function `xml:"callback"`
}

// Union maps a <union> element.
type Union struct {
	BaseInfo
	Name          string      `xml:"name,attr"`
	CType         string      `xml:"type,attr"`
	GlibTypeName  string      `xml:"type-name,attr"`
	GlibGetType   string      `xml:"get-type,attr"`
	CSymbolPrefix string      `xml:"symbol-prefix,attr"`
	Fields        []*Field    `xml:"field"`
	Methods       []*Function `xml:"method"`
	Record        *Class      `xml:"record"`
	Functions     []*Function `xml:"function"`
	Constructors  []*Function `xml:"constructor"`
}

// Array maps an <array> element (possibly nested).
type Array struct {
	BaseInfo
	Name           string `xml:"name,attr"`
	ZeroTerminated string `xml:"zero-terminated,attr"`
	CType          string `xml:"type,attr"`
	Length         string `xml:"length,attr"`
	FixedSize      string `xml:"fixed-size,attr"`
	Type           *Type  `xml:"type"`
	Array          *Array `xml:"array"`
}
// Function maps <function>, <callback>, <constructor>, <method>,
// <virtual-method>, and <signal> elements (several attributes apply only to
// some of those, as annotated). The untagged fields are filled in by later
// processing stages, not by XML decoding.
type Function struct {
	BaseInfo
	Name          string   `xml:"name,attr"`
	CType         string   `xml:"type,attr"` // for callback
	CIdentifier   string   `xml:"identifier,attr"`
	MovedTo       string   `xml:"moved-to,attr"` // in Gdk-3.0
	Throws        string   `xml:"throws,attr"`
	Shadows       string   `xml:"shadows,attr"`
	ShadowedBy    string   `xml:"shadowed-by,attr"`
	Invoker       string   `xml:"invoker,attr"`    // for virtual-method
	When          string   `xml:"when,attr"`       // for signal
	Action        string   `xml:"action,attr"`     // for signal
	NoHooks       string   `xml:"no-hooks,attr"`   // for signal
	NoRecurse     string   `xml:"no-recurse,attr"` // for signal
	Detailed      string   `xml:"detailed,attr"`   // for signal
	Return        *Param   `xml:"return-value"`
	InstanceParam *Param   `xml:"parameters>instance-parameter"`
	Params        []*Param `xml:"parameters>parameter"`
	GoName        string
	IsVarargs     bool
	IsConstructor bool
}

// Param maps <parameter>, <instance-parameter>, and <return-value> elements.
// Everything from GoName down is populated by the code generator, not by XML
// decoding.
type Param struct {
	BaseInfo
	Name              string   `xml:"name,attr"`
	TransferOwnership string   `xml:"transfer-ownership,attr"`
	Direction         string   `xml:"direction,attr"`
	CallerAllocates   string   `xml:"caller-allocates,attr"`
	AllowNone         string   `xml:"allow-none,attr"`
	Scope             string   `xml:"scope,attr"`
	Destroy           string   `xml:"destroy,attr"`
	Closure           string   `xml:"closure,attr"`
	Skip              string   `xml:"skip,attr"`
	Array             *Array   `xml:"array"`
	Type              *Type    `xml:"type"`
	Varargs           *Varargs `xml:"varargs"`
	GoName            string
	MappedType        string
	IsArray           bool
	IsVoid            bool
	CType             string
	CTypeName         string
	GoType            string
	ElementCType      string // for array
	ElementCTypeName  string // for array
	ElementGoType     string // for array
	LenParamName      string // for array
	IsZeroTerminated  bool   // for array
	TypeSpec          string
	CgoParam          string
	CgoBeforeStmt     string
	CgoAfterStmt      string
	Function          *Function
	Generator         *Generator
}

// Varargs marks a <varargs> parameter element.
type Varargs struct {
	BaseInfo
}

// Member maps an enumeration/bitfield <member> element.
type Member struct {
	BaseInfo
	Name        string `xml:"name,attr"`
	Value       string `xml:"value,attr"`
	CIdentifier string `xml:"identifier,attr"`
	Nick        string `xml:"nick,attr"`
}

// Enum maps <enumeration> and <bitfield> elements.
type Enum struct {
	BaseInfo
	Name            string      `xml:"name,attr"`
	CType           string      `xml:"type,attr"`
	GlibErrorDomain string      `xml:"error-domain,attr"`
	GlibTypeName    string      `xml:"type-name,attr"`
	GlibGetType     string      `xml:"get-type,attr"`
	Members         []*Member   `xml:"member"`
	Functions       []*Function `xml:"function"`
}

// Boxed maps a glib <boxed> element.
type Boxed struct {
	BaseInfo
	GlibName      string `xml:"name,attr"`
	CSymbolPrefix string `xml:"symbol-prefix,attr"`
	GlibTypeName  string `xml:"type-name,attr"`
	GlibGetType   string `xml:"get-type,attr"`
}
// Parse unmarshals GIR XML contents into a Repository tree, prints any
// elements the schema above does not model (via dumpNotHandled), and returns
// the tree. On unmarshal failure it delegates to checkError — presumably
// aborting the program; confirm checkError's behavior before relying on a
// non-nil return here.
func (self *Generator) Parse(contents []byte) *Repository {
	// unmarshal
	var repo Repository
	err := xml.Unmarshal(contents, &repo)
	checkError(err)
	// dump
	dumpNotHandled(reflect.ValueOf(repo))
	return &repo
}
// dumpNotHandled recursively walks a parsed Repository value and prints every
// element collected into a "NotHandled" field and every attribute collected
// into a "NotHandledAttr" field, making unsupported GIR constructs visible.
// Pointers are dereferenced, slices iterated, and struct fields visited in
// declaration order; all other kinds are ignored.
func dumpNotHandled(v reflect.Value) {
	if !v.IsValid() {
		return
	}
	t := v.Type()
	switch t.Kind() {
	case reflect.Ptr:
		dumpNotHandled(v.Elem())
	case reflect.Slice:
		for i := 0; i < v.Len(); i++ {
			dumpNotHandled(v.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			switch t.Field(i).Name {
			case "NotHandled":
				for _, n := range v.Field(i).Interface().([]*NotHandled) {
					p("%s %s\n", n.XMLName.Local, strings.Repeat("=", 64))
					p("%s\n", n.Xml)
				}
			case "NotHandledAttr":
				for _, attr := range v.Field(i).Interface().([]xml.Attr) {
					p("%s = %s %s\n", attr.Name.Local, attr.Value, strings.Repeat("=", 64))
				}
			default:
				dumpNotHandled(v.Field(i))
			}
		}
	}
}
|
[
5
] |
package main
import (
"io/ioutil"
"os"
"reflect"
"testing"
)
// helper reduces duplication for tests that expect getExportedFunctions to
// fail on the given code: it asserts that an error was returned and that its
// concrete type matches expectedErr's type.
func helper(t *testing.T, expectedErr interface{}, code string) {
	t.Helper()
	_, err := getExportedFunctions("", code)
	if err == nil {
		// Must stop here: reflect.TypeOf(nil) below would be a nil
		// reflect.Type, and calling String() on it would panic.
		t.Fatal("Expected error, got nil!")
	}
	expectedErrType := reflect.TypeOf(expectedErr)
	actualErrType := reflect.TypeOf(err)
	if actualErrType != expectedErrType {
		t.Error("Expected error", expectedErrType.String(), "but got",
			actualErrType.String(), "with message", err)
	}
}
// Test_getExportedFunctions exercises getExportedFunctions against inline
// source snippets and an on-disk fixture, checking each expected error type
// (via helper) and the successful single-export case.
func Test_getExportedFunctions(t *testing.T) {
	// Source with no //export comments at all.
	t.Run("noFunctions", func(t *testing.T) {
		helper(t, badImportError{}, "package main")
	})
	// A missing file should surface as an error from the parser/loader.
	t.Run("nonexistentfile", func(t *testing.T) {
		_, err := getExportedFunctions("thisfiledoesnotexist", nil)
		if err == nil {
			t.Error("Expected failure for nonexistent file.")
		}
	})
	// Happy path: the fixture is expected to export exactly one function.
	t.Run("fromFile", func(t *testing.T) {
		res, err := getExportedFunctions("testdata/src0.go", nil)
		if err != nil {
			t.Error("Expected result (len 1), not error.")
		} else {
			if len(res) != 1 {
				t.Errorf("Expected length 1, not %d.", len(res))
			}
		}
	})
	// //export name does not match the function name below it.
	t.Run("badExportComment", func(t *testing.T) {
		helper(t, badExportError{}, `
package main
import "C"
//export funky
func funko() {}
`)
	})
	// Exported functions must not have receivers.
	t.Run("hasReceiver", func(t *testing.T) {
		helper(t, receiverError{}, `
package main
import "C"
//export hasreceiver
func (m int) hasreceiver() {}
`)
	})
	// Exported functions must return at most one value.
	t.Run("multipleReturnValues", func(t *testing.T) {
		helper(t, multipleReturnValuesError{}, `
package main
import "C"
//export multipleitemsreturned
func multipleitemsreturned() (int, error) {
return 0, nil
}
`)
	})
	// Only package main is accepted.
	t.Run("wrongPackageName", func(t *testing.T) {
		helper(t, wrongPackageError{}, `
package hello
`)
	})
	// main must exist but be empty.
	t.Run("nonEmptyMainFunction", func(t *testing.T) {
		helper(t, nonEmptyMainFunctionError{}, `
package main
import "C"
func main() {
println("Hello")
}
`)
	})
	t.Run("noMainFunction", func(t *testing.T) {
		helper(t, noMainFunctionError{}, `
package main
import "C"
//export foo
func foo(i int) {
}
`)
	})
	// import "C" and main alone are not enough: something must be exported.
	t.Run("noFunctionsToExport", func(t *testing.T) {
		helper(t, noFunctionsToExportError{}, `
package main
import "C"
func main(){}
`)
	})
	// Exported functions must take arguments or return a value.
	t.Run("noArgsOrReturnValues", func(t *testing.T) {
		helper(t, noArgsOrReturnValuesError{}, `
package main
import "C"
//export foo
func foo(){}
func main(){}
`)
	})
}
// Test_generateCcode is a placeholder for tests of the C code generator;
// the body is intentionally empty until the FIXME below is addressed.
func Test_generateCcode(t *testing.T) {
	t.Run("someName", func(t *testing.T) {
		// FIXME implement....
	})
}
// Test_main drives the program's main() end to end. TESTING_ESPARRAGO is set
// so that main — presumably — records its exit code in the
// ESPARRAGO_EXIT_CODE environment variable instead of calling os.Exit;
// confirm that protocol in main's implementation. os.Args is restored after
// the test.
func Test_main(t *testing.T) {
	oldArgs := os.Args
	os.Setenv("TESTING_ESPARRAGO", "true")
	defer func() { os.Unsetenv("TESTING_ESPARRAGO") }()
	defer func() { os.Args = oldArgs }()
	// Valid input file and a writable temp output file: main should succeed.
	t.Run("withValidArg", func(t *testing.T) {
		outfile, err := ioutil.TempFile("", "src0.c")
		if err != nil {
			t.Log("Couldn't open test output file.")
			t.Fail()
		}
		outfileName := outfile.Name()
		defer os.Remove(outfileName)
		// Close immediately; main re-opens the path itself.
		if err := outfile.Close(); err != nil {
			t.Log("Couldn't close test output file.")
			t.Fail()
		}
		os.Args = []string{"cmd", "testdata/src0.go", outfileName}
		main()
	})
	// A missing input file should produce exit code 1.
	t.Run("withNonexistentFile", func(t *testing.T) {
		os.Args = []string{"cmd", "a_file_that_does_not_exist", "nonexistent"}
		main()
		if os.Getenv("ESPARRAGO_EXIT_CODE") != "1" {
			t.Error("Expected exit with code 1, got.",
				os.Getenv("ESPARRAGO_EXIT_CODE"))
		}
	})
	// Missing arguments should also produce exit code 1.
	t.Run("withNoArguments", func(t *testing.T) {
		os.Args = []string{"cmd"}
		main()
		if os.Getenv("ESPARRAGO_EXIT_CODE") != "1" {
			t.Error("Expected exit with code 1, got.",
				os.Getenv("ESPARRAGO_EXIT_CODE"))
		}
	})
}
|
[
4
] |
package braintree
import "context"
// SubscriptionGateway exposes the Braintree subscription API calls; it embeds
// *Braintree, whose execute method performs the underlying HTTP requests.
type SubscriptionGateway struct {
	*Braintree
}
// Create creates a new subscription from the given request. A 201 response
// yields the created subscription; any other status is reported as an
// invalidResponseError.
func (g *SubscriptionGateway) Create(ctx context.Context, sub *SubscriptionRequest) (*Subscription, error) {
	resp, err := g.execute(ctx, "POST", "subscriptions", sub)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 201 {
		return resp.subscription()
	}
	return nil, &invalidResponseError{resp}
}
// Update modifies the subscription identified by sub.Id. A 200 response
// yields the updated subscription; any other status is an invalidResponseError.
func (g *SubscriptionGateway) Update(ctx context.Context, sub *SubscriptionRequest) (*Subscription, error) {
	resp, err := g.execute(ctx, "PUT", "subscriptions/"+sub.Id, sub)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 200 {
		return resp.subscription()
	}
	return nil, &invalidResponseError{resp}
}
// Find retrieves the subscription with the given ID. A 200 response yields
// the subscription; any other status is an invalidResponseError.
func (g *SubscriptionGateway) Find(ctx context.Context, subId string) (*Subscription, error) {
	resp, err := g.execute(ctx, "GET", "subscriptions/"+subId, nil)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 200 {
		return resp.subscription()
	}
	return nil, &invalidResponseError{resp}
}
// Cancel cancels the subscription with the given ID. A 200 response yields
// the (now canceled) subscription; any other status is an invalidResponseError.
func (g *SubscriptionGateway) Cancel(ctx context.Context, subId string) (*Subscription, error) {
	resp, err := g.execute(ctx, "PUT", "subscriptions/"+subId+"/cancel", nil)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 200 {
		return resp.subscription()
	}
	return nil, &invalidResponseError{resp}
}
// RetryCharge retries to charge for a Subscription. All options, including
// the Subscription ID, are to be provided by the
// SubscriptionTransactionRequest passed as an argument. Note that the Amount
// has to be > 0. A 201 response means the charge was accepted; any other
// status is an invalidResponseError.
func (g *SubscriptionGateway) RetryCharge(ctx context.Context, txReq *SubscriptionTransactionRequest) error {
	resp, err := g.execute(ctx, "POST", "transactions", txReq)
	if err != nil {
		return err
	}
	if resp.StatusCode == 201 {
		return nil
	}
	return &invalidResponseError{resp}
}
|
[
7
] |
/**
* Link to the problem
* https://leetcode.com/problems/remove-outermost-parentheses/
*/
package leetcode
// removeOuterParentheses strips the outermost pair of parentheses from every
// primitive (non-decomposable) balanced group of S, assuming S is a valid
// balanced parentheses string as the problem guarantees.
//
// It tracks the current nesting depth: an opening paren is kept only when it
// is not the first of its group (depth > 0 before it), and a closing paren is
// kept only when it is not the last of its group (depth > 1 before it).
//
// A preallocated byte buffer replaces the original repeated string
// concatenation, which was quadratic in len(S).
func removeOuterParentheses(S string) string {
	result := make([]byte, 0, len(S))
	depth := 0
	for i := 0; i < len(S); i++ {
		switch S[i] {
		case '(':
			if depth > 0 {
				result = append(result, S[i])
			}
			depth++
		case ')':
			if depth > 1 {
				result = append(result, S[i])
			}
			depth--
		}
	}
	return string(result)
}
|
[
2
] |
package main
// convertToTxt serializes a board state as text: a leading "p" (player true)
// or "m" (player false) followed by one letter per square, row by row
// (y-major), using each piece's letterRepresent. convertFromTxt below indexes
// one byte per square, so each representation is a single byte.
//
// Uses a preallocated byte buffer instead of repeated string concatenation
// (quadratic), and drops the non-idiomatic `== true` comparison.
func convertToTxt(board state) string {
	buf := make([]byte, 0, 65)
	if board.player {
		buf = append(buf, 'p')
	} else {
		buf = append(buf, 'm')
	}
	for y := 0; y < 8; y++ {
		for x := 0; x < 8; x++ {
			buf = append(buf, board.addr(x, y).letterRepresent()...)
		}
	}
	return string(buf)
}
// convertFromTxt is the inverse of convertToTxt: it rebuilds a state from a
// string whose first byte encodes the player ('m' = false, anything else =
// true) and whose following 64 bytes encode the pieces row by row (y-major).
func convertFromTxt(text string) state {
	var res = state{0, 0, [64]piece{}, true}
	if text[0] == 'm' {
		res.player = false
	} else {
		// Redundant with the zero-state above, but kept for symmetry.
		res.player = true
	}
	for y := 0; y < 8; y++ {
		for x := 0; x < 8; x++ {
			var p piece
			// Match the single-byte representation back to a concrete piece
			// value. The bool argument is presumably the piece color — TODO
			// confirm against the piece type definitions.
			switch string(text[x+(y*8)+1]) {
			case empty{true}.letterRepresent():
				p = empty{true}
			case empty{false}.letterRepresent():
				p = empty{false}
			case king{true}.letterRepresent():
				p = king{true}
			case king{false}.letterRepresent():
				p = king{false}
			case queen{true}.letterRepresent():
				p = queen{true}
			case queen{false}.letterRepresent():
				p = queen{false}
			case rook{true}.letterRepresent():
				p = rook{true}
			case rook{false}.letterRepresent():
				p = rook{false}
			case bishop{true}.letterRepresent():
				p = bishop{true}
			case bishop{false}.letterRepresent():
				p = bishop{false}
			case knight{true}.letterRepresent():
				p = knight{true}
			case knight{false}.letterRepresent():
				p = knight{false}
			// NOTE(review): only pawn{_, false} is ever reconstructed; if
			// convertToTxt can emit a distinct representation for
			// pawn{_, true}, that flag is lost on round-trip — confirm.
			case pawn{true, false}.letterRepresent():
				p = pawn{true, false}
			case pawn{false, false}.letterRepresent():
				p = pawn{false, false}
			}
			res = res.set(p, x, y)
		}
	}
	return res
}
|
[
0
] |
package main
import (
"encoding/json"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// init registers this handler under the "aur" service name at startup.
func init() {
	registerServiceHandler("aur", aurServiceHandler{})
}
// aurServiceHandler serves badge data for Arch User Repository packages.
type aurServiceHandler struct{}

// aurInfoResult mirrors the JSON response of the AUR RPC v5 "info" endpoint.
type aurInfoResult struct {
	Version     int    `json:"version"`
	Type        string `json:"type"`
	Resultcount int    `json:"resultcount"`
	Results     []struct {
		ID             int      `json:"ID"`
		Name           string   `json:"Name"`
		PackageBaseID  int      `json:"PackageBaseID"`
		PackageBase    string   `json:"PackageBase"`
		Version        string   `json:"Version"`
		Description    string   `json:"Description"`
		URL            string   `json:"URL"`
		NumVotes       int      `json:"NumVotes"`
		Popularity     float64  `json:"Popularity"`
		OutOfDate      int      `json:"OutOfDate"`
		Maintainer     string   `json:"Maintainer"`
		FirstSubmitted int      `json:"FirstSubmitted"`
		LastModified   int      `json:"LastModified"`
		URLPath        string   `json:"URLPath"`
		Depends        []string `json:"Depends"`
		License        []string `json:"License"`
		Keywords       []string `json:"Keywords"`
		MakeDepends    []string `json:"MakeDepends,omitempty"`
	} `json:"results"`
}
// GetDocumentation lists the supported sub-commands (version, votes, license,
// updated) with a demo path for each, for the service's documentation page.
func (a aurServiceHandler) GetDocumentation() serviceHandlerDocumentationList {
	return serviceHandlerDocumentationList{
		{
			ServiceName: "AUR package version",
			DemoPath:    "/aur/version/yay",
			Arguments:   []string{"version", "<package name>"},
		},
		{
			ServiceName: "AUR package votes",
			DemoPath:    "/aur/votes/yay",
			Arguments:   []string{"votes", "<package name>"},
		},
		{
			ServiceName: "AUR package license",
			DemoPath:    "/aur/license/yay",
			Arguments:   []string{"license", "<package name>"},
		},
		{
			ServiceName: "AUR package last update",
			DemoPath:    "/aur/updated/yay",
			Arguments:   []string{"updated", "<package name>"},
		},
	}
}
// IsEnabled reports whether this handler is active; the AUR handler is always on.
func (aurServiceHandler) IsEnabled() bool { return true }
// Handle dispatches the service command (params[0]) to the matching
// sub-handler, passing the remaining parameters (the package name) through.
// At least a command and one parameter are required.
func (a aurServiceHandler) Handle(ctx context.Context, params []string) (title, text, color string, err error) {
	if len(params) < 2 {
		return title, text, color, errors.New("No service-command / parameters were given")
	}
	switch params[0] {
	case "license":
		return a.handleAURLicense(ctx, params[1:])
	case "updated":
		return a.handleAURUpdated(ctx, params[1:])
	case "version":
		return a.handleAURVersion(ctx, params[1:])
	case "votes":
		return a.handleAURVotes(ctx, params[1:])
	default:
		return title, text, color, errors.New("An unknown service command was called")
	}
}
// handleAURLicense returns the license(s) of the given AUR package joined
// with ", ", cached for 10 minutes under "aur_license". The returned title is
// the fixed label "license" — NOTE(review): handleAURVersion/handleAURVotes
// return the package name instead; confirm this asymmetry is intentional.
func (a aurServiceHandler) handleAURLicense(ctx context.Context, params []string) (title, text, color string, err error) {
	title = params[0]
	// An error from Get is treated as a cache miss and triggers a fetch.
	text, err = cacheStore.Get("aur_license", title)
	if err != nil {
		info, err := a.fetchAURInfo(ctx, params[0])
		if err != nil {
			return title, text, color, err
		}
		text = strings.Join(info.Results[0].License, ", ")
		cacheStore.Set("aur_license", title, text, 10*time.Minute)
	}
	return "license", text, "blue", nil
}
// handleAURVersion returns the current version of the given AUR package,
// cached for 10 minutes under "aur_version", with the package name as title
// and a blue badge color.
func (a aurServiceHandler) handleAURVersion(ctx context.Context, params []string) (title, text, color string, err error) {
	title = params[0]
	// An error from Get is treated as a cache miss and triggers a fetch.
	text, err = cacheStore.Get("aur_version", title)
	if err != nil {
		info, err := a.fetchAURInfo(ctx, params[0])
		if err != nil {
			return title, text, color, err
		}
		text = info.Results[0].Version
		cacheStore.Set("aur_version", title, text, 10*time.Minute)
	}
	return title, text, "blue", nil
}
// handleAURUpdated returns the package's last-modified time formatted as
// "2006-01-02 15:04:05", appending " (outdated)" when the AUR flags it out of
// date. Cached for 10 minutes under "aur_updated"; the badge turns red for
// outdated packages and the title is the fixed label "last updated".
func (a aurServiceHandler) handleAURUpdated(ctx context.Context, params []string) (title, text, color string, err error) {
	title = params[0]
	// An error from Get is treated as a cache miss and triggers a fetch.
	text, err = cacheStore.Get("aur_updated", title)
	if err != nil {
		info, err := a.fetchAURInfo(ctx, params[0])
		if err != nil {
			return title, text, color, err
		}
		update := time.Unix(int64(info.Results[0].LastModified), 0)
		text = update.Format("2006-01-02 15:04:05")
		if info.Results[0].OutOfDate > 0 {
			text = text + " (outdated)"
		}
		cacheStore.Set("aur_updated", title, text, 10*time.Minute)
	}
	// Color is derived from the (possibly cached) text so cached outdated
	// entries also render red.
	color = "blue"
	if strings.Contains(text, "outdated") {
		color = "red"
	}
	return "last updated", text, color, nil
}
// handleAURVotes returns the package's vote count as "<n> votes", cached for
// 10 minutes under "aur_votes", with the package name as title and a
// brightgreen badge color.
func (a aurServiceHandler) handleAURVotes(ctx context.Context, params []string) (title, text, color string, err error) {
	title = params[0]
	// An error from Get is treated as a cache miss and triggers a fetch.
	text, err = cacheStore.Get("aur_votes", title)
	if err != nil {
		info, err := a.fetchAURInfo(ctx, params[0])
		if err != nil {
			return title, text, color, err
		}
		text = strconv.Itoa(info.Results[0].NumVotes) + " votes"
		cacheStore.Set("aur_votes", title, text, 10*time.Minute)
	}
	return title, text, "brightgreen", nil
}
// fetchAURInfo queries the AUR RPC v5 "info" endpoint for the given package
// and decodes the JSON response. It returns an error if the request cannot be
// built or executed, the response cannot be parsed, or no package matches.
func (a aurServiceHandler) fetchAURInfo(ctx context.Context, pkg string) (*aurInfoResult, error) {
	params := url.Values{
		"v":    []string{"5"},
		"type": []string{"info"},
		"arg":  []string{pkg},
	}

	// Named reqURL so the string does not shadow the imported net/url package.
	reqURL := "https://aur.archlinux.org/rpc/?" + params.Encode()

	req, err := http.NewRequest("GET", reqURL, nil)
	if err != nil {
		return nil, errors.Wrap(err, "Failed to build AUR request")
	}

	resp, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		return nil, errors.Wrap(err, "Failed to fetch AUR info")
	}
	defer resp.Body.Close()

	out := &aurInfoResult{}
	if err := json.NewDecoder(resp.Body).Decode(out); err != nil {
		return nil, errors.Wrap(err, "Failed to parse AUR info")
	}

	if out.Resultcount == 0 {
		return nil, errors.New("No package was found")
	}

	return out, nil
}
|
[
0
] |
package photos
import (
"fmt"
"sort"
"strings"
"time"
)
const (
	// layout is the reference-time format of the timestamps in the photo
	// listing input.
	layout = "2006-01-02 15:04:05"
)
// FileData is one parsed input line: the file's extension, its name (the
// second comma-separated field), its parsed timestamp, and Index, the line's
// original position in the input.
type FileData struct {
	Extension string
	Name      string
	Timestamp time.Time
	Index     int
}

// FileDataIndexed is a FileData after chronological numbering: NameIndex is
// the 1-based position of this file among files sharing the same Name,
// ordered by timestamp; Index is still the original input position.
type FileDataIndexed struct {
	Name      string
	NameIndex int
	Extension string
	Index     int
}
// FileNameCounter counts how many files share each name so that per-name
// indices can be zero-padded when a name occurs ten or more times.
type FileNameCounter struct {
	fileNameCounterMap map[string]int
}

// NewFileNameCounter returns an empty, ready-to-use counter.
func NewFileNameCounter() *FileNameCounter {
	return &FileNameCounter{
		fileNameCounterMap: make(map[string]int),
	}
}

// AddFileName increments the occurrence count for fileName.
func (f *FileNameCounter) AddFileName(fileName string) {
	// A missing key reads as 0, so this single increment covers both the
	// first and every subsequent occurrence.
	f.fileNameCounterMap[fileName]++
}

// GetFormatedIndex renders index for fileName: the index is zero-padded to
// two digits only when it is a single digit AND the name occurs ten or more
// times in total (unknown names count as zero occurrences).
// Note: the "Formated" spelling is kept to preserve the existing API.
func (f *FileNameCounter) GetFormatedIndex(fileName string, index int) string {
	if index < 10 && f.fileNameCounterMap[fileName] >= 10 {
		return fmt.Sprintf("0%d", index)
	}
	return fmt.Sprintf("%d", index)
}
// RenamePhotos takes a newline-separated listing whose lines have three
// comma-separated fields — "<file>.<ext>, <name>, <timestamp>" (timestamp in
// the layout format) — and returns one renamed file per line:
// "<name><index>.<ext>", where index numbers the files sharing a name in
// chronological order (zero-padded via GetFormatedIndex when a name occurs
// ten or more times). Output lines appear in the original input order.
// Malformed lines or timestamps are not guarded against — TODO confirm input
// is always well-formed.
func RenamePhotos(s string) string {
	var (
		fileDataList        []FileData
		fileDataMap         = make(map[string]int)
		fileDataIndexedList []FileDataIndexed
		b                   strings.Builder
		fileNameCounter     *FileNameCounter
	)
	fileNameCounter = NewFileNameCounter()
	photos := strings.Split(s, "\n")
	// Parse each line into a FileData, remembering its input position.
	for i, photo := range photos {
		data := strings.Split(photo, ",")
		extension := strings.Split(strings.TrimSpace(data[0]), ".")
		name := strings.TrimSpace(data[1])
		str := strings.TrimSpace(data[2])
		timestamp, _ := time.Parse(layout, str)
		fileData := FileData{
			Extension: extension[1],
			Name:      name,
			Timestamp: timestamp,
			Index:     i,
		}
		fileDataList = append(fileDataList, fileData)
		fileNameCounter.AddFileName(name)
	}
	// sort data by date
	sort.Slice(fileDataList, func(i, j int) bool {
		return fileDataList[i].Timestamp.Before(fileDataList[j].Timestamp)
	})
	// index the data: in chronological order, assign each file its 1-based
	// position among files with the same name.
	for _, f := range fileDataList {
		var i int
		if v, ok := fileDataMap[f.Name]; ok {
			v = v + 1
			fileDataMap[f.Name] = v
			i = v
		} else {
			fileDataMap[f.Name] = 1
			i = 1
		}
		fileDataIndexed := FileDataIndexed{
			Name:      f.Name,
			NameIndex: i,
			Extension: f.Extension,
			Index:     f.Index,
		}
		fileDataIndexedList = append(fileDataIndexedList, fileDataIndexed)
	}
	// sort by index (restore the original input order for output)
	sort.Slice(fileDataIndexedList, func(i, j int) bool {
		return fileDataIndexedList[i].Index < (fileDataIndexedList[j].Index)
	})
	for _, f := range fileDataIndexedList {
		nameIndex := fileNameCounter.GetFormatedIndex(f.Name, f.NameIndex)
		b.WriteString(fmt.Sprintf("%s%s.%s\n", f.Name, nameIndex, f.Extension))
	}
	return b.String()
}
|
[
0
] |
package main
import (
"net/http"
"regexp"
"sort"
"strings"
"text/template"
)
// SearchTemplateData is the template context passed to the search page:
// the results of the last search plus the site-wide global context.
type SearchTemplateData struct {
	SearchResults *SearchResults
	GlobalContext *GlobalContext
}
// SearchResults contains all the matches found for a single search term:
// page-name matches and page-content matches.
type SearchResults struct {
	SearchTerm     string
	NameMatches    []string       // names of pages whose name matched
	ContentMatches []ContentMatch // pages whose content matched
}
// ContentMatch contains a page content match: the page name, the matching
// line with the first match highlighted, and the total match count.
type ContentMatch struct {
	Name            string
	Content         string // one line of the page with the first match in <b> tags
	NumberOfMatches int
}
// searchResults holds the outcome of the most recent search.
// NOTE(review): package-level and overwritten by search() on every request;
// concurrent requests would race on it — confirm handler serialization.
var searchResults SearchResults

// searchTemplate renders the search results page inside the base layout.
var searchTemplate = template.Must(
	template.ParseFiles("templates/search.html", "templates/base.html"))

// pageCache maps page name -> page content for in-memory full-text search.
var pageCache = map[string]string{}
// cacheAllPages loads every page in the sitemap into pageCache so that
// searches never touch the disk.
// NOTE(review): the error from loadPage is ignored — a failed load would
// dereference a nil page here; confirm loadPage cannot fail for sitemap
// entries.
func cacheAllPages() {
	for mapPos := 0; mapPos < len(sitemap); mapPos++ {
		page, _ := loadPage(sitemap[mapPos].Name())
		pageCache[page.Name] = string(page.Content)
	}
}
// updatePageCache refreshes a single page's cached content (called after
// a page changes so search results stay current).
func updatePageCache(page *Page) {
	pageCache[page.Name] = string(page.Content)
}
// highlightSubString returns the single line of mainString that contains
// the first match in matches (regexp FindAllStringIndex pairs), with the
// matched substring wrapped in <b>...</b>. Only matches[0] is used.
func highlightSubString(mainString string, matches [][]int) string {
	start, end := matches[0][0], matches[0][1]
	// Expand the match to the enclosing line: back to the previous newline
	// (or the string start) and forward to the next newline (or the end).
	lineStart := strings.LastIndex(mainString[:start], "\n") + 1
	lineEnd := strings.Index(mainString[end:], "\n")
	if lineEnd < 0 {
		lineEnd = len(mainString)
	} else {
		lineEnd += end
	}
	var b strings.Builder
	b.WriteString(mainString[lineStart:start])
	b.WriteString("<b>")
	b.WriteString(mainString[start:end])
	b.WriteString("</b>")
	b.WriteString(mainString[end:lineEnd])
	return b.String()
}
// search populates the package-level searchResults with all sitemap name
// matches and page content matches for searchTerm, compiled as a
// case-insensitive regular expression. An invalid pattern yields an empty
// result set rather than an error.
func search(searchTerm string) {
	var nameMatches []string
	var contentMatches []ContentMatch
	caseinsensitiveMatch, err := regexp.Compile(`(?i)` + searchTerm)
	if err != nil {
		// Invalid user pattern: publish empty results for this term.
		searchResults = SearchResults{
			SearchTerm: searchTerm,
			NameMatches: nameMatches,
			ContentMatches: contentMatches}
		return
	}
	// Match against page names from the sitemap.
	for mapPos := 0; mapPos < len(sitemap); mapPos++ {
		if caseinsensitiveMatch.MatchString(sitemap[mapPos].Name()) {
			nameMatches = append(nameMatches, sitemap[mapPos].Name())
		}
	}
	// Match against cached page contents; all matches per page are counted
	// but only the first one is highlighted.
	for name, content := range pageCache {
		matches := caseinsensitiveMatch.FindAllStringIndex(content, -1)
		if len(matches) > 0 {
			contentMatches = append(contentMatches,
				ContentMatch{
					Name: name,
					Content: highlightSubString(content, matches),
					NumberOfMatches: len(matches)})
		}
	}
	// Order by number of matches descending, breaking ties by name.
	sort.Slice(contentMatches, func(i, j int) bool {
		if contentMatches[i].NumberOfMatches == contentMatches[j].NumberOfMatches {
			return contentMatches[i].Name < contentMatches[j].Name
		}
		return contentMatches[i].NumberOfMatches > contentMatches[j].NumberOfMatches
	})
	searchResults = SearchResults{SearchTerm: searchTerm, NameMatches: nameMatches, ContentMatches: contentMatches}
}
// searchHandler handles the search endpoint: it runs the search for the
// "search" form value and renders the results with the base template.
// NOTE(review): the ExecuteTemplate error is ignored — render failures
// are invisible to the caller; confirm that is intentional.
func searchHandler(writer http.ResponseWriter, request *http.Request) {
	searchTerm := request.FormValue("search")
	search(searchTerm)
	templateData := SearchTemplateData{SearchResults: &searchResults, GlobalContext: &globalContext}
	searchTemplate.ExecuteTemplate(writer, "base", templateData)
}
|
[
0
] |
package protocol
import (
"bytes"
"context"
"errors"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
"github.com/opentracing/opentracing-go"
xcontext "golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
serrors "gopkg.in/src-d/go-errors.v1"
"github.com/bblfsh/sdk/v3/driver"
"github.com/bblfsh/sdk/v3/uast/nodes"
"github.com/bblfsh/sdk/v3/uast/nodes/nodesproto"
)
//go:generate protoc --proto_path=$GOPATH/src:. --gogo_out=plugins=grpc:. ./driver.proto
const (
mb = 1 << 20
// DefaultGRPCMaxMessageBytes is maximum msg size for gRPC.
DefaultGRPCMaxMessageBytes = 100 * mb
)
// ServerOptions returns a set of common options that should be used in bblfsh server.
//
// It automatically enables OpenTrace if a global tracer is set.
func ServerOptions() []grpc.ServerOption {
	opts := []grpc.ServerOption{
		grpc.MaxSendMsgSize(DefaultGRPCMaxMessageBytes),
		grpc.MaxRecvMsgSize(DefaultGRPCMaxMessageBytes),
	}
	tracer := opentracing.GlobalTracer()
	// A NoopTracer means tracing is disabled; add interceptors only when a
	// real tracer has been registered.
	if _, isNoop := tracer.(opentracing.NoopTracer); !isNoop {
		opts = append(opts,
			grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer)),
			grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer)),
		)
	}
	return opts
}
// DialOptions returns a set of common options that should be used when dialing bblfsh server.
//
// It automatically enables OpenTrace if a global tracer is set.
func DialOptions() []grpc.DialOption {
	opts := []grpc.DialOption{grpc.WithDefaultCallOptions(
		grpc.MaxCallSendMsgSize(DefaultGRPCMaxMessageBytes),
		grpc.MaxCallRecvMsgSize(DefaultGRPCMaxMessageBytes),
	)}
	tracer := opentracing.GlobalTracer()
	// Only attach tracing interceptors when a real (non-noop) tracer exists.
	if _, isNoop := tracer.(opentracing.NoopTracer); !isNoop {
		opts = append(opts,
			grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer)),
			grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer)),
		)
	}
	return opts
}
// RegisterDriver registers the given driver implementation on the gRPC
// server, wrapping it in the protocol adapter.
func RegisterDriver(srv *grpc.Server, d driver.Driver) {
	RegisterDriverServer(srv, &driverServer{d: d})
}
// AsDriver wraps a gRPC client connection so it can be used through the
// driver.Driver interface.
func AsDriver(cc *grpc.ClientConn) driver.Driver {
	return &client{c: NewDriverClient(cc)}
}
// toParseErrors converts err to wire-format ParseError values. A
// driver.ErrMulti expands to one ParseError per contained error; any
// other error becomes a single-element slice.
func toParseErrors(err error) []*ParseError {
	multi, ok := err.(*driver.ErrMulti)
	if !ok {
		return []*ParseError{
			{Text: err.Error()},
		}
	}
	out := make([]*ParseError, 0, len(multi.Errors))
	for _, sub := range multi.Errors {
		out = append(out, &ParseError{Text: sub.Error()})
	}
	return out
}
// driverServer adapts a driver.Driver to the generated gRPC DriverServer
// interface.
type driverServer struct {
	d driver.Driver
}
// Parse implements DriverServer. It runs the underlying driver, maps
// driver error kinds onto gRPC status codes (driver failure -> Internal,
// transform failure -> FailedPrecondition, unsupported mode ->
// InvalidArgument), and encodes the resulting UAST into the response.
// Syntax errors are NOT gRPC errors: they are returned with an OK status
// and the Errors field populated, alongside any partial UAST.
func (s *driverServer) Parse(rctx xcontext.Context, req *ParseRequest) (*ParseResponse, error) {
	sp, ctx := opentracing.StartSpanFromContext(rctx, "bblfsh.server.Parse")
	defer sp.Finish()
	opts := &driver.ParseOptions{
		Mode: driver.Mode(req.Mode),
		Language: req.Language,
		Filename: req.Filename,
	}
	var resp ParseResponse
	n, err := s.d.Parse(ctx, req.Content, opts)
	resp.Language = opts.Language // can be set during the call
	if e, ok := err.(*serrors.Error); ok {
		cause := e.Cause()
		// Order matters: the most severe kinds become gRPC errors.
		if driver.ErrDriverFailure.Is(err) {
			return nil, status.Error(codes.Internal, cause.Error())
		} else if driver.ErrTransformFailure.Is(err) {
			return nil, status.Error(codes.FailedPrecondition, cause.Error())
		} else if driver.ErrModeNotSupported.Is(err) {
			return nil, status.Error(codes.InvalidArgument, cause.Error())
		}
		if !driver.ErrSyntax.Is(err) {
			return nil, err // unknown error
		}
		// partial parse or syntax error; we will send an OK status code, but will fill Errors field
		resp.Errors = toParseErrors(cause)
	}
	// Encode whatever UAST we have (possibly partial) into the response.
	dsp, _ := opentracing.StartSpanFromContext(ctx, "uast.Encode")
	defer dsp.Finish()
	buf := bytes.NewBuffer(nil)
	err = nodesproto.WriteTo(buf, n)
	if err != nil {
		return nil, err // unknown error = server failure
	}
	resp.Uast = buf.Bytes()
	return &resp, nil
}
// client adapts a generated gRPC DriverClient to the driver.Driver
// interface.
type client struct {
	c DriverClient
}
// Parse implements DriverClient. It issues the gRPC Parse call and maps
// gRPC status codes back onto driver error kinds (the inverse of the
// mapping in driverServer.Parse), then decodes the returned UAST.
// A response may still carry a syntax error; that is surfaced by
// resp.Nodes().
func (c *client) Parse(rctx context.Context, src string, opts *driver.ParseOptions) (nodes.Node, error) {
	sp, ctx := opentracing.StartSpanFromContext(rctx, "bblfsh.client.Parse")
	defer sp.Finish()
	req := &ParseRequest{Content: src}
	if opts != nil {
		req.Mode = Mode(opts.Mode)
		req.Language = opts.Language
		req.Filename = opts.Filename
	}
	resp, err := c.c.Parse(ctx, req)
	// Translate well-known status codes back into driver error kinds.
	if s, ok := status.FromError(err); ok {
		var kind *serrors.Kind
		switch s.Code() {
		case codes.Internal:
			kind = driver.ErrDriverFailure
		case codes.FailedPrecondition:
			kind = driver.ErrTransformFailure
		case codes.InvalidArgument:
			kind = driver.ErrModeNotSupported
		}
		if kind != nil {
			return nil, kind.Wrap(errors.New(s.Message()))
		}
	}
	if err != nil {
		return nil, err // server or network error
	}
	// The server may have auto-detected the language during the call.
	if opts != nil && opts.Language == "" {
		opts.Language = resp.Language
	}
	dsp, _ := opentracing.StartSpanFromContext(ctx, "uast.Decode")
	defer dsp.Finish()
	// it may be still a parsing error
	return resp.Nodes()
}
// Nodes decodes the UAST carried by the response. When the response also
// carries parse errors, the (possibly partial) UAST is returned TOGETHER
// with a wrapped driver.ErrSyntax — callers must check both values.
func (m *ParseResponse) Nodes() (nodes.Node, error) {
	ast, err := nodesproto.ReadTree(bytes.NewReader(m.Uast))
	if err != nil {
		return nil, err
	}
	if len(m.Errors) != 0 {
		var errs []error
		for _, e := range m.Errors {
			errs = append(errs, errors.New(e.Text))
		}
		// syntax error or partial parse - return both UAST and an error
		err = driver.ErrSyntax.Wrap(driver.JoinErrors(errs))
	}
	return ast, err
}
|
[
5
] |
//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite
package processors
import (
"context"
"testing"
"github.com/stretchr/testify/suite"
"github.com/stellar/go/ingest"
"github.com/stellar/go/services/horizon/internal/db2/history"
"github.com/stellar/go/xdr"
)
// TestLiquidityPoolsChangeProcessorTestSuiteState runs the state-ingestion
// test suite for the liquidity pools change processor.
func TestLiquidityPoolsChangeProcessorTestSuiteState(t *testing.T) {
	suite.Run(t, new(LiquidityPoolsChangeProcessorTestSuiteState))
}
// LiquidityPoolsChangeProcessorTestSuiteState tests the processor against
// state (checkpoint) changes, using a mocked history query interface.
type LiquidityPoolsChangeProcessorTestSuiteState struct {
	suite.Suite
	ctx       context.Context
	processor *LiquidityPoolsChangeProcessor
	mockQ     *history.MockQLiquidityPools // mock DB layer; expectations asserted in TearDownTest
	sequence  uint32                       // ledger sequence the processor is constructed with
}
// SetupTest creates a fresh mock and processor before every test.
func (s *LiquidityPoolsChangeProcessorTestSuiteState) SetupTest() {
	s.ctx = context.Background()
	s.mockQ = &history.MockQLiquidityPools{}
	s.sequence = 456
	s.processor = NewLiquidityPoolsChangeProcessor(s.mockQ, s.sequence)
}
// TearDownTest commits the processor (which triggers the mocked DB calls)
// and verifies every expectation registered during the test was met.
func (s *LiquidityPoolsChangeProcessorTestSuiteState) TearDownTest() {
	s.Assert().NoError(s.processor.Commit(s.ctx))
	s.mockQ.AssertExpectations(s.T())
}
// TestNoEntries registers only the compaction expectation: with no changes
// processed, Commit (in TearDownTest) must still compact old pools.
func (s *LiquidityPoolsChangeProcessorTestSuiteState) TestNoEntries() {
	s.mockQ.On("CompactLiquidityPools", s.ctx, s.sequence-100).Return(int64(0), nil).Once()
}
// TestNoEntriesWithSequenceLessThanWindow checks that no compaction is
// issued when the sequence is below the compaction window (no mock
// expectations are registered, so Commit must make no DB calls).
func (s *LiquidityPoolsChangeProcessorTestSuiteState) TestNoEntriesWithSequenceLessThanWindow() {
	s.sequence = 50
	s.processor.sequence = s.sequence
	// Nothing processed, assertions in TearDownTest.
}
// TestCreatesLiquidityPools verifies that a change with no Pre entry (a
// newly created pool) produces the expected history row: the pool ID is
// the hex-encoded, right-zero-padded LiquidityPoolId, and reserves map to
// AssetReserves in (AssetA, AssetB) order. The upsert and compaction are
// triggered by Commit in TearDownTest.
func (s *LiquidityPoolsChangeProcessorTestSuiteState) TestCreatesLiquidityPools() {
	lastModifiedLedgerSeq := xdr.Uint32(123)
	// Ledger entry as it appears in the change stream.
	lpoolEntry := xdr.LiquidityPoolEntry{
		LiquidityPoolId: xdr.PoolId{0xca, 0xfe, 0xba, 0xbe, 0xde, 0xad, 0xbe, 0xef},
		Body: xdr.LiquidityPoolEntryBody{
			Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct,
			ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{
				Params: xdr.LiquidityPoolConstantProductParameters{
					AssetA: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"),
					AssetB: xdr.MustNewNativeAsset(),
					Fee: 34,
				},
				ReserveA: 450,
				ReserveB: 500,
				TotalPoolShares: 412241,
				PoolSharesTrustLineCount: 52115,
			},
		},
	}
	// Expected history row derived from the entry above.
	lp := history.LiquidityPool{
		PoolID: "cafebabedeadbeef000000000000000000000000000000000000000000000000",
		Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct,
		Fee: 34,
		TrustlineCount: 52115,
		ShareCount: 412241,
		AssetReserves: []history.LiquidityPoolAssetReserve{
			{
				xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"),
				450,
			},
			{
				xdr.MustNewNativeAsset(),
				500,
			},
		},
		LastModifiedLedger: 123,
	}
	s.mockQ.On("UpsertLiquidityPools", s.ctx, []history.LiquidityPool{lp}).Return(nil).Once()
	s.mockQ.On("CompactLiquidityPools", s.ctx, s.sequence-100).Return(int64(0), nil).Once()
	err := s.processor.ProcessChange(s.ctx, ingest.Change{
		Type: xdr.LedgerEntryTypeLiquidityPool,
		Pre: nil, // nil Pre marks a creation
		Post: &xdr.LedgerEntry{
			Data: xdr.LedgerEntryData{
				Type: xdr.LedgerEntryTypeLiquidityPool,
				LiquidityPool: &lpoolEntry,
			},
			LastModifiedLedgerSeq: lastModifiedLedgerSeq,
		},
	})
	s.Assert().NoError(err)
}
// TestLiquidityPoolsChangeProcessorTestSuiteLedger runs the ledger-change
// test suite for the liquidity pools change processor.
func TestLiquidityPoolsChangeProcessorTestSuiteLedger(t *testing.T) {
	suite.Run(t, new(LiquidityPoolsChangeProcessorTestSuiteLedger))
}
// LiquidityPoolsChangeProcessorTestSuiteLedger tests the processor against
// per-ledger changes (create/update/remove), using a mocked query layer.
type LiquidityPoolsChangeProcessorTestSuiteLedger struct {
	suite.Suite
	ctx       context.Context
	processor *LiquidityPoolsChangeProcessor
	mockQ     *history.MockQLiquidityPools // mock DB layer; expectations asserted in TearDownTest
	sequence  uint32                       // ledger sequence the processor is constructed with
}
// SetupTest creates a fresh mock and processor before every test.
func (s *LiquidityPoolsChangeProcessorTestSuiteLedger) SetupTest() {
	s.ctx = context.Background()
	s.mockQ = &history.MockQLiquidityPools{}
	s.sequence = 456
	s.processor = NewLiquidityPoolsChangeProcessor(s.mockQ, s.sequence)
}
// TearDownTest commits the processor (which triggers the mocked DB calls)
// and verifies every expectation registered during the test was met.
func (s *LiquidityPoolsChangeProcessorTestSuiteLedger) TearDownTest() {
	s.Assert().NoError(s.processor.Commit(s.ctx))
	s.mockQ.AssertExpectations(s.T())
}
// TestNoTransactions registers only the compaction expectation: Commit in
// TearDownTest must compact even when no changes were processed.
func (s *LiquidityPoolsChangeProcessorTestSuiteLedger) TestNoTransactions() {
	s.mockQ.On("CompactLiquidityPools", s.ctx, s.sequence-100).Return(int64(0), nil).Once()
}
// TestNoEntriesWithSequenceLessThanWindow checks that no compaction is
// issued when the sequence is below the compaction window (no mock
// expectations registered, so Commit must make no DB calls).
func (s *LiquidityPoolsChangeProcessorTestSuiteLedger) TestNoEntriesWithSequenceLessThanWindow() {
	s.sequence = 50
	s.processor.sequence = s.sequence
	// Nothing processed, assertions in TearDownTest.
}
// TestNewLiquidityPool processes a creation followed by an update that
// adds a sponsor, and expects a single upsert with the final state: the
// processor coalesces multiple changes to the same pool within a commit.
func (s *LiquidityPoolsChangeProcessorTestSuiteLedger) TestNewLiquidityPool() {
	lastModifiedLedgerSeq := xdr.Uint32(123)
	lpEntry := xdr.LiquidityPoolEntry{
		LiquidityPoolId: xdr.PoolId{0xca, 0xfe, 0xba, 0xbe, 0xde, 0xad, 0xbe, 0xef},
		Body: xdr.LiquidityPoolEntryBody{
			Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct,
			ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{
				Params: xdr.LiquidityPoolConstantProductParameters{
					AssetA: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"),
					AssetB: xdr.MustNewNativeAsset(),
					Fee: 34,
				},
				ReserveA: 450,
				ReserveB: 500,
				TotalPoolShares: 412241,
				PoolSharesTrustLineCount: 52115,
			},
		},
	}
	pre := xdr.LedgerEntry{
		Data: xdr.LedgerEntryData{
			Type: xdr.LedgerEntryTypeLiquidityPool,
			LiquidityPool: &lpEntry,
		},
		LastModifiedLedgerSeq: lastModifiedLedgerSeq,
		Ext: xdr.LedgerEntryExt{
			V: 1,
			V1: &xdr.LedgerEntryExtensionV1{
				SponsoringId: nil,
			},
		},
	}
	// First change: creation (nil Pre).
	err := s.processor.ProcessChange(s.ctx, ingest.Change{
		Type: xdr.LedgerEntryTypeLiquidityPool,
		Pre: nil,
		Post: &pre,
	})
	s.Assert().NoError(err)
	// add sponsor
	post := xdr.LedgerEntry{
		Data: xdr.LedgerEntryData{
			Type: xdr.LedgerEntryTypeLiquidityPool,
			LiquidityPool: &lpEntry,
		},
		LastModifiedLedgerSeq: lastModifiedLedgerSeq,
		Ext: xdr.LedgerEntryExt{
			V: 1,
			V1: &xdr.LedgerEntryExtensionV1{
				SponsoringId: xdr.MustAddressPtr("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"),
			},
		},
	}
	// Second change: update of the same pool at the next ledger.
	pre.LastModifiedLedgerSeq = pre.LastModifiedLedgerSeq - 1
	err = s.processor.ProcessChange(s.ctx, ingest.Change{
		Type: xdr.LedgerEntryTypeLiquidityPool,
		Pre: &pre,
		Post: &post,
	})
	s.Assert().NoError(err)
	// Expected final row after both changes are coalesced.
	postLP := history.LiquidityPool{
		PoolID: "cafebabedeadbeef000000000000000000000000000000000000000000000000",
		Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct,
		Fee: 34,
		TrustlineCount: 52115,
		ShareCount: 412241,
		AssetReserves: []history.LiquidityPoolAssetReserve{
			{
				xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"),
				450,
			},
			{
				xdr.MustNewNativeAsset(),
				500,
			},
		},
		LastModifiedLedger: 123,
	}
	s.mockQ.On("UpsertLiquidityPools", s.ctx, []history.LiquidityPool{postLP}).Return(nil).Once()
	s.mockQ.On("CompactLiquidityPools", s.ctx, s.sequence-100).Return(int64(0), nil).Once()
}
// TestUpdateLiquidityPool processes a single update (Pre and Post both
// set, sponsor added) and expects one upsert with the post state on
// Commit.
func (s *LiquidityPoolsChangeProcessorTestSuiteLedger) TestUpdateLiquidityPool() {
	lpEntry := xdr.LiquidityPoolEntry{
		LiquidityPoolId: xdr.PoolId{0xca, 0xfe, 0xba, 0xbe, 0xde, 0xad, 0xbe, 0xef},
		Body: xdr.LiquidityPoolEntryBody{
			Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct,
			ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{
				Params: xdr.LiquidityPoolConstantProductParameters{
					AssetA: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"),
					AssetB: xdr.MustNewNativeAsset(),
					Fee: 34,
				},
				ReserveA: 450,
				ReserveB: 500,
				TotalPoolShares: 412241,
				PoolSharesTrustLineCount: 52115,
			},
		},
	}
	lastModifiedLedgerSeq := xdr.Uint32(123)
	pre := xdr.LedgerEntry{
		Data: xdr.LedgerEntryData{
			Type: xdr.LedgerEntryTypeLiquidityPool,
			LiquidityPool: &lpEntry,
		},
		LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1,
		Ext: xdr.LedgerEntryExt{
			V: 1,
			V1: &xdr.LedgerEntryExtensionV1{
				SponsoringId: nil,
			},
		},
	}
	// add sponsor
	post := xdr.LedgerEntry{
		Data: xdr.LedgerEntryData{
			Type: xdr.LedgerEntryTypeLiquidityPool,
			LiquidityPool: &lpEntry,
		},
		LastModifiedLedgerSeq: lastModifiedLedgerSeq,
		Ext: xdr.LedgerEntryExt{
			V: 1,
			V1: &xdr.LedgerEntryExtensionV1{
				SponsoringId: xdr.MustAddressPtr("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"),
			},
		},
	}
	err := s.processor.ProcessChange(s.ctx, ingest.Change{
		Type: xdr.LedgerEntryTypeLiquidityPool,
		Pre: &pre,
		Post: &post,
	})
	s.Assert().NoError(err)
	// Expected row: post state with the post entry's LastModifiedLedger.
	postLP := history.LiquidityPool{
		PoolID: "cafebabedeadbeef000000000000000000000000000000000000000000000000",
		Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct,
		Fee: 34,
		TrustlineCount: 52115,
		ShareCount: 412241,
		AssetReserves: []history.LiquidityPoolAssetReserve{
			{
				xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"),
				450,
			},
			{
				xdr.MustNewNativeAsset(),
				500,
			},
		},
		LastModifiedLedger: 123,
	}
	s.mockQ.On("UpsertLiquidityPools", s.ctx, []history.LiquidityPool{postLP}).Return(nil).Once()
	s.mockQ.On("CompactLiquidityPools", s.ctx, s.sequence-100).Return(int64(0), nil).Once()
}
// TestRemoveLiquidityPool processes a removal (nil Post) and expects the
// pool to be upserted as a soft-deleted row (Deleted=true, stamped with
// the processor's sequence) rather than deleted outright.
func (s *LiquidityPoolsChangeProcessorTestSuiteLedger) TestRemoveLiquidityPool() {
	lpEntry := xdr.LiquidityPoolEntry{
		LiquidityPoolId: xdr.PoolId{0xca, 0xfe, 0xba, 0xbe, 0xde, 0xad, 0xbe, 0xef},
		Body: xdr.LiquidityPoolEntryBody{
			Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct,
			ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{
				Params: xdr.LiquidityPoolConstantProductParameters{
					AssetA: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"),
					AssetB: xdr.MustNewNativeAsset(),
					Fee: 34,
				},
				ReserveA: 450,
				ReserveB: 123,
				TotalPoolShares: 412241,
				PoolSharesTrustLineCount: 52115,
			},
		},
	}
	lastModifiedLedgerSeq := xdr.Uint32(123)
	pre := xdr.LedgerEntry{
		Data: xdr.LedgerEntryData{
			Type: xdr.LedgerEntryTypeLiquidityPool,
			LiquidityPool: &lpEntry,
		},
		LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1,
		Ext: xdr.LedgerEntryExt{
			V: 1,
			V1: &xdr.LedgerEntryExtensionV1{
				SponsoringId: nil,
			},
		},
	}
	err := s.processor.ProcessChange(s.ctx, ingest.Change{
		Type: xdr.LedgerEntryTypeLiquidityPool,
		Pre: &pre,
		Post: nil, // nil Post marks a removal
	})
	s.Assert().NoError(err)
	// Build the expected soft-deleted row from the pre entry.
	deleted := s.processor.ledgerEntryToRow(&pre)
	deleted.Deleted = true
	deleted.LastModifiedLedger = s.processor.sequence
	s.mockQ.On("UpsertLiquidityPools", s.ctx, []history.LiquidityPool{deleted}).Return(nil).Once()
	s.mockQ.On("CompactLiquidityPools", s.ctx, s.sequence-100).Return(int64(0), nil).Once()
}
|
[
0
] |
package sync
import (
"io"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/juju/errors"
"github.com/covexo/devspace/pkg/devspace/clients/kubectl"
"github.com/rjeczalik/notify"
)
// upstream watches the local filesystem and pushes changes (creates,
// modifications, removals) into the remote container through a shell
// started via kubectl exec.
type upstream struct {
	events     chan notify.EventInfo // raw filesystem events from the watcher
	interrupt  chan bool             // signals collectChanges to stop
	config     *SyncConfig
	stdinPipe  io.WriteCloser // stdin of the remote shell (commands + tar data)
	stdoutPipe io.ReadCloser  // stdout of the remote shell (ack strings)
	stderrPipe io.ReadCloser  // stderr of the remote shell, mirrored locally
}
// start allocates the event and interrupt channels and opens the remote
// shell session used for uploading changes.
func (u *upstream) start() error {
	// High buffer size so we don't miss any fsevents if there are a lot of changes.
	u.events = make(chan notify.EventInfo, 10000)
	u.interrupt = make(chan bool, 1)
	if err := u.startShell(); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// collectChanges runs forever (until interrupted), batching filesystem
// events into change lists and applying them remotely. Events are drained
// in bursts; a batch is considered complete when a 600ms poll adds no new
// changes (NOTE(review): the inline comment below says 1 second — the
// actual timeout is 600ms; confirm which is intended). Within a batch,
// consecutive creates and consecutive removes are grouped into runs and
// flushed together.
func (u *upstream) collectChanges() error {
	for {
		var changes []*fileInformation
		changeAmount := 0
		for {
			select {
			case <-u.interrupt:
				// Stop requested: exit cleanly.
				return nil
			case event := <-u.events:
				events := make([]notify.EventInfo, 0, 10)
				events = append(events, event)
				// We need this loop to catch up if we got a lot of change events
				for eventsLeft := true; eventsLeft == true; {
					select {
					case event := <-u.events:
						events = append(events, event)
						break
					default:
						eventsLeft = false
						break
					}
				}
				// Translate raw events into change records under the file map lock.
				u.config.fileIndex.ExecuteSafe(func(fileMap map[string]*fileInformation) {
					changes = append(changes, u.getfileInformationFromEvent(fileMap, events)...)
				})
			case <-time.After(time.Millisecond * 600):
				break
			}
			// We gather changes till there are no more changes for 1 second
			if changeAmount == len(changes) && changeAmount > 0 {
				break
			}
			changeAmount = len(changes)
		}
		var files []*fileInformation
		lenChanges := len(changes)
		u.config.Logf("[Upstream] Processing %d changes\n", lenChanges)
		for index, element := range changes {
			// Mtime > 0 marks a create/modify; Mtime == 0 marks a remove.
			if element.Mtime > 0 {
				if lenChanges <= 10 {
					u.config.Logf("[Upstream] Create %s\n", element.Name)
				}
				files = append(files, element)
				// Look ahead: flush the create run when the next change is a
				// remove or this is the last change.
				if len(changes) <= index+1 || changes[index+1].Mtime == 0 {
					err := u.sendFiles(files)
					if err != nil {
						return errors.Trace(err)
					}
					u.config.Logf("[Upstream] Successfully sent %d create changes\n", len(changes))
					files = make([]*fileInformation, 0, 10)
				}
			} else {
				if lenChanges <= 10 {
					u.config.Logf("[Upstream] Remove %s\n", element.Name)
				}
				files = append(files, element)
				// Look ahead: flush the remove run when the next change is a
				// create or this is the last change.
				if len(changes) <= index+1 || changes[index+1].Mtime > 0 {
					err := u.config.fileIndex.ExecuteSafeError(func(fileMap map[string]*fileInformation) error {
						return u.applyRemoves(fileMap, files)
					})
					if err != nil {
						return errors.Trace(err)
					}
					u.config.Logf("[Upstream] Successfully sent %d delete changes\n", len(changes))
					files = make([]*fileInformation, 0, 10)
				}
			}
		}
	}
}
// getfileInformationFromEvent converts raw filesystem events into change
// records, filtering out excluded paths and events already reflected in
// fileMap. A record with a populated Mtime/Size is a create/modify; a
// record with only Name set (zero Mtime) is a remove. The caller must
// hold the fileMap lock.
func (u *upstream) getfileInformationFromEvent(fileMap map[string]*fileInformation, events []notify.EventInfo) []*fileInformation {
	changes := make([]*fileInformation, 0, len(events))
OUTER:
	for _, event := range events {
		fullpath := event.Path()
		relativePath := getRelativeFromFullPath(fullpath, u.config.WatchPath)
		// Exclude files on the exclude list
		if u.config.ignoreMatcher != nil {
			if u.config.ignoreMatcher.MatchesPath(relativePath) {
				continue OUTER // Path is excluded
			}
		}
		// Stat decides the event kind: existing path -> create/modify,
		// missing path -> remove.
		stat, err := os.Stat(fullpath)
		if err == nil { // Does exist -> Create File or Folder
			if fileMap[relativePath] != nil {
				if stat.IsDir() {
					continue // Folder already exists
				} else {
					if ceilMtime(stat.ModTime()) == fileMap[relativePath].Mtime &&
						stat.Size() == fileMap[relativePath].Size {
						continue // File did not change or was changed by downstream
					}
				}
			}
			// New Create Task
			changes = append(changes, &fileInformation{
				Name: relativePath,
				Mtime: ceilMtime(stat.ModTime()),
				Size: stat.Size(),
				IsDirectory: stat.IsDir(),
			})
		} else { // Does not exist -> Remove
			if fileMap[relativePath] == nil {
				continue // File / Folder was already deleted from map so event was already processed or should not be processed
			}
			// New Remove Task
			changes = append(changes, &fileInformation{
				Name: relativePath,
			})
		}
	}
	return changes
}
// applyRemoves deletes the given paths in the remote container by sending
// "rm -R" commands through the shell, batching at most 50 paths per
// command, and removes the corresponding entries from fileMap. Each
// command prints EndAck regardless of rm's outcome, and applyRemoves
// blocks until that ack is seen. The caller must hold the fileMap lock.
func (u *upstream) applyRemoves(fileMap map[string]*fileInformation, files []*fileInformation) error {
	u.config.Logf("[Upstream] Handling %d removes\n", len(files))
	// Send rm commands with max 50 input args
	for i := 0; i < len(files); i = i + 50 {
		rmCommand := "rm -R "
		removeArguments := 0
		for j := 0; j < 50 && i+j < len(files); j++ {
			relativePath := files[i+j].Name
			if fileMap[relativePath] != nil {
				// Escape single quotes so the path survives shell quoting.
				relativePath = strings.Replace(relativePath, "'", "\\'", -1)
				rmCommand += "'" + u.config.DestPath + relativePath + "' "
				removeArguments++
				// Keep the local index in sync with the remote removal.
				if fileMap[relativePath].IsDirectory {
					u.config.fileIndex.RemoveDirInFileMap(relativePath)
				} else {
					delete(fileMap, relativePath)
				}
			}
		}
		if removeArguments > 0 {
			// Print EndAck on success AND failure so the wait below always returns.
			rmCommand += " >/dev/null && printf \"" + EndAck + "\" || printf \"" + EndAck + "\"\n"
			// u.config.Logf("[Upstream] Handle command %s", rmCommand)
			if u.stdinPipe != nil {
				_, err := u.stdinPipe.Write([]byte(rmCommand))
				if err != nil {
					return errors.Trace(err)
				}
				waitTill(EndAck, u.stdoutPipe)
			}
		}
	}
	return nil
}
// startShell opens a plain "sh" session in the target container via
// kubectl exec and wires up its stdio pipes. Remote stderr is streamed to
// the local stderr in a background goroutine.
func (u *upstream) startShell() error {
	stdinPipe, stdoutPipe, stderrPipe, err := kubectl.Exec(u.config.Kubectl, u.config.Pod, u.config.Container.Name, []string{"sh"}, false, nil)
	if err != nil {
		return errors.Trace(err)
	}
	u.stdinPipe = stdinPipe
	u.stdoutPipe = stdoutPipe
	u.stderrPipe = stderrPipe
	// Mirror remote stderr locally; exits when the pipe closes.
	go func() {
		pipeStream(os.Stderr, u.stderrPipe)
	}()
	return nil
}
// sendFiles packs the given files into a tar archive, validates it and
// streams it to the remote container via upload. It is a no-op (nil
// error) when nothing was actually written into the archive.
func (u *upstream) sendFiles(files []*fileInformation) error {
	filename, writtenFiles, err := writeTar(files, u.config)
	if err != nil {
		return errors.Trace(err)
	}
	if len(writtenFiles) == 0 {
		// Nothing made it into the archive; nothing to upload.
		return nil
	}
	f, err := os.Open(filename)
	if err != nil {
		return errors.Trace(err)
	}
	defer f.Close()
	stat, err := f.Stat()
	if err != nil {
		return errors.Trace(err)
	}
	// A tar stream is always a multiple of the 512-byte block size; the
	// upload protocol relies on that when counting received bytes.
	if stat.Size()%512 != 0 {
		return errors.New("[Upstream] Tar archive has wrong size (Not dividable by 512)")
	}
	return u.upload(f, strconv.Itoa(int(stat.Size())), writtenFiles)
}
// upload streams the tar archive in file to the remote shell and extracts
// it there. The remote script backgrounds a `cat` that copies the shell's
// stdin (via /proc/$pid/fd/0) into a temp file, prints StartAck, polls the
// temp file's size until fileSize bytes have arrived, kills the cat,
// untars into DestPath and prints EndAck. Locally, the archive is written
// in 512-byte blocks, the temp archive is deleted, and the file index is
// updated with the uploaded entries. Holds the fileMap lock for the whole
// call.
//
// NOTE(review): each iteration writes the full 512-byte buffer even when
// file.Read returned fewer bytes — correct only if reads from the archive
// always come back in full 512-byte blocks; confirm short reads cannot
// inject stale buffer bytes.
func (u *upstream) upload(file *os.File, fileSize string, writtenFiles map[string]*fileInformation) error {
	u.config.fileIndex.fileMapMutex.Lock()
	defer u.config.fileIndex.fileMapMutex.Unlock()
	// TODO: Implement timeout to prevent endless loop
	cmd := "fileSize=" + fileSize + `;
	tmpFile="/tmp/devspace-upstream";
	mkdir -p /tmp;
	mkdir -p '` + u.config.DestPath + `';
	pid=$$;
	cat </proc/$pid/fd/0 >"$tmpFile" &
	ddPid=$!;
	echo "` + StartAck + `";
	while true; do
		bytesRead=$(stat -c "%s" "$tmpFile" 2>/dev/null || printf "0");
		if [ "$bytesRead" = "$fileSize" ]; then
			kill $ddPid;
			break;
		fi;
		sleep 0.1;
	done;
	tar xf "$tmpFile" -C '` + u.config.DestPath + `/.' 2>/dev/null;
	echo "` + EndAck + `";
` // We need that extra new line or otherwise the command is not sent
	if u.stdinPipe != nil {
		n, err := u.stdinPipe.Write([]byte(cmd))
		if err != nil {
			u.config.Logf("[Upstream] Writing to u.stdinPipe failed: %s\n", err.Error())
			return errors.Trace(err)
		}
		// Wait till confirmation
		err = waitTill(StartAck, u.stdoutPipe)
		if err != nil {
			return errors.Trace(err)
		}
		// Stream the archive in tar block sized (512 byte) chunks.
		buf := make([]byte, 512, 512)
		for {
			n, err = file.Read(buf)
			if n == 0 {
				if err == nil {
					continue
				}
				if err == io.EOF {
					break
				}
				return errors.Trace(err)
			}
			// process buf
			if err != nil && err != io.EOF {
				return errors.Trace(err)
			}
			n, err = u.stdinPipe.Write(buf)
			if err != nil {
				return errors.Trace(err)
			}
			if n < 512 {
				return errors.New("[Upstream] Only " + strconv.Itoa(n) + " Bytes written to stdin pipe (512 expected)")
			}
		}
	}
	// Delete file
	file.Close()
	err := os.Remove(file.Name())
	if err != nil {
		return errors.Trace(err)
	}
	// Wait till confirmation
	err = waitTill(EndAck, u.stdoutPipe)
	if err != nil {
		return errors.Trace(err)
	}
	// Update filemap
	for _, element := range writtenFiles {
		u.config.fileIndex.CreateDirInFileMap(path.Dir(element.Name))
		u.config.fileIndex.fileMap[element.Name] = element
	}
	return nil
}
|
[
0,
4
] |
package kmeans
import (
"fmt"
"math/rand"
)
// randomInt returns a pseudo-random integer in the half-open interval
// [min, max). Like rand.Intn, it panics when max <= min.
func randomInt(min, max int) int {
	span := max - min
	return rand.Intn(span) + min
}
// ExampleNew clusters 60 two-dimensional points — three groups of 20,
// shifted apart so they are clearly separable — into 3 clusters and
// prints the within-cluster variation.
// NOTE(review): the // Output value depends on math/rand's default seed
// producing the same sequence — confirm this stays stable across Go
// versions.
func ExampleNew() {
	sampleData := make([][]int, 60)
	for i := range sampleData {
		sampleData[i] = []int{randomInt(0, 10), randomInt(0, 10)}
	}
	for i := 0; i < 20; i++ {
		// clearly separable: shift one group along x, another along y
		sampleData[i+20][0] = sampleData[i+20][0] + 6
		sampleData[i+40][1] = sampleData[i+40][1] + 6
	}
	clusterCount := 3
	cmap, _ := New(sampleData, clusterCount)
	fmt.Printf("Within cluster variation: %.3f", cmap.Wss)
	// Output: Within cluster variation: 731.525
}
|
[
0,
1
] |
package solution
// Solution is solution
func Solution(A []int, K int) []int {
aLen := len(A)
retA := make([]int, aLen, aLen)
for index, item := range A {
retA[((index + K) % aLen)] = item
}
return retA
}
|
[
2
] |
package initialize
import (
"bytes"
"fmt"
"gin-web/pkg/global"
"gin-web/pkg/utils"
"github.com/gin-gonic/gin"
"github.com/gobuffalo/packr/v2"
"github.com/spf13/viper"
"os"
"strconv"
"strings"
)
const (
	configBoxName = "gin-conf-box" // name of the packr box holding the config files
	configType = "yml"             // config format passed to viper
	// Config file directory. packr.Box paths are relative to this package's
	// directory, and file names must be written out in full even though
	// viper could infer them.
	configPath = "../conf"
	developmentConfig = "config.dev.yml"  // defaults, always loaded first
	stagingConfig = "config.stage.yml"    // overlay applied for stage
	productionConfig = "config.prod.yml"  // overlay applied for prod
	defaultConnectTimeout = 5             // used when the configured value is < 1
)
// ctx is the gin context captured at startup; used to generate the
// startup request id.
var ctx *gin.Context
// Config initializes the global configuration. It resolves the config
// location (GIN_WEB_CONF env var, falling back to files packr-embedded in
// the binary), loads the development config as defaults, overlays the
// environment-specific file selected by GIN_WEB_MODE, applies CFG_*
// environment overrides, and finally normalizes derived fields (defaults,
// table prefix, RSA keys, sentinel addresses).
//
// Bug fix: when System.ApiVersion was empty, the original code overwrote
// System.UrlPathPrefix with "v1" instead of setting ApiVersion.
func Config(c *gin.Context) {
	ctx = c
	// Configuration "box" abstracting where config files are read from.
	var box global.CustomConfBox
	ginWebConf := strings.ToLower(os.Getenv("GIN_WEB_CONF"))
	// Read the config directory from the environment, if set.
	if ginWebConf != "" {
		if strings.HasPrefix(ginWebConf, "/") {
			// Absolute path: use as-is.
			box.ConfEnv = ginWebConf
		} else {
			// Relative path: resolve against the working directory.
			box.ConfEnv = utils.GetWorkDir() + "/" + ginWebConf
		}
	}
	// Dedicated viper instance (multiple instances could read multiple files).
	box.ViperIns = viper.New()
	// No env override: read the config files packed into the binary.
	if box.ConfEnv == "" {
		// packr embeds the files, which is convenient for docker images.
		box.PackrBox = packr.New(configBoxName, configPath)
	}
	global.ConfBox = &box
	v := box.ViperIns
	// Load the development config first; it supplies the default values.
	readConfig(v, developmentConfig)
	// Register every development setting as a viper default.
	settings := v.AllSettings()
	for index, setting := range settings {
		v.SetDefault(index, setting)
	}
	// Determine the runtime environment.
	env := strings.ToLower(os.Getenv("GIN_WEB_MODE"))
	configName := ""
	if env == global.Stage {
		configName = stagingConfig
	} else if env == global.Prod {
		configName = productionConfig
	} else {
		env = global.Dev
	}
	global.Mode = env
	if configName != "" {
		// Overlay only the differences for the selected environment.
		readConfig(v, configName)
	}
	// Unmarshal into the typed configuration struct.
	if err := v.Unmarshal(&global.Conf); err != nil {
		panic(fmt.Sprintf("初始化配置文件失败: %v, 环境变量GIN_WEB_CONF: %s", err, global.ConfBox.ConfEnv))
	}
	// Apply environment overrides (e.g. system.port -> CFG_SYSTEM_PORT).
	readConfigFromEnv(&global.Conf)
	if global.Conf.System.ConnectTimeout < 1 {
		global.Conf.System.ConnectTimeout = defaultConnectTimeout
	}
	if strings.TrimSpace(global.Conf.System.UrlPathPrefix) == "" {
		global.Conf.System.UrlPathPrefix = "api"
	}
	if strings.TrimSpace(global.Conf.System.ApiVersion) == "" {
		// FIX: previously this assigned UrlPathPrefix = "v1", clobbering
		// the prefix and leaving ApiVersion empty.
		global.Conf.System.ApiVersion = "v1"
	}
	global.Conf.Redis.BinlogPos = fmt.Sprintf("%s_%s", global.Conf.Mysql.Database, global.Conf.Redis.BinlogPos)
	// Strip a trailing underscore from the table prefix.
	if strings.TrimSpace(global.Conf.Mysql.TablePrefix) != "" && strings.HasSuffix(global.Conf.Mysql.TablePrefix, "_") {
		global.Conf.Mysql.TablePrefix = strings.TrimSuffix(global.Conf.Mysql.TablePrefix, "_")
	}
	// Initialize OperationLogDisabledPathArr from the comma-separated list.
	global.Conf.System.OperationLogDisabledPathArr = make([]string, 0)
	if strings.TrimSpace(global.Conf.System.OperationLogDisabledPaths) != "" {
		global.Conf.System.OperationLogDisabledPathArr = strings.Split(global.Conf.System.OperationLogDisabledPaths, ",")
	}
	// Load the RSA key pair (prefer the config box, fall back to the filesystem).
	publicBytes, err := global.ConfBox.Find(global.Conf.System.RSAPublicKey)
	if err != nil || len(publicBytes) == 0 {
		publicBytes = utils.RSAReadKeyFromFile(global.Conf.System.RSAPublicKey)
	}
	if len(publicBytes) == 0 {
		fmt.Println("RSA公钥未能加载, 请检查路径: ", global.Conf.System.RSAPublicKey)
	} else {
		global.Conf.System.RSAPublicBytes = publicBytes
	}
	privateBytes, err := global.ConfBox.Find(global.Conf.System.RSAPrivateKey)
	if err != nil || len(privateBytes) == 0 {
		privateBytes = utils.RSAReadKeyFromFile(global.Conf.System.RSAPrivateKey)
	}
	if len(privateBytes) == 0 {
		fmt.Println("RSA私钥未能加载, 请检查路径: ", global.Conf.System.RSAPrivateKey)
	} else {
		global.Conf.System.RSAPrivateBytes = privateBytes
	}
	// Initialize Sentinel.AddressArr from the comma-separated list.
	global.Conf.Redis.Sentinel.AddressArr = make([]string, 0)
	if strings.TrimSpace(global.Conf.Redis.Sentinel.Addresses) != "" {
		global.Conf.Redis.Sentinel.AddressArr = strings.Split(global.Conf.Redis.Sentinel.Addresses, ",")
	}
	fmt.Println("初始化配置文件完成, 环境变量GIN_WEB_CONF: ", global.ConfBox.ConfEnv)
}
// readConfig loads the named config file through the global config box
// into the given viper instance; it panics if the file cannot be found or
// parsed (configuration is mandatory at startup).
func readConfig(v *viper.Viper, configFile string) {
	v.SetConfigType(configType)
	config, err := global.ConfBox.Find(configFile)
	if err != nil {
		panic(fmt.Sprintf("初始化配置文件失败: %v, 环境变量GIN_WEB_CONF: %s", err, global.ConfBox.ConfEnv))
	}
	// Parse the raw bytes with viper.
	if err = v.ReadConfig(bytes.NewReader(config)); err != nil {
		panic(fmt.Sprintf("初始化配置文件失败: %v, 环境变量GIN_WEB_CONF: %s", err, global.ConfBox.ConfEnv))
	}
}
// readConfigFromEnv overlays configuration values from environment
// variables (handy in docker images, where editing files is awkward —
// change the env var and restart). It round-trips the config through a
// JSON map, applies CFG_*-prefixed overrides, and writes the result back.
func readConfigFromEnv(defaultConfig *global.Configuration) {
	cfgMap := make(map[string]interface{}, 0)
	utils.Struct2StructByJson(defaultConfig, &cfgMap)
	newMap := parseCfgMap("", cfgMap)
	utils.Struct2StructByJson(newMap, &defaultConfig)
}
// parseCfgMap walks a JSON-decoded configuration map and, for every leaf
// value, checks whether an environment variable named
// <PARENT>_<KEY> (snake-cased, upper-cased, rooted at "CFG") overrides
// it. It returns a new map with overrides applied; m is not modified.
// JSON leaves are limited to string/bool/float64, plus nested maps.
func parseCfgMap(parentKey string, m map[string]interface{}) map[string]interface{} {
	if parentKey == "" {
		parentKey = "CFG"
	}
	newMap := make(map[string]interface{})
	for key, item := range m {
		newKey := strings.ToUpper(fmt.Sprintf("%s_%s", utils.SnakeCase(parentKey), utils.SnakeCase(key)))
		// Type switch replaces the original repeated type assertions.
		switch val := item.(type) {
		case map[string]interface{}:
			// Nested object: recurse with the extended key prefix.
			newMap[key] = parseCfgMap(newKey, val)
			continue
		case string:
			if env := strings.TrimSpace(os.Getenv(newKey)); env != "" {
				newMap[key] = env
				fmt.Println(fmt.Sprintf("[从环境变量中加载配置]读取到%s: %v", newKey, newMap[key]))
				continue
			}
		case bool:
			if env := strings.TrimSpace(os.Getenv(newKey)); env != "" {
				// Only record (and log) a change when the value actually
				// flips; this collapses the original true->false and
				// false->true branches, which had identical logic.
				if b, err := strconv.ParseBool(env); err == nil && b != val {
					newMap[key] = b
					fmt.Println(fmt.Sprintf("[从环境变量中加载配置]读取到%s: %v", newKey, newMap[key]))
					continue
				}
			}
		case float64:
			if env := strings.TrimSpace(os.Getenv(newKey)); env != "" {
				if f, err := strconv.ParseFloat(env, 64); err == nil {
					newMap[key] = f
					fmt.Println(fmt.Sprintf("[从环境变量中加载配置]读取到%s: %v", newKey, newMap[key]))
					continue
				}
			}
		}
		// No override found: keep the original value.
		newMap[key] = item
	}
	return newMap
}
|
[
5
] |
package main
import (
"github.com/urfave/cli"
"github.com/golang/glog"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// VirtualServiceCommands is the CLI command group for managing Istio
// VirtualServices (create/update/delete/get/list).
var VirtualServiceCommands cli.Command

// init wires up the "virtual-service" (alias "vs") command tree.
func init() {
	VirtualServiceCommands = cli.Command{
		// gofmt fix: the Name key and its value were split across two lines.
		Name:    "virtual-service",
		Aliases: []string{"vs"},
		Usage:   "virtual-service create|update|delete|get|list",
		Subcommands: []cli.Command{
			{
				Name:    "list",
				Aliases: []string{"ls"},
				Usage:   "list virtual services",
				Action:  listVirtualService,
			},
			{
				Name:  "get",
				Usage: "get virtual service",
				Flags: []cli.Flag{
					cli.StringFlag{
						Name:  "name, n",
						Value: "",
						Usage: "name of virtual service",
					},
				},
				Action: getVirtualService,
			},
			{
				Name:  "create",
				Usage: "create a virtual service",
				Flags: []cli.Flag{
					cli.StringFlag{
						Name:  "file, f",
						Value: "",
						Usage: "file to load description from",
					},
				},
				Action: createVirtualService,
			},
			{
				Name:  "delete",
				Usage: "delete a virtual service",
				Flags: []cli.Flag{
					cli.StringFlag{
						Name:  "file, f",
						Value: "",
						Usage: "file to load description from",
					},
				},
				Action: deleteVirtualService,
			},
			{
				Name:  "update",
				Usage: "update a virtual service",
				Flags: []cli.Flag{
					cli.StringFlag{
						Name:  "file, f",
						Value: "",
						Usage: "file to load description from",
					},
				},
				Action: updateVirtualService,
			},
		},
	}
}
// listVirtualService prints every VirtualService in the configured namespace
// in the format selected by the global --format flag (json or yaml).
func listVirtualService(c *cli.Context) error {
	ic, namespace, err := setup(c)
	if err != nil {
		return err
	}
	vsList, err := ic.NetworkingV1alpha3().VirtualServices(namespace).List(metav1.ListOptions{})
	if err != nil {
		glog.Fatalf("Failed to get VirtualService in %s namespace: %s", namespace, err)
	}
	format := c.GlobalString("format")
	if format == "json" {
		fmt.Println(toJsonString(vsList))
	} else if format == "yaml" {
		fmt.Println(toYamlString(vsList))
	} else {
		return cli.NewExitError("Unknown format", 1)
	}
	// The commented-out debug loop and a second `return nil` that used to
	// follow here were unreachable and have been removed.
	return nil
}
// getVirtualService lists VirtualServices matching the --name flag and logs
// their hosts.
func getVirtualService(c *cli.Context) error {
	ic, namespace, err := setup(c)
	if err != nil {
		return err
	}
	// NOTE(review): passing the namespace as a LabelSelector and the bare name
	// as a FieldSelector looks wrong — field selectors normally take the form
	// "metadata.name=<name>". Preserved as-is pending confirmation.
	vsList, err := ic.NetworkingV1alpha3().VirtualServices(namespace).List(metav1.ListOptions{LabelSelector: namespace, FieldSelector: c.String("name")})
	if err != nil {
		// This error was previously ignored, which would have panicked below
		// on a nil list. Handled the same way listVirtualService handles it.
		glog.Fatalf("Failed to get VirtualService in %s namespace: %s", namespace, err)
	}
	for i := range vsList.Items {
		vs := vsList.Items[i]
		glog.Infof("Index: %d VirtualService Hosts: %+v\n", i, vs.Spec.GetHosts())
	}
	return nil
}
// createVirtualService creates a virtual service from the file given via -f.
// Currently a stub that only echoes its arguments.
func createVirtualService(c *cli.Context) error {
	// The previous empty `if c.GlobalIsSet("debug") {}` block did nothing and
	// has been removed; reintroduce debug handling here when implemented.
	fmt.Printf("create virtual service %s %+v\n", c.Args().First(), c)
	return nil
}
// deleteVirtualService deletes the virtual service described by the -f file.
// Currently a stub that only echoes its arguments.
func deleteVirtualService(c *cli.Context) error {
	// The previous empty `if c.GlobalIsSet("debug") {}` block did nothing and
	// has been removed; reintroduce debug handling here when implemented.
	fmt.Printf("delete virtual service %s %+v\n", c.Args().First(), c)
	return nil
}
// updateVirtualService updates the virtual service described by the -f file.
// Currently a stub that only echoes its arguments.
func updateVirtualService(c *cli.Context) error {
	// The previous empty `if c.GlobalIsSet("debug") {}` block did nothing and
	// has been removed; reintroduce debug handling here when implemented.
	fmt.Printf("update virtual service %s %+v\n", c.Args().First(), c)
	return nil
}
|
[
5
] |
package lib
import (
"fmt"
)
type flag uint16
const (
INVALID flag = 0
REVERSED = 1 << 1
GROUPED = 1 << 2
ANALYSED = 1 << 3
RELABLED = 1 << 4
NORMALISED = 1 << 5
NUMBERED = 1 << 6
CANONICALISED = 1 << 7
RECURSE = 1 << 8
)
// Describes a test failure. A test failure is an instance of a coin and weight such that the
// results of the weighings for that coin and weight are indistiguishable from some other coin
// and weight.
type Failure struct {
Coin int `json:"coin"`
Weight Weight `json:"weight"`
}
// Describes a possibly invalid solution to the 12 coins problem.
type Solution struct {
encoding
Weighings [3]Weighing `json:"-"`
Coins []int `json:"coins,omitempty"` // a mapping between abs(9*a+3*b+c-13)-1 and the coin identity
Weights []Weight `json:"weights,omitempty"` // a mapping between sgn(9*a+3*b+c-13)-1 and the coin weight
Unique CoinSet `json:"-"` // the coins that appear in one weighing
Pairs [3]CoinSet `json:"-"` // the pairs that appear in exactly two weighings
Triples CoinSet `json:"-"` // the coins that appear in all 3 weighings
Failures []Failure `json:"failures,omitempty"` // a list of tests for which the solution is ambiguous
Structure [3]Structure `json:"-"` // the structure of the permutation
flags flag // indicates that invariants are true
order [3]int // permutation that maps from canonical order to this order
flips Flips // permutation that flips from canonical order to this order
}
// Decide the relative weight of a coin by generating a linear combination of the three weighings and using
// this to index the array.
//
// Each weighing result a, b, c is folded into i = 9a+3b+c-13, which maps the
// 27 possible outcome triples onto [-13, 13]. The magnitude of i selects the
// coin from s.Coins and its weight from s.Weights (see the field docs on
// Solution); the sign of i flips the weight.
func (s *Solution) decide(scale Scale) (int, Weight, int) {
	z := s.GetZeroCoin()
	scale.SetZeroCoin(z)
	// Perform the three configured weighings.
	results := [3]Weight{}
	results[0] = scale.Weigh(s.Weighings[0].Left().AsCoins(z), s.Weighings[0].Right().AsCoins(z))
	results[1] = scale.Weigh(s.Weighings[1].Left().AsCoins(z), s.Weighings[1].Right().AsCoins(z))
	results[2] = scale.Weigh(s.Weighings[2].Left().AsCoins(z), s.Weighings[2].Right().AsCoins(z))
	// If a flip is encoded, invert that weighing's result.
	if s.encoding.Flip != nil {
		results[*s.encoding.Flip] = Heavy - results[*s.encoding.Flip]
	}
	a := results[0]
	b := results[1]
	c := results[2]
	i := int(a*9 + b*3 + c - 13) // must be between 0 and 26, inclusive.
	o := abs(i)
	if len(s.Coins) == 12 {
		// 12-coin mapping: index by magnitude, 1-based.
		if o < 1 || o > 12 {
			// this can only happen if flip hasn't be set correctly.
			panic(fmt.Errorf("index out of bounds: %d, %v", o, []Weight{a, b, c}))
		}
		o = o - 1
	} else {
		// Full 27-entry mapping: shift i into [0, 26].
		o = i + 13
	}
	f := s.Coins[o]
	w := s.Weights[o]
	// A positive linear combination means the recorded weight is inverted.
	if i > 0 {
		w = Heavy - w
	}
	return f, w, o
}
// The internal reset clears all derived analysis state (unique/pair/triple
// sets, structure, and the GROUPED/ANALYSED/CANONICALISED flags) while keeping
// the reversed state and the retained encoding fields intact.
func (s *Solution) reset() {
	s.Unique = nil
	s.Triples = nil
	for i := range s.Pairs {
		s.Pairs[i] = nil
	}
	for i := range s.Structure {
		s.Structure[i] = nil
	}
	// Keep only ZeroCoin and Flip from the previous encoding.
	s.encoding = encoding{
		ZeroCoin: s.encoding.ZeroCoin,
		Flip:     s.encoding.Flip,
	}
	s.flags &^= GROUPED | ANALYSED | CANONICALISED
}
// The external Reset creates a new clone in which only the weighings are
// preserved; coin/weight mappings, flip and all flags are cleared.
func (s *Solution) Reset() *Solution {
	clone := s.Clone()
	clone.reset()
	clone.Coins = []int{}
	clone.Weights = []Weight{}
	clone.flags = INVALID
	clone.encoding.Flip = nil
	return clone
}
// markInvalid clears every flag except RECURSE, marking the solution invalid.
func (s *Solution) markInvalid() {
	s.flags = INVALID | (s.flags & RECURSE)
}
// Decide invokes the internal decide method to determine which coin is
// counterfeit and what its relative weight is. The solution must have been
// reversed first; calling Decide on an un-reversed solution panics.
func (s *Solution) Decide(scale Scale) (int, Weight) {
	if s.flags&REVERSED == 0 {
		panic(fmt.Errorf("This solution must be reversed first."))
	}
	coin, weight, _ := s.decide(scale)
	return coin, weight
}
// SetZeroCoin configures the zero coin of the solution. Passing ONE_BASED
// clears the encoding back to the default (coin 1).
func (s *Solution) SetZeroCoin(coin int) {
	if coin != ONE_BASED {
		s.encoding.ZeroCoin = pi(coin)
		return
	}
	s.encoding.ZeroCoin = nil
}
// GetZeroCoin returns the configured zero coin, defaulting to 1 when none has
// been set.
func (s *Solution) GetZeroCoin() int {
	if z := s.encoding.ZeroCoin; z != nil {
		return *z
	}
	return 1
}
// Create a deep clone of the receiver.
func (s *Solution) Clone() *Solution {
	// Deep-copy the Flip pointer so the clone cannot alias the receiver's
	// encoding state. (Previously tmp was computed here but never used —
	// the clone silently shared s.encoding.Flip.)
	tmp := s.encoding.Flip
	if tmp != nil {
		tmp = pi(*tmp)
	}
	clone := Solution{
		encoding: encoding{
			ZeroCoin: s.encoding.ZeroCoin,
			Flip:     tmp,
		},
		Weighings: [3]Weighing{},
		Coins:     make([]int, len(s.Coins)),
		Weights:   make([]Weight, len(s.Weights)),
		Unique:    s.Unique,
		Triples:   s.Triples,
		Failures:  make([]Failure, len(s.Failures)),
		flags:     s.flags,
		order:     s.order,
		flips:     s.flips,
	}
	copy(clone.Pairs[0:], s.Pairs[0:])
	copy(clone.Weighings[0:], s.Weighings[0:])
	copy(clone.Coins[0:], s.Coins[0:])
	copy(clone.Weights[0:], s.Weights[0:])
	copy(clone.Failures[0:], s.Failures[0:])
	copy(clone.Structure[0:], s.Structure[0:])
	return &clone
}
// Sort the coins in each weighing in increasing numerical order.
func (s *Solution) Normalize() *Solution {
	clone := s.Clone()
	for i := range clone.Weighings {
		clone.Weighings[i] = NewWeighing(clone.Weighings[i].Pan(0).Sort(), clone.Weighings[i].Pan(1).Sort())
	}
	// Set NORMALISED and clear CANONICALISED. The previous expression
	// `clone.flags |= NORMALISED &^ (CANONICALISED)` never cleared the
	// CANONICALISED bit, because &^ binds tighter than | in Go, reducing the
	// right-hand side to just NORMALISED.
	clone.flags = (clone.flags | NORMALISED) &^ CANONICALISED
	return clone
}
// Returns a new solution such that the LLL weighing is always invalid.
func (s *Solution) Flip() (*Solution, error) {
	var r *Solution
	// Work on a reversed clone: reverse first if needed, otherwise clone.
	if s.flags&REVERSED == 0 {
		var err error
		if r, err = s.Reverse(); err != nil {
			return r, err
		}
	} else {
		r = s.Clone()
	}
	r.reset()
	// If a flip is encoded, physically swap the pans of that weighing,
	// drop the flip marker, and re-reverse the now-invalidated solution.
	if r.encoding.Flip != nil {
		w := r.Weighings[*r.encoding.Flip]
		r.Weighings[*r.encoding.Flip] = NewWeighing(w.Right(), w.Left())
		r.encoding.Flip = nil
		r.markInvalid()
		return r.Reverse()
	}
	return r, nil
}
// IsValid answers true if the solution is a valid solution, i.e. it has been
// (or can be) successfully reversed.
func (s *Solution) IsValid() bool {
	if s.flags&REVERSED != 0 {
		return true
	}
	c, err := s.Reverse()
	return err == nil && c.flags&REVERSED != 0
}
// N returns the solution's derived N value, analysing the structure first.
// An error is returned when analysis fails or no N could be derived.
func (s *Solution) N() (uint, error) {
	// Use a distinct name for the analysed clone instead of shadowing the
	// receiver, as the original did.
	analysed, err := s.AnalyseStructure()
	if err != nil {
		return 0, err
	}
	if analysed.encoding.N == nil {
		return 0, fmt.Errorf("getN: failed to derive N")
	}
	return *analysed.encoding.N, nil
}
|
[
0
] |
package database
import (
"fmt"
"net"
"strings"
"time"
)
// memcachedCheck dials the given memcached instance over TCP, issues a "stats"
// command and reports whether the reply looks like a memcached response
// (contains "STAT"). Dial, read and write are all bounded by 5s deadlines.
func memcachedCheck(Host string, Port int) (bool, error) {
	client, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", Host, Port), 5*time.Second)
	if err != nil {
		return false, err
	}
	// The connection was previously leaked on every path; always release it.
	defer client.Close()
	if err = client.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
		return false, err
	}
	// The memcached text protocol terminates commands with CRLF, not bare LF.
	if _, err = client.Write([]byte("stats\r\n")); err != nil {
		return false, err
	}
	rev := make([]byte, 1024)
	n, err := client.Read(rev)
	if err != nil {
		return false, err
	}
	return strings.Contains(string(rev[:n]), "STAT"), nil
}
|
[
2
] |
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gammabench
import "testing"
import . "math"
// Global exported variables are used to store the
// return values of functions measured in the benchmarks.
// Storing the results in these variables prevents the compiler
// from completely optimizing the benchmarked functions away.
var (
GlobalI int
GlobalB bool
GlobalF float64
)
// BenchmarkLgamma16 measures Lgamma16 (presumably a snapshot of math.Lgamma
// from a specific Go release — confirm against the sibling source files).
// Results are sunk into the Global* variables so the call cannot be
// optimized away.
func BenchmarkLgamma16(b *testing.B) {
	x := 0.0
	y := 0
	for i := 0; i < b.N; i++ {
		x, y = Lgamma16(2.5)
	}
	GlobalF = x
	GlobalI = y
}

// BenchmarkLgamma17 measures Lgamma17 under the same conditions.
func BenchmarkLgamma17(b *testing.B) {
	x := 0.0
	y := 0
	for i := 0; i < b.N; i++ {
		x, y = Lgamma17(2.5)
	}
	GlobalF = x
	GlobalI = y
}

// BenchmarkLgammaZZZ measures the candidate LgammaZZZ implementation.
func BenchmarkLgammaZZZ(b *testing.B) {
	x := 0.0
	y := 0
	for i := 0; i < b.N; i++ {
		x, y = LgammaZZZ(2.5)
	}
	GlobalF = x
	GlobalI = y
}
// TestLgamma checks each Lgamma variant against the reference values below:
// ordinary inputs (vf/lgamma) with a relative-tolerance comparison, and
// special cases (vflgammaSC/lgammaSC) with an exact bit-pattern comparison.
func TestLgamma(t *testing.T) {
	// try builds a subtest body for one Lgamma implementation.
	try := func(Lgamma func(float64) (float64, int)) func(t *testing.T) {
		return func(t *testing.T) {
			for i := 0; i < len(vf); i++ {
				if f, s := Lgamma(vf[i]); !close(lgamma[i].f, f) || lgamma[i].i != s {
					t.Errorf("Lgamma(%g) = %g, %d, want %g, %d", vf[i], f, s, lgamma[i].f, lgamma[i].i)
				}
			}
			for i := 0; i < len(vflgammaSC); i++ {
				if f, s := Lgamma(vflgammaSC[i]); !alike(lgammaSC[i].f, f) || lgammaSC[i].i != s {
					t.Errorf("Lgamma(%g) = %g, %d, want %g, %d", vflgammaSC[i], f, s, lgammaSC[i].f, lgammaSC[i].i)
				}
			}
		}
	}
	t.Run("Lgamma16", try(Lgamma16))
	t.Run("Lgamma17", try(Lgamma17))
	t.Run("LgammaZZZ", try(LgammaZZZ))
}
// vf holds the ordinary test inputs shared by the Lgamma tests.
var vf = []float64{
	4.9790119248836735e+00,
	7.7388724745781045e+00,
	-2.7688005719200159e-01,
	-5.0106036182710749e+00,
	9.6362937071984173e+00,
	2.9263772392439646e+00,
	5.2290834314593066e+00,
	2.7279399104360102e+00,
	1.8253080916808550e+00,
	-8.6859247685756013e+00,
}

// fi pairs an Lgamma result value with its sign return.
type fi struct {
	f float64
	i int
}

// lgamma holds the expected (value, sign) result for each entry of vf.
var lgamma = []fi{
	{3.146492141244545774319734e+00, 1},
	{8.003414490659126375852113e+00, 1},
	{1.517575735509779707488106e+00, -1},
	{-2.588480028182145853558748e-01, 1},
	{1.1989897050205555002007985e+01, 1},
	{6.262899811091257519386906e-01, 1},
	{3.5287924899091566764846037e+00, 1},
	{4.5725644770161182299423372e-01, 1},
	{-6.363667087767961257654854e-02, 1},
	{-1.077385130910300066425564e+01, -1},
}

// vflgammaSC holds the special-case inputs (infinities, non-positive
// integers, NaN).
var vflgammaSC = []float64{
	Inf(-1),
	-3,
	0,
	1,
	2,
	Inf(1),
	NaN(),
}

// lgammaSC holds the expected results for each entry of vflgammaSC.
var lgammaSC = []fi{
	{Inf(-1), 1},
	{Inf(1), 1},
	{Inf(1), 1},
	{0, 1},
	{0, 1},
	{Inf(1), 1},
	{NaN(), 1},
}
// tolerance reports whether a is within relative tolerance e of the expected
// value b. Identical values always match, even when scaling e by a denormal b
// would underflow it to zero.
func tolerance(a, b, e float64) bool {
	if a == b {
		return true
	}
	d := Abs(a - b)
	// Scale the tolerance relative to the expected value b (when non-zero);
	// note: b is the correct value, a is the actual value.
	if b != 0 {
		e = Abs(e * b)
	}
	return d < e
}
// close and veryclose compare with fixed relative tolerances; soclose takes
// the tolerance explicitly. All three delegate to tolerance above.
func close(a, b float64) bool { return tolerance(a, b, 1e-14) }
func veryclose(a, b float64) bool { return tolerance(a, b, 4e-16) }
func soclose(a, b, e float64) bool { return tolerance(a, b, e) }
func alike(a, b float64) bool {
switch {
case IsNaN(a) && IsNaN(b):
return true
case a == b:
return Signbit(a) == Signbit(b)
}
return false
}
|
[
0
] |
package account
import (
"context"
"errors"
"fmt"
"github.com/gidyon/antibug/internal/pkg/auth"
"github.com/gidyon/antibug/internal/pkg/errs"
"github.com/gidyon/antibug/pkg/api/account"
"github.com/golang/protobuf/ptypes/empty"
"github.com/google/uuid"
"github.com/jinzhu/gorm"
"golang.org/x/crypto/bcrypt"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"strings"
)
// accountAPIServer implements account.AccountAPIServer backed by a SQL
// database (gorm), a grpclog logger and a JWT-based auth API.
type accountAPIServer struct {
	sqlDB   *gorm.DB
	logger  grpclog.LoggerV2
	authAPI auth.Interface
}

// Options contains parameters for passing to NewAccountAPI
type Options struct {
	SQLDB      *gorm.DB
	Logger     grpclog.LoggerV2
	SigningKey string // JWT signing key handed to auth.NewAPI
}
// NewAccountAPI is factory for creating account APIs.
// It validates its inputs, builds the JWT auth API from the signing key and
// auto-migrates the Account table before returning the server.
func NewAccountAPI(ctx context.Context, opt *Options) (account.AccountAPIServer, error) {
	// Validate required options; first failure wins.
	var err error
	switch {
	case ctx == nil:
		err = errs.NilObject("Context")
	case opt.SQLDB == nil:
		err = errs.NilObject("SqlDB")
	case opt.Logger == nil:
		err = errs.NilObject("Logger")
	case opt.SigningKey == "":
		err = errs.MissingField("Jwt SigningKey")
	}
	if err != nil {
		return nil, err
	}
	authAPI, err := auth.NewAPI(opt.SigningKey)
	if err != nil {
		return nil, err
	}
	api := &accountAPIServer{
		sqlDB:   opt.SQLDB,
		logger:  opt.Logger,
		authAPI: authAPI,
	}
	// Perform automigration
	err = api.sqlDB.AutoMigrate(&Account{}).Error
	if err != nil {
		return nil, fmt.Errorf("failed to automigrate table: %v", err)
	}
	return api, nil
}
// Login authenticates a user by email-or-phone username and password,
// returning a signed token plus basic account info. Inactive accounts and
// accounts without a password are rejected with PermissionDenied.
func (api *accountAPIServer) Login(
	ctx context.Context, loginReq *account.LoginRequest,
) (*account.LoginResponse, error) {
	// Request must not be nil
	if loginReq == nil {
		return nil, errs.NilObject("LoginRequest")
	}
	// Validation
	var err error
	switch {
	case loginReq.Username == "":
		err = errs.MissingField("username")
	case loginReq.Password == "":
		err = errs.MissingField("password")
	}
	if err != nil {
		return nil, err
	}
	// Query model: username may be either the email or the phone number.
	accountDB := &Account{}
	err = api.sqlDB.First(accountDB, "email=? OR phone=?", loginReq.Username, loginReq.Username).Error
	switch {
	case err == nil:
	case errors.Is(err, gorm.ErrRecordNotFound):
		return nil, errs.AccountNotFound(loginReq.Username)
	default:
		return nil, errs.SQLQueryFailed(err, "LOGIN")
	}
	// Check password
	if accountDB.Password == "" {
		return nil, errs.WrapMessage(
			codes.PermissionDenied, "account has no password; please request new password",
		)
	}
	accountPB, err := getAccountPB(accountDB)
	if err != nil {
		return nil, err
	}
	// Check that account is not blocked
	if !accountPB.Active {
		return nil, errs.WrapMessage(
			codes.PermissionDenied, "account is not active; please activate account first",
		)
	}
	// Compare passwords
	err = compareHash(accountDB.Password, loginReq.Password)
	if err != nil {
		return nil, errs.WrapMessage(
			codes.Unauthenticated, "wrong password",
		)
	}
	accountID := fmt.Sprint(accountDB.ID)
	// Generate token (0 = default expiry as interpreted by the auth API).
	token, err := api.authAPI.GenToken(ctx, &auth.Payload{
		ID:        accountID,
		FirstName: accountDB.FirstName,
		LastName:  accountDB.LastName,
		Group:     accountDB.Group,
	}, 0)
	if err != nil {
		return nil, errs.FailedToGenToken(err)
	}
	// Populate response
	return &account.LoginResponse{
		Token:        token,
		AccountId:    accountID,
		AccountState: accountDB.Active,
		AccountGroup: accountDB.Group,
	}, nil
}
// genHash returns the bcrypt hash of password at the default cost.
func genHash(password string) (string, error) {
	hashed, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	if err != nil {
		return "", err
	}
	return string(hashed), nil
}
// compareHash reports, via a nil error, whether password matches the stored
// bcrypt hash hashedPassword.
func compareHash(hashedPassword, password string) error {
	return bcrypt.CompareHashAndPassword([]byte(hashedPassword), []byte(password))
}
// CreateAccount validates and persists a new (initially inactive) account.
// A password is optional at creation time; when supplied it must match its
// confirmation and is stored bcrypt-hashed. Duplicate email/phone rows are
// reported as field-specific errors.
func (api *accountAPIServer) CreateAccount(
	ctx context.Context, createReq *account.CreateAccountRequest,
) (*account.CreateAccountResponse, error) {
	// Request must not be nil
	if createReq == nil {
		return nil, errs.NilObject("Account")
	}
	// Validation
	var err error
	accountPB := createReq.GetAccount()
	switch {
	case accountPB == nil:
		err = errs.NilObject("Account")
	case accountPB.FirstName == "":
		err = errs.MissingField("FirstName")
	case accountPB.LastName == "":
		err = errs.MissingField("LastName")
	case accountPB.Phone == "":
		err = errs.MissingField("Phone")
	case accountPB.Email == "":
		err = errs.MissingField("Email")
	case accountPB.Gender == "":
		err = errs.MissingField("Gender")
	}
	if err != nil {
		return nil, err
	}
	// Get model
	accountDB, err := getAccountDB(accountPB)
	if err != nil {
		return nil, err
	}
	// Check password if not empty
	if createReq.Password != "" {
		// Passwords must match
		if createReq.Password != createReq.ConfirmPassword {
			return nil, errs.WrapMessage(
				codes.InvalidArgument, "passwords do not match",
			)
		}
		// Hash password
		hashedPass, err := genHash(createReq.Password)
		if err != nil {
			return nil, errs.FailedToGenHashedPass(err)
		}
		accountDB.Password = hashedPass
	}
	// New accounts start inactive until explicitly activated.
	accountDB.Active = false
	// Create in database
	err = api.sqlDB.Create(accountDB).Error
	switch {
	case err == nil:
	default:
		// Map unique-constraint violations to friendly duplicate-field errors
		// by inspecting the driver's error text.
		errStr := err.Error()
		switch {
		case strings.Contains(errStr, "email"):
			return nil, errs.DuplicateField("email", accountPB.Email)
		case strings.Contains(errStr, "phone"):
			return nil, errs.DuplicateField("phone", accountPB.Phone)
		default:
			return nil, errs.SQLQueryFailed(err, "CREATE")
		}
	}
	return &account.CreateAccountResponse{
		AccountId: fmt.Sprint(accountDB.ID),
	}, nil
}
// ActivateAccount marks the given account as active. The caller must be
// authorized to act on the account.
func (api *accountAPIServer) ActivateAccount(
	ctx context.Context, activateReq *account.ActivateAccountRequest,
) (*empty.Empty, error) {
	// Request must not be nil
	if activateReq == nil {
		return nil, errs.NilObject("ActivateAccountRequest")
	}
	// Validate before authorizing, so an empty id fails fast with a clear
	// error. (Previously AuthorizeActor was called with a possibly empty id
	// and the emptiness check ran afterwards.)
	if activateReq.AccountId == "" {
		return nil, errs.MissingField("account id")
	}
	// Authorize request
	_, err := api.authAPI.AuthorizeActor(ctx, activateReq.AccountId)
	if err != nil {
		return nil, err
	}
	// Update account state in database
	err = api.sqlDB.Table(accountsTable).Where("id=?", activateReq.AccountId).Update("active", true).Error
	if err != nil {
		return nil, errs.SQLQueryFailed(err, "UPDATE")
	}
	return &empty.Empty{}, nil
}
// UpdateAccount overwrites the stored account fields with the supplied ones.
// The caller must be authorized to act on the account.
func (api *accountAPIServer) UpdateAccount(
	ctx context.Context, updateReq *account.UpdateAccountRequest,
) (*empty.Empty, error) {
	// Request must not be nil
	if updateReq == nil {
		return nil, errs.NilObject("UpdateAccountRequest")
	}
	// Authorize request
	_, err := api.authAPI.AuthorizeActor(ctx, updateReq.GetAccountId())
	if err != nil {
		return nil, err
	}
	// Validation
	switch {
	case updateReq.AccountId == "":
		err = errs.MissingField("account id")
	case updateReq.Account == nil:
		err = errs.NilObject("account")
	}
	if err != nil {
		return nil, err
	}
	// Get model
	accountDB, err := getAccountDB(updateReq.Account)
	if err != nil {
		return nil, err
	}
	// Save in model. NOTE(review): gorm Updates with a struct skips zero-value
	// fields — confirm this partial-update behavior is intended.
	err = api.sqlDB.Table(accountsTable).Where("id=?", updateReq.AccountId).
		Updates(accountDB).Error
	if err != nil {
		return nil, errs.SQLQueryFailed(err, "UPDATE")
	}
	return &empty.Empty{}, nil
}
// GetAccount fetches a single account by id. The caller must be authorized to
// act on the account.
func (api *accountAPIServer) GetAccount(
	ctx context.Context, getReq *account.GetRequest,
) (*account.Account, error) {
	// Request must not be nil
	if getReq == nil {
		return nil, errs.NilObject("GetRequest")
	}
	// Authorize request
	_, err := api.authAPI.AuthorizeActor(ctx, getReq.GetAccountId())
	if err != nil {
		return nil, err
	}
	// Validation
	if getReq.AccountId == "" {
		return nil, errs.MissingField("account id")
	}
	// Get from model
	accountDB := &Account{}
	err = api.sqlDB.First(accountDB, "id=?", getReq.AccountId).Error
	switch {
	case err == nil:
	case errors.Is(err, gorm.ErrRecordNotFound):
		return nil, errs.NotFound("account", getReq.AccountId)
	default:
		return nil, errs.SQLQueryFailed(err, "GET")
	}
	accountPB, err := getAccountPB(accountDB)
	if err != nil {
		return nil, err
	}
	return accountPB, nil
}

// GetSettings returns the account's settings blob, decoded to protobuf.
// The caller must be authorized to act on the account.
func (api *accountAPIServer) GetSettings(
	ctx context.Context, getReq *account.GetRequest,
) (*account.Settings, error) {
	// Request must not be nil
	if getReq == nil {
		return nil, errs.NilObject("GetRequest")
	}
	// Authorize request
	_, err := api.authAPI.AuthorizeActor(ctx, getReq.GetAccountId())
	if err != nil {
		return nil, err
	}
	// Validation
	if getReq.AccountId == "" {
		return nil, errs.MissingField("account id")
	}
	// Get from database; only the settings column is selected.
	data := make([]byte, 0)
	err = api.sqlDB.Table(accountsTable).Where("id=?", getReq.AccountId).Select("settings").
		Row().Scan(&data)
	switch {
	case err == nil:
	case errors.Is(err, gorm.ErrRecordNotFound):
		return nil, errs.NotFound("account", getReq.AccountId)
	default:
		return nil, errs.SQLQueryFailed(err, "GET")
	}
	settings, err := getSettingsPB(data)
	if err != nil {
		return nil, err
	}
	return settings, nil
}

// UpdateSettings replaces the account's stored settings blob.
// The caller must be authorized to act on the account.
func (api *accountAPIServer) UpdateSettings(
	ctx context.Context, updateReq *account.UpdateSettingsRequest,
) (*empty.Empty, error) {
	// Request must not be nil
	if updateReq == nil {
		return nil, errs.NilObject("UpdateSettingsRequest")
	}
	// Authorize request
	_, err := api.authAPI.AuthorizeActor(ctx, updateReq.GetAccountId())
	if err != nil {
		return nil, err
	}
	// Validation
	switch {
	case updateReq.Settings == nil:
		err = errs.NilObject("Settings")
	case updateReq.AccountId == "":
		err = errs.MissingField("AccountId")
	}
	if err != nil {
		return nil, err
	}
	// Marshal settings
	data, err := getSettingsDB(updateReq.Settings)
	if err != nil {
		return nil, err
	}
	// Update model
	err = api.sqlDB.Table(accountsTable).Where("id=?", updateReq.AccountId).
		Update("settings", data).Error
	switch {
	case err == nil:
	default:
		return nil, errs.SQLQueryFailed(err, "UPDATE")
	}
	return &empty.Empty{}, nil
}
// GetJobs returns the account's stored jobs blob, decoded to protobuf.
// The caller must be authorized to act on the account.
func (api *accountAPIServer) GetJobs(
	ctx context.Context, getReq *account.GetRequest,
) (*account.Jobs, error) {
	// Request must not be nil
	if getReq == nil {
		return nil, errs.NilObject("GetRequest")
	}
	// Authorize request
	_, err := api.authAPI.AuthorizeActor(ctx, getReq.GetAccountId())
	if err != nil {
		return nil, err
	}
	// Validation
	if getReq.AccountId == "" {
		return nil, errs.MissingField("account id")
	}
	// Get from database; only the jobs column is selected.
	data := make([]byte, 0)
	err = api.sqlDB.Table(accountsTable).Where("id=?", getReq.AccountId).Select("jobs").
		Row().Scan(&data)
	switch {
	case err == nil:
	case errors.Is(err, gorm.ErrRecordNotFound):
		return nil, errs.NotFound("account", getReq.AccountId)
	default:
		return nil, errs.SQLQueryFailed(err, "GET")
	}
	jobs, err := getJobsPB(data)
	if err != nil {
		return nil, err
	}
	return &account.Jobs{
		Jobs: jobs,
	}, nil
}

// UpdateJobs validates and replaces the account's stored jobs blob.
// The caller must be authorized to act on the account.
func (api *accountAPIServer) UpdateJobs(
	ctx context.Context, updateReq *account.UpdateJobsRequest,
) (*empty.Empty, error) {
	// Request must not be nil
	if updateReq == nil {
		return nil, errs.NilObject("UpdateJobsRequest")
	}
	// Authorize request
	_, err := api.authAPI.AuthorizeActor(ctx, updateReq.GetAccountId())
	if err != nil {
		return nil, err
	}
	// Validation
	switch {
	case updateReq.Jobs == nil:
		err = errs.NilObject("Jobs")
	case updateReq.AccountId == "":
		err = errs.MissingField("AccountId")
	}
	if err != nil {
		return nil, err
	}
	// Validate passed jobs
	for index, job := range updateReq.Jobs {
		switch {
		case job.GetFacilityId() == "":
			err = errs.MissingField(fmt.Sprintf("facility id at index %d", index))
		case job.GetFacilityName() == "":
			err = errs.MissingField(fmt.Sprintf("facility name at index %d", index))
		case job.GetRole() == "":
			err = errs.MissingField(fmt.Sprintf("job role at index %d", index))
		}
		if err != nil {
			return nil, err
		}
		// NOTE(review): every job receives a fresh JobId on every update,
		// including pre-existing jobs — confirm ids are not meant to persist.
		job.JobId = uuid.New().String()
	}
	// Marshal jobs
	data, err := getJobsDB(updateReq.Jobs)
	if err != nil {
		return nil, err
	}
	// Update model
	err = api.sqlDB.Table(accountsTable).Where("id=?", updateReq.AccountId).
		Update("jobs", data).Error
	switch {
	case err == nil:
	default:
		return nil, errs.SQLQueryFailed(err, "UPDATE")
	}
	return &empty.Empty{}, nil
}

// GetStarredFacilities returns the account's starred facilities blob, decoded
// to protobuf. The caller must be authorized to act on the account.
func (api *accountAPIServer) GetStarredFacilities(
	ctx context.Context, getReq *account.GetRequest,
) (*account.StarredFacilities, error) {
	// Request must not be nil
	if getReq == nil {
		return nil, errs.NilObject("GetRequest")
	}
	// Authorize request
	_, err := api.authAPI.AuthorizeActor(ctx, getReq.GetAccountId())
	if err != nil {
		return nil, err
	}
	// Validation
	if getReq.AccountId == "" {
		return nil, errs.MissingField("account id")
	}
	// Get from database; only the starred_facilities column is selected.
	data := make([]byte, 0)
	err = api.sqlDB.Table(accountsTable).Where("id=?", getReq.AccountId).Select("starred_facilities").
		Row().Scan(&data)
	switch {
	case err == nil:
	case errors.Is(err, gorm.ErrRecordNotFound):
		return nil, errs.NotFound("account", getReq.AccountId)
	default:
		return nil, errs.SQLQueryFailed(err, "GET")
	}
	starredFacilities, err := getStarredFacilityPB(data)
	if err != nil {
		return nil, err
	}
	return &account.StarredFacilities{
		Facilities: starredFacilities,
	}, nil
}
// UpdateStarredFacilities validates and replaces the account's stored starred
// facilities blob. The caller must be authorized to act on the account.
func (api *accountAPIServer) UpdateStarredFacilities(
	ctx context.Context, updateReq *account.UpdateStarredFacilitiesRequest,
) (*empty.Empty, error) {
	// Request must not be nil
	if updateReq == nil {
		return nil, errs.NilObject("UpdateStarredFacilitiesRequest")
	}
	// Authorize request
	_, err := api.authAPI.AuthorizeActor(ctx, updateReq.GetAccountId())
	if err != nil {
		return nil, err
	}
	// Validation
	switch {
	case len(updateReq.Facilities) == 0:
		err = errs.NilObject("Facilities")
	case updateReq.AccountId == "":
		err = errs.MissingField("AccountId")
	}
	if err != nil {
		return nil, err
	}
	// Validate passed facilities
	for index, facility := range updateReq.Facilities {
		switch {
		case facility.GetFacilityId() == "":
			err = errs.MissingField(fmt.Sprintf("facility id at index %d", index))
		// Bug fix: this case previously re-checked GetFacilityId(), so a
		// missing facility name was never detected despite the error message.
		case facility.GetFacilityName() == "":
			err = errs.MissingField(fmt.Sprintf("facility name at index %d", index))
		}
		if err != nil {
			return nil, err
		}
	}
	// Marshal facilities
	data, err := getStarredFacilityDB(updateReq.Facilities)
	if err != nil {
		return nil, err
	}
	// Update model
	err = api.sqlDB.Table(accountsTable).Where("id=?", updateReq.AccountId).
		Update("starred_facilities", data).Error
	switch {
	case err == nil:
	default:
		return nil, errs.SQLQueryFailed(err, "UPDATE")
	}
	return &empty.Empty{}, nil
}
|
[
7
] |
package gotp
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base32"
"fmt"
"hash"
"math"
"strings"
)
// Hasher names a hash algorithm and provides its constructor for use in HMAC.
type Hasher struct {
	HashName string
	Digest   func() hash.Hash
}

// OTP holds the shared state used to generate one-time password codes.
type OTP struct {
	secret string  // secret in base32 format
	digits int     // number of integers in the OTP. Some apps expect this to be 6 digits, others support more.
	hasher *Hasher // digest function to use in the HMAC (expected to be sha1)
}
// NewOTP constructs an OTP from a base32 secret and a digit count. A nil
// hasher defaults to HMAC-SHA1.
func NewOTP(secret string, digits int, hasher *Hasher) OTP {
	h := hasher
	if h == nil {
		h = &Hasher{
			HashName: "sha1",
			Digest:   sha1.New,
		}
	}
	return OTP{secret: secret, digits: digits, hasher: h}
}
/*
params
    input: the HMAC counter value to use as the OTP input. Usually either the counter, or the computed integer based on the Unix timestamp
*/
// generateOTP computes HMAC(secret, input) and applies RFC 4226 dynamic
// truncation: the low nibble of the last HMAC byte selects a 4-byte window,
// whose 31-bit big-endian value is reduced modulo 10^digits and zero-padded.
func (o *OTP) generateOTP(input int64) string {
	if input < 0 {
		panic("input must be positive integer")
	}
	hasher := hmac.New(o.hasher.Digest, o.byteSecret())
	hasher.Write(Itob(input))
	hmacHash := hasher.Sum(nil)
	// Dynamic truncation offset from the final nibble.
	offset := int(hmacHash[len(hmacHash)-1] & 0xf)
	// 31-bit big-endian integer (top bit masked off).
	code := ((int(hmacHash[offset]) & 0x7f) << 24) |
		((int(hmacHash[offset+1] & 0xff)) << 16) |
		((int(hmacHash[offset+2] & 0xff)) << 8) |
		(int(hmacHash[offset+3]) & 0xff)
	code = code % int(math.Pow10(o.digits))
	// Left-pad with zeros to exactly o.digits characters.
	return fmt.Sprintf(fmt.Sprintf("%%0%dd", o.digits), code)
}
// byteSecret decodes the base32 secret to raw bytes, padding it to a multiple
// of 8 characters as the std encoding requires. The padded copy is local:
// unlike the previous version, the receiver's secret is no longer mutated as
// a hidden side effect.
func (o *OTP) byteSecret() []byte {
	secret := o.secret
	if missing := len(secret) % 8; missing != 0 {
		secret += strings.Repeat("=", 8-missing)
	}
	bytes, err := base32.StdEncoding.DecodeString(secret)
	if err != nil {
		panic("decode secret failed")
	}
	return bytes
}
|
[
0
] |
package api
import (
"context"
"github.com/dgrijalva/jwt-go"
"github.com/labstack/echo"
"github.com/satori/go.uuid"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"log"
"net/http"
"server/router"
"server/util"
"time"
)
// CreateFile creates a new file document from the request body and returns its
// id together with a signed JWT that authorizes later edits to it.
func CreateFile(c echo.Context) error {
	id := uuid.NewV4()
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"id":  id,
		"nbf": time.Now().Unix(),
	})
	// Previously the errors from SignedString and Bind were silently
	// overwritten; both are now handled explicitly.
	tokenString, err := token.SignedString([]byte(util.ConfigFile.Secret.JwtKey.File))
	if err != nil {
		log.Println(err)
		res := new(router.ResponseError)
		res.Message = "Token Error"
		res.Documentation = "https://lifeni.github.io/i-show-you/api"
		return c.JSON(http.StatusInternalServerError, &res)
	}
	req := new(router.RequestData)
	if err = c.Bind(req); err != nil {
		log.Println(err)
		res := new(router.ResponseError)
		res.Message = "Bad Request"
		res.Documentation = "https://lifeni.github.io/i-show-you/api"
		return c.JSON(http.StatusBadRequest, &res)
	}
	_, err = router.FileCollection.InsertOne(context.TODO(), router.File{
		Id:        id.String(),
		CreatedAt: req.CreatedAt,
		UpdatedAt: req.UpdatedAt,
		Name:      req.Name,
		Type:      req.Type,
		Content:   req.Content,
		Options:   req.Options,
	})
	if err != nil {
		log.Println(err)
		res := new(router.ResponseError)
		res.Message = "Database Error"
		res.Documentation = "https://lifeni.github.io/i-show-you/api"
		return c.JSON(http.StatusInternalServerError, &res)
	}
	type ResponseData struct {
		Message string `json:"message"`
		Data    struct {
			Id    uuid.UUID `json:"id"`
			Token string    `json:"token"`
		} `json:"data"`
	}
	res := new(ResponseData)
	res.Message = "Got it"
	res.Data.Id = id
	res.Data.Token = tokenString
	return c.JSON(http.StatusOK, &res)
}
// QueryFile looks up a file by id and returns it together with the caller's
// authentication level (as reported by VerifyFileToken).
func QueryFile(c echo.Context) error {
	id := c.Param("id")
	authentication := util.VerifyFileToken(c)
	// NOTE(review): this filter struct carries json (not bson) tags; the mongo
	// driver's default lowercased field name happens to produce "id" here —
	// confirm against the stored document schema.
	type QueryData struct {
		Id string `json:"id"`
	}
	var file router.File
	err := router.FileCollection.FindOne(context.TODO(), QueryData{Id: id}).Decode(&file)
	if err != nil {
		if err == mongo.ErrNoDocuments {
			log.Println(err)
			res := new(router.ResponseError)
			res.Message = "File Not Found"
			res.Documentation = "https://lifeni.github.io/i-show-you/api"
			return c.JSON(http.StatusNotFound, &res)
		}
		log.Println(err)
		res := new(router.ResponseError)
		res.Message = "Database Error"
		res.Documentation = "https://lifeni.github.io/i-show-you/api"
		return c.JSON(http.StatusInternalServerError, &res)
	}
	type ResponseData struct {
		Message        string      `json:"message"`
		Data           router.File `json:"data"`
		Authentication string      `json:"authentication"`
	}
	res := new(ResponseData)
	res.Message = "Got it"
	res.Data = file
	res.Authentication = authentication
	return c.JSON(http.StatusOK, &res)
}
// QueryRawFile looks up a file by id and returns just its raw content as
// text/plain, without any authentication check.
func QueryRawFile(c echo.Context) error {
	id := c.Param("id")
	type QueryData struct {
		Id string `json:"id"`
	}
	var file router.File
	err := router.FileCollection.FindOne(context.TODO(), QueryData{Id: id}).Decode(&file)
	if err != nil {
		if err == mongo.ErrNoDocuments {
			log.Println(err)
			res := new(router.ResponseError)
			res.Message = "File Not Found"
			res.Documentation = "https://lifeni.github.io/i-show-you/api"
			return c.JSON(http.StatusNotFound, &res)
		}
		log.Println(err)
		res := new(router.ResponseError)
		res.Message = "Database Error"
		res.Documentation = "https://lifeni.github.io/i-show-you/api"
		return c.JSON(http.StatusInternalServerError, &res)
	}
	return c.String(http.StatusOK, file.Content)
}
// UpdateFile replaces a file's mutable fields. Callers without a valid token
// ("ghost") are rejected. When history is enabled and the content changed
// after the archive period elapsed, the previous version is archived.
func UpdateFile(c echo.Context) error {
	id := c.Param("id")
	if util.VerifyFileToken(c) == "ghost" {
		res := new(router.ResponseError)
		res.Message = "Permission Denied"
		res.Documentation = "https://lifeni.github.io/i-show-you/api"
		return c.JSON(http.StatusForbidden, &res)
	}
	type QueryData struct {
		Id string `json:"id"`
	}
	type UpdateData struct {
		UpdatedAt string             `json:"updated_at"`
		Name      string             `json:"name"`
		Type      string             `json:"type"`
		Content   string             `json:"content"`
		Options   router.FileOptions `json:"options"`
	}
	req := new(UpdateData)
	err := c.Bind(req)
	// FindOneAndUpdate returns the pre-update document, which is what the
	// archive logic below compares against.
	var pre router.File
	err = router.FileCollection.FindOneAndUpdate(context.TODO(), QueryData{Id: id}, bson.D{{"$set", &req}}).Decode(&pre)
	if err != nil {
		if err == mongo.ErrNoDocuments {
			log.Println(err)
			res := new(router.ResponseError)
			res.Message = "File Not Found"
			res.Documentation = "https://lifeni.github.io/i-show-you/api"
			return c.JSON(http.StatusNotFound, &res)
		}
		log.Println(err)
		res := new(router.ResponseError)
		res.Message = "Database Error"
		res.Documentation = "https://lifeni.github.io/i-show-you/api"
		return c.JSON(http.StatusInternalServerError, &res)
	}
	t, err := time.Parse(time.RFC3339, pre.UpdatedAt)
	if err != nil {
		log.Println(err)
		res := new(router.ResponseError)
		res.Message = "Database Error"
		res.Documentation = "https://lifeni.github.io/i-show-you/api"
		return c.JSON(http.StatusInternalServerError, &res)
	}
	// Archive the previous version when history is on, the archive period has
	// elapsed since the last update, and the content actually changed.
	if util.ConfigFile.App.History.Enable && time.Now().After(t.Add(router.ArchivePeriod)) && req.Content != pre.Content {
		_, err = router.HistoryCollection.InsertOne(context.TODO(), &pre)
		if err != nil {
			log.Println(err)
			res := new(router.ResponseError)
			res.Message = "Database Error"
			res.Documentation = "https://lifeni.github.io/i-show-you/api"
			return c.JSON(http.StatusInternalServerError, &res)
		}
	}
	type ResponseData struct {
		Message string `json:"message"`
	}
	res := new(ResponseData)
	res.Message = "Updated"
	return c.JSON(http.StatusOK, &res)
}
// UpdateFilePatch applies a partial update to the file with the given :id.
// The :key path parameter selects the field group: "name" (name, type,
// updated_at), "content" (content, updated_at) or "options"; any other key
// yields 400. Callers with a "ghost" token get 403. Only content changes
// are considered for history archiving.
func UpdateFilePatch(c echo.Context) error {
	id, key := c.Param("id"), c.Param("key")
	// Reject anonymous ("ghost") callers before touching the database.
	if util.VerifyFileToken(c) == "ghost" {
		res := new(router.ResponseError)
		res.Message = "Permission Denied"
		res.Documentation = "https://lifeni.github.io/i-show-you/api"
		return c.JSON(http.StatusForbidden, &res)
	}
	type QueryData struct {
		Id string `json:"id"`
	}
	var err error
	// pre receives the pre-update document from FindOneAndUpdate.
	var pre router.File
	// flag records whether a content update happened (the only patch kind
	// that triggers archiving below).
	flag := false
	if key == "name" {
		type UpdateData struct {
			Name      string `json:"name"`
			Type      string `json:"type"`
			UpdatedAt string `json:"updated_at"`
		}
		req := new(UpdateData)
		err = c.Bind(req)
		if err != nil {
			log.Println(err)
			res := new(router.ResponseError)
			res.Message = "Invalid Request"
			res.Documentation = "https://lifeni.github.io/i-show-you/api"
			return c.JSON(http.StatusBadRequest, &res)
		}
		err = router.FileCollection.FindOneAndUpdate(context.TODO(), QueryData{Id: id}, bson.D{{"$set", &req}}).Decode(&pre)
	} else if key == "content" {
		type UpdateData struct {
			Content   string `json:"content"`
			UpdatedAt string `json:"updated_at"`
		}
		req := new(UpdateData)
		err = c.Bind(req)
		if err != nil {
			log.Println(err)
			res := new(router.ResponseError)
			res.Message = "Invalid Request"
			res.Documentation = "https://lifeni.github.io/i-show-you/api"
			return c.JSON(http.StatusBadRequest, &res)
		}
		flag = true
		err = router.FileCollection.FindOneAndUpdate(context.TODO(), QueryData{Id: id}, bson.D{{"$set", &req}}).Decode(&pre)
	} else if key == "options" {
		type UpdateData struct {
			Options router.FileOptions `json:"options"`
		}
		req := new(UpdateData)
		err = c.Bind(req)
		if err != nil {
			log.Println(err)
			res := new(router.ResponseError)
			res.Message = "Invalid Request"
			res.Documentation = "https://lifeni.github.io/i-show-you/api"
			return c.JSON(http.StatusBadRequest, &res)
		}
		err = router.FileCollection.FindOneAndUpdate(context.TODO(), QueryData{Id: id}, bson.D{{"$set", &req}}).Decode(&pre)
	} else {
		// Unknown key: err is nil here, so this log line prints <nil>.
		log.Println(err)
		res := new(router.ResponseError)
		res.Message = "Invalid Request"
		res.Documentation = "https://lifeni.github.io/i-show-you/api"
		return c.JSON(http.StatusBadRequest, &res)
	}
	// Shared error handling for the three FindOneAndUpdate calls above.
	if err != nil {
		if err == mongo.ErrNoDocuments {
			log.Println(err)
			res := new(router.ResponseError)
			res.Message = "File Not Found"
			res.Documentation = "https://lifeni.github.io/i-show-you/api"
			return c.JSON(http.StatusNotFound, &res)
		}
		log.Println(err)
		res := new(router.ResponseError)
		res.Message = "Database Error"
		res.Documentation = "https://lifeni.github.io/i-show-you/api"
		return c.JSON(http.StatusInternalServerError, &res)
	}
	// Timestamp of the pre-update revision, stored as RFC 3339 text.
	t, err := time.Parse(time.RFC3339, pre.UpdatedAt)
	if err != nil {
		log.Println(err)
		res := new(router.ResponseError)
		res.Message = "Database Error"
		res.Documentation = "https://lifeni.github.io/i-show-you/api"
		return c.JSON(http.StatusInternalServerError, &res)
	}
	// Archive the previous revision only for content patches, when history
	// is enabled and the archive period has elapsed.
	if util.ConfigFile.App.History.Enable && time.Now().After(t.Add(router.ArchivePeriod)) && flag {
		_, err = router.HistoryCollection.InsertOne(context.TODO(), &pre)
		if err != nil {
			log.Println(err)
			res := new(router.ResponseError)
			res.Message = "Database Error"
			res.Documentation = "https://lifeni.github.io/i-show-you/api"
			return c.JSON(http.StatusInternalServerError, &res)
		}
	}
	type ResponseData struct {
		Message string `json:"message"`
	}
	res := new(ResponseData)
	res.Message = "Updated"
	return c.JSON(http.StatusOK, &res)
}
// RemoveFile deletes the file with the given :id and all of its history
// entries. Callers with a "ghost" token get 403.
func RemoveFile(c echo.Context) error {
	id := c.Param("id")
	if util.VerifyFileToken(c) == "ghost" {
		res := new(router.ResponseError)
		res.Message = "Permission Denied"
		res.Documentation = "https://lifeni.github.io/i-show-you/api"
		return c.JSON(http.StatusForbidden, &res)
	}
	type QueryData struct {
		Id string `json:"id"`
	}
	_, err := router.FileCollection.DeleteOne(context.TODO(), QueryData{Id: id})
	if err != nil {
		// NOTE(review): DeleteOne normally reports a missing document via
		// DeletedCount, not mongo.ErrNoDocuments — this 404 branch looks
		// unreachable; confirm against the driver version in use.
		if err == mongo.ErrNoDocuments {
			log.Println(err)
			res := new(router.ResponseError)
			res.Message = "File Not Found"
			res.Documentation = "https://lifeni.github.io/i-show-you/api"
			return c.JSON(http.StatusNotFound, &res)
		}
		log.Println(err)
		res := new(router.ResponseError)
		res.Message = "Database Error (File)"
		res.Documentation = "https://lifeni.github.io/i-show-you/api"
		return c.JSON(http.StatusInternalServerError, &res)
	}
	// Remove every archived revision belonging to this file.
	_, err = router.HistoryCollection.DeleteMany(context.TODO(), QueryData{Id: id})
	if err != nil {
		log.Println(err)
		res := new(router.ResponseError)
		res.Message = "Database Error (History)"
		res.Documentation = "https://lifeni.github.io/i-show-you/api"
		return c.JSON(http.StatusInternalServerError, &res)
	}
	type ResponseData struct {
		Message string `json:"message"`
	}
	res := new(ResponseData)
	res.Message = "Deleted"
	return c.JSON(http.StatusOK, &res)
}
|
[
5
] |
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"os"
"prog/dep"
)
// first prints a fixed marker string; noinline keeps it a distinct symbol
// (this program appears to be testdata for binary analysis — confirm).
//
//go:noinline
func first() {
	println("whee")
}
// second prints a fixed marker string; kept out-of-line via noinline.
//
//go:noinline
func second() {
	println("oy")
}
// third returns 42 for any non-zero x; for x == 0 it prints "blarg" and
// returns 0. Kept out-of-line via the noinline directive.
//
//go:noinline
func third(x int) int {
	if x == 0 {
		println("blarg")
		return 0
	}
	return 42
}
// fourth returns the constant 99; kept out-of-line via noinline.
//
//go:noinline
func fourth() int {
	return 99
}
// main exercises the dep package and a few local functions depending on
// the argument count.
func main() {
	println(dep.Dep1())
	dep.PDep(2)
	if len(os.Args) > 1 {
		second()
		third(1)
	} else if len(os.Args) > 2 {
		// NOTE(review): unreachable — len(os.Args) > 2 implies > 1, so
		// fourth() is never called here. Likely intentional dead code for
		// the tooling this program tests; confirm before changing.
		fourth()
	} else {
		first()
		third(0)
	}
}
|
[
5
] |
package gpxcharts
import (
"bytes"
"encoding/xml"
"errors"
"fmt"
"image"
"image/png"
"math"
"strings"
"github.com/llgcode/draw2d/draw2dsvg"
)
// SVGToBytes serializes the given SVG document to XML bytes.
// Fixes the misspelled error message ("marhslling") and preserves the
// underlying xml.Marshal error instead of discarding it.
func SVGToBytes(svg *draw2dsvg.Svg) ([]byte, error) {
	byts, err := xml.Marshal(svg)
	if err != nil {
		return nil, fmt.Errorf("marshalling svg: %w", err)
	}
	return byts, nil
}
func RGBAToBytes(m *image.RGBA) ([]byte, error) {
var b bytes.Buffer
if err := png.Encode(&b, m); err != nil {
return nil, errors.New("error encoding png")
}
return b.Bytes(), nil
}
// FormatSpeed renders a speed given in m/s in the requested unit system
// (mph, kn or kmh). Non-positive speeds yield "n/a"; an empty unit type
// defaults to metric. When round is true the value is shown as a whole
// number; otherwise 2 decimals under 10, 1 decimal from 10 up.
func FormatSpeed(meters_per_seconds float64, unit_type UnitType, round bool) string {
	if meters_per_seconds <= 0 {
		return "n/a"
	}
	if len(unit_type) == 0 {
		unit_type = UnitTypeMetric
	}
	var (
		speed float64
		unit  string
	)
	switch unit_type {
	case UnitTypeImperial:
		speed = meters_per_seconds * 60 * 60 / UnitTypeImperial.Units()["mi"]
		unit = "mph"
	case UnitTypeNautical:
		speed = meters_per_seconds * 60 * 60 / 1852.
		unit = "kn"
	default:
		speed = meters_per_seconds * 60 * 60 / 1000.
		unit = "kmh"
	}
	switch {
	case round:
		return fmt.Sprintf("%d%s", int(math.Round(speed)), unit)
	case speed < 10:
		return fmt.Sprintf("%.2f%s", speed, unit)
	default:
		return fmt.Sprintf("%.1f%s", speed, unit)
	}
}
// FormatLength renders a length given in meters in the requested unit
// system. Negative lengths yield "n/a"; an empty unit type defaults to
// metric. Nautical/imperial use 2 decimals under 10 miles, 1 decimal from
// 10 up; metric uses meters under 1 km, 2-decimal km under 50 km and
// 1-decimal km above.
func FormatLength(lengthM float64, ut UnitType) string {
	if lengthM < 0 {
		return "n/a"
	}
	if len(ut) == 0 {
		ut = UnitTypeMetric
	}
	switch ut {
	case UnitTypeNautical:
		miles := ConvertFromM(lengthM, "NM")
		if miles < 10 {
			return FormatFloat(miles, 2) + "NM"
		}
		return FormatFloat(miles, 1) + "NM"
	case UnitTypeImperial:
		miles := ConvertFromM(lengthM, "mi")
		if miles < 10 {
			return FormatFloat(miles, 2) + "mi"
		}
		return FormatFloat(miles, 1) + "mi"
	}
	// metric:
	if lengthM < 1000 {
		return FormatFloat(lengthM, 0) + "m"
	}
	if lengthM < 50000 {
		return FormatFloat(lengthM/1000, 2) + "km"
	}
	return FormatFloat(lengthM/1000, 1) + "km"
}
// ConvertFromM converts n meters (or m/s for speed units) into toUnit.
// Speed units take precedence over length units; an unknown unit yields 0.
func ConvertFromM(n float64, toUnit string) float64 {
	key := strings.TrimSpace(strings.ToLower(toUnit))
	if factor, known := SPEED_UNITS[key]; known {
		return n / factor
	}
	if factor, known := Units[key]; known {
		return n / factor
	}
	return 0
}
// FormatFloat formats f with the given number of decimal digits and then
// strips trailing zeros and a trailing decimal point ("2.00" -> "2").
func FormatFloat(f float64, digits int) string {
	layout := fmt.Sprintf("%%.%df", digits)
	s := fmt.Sprintf(layout, f)
	if !strings.Contains(s, ".") {
		return s
	}
	s = strings.TrimRight(s, "0")
	return strings.TrimRight(s, ".")
}
// FormatAltitude renders an altitude in meters (metric) or feet (any other
// unit type). Values outside +/-20000 m yield "n/a".
func FormatAltitude(altitude_m float64, unit_type UnitType) string {
	if altitude_m < -20000 || altitude_m > 20000 {
		return "n/a"
	}
	if unit_type != UnitTypeMetric {
		return FormatFloat(ConvertFromM(altitude_m, "ft"), 0) + "ft"
	}
	return FormatFloat(altitude_m, 0) + "m"
}
func IsNanOrOnf(f float64) bool {
return math.IsNaN(f) || math.IsInf(f, 0)
}
|
[
5
] |
package main
import (
"bufio"
"fmt"
"math/rand"
"os"
"strings"
"time"
)
// handleError panics on any non-nil error; used to abort on stdin read
// failures in this small interactive program.
func handleError(e error) {
	if e != nil {
		panic(e)
	}
}
// shuffle permutes list in place with a Fisher-Yates pass, seeding the
// global source from the current time.
func shuffle(list []int) {
	rand.Seed(time.Now().UnixNano())
	for remaining := len(list); remaining > 1; remaining-- {
		pick := rand.Intn(remaining)
		list[remaining-1], list[pick] = list[pick], list[remaining-1]
	}
}
// getCards returns a fresh 52-card deck as blackjack point values: ranks
// A..10 count face value (1..10); J, Q, K count 10.
// Improvement: iterate over the result slice directly instead of ranging
// over a throwaway make([]int, 52).
func getCards() (cards []int) {
	cards = make([]int, 52)
	for i := range cards {
		rank := i%13 + 1
		if rank > 10 {
			rank = 10
		}
		cards[i] = rank
	}
	return
}
// main runs a minimal interactive blackjack round: deal two cards to the
// player, let them hit or stand via stdin, then play the dealer to 17+.
func main() {
	// initialize state
	cards := getCards()
	reader := bufio.NewReader(os.Stdin)
	player := 0
	dealer := 0
	shuffle(cards)
	// log.Println(cards)
	// set first player points
	player = cards[0]
	cards = cards[1:]
	fmt.Print("Your points: ", player, "\n")
	player = player + cards[0]
	cards = cards[1:]
	fmt.Print("Your points: ", player, "\n")
	// Player turn: loop until the player stands.
	// NOTE(review): there is no bust check — the loop continues past 21,
	// and the dealer still plays afterwards; confirm whether intended.
game:
	for true {
		fmt.Print("Plese press key. [H(Hit)/S(Stand)]: ")
		text, err := reader.ReadString('\n')
		handleError(err)
		switch strings.TrimSpace(text) {
		case "H":
			hit := cards[0]
			fmt.Print("Hit!: ", hit, "\n")
			player = player + cards[0]
			fmt.Print("your points: ", player, "\n")
			cards = cards[1:]
		case "S":
			fmt.Println("Stand.")
			break game
		default:
			fmt.Println("dontmatch", text)
		}
		// log.Println(cards)
		fmt.Println()
	}
	// Dealer turn: draw until reaching at least 17.
	for dealer < 17 {
		dealer = dealer + cards[0]
		cards = cards[1:]
		fmt.Print("Dealer points: ", dealer, "\n")
	}
	fmt.Println()
	fmt.Print("Dealer final points: ", dealer, "\n")
	fmt.Print("Your final points: ", player, "\n")
}
|
[
0
] |
package main
// Leetcode 33. (medium)
// search finds target in a rotated sorted array of distinct values in two
// phases: first locate the rotation pivot (index of the minimum), then run
// a plain binary search on whichever sorted half can contain target.
// Returns the index or -1.
func search(nums []int, target int) int {
	if len(nums) == 0 {
		return -1
	}
	// Phase 1: shrink [left, right] until right is the pivot index.
	left, right := 0, len(nums)-1
	for left <= right {
		mid := left + (right-left)/2
		if nums[mid] == nums[right] {
			// With distinct values this only happens when mid == right.
			break
		} else if nums[mid] > nums[right] {
			left = mid + 1
		} else {
			right = mid
		}
	}
	// Phase 2: pick the sorted half that can contain target.
	if target >= nums[right] && target <= nums[len(nums)-1] {
		left, right = right, len(nums)-1
	} else {
		left, right = 0, right-1
	}
	// Standard binary search over the chosen half.
	for left <= right {
		mid := left + (right-left)/2
		if nums[mid] == target {
			return mid
		} else if nums[mid] > target {
			right = mid - 1
		} else {
			left = mid + 1
		}
	}
	return -1
}
// search2 is a one-pass binary search over a rotated sorted array of
// distinct values: at each step exactly one half of [lo, hi] is sorted, so
// we keep the half whose value range can contain target. Returns the index
// or -1.
func search2(nums []int, target int) int {
	lo, hi := 0, len(nums)-1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		switch {
		case nums[mid] == target:
			return mid
		case nums[mid] < nums[hi]:
			// Right half [mid..hi] is sorted.
			if nums[mid] < target && target <= nums[hi] {
				lo = mid + 1
			} else {
				hi = mid - 1
			}
		default:
			// Left half [lo..mid] is sorted.
			if nums[lo] <= target && target < nums[mid] {
				hi = mid - 1
			} else {
				lo = mid + 1
			}
		}
	}
	return -1
}
// Leetcode 81. (medium)
// search3 reports whether target occurs in a rotated sorted array that may
// contain duplicates. When nums[mid] equals a boundary value we cannot
// tell which half is sorted, so the boundary is shrunk by one instead.
func search3(nums []int, target int) bool {
	lo, hi := 0, len(nums)-1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		switch {
		case nums[mid] == target:
			return true
		case nums[mid] == nums[hi]:
			// Ambiguous: drop the duplicate at the right edge.
			hi--
		case nums[mid] < nums[hi]:
			// Right half [mid..hi] is sorted.
			if nums[mid] < target && target <= nums[hi] {
				lo = mid + 1
			} else {
				hi = mid - 1
			}
		case nums[lo] == nums[mid]:
			// Ambiguous: drop the duplicate at the left edge.
			lo++
		default:
			// Left half [lo..mid] is sorted.
			if nums[lo] <= target && target < nums[mid] {
				hi = mid - 1
			} else {
				lo = mid + 1
			}
		}
	}
	return false
}
|
[
5
] |
package main
import (
"bufio"
"fmt"
"os"
"path"
)
// A is the fixed board searched by processLine: 3 rows x 4 columns backed
// by a flat string.
var A = Matrix{M: 3, N: 4, Value: "ABCESFCSADEE"}

// Matrix is an M x N character grid stored row-major in Value.
type Matrix struct {
	M, N  int
	Value string
}

// Get returns the rune at row i, column j, or a space for out-of-range
// coordinates (used as a never-matching sentinel by the search).
func (a Matrix) Get(i, j int) rune {
	inBounds := i >= 0 && i < a.M && j >= 0 && j < a.N
	if !inBounds {
		return ' '
	}
	return rune(a.Value[i*a.N+j])
}

// Index flattens (i, j) into the row-major position inside Value.
func (a Matrix) Index(i, j int) int {
	return a.N*i + j
}

// String renders the grid one row per line, each row newline-terminated.
func (a Matrix) String() string {
	out := ""
	for row := 0; row < a.M; row++ {
		start := row * a.N
		out += a.Value[start:start+a.N] + "\n"
	}
	return out
}
// contains reports whether e occurs in s.
func contains(s []int, e int) bool {
	for i := range s {
		if s[i] == e {
			return true
		}
	}
	return false
}
// Contains reports whether s can be traced through the grid as a path of
// horizontally/vertically adjacent cells without revisiting a cell.
// p is the flat index of the previously matched cell, or -1 on the initial
// call; path accumulates the visited flat indices.
func (a Matrix) Contains(s string, p int, path []int) bool {
	// All characters matched: the word was found.
	if len(s) == 0 {
		return true
	}
	firstChar := rune(s[0])
	// Record the current position before exploring neighbors.
	newPath := append(path, p)
	if p == -1 {
		// Initial call: try every cell that matches the first character.
		for i, char := range a.Value {
			if char == firstChar && a.Contains(s[1:], i, newPath) {
				return true
			}
		}
	} else {
		// Recursive step: try the four neighbors of p that match the next
		// character and were not already visited. Get returns ' ' for
		// out-of-range coordinates, which never matches.
		i := p / a.N
		j := p % a.N
		for _, d := range []int{-1, 1} {
			if !contains(newPath, a.Index(i+d, j)) && a.Get(i+d, j) == firstChar && a.Contains(s[1:], a.Index(i+d, j), newPath) {
				return true
			}
			if !contains(newPath, a.Index(i, j+d)) && a.Get(i, j+d) == firstChar && a.Contains(s[1:], a.Index(i, j+d), newPath) {
				return true
			}
		}
	}
	return false
}
// processLine reports whether line can be traced as an adjacency path in
// the fixed board A, starting the search from scratch.
func processLine(line string) bool {
	return A.Contains(line, -1, []int{})
}
func readLine(file *os.File) <-chan string {
out := make(chan string)
go func() {
in := bufio.NewReader(file)
linePartial := ""
for {
bytes, isPrefix, err := in.ReadLine()
if err != nil {
break
} else if isPrefix {
linePartial += string(bytes)
} else {
out <- linePartial + string(bytes)
linePartial = ""
}
}
close(out)
}()
return out
}
// main reads the file named by the first argument and prints "True" or
// "False" per non-empty line depending on whether the line traces a path
// in the fixed board A.
// Fix: the defer of file.Close() was registered before the Open error was
// checked; register it only after Open succeeds.
func main() {
	if len(os.Args) < 2 {
		fmt.Println("usage:", path.Base(os.Args[0]), "file")
		os.Exit(1)
	}
	file, err := os.Open(os.Args[1])
	if err != nil {
		fmt.Println("error opening file", os.Args[1], ":", err)
		os.Exit(1)
	}
	defer file.Close()
	for line := range readLine(file) {
		if line != "" {
			if processLine(line) {
				fmt.Println("True")
			} else {
				fmt.Println("False")
			}
		}
	}
}
|
[
5
] |
package mysqlrouter
import (
"errors"
"net/http"
)
// apiVer is the REST API version segment appended to the base URL.
const apiVer = "20190715"

// Client holds the configuration for the 20190715 version API client.
type Client struct {
	URL      string
	Username string
	Password string
	Options  *Options
}

// Options carries optional client configuration, currently only a custom
// HTTP transport.
type Options struct {
	Transport *http.Transport
}

// newClient builds a Client whose URL is rooted at the versioned API path.
// Fix: the options parameter was named "Options", shadowing the Options
// type inside the function; renamed to opts (no caller impact — Go
// parameter names are not part of the call interface).
func newClient(url, user, pass string, opts *Options) *Client {
	return &Client{
		URL:      url + "/api/" + apiVer,
		Username: user,
		Password: pass,
		Options:  opts,
	}
}
// New creates a new API client rooted at url and verifies connectivity
// before returning it. An empty url is rejected up front.
func New(url, user, pass string, Options *Options) (*Client, error) {
	if url == "" {
		return nil, errors.New(errEmptyClientInformation)
	}
	client := newClient(url, user, pass, Options)
	if err := client.verifyConnection(); err != nil {
		return nil, err
	}
	return client, nil
}
// verifyConnection probes the API by fetching its swagger document and
// returns any request error.
func (c *Client) verifyConnection() error {
	_, err := c.request(c.URL + "/swagger.json")
	return err
}
|
[
2
] |
// Code generated by mockery v1.0.0. DO NOT EDIT.
package mocks
import mock "github.com/stretchr/testify/mock"
import upvote "github.com/jackharley7/golang-upvote-microservice/internal/upvote"
// Repository is an autogenerated mock type for the Repository type.
// Generated by mockery; each method replays expectations registered on the
// embedded mock.Mock (do not edit by hand — regenerate instead).
type Repository struct {
	mock.Mock
}

// Downvote provides a mock function with given fields: userID, catItemID, increment
func (_m *Repository) Downvote(userID int64, catItemID string, increment int) error {
	ret := _m.Called(userID, catItemID, increment)
	var r0 error
	// Use a registered function stub if present; otherwise fall back to
	// the recorded error value.
	if rf, ok := ret.Get(0).(func(int64, string, int) error); ok {
		r0 = rf(userID, catItemID, increment)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}

// GetVote provides a mock function with given fields: userID, catItemID
func (_m *Repository) GetVote(userID int64, catItemID string) (*upvote.Upvote, error) {
	ret := _m.Called(userID, catItemID)
	var r0 *upvote.Upvote
	if rf, ok := ret.Get(0).(func(int64, string) *upvote.Upvote); ok {
		r0 = rf(userID, catItemID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*upvote.Upvote)
		}
	}
	var r1 error
	if rf, ok := ret.Get(1).(func(int64, string) error); ok {
		r1 = rf(userID, catItemID)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}

// GetVoteCount provides a mock function with given fields: catItemID
func (_m *Repository) GetVoteCount(catItemID string) (int64, error) {
	ret := _m.Called(catItemID)
	var r0 int64
	if rf, ok := ret.Get(0).(func(string) int64); ok {
		r0 = rf(catItemID)
	} else {
		r0 = ret.Get(0).(int64)
	}
	var r1 error
	if rf, ok := ret.Get(1).(func(string) error); ok {
		r1 = rf(catItemID)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}

// GetVotes provides a mock function with given fields: userID, itemIDs
func (_m *Repository) GetVotes(userID int64, itemIDs []string) (map[string]upvote.UpvoteType, error) {
	ret := _m.Called(userID, itemIDs)
	var r0 map[string]upvote.UpvoteType
	if rf, ok := ret.Get(0).(func(int64, []string) map[string]upvote.UpvoteType); ok {
		r0 = rf(userID, itemIDs)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(map[string]upvote.UpvoteType)
		}
	}
	var r1 error
	if rf, ok := ret.Get(1).(func(int64, []string) error); ok {
		r1 = rf(userID, itemIDs)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}

// RemoveVote provides a mock function with given fields: userID, catItemID, increment
func (_m *Repository) RemoveVote(userID int64, catItemID string, increment int) error {
	ret := _m.Called(userID, catItemID, increment)
	var r0 error
	if rf, ok := ret.Get(0).(func(int64, string, int) error); ok {
		r0 = rf(userID, catItemID, increment)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}

// Upvote provides a mock function with given fields: userID, catItemID, increment
func (_m *Repository) Upvote(userID int64, catItemID string, increment int) error {
	ret := _m.Called(userID, catItemID, increment)
	var r0 error
	if rf, ok := ret.Get(0).(func(int64, string, int) error); ok {
		r0 = rf(userID, catItemID, increment)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}
|
[
4
] |
package postgres
import (
"context"
"github.com/geekshacking/geekhub-backend/ent"
entproject "github.com/geekshacking/geekhub-backend/ent/project"
entuser "github.com/geekshacking/geekhub-backend/ent/user"
"github.com/geekshacking/geekhub-backend/repository"
)
// project is the ent-backed implementation of repository.Project.
type project struct {
	client *ent.Client
}

// NewProject wraps the given ent client in a repository.Project.
func NewProject(client *ent.Client) repository.Project {
	return &project{client}
}
// Find loads the project with the given ID, eagerly loading its tags,
// owner, users and languages edges.
func (p *project) Find(ctx context.Context, ID int) (*ent.Project, error) {
	query := p.client.Project.Query().
		Where(entproject.ID(ID)).
		WithTags().
		WithOwner().
		WithUsers().
		WithLanguages()
	found, err := query.Only(ctx)
	if err != nil {
		return nil, err
	}
	return found, nil
}
// FindByUserAuth0ID lists every project that has a member whose Auth0 ID
// matches userID.
func (p *project) FindByUserAuth0ID(ctx context.Context, userID string) ([]*ent.Project, error) {
	query := p.client.Project.Query().
		Where(entproject.HasUsersWith(entuser.Auth0ID(userID)))
	projects, err := query.All(ctx)
	if err != nil {
		return nil, err
	}
	return projects, nil
}
// Create persists a new project from the given model, attaching its owner,
// users, tags, tickets and languages edges.
func (p *project) Create(ctx context.Context, model ent.Project) (*ent.Project, error) {
	builder := p.client.Project.Create().
		SetName(model.Name).
		SetDescription(model.Description).
		SetRepository(model.Repository).
		SetOwner(model.Edges.Owner).
		AddUsers(model.Edges.Users...).
		AddTags(model.Edges.Tags...).
		AddTickets(model.Edges.Tickets...).
		AddLanguages(model.Edges.Languages...)
	created, err := builder.Save(ctx)
	if err != nil {
		return nil, err
	}
	return created, nil
}
// Update rewrites the scalar fields of the project identified by model.ID
// and appends the users, tags, tickets and languages edges from the model.
// The owner edge is not modified here.
func (p *project) Update(ctx context.Context, model ent.Project) (*ent.Project, error) {
	builder := p.client.Project.UpdateOneID(model.ID).
		SetName(model.Name).
		SetDescription(model.Description).
		SetRepository(model.Repository).
		AddUsers(model.Edges.Users...).
		AddTags(model.Edges.Tags...).
		AddTickets(model.Edges.Tickets...).
		AddLanguages(model.Edges.Languages...)
	updated, err := builder.Save(ctx)
	if err != nil {
		return nil, err
	}
	return updated, nil
}
// BulkAddLanguage attaches all given languages to the project with the
// given ID in a single update.
func (p *project) BulkAddLanguage(ctx context.Context, ID int, models []*ent.Language) (*ent.Project, error) {
	return p.client.Project.UpdateOneID(ID).AddLanguages(models...).Save(ctx)
}
|
[
2
] |
package models
import (
"fmt"
"encoding/json"
"time"
)
// Precision names how much of a FHIR date/time value is significant.
type Precision string

const (
	Date      = "date"
	YearMonth = "year-month"
	Year      = "year"
	Timestamp = "timestamp"
	Time      = "time"
)

// FHIRDateTime couples a time value with the precision it was parsed at,
// so it can be re-serialized in the same shape.
type FHIRDateTime struct {
	Time      time.Time
	Precision Precision
}

// UnmarshalJSON parses a quoted FHIR date/time. Short payloads (<= 12
// bytes) are tried as date, year-month, year and finally bare time, in
// that order; anything longer is treated as a full timestamp.
func (f *FHIRDateTime) UnmarshalJSON(data []byte) (err error) {
	strData := string(data)
	if len(data) > 12 {
		// Long enough to be a full timestamp; delegate to time.Time.
		f.Precision = Precision("timestamp")
		f.Time = time.Time{}
		return f.Time.UnmarshalJSON(data)
	}
	// Candidate layouts, tried in order of decreasing specificity.
	// TODO: the bare-time case should move into a separate type.
	attempts := []struct {
		precision Precision
		layout    string
	}{
		{Precision("date"), "\"2006-01-02\""},
		{Precision("year-month"), "\"2006-01\""},
		{Precision("year"), "\"2006\""},
		{Precision("time"), "\"15:04:05\""},
	}
	for _, a := range attempts {
		f.Precision = a.precision
		if f.Time, err = time.ParseInLocation(a.layout, strData, time.Local); err == nil {
			return nil
		}
	}
	f.Precision = ""
	return fmt.Errorf("unable to parse DateTime: %s", strData)
}

// MarshalJSON renders the stored time using the layout matching its
// precision; an unrecognised precision is an error.
func (f FHIRDateTime) MarshalJSON() ([]byte, error) {
	switch f.Precision {
	case Timestamp:
		return json.Marshal(f.Time.Format(time.RFC3339))
	case YearMonth:
		return json.Marshal(f.Time.Format("2006-01"))
	case Year:
		return json.Marshal(f.Time.Format("2006"))
	case Time:
		return json.Marshal(f.Time.Format("15:04:05"))
	case Date:
		return json.Marshal(f.Time.Format("2006-01-02"))
	}
	return nil, fmt.Errorf("FHIRDateTime.MarshalJSON: unrecognised precision: %s", f.Precision)
}
|
[
5
] |
/* Copyright (C) 2020 by iamslash */
package main
import "fmt"
// C[i][j] = probability until i-th coins with j heads
// = Prob until i-1 th coins with j heads * Prob i th coins with back +
// Prob until i-1 th coins with j-1 heads * Prob i th coins with head
// C[0][0] = 1
// C[i][j] = C[i-1][j-1] * P[i] + C[i-1][j] * (1 - P[i])
//
// P: [0.4, 0.8]
// T: 1
//
// 0 1
// 0 1.0 0
// 1 0.4 0.24
// 2 0.08
// 20ms 83.33% 12.6MB 100.00%
// dynamic programming
// O(NT) O(NT)
// probabilityOfHeads returns the probability of seeing exactly T heads
// when tossing every coin in P once, where P[i] is coin i's head
// probability. dp[r][h] is the probability of h heads after the first r
// coins. O(len(P)*T) time and space.
func probabilityOfHeads(P []float64, T int) float64 {
	rows := len(P) + 1
	dp := make([][]float64, rows)
	for r := range dp {
		dp[r] = make([]float64, T+1)
	}
	// Zero coins tossed: zero heads with certainty.
	dp[0][0] = 1.0
	// Column 0: every coin so far came up tails.
	for r := 1; r < rows; r++ {
		dp[r][0] = dp[r-1][0] * (1 - P[r-1])
	}
	// Either coin r is a head on top of h-1 heads, or a tail on top of h.
	for r := 1; r < rows; r++ {
		for h := 1; h <= T; h++ {
			dp[r][h] = dp[r-1][h-1]*P[r-1] + dp[r-1][h]*(1-P[r-1])
		}
	}
	return dp[len(P)][T]
}
// main is a placeholder entry point; the interesting logic lives in
// probabilityOfHeads above.
func main() {
	fmt.Println("hello world")
}
|
[
2
] |
package main
import (
"crypto/tls"
"fmt"
"net"
"net/http"
"net/url"
"os"
"github.com/gorilla/websocket"
"github.com/jhunt/go-ansi"
"github.com/shieldproject/shield/client/v2/shield"
"golang.org/x/crypto/ssh/terminal"
)
// This program connects to the SHIELD websocket and then just doesn't read the
// buffer
// main connects to a SHIELD server's event websocket, shrinks the TCP read
// buffer and then deliberately never reads from the connection — a manual
// back-pressure test tool. The target URL is the first CLI argument;
// credentials are prompted interactively.
func main() {
	if len(os.Args) < 2 {
		bailWith("positional argument <URL> is required")
	}
	targetURLStr := os.Args[1]
	targetURL, err := url.Parse(targetURLStr)
	if err != nil {
		bailWith("Could not parse URL: %s", err)
	}
	// Default to http and normalize the port so the scheme swap below has
	// a concrete host:port to work with.
	if targetURL.Scheme == "" {
		targetURL.Scheme = "http"
	} else if targetURL.Scheme != "http" && targetURL.Scheme != "https" {
		bailWith("Unknown scheme: %s", targetURL.Scheme)
	}
	if targetURL.Port() == "" {
		switch targetURL.Scheme {
		case "http":
			targetURL.Host = targetURL.Host + ":80"
		case "https":
			targetURL.Host = targetURL.Host + ":443"
		default:
			bailWith("Cannot determine URL port")
		}
	}
	// Authenticate against SHIELD to obtain a session token.
	shieldClient := shield.Client{
		URL:                targetURLStr,
		InsecureSkipVerify: true,
	}
	var username, password string
	fmt.Fprint(os.Stderr, "SHIELD Username: ")
	fmt.Scanln(&username)
	fmt.Fprint(os.Stderr, "SHIELD Password: ")
	// NOTE(review): reads the password from os.Stdout's fd — works when
	// stdin/stdout share a terminal, but os.Stdin.Fd() looks intended.
	passBytes, err := terminal.ReadPassword(int(os.Stdout.Fd()))
	fmt.Println("")
	if err != nil {
		bailWith("could not read password: %s", err)
	}
	password = string(passBytes)
	err = shieldClient.Authenticate(&shield.LocalAuth{
		Username: username,
		Password: password,
	})
	if err != nil {
		bailWith("failed to authenticate: %s", err)
	}
	// Dial the events websocket with the session token attached.
	websocketDialer := websocket.Dialer{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	headers := http.Header{}
	headers.Add("X-Shield-Session", shieldClient.Session)
	if targetURL.Scheme == "http" {
		targetURL.Scheme = "ws"
	} else {
		targetURL.Scheme = "wss"
	}
	targetURL.Path = "/v2/events"
	conn, _, err := websocketDialer.Dial(targetURL.String(), headers)
	if err != nil {
		bailWith("error when dialing: %s", err.Error())
	}
	// Shrink the kernel read buffer so the unread stream backs up quickly.
	netConn := conn.UnderlyingConn()
	tcpConn := netConn.(*net.TCPConn)
	fmt.Fprintf(os.Stderr, "Setting read buffer size\n")
	err = tcpConn.SetReadBuffer(4096)
	if err != nil {
		bailWith("Could not set read buffer size: %s", err)
	}
	fmt.Fprintf(os.Stderr, "Successfully set buffer size\n")
	// Hold the connection open (never reading) until the user quits.
	quitChan := make(chan bool)
	go func() {
		for {
			fmt.Fprintf(os.Stderr, "Type `quit' to exit: ")
			var input string
			fmt.Scanln(&input)
			if input == "quit" || input == "exit" {
				quitChan <- true
				break
			}
		}
	}()
	<-quitChan
	conn.Close()
}
// bailWith prints the formatted message in red to stderr (panicking with a
// plain rendering if the colored write itself fails) and exits with
// status 1.
func bailWith(format string, args ...interface{}) {
	if _, err := ansi.Fprintf(os.Stderr, "@R{"+format+"}\n", args...); err != nil {
		panic(fmt.Sprintf(format, args...))
	}
	os.Exit(1)
}
|
[
0
] |
package demo
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"sync"
"time"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
v1 "k8s.io/api/core/v1"
"github.com/windmilleng/tilt/internal/container"
"github.com/windmilleng/tilt/internal/engine"
"github.com/windmilleng/tilt/internal/hud"
"github.com/windmilleng/tilt/internal/k8s"
"github.com/windmilleng/tilt/internal/logger"
"github.com/windmilleng/tilt/internal/model"
"github.com/windmilleng/tilt/internal/store"
"github.com/windmilleng/tilt/internal/tiltfile"
)
// RepoBranch names the git branch the demo checks out for its "change
// branch" step.
type RepoBranch string

// Runs the demo script
type Script struct {
	hud     hud.HeadsUpDisplay
	upper   engine.Upper
	store   *store.Store
	env     k8s.Env
	kClient k8s.Client
	branch  RepoBranch
	runtime container.Runtime
	tfl     tiltfile.TiltfileLoader
	// readTiltfileCh hands the temp working directory from runSteps to the
	// goroutine that loads the Tiltfile.
	readTiltfileCh chan string
	// podMonitor tracks cluster health derived from store updates.
	podMonitor *podMonitor
}
// NewScript wires up a demo Script and subscribes its pod monitor to the
// store so health flags stay current.
func NewScript(upper engine.Upper, hud hud.HeadsUpDisplay, kClient k8s.Client,
	env k8s.Env, st *store.Store, branch RepoBranch, runtime container.Runtime,
	tfl tiltfile.TiltfileLoader) Script {
	s := Script{
		upper:          upper,
		hud:            hud,
		env:            env,
		kClient:        kClient,
		branch:         branch,
		readTiltfileCh: make(chan string),
		podMonitor:     &podMonitor{},
		store:          st,
		runtime:        runtime,
		tfl:            tfl,
	}
	st.AddSubscriber(s.podMonitor)
	return s
}
// podMonitor summarizes store state into three health flags, recomputed on
// every OnChange; mu guards all three fields.
type podMonitor struct {
	hasBuildError bool
	hasPodRestart bool
	healthy       bool
	mu            sync.Mutex
}
// OnChange recomputes the health flags from the current store state: the
// cluster counts as healthy only when manifests exist, nothing is
// building, every pod is running without restarts, the last build
// succeeded and no file changes are pending.
func (m *podMonitor) OnChange(ctx context.Context, st store.RStore) {
	m.mu.Lock()
	defer m.mu.Unlock()
	state := st.RLockState()
	defer st.RUnlockState()
	// Reset and recompute from scratch on every change notification.
	m.hasPodRestart = false
	m.hasBuildError = false
	m.healthy = true
	if len(state.ManifestTargets) == 0 {
		m.healthy = false
	}
	if state.CurrentlyBuilding != "" {
		m.healthy = false
	}
	for _, ms := range state.ManifestStates() {
		pod := ms.MostRecentPod()
		if pod.Phase != v1.PodRunning {
			m.healthy = false
		}
		if pod.ContainerRestarts > 0 {
			m.hasPodRestart = true
			m.healthy = false
		}
		if ms.LastBuild().Error != nil {
			m.hasBuildError = true
			m.healthy = false
		}
		for _, status := range ms.BuildStatuses {
			if len(status.PendingFileChanges) > 0 {
				m.healthy = false
			}
		}
	}
}
// waitUntilPodsReady blocks until the monitor reports a healthy cluster or
// ctx is cancelled.
func (m *podMonitor) waitUntilPodsReady(ctx context.Context) error {
	return m.waitUntilCond(ctx, func() bool {
		return m.healthy
	})
}

// waitUntilBuildError blocks until some manifest's last build failed or
// ctx is cancelled.
func (m *podMonitor) waitUntilBuildError(ctx context.Context) error {
	return m.waitUntilCond(ctx, func() bool {
		return m.hasBuildError
	})
}

// waitUntilPodRestart blocks until some pod has restarted or ctx is
// cancelled.
func (m *podMonitor) waitUntilPodRestart(ctx context.Context) error {
	return m.waitUntilCond(ctx, func() bool {
		return m.hasPodRestart
	})
}
// waitUntilCond polls f under the monitor's lock every 100ms until it
// returns true, or returns ctx.Err() when the context is cancelled.
func (m *podMonitor) waitUntilCond(ctx context.Context, f func() bool) error {
	for {
		m.mu.Lock()
		done := f()
		m.mu.Unlock()
		if done {
			return nil
		}
		select {
		case <-time.After(100 * time.Millisecond):
			// poll again
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
// Run executes the demo: it refuses non-local clusters and non-docker
// runtimes, then runs the HUD, the scripted steps and the Tiltfile-driven
// engine concurrently, cancelling all three when any one finishes.
func (s Script) Run(ctx context.Context) error {
	if !s.env.IsLocalCluster() {
		_, _ = fmt.Fprintf(os.Stderr, "tilt demo mode only supports Docker For Mac, Minikube, and MicroK8s\n")
		_, _ = fmt.Fprintf(os.Stderr, "check your current cluster with:\n")
		_, _ = fmt.Fprintf(os.Stderr, "\nkubectl config get-contexts\n\n")
		return nil
	} else if s.runtime != container.RuntimeDocker {
		_, _ = fmt.Fprintf(os.Stderr, "tilt demo mode only supports clusters configured with docker\n")
		_, _ = fmt.Fprintf(os.Stderr, "Current container runtime: %s\n", s.runtime)
		return nil
	}
	// Route log output through the store so the HUD can display it.
	l := engine.NewLogActionLogger(ctx, s.store.Dispatch)
	out := l.Writer(logger.InfoLvl)
	ctx = logger.WithLogger(ctx, l)
	ctx, cancel := context.WithCancel(ctx)
	g, ctx := errgroup.WithContext(ctx)
	// Each goroutine cancels the shared context on exit so the others
	// wind down too.
	g.Go(func() error {
		defer cancel()
		return s.hud.Run(ctx, s.upper.Dispatch, hud.DefaultRefreshInterval)
	})
	g.Go(func() error {
		defer cancel()
		return s.runSteps(ctx, out)
	})
	g.Go(func() error {
		defer cancel()
		// Wait for runSteps to hand over the working directory, then load
		// the Tiltfile and start the engine.
		var dir string
		select {
		case dir = <-s.readTiltfileCh:
		case <-ctx.Done():
			return ctx.Err()
		}
		tfPath := filepath.Join(dir, tiltfile.FileName)
		// TODO(dmiller): not this?
		tlr, err := s.tfl.Load(ctx, tfPath, nil)
		if err != nil {
			return err
		}
		// Clean up deployed entities even after ctx is cancelled, hence
		// the fresh background context.
		defer s.cleanUp(newBackgroundContext(ctx), tlr.Manifests)
		initAction := engine.InitAction{
			WatchFiles:   true,
			Manifests:    tlr.Manifests,
			TiltfilePath: tfPath,
			Warnings:     tlr.Warnings,
		}
		return s.upper.Init(ctx, initAction)
	})
	return g.Wait()
}
// newBackgroundContext returns a fresh background context carrying the
// logger from ctx, so cleanup can log after ctx itself is cancelled.
func newBackgroundContext(ctx context.Context) context.Context {
	l := logger.Get(ctx)
	return logger.WithLogger(context.Background(), l)
}
// cleanUp deletes every k8s entity declared by the given manifests,
// logging (but not propagating) parse or delete failures.
func (s Script) cleanUp(ctx context.Context, manifests []model.Manifest) {
	if manifests == nil {
		return
	}
	entities, err := engine.ParseYAMLFromManifests(manifests...)
	if err != nil {
		logger.Get(ctx).Infof("Parsing entities: %v", err)
		return
	}
	err = s.kClient.Delete(ctx, entities)
	if err != nil {
		logger.Get(ctx).Infof("Deleting entities: %v", err)
	}
}
// runSteps walks the package-level steps list inside a temp directory:
// each step may set a HUD narration, run a shell command, trigger the
// Tiltfile load (CreateManifests), or check out the demo branch, then
// pauses and optionally waits on a pod-monitor condition.
func (s Script) runSteps(ctx context.Context, out io.Writer) error {
	tmpDir, err := ioutil.TempDir("", "tiltdemo")
	if err != nil {
		return errors.Wrap(err, "demo.runSteps")
	}
	defer func() {
		_ = os.RemoveAll(tmpDir)
	}()
	for _, step := range steps {
		// Branch-switching steps only make sense when a branch was given.
		if step.ChangeBranch && s.branch == "" {
			continue
		}
		err := s.hud.SetNarrationMessage(ctx, step.Narration)
		if err != nil {
			return err
		}
		if step.Command != "" {
			cmd := exec.CommandContext(ctx, "sh", "-c", step.Command)
			cmd.Stdout = out
			cmd.Stderr = out
			cmd.Dir = tmpDir
			err := cmd.Run()
			if err != nil {
				return errors.Wrap(err, "demo.runSteps")
			}
		} else if step.CreateManifests {
			// Hand the working dir to the Tiltfile-loading goroutine.
			s.readTiltfileCh <- tmpDir
		} else if step.ChangeBranch {
			cmd := exec.CommandContext(ctx, "git", "checkout", string(s.branch))
			cmd.Stdout = out
			cmd.Stderr = out
			cmd.Dir = tmpDir
			err := cmd.Run()
			if err != nil {
				return errors.Wrap(err, "demo.runSteps")
			}
		}
		// Fixed pause between steps, abandoned early on cancellation.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(Pause):
		}
		// Wait conditions are best-effort: errors are deliberately ignored.
		if step.WaitForHealthy {
			_ = s.podMonitor.waitUntilPodsReady(ctx)
			continue
		} else if step.WaitForBuildError {
			_ = s.podMonitor.waitUntilBuildError(ctx)
			continue
		} else if step.WaitForPodRestart {
			_ = s.podMonitor.waitUntilPodRestart(ctx)
			continue
		}
	}
	return nil
}
|
[
5
] |
package solution
// maximalSquare returns the area of the largest square of '1' cells in the
// matrix. It keeps two rolling DP rows: the candidate side at (r, c) is
// min(side to the left, side up-left) + 1, capped by the run of '1's going
// upward from (r, c).
func maximalSquare(matrix [][]byte) int {
	if len(matrix) == 0 {
		return 0
	}
	numRow, numCol := len(matrix), len(matrix[0])
	prev := make([]int, numCol+1) // sides for the previous row (1-indexed)
	curr := make([]int, numCol+1) // sides for the current row (1-indexed)
	best := 0
	for r := 0; r < numRow; r++ {
		for c := 0; c < numCol; c++ {
			// Upper bound from the left and upper-left neighbors.
			limit := prev[c]
			if curr[c] < limit {
				limit = curr[c]
			}
			limit++
			side := detectSquareSize(matrix, r, c, limit)
			curr[c+1] = side
			if side > best {
				best = side
			}
		}
		copy(prev, curr)
	}
	return best * best
}

// detectSquareSize counts the consecutive '1' cells going upward in column
// m starting at row n, capped at maxSize.
func detectSquareSize(matrix [][]byte, n, m, maxSize int) int {
	size := 0
	for size < maxSize && size <= n && matrix[n-size][m] != zero {
		size++
	}
	return size
}

const (
	zero = '0'
	one  = '1'
)
|
[
1
] |
package main
import (
"fmt"
"math"
)
// Vertex is a 2D point.
type Vertex struct {
	X, Y float64
}

// Abs returns the Euclidean norm of v.
func (v Vertex) Abs() float64 {
	return math.Sqrt(v.X*v.X + v.Y*v.Y)
}

// Scale multiplies both coordinates by f in place.
// Fix: the original value receiver mutated a copy, so Scale had no effect
// on the caller's vertex; a pointer receiver makes the mutation stick.
func (v *Vertex) Scale(f float64) {
	v.X = v.X * f
	v.Y = v.Y * f
}
// main builds a vertex, scales it, and prints its distance from the origin.
func main() {
	v := Vertex{3, 4}
	v.Scale(10)
	fmt.Println(v.Abs())
}
|
[
0
] |
package controller
import (
"testing"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/controller"
kresourcequota "k8s.io/kubernetes/pkg/controller/resourcequota"
"k8s.io/kubernetes/pkg/runtime"
imageapi "github.com/openshift/origin/pkg/image/api"
)
// testReplenishment lets us test replenishment functions are invoked
type testReplenishment struct {
	groupKind unversioned.GroupKind // last group kind passed to Replenish
	namespace string // last namespace passed to Replenish
}
// mock function that holds onto the last kind that was replenished
// Replenish records its arguments so the test below can assert whether (and
// with what) the controller invoked the replenishment callback.
func (t *testReplenishment) Replenish(groupKind unversioned.GroupKind, namespace string, object runtime.Object) {
	t.groupKind = groupKind
	t.namespace = namespace
}
// TestImageStreamReplenishmentUpdateFunc verifies that the update function
// produced by ImageStreamReplenishmentUpdateFunc triggers replenishment
// exactly when an image stream's status tags change between old and new.
func TestImageStreamReplenishmentUpdateFunc(t *testing.T) {
	for _, tc := range []struct {
		name           string
		oldISStatus    imageapi.ImageStreamStatus
		newISStatus    imageapi.ImageStreamStatus
		expectedUpdate bool // whether Replenish should be called
	}{
		{
			name:           "empty",
			expectedUpdate: false,
		},
		{
			name: "no change",
			oldISStatus: imageapi.ImageStreamStatus{
				Tags: map[string]imageapi.TagEventList{
					"foo": {
						Items: []imageapi.TagEvent{
							{DockerImageReference: "foo-ref"},
						},
					},
				},
			},
			newISStatus: imageapi.ImageStreamStatus{
				Tags: map[string]imageapi.TagEventList{
					"foo": {
						Items: []imageapi.TagEvent{
							{DockerImageReference: "foo-ref"},
						},
					},
				},
			},
			expectedUpdate: false,
		},
		{
			name: "first image stream tag",
			newISStatus: imageapi.ImageStreamStatus{
				Tags: map[string]imageapi.TagEventList{
					"latest": {
						Items: []imageapi.TagEvent{
							{DockerImageReference: "latest-ref"},
							{DockerImageReference: "older"},
						},
					},
				},
			},
			expectedUpdate: true,
		},
		{
			name: "image stream tag event deleted",
			oldISStatus: imageapi.ImageStreamStatus{
				Tags: map[string]imageapi.TagEventList{
					"latest": {
						Items: []imageapi.TagEvent{
							{DockerImageReference: "latest-ref"},
							{DockerImageReference: "older"},
						},
					},
				},
			},
			newISStatus: imageapi.ImageStreamStatus{
				Tags: map[string]imageapi.TagEventList{
					"latest": {
						Items: []imageapi.TagEvent{
							{DockerImageReference: "latest-ref"},
						},
					},
				},
			},
			expectedUpdate: true,
		},
	} {
		// Fresh mock per case so state from a previous case cannot leak in.
		mockReplenish := &testReplenishment{}
		options := kresourcequota.ReplenishmentControllerOptions{
			GroupKind:         kapi.Kind("ImageStream"),
			ReplenishmentFunc: mockReplenish.Replenish,
			ResyncPeriod:      controller.NoResyncPeriodFunc,
		}
		oldIS := &imageapi.ImageStream{
			ObjectMeta: kapi.ObjectMeta{Namespace: "test", Name: "is"},
			Status:     tc.oldISStatus,
		}
		newIS := &imageapi.ImageStream{
			ObjectMeta: kapi.ObjectMeta{Namespace: "test", Name: "is"},
			Status:     tc.newISStatus,
		}
		updateFunc := ImageStreamReplenishmentUpdateFunc(&options)
		updateFunc(oldIS, newIS)
		if tc.expectedUpdate {
			// The mock must have recorded the expected kind and namespace.
			if mockReplenish.groupKind != kapi.Kind("ImageStream") {
				t.Errorf("[%s]: Unexpected group kind %v", tc.name, mockReplenish.groupKind)
			}
			if mockReplenish.namespace != oldIS.Namespace {
				t.Errorf("[%s]: Unexpected namespace %v", tc.name, mockReplenish.namespace)
			}
		} else {
			// The mock must still be zero-valued, i.e. never called.
			if mockReplenish.groupKind.Group != "" || mockReplenish.groupKind.Kind != "" || mockReplenish.namespace != "" {
				t.Errorf("[%s]: Update function unexpectedly called on %s in namespace %s", tc.name, mockReplenish.groupKind, mockReplenish.namespace)
			}
		}
	}
}
|
[
4
] |
package router
import (
"github.com/gin-gonic/gin"
v1 "goblog/api/v1"
)
// InitUserRouter registers user-related endpoints under the "user" group.
func InitUserRouter(Router *gin.RouterGroup) {
	UserRouter := Router.Group("user")
	{
		UserRouter.POST("register", v1.Register)
		UserRouter.POST("login", v1.Login)
		// UserRouter.POST("changePassword", v1.ChangePassword) // change password
		// UserRouter.PUT("setUserInfo", v1.SetUserInfo) // set user profile info
	}
}
|
[
2
] |
package authsession
import (
"database/sql"
"errors"
"fmt"
"strconv"
. "go-sugar/db"
"go-sugar/db/request"
)
// Columns
// Column names of the auth_session table, used to build query conditions.
const (
	UserID string = "user_id"
	DeviceIDColumn string = "device_id"
	Token string = "token"
	CreatedAt string = "created_at"
	UpdatedAt string = "updated_at"
)
// Repository User Repository
// Repository provides data access for the auth_session table.
type Repository struct {
	tableName string
}

// Repo repository — package-level singleton bound to the auth_session table.
var Repo = Repository{tableName: "auth_session"}
// CleanBeforeCreate - remove previous all user's session
// Deletes every auth_session row matching both the user id and device id, so
// a subsequent Create leaves at most one session per (user, device) pair.
// Returns false and the error on failure.
func (r *Repository) CleanBeforeCreate(a *Auth) (bool, error) {
	Request := request.New(DB)
	_, err := Request.
		Delete().
		From(r.tableName).
		Where(Request.NewCond(UserID, "=", strconv.Itoa(a.UserID))).
		Where(Request.NewCond(DeviceIDColumn, "=", a.DeviceID)).
		Exec()
	if err != nil {
		fmt.Println("CleanBeforeCreate: ", err)
		return false, err
	}
	return true, nil
}
// Create new auth session
// Inserts (user_id, device_id, token) via a parameterized statement and
// returns the same *Auth on success.
func (r *Repository) Create(auth *Auth) (*Auth, error) {
	str := `INSERT INTO ` + r.tableName + ` (user_id, device_id, token) values(?, ?, ?)`
	fmt.Println(str)
	_, err := DB.Exec(str, auth.UserID, auth.DeviceID, auth.Token)
	if err != nil {
		return nil, err
	}
	return auth, nil
}
// GetByDeviceID get Auth session by device_id.
//
// Rows are ordered by created_at descending and limited to one, so the most
// recent session wins. Returns an error when no session exists.
func (r *Repository) GetByDeviceID(DeviceID string) (*Auth, error) {
	Request := request.New(DB)
	var orderBy []string
	orderBy = append(orderBy, CreatedAt)
	rows, err := Request.
		Select([]string{}).
		From(r.tableName).
		Where(Request.NewCond(DeviceIDColumn, "=", DeviceID)).
		OrderBy(orderBy).
		Desc().
		Limit(1).
		Query()
	if err != nil {
		return nil, err
	}
	// Always release the result set. parseRows iterates it fully in the
	// happy path, but an explicit Close is idempotent and prevents a
	// connection leak if iteration ever stops early.
	defer rows.Close()
	auths := parseRows(rows)
	if len(auths) > 0 {
		return &auths[0], nil
	}
	return nil, errors.New("no user with this device id")
}
// parseRows scans every result row into an Auth slice. A row that fails to
// scan is logged and skipped rather than aborting the whole read.
func parseRows(rows *sql.Rows) []Auth {
	var auths []Auth
	for rows.Next() {
		p, err := parseRow(rows)
		if err != nil {
			fmt.Println("Parse Error")
			fmt.Println(err)
			continue
		}
		auths = append(auths, p)
	}
	// rows.Next returns false both at EOF and on iteration error; surface
	// the latter instead of silently returning a truncated result set.
	if err := rows.Err(); err != nil {
		fmt.Println("Rows Error")
		fmt.Println(err)
	}
	return auths
}
// parseRow scans the current row into an Auth value. The column order must
// match the scan order: user_id, device_id, token, created_at, updated_at.
func parseRow(row *sql.Rows) (Auth, error) {
	p := Auth{}
	err := row.Scan(&p.UserID, &p.DeviceID, &p.Token, &p.CreatedAt, &p.UpdatedAt)
	return p, err
}
|
[
2
] |
package main
import (
"bytes"
"encoding/hex"
"flag"
"fmt"
"github.com/larryhou/unity-gocache/client"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"math"
"math/rand"
"net"
"net/http"
"strconv"
"strings"
"time"
"unsafe"
)
// environ holds the process-wide configuration (populated from flags in
// main) and the coordination channels shared by the dispatcher goroutine,
// the client goroutines, and the command server.
var environ struct {
	secret string // command-server authentication secret
	count int // initial client count
	close float64 // probability of closing a client after an operation
	down float64 // download (vs upload) operation ratio
	addr string // cache server address
	port int // cache server port
	cmdPort int // local command server port
	verify bool // verify sha256 of downloaded entities
	queue []*Context // NOTE(review): appears unused in this file — confirm
	library []*client.Entity // entities uploaded so far, candidates for download
	cutouts int // number of idle clients still to be force-closed
	idle chan *Context // clients reporting ready for more work
	closed chan struct{} // client-closed notifications
	entreq chan *Context // download-entity requests
	entity chan *client.Entity // newly uploaded entities
	rand *rand.Rand // shared randomness source
}
// Context couples one simulated client with the channels the dispatcher
// uses to hand it work and entities to download.
type Context struct {
	work chan int // work assignments; a random int drives the up/down choice
	entpsh chan *client.Entity // entity pushed in response to a download request
	u *client.Unity // the underlying cache-server connection
}
// Uintptr returns the context's address, used purely as a log correlation id.
func (c *Context) Uintptr() uintptr {
	return uintptr(unsafe.Pointer(c))
}
// Close tears down the context's channels and the underlying connection
// (when one exists). Closing the channels ends the runClient select loop.
func (c *Context) Close() error {
	close(c.entpsh)
	close(c.work)
	if c.u != nil {return c.u.Close()}
	return nil
}
// logger is the process-wide structured logger, initialized in main.
var logger *zap.Logger
// main parses flags, starts the central dispatcher goroutine that owns the
// entity library and client lifecycle, spawns the initial clients, and then
// serves the plain-TCP command protocol on cmdPort.
func main() {
	level := 0
	flag.IntVar(&environ.count, "count", 10, "initial client count")
	flag.Float64Var(&environ.close, "close", 0.15, "close ratio[0,1] after upload/download")
	flag.StringVar(&environ.secret, "secret", "larryhou", "command secret")
	flag.Float64Var(&environ.down, "down", 0.90, "download operation ratio[0,1]")
	flag.StringVar(&environ.addr, "addr", "127.0.0.1", "server address")
	flag.IntVar(&environ.port, "port", 9966, "server port")
	flag.IntVar(&environ.cmdPort, "cmd-port", 19966, "local command server port")
	flag.IntVar(&level, "log-level", -1, "log level debug=-1 info=0 warn=1 error=2 dpanic=3 panic=4 fatal=5")
	flag.BoolVar(&environ.verify, "verify", true, "verify sha256")
	flag.Parse()
	if v, err := zap.NewDevelopment(zap.IncreaseLevel(zapcore.Level(level))); err != nil {panic(err)} else {logger = v}
	environ.idle = make(chan *Context)
	environ.closed = make(chan struct{})
	environ.entreq = make(chan *Context)
	environ.entity = make(chan *client.Entity)
	s := rand.NewSource(time.Now().UnixNano())
	r := rand.New(s)
	environ.rand = r
	// Dispatcher: the single goroutine that mutates environ.library and
	// environ.cutouts, and decides each idle client's fate.
	go func() {
		for {
			select {
			case ent := <-environ.entity:
				// A client finished an upload; remember the entity so other
				// clients can download it later.
				logger.Debug("ENTITY", zap.String("guid", hex.EncodeToString(ent.Guid)))
				environ.library = append(environ.library, ent)
			case ctx := <-environ.idle:
				num := r.Int() + 1
				logger.Debug("IDLE", zap.Uintptr("ctx", ctx.Uintptr()), zap.Int("num", num))
				if environ.cutouts > 0 {
					// A "cut" command is pending: close this client without
					// replacing it.
					logger.Debug("cut", zap.Uintptr("ctx", ctx.Uintptr()), zap.Int("num", num))
					environ.cutouts--
					ctx.Close()
				} else if float64(num%10000)/10000 > environ.close {
					// Keep the client alive and hand it more work.
					logger.Debug("assign", zap.Uintptr("ctx", ctx.Uintptr()), zap.Int("num", num))
					ctx.work <- num
				} else {
					// Randomly close the client; a replacement is spawned by
					// the closed-event handler below.
					ctx.Close()
					go func() {
						environ.closed <- struct{}{} /* notify close event */
						logger.Debug("quit", zap.Uintptr("ctx", ctx.Uintptr()), zap.Float64("close", environ.close), zap.Float64("ratio", 1 - float64(num%100)/100))
					}()
				}
			case ctx := <-environ.entreq:
				// A client wants something to download; wait until the
				// library is non-empty, then pick an entity biased heavily
				// toward recent uploads (p^4 over a window of at most 200).
				logger.Debug("ENTREQ", zap.Uintptr("ctx", ctx.Uintptr()))
				for {
					if len(environ.library) > 0 {
						p := r.Float64()
						span := len(environ.library)
						if span > 200 { span = 200 }
						n := math.Pow(p, 4) * float64(span)
						go func() {
							ctx.entpsh <- environ.library[len(environ.library) - int(n) - 1]
							logger.Debug("send entity", zap.Uintptr("ctx", ctx.Uintptr()))
						}()
						break
					}
					time.Sleep(time.Second)
				}
			case <-environ.closed:
				// Keep the population constant: one in, one out.
				logger.Debug("CLOSED")
				go addClients(1)
			}
		}
	}()
	// NOTE(review): no handlers are registered; presumably intended for
	// net/http/pprof — confirm the blank import elsewhere in the build.
	go http.ListenAndServe(":9999", nil)
	addClients(environ.count)
	server, err := net.Listen("tcp", fmt.Sprintf(":%d", environ.cmdPort))
	if err != nil { panic(err) }
	for {
		if c, err := server.Accept(); err == nil { go handle(c) }
	}
}
func readString(c net.Conn) (string, error) {
var buf bytes.Buffer
b := make([]byte, 1)
for {
if _, err := c.Read(b); err != nil {return "", err}
if b[0] == 0 {return buf.String(),nil}
buf.WriteByte(b[0])
}
}
// readInt reads a NUL-terminated decimal string from c and converts it to an
// int. On any failure a diagnostic is written back to the peer and the error
// is returned alongside a zero value.
func readInt(c net.Conn) (int, error) {
	raw, err := readString(c)
	if err != nil {
		c.Write([]byte(fmt.Sprintf("read int err: %v", err)))
		return 0, err
	}
	raw = strings.TrimSpace(raw)
	v, convErr := strconv.Atoi(raw)
	if convErr != nil {
		c.Write([]byte(fmt.Sprintf("wrong int value: %s", raw)))
		return 0, convErr
	}
	return v, nil
}
// handle serves one command connection: it authenticates the peer against
// the shared secret, reads a 3-byte command, then loops dispatching it.
//
// Commands: "add"/"cut" change the client population, "clo"/"dow" set the
// close and download ratios (clamped to 0..100 percent).
//
// NOTE(review): the 3-byte command is read ONCE, before the loop, so every
// iteration re-executes the same command with a freshly read integer. If the
// protocol is meant to accept a new command per iteration, the Read belongs
// inside the loop — confirm the intended wire protocol.
func handle(c net.Conn) {
	defer c.Close()
	if secret, err := readString(c); err != nil {return} else {
		if environ.secret != secret {
			c.Write([]byte(fmt.Sprintf("secret not match: %s", secret)))
			return
		}
	}
	buf := make([]byte, 64)
	if _, err := c.Read(buf[:3]); err != nil {
		c.Write([]byte(fmt.Sprintf("read command err: %v", err)))
		return
	}
	for {
		cmd := string(buf[:3])
		logger.Info("command", zap.String("name", cmd), zap.String("addr", c.RemoteAddr().String()))
		switch cmd {
		case "add":
			if num, err := readInt(c); err != nil {return} else {go addClients(num)}
		case "cut":
			if num, err := readInt(c); err != nil {return} else {go cutClients(num)}
		case "clo":
			if num, err := readInt(c); err != nil {return} else {
				if num > 100 { num = 100 } else if num < 0 { num = 0 }
				environ.close = float64(num) / 100
			}
		case "dow":
			if num, err := readInt(c); err != nil {return} else {
				if num > 100 { num = 100 } else if num < 0 { num = 0 }
				environ.down = float64(num) / 100
			}
		default: return
		}
	}
}
// addClients dials num new cache-server connections and starts a runClient
// goroutine for each. A failed dial is reported through environ.closed so
// the dispatcher keeps the population bookkeeping consistent.
func addClients(num int) {
	for i := 0; i < num; i++ {
		u := &client.Unity{Addr: environ.addr, Port: environ.port, Verify: environ.verify, Rand: environ.rand}
		if err := u.Connect(); err != nil {
			u.Close()
			go func() {
				environ.closed <- struct{}{}
				logger.Error("connect err", zap.Error(err))
			}()
			continue
		}
		go runClient(u)
	}
}
// runClient drives one simulated client: it announces itself idle, then
// alternates uploads and downloads as the dispatcher assigns work, pushing
// results back through the environ channels. It returns when its channels
// are closed (work/entpsh yield zero values).
func runClient(u *client.Unity) {
	defer u.Close()
	ctx := &Context{u: u, work: make(chan int), entpsh: make(chan *client.Entity)}
	logger.Debug("client", zap.Uintptr("ctx", ctx.Uintptr()))
	go func() {
		environ.idle <- ctx
		logger.Debug("push idle n", zap.Uintptr("ctx", ctx.Uintptr()))
	}()
	for {
		select {
		case num := <-ctx.work:
			if num == 0 {return} // channel closed by dispatcher
			logger.Debug("++++", zap.Uintptr("ctx", ctx.Uintptr()), zap.Int("num", num))
			// Upload when the draw exceeds the download ratio, or when there
			// is nothing to download yet.
			if float64(num%10000)/10000 > environ.down || len(environ.library) == 0 {
				logger.Debug("upload", zap.Uintptr("ctx", ctx.Uintptr()))
				if ent, err := u.Upload(); err == nil {
					logger.Debug("upload", zap.Uintptr("ctx", ctx.Uintptr()), zap.String("guid", hex.EncodeToString(ent.Guid)))
					go func() {
						environ.idle <- ctx
						logger.Debug("push idle u", zap.Uintptr("ctx", ctx.Uintptr()))
					}()
					go func() {
						environ.entity <- ent
						logger.Debug("push entity", zap.Uintptr("ctx", ctx.Uintptr()))
					}()
				} else {
					// BUG FIX: zap messages are plain strings, not printf
					// formats — the old "upload err: %v" printed the verb
					// literally. The error already travels via zap.Error.
					logger.Error("upload err", zap.Error(err))
				}
			} else {
				go func() {
					environ.entreq <- ctx /* send download request */
					logger.Debug("down req")
				}()
			}
		case ent := <-ctx.entpsh:
			if ent == nil {return} // channel closed by dispatcher
			logger.Debug("down", zap.Uintptr("ctx", ctx.Uintptr()), zap.String("guid", hex.EncodeToString(ent.Guid)))
			if err := u.Download(ent); err != nil {
				// BUG FIX: same printf-verb misuse as above ("down err: %v").
				logger.Error("down err",
					zap.String("guid", hex.EncodeToString(ent.Guid)),
					zap.String("hash", hex.EncodeToString(ent.Hash)), zap.Error(err))
			} else {
				go func() {
					environ.idle <- ctx
					logger.Debug("push idle d", zap.Uintptr("ctx", ctx.Uintptr()))
				}()
			}
		}
	}
}
// cutClients schedules num idle clients to be closed without replacement;
// the dispatcher decrements the counter as it closes them.
func cutClients(num int) {
	if num > 0 { environ.cutouts = num }
}
|
[
4,
5
] |
// -----
// xkcd.go
//
// A program for indexing all existing xkcd comics, and allowing the CLI user to search them by keyword.
//
// Author: Kawai Washburn <[email protected]>
// -----
package main
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"strings"
)
// Remote endpoint pieces (https://xkcd.com/<num>/info.0.json) and the local
// path where the JSON index is persisted between runs.
const xkcdURL = "https://xkcd.com/"
const xkcdSuffix = "info.0.json"
const indexPath = "./comix.dat"
// comic mirrors the JSON document served by xkcd's info.0.json endpoint.
// Field names match the JSON keys case-insensitively except SafeTitle,
// which needs an explicit tag.
type comic struct {
	Month string
	Num int // comic number; also the index key (as a string)
	Link string
	Year string
	News string
	SafeTitle string `json:"safe_title"`
	Transcript string // searched by the "search" command
	Alt string
	Img string
	Title string
	Day string
}
// comicIdx is the in-memory comic index, keyed by comic number as a string.
type comicIdx struct {
	idx map[string]comic
}
// main parses the CLI arguments, loads the on-disk index, runs the requested
// command ("index" fetches comics, "search" scans transcripts), and persists
// the index back to disk.
func main() {
	args, err := getArgs()
	if err != nil {
		log.Fatal(err)
	}
	comIdx, err := loadIdx()
	if err != nil {
		log.Fatalf("Unable to load index from file: %s", err)
	}
	comm := args[0]
	switch comm {
	case "index":
		idx := args[1]
		log.Printf("the current command is 'index' with last index '%s'\n", idx)
		getComics(idx, &comIdx)
	case "search":
		phrase := args[1]
		log.Printf("the current command is 'search' with search phrase '%s'\n", phrase)
		cList, err := comIdx.search(phrase)
		if err != nil {
			log.Printf("Error while searching for comic: %s", err)
		}
		if len(cList) == 0 {
			log.Printf("No results found for '%s'", phrase)
		}
		// TODO: need to print the resulting cList in a more useful way
		for _, cmc := range cList {
			fmt.Printf("Found '%s' in comic %v, with transcript:\n \"%s\"\n\n", phrase, cmc.Num, cmc.Transcript)
		}
	}
	// Persist whatever the command changed (or re-write unchanged data).
	err = dumpIdx(&comIdx)
	if err != nil {
		log.Fatalf("Error writing index to disk: %v", err)
	}
}
func getArgs() ([]string, error) {
args := os.Args[1:]
if len(args) > 2 {
return nil, errors.New("too many arguments supplied")
} else if args[0] == "index" {
// If the index command is called with no number,
// default to only indexing the latest comic (i.e. the last '1' comics)
if len(args) == 1 {
args = append(args, "1")
} else {
if args[1] != "all" {
i, err := strconv.Atoi(args[1])
if err != nil {
log.Fatalf("invalid value for index: %v", args[1])
}
if i < 0 {
args[1] = "1"
}
}
}
}
return args, nil
}
// loadIdx loads the comic index from indexPath. A missing index file is not
// an error — it simply yields an empty index, so the very first run of the
// program works instead of fataling before the file has ever been written.
func loadIdx() (comicIdx, error) {
	cIdx := comicIdx{idx: make(map[string]comic)}
	b, err := ioutil.ReadFile(indexPath)
	if err != nil {
		if os.IsNotExist(err) {
			return cIdx, nil // first run: start with an empty index
		}
		return cIdx, errors.New("error opening index from disk at" + indexPath)
	}
	if err := json.Unmarshal(b, &cIdx.idx); err != nil {
		return cIdx, errors.New("error loading index from disk at" + indexPath)
	}
	return cIdx, nil
}
// dumpIdx serializes the index to JSON and writes it to indexPath with
// permissions 0644, overwriting any previous file.
func dumpIdx(cIdx *comicIdx) error {
	idx, err := json.Marshal(cIdx.idx)
	if err != nil {
		return fmt.Errorf("Unable to encode comic index: %s", err)
	}
	err = ioutil.WriteFile(indexPath, idx, 0644)
	if err != nil {
		return fmt.Errorf("Unable to flush index to disk: %s", err)
	}
	return nil
}
// TODO: This is serial, and inefficient
// ideally should be asynchronous or concurrent
// getComics fetches comics latest-n .. latest and indexes any that are not
// already present in cIdx.
func getComics(idx string, cIdx *comicIdx) {
	latest, n := getIdxWindow(idx)
	for i := latest; i >= latest-n; i-- {
		log.Printf("Getting comic %v...", i)
		// BUG FIX: the presence check must key on the comic number i, not
		// the window size n — the old code checked the same key on every
		// iteration, re-fetching comics that were already indexed.
		cNum := strconv.Itoa(i)
		if _, present := cIdx.idx[cNum]; !present {
			current, err := getXkcdComic(i)
			if err != nil {
				log.Printf("Unable to get xkcd comic: %s", err)
			} else {
				log.Printf("Got comic %v", current.Num)
				cIdx.indexComic(current)
			}
		} else {
			log.Printf("Comic already indexed.")
		}
	}
}
// getIdxWindow resolves the indexing window: it fetches the latest comic
// number from xkcd, then returns (latest, n) where n is how many comics back
// to walk — idx-1 for a numeric argument, or everything for "all".
func getIdxWindow(idx string) (int, int) {
	var n int
	c, err := getXkcdComic(0) // 0 requests the latest comic
	if err != nil {
		log.Fatalf("Unable to get latest xkcd comic number: %s", err)
	}
	latest := c.Num
	if idx != "all" {
		n, err = strconv.Atoi(idx)
		if err != nil {
			// BUG FIX: report the offending input string, not the
			// zero-valued n that Atoi left behind.
			log.Fatalf("invalid value for index: %v", idx)
		}
		n = n - 1
	} else {
		n = latest
	}
	return latest, n
}
func getXkcdComic(idx int) (comic, error) {
var cNum string
if idx == 0 {
cNum = ""
} else {
cNum = strconv.Itoa(idx)
}
resp, err := http.Get(xkcdURL + cNum + "/" + xkcdSuffix)
if err != nil {
return comic{}, fmt.Errorf("could not get xkcd info from remote")
} else if resp.StatusCode != http.StatusOK {
resp.Body.Close()
return comic{}, fmt.Errorf("error in request to xcd: %s", resp.Status)
}
var current comic
if err := json.NewDecoder(resp.Body).Decode(¤t); err != nil {
return comic{}, fmt.Errorf("unable to decode json value from xkcd: %s", err)
}
return current, nil
}
// indexComic stores cmc in the index, keyed by its comic number, unless an
// entry with that number already exists.
func (cIdx *comicIdx) indexComic(cmc comic) {
	key := strconv.Itoa(cmc.Num)
	if _, exists := cIdx.idx[key]; exists {
		return
	}
	cIdx.idx[key] = cmc
	log.Printf("Indexed comic %v:", key)
}
// search returns every indexed comic whose transcript contains phrase as a
// case-sensitive substring. The error result is always nil today; it is kept
// for interface stability.
func (cIdx *comicIdx) search(phrase string) ([]comic, error) {
	var matches []comic
	for _, c := range cIdx.idx {
		if strings.Contains(c.Transcript, phrase) {
			matches = append(matches, c)
		}
	}
	return matches, nil
}
|
[
0,
4
] |
package resolvertests
import (
"math"
"math/bits"
"fmt"
"github.com/miekg/dns"
"time"
"strings"
"math/rand"
)
// Response aggregates the probe results for a single resolver IP.
type Response struct {
	Ip string
	IsAlive int // 1 when the resolver answered the initial query
	HasRecursion int // 1 when recursion is available and not refused
	HasDNSSEC int // Rcode of the valid-signature probe (-1 on I/O error)
	HasDNSSECfail int // Rcode of the broken-signature probe (-1 on I/O error)
	QidRatio int // query-id randomness verdict (currently unset by CheckDNS)
	PortRatio int
	Txt string // authority section from the reverse-lookup probe
}
// Alphabet and bit-packing parameters for randString's fast generator.
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
const (
	letterIdxBits = 6 // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
	letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
)
// randString returns n random letters, drawing several 6-bit letter indices
// from each 63-bit random word to minimize calls into the source. The
// randomness is non-cryptographic (math/rand), which suffices for building
// unique probe hostnames.
func randString(n int) string {
	var src = rand.NewSource(time.Now().UnixNano())
	b := make([]byte, n)
	// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
		if remain == 0 {
			cache, remain = src.Int63(), letterIdxMax
		}
		// Indices >= len(letterBytes) are rejected to keep the draw uniform.
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			b[i] = letterBytes[idx]
			i--
		}
		cache >>= letterIdxBits
		remain--
	}
	return string(b)
}
// testRandomness applies a monobit frequency test to the sampled 16-bit ids:
// it returns 1 when the ones/zeros balance is consistent with a uniform
// random source, 0 when the deviation is significant (erfc below 0.01).
func testRandomness(shorts []uint16) int {
	var ones, zeros, total int
	for _, v := range shorts {
		set := bits.OnesCount16(v)
		ones += set
		zeros += 16 - set
		total += 16
	}
	stat := math.Abs(float64(ones-zeros)) / math.Sqrt(float64(total))
	if math.Erfc(stat) < 0.01 {
		return 0
	}
	return 1
}
// checkRandomness sends 10 A queries for unique random hostnames to ip and
// runs the monobit test over the returned DNS message ids. The second return
// value is currently always 0 (reserved, presumably for a port-randomness
// result — confirm with callers).
func checkRandomness(ip string) (int,int) {
	var ids []uint16
	for i:= 0; i < 10; i++ {
		// Unique name per query defeats caching along the path.
		line := "bip"+randString(16)+".niclabs.cl"
		c := new(dns.Client)
		m := new(dns.Msg)
		m.SetQuestion(dns.Fqdn(line),dns.TypeA)
		msg ,_ , err := c.Exchange(m , ip + ":53")
		if err == nil {
			ids = append(ids,msg.MsgHdr.Id)
		}
	}
	return testRandomness(ids),0
}
// checkAuthority issues a PTR query for ip's in-addr.arpa name against ip
// itself and returns the formatted authority section, or "" when the query
// fails or the answer/authority sections are empty.
//
// NOTE(review): assumes ip is a dotted-quad IPv4 address; an IPv6 or
// malformed input would make the b[3] access panic — confirm callers filter.
func checkAuthority(ip string) string {
	b := strings.Split(ip , ".")
	arp := b[3] + "." + b[2] + "." + b[1] + "." + b[0] + ".in-addr.arpa"
	c := new(dns.Client)
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn(arp),dns.TypePTR)
	msg ,_ ,err := c.Exchange(m,ip + ":53")
	if err != nil {
		return ""
	}
	if (len(msg.Answer) < 1 || len(msg.Ns) < 1) {
		return ""
	}
	return fmt.Sprintf("%s",msg.Ns)
}
// checkDNSSECok queries a domain with a valid DNSSEC signature through ip
// (EDNS0, DO bit set) and returns the response Rcode, or -1 on an exchange
// error (typically an i/o timeout). A validating resolver should answer
// successfully here.
func checkDNSSECok (ip string) int {
	line := "sigok.verteiltesysteme.net"
	c := new(dns.Client)
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn(line),dns.TypeA)
	m.SetEdns0(4096,true)
	msg ,_ ,err := c.Exchange(m, ip +":53")
	if err != nil {
		return -1 // other error, typically i/o timeout
	}
	return msg.MsgHdr.Rcode
}
// checkDNSSECnook queries a domain with a deliberately broken DNSSEC
// signature through ip (EDNS0, DO bit set) and returns the response Rcode,
// or -1 on an exchange error. A validating resolver should fail this lookup.
func checkDNSSECnook (ip string) int {
	line := "sigfail.verteiltesysteme.net"
	c := new(dns.Client)
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn(line),dns.TypeA)
	m.SetEdns0(4096,true)
	msg ,_ ,err := c.Exchange(m, ip + ":53")
	if err != nil {
		return -1 // other error
	}
	return msg.MsgHdr.Rcode
}
// CheckDNS is a worker: it consumes resolver IPs from ips, probes each one
// (liveness, open recursion, DNSSEC validation, reverse-zone authority),
// and emits one Response per IP on results. The id parameter identifies the
// worker but is otherwise unused.
func CheckDNS(id int, ips <- chan string, results chan <- Response) {
	line := "www.google.com"
	for ip := range ips {
		ip := strings.TrimSpace(ip)
		c := new(dns.Client)
		m := new(dns.Msg)
		r := Response{Ip : ip, IsAlive : 1}
		m.SetQuestion(dns.Fqdn(line), dns.TypeA)
		m.RecursionDesired = true
		m.CheckingDisabled = false
		msg ,_ ,err := c.Exchange(m, ip + ":53")
		if err != nil {
			r.IsAlive = 0;
		} else {
			if msg != nil {
				// Only resolvers that offer (and do not refuse) recursion
				// are probed further.
				if msg.Rcode != dns.RcodeRefused && msg.RecursionAvailable {
					r.HasRecursion = 1
					r.HasDNSSEC = checkDNSSECok(ip);
					r.HasDNSSECfail = checkDNSSECnook(ip);
					r.Txt = checkAuthority(ip);
					// r.qidRatio,_ = checkRandomness(ip);
				}
			}
		}
		results <- r
	}
}
|
[
4
] |
package main
import (
"fmt"
"github.com/fatih/color"
"log"
"net"
"strings"
"sync"
)
// Listener configuration: UDP on every interface, port 5000.
const (
	PORT = "5000"
	HOST = ""
	CTYPE = "udp4"
)
// Client tracks one chat participant, keyed by source IP in the clients map.
type Client struct {
	Ip string
	Port string
	Addr *net.UDPAddr // reply address for this participant
	Num int // byte count of the last datagram received
	Nickname string // set from the participant's second datagram
	Count uint // datagrams received so far; drives the handshake state
}
// handleRequest reacts to one datagram from addrAlone based on that client's
// message count: the first datagram triggers the nickname prompt, the second
// announces the new nickname to everyone else, and later ones broadcast the
// message body to all other named participants.
//
// NOTE(review): cbuf is the buffer shared with main's read loop and may be
// overwritten by a concurrent ReadFromUDP while this goroutine formats the
// message — confirm whether a per-datagram copy is intended.
func handleRequest(lisen *net.UDPConn, cbuf []byte, addr *net.UDPAddr, n int, clientsChan map[string]*Client, addrAlone string) {
	mutex.Lock()
	defer mutex.Unlock()
	writeChar := []byte("$ ")
	red := color.New(color.FgRed).SprintFunc()
	bleu := color.New(color.FgBlue).SprintFunc()
	if clientsChan[addrAlone].Count == 1 {
		lisen.WriteToUDP([]byte("What's your nickname: "), addr)
	} else if clientsChan[addrAlone].Count == 2 {
		msg := fmt.Sprintf("\n[%s] is in the game ;)\n$ ", bleu(clientsChan[addrAlone].Nickname))
		for _, v := range clientsChan {
			/*Other chatters*/
			if v.Ip!=addrAlone {
				if len(v.Nickname)>0 {
					lisen.WriteToUDP([]byte(msg), v.Addr)
				}
			}else{
				// The sender just gets a fresh prompt.
				lisen.WriteToUDP(writeChar, v.Addr)
			}
		}
	} else if clientsChan[addrAlone].Count > 2 {
		msg := fmt.Sprintf("\n[%s] %s \n$ ", red(clientsChan[addrAlone].Nickname), cleanUp(string(cbuf[0:n])))
		for _, v := range clientsChan {
			/*Other chatters*/
			if v.Ip!=addrAlone {
				if len(v.Nickname)>0 {
					lisen.WriteToUDP([]byte(msg), v.Addr)
				}
			}else{
				lisen.WriteToUDP(writeChar, v.Addr)
			}
		}
	}
}
// makeClients registers (or updates) the client entry for the datagram's
// source address under the mutex, captures the nickname from the second
// datagram, and forwards the map to handleRequest via clientsChan.
func makeClients(clients map[string]*Client,clientsChan chan map[string]*Client,addrSplit []string,addr *net.UDPAddr,n int, cbuf []byte){
	mutex.Lock()
	if _,ok := clients[addrSplit[0]];!ok {
		clients[addrSplit[0]] = &Client{Ip: addrSplit[0], Port: addrSplit[1], Addr: addr, Nickname: "", Num: n, Count: 0}
	}
	clients[addrSplit[0]].Count += 1
	if clients[addrSplit[0]].Count == 2 {
		// Second datagram is the reply to the nickname prompt.
		clients[addrSplit[0]].Nickname = cleanUp(string(cbuf[0:n]))
	}
	clientsChan <- clients
	mutex.Unlock()
}
// mutex serializes access to the shared clients map across goroutines.
var mutex sync.Mutex
// main binds the UDP socket and loops reading datagrams, dispatching each
// to makeClients (registration) and handleRequest (chat fan-out).
//
// NOTE(review): cbuf is reused across iterations while the spawned
// goroutines may still be reading it, and the blocking <-clientsChan receive
// in the argument list effectively serializes the two goroutines — confirm
// whether the concurrency here is intentional.
func main() {
	fmt.Println("UDP server")
	udpAdd, err := net.ResolveUDPAddr(CTYPE, fmt.Sprintf("%s:%s", HOST, PORT))
	errorHand(err)
	lisen, err := net.ListenUDP(CTYPE, udpAdd)
	errorHand(err)
	defer lisen.Close()
	clients := make(map[string]*Client, 0)
	clientsChan := make(chan map[string]*Client, 0)
	cbuf := make([]byte, 1024)
	for {
		n, addr, err := lisen.ReadFromUDP(cbuf)
		errorHand(err)
		addrSplit := strings.Split(addr.String(), ":")
		go makeClients(clients,clientsChan,addrSplit,addr,n, cbuf)
		go handleRequest(lisen, cbuf, addr, n, <-clientsChan, addrSplit[0])
	}
}
// errorHand aborts the process when err is non-nil; a nil err is a no-op.
func errorHand(err error) {
	if err == nil {
		return
	}
	log.Fatal(err)
}
// cleanUp strips every newline character from s.
func cleanUp(s string) string {
	return strings.Join(strings.Split(s, "\n"), "")
}
|
[
5
] |
package feeder
import (
"encoding/xml"
"io"
"strings"
"time"
)
// RSS maps an RSS 2.0 document's channel-level elements for xml decoding.
//
// NOTE(review): the tags "channel>testinput", "channel>skiphours" and
// "channel>skipdays" look inconsistent with the RSS spec's textInput /
// skipHours / skipDays element names — confirm against real feeds.
type RSS struct {
	ID string `xml:"id"`
	Version string `xml:"version,attr"`
	Title string `xml:"channel>title"`
	PubDate string `xml:"channel>pubDate"`
	LastBuildDate string `xml:"channel>lastBuildDate"`
	Description string `xml:"channel>description"`
	Language string `xml:"channel>language"`
	ManagingEditor string `xml:"channel>managingEditor"`
	WebMaster string `xml:"channel>webMaster"`
	Docs string `xml:"channel>docs"`
	Link string `xml:"channel>link"`
	Category string `xml:"channel>category"`
	Generator string `xml:"channel>generator"`
	Copyright string `xml:"channel>copyright"`
	TTL string `xml:"channel>ttl"`
	Image RSSImage `xml:"channel>image"`
	Rating string `xml:"channel>rating"`
	TextInput string `xml:"channel>testinput"`
	Cloud string `xml:"channel>cloud"`
	SkipHours []int `xml:"channel>skiphours"`
	SkipDays []string `xml:"channel>skipdays"`
	Items []RSSItem `xml:"channel>item"`
}
// RSSItem maps one <item> element of an RSS channel.
type RSSItem struct {
	ID string `xml:"id"`
	Title string `xml:"title"`
	Description string `xml:"description"`
	Comment string `xml:"comment"`
	Enclosure string `xml:"enclosure"`
	PubDated string `xml:"pubDate"`
	GUID string `xml:"guid"`
	Author string `xml:"author"`
	Link string `xml:"link"`
	Category string `xml:"category"`
	Source RSS `xml:"source"`
}
// RSSImage maps the channel's <image> element.
type RSSImage struct {
	URL string `xml:"url"`
	Title string `xml:"title"`
	Link string `xml:"link"`
	Width string `xml:"width"`
	Height string `xml:"height"`
	Description string `xml:"description"`
}
// ParseRSS decodes an RSS document from feed and maps it onto the generic
// Feed type: channel metadata, the author (from webMaster), the publication
// date, and every item.
func ParseRSS(feed io.Reader) (*Feed, error) {
	rss := RSS{}
	d := xml.NewDecoder(feed)
	if err := d.Decode(&rss); err != nil {
		return nil, err
	}
	result := new(Feed)
	result.Language = rss.Language
	result.Title = rss.Title
	result.Link = rss.Link
	result.Subtitle = rss.Description
	author := new(Person)
	author.Email = rss.WebMaster
	result.Author = author
	// Cleanup: the original built a second Person from the generator string
	// and then re-assigned result.Author = author (a no-op duplicate) and
	// result.Link a second time; the dead locals and duplicate assignments
	// are removed. Generator is carried as a plain string below.
	result.Updated = ParseDate(rss.PubDate)
	result.Category = rss.Category
	// result.Icon.URL = rss.Icon.URL
	// result.Logo.URL = rss.Logo.URL
	result.Copyights = rss.Copyright // field name typo lives in the Feed type
	result.Generator = rss.Generator
	result.Items = ParseItems(rss.Items)
	return result, nil
}
// ParseItems converts each decoded RSSItem into the generic Item type,
// carrying over author e-mail, title, description, link, category and the
// parsed publication date.
func ParseItems(RSSItems []RSSItem) []*Item {
	items := []*Item{}
	for _, src := range RSSItems {
		who := new(Person)
		who.Email = src.Author
		entry := new(Item)
		entry.Author = who
		entry.Title = src.Title
		entry.Content = src.Description
		entry.Link = src.Link
		entry.Category = src.Category
		entry.Published = ParseDate(src.PubDated)
		// entry.Source = src.Source
		items = append(items, entry)
	}
	return items
}
// ParseDate parses the timestamp formats commonly seen in RSS feeds. The
// string's suffix selects the layout (first match wins, in a fixed order)
// and its length picks the long (RFC1123-like) or short (RFC822-like)
// variant. Unrecognized or unparseable input yields the zero time.
func ParseDate(t string) *time.Time {
	layouts := []struct{ suffix, long, short string }{
		{"0000", "Mon, 02 Jan 2006 15:04:05 +0000", "02 Jan 06 15:04 +0000"},
		{"GMT", "Mon, 02 Jan 2006 15:04:05 GMT", "02 Jan 06 15:04 GMT"},
		{"UTC", "Mon, 02 Jan 2006 15:04:05 UTC", "02 Jan 06 15:04 UTC"},
		{"CST", "Mon, 02 Jan 2006 15:04:05 CST", "02 Jan 06 15:04 CST"},
		{"0400", "Mon, 02 Jan 2006 15:04:05 -0400", "02 Jan 06 15:04 -0400"},
		{"Z", time.RFC3339, time.RFC3339},
		{"0800", "Mon, 02 Jan 2006 15:04:05 +0800", "02 Jan 06 15:04 +0800"},
	}
	isLong := len(t) >= 25
	then := time.Time{}
	for _, l := range layouts {
		if !strings.HasSuffix(t, l.suffix) {
			continue
		}
		layout := l.short
		if isLong {
			layout = l.long
		}
		// Parse errors are deliberately ignored: the zero time signals
		// failure, matching the original behavior.
		then, _ = time.Parse(layout, t)
		break
	}
	return &then
}
|
[
2,
5
] |
package main
import (
"fmt"
)
// rotate0 rotates nums right by k positions using the straightforward
// brute-force approach: shift everything right by one, k (mod n) times.
// O(n*k) time, O(1) extra space.
func rotate0(nums []int, k int) {
	n := len(nums)
	if n == 0 {
		return
	}
	for step := 0; step < k%n; step++ {
		last := nums[n-1]
		copy(nums[1:], nums[:n-1]) // copy handles the overlap safely
		nums[0] = last
	}
}
// rotate rotates nums right by k positions in place using the triple
// reversal trick: reverse the first n-k elements, reverse the last k, then
// reverse the whole slice. Mutation is visible to the caller because slices
// share their backing array. O(n) time, O(1) extra space.
func rotate(nums []int, k int) {
	n := len(nums)
	if n == 0 {
		return
	}
	k %= n
	flip := func(a []int) {
		for l, r := 0, len(a)-1; l < r; l, r = l+1, r-1 {
			a[l], a[r] = a[r], a[l]
		}
	}
	flip(nums[:n-k])
	flip(nums[n-k:])
	flip(nums)
}
// main demonstrates the in-place rotation on a sample slice.
func main() {
	input := []int{1, 2, 3, 4, 5, 6, 7}
	rotate(input, 3)
	fmt.Println(input)
}
|
[
0
] |
package main
import (
"fmt"
"time"
)
// Tunables: mute duration and freshness timeout in seconds, and how many
// stickers a user may send before being muted.
const (
	mutetm= 5*60
	timeout= 60
	stickers= 3
)
// stickent is a per-user entry in the sticker budget table.
type stickent struct {
	n int // stickers remaining; 0 means muted
	t time.Time // when this entry (and any mute) expires
}
// updates fetches pending updates after last_upd; errors are logged and an
// empty/partial slice is returned.
func (b * Bot) updates(last_upd int) []Update {
	upds, e := b.GetUpdates(last_upd, 0, 0)
	if e != nil {
		fmt.Print(e)
	}
	return upds
}
// send delivers s to chat id (optionally as a reply), splitting texts longer
// than the 4096-character message limit into sequential chunks. An empty s
// is replaced by a visible placeholder.
func (b *Bot) send(id, reply int, s string) {
	if s == "" {
		if err := b.SendMessage(id, reply, "-/-/-/-"); err != nil {
			fmt.Print(err)
		}
		return
	}
	for len(s) > 4096 {
		// BUG FIX: the chunk must be s[:4096]; the old code sent s[:4095]
		// but advanced by 4096, silently dropping one character per chunk.
		if err := b.SendMessage(id, reply, s[:4096]); err != nil {
			fmt.Print(err)
		}
		s = s[4096:]
	}
	if err := b.SendMessage(id, reply, s); err != nil {
		fmt.Print(err)
	}
}
// isstick reports whether the message carries a sticker.
func isstick(m Message) bool {
	return m.Sticker.File_id != ""
}
// sendedbefore reports whether the message is older than the freshness
// timeout and should therefore be ignored (e.g. a backlog drained on start).
func sendedbefore(m Message) bool {
	tm := time.Unix(int64(m.Date), 0)
	/*
	 * Lets assume that all messages recived before timeout should be ignored.
	 */
	tm = tm.Add(time.Second*timeout)
	return tm.Before(time.Now())
}
// botjoined reports whether this bot itself is among the new chat members
// announced by m (i.e. the bot was just added to the chat).
func (b *Bot) botjoined(m Message) bool {
	me, e := b.GetMe()
	if e != nil {
		fmt.Print(e)
		return false
	}
	for _, i := range m.Newmem {
		if i.Is_bot && int(me.Id) == int(i.Id) {
			return true
		}
	}
	return false
}
// greet posts the rules message (sticker budget and mute duration) to chat c.
func (b *Bot) greet(c int) {
	s := "Hello everyone! *I want to play a game...*\n"
	s = s + "Now you have only *%d* stickers. "
	s = s + "If you spam you will be muted for *%d* seconds.\n"
	s = s + "*Good luck.*"
	s = fmt.Sprintf(s, stickers, mutetm)
	b.send(c, 0, s)
}
// main runs the bot's polling loop: it greets chats it joins, deletes
// messages from muted users, decrements each sender's sticker budget, mutes
// users who exhaust it, and expires stale budget entries.
func main() {
	b := Bot{"oops here should be your bot token", "Markdown"}
	last_upd := 0
	// stickdb maps sender id -> remaining sticker budget / expiry.
	stickdb := make(map[int]stickent)
	muted := func (sender int) bool {
		v, ok := stickdb[sender]
		return ok && v.n == 0
	}
	for {
		upds := b.updates(last_upd)
		for i := range upds {
			if int(upds[i].Id) <= last_upd {
				continue // already processed
			}
			M := upds[i].Mes
			last_upd = int(upds[i].Id)
			chat := int(M.Chat.Id)
			sender := int(M.From.Id)
			mes := int(M.Id)
			firstn := M.From.Firstn
			if b.botjoined(M) {
				b.greet(chat)
				continue
			}
			if muted(sender) {
				// Muted users get their messages removed outright.
				b.DeleteMessage(chat, mes)
				continue
			}
			if !isstick(M) {
				continue
			}
			if sendedbefore(M) {
				continue // stale message from before the timeout window
			}
			if _, ok := stickdb[sender]; !ok {
				var a stickent
				a.n = stickers
				stickdb[sender] = a
			}
			if stickdb[sender].n > 1 {
				// Spend one sticker and warn.
				var a stickent
				a.n = stickdb[sender].n - 1
				a.t = time.Now().Add(time.Second*mutetm)
				stickdb[sender] = a
				s := fmt.Sprintf("*%s warning!* only %d stickers more allowed\n",
					firstn, stickdb[sender].n)
				b.send(chat, mes, s)
			} else {
				// Budget exhausted: mute (n == 0) until the entry expires.
				var a stickent
				a.n = 0
				a.t = time.Now().Add(time.Second*mutetm)
				stickdb[sender] = a
				s := fmt.Sprintf("*%s* is now *muted* for *%d* seconds!\n",
					firstn, mutetm)
				b.send(chat, mes, s)
			}
		}
		// Drop expired entries, which also lifts any mutes.
		for k, v := range stickdb {
			if v.t.Before(time.Now()) {
				delete(stickdb, k)
			}
		}
	}
}
|
[
0
] |
package volume
import (
"html/template"
"os"
"path/filepath"
"time"
"github.com/hjjg200/sprout/i18n"
"github.com/hjjg200/sprout/cache"
)
// RealtimeVolume wraps a BasicVolume and re-checks the source directory on
// every access, so on-disk edits are picked up without restarting.
type RealtimeVolume struct {
	vol *BasicVolume // backing volume kept in sync with the directory
	srcPath string // slash-separated root directory of the source files
	modTime map[string] time.Time // last synced mod time per relative path
}
// VOLUME METHODS

// NewRealtimeVolume builds a RealtimeVolume rooted at srcPath, seeding the
// underlying BasicVolume with the directory's current contents.
func NewRealtimeVolume( srcPath string ) *RealtimeVolume {
	srcPath = filepath.ToSlash( filepath.Clean( srcPath ) )
	vol := NewBasicVolume()
	vol.ImportDirectory( srcPath )
	return &RealtimeVolume{
		vol: vol,
		srcPath: srcPath,
		modTime: make( map[string] time.Time ),
	}
}
// abs joins a volume-relative slash path onto the source root.
func( rtv *RealtimeVolume ) abs( path string ) string {
	return rtv.srcPath + "/" + path
}
// validate brings the underlying volume's copy of path up to date with the
// file on disk: a vanished file is either re-validated via its compiler
// inputs or removed from the volume; an unchanged mod time is a no-op; a
// newer file is re-read and put back into the volume.
func( rtv *RealtimeVolume ) validate( path string ) error {
	absPath := rtv.abs( path )
	fi, err := os.Stat( absPath )
	if err != nil {
		if os.IsNotExist( err ) {
			// If Compiled
			// A compiled output may not exist on disk; validate any of its
			// inputs instead and succeed on the first that checks out.
			if in, ok := DefaultCompilers.InputOf( path ); ok {
				var err2 error
				for _, i := range in {
					err2 = rtv.validate( i )
					if err2 == nil { return nil }
				}
				return err2
			} else {
				// Remove item if there is any in the underlying volume
				if rtv.vol.HasItem( path ) {
					return rtv.vol.RemoveItem( path )
				}
			}
			return ErrPathNonExistent.Append( path )
		}
		return ErrFileError.Append( path, err )
	}
	// Unchanged since last sync — nothing to do.
	mt, ok := rtv.modTime[path]
	if ok {
		if fi.ModTime().Sub( mt ) <= 0 {
			return nil
		}
	}
	// Write to modTime — copy-on-write so concurrent readers of the old map
	// are not disturbed by this update.
	buf := make( map[string] time.Time )
	for k, v := range rtv.modTime { buf[k] = v }
	buf[path] = fi.ModTime()
	rtv.modTime = buf
	// Write
	f, err := os.Open( absPath )
	if err != nil {
		return ErrFileError.Append( absPath, err )
	}
	defer f.Close()
	// Put
	return rtv.vol.PutItem( path, f, fi.ModTime() )
}
// validateTemplates walks the template directory under srcPath and
// re-validates every file so the underlying volume reflects on-disk state.
func( rtv *RealtimeVolume ) validateTemplates() error {
	return filepath.Walk( rtv.srcPath + "/" + c_templateDirectory, func( osPath string, fi os.FileInfo, err error ) error {
		// BUG FIX: propagate walk errors before touching fi — filepath.Walk
		// passes a nil FileInfo alongside a non-nil err, so the IsDir call
		// below would panic.
		if err != nil {
			return err
		}
		// Ignore dir
		if fi.IsDir() {
			return nil
		}
		// Rel
		relPath, relErr := filepath.Rel( rtv.srcPath, osPath )
		if relErr != nil {
			return ErrInvalidPath.Append( relErr, "basePath:", rtv.srcPath, "osPath:", osPath )
		}
		relPath = filepath.ToSlash( relPath )
		// Re-sync this file into the underlying volume.
		return rtv.validate( relPath )
	} )
}
// validateI18n re-validates every known locale file. A locale file that
// has vanished from disk is tolerated; any other validation failure aborts.
//
// Fixes a defect in the original: the error branch executed `return nil`,
// silently discarding real validation errors instead of returning them.
func( rtv *RealtimeVolume ) validateI18n() error {
    for _, path := range rtv.vol.localePath {
        err := rtv.validate( path )
        if err != nil && !ErrPathNonExistent.Is( err ) {
            return err
        }
    }
    return nil
}
// walkI18nDirectory validates every regular file below the i18n directory,
// after confirming the directory exists and is in fact a directory.
//
// Fixes a defect in the original: the walk callback ignored its err
// argument and dereferenced fi, which is nil when err is non-nil.
func( rtv *RealtimeVolume ) walkI18nDirectory() error {
    i18nDir := rtv.abs( c_i18nDirectory )
    // Ensure the i18n directory exists and is a directory.
    fi, err := os.Stat( i18nDir )
    if err != nil {
        return ErrDirectoryError.Append( i18nDir, err )
    }
    if !fi.IsDir() {
        return ErrDirectoryError.Append( i18nDir, "it is not a directory" )
    }
    return filepath.Walk( i18nDir, func( absPath string, fi os.FileInfo, err error ) error {
        // Propagate walk errors first; fi is nil when err is non-nil.
        if err != nil {
            return err
        }
        if fi.IsDir() {
            return nil
        }
        // Paths handed to validate are relative to the source root.
        relPath, relErr := filepath.Rel( rtv.srcPath, absPath )
        if relErr != nil {
            return relErr
        }
        return rtv.validate( filepath.ToSlash( relPath ) )
    } )
}
// Asset re-validates path against the source directory before serving it
// from the underlying volume. Returns nil when validation fails for any
// reason other than the file simply not existing.
func( rtv *RealtimeVolume ) Asset( path string ) ( *Asset ) {
    if err := rtv.validate( path ); err != nil && !ErrPathNonExistent.Is( err ) {
        return nil
    }
    return rtv.vol.Asset( path )
}
// I18n re-scans the i18n directory for on-disk changes before returning
// the underlying volume's i18n data. Returns nil on any error other than
// a missing path.
func( rtv *RealtimeVolume ) I18n() ( *i18n.I18n ) {
    if err := rtv.walkI18nDirectory(); err != nil && !ErrPathNonExistent.Is( err ) {
        return nil
    }
    return rtv.vol.I18n()
}
// Localizer re-validates the locale file backing lcName — or, when the
// locale is not yet known, re-scans the whole i18n directory — and then
// returns the underlying volume's localizer for it.
func( rtv *RealtimeVolume ) Localizer( lcName string ) ( *i18n.Localizer ) {
    path, known := rtv.vol.localePath[lcName]
    var err error
    if known {
        err = rtv.validate( path )
    } else {
        err = rtv.walkI18nDirectory()
    }
    if err != nil && !ErrPathNonExistent.Is( err ) {
        return nil
    }
    return rtv.vol.Localizer( lcName )
}
// Template re-validates the template directory before returning the
// underlying volume's template for path. Returns nil on any error other
// than a missing path.
func( rtv *RealtimeVolume ) Template( path string ) ( *template.Template ) {
    if err := rtv.validateTemplates(); err != nil && !ErrPathNonExistent.Is( err ) {
        return nil
    }
    return rtv.vol.Template( path )
}
// SetFallback forwards the fallback volume to the wrapped BasicVolume.
func( rtv *RealtimeVolume ) SetFallback( vol Volume ) {
    rtv.vol.SetFallback( vol )
}
// Export is a no-op that always returns (nil, nil).
// NOTE(review): presumably realtime volumes are deliberately not
// exportable since they mirror the live filesystem — confirm the intended
// contract; callers receive a nil cache with no error.
func( rtv *RealtimeVolume ) Export() ( *cache.Cache, error ) {
    return nil, nil
}
// Import is a no-op: the supplied cache is ignored and nil is returned.
// NOTE(review): consistent with Export above — realtime volumes appear to
// opt out of cache round-tripping; confirm intended contract.
func( rtv *RealtimeVolume ) Import( chc *cache.Cache ) error {
    return nil
}
|
[
4
] |
package main
import (
"errors"
"fmt"
"path/filepath"
"strings"
"github.com/google/uuid"
"github.com/urfave/cli/v2"
"github.com/weaveworks/pctl/pkg/catalog"
"github.com/weaveworks/pctl/pkg/client"
"github.com/weaveworks/pctl/pkg/git"
"github.com/weaveworks/pctl/pkg/install"
"github.com/weaveworks/pctl/pkg/runner"
)
// createPRFlags are the CLI flags shared by commands that can open a pull
// request with the changes they generate (see the create-pr handling in
// installCmd's Action).
var createPRFlags = []cli.Flag{
	&cli.BoolFlag{
		Name:  "create-pr",
		Value: false,
		Usage: "If given, upgrade will create a PR for the modifications it outputs.",
	},
	&cli.StringFlag{
		Name:        "pr-remote",
		Value:       "origin",
		DefaultText: "origin",
		Usage:       "The remote to push the branch to.",
	},
	&cli.StringFlag{
		Name:        "pr-base",
		Value:       "main",
		DefaultText: "main",
		Usage:       "The base branch to open a PR against.",
	},
	&cli.StringFlag{
		Name:  "pr-branch",
		Usage: "The branch to create the PR from. Generated if not set.",
	},
	&cli.StringFlag{
		Name:  "pr-repo",
		Value: "",
		Usage: "The repository to open a pr against. Format is: org/repo-name.",
	},
}
// installCmd builds the `install` subcommand, which generates a profile
// installation either from a catalog entry or directly from a profile
// repository URL, and optionally opens a PR with the result.
func installCmd() *cli.Command {
	return &cli.Command{
		Name:      "install",
		Usage:     "generate a profile installation",
		UsageText: "To install from a profile catalog entry: pctl --catalog-url <URL> install --name pctl-profile --namespace default --profile-branch main --config-map configmap-name <CATALOG>/<PROFILE>[/<VERSION>]\n   " +
			"To install directly from a profile repository: pctl install --name pctl-profile --namespace default --profile-branch development --profile-url https://github.com/weaveworks/profiles-examples --profile-path bitnami-nginx",
		// The command-specific flags extend the shared PR-creation flags.
		Flags: append(createPRFlags,
			&cli.StringFlag{
				Name:        "name",
				DefaultText: "pctl-profile",
				Value:       "pctl-profile",
				Usage:       "The name of the installation.",
			},
			&cli.StringFlag{
				Name:        "namespace",
				DefaultText: "default",
				Value:       "default",
				Usage:       "The namespace to use for generating resources.",
			},
			&cli.StringFlag{
				Name:        "profile-branch",
				Value:       "main",
				DefaultText: "main",
				Usage:       "The branch to use on the repository in which the profile is.",
			},
			&cli.StringFlag{
				Name:  "config-map",
				Value: "",
				Usage: "The name of the ConfigMap which contains values for this profile.",
			},
			&cli.StringFlag{
				Name:        "out",
				DefaultText: "current",
				Value:       ".",
				Usage:       "Optional location to create the profile installation folder in. This should be relative to the current working directory.",
			},
			&cli.StringFlag{
				Name:  "profile-url",
				Value: "",
				Usage: "Optional value defining the URL of the profile.",
			},
			&cli.StringFlag{
				Name:        "profile-path",
				Value:       ".",
				DefaultText: "<root>",
				Usage:       "Value defining the path to a profile when url is provided.",
			},
			&cli.StringFlag{
				Name:  "git-repository",
				Value: "",
				Usage: "The namespace and name of the GitRepository object governing the flux repo.",
			}),
		Action: func(c *cli.Context) error {
			// Run installation main
			if err := installProfile(c); err != nil {
				return err
			}
			// Create a pull request if desired
			if c.Bool("create-pr") {
				if err := createPullRequest(c); err != nil {
					return err
				}
			}
			return nil
		},
	}
}
// installProfile runs the install part of the `install` command: it
// resolves where the profile comes from (a catalog entry or a direct
// repository URL), then generates the installation via catalog.Manager.
func installProfile(c *cli.Context) error {
	var (
		err           error
		catalogClient *client.Client
		profilePath   string
		catalogName   string
		profileName   string
		version       = "latest" // used when the catalog entry omits a version
	)
	// only set up the catalog if a url is not provided
	url := c.String("profile-url")
	if url != "" && c.Args().Len() > 0 {
		return errors.New("it looks like you provided a url with a catalog entry; please choose either format: url/branch/path or <CATALOG>/<PROFILE>[/<VERSION>]")
	}
	if url == "" {
		// Catalog mode: the positional argument is <CATALOG>/<PROFILE>[/<VERSION>].
		profilePath, catalogClient, err = parseArgs(c)
		if err != nil {
			_ = cli.ShowCommandHelp(c, "install")
			return err
		}
		parts := strings.Split(profilePath, "/")
		if len(parts) < 2 {
			_ = cli.ShowCommandHelp(c, "install")
			return errors.New("both catalog name and profile name must be provided")
		}
		if len(parts) == 3 {
			version = parts[2]
		}
		catalogName, profileName = parts[0], parts[1]
	}
	branch := c.String("profile-branch")
	subName := c.String("name")
	namespace := c.String("namespace")
	configMap := c.String("config-map")
	dir := c.String("out")
	path := c.String("profile-path")
	gitRepository := c.String("git-repository")
	// Compose a human-readable description of the install source.
	// NOTE(review): "profile-path" defaults to ".", so the path == ""
	// branch below looks unreachable through the CLI — confirm.
	var source string
	if url != "" && path != "" {
		source = fmt.Sprintf("repository %s, path: %s and branch %s", url, path, branch)
	} else if url != "" && path == "" {
		source = fmt.Sprintf("repository %s and branch %s", url, branch)
	} else {
		source = fmt.Sprintf("catalog entry %s/%s/%s", catalogName, profileName, version)
	}
	fmt.Printf("generating profile installation from source: %s\n", source)
	r := &runner.CLIRunner{}
	g := git.NewCLIGit(git.CLIGitConfig{}, r)
	// git-repository, when set, must look like "<namespace>/<name>".
	var (
		gitRepoNamespace string
		gitRepoName      string
	)
	if gitRepository != "" {
		split := strings.Split(gitRepository, "/")
		if len(split) != 2 {
			return fmt.Errorf("git-repository must in format <namespace>/<name>; was: %s", gitRepository)
		}
		gitRepoNamespace = split[0]
		gitRepoName = split[1]
	}
	installer := install.NewInstaller(install.Config{
		ProfileName:      profileName,
		GitClient:        g,
		RootDir:          filepath.Join(dir, profileName),
		GitRepoNamespace: gitRepoNamespace,
		GitRepoName:      gitRepoName,
	})
	cfg := catalog.InstallConfig{
		Clients: catalog.Clients{
			CatalogClient: catalogClient,
			Installer:     installer,
		},
		Profile: catalog.Profile{
			ProfileConfig: catalog.ProfileConfig{
				CatalogName:   catalogName,
				ConfigMap:     configMap,
				Namespace:     namespace,
				Path:          path,
				ProfileBranch: branch,
				ProfileName:   profileName,
				SubName:       subName,
				URL:           url,
				Version:       version,
			},
			GitRepoConfig: catalog.GitRepoConfig{
				Namespace: gitRepoNamespace,
				Name:      gitRepoName,
			},
		},
	}
	manager := &catalog.Manager{}
	err = manager.Install(cfg)
	if err == nil {
		fmt.Println("installation completed successfully")
	}
	return err
}
// createPullRequest runs the pull-request creation part of the `install`
// command: it pushes the generated changes from the output directory on a
// (possibly generated) branch and opens a PR against the configured base.
func createPullRequest(c *cli.Context) error {
	var (
		branch    = c.String("pr-branch")
		repo      = c.String("pr-repo")
		base      = c.String("pr-base")
		remote    = c.String("pr-remote")
		directory = c.String("out")
	)
	if repo == "" {
		return errors.New("repo must be defined if create-pr is true")
	}
	// Derive a unique branch name from the installation name when none is given.
	if branch == "" {
		branch = c.String("name") + "-" + uuid.NewString()[:6]
	}
	fmt.Printf("Creating a PR to repo %s with base %s and branch %s\n", repo, base, branch)
	gitClient := git.NewCLIGit(git.CLIGitConfig{
		Directory: directory,
		Branch:    branch,
		Remote:    remote,
		Base:      base,
	}, &runner.CLIRunner{})
	scmClient, err := git.NewClient(git.SCMConfig{
		Branch: branch,
		Base:   base,
		Repo:   repo,
	})
	if err != nil {
		return fmt.Errorf("failed to create scm client: %w", err)
	}
	return catalog.CreatePullRequest(scmClient, gitClient, branch)
}
|
[
5
] |
package main
import (
"fmt"
"html/template"
"net/http"
"strconv"
"strings"
)
// Package-level working state shared by every request.
// NOTE(review): net/http serves each request in its own goroutine, so
// these shared mutable variables are a data race whenever two requests
// overlap — consider making them request-local.
var (
    A, B, C, U, I, D, SD, tmp []int // the three input sets and derived results
    mass string // data handed to the index template
    mas []int // scratch slice
    a int // scratch value shared by several functions
    err error // scratch error shared by several functions
    f bool // scratch flag (written by equal, never read elsewhere)
)
// cache_templ is the parsed index page; template.Must panics at startup
// if index.html is missing or malformed.
var cache_templ = template.Must(template.ParseFiles("index.html"))
// check_contain reports whether el occurs anywhere in *mas.
func check_contain(el int, mas *[]int) bool {
    for _, v := range *mas {
        if v == el {
            return true
        }
    }
    return false
}
// union returns the set union of *x and *y: a copy of *x (order preserved)
// followed by the elements of *y not already present.
//
// Fixes a defect in the original: membership was tested against the whole
// over-allocated buffer, whose zero-valued tail made it impossible to ever
// add the value 0 from *y.
func union(x, y *[]int) []int {
    un := make([]int, len(*x), len(*x)+len(*y))
    copy(un, *x)
    for _, v := range *y {
        seen := false
        for _, u := range un {
            if u == v {
                seen = true
                break
            }
        }
        if !seen {
            un = append(un, v)
        }
    }
    return un
}
// inter returns the three-way intersection: the elements of *x that also
// occur in both *y and *z, preserving the order of *x.
func inter(x, y, z *[]int) []int {
    result := make([]int, 0, len(*x))
    for _, v := range *x {
        if check_contain(v, y) && check_contain(v, z) {
            result = append(result, v)
        }
    }
    return result
}
// diff returns the set difference *x − *y − *z: the elements of *x that
// occur in neither *y nor *z, preserving order.
//
// Fixes a defect in the original: the current element was stored in the
// shared package-level variable `a`, a data race when handlers run
// concurrently; a local is used instead.
func diff(x, y, z *[]int) []int {
    contains := func(s *[]int, el int) bool {
        for _, v := range *s {
            if v == el {
                return true
            }
        }
        return false
    }
    out := make([]int, 0, len(*x))
    for _, v := range *x {
        if !contains(y, v) && !contains(z, v) {
            out = append(out, v)
        }
    }
    return out
}
// sym_dif returns the elements that belong to exactly one of the three
// sets: the union of the pairwise-exclusive parts of *x, *y and *z.
func sym_dif(x, y, z *[]int) []int {
    onlyX := diff(x, y, z)
    onlyZ := diff(z, x, y)
    onlyY := diff(y, x, z)
    merged := union(&onlyX, &onlyZ)
    merged = union(&merged, &onlyY)
    return merged
}
// equal reports whether *x and *y are equal as sets: same length and every
// element of *x occurs in *y.
//
// Fixes a defect in the original: it wrote to the shared package-level
// variables `f` and `a` (a data race for concurrent requests, and `f` was
// never read anywhere); only locals are used now.
func equal(x, y *[]int) bool {
    if len(*x) != len(*y) {
        return false
    }
    for _, v := range *x {
        found := false
        for _, w := range *y {
            if w == v {
                found = true
                break
            }
        }
        if !found {
            return false
        }
    }
    return true
}
// inn reports whether *x is a proper subset of *y: every element of *x
// occurs in *y and the sets are not equal.
//
// Fixes a defect in the original: the current element was stored in the
// shared package-level variable `a`, racing between concurrent requests.
// The subset/equality checks are performed with locals only.
func inn(x, y *[]int) bool {
    subset := func(p, q *[]int) bool {
        for _, v := range *p {
            found := false
            for _, w := range *q {
                if w == v {
                    found = true
                    break
                }
            }
            if !found {
                return false
            }
        }
        return true
    }
    // Equal sets (same length and mutual containment) are not proper subsets.
    if len(*x) == len(*y) && subset(x, y) {
        return false
    }
    return subset(x, y)
}
// enter parses a whitespace-separated list of integers from s into *mas,
// dropping duplicates, and returns the resulting set. Panics on a
// non-numeric token, as the original did.
//
// Fixes two defects in the original: (1) the slice was pre-sized to the
// token count and membership was tested against its zero-valued tail, so
// duplicates left trailing zeros in the result and the value 0 could never
// be inserted; (2) the conversion error was stored in the shared global
// `err`, a data race across concurrent requests.
func enter(s string, mas *[]int) []int {
    fields := strings.Fields(s)
    *mas = make([]int, 0, len(fields))
    for _, field := range fields {
        n, convErr := strconv.Atoi(field)
        if convErr != nil {
            panic(convErr)
        }
        dup := false
        for _, existing := range *mas {
            if existing == n {
                dup = true
                break
            }
        }
        if !dup {
            *mas = append(*mas, n)
        }
    }
    return *mas
}
// handler renders the index page with the shared template data.
func handler(w http.ResponseWriter, r *http.Request) {
    if err := cache_templ.ExecuteTemplate(w, "index.html", mass); err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
    }
}
// go_handler parses the three sets A, B and C from the submitted form and
// writes their union, intersection, pairwise differences, symmetric
// difference and subset/equality relations to the response.
// NOTE(review): it reads and writes the shared package-level sets, so
// concurrent requests race on them.
func go_handler(w http.ResponseWriter, r *http.Request) {
    // Parse the three input sets from the form fields.
    A = enter(r.FormValue("A"), &A)
    B = enter(r.FormValue("B"), &B)
    C = enter(r.FormValue("C"), &C)
    fmt.Fprintln(w, " ")
    // Union: A ∪ B ∪ C.
    fmt.Fprintln(w, "Объединение множеств: ")
    tmp = union(&B, &C)
    U = union(&A, &tmp)
    fmt.Fprintln(w, U)
    // Intersection: A ∩ B ∩ C.
    fmt.Fprintln(w, "Пересечение множеств: ")
    I = inter(&A, &B, &C)
    if len(I) == 0 {
        fmt.Fprintln(w, "пустое множество")
    } else {
        fmt.Fprintln(w, I)
    }
    // Difference A − B − C.
    fmt.Fprintln(w, "Разность A-B-C")
    D = diff(&A, &B, &C)
    if len(D) == 0 {
        fmt.Fprintln(w, "пустое множество")
    } else {
        fmt.Fprintln(w, D)
    }
    // Difference B − A − C.
    fmt.Fprintln(w, "Разность B-A-C")
    D = diff(&B, &A, &C)
    if len(D) == 0 {
        fmt.Fprintln(w, "пустое множество")
    } else {
        fmt.Fprintln(w, D)
    }
    // Difference C − A − B.
    fmt.Fprintln(w, "Разность C-A-B")
    D = diff(&C, &A, &B)
    if len(D) == 0 {
        fmt.Fprintln(w, "пустое множество")
    } else {
        fmt.Fprintln(w, D)
    }
    // Symmetric difference, merged with the triple intersection.
    fmt.Fprintln(w, "Симметрическая разность")
    SD = sym_dif(&A, &B, &C)
    SD = union(&SD, &I)
    fmt.Fprintln(w, SD)
    fmt.Fprintln(w, "<br><br>")
    // Set equality and proper-subset relations between A and B.
    if equal(&A, &B) {
        fmt.Fprintln(w, "Множествa A и В равны")
    } else {
        fmt.Fprintln(w, "Множествa A и В не равны")
    }
    if inn(&A, &B) {
        fmt.Fprintln(w, "Множество A входит в B")
    } else if inn(&B, &A) {
        fmt.Fprintln(w, "Множество В входит в А")
    } else {
        fmt.Fprintln(w, "Множество А не входит в В")
        fmt.Fprintln(w, "Множество В не входит в А")
    }
}
// main registers the index and computation handlers and serves on port 80.
func main() {
    http.HandleFunc("/", handler)
    http.HandleFunc("/go/", go_handler)
    // ListenAndServe always returns a non-nil error; the original dropped
    // it silently, so a failed bind (e.g. port 80 without privileges)
    // exited with status 0 and no message.
    if err := http.ListenAndServe(":80", nil); err != nil {
        panic(err)
    }
}
|
[
5
] |
package main
import "fmt"
// zahl1 and zahl2 receive the two user-entered numbers in main.
var (
    zahl1 int
    zahl2 int
)
// main reads two integers from stdin and prints their greatest common
// divisor, computed with the subtraction form of Euclid's algorithm.
// NOTE(review): fmt.Scan errors are ignored; invalid input leaves the
// corresponding variable at its zero value and triggers the < 1 check.
func main() {
    fmt.Print("Bitte gib die erste Zahl ein: ")
    fmt.Scan(&zahl1)
    fmt.Print("Bitte gib die zweite Zahl ein: ")
    fmt.Scan(&zahl2)
    // Both inputs must be positive or the subtraction loop would not terminate.
    if (zahl1 < 1) || (zahl2 < 1) {
        fmt.Println("Beide Zahlen müssen positiv sein!")
    } else {
        fmt.Print("Der ggT von ", zahl1, " und ", zahl2, " ist ")
        // Repeatedly subtract the smaller value from the larger; when both
        // are equal they hold the GCD.
        for zahl1 != zahl2 {
            if zahl1 > zahl2 {
                zahl1 = zahl1 - zahl2
            } else {
                zahl2 = zahl2 - zahl1
            }
        }
        fmt.Println(zahl1, ".")
    }
}
|
[
0
] |
package configurableroutes
import (
"context"
"fmt"
"strings"
configv1 "github.com/openshift/api/config/v1"
logf "github.com/openshift/cluster-ingress-operator/pkg/log"
operatorcontroller "github.com/openshift/cluster-ingress-operator/pkg/operator/controller"
util "github.com/openshift/cluster-ingress-operator/pkg/util"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
	// ControllerName identifies this controller in logs and manager wiring.
	ControllerName = "configurable_route_controller"
	// componentRouteHashLabelKey marks generated Roles/RoleBindings with the
	// hash of the component route that produced them, enabling lookup and
	// orphan cleanup.
	componentRouteHashLabelKey = "ingress.operator.openshift.io/component-route-hash"
)
var (
	// log is the controller-scoped logger.
	log = logf.Logger.WithName(ControllerName)
)
// New creates the configurable route controller from configuration. This is the controller
// that handles all the logic for generating roles and rolebindings for operators that
// include routes with configurable hostnames and serving certificates.
//
// Cluster-admins may provide a custom hostname and serving certificate for a route
// by creating a spec.componentRoute entry in the ingresses.config.openshift.io/cluster
// resource. If a componentRoute entry exists in the status.componentRoutes list with
// a matching namespace and name this controller will generate:
// - A role that grants get/list/watch permissions for the secret defined in the spec.
// - A roleBinding that binds the aforementioned role to each consumingUser specified
// in the corresponding status entry.
func New(mgr manager.Manager, config Config, eventRecorder events.Recorder) (controller.Controller, error) {
	// A direct clientset is needed for the resourceapply helpers used when
	// updating Roles/RoleBindings.
	kubeClient, err := kubernetes.NewForConfig(mgr.GetConfig())
	if err != nil {
		return nil, err
	}
	operatorCache := mgr.GetCache()
	reconciler := &reconciler{
		kclient:       kubeClient,
		config:        config,
		client:        mgr.GetClient(),
		cache:         operatorCache,
		eventRecorder: eventRecorder,
	}
	c, err := controller.New(ControllerName, mgr, controller.Options{Reconciler: reconciler})
	if err != nil {
		return nil, err
	}
	// Trigger reconcile requests for the cluster ingress resource.
	clusterNamePredicate := predicate.NewPredicateFuncs(func(o client.Object) bool {
		clusterIngressResource := operatorcontroller.IngressClusterConfigName()
		return o.GetName() == clusterIngressResource.Name && o.GetNamespace() == clusterIngressResource.Namespace
	})
	if err := c.Watch(source.Kind(operatorCache, &configv1.Ingress{}), &handler.EnqueueRequestForObject{}, clusterNamePredicate); err != nil {
		return nil, err
	}
	// Trigger reconcile requests for the roles and roleBindings with the componentRoute label.
	// Events on them are mapped back to the single cluster ingress config.
	defaultPredicate := predicate.NewPredicateFuncs(func(o client.Object) bool {
		labels := o.GetLabels()
		_, ok := labels[componentRouteHashLabelKey]
		return ok
	})
	if err := c.Watch(source.Kind(operatorCache, &rbacv1.Role{}), handler.EnqueueRequestsFromMapFunc(reconciler.resourceToClusterIngressConfig), defaultPredicate); err != nil {
		return nil, err
	}
	if err := c.Watch(source.Kind(operatorCache, &rbacv1.RoleBinding{}), handler.EnqueueRequestsFromMapFunc(reconciler.resourceToClusterIngressConfig), defaultPredicate); err != nil {
		return nil, err
	}
	return c, nil
}
// resourceToClusterIngressConfig maps any watched object event onto a
// single reconcile request for the cluster ingress config; the triggering
// object itself is ignored.
func (r *reconciler) resourceToClusterIngressConfig(ctx context.Context, o client.Object) []reconcile.Request {
	request := reconcile.Request{
		NamespacedName: operatorcontroller.IngressClusterConfigName(),
	}
	return []reconcile.Request{request}
}
// Config holds all the things necessary for the controller to run.
type Config struct {
	// SecretNamespace is the namespace in which the generated Roles and
	// RoleBindings are created (the namespace holding the referenced
	// serving-certificate secrets).
	SecretNamespace string
}
// reconciler handles the actual ingress reconciliation logic in response to
// events.
type reconciler struct {
	config Config // controller configuration (secret namespace)
	client client.Client // writing client for create/delete/get of RBAC objects
	kclient kubernetes.Interface // clientset used by the resourceapply helpers
	cache cache.Cache // read path backed by the manager's informer cache
	eventRecorder events.Recorder // recorder passed to resourceapply
}
// Reconcile expects request to refer to the
// ingresses.config.openshift.io/cluster object and will do all the work to
// ensure that RBAC for any configured component routes is in the desired state.
//
// Improvement over the original: errors are wrapped with %w (instead of
// %v) so callers can inspect them with errors.Is/As, and the role Get
// failure now carries the role's identity.
func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
	log.Info("reconciling", "request", request)
	// Only proceed if we can get the ingress resource.
	ingress := &configv1.Ingress{}
	if err := r.cache.Get(ctx, request.NamespacedName, ingress); err != nil {
		if apierrors.IsNotFound(err) {
			log.Info("ingress cr not found; reconciliation will be skipped", "request", request)
			return reconcile.Result{}, nil
		}
		return reconcile.Result{}, fmt.Errorf("failed to get ingress %q: %w", request.NamespacedName, err)
	}
	// Get the list of componentRoutes defined in both the spec and status of
	// the ingress resource that require roles and roleBindings.
	componentRoutes := intersectingComponentRoutes(ingress.Spec.ComponentRoutes, ingress.Status.ComponentRoutes)
	// Ensure a role and roleBinding exist for each valid componentRoute.
	for _, componentRoute := range componentRoutes {
		// Ensure role.
		roleName, err := r.ensureServiceCertKeyPairSecretRole(componentRoute)
		if err != nil {
			return reconcile.Result{}, fmt.Errorf("failed to create role: %w", err)
		}
		// Get the role just created so the UID is available for the ownerReference on the roleBinding.
		role := &rbacv1.Role{}
		if err := r.client.Get(ctx, types.NamespacedName{Namespace: r.config.SecretNamespace, Name: roleName}, role); err != nil {
			return reconcile.Result{}, fmt.Errorf("failed to get role %s/%s: %w", r.config.SecretNamespace, roleName, err)
		}
		// Ensure roleBinding.
		if err := r.ensureServiceCertKeyPairSecretRoleBinding(role, componentRoute); err != nil {
			return reconcile.Result{}, fmt.Errorf("failed to create roleBinding: %w", err)
		}
	}
	// Hashes of the componentRoutes that should continue to exist.
	existingHashes := sets.String{}
	for _, cr := range componentRoutes {
		existingHashes.Insert(cr.Hash)
	}
	// Delete any roles or roleBindings that were generated for componentRoutes that are no longer defined.
	// RoleBindings are cleaned up by the garbage collector due to the owner reference to the Role.
	if err := utilerrors.NewAggregate(r.deleteOrphanedRoles(componentRoutes, existingHashes)); err != nil {
		return reconcile.Result{}, fmt.Errorf("error(s) deleting orphaned roles: %w", err)
	}
	return reconcile.Result{}, nil
}
// newAggregatedComponentRoute merges the matching spec and status entries
// of one component route into an aggregatedComponentRoute.
func newAggregatedComponentRoute(spec configv1.ComponentRouteSpec, status configv1.ComponentRouteStatus) aggregatedComponentRoute {
	// Defensive copy so later mutations of the status slice do not leak in.
	users := append([]configv1.ConsumingUser(nil), status.ConsumingUsers...)
	return aggregatedComponentRoute{
		Name:                   spec.Name,
		Hash:                   util.Hash(namespacedName(spec.Namespace, spec.Name)),
		ServingCertificateName: spec.ServingCertKeyPairSecret.Name,
		ConsumingUsers:         users,
	}
}
// aggregatedComponentRoute contains information from the ComponentRouteSpec
// and ComponentRouteStatus needed to generate the required Role and RoleBinding.
type aggregatedComponentRoute struct {
	Name string // component route name from the spec
	Hash string // hash of "<namespace>/<name>", used as the resource label value
	ServingCertificateName string // secret the generated Role grants read access to
	ConsumingUsers []configv1.ConsumingUser // users bound by the RoleBinding
}
// getSubjects converts the route's consuming users into RBAC subjects.
// Only 4-segment users whose second segment is "serviceaccount" produce a
// subject; malformed or other entries are silently skipped.
func (componentRoute *aggregatedComponentRoute) getSubjects() []rbacv1.Subject {
	subjects := []rbacv1.Subject{}
	for _, consumingUser := range componentRoute.ConsumingUsers {
		parts := strings.Split(string(consumingUser), ":")
		if len(parts) != 4 {
			// Ignore invalid consuming users.
			continue
		}
		if parts[1] != "serviceaccount" {
			continue
		}
		subjects = append(subjects, rbacv1.Subject{
			Kind:      rbacv1.ServiceAccountKind,
			APIGroup:  "",
			Namespace: parts[2],
			Name:      parts[3],
		})
	}
	return subjects
}
// requiresRBAC reports whether a Role/RoleBinding pair should be generated
// for this component route: there must be at least one valid subject and a
// named serving-certificate secret.
func (componentRoute *aggregatedComponentRoute) requiresRBAC() bool {
	return len(componentRoute.getSubjects()) > 0 && componentRoute.ServingCertificateName != ""
}
// intersectingComponentRoutes takes a slice of componentRouteSpec and a slice
// of componentRouteStatus, identifies which (namespace,name) tuples appear in
// both slices, and returns a slice of aggregatedComponentRoute corresponding to
// those tuples if they require Roles and RoleBindings.
func intersectingComponentRoutes(componentRouteSpecs []configv1.ComponentRouteSpec, componentRouteStatuses []configv1.ComponentRouteStatus) []aggregatedComponentRoute {
	// Index the statuses by the hash of their namespace/name.
	statusByHash := make(map[string]configv1.ComponentRouteStatus, len(componentRouteStatuses))
	for _, status := range componentRouteStatuses {
		statusByHash[util.Hash(namespacedName(status.Namespace, status.Name))] = status
	}
	componentRoutes := []aggregatedComponentRoute{}
	for _, spec := range componentRouteSpecs {
		status, ok := statusByHash[util.Hash(namespacedName(spec.Namespace, spec.Name))]
		if !ok {
			continue
		}
		if route := newAggregatedComponentRoute(spec, status); route.requiresRBAC() {
			componentRoutes = append(componentRoutes, route)
		}
	}
	return componentRoutes
}
// namespacedName renders a namespace/name pair in the canonical
// "<namespace>/<name>" form used for hashing component routes.
func namespacedName(namespace, name string) string {
	return namespace + "/" + name
}
// componentRouteResources returns list options selecting the generated
// resources belonging to one component route: matching hash label, within
// the global user-specified config namespace.
func componentRouteResources(componentRoute aggregatedComponentRoute) []client.ListOption {
	return []client.ListOption{
		client.MatchingLabels{
			componentRouteHashLabelKey: componentRoute.Hash,
		},
		client.InNamespace(operatorcontroller.GlobalUserSpecifiedConfigNamespace),
	}
}
// allComponentRouteResources returns list options selecting every generated
// component-route resource (any hash value) in the global user-specified
// config namespace; used when sweeping for orphans.
func allComponentRouteResources() []client.ListOption {
	return []client.ListOption{
		client.HasLabels{componentRouteHashLabelKey},
		client.InNamespace(operatorcontroller.GlobalUserSpecifiedConfigNamespace),
	}
}
// deleteOrphanedRoles deletes every generated Role whose component-route
// hash is no longer present in existingHashes, returning all errors
// encountered. RoleBindings are left to the garbage collector via their
// owner reference to the Role.
//
// Improvements over the original: the error string is lowercase per Go
// convention, and the element is addressed by index rather than taking the
// address of the range variable.
// NOTE(review): the componentRoutes argument is unused; it is kept to
// preserve the call signature.
func (r *reconciler) deleteOrphanedRoles(componentRoutes []aggregatedComponentRoute, existingHashes sets.String) []error {
	errors := []error{}
	roleList := &rbacv1.RoleList{}
	if err := r.cache.List(context.TODO(), roleList, allComponentRouteResources()...); err != nil {
		return append(errors, err)
	}
	for i := range roleList.Items {
		role := &roleList.Items[i]
		hash, ok := role.GetLabels()[componentRouteHashLabelKey]
		if !ok {
			errors = append(errors, fmt.Errorf("unable to find componentRoute hash label on role %s/%s", role.GetNamespace(), role.GetName()))
			continue
		}
		if existingHashes.Has(hash) {
			continue
		}
		log.Info("deleting role", "name", role.GetName(), "namespace", role.GetNamespace())
		if err := r.client.Delete(context.TODO(), role); err != nil && !apierrors.IsNotFound(err) {
			errors = append(errors, err)
		}
	}
	return errors
}
// ensureServiceCertKeyPairSecretRole ensures a Role exists that grants
// get/list/watch on the route's serving-certificate secret, labeled with
// the component-route hash. It returns the name of the Role.
func (r *reconciler) ensureServiceCertKeyPairSecretRole(componentRoute aggregatedComponentRoute) (string, error) {
	// Desired Role: GenerateName lets the API server pick a unique name on
	// first creation; the hash label ties it back to the component route.
	role := &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: componentRoute.Name + "-",
			Namespace:    r.config.SecretNamespace,
			Labels: map[string]string{
				componentRouteHashLabelKey: componentRoute.Hash,
			},
		},
		Rules: []rbacv1.PolicyRule{
			{
				Verbs:         []string{"get", "list", "watch"},
				APIGroups:     []string{""},
				Resources:     []string{"secrets"},
				ResourceNames: []string{componentRoute.ServingCertificateName},
			},
		},
	}
	// Look for an existing Role with this component route's hash label.
	roleList := &rbacv1.RoleList{}
	if err := r.cache.List(context.TODO(), roleList, componentRouteResources(componentRoute)...); err != nil {
		return "", err
	}
	if len(roleList.Items) == 0 {
		// None yet: create a fresh one under a generated name.
		if err := r.client.Create(context.TODO(), role); err != nil {
			return "", err
		}
	} else {
		// Reuse the first existing Role's name and apply the desired spec.
		role.Name = roleList.Items[0].Name
		role.GenerateName = ""
		if _, _, err := resourceapply.ApplyRole(context.TODO(), r.kclient.RbacV1(), r.eventRecorder, role); err != nil {
			return "", err
		}
	}
	return role.GetName(), nil
}
// ensureServiceCertKeyPairSecretRoleBinding applies a RoleBinding, named
// after the given Role, that binds the component route's consuming users
// to that Role. The owner reference to the Role makes the binding eligible
// for garbage collection when the Role is deleted.
func (r *reconciler) ensureServiceCertKeyPairSecretRoleBinding(role *rbacv1.Role, componentRoute aggregatedComponentRoute) error {
	if role == nil {
		return fmt.Errorf("cannot be passed nil role")
	}
	roleBinding := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:      role.GetName(),
			Namespace: r.config.SecretNamespace,
			Labels: map[string]string{
				componentRouteHashLabelKey: componentRoute.Hash,
			},
			// Owner reference drives cascade deletion alongside the Role.
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion: rbacv1.SchemeGroupVersion.String(),
					Kind:       "Role",
					Name:       role.GetName(),
					UID:        role.GetUID(),
				},
			},
		},
		Subjects: componentRoute.getSubjects(),
		RoleRef: rbacv1.RoleRef{
			Kind:     "Role",
			Name:     role.GetName(),
			APIGroup: rbacv1.GroupName,
		},
	}
	_, _, err := resourceapply.ApplyRoleBinding(context.TODO(), r.kclient.RbacV1(), r.eventRecorder, roleBinding)
	return err
}
|
[
7
] |
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: connected.protobuf
package s2cProtobuf
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type Connected struct {
}
func (m *Connected) Reset() { *m = Connected{} }
func (m *Connected) String() string { return proto.CompactTextString(m) }
func (*Connected) ProtoMessage() {}
func (*Connected) Descriptor() ([]byte, []int) { return fileDescriptorConnectedbuf, []int{0} }
func init() {
proto.RegisterType((*Connected)(nil), "s2cProtobuf.Connected")
}
func (m *Connected) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Connected) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
return i, nil
}
func encodeVarintConnectedbuf(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *Connected) Size() (n int) {
var l int
_ = l
return n
}
func sovConnectedbuf(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozConnectedbuf(x uint64) (n int) {
return sovConnectedbuf(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Connected) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowConnectedbuf
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Connected: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Connected: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
default:
iNdEx = preIndex
skippy, err := skipConnectedbuf(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthConnectedbuf
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipConnectedbuf(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowConnectedbuf
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowConnectedbuf
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowConnectedbuf
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthConnectedbuf
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowConnectedbuf
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipConnectedbuf(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthConnectedbuf = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowConnectedbuf = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("connected.protobuf", fileDescriptorConnectedbuf) }
var fileDescriptorConnectedbuf = []byte{
// 83 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0xce, 0xcf, 0xcb,
0x4b, 0x4d, 0x2e, 0x49, 0x4d, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0x13, 0xe2,
0x2e, 0x36, 0x4a, 0x0e, 0x80, 0x72, 0x94, 0xb8, 0xb9, 0x38, 0x9d, 0x61, 0x4a, 0x9c, 0x04, 0x4e,
0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f, 0xe5, 0x18,
0x92, 0xd8, 0xc0, 0xba, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x75, 0x30, 0x2e, 0xf2, 0x48,
0x00, 0x00, 0x00,
}
|
[
7
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.