package gphotos
import (
"bytes"
"io/ioutil"
"net/http"
"os"
"strconv"
)
// UploadingMedia is the only instance of UploadingMediaRequests (https://godoc.org/github.com/Q-Brains/gphotos#UploadingMediaRequests).
var UploadingMedia UploadingMediaRequests = uploadingMediaRequests{}
// UploadingMediaRequests is a collection of request methods belonging to `UploadingMedia`.
// The only instance of UploadingMediaRequests is UploadingMedia (https://godoc.org/github.com/Q-Brains/gphotos#UploadingMedia).
// Source: https://developers.google.com/photos/library/guides/overview
type UploadingMediaRequests interface {
baseURL() string
// UploadMedia is a method that uploads media items to a user’s library or album.
// Source: https://developers.google.com/photos/library/guides/upload-media
UploadMedia(client *http.Client, filePath string, filename string) (uploadToken string, err error)
// ResumableUploads is a method that uploads media items using the resumable upload protocol, which is better suited to large files and unreliable connections.
// Source: https://developers.google.com/photos/library/guides/resumable-uploads
ResumableUploads(client *http.Client, filePath string, filename string) (uploadToken string, err error)
}
type uploadingMediaRequests struct{}
func (upload uploadingMediaRequests) baseURL() string {
return "https://photoslibrary.googleapis.com/v1/uploads"
}
func (upload uploadingMediaRequests) UploadMedia(client *http.Client, filePath string, filename string) (uploadToken string, err error) {
file, err := os.Open(filePath)
if err != nil {
return "", err
}
defer file.Close()
req, err := http.NewRequest("POST", upload.baseURL(), file)
if err != nil {
return "", err
}
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("X-Goog-Upload-File-Name", filename)
req.Header.Set("X-Goog-Upload-Protocol", "raw")
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
return bytes.NewBuffer(b).String(), nil
}
func (upload uploadingMediaRequests) ResumableUploads(client *http.Client, filePath string, filename string) (uploadToken string, err error) {
file, err := os.Open(filePath)
if err != nil {
return "", err
}
defer file.Close()
req, err := http.NewRequest("POST", upload.baseURL(), nil)
if err != nil {
return "", err
}
contentType, err := detectContentType(file)
if err != nil {
return "", err
}
length, err := byteLength(file)
if err != nil {
return "", err
}
req.Header.Set("Content-Length", strconv.Itoa(0))
req.Header.Set("X-Goog-Upload-Command", "start")
req.Header.Set("X-Goog-Upload-Content-Type", contentType)
req.Header.Set("X-Goog-Upload-File-Name", filename)
req.Header.Set("X-Goog-Upload-Protocol", "resumable")
req.Header.Set("X-Goog-Upload-Raw-Size", strconv.FormatInt(length, 10))
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
uploadURL := resp.Header.Get("X-Goog-Upload-URL")
// rewind the file: detectContentType consumed its first 512 bytes
if _, err := file.Seek(0, 0); err != nil {
return "", err
}
req, err = http.NewRequest("POST", uploadURL, file)
if err != nil {
return "", err
}
req.Header.Set("Content-Length", strconv.FormatInt(length, 10))
req.Header.Set("X-Goog-Upload-Command", "upload, finalize")
req.Header.Set("X-Goog-Upload-Offset", strconv.Itoa(0))
resp, err = client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
// the body of the finalizing response contains the upload token
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
return bytes.NewBuffer(b).String(), nil
}
func detectContentType(file *os.File) (string, error) {
buffer := make([]byte, 512)
_, err := file.Read(buffer)
if err != nil {
return "", err
}
return http.DetectContentType(buffer), nil
}
func byteLength(file *os.File) (int64, error) {
fi, err := file.Stat()
if err != nil {
return 0, err
}
return fi.Size(), nil
}
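A minimal usage sketch for the raw upload path above, assuming an *http.Client that is already authorized for the Photos Library API (for example via golang.org/x/oauth2); the file path and name are placeholders:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/Q-Brains/gphotos"
)

func main() {
	// NOTE: a real client must carry OAuth2 credentials with the Photos Library
	// upload scope; http.DefaultClient is used here only to keep the sketch short.
	client := http.DefaultClient

	uploadToken, err := gphotos.UploadingMedia.UploadMedia(client, "./photo.jpg", "photo.jpg")
	if err != nil {
		log.Fatalf("upload failed: %v", err)
	}
	fmt.Println("upload token:", uploadToken)
}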
package twitch
import (
"encoding/json"
"fmt"
twitchirc "github.com/gempir/go-twitch-irc"
"github.com/gorilla/websocket"
)
// Event is a single stream event forwarded from IRC to the websocket
type Event struct {
Type string `json:"type"`
Content string `json:"content"`
}
// IRCToWSStreamer streams events from Twitch IRC to a websocket connection
type IRCToWSStreamer struct {
ws *websocket.Conn
ircClient *twitchirc.Client
streamerName string
token string
}
// NewIRCToWSStreamer creates a new IRCToWSStreamer
func NewIRCToWSStreamer(ws *websocket.Conn, userName string, streamerName string, token string) *IRCToWSStreamer {
return &IRCToWSStreamer{
ws: ws,
ircClient: twitchirc.NewClient(userName, "oauth:"+token),
streamerName: streamerName,
token: token,
}
}
// Stream registers IRC handlers that forward events to the websocket, joins the streamer's channel, and blocks on the IRC connection
func (ircws *IRCToWSStreamer) Stream() error {
ircws.ircClient.OnNewWhisper(func(user twitchirc.User, message twitchirc.Message) {
event := Event{
Type: "whisper",
Content: fmt.Sprintf("%s: %s", user.DisplayName, message.Text),
}
data, _ := json.Marshal(event)
err := ircws.ws.WriteMessage(websocket.TextMessage, data)
if err != nil {
ircws.ircClient.Disconnect()
}
})
ircws.ircClient.OnNewMessage(func(channel string, user twitchirc.User, message twitchirc.Message) {
event := Event{
Type: "message",
Content: fmt.Sprintf("%s: %s", user.DisplayName, message.Text),
}
data, _ := json.Marshal(event)
err := ircws.ws.WriteMessage(websocket.TextMessage, data)
if err != nil {
ircws.ircClient.Disconnect()
}
})
ircws.ircClient.OnNewRoomstateMessage(func(channel string, user twitchirc.User, message twitchirc.Message) {
event := Event{
Type: "room state message",
Content: fmt.Sprintf("%s: %s", user.DisplayName, message.Text),
}
data, _ := json.Marshal(event)
err := ircws.ws.WriteMessage(websocket.TextMessage, data)
if err != nil {
ircws.ircClient.Disconnect()
}
})
ircws.ircClient.OnNewClearchatMessage(func(channel string, user twitchirc.User, message twitchirc.Message) {
event := Event{
Type: "clear chat message",
Content: fmt.Sprintf("%s: %s", user.DisplayName, message.Text),
}
data, _ := json.Marshal(event)
err := ircws.ws.WriteMessage(websocket.TextMessage, data)
if err != nil {
ircws.ircClient.Disconnect()
}
})
ircws.ircClient.OnNewUsernoticeMessage(func(channel string, user twitchirc.User, message twitchirc.Message) {
event := Event{
Type: "user notice message",
Content: fmt.Sprintf("%s: %s", user.DisplayName, message.Text),
}
data, _ := json.Marshal(event)
err := ircws.ws.WriteMessage(websocket.TextMessage, data)
if err != nil {
ircws.ircClient.Disconnect()
}
})
ircws.ircClient.OnNewNoticeMessage(func(channel string, user twitchirc.User, message twitchirc.Message) {
event := Event{
Type: "notice message",
Content: fmt.Sprintf("%s: %s", user.DisplayName, message.Text),
}
data, _ := json.Marshal(event)
err := ircws.ws.WriteMessage(websocket.TextMessage, data)
if err != nil {
ircws.ircClient.Disconnect()
}
})
ircws.ircClient.OnNewUserstateMessage(func(channel string, user twitchirc.User, message twitchirc.Message) {
event := Event{
Type: "user state message",
Content: fmt.Sprintf("%s: %s", user.DisplayName, message.Text),
}
data, _ := json.Marshal(event)
err := ircws.ws.WriteMessage(websocket.TextMessage, data)
if err != nil {
ircws.ircClient.Disconnect()
}
})
ircws.ircClient.OnUserJoin(func(channel, user string) {
event := Event{
Type: "user join",
Content: user,
}
data, _ := json.Marshal(event)
err := ircws.ws.WriteMessage(websocket.TextMessage, data)
if err != nil {
ircws.ircClient.Disconnect()
}
})
ircws.ircClient.OnUserPart(func(channel, user string) {
event := Event{
Type: "user part",
Content: user,
}
data, _ := json.Marshal(event)
err := ircws.ws.WriteMessage(websocket.TextMessage, data)
if err != nil {
ircws.ircClient.Disconnect()
}
})
ircws.ircClient.Join(ircws.streamerName)
return ircws.ircClient.Connect()
}
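A hedged usage sketch wiring the streamer to an incoming websocket connection; the import path of the twitch package, the user and streamer names, and the token are placeholders:

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"

	"example.com/yourapp/twitch" // placeholder import path for the package above
)

var upgrader = websocket.Upgrader{}

func streamHandler(w http.ResponseWriter, r *http.Request) {
	ws, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println("websocket upgrade failed:", err)
		return
	}
	defer ws.Close()

	// NewIRCToWSStreamer prepends "oauth:" itself, so pass the bare token here.
	streamer := twitch.NewIRCToWSStreamer(ws, "botUserName", "streamerName", "userOAuthToken")
	if err := streamer.Stream(); err != nil {
		log.Println("IRC connection ended:", err)
	}
}

func main() {
	http.HandleFunc("/stream", streamHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}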
package auth
import (
"fmt"
"github.com/RentTheRunway/vault-auto-config/pkg/vault-auto-config/client"
"github.com/RentTheRunway/vault-auto-config/pkg/vault-auto-config/config"
)
// ReadAuthConfigState reads the config state for an auth backend
func ReadAuthConfigState(client client.Client, name string, node *config.Node) error {
node = node.AddNode("config")
var err error
if node.Config, err = client.Read("auth/%s/config", name); err != nil {
return err
}
return nil
}
// ReadAuthResourceState reads a generic auth resource state for an auth backend (e.g. groups, roles, etc.)
func ReadAuthResourceState(resourceClient client.Client, name string, listResource string, resource string, node *config.Node) error {
node = node.AddNode(resource)
resources, err := resourceClient.List("auth/%s/%s", name, listResource)
if err != nil {
return err
}
for _, resource := range resources {
resourceNode := node.AddNode(resource.Name)
resourceNode.Config = resource.Value
}
return nil
}
// AppendAuthState adds additional config to a node from another auth resource.
// Useful for combining multiple resources.
func AppendAuthState(resourceClient client.Client, name string, resource string, subResource string, node *config.Node) error {
node = node.Children[resource]
if node == nil {
return fmt.Errorf("unable to append state. No child %s", resource)
}
for childName, node := range node.Children {
payload, err := resourceClient.Read("auth/%s/%s/%s/%s", name, resource, childName, subResource)
if err != nil {
return err
}
client.MergePayloads(node.Config, payload)
}
return nil
}
// ReadAuthGroupsState reads group states for an auth backend
func ReadAuthGroupsState(client client.Client, name string, node *config.Node) error {
return ReadAuthResourceState(client, name, "groups", "groups", node)
}
// ReadAuthUsersState reads user states for an auth backend
func ReadAuthUsersState(client client.Client, name string, node *config.Node) error {
return ReadAuthResourceState(client, name, "users", "users", node)
}
// ReadAuthRolesState reads role states for an auth backend
func ReadAuthRolesState(client client.Client, name string, node *config.Node) error {
return ReadAuthResourceState(client, name, "roles", "roles", node)
}
// ReadAuthRoleState reads role states for an auth backend, but with the singular name "role"
func ReadAuthRoleState(client client.Client, name string, node *config.Node) error {
return ReadAuthResourceState(client, name, "role", "role", node)
}
func AppendAuthRoleIdState(client client.Client, name string, node *config.Node) error {
return AppendAuthState(client, name, "role", "role-id", node)
}
func AppendAuthSecretIdState(client client.Client, name string, node *config.Node) error {
return AppendAuthState(client, name, "role", "custom-secret-id", node)
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
genjobs automatically generates the security repo presubmits from the
kubernetes presubmits
NOTE: this makes a few assumptions
- $PWD/../../prow/config.yaml is where the config lives (unless you supply --config=)
- $PWD/.. is where the job configs live (unless you supply --jobs=)
- the output is job configs ($PWD/..) + /kubernetes-security/generated-security-jobs.yaml (unless you supply --output)
*/
package main
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"reflect"
"regexp"
"strings"
flag "github.com/spf13/pflag"
"sigs.k8s.io/yaml"
coreapi "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
prowapi "k8s.io/test-infra/prow/apis/prowjobs/v1"
"k8s.io/test-infra/prow/config"
)
var configPath = flag.String("config", "", "path to prow/config.yaml, defaults to $PWD/../../prow/config.yaml")
var jobsPath = flag.String("jobs", "", "path to prowjobs, defaults to $PWD/../")
var outputPath = flag.String("output", "", "path to output the generated jobs to, defaults to $PWD/generated-security-jobs.yaml")
// remove merged presets from a podspec
func undoPreset(preset *config.Preset, labels map[string]string, pod *coreapi.PodSpec) {
// skip presets that do not match the job labels
for l, v := range preset.Labels {
if v2, ok := labels[l]; !ok || v2 != v {
return
}
}
// collect up preset created keys
removeEnvNames := sets.NewString()
for _, e1 := range preset.Env {
removeEnvNames.Insert(e1.Name)
}
removeVolumeNames := sets.NewString()
for _, volume := range preset.Volumes {
removeVolumeNames.Insert(volume.Name)
}
removeVolumeMountNames := sets.NewString()
for _, volumeMount := range preset.VolumeMounts {
removeVolumeMountNames.Insert(volumeMount.Name)
}
// remove volumes from spec
filteredVolumes := []coreapi.Volume{}
for _, volume := range pod.Volumes {
if !removeVolumeNames.Has(volume.Name) {
filteredVolumes = append(filteredVolumes, volume)
}
}
pod.Volumes = filteredVolumes
// remove env and volume mounts from containers
for i := range pod.Containers {
filteredEnv := []coreapi.EnvVar{}
for _, env := range pod.Containers[i].Env {
if !removeEnvNames.Has(env.Name) {
filteredEnv = append(filteredEnv, env)
}
}
pod.Containers[i].Env = filteredEnv
filteredVolumeMounts := []coreapi.VolumeMount{}
for _, mount := range pod.Containers[i].VolumeMounts {
if !removeVolumeMountNames.Has(mount.Name) {
filteredVolumeMounts = append(filteredVolumeMounts, mount)
}
}
pod.Containers[i].VolumeMounts = filteredVolumeMounts
}
}
// undo merged presets from loaded presubmit and its children
func undoPresubmitPresets(presets []config.Preset, presubmit *config.Presubmit) {
if presubmit.Spec == nil {
return
}
for _, preset := range presets {
undoPreset(&preset, presubmit.Labels, presubmit.Spec)
}
}
// convert a kubernetes/kubernetes job to a kubernetes-security/kubernetes job
// dropLabels should be a set of "k: v" strings
// xref: prow/config/config_test.go replace(...)
// it will return the same job mutated, or nil if the job should be removed
func convertJobToSecurityJob(j *config.Presubmit, dropLabels sets.String, defaultDecoration *prowapi.DecorationConfig, podNamespace string) *config.Presubmit {
// if a GKE job, drop it (returning nil removes the job from the generated config)
if strings.Contains(j.Name, "gke") {
return nil
}
// filter out the unwanted labels
if len(j.Labels) > 0 {
filteredLabels := make(map[string]string)
for k, v := range j.Labels {
if !dropLabels.Has(fmt.Sprintf("%s: %s", k, v)) {
filteredLabels[k] = v
}
}
j.Labels = filteredLabels
}
originalName := j.Name
// fix name and triggers for all jobs
j.Name = strings.Replace(originalName, "pull-kubernetes", "pull-security-kubernetes", -1)
j.RerunCommand = strings.Replace(j.RerunCommand, "pull-kubernetes", "pull-security-kubernetes", -1)
j.Trigger = strings.Replace(j.Trigger, "pull-kubernetes", "pull-security-kubernetes", -1)
j.Context = strings.Replace(j.Context, "pull-kubernetes", "pull-security-kubernetes", -1)
if j.Namespace != nil && *j.Namespace == podNamespace {
j.Namespace = nil
}
if j.DecorationConfig != nil && reflect.DeepEqual(j.DecorationConfig, defaultDecoration) {
j.DecorationConfig = nil
}
// handle k8s job args, volumes etc
if j.Agent == "kubernetes" {
j.Cluster = "security"
container := &j.Spec.Containers[0]
// check for args that need hijacking
endsWithScenarioArgs := false
needGCSFlag := false
needGCSSharedFlag := false
needStagingFlag := false
isGCPe2e := false
for i, arg := range container.Args {
if arg == "--" {
endsWithScenarioArgs = true
// handle --repo substitution for main repo
} else if arg == "--repo=k8s.io/kubernetes" || strings.HasPrefix(arg, "--repo=k8s.io/kubernetes=") || arg == "--repo=k8s.io/$(REPO_NAME)" || strings.HasPrefix(arg, "--repo=k8s.io/$(REPO_NAME)=") {
container.Args[i] = strings.Replace(arg, "k8s.io/", "github.com/kubernetes-security/", 1)
// handle upload bucket
} else if strings.HasPrefix(arg, "--upload=") {
container.Args[i] = "--upload=gs://kubernetes-security-prow/pr-logs"
// check if we need to change staging artifact location for bazel-build and e2es
} else if strings.HasPrefix(arg, "--release") {
needGCSFlag = true
needGCSSharedFlag = true
} else if strings.HasPrefix(arg, "--stage") {
needStagingFlag = true
} else if strings.HasPrefix(arg, "--use-shared-build") {
needGCSSharedFlag = true
}
}
// NOTE: this needs to be before the bare -- and then bootstrap args so we prepend it
container.Args = append([]string{"--ssh=/etc/ssh-security/ssh-security"}, container.Args...)
// check for scenario specific tweaks
// NOTE: jobs are remapped to their original name in bootstrap to de-dupe config
scenario := ""
for _, arg := range container.Args {
if strings.HasPrefix(arg, "--scenario=") {
scenario = strings.TrimPrefix(arg, "--scenario=")
}
}
// check if we need to change staging artifact location for bazel-build and e2es
if scenario == "kubernetes_bazel" {
for _, arg := range container.Args {
if strings.HasPrefix(arg, "--release") {
needGCSFlag = true
needGCSSharedFlag = true
break
}
}
}
if scenario == "kubernetes_e2e" {
for _, arg := range container.Args {
if strings.Contains(arg, "gcp") {
isGCPe2e = true
}
if strings.HasPrefix(arg, "--stage") {
needStagingFlag = true
} else if strings.HasPrefix(arg, "--use-shared-build") {
needGCSSharedFlag = true
}
}
}
// NOTE: these need to go at the end, after a bare -- (added here if there isn't one already), because they are scenario args
if !endsWithScenarioArgs && (needGCSFlag || needGCSSharedFlag || needStagingFlag) {
container.Args = append(container.Args, "--")
}
if needGCSFlag {
container.Args = append(container.Args, "--gcs=gs://kubernetes-security-prow/ci/"+j.Name)
}
if needGCSSharedFlag {
container.Args = append(container.Args, "--gcs-shared=gs://kubernetes-security-prow/bazel")
}
if needStagingFlag {
container.Args = append(container.Args, "--stage=gs://kubernetes-security-prow/ci/"+j.Name)
}
// GCP e2e use a fixed project for security testing
if isGCPe2e {
container.Args = append(container.Args, "--gcp-project=k8s-jkns-pr-gce-etcd3")
}
// add ssh key volume / mount
container.VolumeMounts = append(
container.VolumeMounts,
coreapi.VolumeMount{
Name: "ssh-security",
MountPath: "/etc/ssh-security",
},
)
defaultMode := int32(0400)
j.Spec.Volumes = append(
j.Spec.Volumes,
coreapi.Volume{
Name: "ssh-security",
VolumeSource: coreapi.VolumeSource{
Secret: &coreapi.SecretVolumeSource{
SecretName: "ssh-security",
DefaultMode: &defaultMode,
},
},
},
)
}
return j
}
// `foo: null` entries are unnecessary and make the config larger, so we strip them out
func yamlBytesStripNulls(yamlBytes []byte) []byte {
nullRE := regexp.MustCompile("(?m)[\n]+^[^\n]+: null$")
return nullRE.ReplaceAll(yamlBytes, []byte{})
}
func yamlBytesToEntry(yamlBytes []byte, indent int) []byte {
var buff bytes.Buffer
// spaces of length indent
prefix := bytes.Repeat([]byte{32}, indent)
// `- ` before the first field of a yaml entry
prefix[len(prefix)-2] = byte(45)
buff.Write(prefix)
// put back space
prefix[len(prefix)-2] = byte(32)
for i, b := range yamlBytes {
buff.WriteByte(b)
// indent after newline, except the last one
if b == byte(10) && i+1 != len(yamlBytes) {
buff.Write(prefix)
}
}
return buff.Bytes()
}
// copyFile copies srcPath over destPath; used as a fallback when os.Rename fails
// (e.g. when the temp file and the output are on different filesystems)
func copyFile(srcPath, destPath string) error {
src, err := os.Open(srcPath)
if err != nil {
return err
}
dst, err := os.OpenFile(destPath, os.O_WRONLY, 0666)
if err != nil {
return err
}
_, err = io.Copy(dst, src)
if err != nil {
return err
}
dst.Sync()
dst.Close()
src.Close()
return nil
}
func main() {
flag.Parse()
// default to $PWD/prow/config.yaml
pwd, err := os.Getwd()
if err != nil {
log.Fatalf("Failed to get $PWD: %v", err)
}
if *configPath == "" {
*configPath = pwd + "/../../prow/config.yaml"
}
if *jobsPath == "" {
*jobsPath = pwd + "/../"
}
if *outputPath == "" {
*outputPath = pwd + "/generated-security-jobs.yaml"
}
// read in current prow config
parsed, err := config.Load(*configPath, *jobsPath)
if err != nil {
log.Fatalf("Failed to read config file: %v", err)
}
// create temp file to write updated config
f, err := ioutil.TempFile(filepath.Dir(*configPath), "temp")
if err != nil {
log.Fatalf("Failed to create temp file: %v", err)
}
defer os.Remove(f.Name())
// write the header
io.WriteString(f, "# Autogenerated by genjobs.go, do NOT edit!\n")
io.WriteString(f, "# see genjobs.go, which you can run with hack/update-config.sh\n")
io.WriteString(f, "presubmits:\n kubernetes-security/kubernetes:\n")
// this is the set of preset labels we want to remove
// we remove the bazel remote cache because we do not deploy one to this build cluster
dropLabels := sets.NewString("preset-bazel-remote-cache-enabled: true")
// convert each kubernetes/kubernetes presubmit to a
// kubernetes-security/kubernetes presubmit and write to the file
for i := range parsed.Presubmits["kubernetes/kubernetes"] {
job := &parsed.Presubmits["kubernetes/kubernetes"][i]
// undo merged presets, this needs to occur first!
undoPresubmitPresets(parsed.Presets, job)
// now convert the job
job = convertJobToSecurityJob(job, dropLabels, parsed.Plank.DefaultDecorationConfig, parsed.PodNamespace)
if job == nil {
continue
}
jobBytes, err := yaml.Marshal(job)
if err != nil {
log.Fatalf("Failed to marshal job: %v", err)
}
// write, properly indented, and stripped of `foo: null`
jobBytes = yamlBytesStripNulls(jobBytes)
f.Write(yamlBytesToEntry(jobBytes, 4))
}
f.Sync()
// move file to replace original
f.Close()
err = os.Rename(f.Name(), *outputPath)
if err != nil {
// fallback to copying the file instead
err = copyFile(f.Name(), *outputPath)
if err != nil {
log.Fatalf("Failed to replace config with updated version: %v", err)
}
}
}
package plugins
import (
"fmt"
"log"
"math"
"github.com/stellar/go/build"
"github.com/stellar/go/clients/horizon"
"github.com/stellar/kelp/model"
"github.com/stellar/kelp/support/utils"
)
type orderConstraintsFilter struct {
oc *model.OrderConstraints
baseAsset horizon.Asset
quoteAsset horizon.Asset
}
var _ SubmitFilter = &orderConstraintsFilter{}
// MakeFilterOrderConstraints makes a submit filter based on the passed in orderConstraints
func MakeFilterOrderConstraints(
oc *model.OrderConstraints,
baseAsset horizon.Asset,
quoteAsset horizon.Asset,
) SubmitFilter {
return &orderConstraintsFilter{
oc: oc,
baseAsset: baseAsset,
quoteAsset: quoteAsset,
}
}
// Apply impl.
func (f *orderConstraintsFilter) Apply(
ops []build.TransactionMutator,
sellingOffers []horizon.Offer,
buyingOffers []horizon.Offer,
) ([]build.TransactionMutator, error) {
numKeep := 0
numDropped := 0
filteredOps := []build.TransactionMutator{}
for _, op := range ops {
var keep bool
var e error
var opPtr *build.ManageOfferBuilder
switch o := op.(type) {
case *build.ManageOfferBuilder:
keep, e = f.shouldKeepOffer(o)
if e != nil {
return nil, fmt.Errorf("could not transform offer (pointer case): %s", e)
}
opPtr = o
case build.ManageOfferBuilder:
keep, e = f.shouldKeepOffer(&o)
if e != nil {
return nil, fmt.Errorf("could not check transform offer (non-pointer case): %s", e)
}
opPtr = &o
default:
keep = true
}
if keep {
// append the original op so that ops kept by the default case pass through unchanged (opPtr is nil there)
filteredOps = append(filteredOps, op)
numKeep++
} else {
numDropped++
// figure out how to convert the offer to a dropped state
if opPtr.MO.OfferId == 0 {
// new offers can be dropped, so don't add to filteredOps
} else if opPtr.MO.Amount != 0 {
// modify offers should be converted to delete offers
opCopy := *opPtr
opCopy.MO.Amount = 0
filteredOps = append(filteredOps, opCopy)
} else {
return nil, fmt.Errorf("unable to drop manageOffer operation (probably a delete op that should not have reached here): offerID=%d, amountRaw=%.8f", opPtr.MO.OfferId, float64(opPtr.MO.Amount))
}
}
}
log.Printf("orderConstraintsFilter: dropped %d, kept %d ops from original %d ops, len(filteredOps) = %d\n", numDropped, numKeep, len(ops), len(filteredOps))
return filteredOps, nil
}
func (f *orderConstraintsFilter) shouldKeepOffer(op *build.ManageOfferBuilder) (bool, error) {
// delete operations should never be dropped
if op.MO.Amount == 0 {
return true, nil
}
isSell, e := utils.IsSelling(f.baseAsset, f.quoteAsset, op.MO.Selling, op.MO.Buying)
if e != nil {
return false, fmt.Errorf("error when running the isSelling check: %s", e)
}
sellPrice := float64(op.MO.Price.N) / float64(op.MO.Price.D)
if isSell {
baseAmount := float64(op.MO.Amount) / math.Pow(10, 7)
quoteAmount := baseAmount * sellPrice
if baseAmount < f.oc.MinBaseVolume.AsFloat() {
log.Printf("orderConstraintsFilter: selling, keep = (baseAmount) %.8f < %s (MinBaseVolume): keep = false\n", baseAmount, f.oc.MinBaseVolume.AsString())
return false, nil
}
if f.oc.MinQuoteVolume != nil && quoteAmount < f.oc.MinQuoteVolume.AsFloat() {
log.Printf("orderConstraintsFilter: selling, keep = (quoteAmount) %.8f < %s (MinQuoteVolume): keep = false\n", quoteAmount, f.oc.MinQuoteVolume.AsString())
return false, nil
}
log.Printf("orderConstraintsFilter: selling, baseAmount=%.8f, quoteAmount=%.8f, keep = true\n", baseAmount, quoteAmount)
return true, nil
}
// buying
quoteAmount := float64(op.MO.Amount) / math.Pow(10, 7)
baseAmount := quoteAmount * sellPrice
if baseAmount < f.oc.MinBaseVolume.AsFloat() {
log.Printf("orderConstraintsFilter: buying, keep = (baseAmount) %.8f < %s (MinBaseVolume): keep = false\n", baseAmount, f.oc.MinBaseVolume.AsString())
return false, nil
}
if f.oc.MinQuoteVolume != nil && quoteAmount < f.oc.MinQuoteVolume.AsFloat() {
log.Printf("orderConstraintsFilter: buying, keep = (quoteAmount) %.8f < %s (MinQuoteVolume): keep = false\n", quoteAmount, f.oc.MinQuoteVolume.AsString())
return false, nil
}
log.Printf("orderConstraintsFilter: buying, baseAmount=%.8f, quoteAmount=%.8f, keep = true\n", baseAmount, quoteAmount)
return true, nil
}
/*
Copyright (c) Facebook, Inc. and its affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clientgenlib
import (
"fmt"
"net"
"sync/atomic"
"syscall"
"time"
"golang.org/x/sys/unix"
ptp "github.com/facebookincubator/ptp/protocol"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/google/gopacket/pfring"
"github.com/kpango/fastime"
log "github.com/sirupsen/logrus"
)
// inPacket is input packet data + receive timestamp
type inPacket struct {
data []byte
ts time.Time
fromTX bool
}
type outPacket struct {
data *gopacket.SerializeBuffer
getTS bool
pktType uint8
sentTS time.Time
cl *SingleClientGen
}
func startIOWorker(cfg *ClientGenConfig) {
rxStartDone := make(chan bool)
for rxwkr := 0; rxwkr < cfg.NumRXWorkers; rxwkr++ {
func(i int) {
cfg.Eg.Go(func() error {
doneChan := make(chan error, 1)
go func() {
var profiler Profiler
profiler.Init(cfg.Eg, cfg.Ctx, true, fmt.Sprintf("RX Worker %d", i))
cfg.PerfProfilers = append(cfg.PerfProfilers, &profiler)
var ring *pfring.Ring
var rawIn *inPacket
var err error
// 1<<24 is PF_RING_DISCARD_INJECTED_PKTS: packets transmitted via the ring are not read back
if ring, err = pfring.NewRing(cfg.Iface, 4096, (1<<24)|pfring.FlagPromisc|pfring.FlagHWTimestamp); err != nil {
log.Errorf("pfring ring creation error: %v", err)
doneChan <- err
return
}
defer ring.Close()
// just use fixed cluster number 1, round robin packets
if err = ring.SetCluster(1, pfring.ClusterType(pfring.ClusterRoundRobin)); err != nil {
log.Errorf("pfring SetCluster error: %v", err)
doneChan <- err
return
}
if err = ring.SetDirection(pfring.ReceiveOnly); err != nil {
log.Errorf("pfring failed to set direction")
doneChan <- err
return
}
if err = ring.SetPollWatermark(1); err != nil {
log.Errorf("pfring failed to set poll watermark")
doneChan <- err
return
}
if err = ring.SetPollDuration(1); err != nil {
log.Errorf("pfring failed to set poll duration")
doneChan <- err
return
}
if err = ring.SetSamplingRate(1); err != nil {
log.Errorf("pfring failed to set sample rate")
doneChan <- err
return
}
// only using read for now
if err = ring.SetSocketMode(pfring.ReadOnly); err != nil {
log.Errorf("pfring SetSocketMode error: %v", err)
doneChan <- err
return
} else if err = ring.Enable(); err != nil {
log.Errorf("pfring Enable error: %v", err)
doneChan <- err
return
}
if cfg.DebugPrint || cfg.DebugIoWkrRX {
log.Debugf("RX wkr %d pfring done!", i)
}
var data []byte
var ci gopacket.CaptureInfo
rxStartDone <- true
for {
// try to read from handle
data, ci, err = ring.ReadPacketData()
if err != nil || data == nil || len(data) == 0 {
continue
}
profiler.Tick()
if cfg.DebugPrint || cfg.DebugIoWkrRX {
log.Debugf("PFring listener %d got data ts %v", i, ci.Timestamp)
}
rawIn = cfg.RunData.inPacketPool.Get().(*inPacket)
rawIn.data = data
rawIn.ts = ci.Timestamp
rawIn.fromTX = false
cfg.RunData.rawInput[getRxChanNumToUse(cfg)] <- rawIn
atomic.AddUint64(&cfg.Counters.TotalPacketsRcvd, 1)
atomic.AddUint64(&cfg.perIORX[i], 1)
profiler.Tock()
}
}()
select {
case <-(*cfg.Ctx).Done():
log.Errorf("RX %d done due to context", i)
return (*cfg.Ctx).Err()
case err := <-doneChan:
return err
}
})
}(rxwkr)
select {
case <-rxStartDone:
if cfg.DebugPrint || cfg.DebugIoWkrRX {
log.Debugf("RX worker %d running", rxwkr)
}
continue
case <-(*cfg.Ctx).Done():
log.Errorf("Rx worker startup error")
return
}
}
txStartDone := make(chan bool)
for txwkr := 0; txwkr < cfg.NumTXWorkers; txwkr++ {
func(i int) {
cfg.Eg.Go(func() error {
doneChan := make(chan error, 1)
go func() {
// PFring doesn't actually implement TX timestamps:
// the API documentation lists them, but at a low level they are not used.
// Instead, create a raw socket, send packets via it, and read the TS similar to Oleg's method.
var profiler Profiler
profiler.Init(cfg.Eg, cfg.Ctx, true, fmt.Sprintf("TX worker %d", i))
cfg.PerfProfilers = append(cfg.PerfProfilers, &profiler)
txTSworker := make([]Profiler, cfg.NumTXTSWorkerPerTx)
for j := 0; j < cfg.NumTXTSWorkerPerTx; j++ {
txTSworker[j].Init(cfg.Eg, cfg.Ctx, true, fmt.Sprintf("TX worker %d TSRead worker %d", i, j))
cfg.PerfProfilers = append(cfg.PerfProfilers, &txTSworker[j])
}
ifInfo, err := net.InterfaceByName(cfg.Iface)
if err != nil {
log.Errorf("Interface by name failed in start tx worker")
doneChan <- err
return
}
var haddr [8]byte
// copy the full hardware address (typically 6 bytes for Ethernet) without over-slicing it
copy(haddr[:], ifInfo.HardwareAddr)
addr := syscall.SockaddrLinklayer{
Protocol: syscall.ETH_P_ALL,
Ifindex: ifInfo.Index,
Halen: uint8(len(ifInfo.HardwareAddr)),
Addr: haddr,
}
fdTS, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, syscall.ETH_P_ALL)
if err != nil {
log.Errorf("Failed to make raw socket for TS worker %d err %v", i, err)
}
defer syscall.Close(fdTS)
err = syscall.Bind(fdTS, &addr)
if err != nil {
log.Errorf("Failed to bind TS socket %v", err)
}
if err := ptp.IoctlTimestamp(fdTS, cfg.Iface); err != nil {
log.Errorf("Failed to ioctl timestamp tx worker %v", i)
return
}
// Enable hardware timestamp capabilities on socket
flags := unix.SOF_TIMESTAMPING_TX_HARDWARE |
unix.SOF_TIMESTAMPING_RX_HARDWARE |
unix.SOF_TIMESTAMPING_RAW_HARDWARE
if err := unix.SetsockoptInt(fdTS, unix.SOL_SOCKET, ptp.Timestamping(), flags); err != nil {
log.Errorf("Failed to set flags tx worker %v err %v", i, err)
return
}
if err := unix.SetsockoptInt(fdTS, unix.SOL_SOCKET, unix.SO_SELECT_ERR_QUEUE, 1); err != nil {
log.Errorf("Failed to select err queue tx worker %v", i)
return
}
/* simple socket for non-timestamping */
fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, syscall.ETH_P_ALL)
if err != nil {
log.Errorf("Creating simple socket for tx worker %d failed err %v", i, err)
}
defer syscall.Close(fd)
err = syscall.Bind(fd, &addr)
if err != nil {
log.Errorf("Simple socket bind failed tx worker %d err %v", i, err)
}
var txTSBytesReceived uint64
// start go-routines to handle TX TS
txTSStartDone := make(chan bool)
for j := 0; j < cfg.NumTXTSWorkerPerTx; j++ {
go func(workerNum int) {
var pktSent []byte
var inPkt *inPacket
var pktSentLen int
var err error
var msgs []byte
var txTS time.Time
msgs = make([]byte, 1000)
pktSent = cfg.RunData.bytePool.Get().([]byte)
// check if there are control messages on timestamp socket
txTSStartDone <- true
for {
// ideally this should use ptp.PeekRecvMsgs, but the overhead is likely similar, so leave this as is
txTSworker[workerNum].Tick()
pktSentLen, _, _, _, err = unix.Recvmsg(fdTS, pktSent, msgs, unix.MSG_ERRQUEUE)
if err != nil || pktSentLen == 0 {
continue
}
txTS, err = ptp.SocketControlMessageTimestamp(msgs)
if err != nil {
log.Errorf("SocketControlMessageTimestamp err %v", err)
}
inPkt = cfg.RunData.inPacketPool.Get().(*inPacket)
inPkt.data = pktSent
inPkt.ts = txTS
inPkt.fromTX = true
cfg.RunData.rawInput[getRxChanNumToUse(cfg)] <- inPkt
pktSent = cfg.RunData.bytePool.Get().([]byte)
atomic.AddUint64(&cfg.Counters.TotalTXTSRead, 1)
atomic.AddUint64(&txTSBytesReceived, uint64(pktSentLen))
txTSworker[workerNum].Tock()
}
}(j)
select {
case <-txTSStartDone:
if cfg.DebugPrint || cfg.DebugIoWkrTX {
log.Infof("TX %d TS worker %d running", txwkr, j)
}
continue
case <-(*cfg.Ctx).Done():
log.Errorf("Tx TS worker startup error")
return
}
}
var txTSBytesSent uint64
var diff uint64
var out *outPacket
txStartDone <- true
for {
out = <-(cfg.RunData.rawOutput[i]) // want to send a packet
if out == nil || len((*out.data).Bytes()) == 0 {
log.Infof("empty data bad!")
continue
}
if cfg.DebugPrint || cfg.DebugIoWkrTX {
// debug print
debugPkt := gopacket.NewPacket((*out.data).Bytes(), layers.LinkTypeEthernet, gopacket.Default)
log.Debugf("Debug txWkr %d send packet %v", i, debugPkt)
}
profiler.Tick()
if out.getTS {
// some backpressure, let TXTS worker keep up
// keep the difference below a certain amount
for {
diff = txTSBytesSent - atomic.LoadUint64(&txTSBytesReceived)
if diff < 15000 {
break
}
}
n, err := syscall.Write(fdTS, (*out.data).Bytes())
if err != nil || n == 0 {
log.Errorf("txWkr %d send packet TS failed, n %v err %v", i, n, err)
}
if out.cl != nil {
out.cl.CountOutgoingPackets++
}
atomic.AddUint64(&cfg.Counters.TotalTXTSPacketsSent, 1)
txTSBytesSent += uint64(len((*out.data).Bytes()))
diff = txTSBytesSent - atomic.LoadUint64(&txTSBytesReceived)
if diff > atomic.LoadUint64(&cfg.Counters.MaxTXTSBytesOutstanding) {
atomic.StoreUint64(&cfg.Counters.MaxTXTSBytesOutstanding, diff)
}
} else {
_, err := syscall.Write(fd, (*out.data).Bytes())
if err != nil {
log.Errorf("txWkr %d send packet failed, %v", i, err)
}
if out.cl != nil {
out.cl.CountOutgoingPackets++
out.sentTS = fastime.Now()
if out.pktType == pktAnnounceGrantReq {
out.cl.SentAnnounceGrantReqTime = out.sentTS
} else if out.pktType == pktSyncGrantReq {
out.cl.SentlastSyncGrantReqTime = out.sentTS
} else if out.pktType == pktDelayRespGrantReq {
out.cl.SentDelayRespGrantReqTime = out.sentTS
} else if out.pktType == pktDelayReq {
out.cl.SentDelayReqTime = out.sentTS
}
}
if cfg.DebugPrint || cfg.DebugIoWkrTX {
log.Debugf("Debug txWkr %d send packet", i)
}
if err != nil {
log.Errorf("Raw socket write packet data failed %v", err)
doneChan <- fmt.Errorf("Raw socket write packet data failed %v", err)
return
}
}
atomic.AddUint64(&cfg.Counters.TotalPacketsSent, 1)
atomic.AddUint64(&cfg.perIOTX[i], 1)
cfg.RunData.outPacketPool.Put(out)
profiler.Tock()
}
}()
var err error
select {
case <-(*cfg.Ctx).Done():
log.Infof("TX worker %d cancelling due to context done", i)
return (*cfg.Ctx).Err()
case err = <-doneChan:
return err
}
})
}(txwkr)
select {
case <-txStartDone:
if cfg.DebugPrint || cfg.DebugIoWkrTX {
log.Debugf("TX worker %d running", txwkr)
}
continue
case <-(*cfg.Ctx).Done():
log.Errorf("Tx worker startup error")
return
}
}
}
// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
package app
import (
l4g "github.com/alecthomas/log4go"
"github.com/mattermost/platform/model"
goi18n "github.com/nicksnyder/go-i18n/i18n"
)
type HeaderProvider struct {
}
const (
CMD_HEADER = "header"
)
func init() {
RegisterCommandProvider(&HeaderProvider{})
}
func (me *HeaderProvider) GetTrigger() string {
return CMD_HEADER
}
func (me *HeaderProvider) GetCommand(T goi18n.TranslateFunc) *model.Command {
return &model.Command{
Trigger: CMD_HEADER,
AutoComplete: true,
AutoCompleteDesc: T("api.command_channel_header.desc"),
AutoCompleteHint: T("api.command_channel_header.hint"),
DisplayName: T("api.command_channel_header.name"),
}
}
func (me *HeaderProvider) DoCommand(args *model.CommandArgs, message string) *model.CommandResponse {
channel, err := GetChannel(args.ChannelId)
if err != nil {
return &model.CommandResponse{Text: args.T("api.command_channel_header.channel.app_error"), ResponseType: model.COMMAND_RESPONSE_TYPE_EPHEMERAL}
}
if channel.Type == model.CHANNEL_OPEN && !SessionHasPermissionToChannel(args.Session, args.ChannelId, model.PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES) {
return &model.CommandResponse{Text: args.T("api.command_channel_header.permission.app_error"), ResponseType: model.COMMAND_RESPONSE_TYPE_EPHEMERAL}
}
if channel.Type == model.CHANNEL_PRIVATE && !SessionHasPermissionToChannel(args.Session, args.ChannelId, model.PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES) {
return &model.CommandResponse{Text: args.T("api.command_channel_header.permission.app_error"), ResponseType: model.COMMAND_RESPONSE_TYPE_EPHEMERAL}
}
if len(message) == 0 {
return &model.CommandResponse{Text: args.T("api.command_channel_header.message.app_error"), ResponseType: model.COMMAND_RESPONSE_TYPE_EPHEMERAL}
}
oldChannelHeader := channel.Header
channel.Header = message
updateChannel, err := UpdateChannel(channel)
if err != nil {
return &model.CommandResponse{Text: args.T("api.command_channel_header.update_channel.app_error"), ResponseType: model.COMMAND_RESPONSE_TYPE_EPHEMERAL}
}
messageWs := model.NewWebSocketEvent(model.WEBSOCKET_EVENT_CHANNEL_UPDATED, "", channel.Id, "", nil)
messageWs.Add("channel", channel.ToJson())
Publish(messageWs)
if err := PostUpdateChannelHeaderMessage(args.Session.UserId, channel.Id, args.TeamId, oldChannelHeader, updateChannel.Header); err != nil {
l4g.Error(err.Error())
}
return &model.CommandResponse{}
}
package main
import (
"flag"
"fmt"
"gopkg.in/yaml.v2"
"io/ioutil"
"os"
"path/filepath"
"strings"
)
type Data struct {
path string
guid string
dependencies []string
}
var (
root string
data []Data
)
func setupData(path string, f os.FileInfo, err error) error {
if strings.Contains(path, "png.meta") {
guid, err := getGuid(path)
if err != nil {
panic(err)
}
data = append(data, Data{path: path, guid: guid})
}
return nil
}
func getGuid(path string) (string, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
fmt.Printf("Failed to read file %s", path)
return "", err
}
m := make(map[interface{}]interface{})
err = yaml.Unmarshal(data, &m)
if err != nil {
fmt.Printf("Failed to read file %s", path)
return "", err
}
guid, ok := m["guid"].(string)
if !ok {
return "", fmt.Errorf("no guid found in %s", path)
}
return guid, nil
}
func setupDependencies(path string, f os.FileInfo, err error) error {
if strings.Contains(path, ".prefab") {
for i, element := range data {
if checkContainsGuid(path, element.guid) {
// includes
data[i].dependencies = append(element.dependencies, path)
}
}
// fmt.Printf("%v", data)
}
return nil
}
func checkContainsGuid(path string, guid string) bool {
data, err := ioutil.ReadFile(path)
if err != nil {
fmt.Printf("Failed to read file %s", path)
return false
}
return strings.Contains(string(data), guid)
}
func showResult() {
for _, element := range data {
fmt.Printf("---------------------------------------------------\n")
fmt.Printf("IMG: %s\n", strings.TrimSuffix(element.path, ".meta"))
count := len(element.dependencies)
fmt.Printf("\tCount: %d\n", count)
if count > 0 {
for index, dep := range element.dependencies {
fmt.Printf("%d Used at: %s\n", index, dep)
}
}
fmt.Printf("---------------------------------------------------\n")
}
}
func main() {
flag.Parse()
root = flag.Arg(0)
if root == "" {
fmt.Println("root path is not specified")
os.Exit(1)
}
// start with an empty slice; a pre-allocated zero-value element would produce a bogus entry in the results
data = make([]Data, 0)
err := filepath.Walk(root, setupData)
if err != nil {
fmt.Printf("filepath.Walk() returned error :%v\n", err)
}
err = filepath.Walk(root, setupDependencies)
if err != nil {
fmt.Printf("filepath.Walk() returned error :%v\n", err)
}
showResult()
}
// Copyright (c) 2021 Terminus, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"github.com/pkg/errors"
"github.com/erda-project/erda/tools/cli/command"
)
var (
ErrName = errors.New("name not provided")
ErrOptionalArgPosition = errors.New("optional Arg should be the last arg")
ErrOptionalArgNum = errors.New("too many optional args, only 1 optional arg is supported for now")
)
func validate(cmd command.Command, fname string) error {
if cmd.Name == "" {
return errors.Wrap(ErrName, fname)
}
optionalArgNum := 0
for _, arg := range cmd.Args {
if arg.IsOption() {
optionalArgNum++
} else {
if optionalArgNum > 0 {
return errors.Wrap(ErrOptionalArgPosition, fname)
}
}
}
if optionalArgNum > 1 {
return errors.Wrap(ErrOptionalArgNum, fname)
}
return nil
}
package cgteamwork
import (
"context"
"encoding/json"
"strings"
client "github.com/WuLiFang/csheet/server/v6/internal/client/gateway/cgteamwork"
"github.com/WuLiFang/csheet/server/v6/internal/gateway/domain/cgteamwork/flow"
"github.com/WuLiFang/csheet/server/v6/internal/gateway/domain/cgteamwork/pipeline"
"github.com/WuLiFang/csheet/server/v6/internal/gateway/domain/cgteamwork/status"
"github.com/WuLiFang/csheet/server/v6/internal/gateway/domain/cgteamwork/table_column"
"github.com/WuLiFang/csheet/server/v6/internal/gateway/domain/cgteamwork/user"
"github.com/WuLiFang/csheet/server/v6/internal/util"
"github.com/tidwall/gjson"
"golang.org/x/sync/errgroup"
)
// FindFlow implements Client
func (c *clientV6_1) FindFlow(ctx context.Context, database string, module string) util.Iterator[flow.Flow] {
return util.NewIterator(func(cb func(flow.Flow) error) (err error) {
token, err := c.systemToken.SystemAccessToken(ctx)
if err != nil {
return
}
res, err := c.http.Call(
ctx,
struct {
Controller string `json:"controller"`
Method string `json:"method"`
Database string `json:"db"`
}{
"v_flow",
"get_project_flows",
database,
},
token,
)
if err != nil {
return
}
var flowType = "template"
var key = gjson.GetBytes(res.data, "id").String()
if key == "" {
flowType = "project"
key = database
}
res, err = c.http.Call(
ctx,
struct {
Controller string `json:"controller"`
Method string `json:"method"`
Module string `json:"module"`
Type string `json:"type"`
Key string `json:"key"`
}{
"v_flow",
"get_data",
module,
flowType,
key,
},
token,
)
if err != nil {
return
}
g, ctx := errgroup.WithContext(ctx)
var flowCh = make(chan flow.Flow)
gjson.GetBytes(res.data, "flows").ForEach(func(_, i gjson.Result) bool {
g.Go(func() (err error) {
var rawPipelineID = i.Get("pipe_id").String()
pipelineID, err := pipeline.NewID(database, rawPipelineID)
if err != nil {
return
}
i.Get("data").ForEach(func(_, j gjson.Result) bool {
err = func() (err error) {
var rawID = j.Get("id").String()
var name = j.Get("name").String()
// get stages
stages, err := func() (stages []flow.Stage, err error) {
type stageResult struct {
index int
v flow.Stage
}
var (
nextIndex int
stageCh = make(chan stageResult)
)
g, ctx := errgroup.WithContext(ctx)
j.Get("review").ForEach(func(_, k gjson.Result) bool {
var index = nextIndex
nextIndex++
g.Go(func() (err error) {
var rawColumnID = k.Get("field_id").String()
columnID, err := table_column.NewID(database, rawColumnID)
if err != nil {
return
}
var name = k.Get("name").String()
// inspectors
var rawInspectorIDs = strings.Split(k.Get("qc_group_id").String(), ",")
inspectorIDs, err := util.MapSliceE(
util.FilterSlice(rawInspectorIDs, func(i string, index int) bool { return i != "" }),
user.NewID,
)
if err != nil {
return
}
nodeResult, err := c.http.Call(
ctx,
struct {
Controller string `json:"controller"`
Method string `json:"method"`
FlowID string `json:"flow_id"`
SubmitType string `json:"submit_type"`
FieldID string `json:"field_id"`
PipelineID string `json:"pipeline_id"`
Type string `json:"type"`
Key string `json:"key"`
}{
"v_flow",
"edit_node",
rawID,
"review",
rawColumnID,
rawPipelineID,
flowType,
key,
},
token,
)
if err != nil {
return
}
statusIDs, err := util.MapSliceE(
util.FilterSlice(
strings.Split(gjson.GetBytes(nodeResult.data, "status_id_list").String(), ","),
func(i string, index int) bool { return i != "" },
),
status.NewID,
)
if err != nil {
return
}
stage, err := flow.StageFromRepository(
name,
columnID,
statusIDs,
inspectorIDs,
)
if err != nil {
return
}
select {
case stageCh <- stageResult{index, *stage}:
case <-ctx.Done():
return ctx.Err()
}
return
})
return true
})
go func() {
g.Wait()
close(stageCh)
}()
stages = make([]flow.Stage, nextIndex)
for i := range stageCh {
stages[i.index] = i.v
}
err = g.Wait()
return
}()
if err != nil {
return
}
v, err := flow.FromRepository(
database,
module,
rawID,
name,
pipelineID,
stages,
)
if err != nil {
return
}
select {
case flowCh <- *v:
case <-ctx.Done():
return ctx.Err()
}
return
}()
return err == nil
})
return
})
return true
})
go func() {
g.Wait()
close(flowCh)
}()
for i := range flowCh {
err = cb(i)
if err != nil {
return
}
}
err = g.Wait()
return
})
}
// UpdateFlow implements Client
func (c *clientV6_1) UpdateFlow(ctx context.Context, database string, module string, moduleType string, id string, field string, status string, message client.Message, token string) (err error) {
messageData, err := c.encodeMessage(message)
if err != nil {
return
}
_, err = c.http.Call(
ctx,
struct {
Controller string `json:"controller"`
Method string `json:"method"`
Database string `json:"db"`
Module string `json:"module"`
ModuleType string `json:"module_type"`
TaskID string `json:"task_id"`
Field string `json:"field_sign"`
Status string `json:"status"`
Message json.RawMessage `json:"dom_text_array"`
}{
"c_work_flow",
"python_update_flow",
database, module, moduleType, id,
field, status, messageData,
},
token,
)
return
}
// Package pulsesource provides a PulseAudio audio source.
package pulsesource
import (
"github.com/mesilliac/pulse-simple"
)
type Source struct {
stream *pulse.Stream
}
func New(sampleRate float64) (src Source, err error) {
ss := pulse.SampleSpec{
Format: pulse.SAMPLE_U8,
Rate: uint32(sampleRate),
Channels: 2,
}
s, err := pulse.Capture("anna", "stereo input", &ss)
if err != nil {
return Source{}, err
}
return Source{s}, nil
}
func (src Source) Read(left []float64, right []float64) (err error) {
if len(left) != len(right) {
panic("left and right must have the same length")
}
buf := make([]byte, len(left)*2)
_, err = src.stream.Read(buf)
if err != nil {
return err
}
j := 0
for i := range left {
l, r := buf[j], buf[j+1]
// convert unsigned 8-bit samples to roughly [-1, 1); subtract after the float conversion so values below 128 don't wrap
left[i] = (float64(l) - 128) / 128.0
right[i] = (float64(r) - 128) / 128.0
j += 2
}
return nil
}
func (src Source) Close() {
src.stream.Free()
}
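A small usage sketch for the source above, assuming a running PulseAudio server; the import path is a placeholder:

package main

import (
	"fmt"
	"log"

	"example.com/yourapp/pulsesource" // placeholder import path for the package above
)

func main() {
	src, err := pulsesource.New(44100)
	if err != nil {
		log.Fatalf("failed to open capture stream: %v", err)
	}
	defer src.Close()

	// read one block of 1024 stereo samples, normalized to roughly [-1, 1)
	left := make([]float64, 1024)
	right := make([]float64, 1024)
	if err := src.Read(left, right); err != nil {
		log.Fatalf("read failed: %v", err)
	}
	fmt.Printf("first samples: left=%.3f right=%.3f\n", left[0], right[0])
}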
package db
import (
"crypto/rand"
"encoding/base64"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"strconv"
"testing"
)
func TestGet(t *testing.T) {
fmt.Println("Testing GET")
resp, err := http.Get("http://localhost:3001/get/testKey")
if err != nil {
// Fatal rather than Error: resp is nil here and the deferred Close below would panic
t.Fatal("Couldn't get testKey: ", err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Error("Expected testValue got ", err)
} else if string(body) != "testValue" {
t.Error("Expected testValue got ", string(body))
} else {
fmt.Println("Test Passed!")
}
}
func TestSet(t *testing.T) {
fmt.Println("Testing SET")
c := 9
b := make([]byte, c)
_, err := rand.Read(b)
if err != nil {
t.Error("Couldn't generate random string: ", err)
}
s := base64.URLEncoding.EncodeToString(b)
resp, err := http.PostForm("http://localhost:3001/set/"+s, url.Values{"value": {s}})
if err != nil {
t.Fatal("Expected ", s, " got ", err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Error("Expected ", s, " got ", err)
} else if string(body) != s {
t.Error("Expected ", s, " got ", string(body))
} else {
fmt.Println("Test passed!")
}
}
func TestSetAndGet(t *testing.T) {
fmt.Println("Testing SET & GET")
c := 9
b := make([]byte, c)
_, err := rand.Read(b)
if err != nil {
t.Error("Couldn't generate random string: ", err)
}
s := base64.URLEncoding.EncodeToString(b)
resp, err := http.PostForm("http://localhost:3001/set/"+s, url.Values{"value": {s}})
if err != nil {
t.Fatal("Couldn't set ", s, ": ", err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Error("Expected ", s, " got ", err)
} else if string(body) != s {
t.Error("Expected ", s, " got ", string(body))
} else {
fmt.Println("Set ", s)
}
resp, err = http.Get("http://localhost:3001/get/" + s)
if err != nil {
t.Fatal("Couldn't get ", s, ": ", err)
}
defer resp.Body.Close()
body, err = ioutil.ReadAll(resp.Body)
if err != nil {
t.Error("Expected ", s, " got ", err)
} else if string(body) != s {
t.Error("Expected ", s, " got ", string(body))
} else {
fmt.Println("Test passed!")
}
}
func TestConcurrency(t *testing.T) {
fmt.Println("Testing Concurrency")
maxRequests := os.Getenv("MAX_REQUESTS")
max, err := strconv.Atoi(maxRequests)
if err != nil {
max = 250
}
for i := 0; i < max; i++ {
s := strconv.Itoa(i)
resp, err := http.PostForm("http://localhost:3001/set/"+s, url.Values{"value": {s}})
if err != nil {
t.Fatal("Expected ", s, " got ", err)
}
defer resp.Body.Close()
}
for i := 0; i < max; i++ {
s := strconv.Itoa(i)
resp, err := http.Get("http://localhost:3001/get/" + s)
if err != nil {
t.Fatal("Expected ", s, " got ", err)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil || string(body) != s {
t.Error("Expected ", s, " got ", err)
}
defer resp.Body.Close()
}
}
package choice
import (
"encoding/xml"
"errors"
"fmt"
"reflect"
)
type appender int
const (
Set appender = iota
Append
)
var appenders = map[appender]func(container reflect.Value, value reflect.Value){
Set: func(container reflect.Value, value reflect.Value) {
container.Set(value)
},
Append: func(container reflect.Value, value reflect.Value) {
container.Set(reflect.Append(container, value))
},
}
type ChoiceParser map[string]func() interface{}
func (c ChoiceParser) Parse(d *xml.Decoder, start xml.StartElement) (interface{}, error) {
choice, ok := c[start.Name.Local]
if !ok {
return nil, errors.New(start.Name.Local + " is not a listed option.")
}
ptr := choice()
err := d.DecodeElement(ptr, &start)
obj := reflect.ValueOf(ptr).Interface()
return obj, err
}
func (c ChoiceParser) ParseList(d *xml.Decoder, start xml.StartElement, containerPtr interface{}, typeofPtr interface{}, appenderType appender) error {
typeof := reflect.TypeOf(typeofPtr).Elem()
container := reflect.ValueOf(containerPtr).Elem()
token, err := d.Token()
for token != start.End() {
if err != nil {
return err
}
next, ok := token.(xml.StartElement)
if ok {
item, err := c.Parse(d, next)
if err != nil {
return err
}
val := reflect.ValueOf(item)
if !val.Type().Implements(typeof) {
return fmt.Errorf("Item is not a valid %v.", typeof.Name())
}
appendFn := appenders[appenderType]
appendFn(container, val)
}
token, err = d.Token()
}
return nil
}
func WrapList(e *xml.Encoder, listName xml.Name, list interface{}) error {
listVal := reflect.ValueOf(list)
token := xml.StartElement{
Name: listName,
}
err := e.EncodeToken(token)
if err != nil {
return err
}
count := listVal.Len()
for index := 0; index < count; index++ {
err = e.Encode(listVal.Index(index).Interface())
if err != nil {
return err
}
}
return e.EncodeToken(token.End())
}
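A hedged usage sketch for ChoiceParser.ParseList: the Shape interface and the circle/square element types are invented for illustration, and the choice package import path is a placeholder.

package main

import (
	"encoding/xml"
	"fmt"
	"log"
	"strings"

	"example.com/yourapp/choice" // placeholder import path for the package above
)

// Shape is the interface all decoded list items must implement.
type Shape interface{ Area() float64 }

type Circle struct {
	R float64 `xml:"r,attr"`
}

func (c Circle) Area() float64 { return 3.14159 * c.R * c.R }

type Square struct {
	Side float64 `xml:"side,attr"`
}

func (s Square) Area() float64 { return s.Side * s.Side }

func main() {
	parser := choice.ChoiceParser{
		"circle": func() interface{} { return &Circle{} },
		"square": func() interface{} { return &Square{} },
	}

	doc := `<shapes><circle r="1"/><square side="2"/></shapes>`
	d := xml.NewDecoder(strings.NewReader(doc))

	var shapes []Shape
	for {
		tok, err := d.Token()
		if err != nil {
			log.Fatalf("unexpected end of document: %v", err)
		}
		if start, ok := tok.(xml.StartElement); ok && start.Name.Local == "shapes" {
			// (*Shape)(nil) tells ParseList which interface the decoded items must implement.
			if err := parser.ParseList(d, start, &shapes, (*Shape)(nil), choice.Append); err != nil {
				log.Fatalf("parse failed: %v", err)
			}
			break
		}
	}
	for _, s := range shapes {
		fmt.Println(s.Area())
	}
}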
/*
Copyright 2013 Ashish Gandhi
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// An implementation of Rob Pike's regular expression matcher that
// handles the following constructs:
//
// c matches any literal character c
// . matches any single character
// ^ matches the beginning of the input string
// $ matches the end of the input string
// * matches zero or more occurrences of the previous character
//
// http://www.cs.princeton.edu/courses/archive/spr09/cos333/beautiful.html
package pike
import "errors"
var invalidRegexErr = errors.New("Invalid regex")
// Match searches for the regexp anywhere in text
func Match(regexStr string, textStr string) (result bool, err error) {
regex := []rune(regexStr)
text := []rune(textStr)
if len(regex) > 0 && regex[0] == '^' {
return matchHere(regex[1:], text)
}
if len(text) == 0 {
return matchHere(regex, text)
}
for i := range text {
r, e := matchHere(regex, text[i:])
if r || e != nil {
return r, e
}
}
return result, err
}
// Search for regexp at beginning of text
func matchHere(regex []rune, text []rune) (result bool, err error) {
if len(regex) == 0 {
return true, err
}
if regex[0] == '*' {
return result, invalidRegexErr
}
if regex[0] == '$' {
if len(regex) > 1 {
return result, invalidRegexErr
}
return len(text) == 0, err
}
if len(regex) > 1 && regex[1] == '*' {
return matchStar(regex[0], regex[2:], text)
}
if len(text) == 0 {
// the regex still expects a character but there is no text left
return result, err
}
if regex[0] == '.' || regex[0] == text[0] {
return matchHere(regex[1:], text[1:])
}
return result, err
}
// Search for c*regexp at beginning of text
func matchStar(c rune, regex []rune, text []rune) (result bool, err error) {
// try the rest of the regex at every position, including the position just
// past the run of characters consumed by c*
for i := 0; ; i++ {
r, e := matchHere(regex, text[i:])
if r || e != nil {
return r, e
}
// Important to not check this before the first match attempt as
// c* matches zero or more c
if i >= len(text) || (text[i] != c && c != '.') {
break
}
}
return result, err
}
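A short usage sketch for the matcher above (the import path is a placeholder):

package main

import (
	"fmt"
	"log"

	"example.com/yourapp/pike" // placeholder import path for the package above
)

func main() {
	matched, err := pike.Match("^ab*c$", "abbbc")
	if err != nil {
		log.Fatalf("invalid regex: %v", err)
	}
	fmt.Println(matched) // true: '^' anchors, 'b*' consumes the run of b's, '$' requires the end
}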
package converter
import (
"bytes"
"github.com/mbordner/kazaam/transform"
)
type Eqs struct {
ConverterBase
}
func (c *Eqs) Convert(jsonData []byte, value []byte, args []byte) (newValue []byte, err error) {
var argsValue *transform.JSONValue
argsValue, err = c.NewJSONValue(args)
if err != nil {
return
}
argsBytes := []byte(argsValue.GetStringValue())
if bytes.Equal(value, argsBytes) {
return []byte("true"), nil
}
return []byte("false"), nil
}
package main
// #include <stdlib.h>
// #include <string.h>
// #include <stdbool.h>
// #include <mysql.h>
// #cgo CFLAGS: -O3 -I/usr/include/mysql -fno-omit-frame-pointer
import "C"
import (
"log"
"os"
"unsafe"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/pkg/errors"
"github.com/valyala/bytebufferpool"
)
// main function is needed even for generating shared object files
func main() {}
var l = log.New(os.Stderr, "s3-download: ", log.Ldate|log.Ltime|log.Lmicroseconds|log.Llongfile)
func msg(message *C.char, s string) {
m := C.CString(s)
defer C.free(unsafe.Pointer(m))
C.strcpy(message, m)
}
type byteBufferPoolWriteAt struct {
w *bytebufferpool.ByteBuffer
}
func (bb byteBufferPoolWriteAt) WriteAt(p []byte, offset int64) (n int, err error) {
return bb.w.Write(p)
}
//export s3_download_init
func s3_download_init(initid *C.UDF_INIT, args *C.UDF_ARGS, message *C.char) C.bool {
if args.arg_count != 3 {
msg(message, "`s3_download` requires 3 parameters: the region, the bucket, and the key")
return C.bool(true)
}
argsTypes := (*[2]uint32)(unsafe.Pointer(args.arg_type))
argsTypes[0] = C.STRING_RESULT
initid.maybe_null = 1
return C.bool(false)
}
//export s3_download
func s3_download(initid *C.UDF_INIT, args *C.UDF_ARGS, result *C.char, length *uint64, isNull *C.char, message *C.char) *C.char {
c := 3
argsArgs := (*[1 << 30]*C.char)(unsafe.Pointer(args.args))[:c:c]
argsLengths := (*[1 << 30]uint64)(unsafe.Pointer(args.lengths))[:c:c]
*length = 0
*isNull = 1
if argsArgs[0] == nil ||
argsArgs[1] == nil ||
argsArgs[2] == nil {
return nil
}
a := make([]string, c, c)
for i, argsArg := range argsArgs {
a[i] = C.GoStringN(argsArg, C.int(argsLengths[i]))
}
sess, err := session.NewSession(&aws.Config{Region: &a[0]})
if err != nil {
l.Println(errors.Wrapf(err, "failed to create AWS session"))
return nil
}
bb := bytebufferpool.Get()
downloader := s3manager.NewDownloader(sess)
downloader.Concurrency = 1
_, err = downloader.Download(byteBufferPoolWriteAt{bb},
&s3.GetObjectInput{
Bucket: &a[1],
Key: &a[2],
})
if err != nil {
l.Println(errors.Wrapf(err, "failed to download file from S3"))
return nil
}
*length = uint64(bb.Len())
*isNull = 0
cString := C.CString(bb.String())
bytebufferpool.Put(bb)
return cString
}
//export s3_download_with_credentials_init
func s3_download_with_credentials_init(initid *C.UDF_INIT, args *C.UDF_ARGS, message *C.char) C.bool {
if args.arg_count != 5 {
msg(message, "`s3_download_with_credentials` requires 5 parameters: the access key ID, the secret access key, the region, the bucket, and the key")
return C.bool(true)
}
argsTypes := (*[2]uint32)(unsafe.Pointer(args.arg_type))
argsTypes[0] = C.STRING_RESULT
initid.maybe_null = 1
return C.bool(false)
}
//export s3_download_with_credentials
func s3_download_with_credentials(initid *C.UDF_INIT, args *C.UDF_ARGS, result *C.char, length *uint64, isNull *C.char, message *C.char) *C.char {
c := 5
argsArgs := (*[1 << 30]*C.char)(unsafe.Pointer(args.args))[:c:c]
argsLengths := (*[1 << 30]uint64)(unsafe.Pointer(args.lengths))[:c:c]
*length = 0
*isNull = 1
if argsArgs[0] == nil ||
argsArgs[1] == nil ||
argsArgs[2] == nil ||
argsArgs[3] == nil ||
argsArgs[4] == nil {
return nil
}
a := make([]string, c, c)
for i, argsArg := range argsArgs {
a[i] = C.GoStringN(argsArg, C.int(argsLengths[i]))
}
sess, err := session.NewSession(&aws.Config{
Region: &a[2],
Credentials: credentials.NewStaticCredentials(a[0], a[1], "")})
if err != nil {
l.Println(errors.Wrapf(err, "failed to create AWS session"))
return nil
}
bb := bytebufferpool.Get()
downloader := s3manager.NewDownloader(sess)
downloader.Concurrency = 1
_, err = downloader.Download(byteBufferPoolWriteAt{bb},
&s3.GetObjectInput{
Bucket: &a[3],
Key: &a[4],
})
if err != nil {
l.Println(errors.Wrapf(err, "failed to download file from S3"))
return nil
}
*length = uint64(bb.Len())
*isNull = 0
cString := C.CString(bb.String())
bytebufferpool.Put(bb)
return cString
}
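// Illustrative registration and use from MySQL, assuming the shared library has
// been installed in the server's plugin directory (the file name below is hypothetical):
//
//	CREATE FUNCTION s3_download RETURNS STRING SONAME 's3_download.so';
//	CREATE FUNCTION s3_download_with_credentials RETURNS STRING SONAME 's3_download.so';
//	SELECT s3_download('us-east-1', 'my-bucket', 'path/to/object');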
|
[
6
] |
package user
import "gorm.io/gorm"
type Repository interface {
Save(user User) (User, error)
FindByEmail(email string) (User, error)
FindByID(ID int) (User, error)
Update(user User) (User, error)
FindAll() ([]User, error)
}
type repository struct {
db *gorm.DB
}
func NewRepository(db *gorm.DB) *repository {
return &repository{db}
}
func (r *repository) Save(user User) (User, error) {
err := r.db.Create(&user).Error
if err != nil {
return user, err
}
return user, nil
}
func (r *repository) FindByEmail(email string) (User, error) {
var user User
err := r.db.Where("email = ?", email).Find(&user).Error
if err != nil {
return user, err
}
return user, nil
}
func (r *repository) FindByID(ID int) (User, error) {
var user User
err := r.db.Where("id = ?", ID).Find(&user).Error
if err != nil {
return user, err
}
return user, nil
}
func (r *repository) Update(user User) (User, error) {
err := r.db.Save(&user).Error
if err != nil {
return user, err
}
return user, nil
}
func (r *repository) FindAll() ([]User, error) {
var users []User
err := r.db.Find(&users).Error
if err != nil {
return users, err
}
return users, nil
}
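// A minimal wiring sketch (the driver, DSN and User model are assumptions that
// live outside this file):
//
//	db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
//	if err != nil {
//		log.Fatal(err)
//	}
//	repo := NewRepository(db)
//	u, err := repo.FindByEmail("user@example.com")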
|
[
2
] |
package ringbuffer
import (
"math"
"testing"
"time"
)
func testBasic(t *testing.T, rb *RingBuffer, expectLen uint32, expectLeftSpace uint32) {
if rb.Len() != expectLen {
t.Fatal(rb.Len())
}
if rb.LeftSpace() != expectLeftSpace {
t.Fatal(rb.LeftSpace())
}
}
func TestInit(t *testing.T) {
rb := New(DefaultCacheMaxSize)
if rb.maxsize != DefaultCacheMaxSize {
t.Fatal(rb.maxsize, DefaultCacheMaxSize)
}
testBasic(t, rb, 0, DefaultCacheMaxSize)
in := []byte{'a', 'b'}
ret := rb.Push(in)
if ret != 2 {
t.Fatal(ret)
}
testBasic(t, rb, 2, DefaultCacheMaxSize-2)
if rb.buffer[0] != 'a' || rb.buffer[1] != 'b' {
t.Fatal(rb.buffer)
}
out := make([]byte, 2)
ret = rb.Get(out)
if ret != 2 {
t.Fatal(ret)
}
testBasic(t, rb, 0, DefaultCacheMaxSize)
if out[0] != 'a' || out[1] != 'b' {
t.Fatal(out)
}
}
func TestOverflow(t *testing.T) {
rb := New(4)
testBasic(t, rb, 0, 4)
in := []byte{'a', 'b', 'c', 'd', 'e'}
ret := rb.Push(in)
if ret != 4 {
t.Fatal(ret)
}
testBasic(t, rb, 4, 0)
if rb.buffer[0] != 'a' ||
rb.buffer[1] != 'b' ||
rb.buffer[2] != 'c' ||
rb.buffer[3] != 'd' {
t.Fatal(rb.buffer)
}
out := make([]byte, 1)
ret = rb.Get(out)
if ret != 1 {
t.Fatal(ret)
}
testBasic(t, rb, 3, 1)
if out[0] != 'a' {
t.Fatal(out)
}
in2 := []byte{'x', 'y'}
ret = rb.Push(in2)
if ret != 1 {
t.Fatal(ret)
}
testBasic(t, rb, 4, 0)
if rb.buffer[0] != 'x' ||
rb.buffer[1] != 'b' ||
rb.buffer[2] != 'c' ||
rb.buffer[3] != 'd' {
t.Fatal(rb.buffer)
}
out2 := make([]byte, 5)
ret = rb.Get(out2)
if ret != 4 {
t.Fatal(ret)
}
testBasic(t, rb, 0, 4)
if out2[0] != 'b' ||
out2[1] != 'c' ||
out2[2] != 'd' ||
out2[3] != 'x' {
t.Fatal(out2)
}
}
func TestInOutOverFlow1(t *testing.T) {
/*
0 1 2 3 ..... max-2, max-1, max
*/
rb := New(8)
rb.in = 1
rb.out = math.MaxUint32 - 1
testBasic(t, rb, 3, 5)
in := []byte{'a'}
ret := rb.Push(in)
if ret != 1 {
t.Fatal(ret)
}
testBasic(t, rb, 4, 4)
out := make([]byte, 5)
ret = rb.Get(out)
if ret != 4 {
t.Fatal(ret)
}
testBasic(t, rb, 0, 8)
}
func TestInOutOverFlow2(t *testing.T) {
rb := New(8)
rb.in = math.MaxUint32
rb.out = math.MaxUint32 - 1
testBasic(t, rb, 1, 7)
idx := rb.in % 8
in := []byte{'a'}
ret := rb.Push(in)
if ret != 1 {
t.Fatal(ret)
}
testBasic(t, rb, 2, 6)
if rb.buffer[idx] != 'a' {
t.Fatal(rb.buffer)
}
if rb.in != 0 {
t.Fatal(rb)
}
}
func TestCrossPush(t *testing.T) {
rb := New(4)
rb.in = 2
rb.out = 2
testBasic(t, rb, 0, 4)
in := []byte{'a', 'b', 'c', 'd', 'e'}
ret := rb.Push(in)
if ret != 4 {
t.Fatal(ret)
}
testBasic(t, rb, 4, 0)
if rb.buffer[0] != 'c' ||
rb.buffer[1] != 'd' ||
rb.buffer[2] != 'a' ||
rb.buffer[3] != 'b' {
t.Fatal(rb.buffer)
}
if rb.in != 6 {
t.Fatal(rb.in)
}
}
func TestProducerConsumer(t *testing.T) {
rb := New(1024)
data := []byte("huangjian")
N := 10000
go func() {
for i := 0; i < N; {
if int(rb.LeftSpace()) > len(data) {
rb.Push(data)
i++
}
time.Sleep(time.Microsecond)
}
}()
time.Sleep(time.Second)
for i := 0; i < N; i++ {
out := make([]byte, len(data))
ret := rb.Get(out)
if string(out) != "huangjian" {
t.Fatal(i, ret, out)
}
time.Sleep(2 * time.Microsecond)
}
}
|
[
6
] |
package streamer
import (
"sync"
)
type buffer struct {
currentSize int
buf [][]byte
in chan []byte
out chan [][]byte
maxSize int
trigger chan struct{}
sync.RWMutex
}
func newBuffer() *buffer {
b := &buffer{
maxSize: DefaultMaxBuffer,
in: make(chan []byte, 10),
out: make(chan [][]byte, 1),
trigger: make(chan struct{}, 1),
}
b.reset()
return b
}
func (buf *buffer) init() {
go func() {
for {
select {
case <-buf.trigger:
buf.flush()
case b, ok := <-buf.in:
if !ok {
// the channel is closed; no more data is coming, so flush what we have and stop.
buf.flush()
close(buf.out)
return
}
bytesSize := len(b)
if buf.currentSize+bytesSize > buf.maxSize {
buf.flush()
}
buf.Lock()
buf.currentSize = buf.currentSize + bytesSize
buf.buf = append(buf.buf, b)
buf.Unlock()
}
}
}()
}
func (buf *buffer) flush() {
buf.Lock()
out := make([][]byte, len(buf.buf))
copy(out, buf.buf)
buf.reset()
buf.Unlock()
buf.out <- out
}
func (buf *buffer) reset() {
buf.currentSize = 0
buf.buf = make([][]byte, 0)
}
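// Typical flow (sketch; DefaultMaxBuffer and the consumer of out are defined
// elsewhere in this package):
//
//	b := newBuffer()
//	b.init()
//	b.in <- []byte("chunk")   // accumulated until maxSize would be exceeded
//	b.trigger <- struct{}{}   // or force a flush explicitly
//	batch := <-b.out          // receive the buffered [][]byte
//	close(b.in)               // final flush, then out is closed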
|
[
0
] |
package webservice
import (
ae "appengine"
"data"
"encoding/json"
"errors"
"googlebooks"
"isbn13"
"net/http"
"persistence"
"persistence/tx"
)
type Call struct {
Context ae.Context
Request *http.Request
Response http.ResponseWriter
StatusCode int
}
func (call *Call) writeError(err error, strings []string) []string {
if me, ok := err.(ae.MultiError); ok {
for _, next := range me {
strings = call.writeError(next, strings)
}
} else {
str := err.Error()
call.Context.Errorf("Error reported: %s", str)
strings = append(strings, str)
}
return strings
}
func (call *Call) DetermineCountry() string {
header := call.Request.Header["X-Appengine-Country"]
if len(header) > 0 {
country := header[0]
if country != "ZZ" {
return country
}
call.Context.Debugf("Falling back to user-supplied country in dev server")
} else {
call.Context.Warningf("Could not auto-detect country (available headers: %v), falling back to query parameter", call.Request.Header)
}
query := call.Request.URL.Query()["country"]
if len(query) > 0 {
return query[0]
}
return "unknown"
}
type CallHandler func(*Call) (interface{}, error)
func (function CallHandler) ServeHTTP(w http.ResponseWriter, rq *http.Request) {
call := Call{
Context: ae.NewContext(rq),
Request: rq,
Response: w,
}
call.Response.Header().Add("Content-Type", "application/json;charset=UTF-8")
result, err := function(&call)
if err != nil {
if call.StatusCode == 0 {
call.StatusCode = http.StatusInternalServerError
}
errors := call.writeError(err, nil)
result = map[string][]string{"errors": errors}
} else {
if call.StatusCode == 0 {
call.StatusCode = http.StatusOK
}
}
encoder := json.NewEncoder(w)
call.Response.WriteHeader(call.StatusCode)
call.Context.Debugf("Returning %v to client", result)
encoder.Encode(result)
}
func init() {
http.Handle("/volumes/", CallHandler(serveVolumeSingle))
http.Handle("/volumes", CallHandler(serveVolumeBulk))
http.Handle("/ping", CallHandler(servePing))
}
func servePing(call *Call) (interface{}, error) {
call.Context.Infof("Ping called")
if call.Request.Method == "GET" {
shelf, err := persistence.LookupBookshelfMeta(call.Context)
return shelf.LastUpdate, err
} else {
call.StatusCode = http.StatusMethodNotAllowed
call.Response.Header().Add("Allow", "GET")
err := errors.New("Unsupported operation. Only GET method is allowed")
return nil, err
}
}
func serveVolumeBulk(call *Call) (interface{}, error) {
var shelf *data.LookupReply
var err error
switch call.Request.Method {
case "GET":
shelf, err = getVolumeBulk(call)
default:
call.StatusCode = http.StatusMethodNotAllowed
call.Response.Header().Add("Allow", "GET")
err = errors.New("Unsupported operation. Only GET method is allowed")
}
return shelf, err
}
func getVolumeBulk(call *Call) (reply *data.LookupReply, err error) {
var shelf *data.Bookshelf
if shelf, err = persistence.LookupBookshelf(call.Context); err == nil {
call.Context.Debugf("Bookshelf contains %d volumes", len(shelf.Books))
infos := shelf.Books
for _, str := range call.Request.URL.Query()["search"] {
matches := shelf.Search(str)
infos = make([]data.BookMetaData, len(matches))
for i, ptr := range matches {
infos[i] = *ptr
}
call.Context.Debugf("Filtered by \"%s\", down to %d entries: %v", str, len(infos), infos)
}
reply = &data.LookupReply{
Count: len(infos),
BookInfos: infos,
}
}
return
}
func serveVolumeSingle(call *Call) (interface{}, error) {
isbn, err := isbn13.New(call.Request.URL.Path[9:])
var book *data.BookMetaData
if err == nil {
switch call.Request.Method {
case "GET":
book, err = compositeISBNLookup(call.Context, call.DetermineCountry(), isbn)
case "PUT":
book, err = putVolumeSingle(call, isbn)
case "DELETE":
book, err = deleteVolumeSingle(call, isbn)
default:
call.StatusCode = http.StatusMethodNotAllowed
call.Response.Header().Add("Allow", "GET, PUT, DELETE")
err = errors.New("Unsupported operation. Only GET, PUT and DELETE methods are allowed")
}
} else {
call.StatusCode = http.StatusNotFound
}
return book, err
}
func deleteVolumeSingle(call *Call, isbn isbn13.ISBN13) (info *data.BookMetaData, err error) {
call.Context.Infof("Deleting ISBN %d", isbn)
err = persistence.UpdateBookshelf(call.Context, func(t *tx.Transaction, shelf *data.Bookshelf) error {
for i := range shelf.Books {
if ptr := &shelf.Books[i]; ptr.ISBN == isbn.String() {
shelf.Books = append(shelf.Books[:i], shelf.Books[i+1:]...)
t.Delete = []tx.KeyDeriver{ptr}
return nil
}
}
call.StatusCode = http.StatusNotFound
return errors.New("ISBN not found")
})
return
}
func putVolumeSingle(call *Call, isbn isbn13.ISBN13) (info *data.BookMetaData, err error) {
call.Context.Debugf("Updating %d", isbn)
decode := json.NewDecoder(call.Request.Body)
info = new(data.BookMetaData)
if err = decode.Decode(info); err == nil {
call.Context.Debugf("Raw decoded entity: %v", info)
info.ISBN = isbn.String()
normalize(call, info)
call.Context.Debugf("Normalized decoded entity: %v", info)
err = persistence.UpdateBookshelf(call.Context, func(t *tx.Transaction, shelf *data.Bookshelf) error {
if ptr := shelf.LookupInfo(isbn); ptr != nil {
*ptr = *info
t.Put = []tx.KeyDeriver{ptr}
} else {
info.Parent = shelf
shelf.Books = append(shelf.Books, *info)
t.Put = []tx.KeyDeriver{info}
}
return nil
})
}
return info, err
}
func normalize(call *Call, info *data.BookMetaData) {
var isbn isbn13.ISBN13
var err error
if isbn, err = isbn13.New(info.ISBN); err == nil {
var base *data.BookMetaData
if base, err = persistence.LookupISBN(call.Context, call.DetermineCountry(), isbn); err == nil {
info.Volume.Images = base.Volume.Images
return
} else {
info.Volume.Images = data.ImageLinks{}
}
} else {
call.Context.Errorf("Could not normalize data for %s: %s", info.ISBN, err.Error())
}
}
func compositeISBNLookup(ctx ae.Context, country string, isbn isbn13.ISBN13) (resp *data.BookMetaData, err error) {
// look in my own library first - if this fails badly, abort anything further
if shelf, err := persistence.LookupBookshelf(ctx); err == nil {
if book := shelf.LookupInfo(isbn); book != nil {
return book, nil
}
} else {
return nil, err
}
var errors ae.MultiError
if book, err := persistence.LookupISBN(ctx, country, isbn); err == nil {
return book, nil
} else {
errors = append(errors, err)
}
if r, err := googlebooks.LookupISBN(ctx, country, isbn); err == nil {
persistence.StoreISBNResult(ctx, country, isbn, r)
return r, nil
} else {
errors = append(errors, err)
}
return nil, errors
}
|
[
4
] |
package scouter
import (
"github.com/globalsign/mgo/bson"
"github.com/google/go-github/github"
)
type User struct {
ID int64 `bson:"_id" json:"id"`
*github.User
Contribution int `bson:"contribution" json:"contribution"`
}
func CountUsers() (int, error) {
return CountCollectionRecords(UserCollection)
}
func FindUser(query bson.M) (User, error) {
var user User
if err := mongoSession.DB("").C(UserCollection).Find(query).One(&user); err != nil {
return user, err
}
return user, nil
}
func FindUsers(selector bson.M, sort string, page, pageSize int) ([]User, error) {
var users []User
skip := (page - 1) * pageSize
if skip < 0 {
skip = 0
}
if err := mongoSession.DB("").C(UserCollection).Find(selector).Sort(sort).Skip(skip).Limit(pageSize).All(&users); err != nil {
return users, err
}
return users, nil
}
func InsertUsers(users []User) error {
for _, user := range users {
if err := InsertRecord(UserCollection, user); err != nil {
return err
}
}
return nil
}
func UpdateUserById(id interface{}, update interface{}) error {
return UpdateById(UserCollection, id, update)
}
func UpsertUser(user User) error {
_, err := UpsertRecord(UserCollection, bson.M{"_id": user.ID}, user)
if err != nil {
return err
}
return nil
}
func UpsertUsers(users []User) error {
for _, user := range users {
if err := UpsertUser(user); err != nil {
return err
}
}
return nil
}
func PatchUserContribution(user User) error {
return UpdateById(UserCollection, bson.M{"_id": user.ID}, bson.M{"contribution": user.Contribution})
}
|
[
6
] |
package godis
import (
ds "data_struct"
"db"
"errors"
"log"
"store"
"sync/atomic"
"time"
)
const (
Overide = iota
DeleteDbKey
AddDbKey
Lookup
SavePoint
)
var (
err_ts_lock_timeout = errors.New("[error] lock object timeout")
err_no_start_ts = errors.New("[error] no open transaction!")
err_not_found_savepoint = errors.New("[error] not found savepoint!")
err_not_found_ts = errors.New("[error] not found transaction!")
err_rollback_fail = errors.New("[error] rollback fail!")
err_commit_back = errors.New("[error] commit fail!")
)
// Global transaction ID counter
var TsGlobalId uint64 = 0
type Ts struct {
TsId uint64
CurTsrId int
timeout time.Duration
tsrList ds.List
magicDB map[string]*ds.Object
// magicHT *HashTable
// magicHT *List
curSavePoint int
datalog *store.DataLog
tsLog *store.TsLog
status uint8
position *store.RecordPosition
}
type TsRecord struct {
TsrId int
SavePointId int
Op uint8
Key []byte
Value []byte
Dbptr *db.DB
Position *store.RecordPosition
}
func NewTsRecord(op uint8) *TsRecord {
return &TsRecord{
Op: op,
}
}
func NewTs(godis *Godis) *Ts {
return &Ts{
TsId: atomic.AddUint64(&TsGlobalId, 1),
timeout: godis.Tstimeout,
tsrList: ds.NewList(),
magicDB: make(map[string]*ds.Object),
datalog: godis.Dl,
tsLog: godis.Tl,
}
}
func (ts *Ts) AddTsRecord(tsr *TsRecord) {
tsr.TsrId = ts.CurTsrId
ts.tsrList.Put(tsr)
ts.CurTsrId++
}
func (ts *Ts) AddSavePoint() {
sp := NewTsRecord(SavePoint)
sp.SavePointId = ts.curSavePoint
ts.curSavePoint++
ts.AddTsRecord(sp)
}
func (ts *Ts) RlockDB(db *db.DB) bool {
return db.Lock.TryRLock(ts.timeout, ts.TsId)
}
func (ts *Ts) LockDB(db *db.DB) bool {
log.Println("LockDB", db.Lock)
return db.Lock.TryLock(ts.timeout, ts.TsId)
}
func (ts *Ts) UnLockDB(db *db.DB) {
db.Lock.Cancel()
}
func (ts *Ts) GetDBKeys(db *db.DB) ds.List {
list := ds.NewList()
for key := range db.Data {
list.Put(ds.CreateObject([]byte(key), ds.BIN, ts.TsId))
}
for key := range ts.magicDB {
list.Put(ds.CreateObject([]byte(key), ds.BIN, ts.TsId))
}
return list
}
func (ts *Ts) SetDBKey(db *db.DB, t uint8, key []byte, value []byte) {
tsr := NewTsRecord(AddDbKey)
if origObj := db.GetDbObj(key); origObj != nil {
log.Println("SetDBKey() origObj:", origObj)
tsr.Position = ts.tsLog.Put(db, origObj.GetObjectType(), key,
origObj.GetBuffer())
if tsr.Position == nil {
log.Panicln("SetDBKey(): failed to write the transaction log for key", string(key))
}
}
tsr.Key = key
tsr.Dbptr = db
ts.AddTsRecord(tsr)
ts.setMagicDb(key, ds.CreateObject(value, t, ts.TsId))
}
func (ts *Ts) DeleteDBKey(db *db.DB, key []byte) {
tsr := NewTsRecord(DeleteDbKey)
tsr.Key = key
tsr.Dbptr = db
if origObj := db.GetDbObj(key); origObj != nil {
tsr.Position = ts.tsLog.Put(db, origObj.GetObjectType(), key,
origObj.GetBuffer())
if tsr.Position == nil {
log.Panicln("DeleteDBKey(): failed to write the transaction log for key", string(key))
}
}
ts.AddTsRecord(tsr)
obj := ts.getMagicDb(key)
if obj != nil {
ts.delMagicDb(key)
} else {
obj = db.GetDbObj(key)
if obj != nil {
ts.setMagicDb(key, obj)
db.DeleteDbObj(key)
}
}
}
func (ts *Ts) GetDBKey(db *db.DB, key []byte) *ds.Object {
if obj := ts.getMagicDb(key); obj != nil {
return obj
}
return db.GetDbObj(key)
}
func (ts *Ts) doCommit() {
var (
tsr *TsRecord
ok bool
)
for e := ts.tsrList.GetFirstNode(); e != nil; e = e.Next {
if tsr, ok = e.Value.(*TsRecord); !ok {
continue
}
ts.commitATsr(tsr)
if tsr.Dbptr != nil {
tsr.Dbptr.Lock.Cancel()
}
}
}
func (ts *Ts) storeTsr() {
var tsr *TsRecord
var ok bool
list := ds.NewList()
for e := ts.tsrList.GetFirstNode(); e != nil; e = e.Next {
if tsr, ok = e.Value.(*TsRecord); !ok {
continue
}
if tsr.Position != nil {
list.Put(tsr.Position)
}
}
if list.Len() == 0 {
return
}
ts.position = ts.tsLog.PutAMeta(store.Commit, ts.TsId, list)
if ts.position == nil {
log.Panicln("storeTsr()", list.Len())
}
}
func (ts *Ts) Commit() error {
log.Println("tsr Len()", ts.tsrList.Len())
if ts.tsrList.Len() == 0 {
return err_not_found_ts
}
log.Println("开始commmit")
printTsrArray(ts.tsrList)
// 保存事务日志
ts.storeTsr()
// 打上Commit标志
if ts.position != nil {
ts.tsLog.SetTsStatus(ts.position, store.Commit)
}
// 开始提交
ts.doCommit()
// 打上Commited标志
if ts.position != nil {
ts.tsLog.SetTsStatus(ts.position, store.Committed)
}
return nil
}
func printTsrArray(tsrList ds.List) {
var tsr *TsRecord
var ok bool
log.Println("----------------")
for e := tsrList.GetFirstNode(); e != nil; e = e.Next {
if tsr, ok = e.Value.(*TsRecord); !ok {
continue
}
log.Println(tsr)
}
log.Println("----------------")
}
func (ts *Ts) subTsrListBySavePoint(savepoint int) ds.List {
i := 0
var tsr *TsRecord
var ok bool
l := ts.tsrList
for e := l.GetFirstNode(); e != nil; e = e.Next {
if tsr, ok = e.Value.(*TsRecord); !ok {
continue
}
if tsr.Op == SavePoint && savepoint == tsr.SavePointId {
break
}
i++
}
if list := l.SubList(i, l.Len()); list != nil {
for e := list.GetFirstNode(); e != nil; e = e.Next {
l.Remove(e.Value)
}
return list
}
return l
}
func (ts *Ts) RollBack(savepoint int) error {
var tsr *TsRecord
var ok bool
var rollbacklist ds.List
if ts.tsrList.Len() == 0 {
return err_not_found_ts
}
if savepoint >= ts.curSavePoint {
return err_not_found_savepoint
}
printTsrArray(ts.tsrList)
if savepoint >= 0 {
rollbacklist = ts.subTsrListBySavePoint(savepoint)
} else {
rollbacklist = ts.tsrList
}
printTsrArray(rollbacklist)
printTsrArray(ts.tsrList)
if rollbacklist.Len() == 0 {
return nil
}
for e := rollbacklist.GetTailNode(); e != nil; e = e.Prev {
if tsr, ok = e.Value.(*TsRecord); !ok {
continue
}
ts.rollBackATsr(tsr)
if tsr.Dbptr != nil {
tsr.Dbptr.Lock.Cancel()
}
}
return nil
}
func (ts *Ts) rollBackATsr(tsr *TsRecord) {
switch tsr.Op {
case DeleteDbKey:
ts.rollbackDbDel(tsr.Dbptr, tsr.Key)
case AddDbKey:
ts.rollbackDbAdd(tsr.Dbptr, tsr.Key)
// case SavePoint:
// ts.curSavePoint--
}
}
func (ts *Ts) commitATsr(tsr *TsRecord) {
switch tsr.Op {
case DeleteDbKey:
ts.commitDbDel(tsr.Dbptr, tsr.Key)
case AddDbKey:
ts.commitDbAdd(tsr.Dbptr, tsr.Key)
}
}
func (ts *Ts) rollbackDbDel(db *db.DB, key []byte) {
obj := ts.getMagicDb(key)
if obj != nil {
db.SetDbObj(key, obj)
}
ts.delMagicDb(key)
}
func (ts *Ts) commitDbDel(db *db.DB, key []byte) {
obj := ts.getMagicDb(key)
if obj != nil {
ts.datalog.PutKeyValue(db, key, store.Del, obj)
delete(ts.magicDB, string(key))
}
}
func (ts *Ts) rollbackDbAdd(db *db.DB, key []byte) {
ts.delMagicDb(key)
}
func (ts *Ts) commitDbAdd(db *db.DB, key []byte) {
obj := ts.getMagicDb(key)
log.Println("Obj", obj)
if obj != nil {
ts.datalog.PutKeyValue(db, key, store.None, obj)
db.SetDbObj(key, obj)
}
}
func (ts *Ts) setMagicDb(key []byte, value *ds.Object) {
ts.magicDB[string(key)] = value
}
func (ts *Ts) getMagicDb(key []byte) *ds.Object {
obj, ok := ts.magicDB[string(key)]
if ok {
return obj
}
return nil
}
func (ts *Ts) delMagicDb(key []byte) *ds.Object {
obj := ts.getMagicDb(key)
delete(ts.magicDB, string(key))
return obj
}
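// Typical lifecycle (sketch; Godis, db.DB and the ds/store helpers are defined
// in sibling packages of this repository, and the variables below are illustrative):
//
//	ts := NewTs(godis)
//	if ts.LockDB(database) {
//		ts.SetDBKey(database, ds.BIN, []byte("k"), []byte("v"))
//		ts.AddSavePoint()
//		ts.DeleteDBKey(database, []byte("k"))
//		_ = ts.RollBack(0) // undo everything recorded after savepoint 0
//		_ = ts.Commit()
//		ts.UnLockDB(database)
//	}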
|
[
6
] |
package recover_binary_search_tree
import (
"fmt"
"math"
)
type TreeNode struct {
Val int
Left *TreeNode
Right *TreeNode
}
func recoverTree(root *TreeNode) {
var FirstHit, LastHit *TreeNode
PreScan := &TreeNode{math.MinInt64, nil, nil}
FirstHit, LastHit, PreScan = FillTheTwoHit(root, FirstHit, LastHit, PreScan)
FirstHit.Val, LastHit.Val = LastHit.Val, FirstHit.Val
}
func FillTheTwoHit(root *TreeNode, FirstHit *TreeNode, LastHit *TreeNode,
PreScan *TreeNode) (*TreeNode, *TreeNode, *TreeNode) {
if root != nil {
FirstHit, LastHit, PreScan = FillTheTwoHit(root.Left, FirstHit, LastHit, PreScan)
if root.Val < PreScan.Val {
if FirstHit == nil {
FirstHit = PreScan
}
LastHit = root
}
PreScan = root
FirstHit, LastHit, PreScan = FillTheTwoHit(root.Right, FirstHit, LastHit, PreScan)
}
return FirstHit, LastHit, PreScan
}
func PrintInOrder(root *TreeNode) {
if root == nil {
fmt.Println("nil")
return
}
PrintInOrder(root.Left)
fmt.Println(root.Val)
PrintInOrder(root.Right)
}
func PrintPostOrder(root *TreeNode) {
if root == nil {
fmt.Println("nil")
return
}
PrintPostOrder(root.Left)
PrintPostOrder(root.Right)
fmt.Println(root.Val)
}
|
[
2,
6
] |
package main
func main() {
}
// leetcode 848: Shifting Letters
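// Shifts are accumulated from the back so that shifts[i] holds the total shift
// applied to S[i]; all arithmetic is kept modulo 26.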
func shiftingLetters(S string, shifts []int) string {
arr := []byte(S)
shifts = append(shifts, 0)
for i := len(S) - 1; i >= 0; i-- {
shifts[i] = (shifts[i] + shifts[i+1]) % 26
arr[i] = 'a' + (S[i]-'a'+byte(shifts[i]))%26
}
return string(arr)
}
|
[
2
] |
package api
import (
"encoding/json"
"github.com/asp437/pg_elastic/api/search"
"github.com/asp437/pg_elastic/db"
"github.com/asp437/pg_elastic/server"
"github.com/asp437/pg_elastic/utils"
"io/ioutil"
"net/http"
"regexp"
"strings"
"time"
)
type shardInfo struct {
Total int `json:"total"`
Failed int `json:"failed"`
Successful int `json:"successful"`
}
type searchHits struct {
MaxScore float32 `json:"max_score"`
Total int `json:"total"`
Hits []documentSearchResponse `json:"hits"`
}
type documentPutResponse struct {
Shards shardInfo `json:"_shards"`
Index string `json:"_index"`
Type string `json:"_type"`
ID string `json:"_id"`
Version int `json:"_version"`
Created bool `json:"created"`
Result string `json:"result"`
}
type documentGetResponse struct {
Index string `json:"_index"`
Type string `json:"_type"`
ID string `json:"_id"`
Version int `json:"_version"`
Found bool `json:"found"`
Document interface{} `json:"_source"`
}
type documentSearchResponse struct {
Index string `json:"_index"`
Type string `json:"_type"`
ID string `json:"_id"`
Score float32 `json:"_score"`
Document interface{} `json:"_source"`
}
type searchResponse struct {
Took int `json:"took"`
TimedOut bool `json:"timed_out"`
Shards shardInfo `json:"_shards"`
Hits searchHits `json:"hits"`
}
func formatDocumentSearchResponse(index, typeName string, doc db.ElasticSearchDocument) documentSearchResponse {
return documentSearchResponse{
Index: index,
Type: typeName,
ID: doc.ID,
Score: 1.0,
Document: doc.Document,
}
}
// PutDocumentHandler handles request to put document into storage
func PutDocumentHandler(index, typeName, endpoint string, r *http.Request, s server.PGElasticServer) (interface{}, error) {
var documentObject *db.ElasticSearchDocument
body, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, utils.NewInternalIOError(err.Error())
}
if strings.Compare(endpoint, "") == 0 {
documentObject, err = s.GetDBClient().CreateDocument(index, typeName, string(body), "")
if err != nil {
return nil, err
}
} else {
documentID := endpoint
exists, err := s.GetDBClient().IsDocumentExists(index, typeName, documentID)
if err != nil {
return nil, err
}
if exists {
documentObject, err = s.GetDBClient().UpdateDocument(index, typeName, string(body), documentID)
} else {
documentObject, err = s.GetDBClient().CreateDocument(index, typeName, string(body), documentID)
}
if err != nil {
return nil, err
}
}
response := documentPutResponse{
Shards: shardInfo{1, 0, 1},
Index: index,
Type: typeName,
ID: documentObject.ID,
Version: documentObject.Version,
Created: documentObject.Version == 1,
Result: func() string {
if documentObject.Version == 1 {
return "created"
}
return "updated"
}(),
}
return response, nil
}
// GetDocumentHandler handles request to get document from storage
func GetDocumentHandler(index, typeName, endpoint string, r *http.Request, s server.PGElasticServer) (response interface{}, err error) {
documentID := endpoint
documentObject, err := s.GetDBClient().GetDocument(index, typeName, documentID)
if err != nil {
return nil, err
}
if documentObject != nil {
response = documentGetResponse{
Index: index,
Type: typeName,
ID: documentObject.ID,
Version: documentObject.Version,
Found: true,
Document: documentObject.Document,
}
} else {
response = documentGetResponse{
Index: index,
Type: typeName,
Found: false,
}
}
return response, nil
}
// DeleteDocumentHandler handles request to delete document from storage
func DeleteDocumentHandler(index, typeName, endpoint string, r *http.Request, s server.PGElasticServer) (response interface{}, err error) {
documentID := endpoint
documentObject, err := s.GetDBClient().DeleteDocument(index, typeName, documentID)
if err != nil {
return nil, err
}
if documentObject != nil {
response = documentGetResponse{
Index: index,
Type: typeName,
ID: documentObject.ID,
Version: documentObject.Version,
Found: true,
Document: documentObject.Document,
}
} else {
response = documentGetResponse{
Index: index,
Type: typeName,
Found: false,
}
}
return response, nil
}
// FindDocumentHandler handles request to find document on storage
func FindDocumentHandler(indexPattern, typePattern, endpoint string, r *http.Request, s server.PGElasticServer) (response interface{}, err error) {
startTime := time.Now()
var parsedQuery interface{}
indices, err := s.GetDBClient().FindIndices(indexPattern)
if err != nil {
return nil, utils.NewInternalError(err.Error())
}
body, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, utils.NewInternalIOError(err.Error())
}
err = json.Unmarshal(body, &parsedQuery)
if err != nil {
return nil, utils.NewJSONWrongFormatError(err.Error())
}
for _, index := range indices {
types, err := s.GetDBClient().FindTypes(index, typePattern)
if err != nil {
return nil, utils.NewInternalError(err.Error())
}
for _, typeName := range types {
for k, v := range parsedQuery.(map[string]interface{}) {
switch k {
case "query":
var typeMapping map[string]interface{}
// Get type mapping from system type record
docType, err := s.GetDBClient().GetType(index, typeName)
if err != nil {
return nil, err
}
json.Unmarshal([]byte(docType.Options), &typeMapping)
query := s.GetDBClient().NewQuery(index, typeName)
search.ParseSearchQuery(v.(map[string]interface{}), query, typeMapping)
docs, err := s.GetDBClient().ProcessSearchQuery(index, typeName, query)
if err != nil {
return nil, err
}
response := searchResponse{
Took: 1,
TimedOut: false,
Shards: shardInfo{1, 0, 1},
Hits: searchHits{
MaxScore: 0,
Total: 0,
Hits: []documentSearchResponse{},
},
}
for _, doc := range docs {
docResponse := formatDocumentSearchResponse(index, typeName, doc)
response.Hits.Hits = append(response.Hits.Hits, docResponse)
if response.Hits.MaxScore < docResponse.Score {
response.Hits.MaxScore = docResponse.Score
}
response.Hits.Total += 1
}
response.Took = int(time.Since(startTime) / time.Millisecond)
return response, nil
}
}
}
}
return nil, utils.NewIllegalQueryError("Illegal search query")
}
// FindIndexDocumentHandler handles request to find document of any type on storage
func FindIndexDocumentHandler(endpoint string, r *http.Request, s server.PGElasticServer) (response interface{}, err error) {
var indexHandlerPattern = regexp.MustCompile("^/(?P<index>\\w+)/_search")
indexName := indexHandlerPattern.ReplaceAllString(endpoint, "${index}")
return FindDocumentHandler(indexName, "*", endpoint, r, s)
}
|
[
7
] |
package testhelper
import (
"bytes"
"io/ioutil"
"net/http"
"reflect"
"strconv"
"strings"
"testing"
"github.com/Jeffail/gabs"
)
var endColor = "\033[0m"
type TestHelper struct {
ShouldLog bool
CookieBucket map[string]*http.Cookie
HeaderBucket map[string]string
ResponseBucket map[string]map[string]*gabs.Container
ErrorColor string
SuccessColor string
InfoColor string
}
func NewHTTPTestHelper(
logging bool,
errorColor string,
successColor string,
infoColor string,
) *TestHelper {
if errorColor == "" {
errorColor = "\033[31m"
}
if errorColor == "none" {
errorColor = ""
}
if successColor == "" {
successColor = "\033[32m"
}
if successColor == "none" {
successColor = ""
}
if infoColor == "" {
infoColor = "\033[0m"
}
if infoColor == "none" {
infoColor = ""
}
return &TestHelper{
ErrorColor: errorColor,
SuccessColor: successColor,
InfoColor: infoColor,
ShouldLog: logging,
ResponseBucket: make(map[string]map[string]*gabs.Container),
CookieBucket: make(map[string]*http.Cookie),
HeaderBucket: make(map[string]string),
}
}
type HTTPTestIn struct {
Note string
Label string
TestCode string
Body []byte
URL string
Method string
Headers map[string]string
}
type HTTPTestOut struct {
RawBody []byte
KeyValues map[string]string
Keys []string
Status string
Code int
Headers map[string]string
IgnoredHeaders []string
}
type HTTPTest struct {
HTTPTestIn
HTTPTestOut
}
func (th *TestHelper) NewHTTPTest(testIn HTTPTestIn, testOut HTTPTestOut) *HTTPTest {
return &HTTPTest{
HTTPTestIn: testIn,
HTTPTestOut: testOut,
}
}
func (th *TestHelper) sendRequest(HTTPTest *HTTPTest, t *testing.T) (*http.Response, []byte) {
if th.ShouldLog {
t.Log(th.InfoColor, "===============================================================", endColor)
t.Log(th.InfoColor, HTTPTest.HTTPTestIn.Method, "(", HTTPTest.HTTPTestIn.URL, ")", endColor)
}
req, err := http.NewRequest(HTTPTest.HTTPTestIn.Method, HTTPTest.HTTPTestIn.URL, bytes.NewBuffer(HTTPTest.HTTPTestIn.Body))
if err != nil {
t.Error(th.ErrorColor, "Could not make request:", endColor, err)
t.Skip()
}
for _, v := range th.CookieBucket {
req.AddCookie(v)
}
for i, v := range HTTPTest.HTTPTestIn.Headers {
req.Header.Set(i, v)
}
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
t.Error(th.ErrorColor, "Could not send request:", endColor, err)
t.Skip()
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
if th.ShouldLog {
t.Log(th.InfoColor, "CODE (", strconv.Itoa(resp.StatusCode), ") STATUS (", resp.Status, ")", endColor)
if len(body) > 0 {
t.Log(th.InfoColor, strings.TrimSuffix(string(body), "\n"), endColor)
} else {
t.Log(th.InfoColor, "NO RESPONSE BODY", endColor)
}
if len(resp.Cookies()) < 1 {
t.Log(th.InfoColor, "===============================================================", endColor)
}
}
if len(resp.Cookies()) > 0 {
if th.ShouldLog {
t.Log(th.InfoColor, "==================== RECEIVED NEW COOKIES ===============", endColor)
}
for _, v := range resp.Cookies() {
if th.ShouldLog {
t.Log(v)
}
th.CookieBucket[v.Name] = v
}
if th.ShouldLog {
t.Log(th.InfoColor, "==============================================================", endColor)
}
}
if th.ShouldLog {
t.Log(th.InfoColor, "==================== RECEIVED HEADERS ===============", endColor)
}
for i, v := range resp.Header {
if th.ShouldLog {
t.Log(i + " : " + v[0])
}
th.HeaderBucket[i] = v[0]
}
if th.ShouldLog {
t.Log(th.InfoColor, "==============================================================", endColor)
}
return resp, body
}
func (th *TestHelper) checkHTTPStatus(response *http.Response, expectedStatus string, t *testing.T) {
if response.Status != expectedStatus {
t.Error(th.ErrorColor, "Expected Status (", expectedStatus, ") but got (", response.Status, ")", endColor)
}
if th.ShouldLog {
t.Log(th.SuccessColor, "Wanted status (", expectedStatus, ") and got (", response.Status, ")", endColor)
}
}
func (th *TestHelper) checkHTTPCode(response *http.Response, expectedCode int, t *testing.T) {
if response.StatusCode != expectedCode {
t.Error(th.ErrorColor, "Expected Code (", strconv.Itoa(expectedCode), ") but got (", strconv.Itoa(response.StatusCode), ")", endColor)
}
if th.ShouldLog {
t.Log(th.SuccessColor, "Wanted code (", strconv.Itoa(expectedCode), ") and got( ", strconv.Itoa(response.StatusCode), ")", endColor)
}
}
func (th *TestHelper) checkFields(decodedBody map[string]*gabs.Container, Fields []string, t *testing.T) {
if len(decodedBody) < 1 && len(Fields) < 1 {
return
} else if len(decodedBody) < 1 && len(Fields) > 0 {
t.Error(th.ErrorColor, "Expecting (", strconv.Itoa(len(Fields)), ") fields in response but got (", strconv.Itoa(len(decodedBody)), ")", endColor)
return
}
for _, key := range Fields {
if _, found := decodedBody[key]; found {
if th.ShouldLog {
t.Log(th.SuccessColor, "Key (", key, ") found in response", endColor)
}
continue
}
t.Error(th.ErrorColor, "Key (", key, ") not found in response", endColor)
}
}
func (th *TestHelper) checkKeyValues(decodedBody map[string]*gabs.Container, KeyValues map[string]string, t *testing.T) {
for key, value := range KeyValues {
var valueToCheck string
decodedBodyValue := decodedBody[key].Data()
if decodedBodyValue == nil {
t.Error(th.ErrorColor, "Key ( "+key+" ) with value ("+value+") not found in request", endColor)
continue
}
switch reflect.TypeOf(decodedBodyValue).Kind() {
case reflect.Bool:
if th.ShouldLog {
t.Log(th.InfoColor, "Key ", key, "is of type ( bool )", endColor)
}
valueToCheck = strconv.FormatBool(decodedBodyValue.(bool))
case reflect.Int:
if th.ShouldLog {
t.Log(th.InfoColor, "Key ", key, "is of type ( int )", endColor)
}
valueToCheck = strconv.Itoa(decodedBodyValue.(int))
case reflect.Float64:
if th.ShouldLog {
t.Log(th.InfoColor, "Key ", key, "is of type ( float64 )", endColor)
}
valueToCheck = strconv.FormatFloat(decodedBodyValue.(float64), 'f', -1, 64)
default:
if th.ShouldLog {
t.Log(th.InfoColor, "Key ", key, "is of type ( string )", endColor)
}
valueToCheck = decodedBodyValue.(string)
}
if valueToCheck != value {
t.Error(th.ErrorColor, "Expected ( "+value+" ) in key ( "+key+" ) but got ( "+valueToCheck+" )", endColor)
return
}
if th.ShouldLog {
t.Log(th.SuccessColor, "Wanted value (", value, ") in key (", key, ") and got (", valueToCheck, ")", endColor)
}
}
}
func (th *TestHelper) decodeBody(body []byte, t *testing.T) map[string]*gabs.Container {
if len(body) < 1 {
return nil
}
jsonParsed, err := gabs.ParseJSON(body)
if err != nil {
t.Error(th.ErrorColor, "Request body could not be converted to JSON or XML:\033[30m", endColor, err)
return nil
}
children := jsonParsed.S().ChildrenMap()
return children
}
func (th *TestHelper) checkHeaders(response *http.Response, out *HTTPTestOut, t *testing.T) {
for header, expectedHeaderValue := range out.Headers {
ignored := false
for _, headerToBeIgnored := range out.IgnoredHeaders {
if header == headerToBeIgnored {
ignored = true
break
}
}
if ignored {
continue
}
actualHeaderValue := response.Header.Get(header)
if actualHeaderValue != expectedHeaderValue {
t.Error(th.ErrorColor, "Excpected header (", header, ") with value (", expectedHeaderValue, ") but got (", actualHeaderValue, ")", endColor)
}
if th.ShouldLog {
t.Log(th.SuccessColor, "Found header (", header, ") with value (", actualHeaderValue, ") in response", endColor)
}
}
}
func (th *TestHelper) checkRawBody(responseBody string, expectedBody string, t *testing.T) {
if strings.TrimRight(responseBody, "\n") != strings.TrimRight(expectedBody, "\n") {
t.Error(th.ErrorColor, "Excpected body: ", strings.TrimRight(responseBody, "\n"), "\n but got: ", strings.TrimRight(expectedBody, "\n"), endColor)
}
}
func (th *TestHelper) TestThis(
HTTPTest *HTTPTest,
t *testing.T) {
t.Run(HTTPTest.HTTPTestIn.TestCode+":"+HTTPTest.HTTPTestIn.Label, func(t *testing.T) {
if HTTPTest.HTTPTestIn.Note != "" {
t.Log(th.InfoColor, "==================== NOTE =====================================", endColor)
t.Log(th.InfoColor, HTTPTest.HTTPTestIn.Note, endColor)
}
response, body := th.sendRequest(HTTPTest, t)
th.checkHTTPStatus(response, HTTPTest.HTTPTestOut.Status, t)
th.checkHTTPCode(response, HTTPTest.HTTPTestOut.Code, t)
th.checkHeaders(response, &HTTPTest.HTTPTestOut, t)
if HTTPTest.HTTPTestOut.RawBody != nil {
th.checkRawBody(string(body), string(HTTPTest.HTTPTestOut.RawBody), t)
} else {
th.ResponseBucket[HTTPTest.HTTPTestIn.TestCode] = th.decodeBody(body, t)
th.checkKeyValues(th.ResponseBucket[HTTPTest.HTTPTestIn.TestCode], HTTPTest.HTTPTestOut.KeyValues, t)
th.checkFields(th.ResponseBucket[HTTPTest.HTTPTestIn.TestCode], HTTPTest.HTTPTestOut.Keys, t)
}
})
}
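// A minimal usage sketch (URL, test code and expectations are placeholders):
//
//	func TestPing(t *testing.T) {
//		th := NewHTTPTestHelper(true, "", "", "")
//		test := th.NewHTTPTest(
//			HTTPTestIn{
//				Label:    "ping",
//				TestCode: "T001",
//				Method:   "GET",
//				URL:      "http://localhost:8080/ping",
//			},
//			HTTPTestOut{
//				Code:   200,
//				Status: "200 OK",
//			},
//		)
//		th.TestThis(test, t)
//	}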
|
[
2,
6
] |
package ppos
import (
"fmt"
"github.com/fuyuntt/cchess/util"
"regexp"
"strings"
)
var pieceMap = map[int32]Piece{
'k': PcBKing,
'a': PcBAdvisor,
'b': PcBBishop,
'n': PcBKnight,
'r': PcBRook,
'c': PcBCannon,
'p': PcBPawn,
'K': PcRKing,
'A': PcRAdvisor,
'B': PcRBishop,
'N': PcRKnight,
'R': PcRRook,
'C': PcRCannon,
'P': PcRPawn,
}
const initFen = "rnbakabnr/9/1c5c1/p1p1p1p1p/9/9/P1P1P1P1P/1C5C1/9/RNBAKABNR w - - 0 1"
var positionRegexp = regexp.MustCompile(`^(?:fen (?P<fen>[kabnrcpKABNRCP1-9/]+ [wrb] - - \d+ \d+)|(?P<startpos>startpos))(?: moves (?P<moves>[a-i]\d[a-i]\d(?: [a-i]\d[a-i]\d)*))?$`)
func parsePosition(positionStr string) (*Position, error) {
groups := util.ParseGroup(positionRegexp, positionStr)
var pos *Position
for _, group := range groups {
switch group.Key {
case "fen":
fenPos, err := parseFen(group.Value)
if err != nil {
return nil, err
}
pos = fenPos
case "startpos":
fenPos, err := parseFen(initFen)
if err != nil {
return nil, err
}
pos = fenPos
case "moves":
if pos == nil {
continue
}
for _, mv := range strings.Split(group.Value, " ") {
pos.MakeMove(GetMoveFromICCS(mv))
}
}
}
if pos == nil {
return nil, fmt.Errorf("illegle positionStr: %s", positionStr)
}
return pos, nil
}
func parseFen(fenStr string) (*Position, error) {
pos := CreatePosition()
fenParts := strings.Split(fenStr, " ")
x, y := 0, 0
for _, b := range fenParts[0] {
if b >= '0' && b <= '9' {
x += int(b - '0')
} else if b == '/' {
y++
x = 0
} else {
piece, ok := pieceMap[b]
if !ok {
return nil, fmt.Errorf("fen parse error: %s", fenStr)
}
pos.AddPiece(GetSquare(x, y), piece)
x++
}
}
side := fenParts[1]
if side == "b" {
pos.ChangeSide()
}
return pos, nil
}
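// Example inputs accepted by parsePosition (the move strings are illustrative
// and must be legal in the given position):
//
//	pos, err := parsePosition("startpos moves h2e2 h9g7")
//	pos, err = parsePosition("fen " + initFen)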
|
[
5
] |
/*
 * @Time : 2020/7/15 20:48
 * @Author : cancan
 * @File : 1486.go
 * @Function : XOR operation in an array
 */
/*
 * Question:
 * You are given two integers, n and start.
 * Define the array nums where nums[i] = start + 2*i (0-indexed) and n == nums.length.
 * Return the bitwise XOR of all elements of nums.
 *
 * Example 1:
 * Input: n = 5, start = 0
 * Output: 8
 * Explanation: The array nums is [0, 2, 4, 6, 8], and (0 ^ 2 ^ 4 ^ 6 ^ 8) = 8.
 * "^" is the bitwise XOR operator.
 *
 * Example 2:
 * Input: n = 4, start = 3
 * Output: 8
 * Explanation: The array nums is [3, 5, 7, 9], and (3 ^ 5 ^ 7 ^ 9) = 8.
 *
 * Example 3:
 * Input: n = 1, start = 7
 * Output: 7
 *
 * Example 4:
 * Input: n = 10, start = 5
 * Output: 2
 *
 * Note:
 * - 1 <= n <= 1000
 * - 0 <= start <= 1000
 * - n == nums.length
 */
package QuestionBank
func xorOperation(n int, start int) int {
ans := start
for v := 1; v < n; v++ {
ans ^= start + 2*v
}
return ans
}
|
[
6
] |
package main
import (
"database/sql"
"log"
"strconv"
"time"
_ "github.com/lib/pq"
)
var db *sql.DB
func connectToDB() {
connStr := "user=postgres password=postgres dbname=library sslmode=disable host=localhost port=5432"
database, err := sql.Open("postgres", connStr)
if err != nil {
log.Panic(err)
}
db = database
}
func getNotes() []note {
rows, err := db.Query("SELECT * FROM note")
if err != nil {
log.Panic(err)
}
result := make([]note, 0)
for rows.Next() {
tmp := note{}
err = rows.Scan(&tmp.ID, &tmp.BookName, &tmp.EmergenceDate, &tmp.BuyingDate, &tmp.ReadingEndDate, &tmp.Status, &tmp.UserAccountID)
if err != nil {
log.Panic(err)
}
result = append(result, tmp)
}
return result
}
func getNoteByID(ID string) note {
intID, err := strconv.Atoi(ID)
if err != nil {
log.Panic(err)
}
row := db.QueryRow("SELECT * FROM note WHERE id = $1", intID)
tmp := note{}
err = row.Scan(&tmp.ID, &tmp.BookName, &tmp.EmergenceDate, &tmp.BuyingDate, &tmp.ReadingEndDate, &tmp.Status, &tmp.UserAccountID)
if err != nil {
log.Panic(err)
}
return tmp
}
func addNote(note note) error {
_, err := db.Exec("INSERT INTO note(book_name, emergence_date, buying_date, reading_end_date, status, user_account_id) VALUES ($1, $2, $3, $4, $5, $6)", note.BookName, note.EmergenceDate, note.BuyingDate, note.ReadingEndDate, note.Status, note.UserAccountID)
return err
}
func changeDateAndStatusOfNote(note note) error {
if note.Status != "want" {
note.ReadingEndDate = time.Now()
note.Status = "read"
_, err := db.Exec("UPDATE note SET reading_end_date = $1, status = $2 WHERE id = $3", note.ReadingEndDate, note.Status, note.ID)
return err
}
note.BuyingDate = time.Now()
note.Status = "bought"
_, err := db.Exec("UPDATE note SET buying_date = $1, status = $2 WHERE id = $3", note.BuyingDate, note.Status, note.ID)
return err
}
func createUserAccount(u userAccount) error {
passwordHash, err := hashPassword(u.Password)
if err != nil {
return err
}
_, err = db.Exec("INSERT INTO user_account(name, email, password) VALUES ($1, $2, $3)", u.Name, u.Email, passwordHash)
return err
}
func getUserAccount(email string) (userAccount, error) {
row := db.QueryRow("SELECT * FROM user_account WHERE email=$1", email)
uA := userAccount{}
err := row.Scan(&uA.ID, &uA.Name, &uA.Email, &uA.Password)
if err != nil {
log.Println(err)
return userAccount{}, err
}
return uA, nil
}
func getNotesByUserAccountID(userAccountID int) []note {
rows, err := db.Query("SELECT * FROM note WHERE user_account_id = $1", userAccountID)
if err != nil {
log.Panic(err)
}
result := make([]note, 0)
for rows.Next() {
tmp := note{}
err = rows.Scan(&tmp.ID, &tmp.BookName, &tmp.EmergenceDate, &tmp.BuyingDate, &tmp.ReadingEndDate, &tmp.Status, &tmp.UserAccountID)
if err != nil {
log.Panic(err)
}
result = append(result, tmp)
}
return result
}
|
[
2
] |
package tipo_conta
import (
"fmt"
"time"
"github.com/paulocsilvajr/controle_pessoal_de_financas/API/v1/helper"
"github.com/paulocsilvajr/controle_pessoal_de_financas/API/v1/model/erro"
)
// ITipoConta is an interface that requires the mandatory methods of TipoConta to be implemented
type ITipoConta interface {
String() string
Repr() string
VerificaAtributos() error
Altera(string, string, string) error
AlteraCampos(map[string]string) error
Ativa()
Inativa()
CorrigeData()
}
// TipoConta is a struct that represents an account type. It has JSON tags for each field and satisfies the ITipoConta interface
type TipoConta struct {
Nome string `json:"nome" gorm:"primaryKey;size:50;not null"`
DescricaoDebito string `json:"descricao_debito" gorm:"size:20;not null"`
DescricaoCredito string `json:"descricao_credito" gorm:"size:20;not null"`
DataCriacao time.Time `json:"data_criacao" gorm:"not null;autoCreateTime"`
DataModificacao time.Time `json:"data_modificacao" gorm:"not null;autoUpdateTime"`
Estado bool `json:"estado" gorm:"not null;default:true"`
}
type TTipoConta TipoConta
func (TTipoConta) TableName() string {
return "tipo_conta"
}
// GetNomeTabelaTipoConta returns the name of the TipoConta table
func GetNomeTabelaTipoConta() string {
return new(TTipoConta).TableName()
}
// MaxNome: maximum length of the account type name
// MaxDescricao: maximum length of the debit and credit descriptions of an account type
// MsgErroNome01: default error message 01 for the name
// MsgErroDescricao01: default error message 01 for the (debit) description
// MsgErroDescricao02: default error message 02 for the (credit) description
const (
MaxNome = 50
MaxDescricao = 20
MsgErroNome01 = "Nome com tamanho inválido"
MsgErroDescricao01 = "Descrição do débito com tamanho inválido"
MsgErroDescricao02 = "Descrição do crédito com tamanho inválido"
)
// ITiposConta is an interface that requires the ProcuraTipoConta and Len methods to represent a generic set/list (slice) of account types
type ITiposConta interface {
Len() int
ProcuraTipoConta(string) (*TipoConta, error)
}
// TiposConta represents a set/list (slice) of account types (*TipoConta)
type TiposConta []*TipoConta
// TTiposConta represents a set/list (slice) of account types as used by GORM (*TTipoConta)
type TTiposConta []*TTipoConta
// New returns a new account type (*TipoConta) built from the given parameters (nome, descDebito and descCredito). It is equivalent to creating a TipoConta via the literal &TipoConta{Nome: ..., ...}. The creation and modification dates are set to the current time and the state is set to active
func New(nome, descDebito, descCredito string) *TipoConta {
return &TipoConta{
Nome: nome,
DescricaoDebito: descDebito,
DescricaoCredito: descCredito,
DataCriacao: time.Now().Local(),
DataModificacao: time.Now().Local(),
Estado: true}
}
// NewTipoConta creates a new TipoConta like New(), but validates the fields given in the nome, descDebito and descCredito parameters
func NewTipoConta(nome, descDebito, descCredito string) (tipoConta *TipoConta, err error) {
tipoConta = New(nome, descDebito, descCredito)
if err = tipoConta.VerificaAtributos(); err != nil {
tipoConta = nil
}
return
}
// GetTipoContaTest returns a test TipoConta used for tests in general
func GetTipoContaTest() (tipoConta *TipoConta) {
tipoConta = New("banco teste 01", "saque", "depósito")
tipoConta.DataCriacao = time.Date(2000, 2, 1, 12, 30, 0, 0, new(time.Location))
tipoConta.DataModificacao = time.Date(2000, 2, 1, 12, 30, 0, 0, new(time.Location))
tipoConta.Estado = true
return
}
// String returns a textual representation of a TipoConta. Dates are formatted with helper.DataFormatada() and the state field with helper.GetEstado()
func (t *TipoConta) String() string {
estado := helper.GetEstado(t.Estado)
dataCriacao := helper.DataFormatada(t.DataCriacao)
dataModificacao := helper.DataFormatada(t.DataModificacao)
return fmt.Sprintf("%s\t%s\t%s\t%s\t%s\t%s", t.Nome, t.DescricaoDebito, t.DescricaoCredito, dataCriacao, dataModificacao, estado)
}
// Repr returns a string representation of a TipoConta without any special formatting
func (t *TipoConta) Repr() string {
return fmt.Sprintf("%s\t%s\t%s\t%s\t%s\t%v", t.Nome, t.DescricaoDebito, t.DescricaoCredito, t.DataCriacao, t.DataModificacao, t.Estado)
}
// VerificaAtributos checks the Nome, DescricaoDebito and DescricaoCredito fields, returning a non-nil error if there is a problem
func (t *TipoConta) VerificaAtributos() error {
return verifica(t.Nome, t.DescricaoDebito, t.DescricaoCredito)
}
// Altera updates the TipoConta data from the given parameters after validating each one, and refreshes its modification date. It returns a non-nil error if any parameter is invalid
func (t *TipoConta) Altera(nome string, descDebito string, descCredito string) (err error) {
if err = verifica(nome, descDebito, descCredito); err != nil {
return
}
t.Nome = nome
t.DescricaoDebito = descDebito
t.DescricaoCredito = descCredito
t.DataModificacao = time.Now().Local()
return
}
// AlteraCampos updates the fields of a TipoConta from the map given in the campos parameter. Only keys provided with a valid value are updated, and the modification date is refreshed. It returns a non-nil error if field validation fails. Allowed fields: nome, descricaoDebito, descricaoCredito
func (t *TipoConta) AlteraCampos(campos map[string]string) (err error) {
for chave, valor := range campos {
switch chave {
case "nome":
if err = verificaNome(valor); err != nil {
return
}
t.Nome = valor
case "descricaoDebito":
if err = verificaDescricao(valor, false); err != nil {
return
}
t.DescricaoDebito = valor
case "descricaoCredito":
if err = verificaDescricao(valor, true); err != nil {
return
}
t.DescricaoCredito = valor
}
}
t.DataModificacao = time.Now().Local()
return
}
// Ativa marks the TipoConta as active and updates its modification date
func (t *TipoConta) Ativa() {
t.alteraEstado(true)
}
// Inativa marks the TipoConta as inactive and updates its modification date
func (t *TipoConta) Inativa() {
t.alteraEstado(false)
}
// CorrigeData converts the dates (Time) to the local timezone
func (t *TipoConta) CorrigeData() {
t.DataCriacao = t.DataCriacao.Local()
t.DataModificacao = t.DataModificacao.Local()
}
// ProcuraTipoConta returns a TipoConta found by searching a TiposConta list. If it is not found, a non-nil error is returned. The ITiposConta interface requires this method
func (ts TiposConta) ProcuraTipoConta(tipoConta string) (t *TipoConta, err error) {
for _, tipoContaLista := range ts {
if tipoContaLista.Nome == tipoConta {
t = tipoContaLista
return
}
}
err = fmt.Errorf("Tipo de conta %s informada não existe na listagem", tipoConta)
return
}
// Len returns the number of elements in the TipoConta slice. The ITiposConta interface requires this method
func (ts TiposConta) Len() int {
return len(ts)
}
func (t *TipoConta) alteraEstado(estado bool) {
t.DataModificacao = time.Now().Local()
t.Estado = estado
}
func verifica(nome, descDebito, descCredito string) (err error) {
if err = verificaNome(nome); err != nil {
return
} else if err = verificaDescricao(descDebito, false); err != nil {
return
} else if err = verificaDescricao(descCredito, true); err != nil {
return
}
return
}
func verificaNome(nome string) (err error) {
if len(nome) == 0 || len(nome) > MaxNome {
err = erro.ErroTamanho(MsgErroNome01, len(nome))
}
return
}
func verificaDescricao(descricao string, tipoCredito bool) (err error) {
if len(descricao) == 0 || len(descricao) > MaxDescricao {
if tipoCredito {
err = erro.ErroTamanho(MsgErroDescricao02, len(descricao))
} else {
err = erro.ErroTamanho(MsgErroDescricao01, len(descricao))
}
}
return
}
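// Example (the values are illustrative only):
//
//	tc, err := NewTipoConta("banco corrente", "saque", "depósito")
//	if err == nil {
//		err = tc.AlteraCampos(map[string]string{"descricaoDebito": "retirada"})
//	}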
|
[
6
] |
package algorithms
import "math"
func thirdMax(nums []int) int {
max1 := math.MinInt64
max2 := math.MinInt64
max3 := math.MinInt64
for _, n := range nums {
if n > max1 {
max3 = max2
max2 = max1
max1 = n
} else if n > max2 && n < max1 {
max3 = max2
max2 = n
} else if n > max3 && n < max2 {
max3 = n
}
}
if max3 != math.MinInt64 {
return max3
}
return max1
}
|
[
5
] |
package calcium
import (
"context"
"github.com/projecteru2/core/store"
"github.com/projecteru2/core/types"
"github.com/projecteru2/core/utils"
log "github.com/sirupsen/logrus"
)
// DissociateContainer dissociates a container from eru, returning its resources but not modifying it
func (c *Calcium) DissociateContainer(ctx context.Context, IDs []string) (chan *types.DissociateContainerMessage, error) {
ch := make(chan *types.DissociateContainerMessage)
go func() {
defer close(ch)
for _, ID := range IDs {
err := c.withContainerLocked(ctx, ID, func(container *types.Container) error {
return c.withNodeLocked(ctx, container.Nodename, func(node *types.Node) (err error) {
return utils.Txn(
ctx,
// if
func(ctx context.Context) error {
return c.store.RemoveContainer(ctx, container)
},
// then
func(ctx context.Context) error {
log.Infof("[DissociateContainer] Container %s dissociated", container.ID)
return c.store.UpdateNodeResource(ctx, node, container.CPU, container.Quota, container.Memory, container.Storage, container.VolumePlan.IntoVolumeMap(), store.ActionIncr)
},
// rollback
nil,
c.config.GlobalTimeout,
)
})
})
if err != nil {
log.Errorf("[DissociateContainer] Dissociate container %s failed, err: %v", ID, err)
}
ch <- &types.DissociateContainerMessage{ContainerID: ID, Error: err}
}
}()
return ch, nil
}
|
[
2
] |
package eudore
import (
"encoding/json"
"fmt"
"os"
"reflect"
"runtime"
"strings"
"sync"
)
// ConfigParseFunc defines a configuration parsing function.
//
// The default parsing functions of Config are eudore.ConfigAllParseFunc.
type ConfigParseFunc func(Config) error
/*
Config defines configuration management, providing configuration read/write and parsing functions.
Get/Set read and write data and implement the following:
Use a custom map or struct as the data store
Support locking for concurrency safety
Access attributes by a string path hierarchy
The default parsing functions implement the following:
Custom configuration parsing functions
Parsing multiple JSON files
Parsing long and short command-line arguments
Parsing Env environment variables
Configuration differentiation
Generating help information from the struct
Switching the working directory
*/
type Config interface {
Get(string) interface{}
Set(string, interface{}) error
ParseOption([]ConfigParseFunc) []ConfigParseFunc
Parse() error
}
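// A minimal usage sketch (the struct and its tag values are illustrative; the
// parse functions below define what is actually recognized):
//
//	conf := NewConfigEudore(&struct {
//		Workdir string `alias:"workdir" flag:"w"`
//		Help    bool   `alias:"help" flag:"h"`
//	}{})
//	if err := conf.Parse(); err != nil {
//		panic(err)
//	}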
// configMap stores the configuration in a map.
type configMap struct {
Keys map[string]interface{} `alias:"keys"`
Print func(...interface{}) `alias:"print"`
funcs []ConfigParseFunc `alias:"-"`
Locker sync.RWMutex `alias:"-"`
}
// configEudore stores the configuration in a struct or map, reading and writing attributes directly or via reflection.
type configEudore struct {
Keys interface{} `alias:"keys"`
Print func(...interface{}) `alias:"print"`
funcs []ConfigParseFunc `alias:"-"`
configRLocker `alias:"-"`
}
type configRLocker interface {
sync.Locker
RLock()
RUnlock()
}
// NewConfigMap creates a ConfigMap; if the argument is a map[string]interface{}, it is used as the initial data.
//
// ConfigMap uses the given map as the configuration store for Get/Set of key values.
//
// ConfigMap implements the json.Marshaler and json.Unmarshaler interfaces.
func NewConfigMap(arg interface{}) Config {
var keys map[string]interface{}
if ks, ok := arg.(map[string]interface{}); ok {
keys = ks
} else {
keys = make(map[string]interface{})
}
return &configMap{
Keys: keys,
Print: printEmpty,
funcs: ConfigAllParseFunc,
}
}
// Get returns an attribute; if the key is an empty string, it returns the map holding all data.
func (c *configMap) Get(key string) interface{} {
c.Locker.RLock()
defer c.Locker.RUnlock()
if len(key) == 0 {
return c.Keys
}
return c.Keys[key]
}
// Set sets an attribute; if the key is an empty string and the value is a map[string]interface{}, it replaces the map holding all data.
func (c *configMap) Set(key string, val interface{}) error {
c.Locker.Lock()
if len(key) == 0 {
keys, ok := val.(map[string]interface{})
if ok {
c.Keys = keys
}
} else if key == "print" {
fn, ok := val.(func(...interface{}))
if ok {
c.Print = fn
} else {
c.Print(val)
}
} else {
c.Keys[key] = val
}
c.Locker.Unlock()
return nil
}
// ParseOption sets the configuration parsing functions and returns the previous ones.
func (c *configMap) ParseOption(fn []ConfigParseFunc) []ConfigParseFunc {
c.funcs, fn = fn, c.funcs
return fn
}
// Parse runs all configuration parsing functions; if one of them returns an error, parsing stops and the error is returned.
func (c *configMap) Parse() (err error) {
for _, fn := range c.funcs {
err = fn(c)
if err != nil {
c.Print(err)
return
}
}
return nil
}
// MarshalJSON implements the json.Marshaler interface so that JSON serialization operates directly on the stored data.
func (c *configMap) MarshalJSON() ([]byte, error) {
c.Locker.RLock()
defer c.Locker.RUnlock()
return json.Marshal(c.Keys)
}
// UnmarshalJSON implements the json.Unmarshaler interface so that JSON deserialization operates directly on the stored data.
func (c *configMap) UnmarshalJSON(data []byte) error {
c.Locker.Lock()
defer c.Locker.Unlock()
return json.Unmarshal(data, &c.Keys)
}
// NewConfigEudore creates a ConfigEudore; if the argument is nil, an empty map[string]interface{} is used as the initial data.
//
// ConfigEudore accepts a map or struct as the configuration store and uses eudore.Set and eudore.Get to read and write the data.
//
// If the given configuration object implements a read/write lock like sync.RWMutex, that lock is used; otherwise a new sync.RWMutex is created.
//
// ConfigEudore implements the json.Marshaler and json.Unmarshaler interfaces.
func NewConfigEudore(i interface{}) Config {
if i == nil {
i = make(map[string]interface{})
}
mu, ok := i.(configRLocker)
if !ok {
mu = new(sync.RWMutex)
}
return &configEudore{
Keys: i,
Print: printEmpty,
funcs: ConfigAllParseFunc,
configRLocker: mu,
}
}
// Get reads one attribute of the stored data.
func (c *configEudore) Get(key string) (i interface{}) {
if len(key) == 0 {
return c.Keys
}
c.RLock()
i = Get(c.Keys, key)
c.RUnlock()
return
}
// Set sets one attribute of the stored data.
func (c *configEudore) Set(key string, val interface{}) (err error) {
c.Lock()
if len(key) == 0 {
c.Keys = val
} else if key == "print" {
fn, ok := val.(func(...interface{}))
if ok {
c.Print = fn
} else {
c.Print(val)
}
} else {
err = Set(c.Keys, key, val)
}
c.Unlock()
return
}
// ParseOption replaces the configuration parse functions and returns the previous ones.
func (c *configEudore) ParseOption(fn []ConfigParseFunc) []ConfigParseFunc {
c.funcs, fn = fn, c.funcs
return fn
}
// Parse runs all configuration parse functions; if a parse function returns an error, parsing stops and the error is returned.
func (c *configEudore) Parse() (err error) {
for _, fn := range c.funcs {
err = fn(c)
if err != nil {
c.Print(err)
return
}
}
return nil
}
// MarshalJSON implements the json.Marshaler interface so that JSON serialization operates directly on the stored data.
func (c *configEudore) MarshalJSON() ([]byte, error) {
c.RLock()
defer c.RUnlock()
return json.Marshal(c.Keys)
}
// UnmarshalJSON implements the json.Unmarshaler interface so that JSON deserialization operates directly on the stored data.
func (c *configEudore) UnmarshalJSON(data []byte) error {
c.Lock()
defer c.Unlock()
return json.Unmarshal(data, &c.Keys)
}
func configPrint(c Config, args ...interface{}) {
c.Set("print", fmt.Sprint(args...))
}
// ConfigParseJSON parses JSON file configuration from the paths listed under the "config" key.
func ConfigParseJSON(c Config) error {
configPrint(c, "config read paths: ", c.Get("config"))
for _, path := range GetStrings(c.Get("config")) {
file, err := os.Open(path)
if err == nil {
err = json.NewDecoder(file).Decode(c)
file.Close()
}
if err == nil {
configPrint(c, "config load path: ", path)
return nil
}
if !os.IsNotExist(err) {
return fmt.Errorf("config load %s error: %s", path, err.Error())
}
}
return nil
}
// ConfigParseArgs sets configuration from command-line arguments, using '--' as the prefix.
//
// If a struct field has a flag tag it is used as a short alias for that path; tags shorter than 5 characters use the '-' prefix.
func ConfigParseArgs(c Config) (err error) {
flag := &eachTags{tag: "flag", Repeat: make(map[uintptr]string)}
flag.Each("", reflect.ValueOf(c.Get("")))
short := make(map[string][]string)
for i, tag := range flag.Tags {
short[flag.Vals[i]] = append(short[flag.Vals[i]], tag[1:])
}
for _, str := range os.Args[1:] {
key, val := split2byte(str, '=')
if len(key) > 1 && key[0] == '-' && key[1] != '-' {
for _, lkey := range short[key[1:]] {
val := val
if val == "" && reflect.ValueOf(c.Get(lkey)).Kind() == reflect.Bool {
val = "true"
}
configPrint(c, fmt.Sprintf("config set short arg %s: --%s=%s", key[1:], lkey, val))
c.Set(lkey, val)
}
} else if strings.HasPrefix(key, "--") {
if val == "" && reflect.ValueOf(c.Get(key[2:])).Kind() == reflect.Bool {
val = "true"
}
configPrint(c, "config set arg: ", str)
c.Set(key[2:], val)
}
}
return
}
// ConfigParseEnvs sets configuration from environment variables prefixed with 'ENV_'; an underscore '_' acts as a '.' separator.
func ConfigParseEnvs(c Config) error {
for _, value := range os.Environ() {
if strings.HasPrefix(value, "ENV_") {
configPrint(c, "config set env: ", value)
k, v := split2byte(value, '=')
k = strings.ToLower(strings.Replace(k, "_", ".", -1))[4:]
c.Set(k, v)
}
}
return nil
}
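// Illustrative sketch (not part of the original source): ConfigParseEnvs maps an
// environment variable such as ENV_WORKDIR=/srv to c.Set("workdir", "/srv").
// The variable name below is a hypothetical example.
func exampleConfigParseEnvs() {
os.Setenv("ENV_WORKDIR", "/srv")
conf := NewConfigMap(nil)
conf.ParseOption([]ConfigParseFunc{ConfigParseEnvs})
if err := conf.Parse(); err == nil {
fmt.Println(conf.Get("workdir")) // prints: /srv
}
}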
// ConfigParseMods reads the array of enabled modes from the 'enable' key and loads configuration from 'mods.xxx'.
//
// The OS mod is loaded by default; in a docker environment the docker mode is used.
func ConfigParseMods(c Config) error {
mod := GetStrings(c.Get("enable"))
mod = append([]string{getOS()}, mod...)
for _, i := range mod {
m := c.Get("mods." + i)
if m != nil {
configPrint(c, "config load mod "+i)
ConvertTo(m, c.Get(""))
}
}
return nil
}
func getOS() string {
// check docker
_, err := os.Stat("/.dockerenv")
if err == nil || !os.IsNotExist(err) {
return "docker"
}
// return the default OS
return runtime.GOOS
}
// ConfigParseWorkdir initializes the workspace: it reads the 'workdir' value from the config and changes to that directory.
func ConfigParseWorkdir(c Config) error {
dir := GetString(c.Get("workdir"))
if dir != "" {
configPrint(c, "changes working directory to: "+dir)
return os.Chdir(dir)
}
return nil
}
// ConfigParseHelp inspects the configuration; if the 'help' key is set, it walks the struct and outputs the description tag values as help information.
//
// Note that properties of the configuration struct must be non-nil, otherwise they are not traversed.
func ConfigParseHelp(c Config) error {
if !GetBool(c.Get("help")) {
return nil
}
conf := reflect.ValueOf(c.Get(""))
flag := &eachTags{tag: "flag", Repeat: make(map[uintptr]string)}
flag.Each("", conf)
flagmap := make(map[string]string)
for i, tag := range flag.Tags {
flagmap[tag[1:]] = flag.Vals[i]
}
desc := &eachTags{tag: "description", Repeat: make(map[uintptr]string)}
desc.Each("", conf)
var length int
for i, tag := range desc.Tags {
desc.Tags[i] = tag[1:]
if len(tag) > length {
length = len(tag)
}
}
for i, tag := range desc.Tags {
f, ok := flagmap[tag]
if ok && !strings.Contains(tag, "{") && len(f) < 5 {
fmt.Printf(" -%s,", f)
}
fmt.Printf("\t --%s=%s\t%s\n", tag, strings.Repeat(" ", length-len(tag)), desc.Vals[i])
}
return nil
}
type eachTags struct {
tag string
Tags []string
Vals []string
Repeat map[uintptr]string
LastTag string
}
func (each *eachTags) Each(prefix string, iValue reflect.Value) {
switch iValue.Kind() {
case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
if !iValue.IsNil() {
_, ok := each.Repeat[iValue.Pointer()]
if ok {
return
}
each.Repeat[iValue.Pointer()] = prefix
}
}
switch iValue.Kind() {
case reflect.Ptr, reflect.Interface:
if !iValue.IsNil() {
each.Each(prefix, iValue.Elem())
}
case reflect.Map:
if each.LastTag != "" {
each.Tags = append(each.Tags, fmt.Sprintf("%s.{%s}", prefix, iValue.Type().Key().Name()))
each.Vals = append(each.Vals, each.LastTag)
}
case reflect.Slice, reflect.Array:
length := "n"
if iValue.Kind() == reflect.Array {
length = fmt.Sprint(iValue.Type().Len() - 1)
}
last := each.LastTag
if last != "" {
each.Tags = append(each.Tags, fmt.Sprintf("%s.{0-%s}", prefix, length))
each.Vals = append(each.Vals, last)
}
each.LastTag = last
each.Each(fmt.Sprintf("%s.{0-%s}", prefix, length), reflect.New(iValue.Type().Elem()))
case reflect.Struct:
each.EachStruct(prefix, iValue)
}
}
func (each *eachTags) EachStruct(prefix string, iValue reflect.Value) {
iType := iValue.Type()
for i := 0; i < iType.NumField(); i++ {
if iValue.Field(i).CanSet() {
val := iType.Field(i).Tag.Get(each.tag)
name := iType.Field(i).Tag.Get("alias")
if name == "" {
name = iType.Field(i).Name
}
if val != "" && each.getValueKind(iType.Field(i).Type) != "" {
each.Tags = append(each.Tags, prefix+"."+name)
each.Vals = append(each.Vals, val)
}
each.LastTag = val
each.Each(prefix+"."+name, iValue.Field(i))
}
}
}
func (each *eachTags) getValueKind(iType reflect.Type) string {
switch iType.Kind() {
case reflect.Bool:
return "bool"
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return "int"
case reflect.Float32, reflect.Float64:
return "float"
case reflect.String:
return "string"
default:
if iType.Kind() == reflect.Slice && iType.Elem().Kind() == reflect.Uint8 {
return "string"
}
return ""
}
}
|
[
5
] |
package inttest
import (
"github.com/Sirupsen/logrus"
"github.com/joatmon08/ovs_exporter/utils"
"time"
"errors"
"testing"
)
const (
TCP = "tcp"
UNIX = "unix"
CREATE_WAIT_TIME = 2 * time.Second
EXEC_WAIT_TIME = 5 * time.Second
INTTEST_NETWORK = "ovs_exporter_inttest_network"
INTTEST_NETWORK_CIDR = "172.19.0.0"
OPENVSWITCH_IP = "172.19.0.2"
OPENVSWITCH_PORT = ":6640"
EXPORTER_PORT = ":9177"
OPENVSWITCH_JSON = "openvswitch"
EXPORTER_JSON = "ovs_exporter"
BRIDGE_ID = "br0"
PORT_ID = "eth0"
IP = "192.168.128.5"
OVS_STATE = "openvswitch_up"
OVS_INTERFACES = "openvswitch_interfaces_total"
OVS_PORTS = "openvswitch_ports_total"
)
var (
BridgeMetric = "openvswitch_interfaces_statistics{name=\"" + BRIDGE_ID + "\",stat=\"rx_bytes\"}"
AddBridge = "ovs-vsctl add-br " + BRIDGE_ID
SetDatapath = "ovs-vsctl set bridge " + BRIDGE_ID + " datapath_type=netdev"
AddPort = "ovs-vsctl add-port " + BRIDGE_ID + " " + PORT_ID
CreateBridge = AddBridge + " && " + SetDatapath + " && " + AddPort
ConfigureBridge = "ifconfig " + BRIDGE_ID + " " + IP
OVSUNIXCommand = "app -listen-port " + EXPORTER_PORT
OVSTCPCommand = OVSUNIXCommand + " -uri " + OPENVSWITCH_IP + OPENVSWITCH_PORT
)
type testSetupObject struct {
ovsConnectionMode string
containerExecCmd string
ovsContainerID string
ovsExporterContainerID string
networkID string
metrics map[string]string
}
func createContainers(exporterCmd string) (ovsContainerID string, ovsExporterContainerID string) {
var err error
ovsArgs := &utils.OptionalContainerArgs{
Network: INTTEST_NETWORK,
}
if exporterCmd == OVSUNIXCommand {
ovsArgs.HostBinds = []string{
"/tmp/openvswitch:/usr/local/var/run/openvswitch",
}
}
ovsContainerID, err = utils.CreateContainer(OPENVSWITCH_JSON, ovsArgs)
if err != nil {
panic(err)
}
err = utils.StartContainer(ovsContainerID)
if err != nil {
panic(err)
}
logrus.Debugf("created ovs container %s", ovsContainerID)
exporterArgs := &utils.OptionalContainerArgs{
Network: INTTEST_NETWORK,
Cmd: exporterCmd,
}
if exporterCmd == OVSUNIXCommand {
exporterArgs.HostBinds = []string{
"/tmp/openvswitch:/var/run/openvswitch",
}
}
ovsExporterContainerID, err = utils.CreateContainer(EXPORTER_JSON, exporterArgs)
if err != nil {
panic(err)
}
err = utils.StartContainer(ovsExporterContainerID)
if err != nil {
panic(err)
}
logrus.Debugf("created ovs exporter container %s", ovsExporterContainerID)
time.Sleep(CREATE_WAIT_TIME)
return ovsContainerID, ovsExporterContainerID
}
func RetrieveMetrics(testSetup *testSetupObject) (error) {
ovsClient := utils.NewOVSExporterClient("http://localhost:9177")
metrics, err := ovsClient.GetExporterMetrics()
if err != nil {
return err
}
if len(metrics) == 0 {
return errors.New("no metrics, metrics map is empty")
}
testSetup.metrics = metrics
return nil
}
func Setup(t *testing.T, testSetup *testSetupObject) (*testSetupObject) {
var ovsEntrypoint string
networkID, err := utils.CreateNetwork(INTTEST_NETWORK, INTTEST_NETWORK_CIDR)
if err != nil {
t.Error(err)
}
testSetup.networkID = networkID
switch connection := testSetup.ovsConnectionMode; connection {
case TCP:
ovsEntrypoint = OVSTCPCommand
case UNIX:
ovsEntrypoint = OVSUNIXCommand
default:
t.Error("Specify unix or tcp mode for OVS container")
}
ovs, exporter := createContainers(ovsEntrypoint)
testSetup.ovsExporterContainerID = exporter
testSetup.ovsContainerID = ovs
if testSetup.containerExecCmd == "" {
return testSetup
}
commands := []string{utils.SHELL, utils.COMMAND_OPTION, testSetup.containerExecCmd}
if err := utils.ExecuteContainer(ovs, commands); err != nil {
t.Error(err)
}
time.Sleep(EXEC_WAIT_TIME)
return testSetup
}
func Teardown(ovsContainerID string, ovsExporterContainerID string, networkID string) {
if err := utils.DeleteContainer(ovsExporterContainerID); err != nil {
logrus.Error(err)
}
if err := utils.DeleteContainer(ovsContainerID); err != nil {
logrus.Error(err)
}
if err := utils.DeleteNetwork(networkID); err != nil {
logrus.Error(err)
}
}
|
[
6
] |
// Package env provides some utility functions to interact with the environment
// of the process.
package env
import (
"os"
"strings"
)
// GetBoolVal retrieves a boolean value from given environment envVar.
// Returns default value if envVar is not set.
func GetBoolVal(envVar string, defaultValue bool) bool {
if val := os.Getenv(envVar); val != "" {
if strings.ToLower(val) == "true" {
return true
} else if strings.ToLower(val) == "false" {
return false
}
}
return defaultValue
}
// GetStringVal retrieves a string value from given environment envVar
// Returns default value if envVar is not set.
func GetStringVal(envVar string, defaultValue string) string {
if val := os.Getenv(envVar); val != "" {
return val
} else {
return defaultValue
}
}
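// Illustrative usage sketch (not part of the original source): reading typed
// values with fallbacks. The environment variable names below are hypothetical.
func exampleEnvUsage() {
debug := GetBoolVal("APP_DEBUG", false)
addr := GetStringVal("APP_LISTEN_ADDR", ":8080")
_ = debug
_ = addr
}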
|
[
6
] |
package xorfilter
import (
"errors"
"math"
)
func murmur64(h uint64) uint64 {
h ^= h >> 33
h *= 0xff51afd7ed558ccd
h ^= h >> 33
h *= 0xc4ceb9fe1a85ec53
h ^= h >> 33
return h
}
// returns random number, modifies the seed
func splitmix64(seed *uint64) uint64 {
*seed = *seed + 0x9E3779B97F4A7C15
z := *seed
z = (z ^ (z >> 30)) * 0xBF58476D1CE4E5B9
z = (z ^ (z >> 27)) * 0x94D049BB133111EB
return z ^ (z >> 31)
}
func mixsplit(key, seed uint64) uint64 {
return murmur64(key + seed)
}
func rotl64(n uint64, c int) uint64 {
return (n << uint(c&63)) | (n >> uint((-c)&63))
}
func reduce(hash, n uint32) uint32 {
// http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
return uint32((uint64(hash) * uint64(n)) >> 32)
}
func fingerprint(hash uint64) uint64 {
return hash ^ (hash >> 32)
}
// Contains tells you whether the key is likely part of the set.
func (filter *Xor8) Contains(key uint64) bool {
hash := mixsplit(key, filter.Seed)
f := uint8(fingerprint(hash))
r0 := uint32(hash)
r1 := uint32(rotl64(hash, 21))
r2 := uint32(rotl64(hash, 42))
h0 := reduce(r0, filter.BlockLength)
h1 := reduce(r1, filter.BlockLength) + filter.BlockLength
h2 := reduce(r2, filter.BlockLength) + 2*filter.BlockLength
return f == (filter.Fingerprints[h0] ^ filter.Fingerprints[h1] ^ filter.Fingerprints[h2])
}
func (filter *Xor8) geth0h1h2(k uint64) hashes {
hash := mixsplit(k, filter.Seed)
answer := hashes{}
answer.h = hash
r0 := uint32(hash)
r1 := uint32(rotl64(hash, 21))
r2 := uint32(rotl64(hash, 42))
answer.h0 = reduce(r0, filter.BlockLength)
answer.h1 = reduce(r1, filter.BlockLength)
answer.h2 = reduce(r2, filter.BlockLength)
return answer
}
func (filter *Xor8) geth0(hash uint64) uint32 {
r0 := uint32(hash)
return reduce(r0, filter.BlockLength)
}
func (filter *Xor8) geth1(hash uint64) uint32 {
r1 := uint32(rotl64(hash, 21))
return reduce(r1, filter.BlockLength)
}
func (filter *Xor8) geth2(hash uint64) uint32 {
r2 := uint32(rotl64(hash, 42))
return reduce(r2, filter.BlockLength)
}
// scan for values with a count of one
func scanCount(Qi []keyindex, setsi []xorset) ([]keyindex, int) {
QiSize := 0
// len(setsi) = filter.BlockLength
for i := uint32(0); i < uint32(len(setsi)); i++ {
if setsi[i].count == 1 {
Qi[QiSize].index = i
Qi[QiSize].hash = setsi[i].xormask
QiSize++
}
}
return Qi, QiSize
}
// fill setsi to xorset{0, 0}
func resetSets(setsi []xorset) []xorset {
for i := range setsi {
setsi[i] = xorset{0, 0}
}
return setsi
}
// MaxIterations is the maximum number of iterations allowed before the populate function returns an error.
var MaxIterations = 1024
// Populate fills the filter with provided keys. For best results,
// the caller should avoid having too many duplicated keys.
// The function returns an error if the provided set of keys is empty.
func Populate(keys []uint64) (*Xor8, error) {
size := len(keys)
if size == 0 {
return nil, errors.New("provide a non-empty set")
}
capacity := 32 + uint32(math.Ceil(1.23*float64(size)))
capacity = capacity / 3 * 3 // round it down to a multiple of 3
filter := &Xor8{}
var rngcounter uint64 = 1
filter.Seed = splitmix64(&rngcounter)
filter.BlockLength = capacity / 3
// slice capacity defaults to length
filter.Fingerprints = make([]uint8, capacity)
stack := make([]keyindex, size)
Q0 := make([]keyindex, filter.BlockLength)
Q1 := make([]keyindex, filter.BlockLength)
Q2 := make([]keyindex, filter.BlockLength)
sets0 := make([]xorset, filter.BlockLength)
sets1 := make([]xorset, filter.BlockLength)
sets2 := make([]xorset, filter.BlockLength)
iterations := 0
for {
iterations += 1
if iterations > MaxIterations {
// The probability of this happening is lower than the
// cosmic-ray probability (i.e., a cosmic ray corrupts your system),
// but if it happens, we just fill the fingerprint with ones which
// will flag all possible keys as 'possible', ensuring a correct result.
for i := 0; i < len(filter.Fingerprints); i++ {
filter.Fingerprints[i] = ^uint8(0)
}
return filter, nil
}
for i := 0; i < size; i++ {
key := keys[i]
hs := filter.geth0h1h2(key)
sets0[hs.h0].xormask ^= hs.h
sets0[hs.h0].count++
sets1[hs.h1].xormask ^= hs.h
sets1[hs.h1].count++
sets2[hs.h2].xormask ^= hs.h
sets2[hs.h2].count++
}
// scan for values with a count of one
Q0, Q0size := scanCount(Q0, sets0)
Q1, Q1size := scanCount(Q1, sets1)
Q2, Q2size := scanCount(Q2, sets2)
stacksize := 0
for Q0size+Q1size+Q2size > 0 {
for Q0size > 0 {
Q0size--
keyindexvar := Q0[Q0size]
index := keyindexvar.index
if sets0[index].count == 0 {
continue // not actually possible after the initial scan.
}
hash := keyindexvar.hash
h1 := filter.geth1(hash)
h2 := filter.geth2(hash)
stack[stacksize] = keyindexvar
stacksize++
sets1[h1].xormask ^= hash
sets1[h1].count--
if sets1[h1].count == 1 {
Q1[Q1size].index = h1
Q1[Q1size].hash = sets1[h1].xormask
Q1size++
}
sets2[h2].xormask ^= hash
sets2[h2].count--
if sets2[h2].count == 1 {
Q2[Q2size].index = h2
Q2[Q2size].hash = sets2[h2].xormask
Q2size++
}
}
for Q1size > 0 {
Q1size--
keyindexvar := Q1[Q1size]
index := keyindexvar.index
if sets1[index].count == 0 {
continue
}
hash := keyindexvar.hash
h0 := filter.geth0(hash)
h2 := filter.geth2(hash)
keyindexvar.index += filter.BlockLength
stack[stacksize] = keyindexvar
stacksize++
sets0[h0].xormask ^= hash
sets0[h0].count--
if sets0[h0].count == 1 {
Q0[Q0size].index = h0
Q0[Q0size].hash = sets0[h0].xormask
Q0size++
}
sets2[h2].xormask ^= hash
sets2[h2].count--
if sets2[h2].count == 1 {
Q2[Q2size].index = h2
Q2[Q2size].hash = sets2[h2].xormask
Q2size++
}
}
for Q2size > 0 {
Q2size--
keyindexvar := Q2[Q2size]
index := keyindexvar.index
if sets2[index].count == 0 {
continue
}
hash := keyindexvar.hash
h0 := filter.geth0(hash)
h1 := filter.geth1(hash)
keyindexvar.index += 2 * filter.BlockLength
stack[stacksize] = keyindexvar
stacksize++
sets0[h0].xormask ^= hash
sets0[h0].count--
if sets0[h0].count == 1 {
Q0[Q0size].index = h0
Q0[Q0size].hash = sets0[h0].xormask
Q0size++
}
sets1[h1].xormask ^= hash
sets1[h1].count--
if sets1[h1].count == 1 {
Q1[Q1size].index = h1
Q1[Q1size].hash = sets1[h1].xormask
Q1size++
}
}
}
if stacksize == size {
// success
break
}
sets0 = resetSets(sets0)
sets1 = resetSets(sets1)
sets2 = resetSets(sets2)
filter.Seed = splitmix64(&rngcounter)
}
stacksize := size
for stacksize > 0 {
stacksize--
ki := stack[stacksize]
val := uint8(fingerprint(ki.hash))
if ki.index < filter.BlockLength {
val ^= filter.Fingerprints[filter.geth1(ki.hash)+filter.BlockLength] ^ filter.Fingerprints[filter.geth2(ki.hash)+2*filter.BlockLength]
} else if ki.index < 2*filter.BlockLength {
val ^= filter.Fingerprints[filter.geth0(ki.hash)] ^ filter.Fingerprints[filter.geth2(ki.hash)+2*filter.BlockLength]
} else {
val ^= filter.Fingerprints[filter.geth0(ki.hash)] ^ filter.Fingerprints[filter.geth1(ki.hash)+filter.BlockLength]
}
filter.Fingerprints[ki.index] = val
}
return filter, nil
}
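// Illustrative usage sketch (not part of the original source): building a filter
// from a key set and querying it with the Contains method defined above.
func exampleXor8Usage() {
keys := []uint64{1, 2, 3, 1000}
filter, err := Populate(keys)
if err != nil {
panic(err)
}
_ = filter.Contains(3)    // always true for inserted keys
_ = filter.Contains(4242) // false with high probability (roughly 0.4% false-positive rate)
}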
|
[
0,
2,
5
] |
package main
import (
"fmt"
"io"
"net/http"
"os"
"strings"
"github.com/go-chi/chi"
)
// Got from https://stackoverflow.com/questions/49589685/good-way-to-disable-directory-listing-with-http-fileserver-in-go
//
// Creates a custom filesystem for the FileServer function so that it doesn't serve folders as a listing of files
// and instead serves a 404 error.
type CustomFilesystem struct {
http.FileSystem
readDirBatchSize int
}
func (fs *CustomFilesystem) Open(name string) (http.File, error) {
f, err := fs.FileSystem.Open(name)
if err != nil {
return nil, err
}
return &NeuteredStatFile{File: f, readDirBatchSize: fs.readDirBatchSize}, nil
}
type NeuteredStatFile struct {
http.File
readDirBatchSize int
}
func (e *NeuteredStatFile) Stat() (os.FileInfo, error) {
s, err := e.File.Stat()
if err != nil {
return nil, err
}
if s.IsDir() {
LOOP:
for {
fl, err := e.File.Readdir(e.readDirBatchSize)
switch err {
case io.EOF:
break LOOP
case nil:
for _, f := range fl {
if f.Name() == "index.html" {
return s, err
}
}
default:
return nil, err
}
}
return nil, os.ErrNotExist
}
return s, err
}
// Got from https://github.com/go-chi/chi/blob/master/_examples/fileserver/main.go
//
// FileServer conveniently sets up a http.FileServer handler to serve
// static files from a http.FileSystem.
func FileServer(router *chi.Mux, path string, root string) {
if strings.ContainsAny(path, "{}*") {
panic("FileServer does not permit URL parameters.")
}
fs := http.StripPrefix(path, http.FileServer(
&CustomFilesystem{
FileSystem: http.Dir(root),
readDirBatchSize: 2,
},
))
// redirect to / terminated urls
if path != "/" && path[len(path)-1] != '/' {
router.Get(path, http.RedirectHandler(path+"/", 301).ServeHTTP)
path += "/"
}
path += "*"
router.Get(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// check if url has GET parameters
if strings.Contains(r.RequestURI, "?") {
// trim parameters as server is not gonna parse them
r.RequestURI = r.RequestURI[:strings.LastIndex(r.RequestURI, "?")]
fmt.Println(r.RequestURI)
}
info, err := os.Stat(fmt.Sprintf("%s%s", root, r.RequestURI))
if err == nil && info.IsDir() {
_, err = os.Stat(fmt.Sprintf("%s%s/index.html", root, r.RequestURI))
}
if os.IsNotExist(err) {
router.NotFoundHandler().ServeHTTP(w, r)
} else {
w.Header().Set("Cache-Control", "max-age=3600")
fs.ServeHTTP(w, r)
}
}))
}
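// Illustrative usage sketch (not part of the original source): mounting the
// file server on a chi router. The "./static" directory and listen address are
// hypothetical placeholders.
func exampleFileServerUsage() {
router := chi.NewRouter()
FileServer(router, "/assets", "./static")
_ = http.ListenAndServe(":8080", router)
}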
func ServeZIP(w http.ResponseWriter, file io.ReadCloser) error {
w.Header().Set("Content-Type", "application/zip")
_, err := io.Copy(w, file)
return err
}
|
[
6
] |
// Copyright 2019 Kuei-chun Chen. All rights reserved.
package gox
import (
"bytes"
"crypto/md5"
"crypto/rand"
"encoding/hex"
"fmt"
"io"
"net/http"
"strings"
)
// HTTPDigest --digest
func HTTPDigest(method string, uri string, username string, password string, headers map[string]string, body ...[]byte) (*http.Response, error) {
var err error
var req *http.Request
var resp *http.Response
req, err = http.NewRequest(method, uri, nil)
if err != nil {
return resp, err
}
req.SetBasicAuth(username, password)
for k, v := range headers {
req.Header.Set(k, v)
}
resp, err = http.DefaultClient.Do(req)
if err != nil {
return resp, err
}
defer resp.Body.Close()
digest := map[string]string{}
if len(resp.Header["Www-Authenticate"]) > 0 {
wantedHeaders := []string{"nonce", "realm", "qop"}
responseHeaders := strings.Split(resp.Header["Www-Authenticate"][0], ",")
for _, r := range responseHeaders {
for _, w := range wantedHeaders {
if strings.Contains(r, w) {
digest[w] = strings.Split(r, `"`)[1]
}
}
}
}
digest["uri"] = uri
digest["method"] = method
digest["username"] = username
digest["password"] = password
ha1 := hash(digest["username"] + ":" + digest["realm"] + ":" + digest["password"])
ha2 := hash(digest["method"] + ":" + digest["uri"])
nonceCount := 00000001
b := make([]byte, 8)
io.ReadFull(rand.Reader, b)
cnonce := fmt.Sprintf("%x", b)[:16]
response := hash(fmt.Sprintf("%s:%s:%v:%s:%s:%s", ha1, digest["nonce"], nonceCount, cnonce, digest["qop"], ha2))
authorization := fmt.Sprintf(`Digest username="%s", realm="%s", nonce="%s", uri="%s", cnonce="%s", nc="%v", qop="%s", response="%s"`,
digest["username"], digest["realm"], digest["nonce"], digest["uri"], cnonce, nonceCount, digest["qop"], response)
if len(body) > 0 {
req, _ = http.NewRequest(method, uri, bytes.NewBuffer(body[0]))
}
req.Header.Set("Authorization", authorization)
req.Header.Set("Content-Type", "application/json")
return http.DefaultClient.Do(req)
}
func hash(s string) string {
h := md5.New()
io.WriteString(h, s)
return hex.EncodeToString(h.Sum(nil))
}
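// Illustrative usage sketch (not part of the original source): calling a
// digest-protected JSON endpoint. The URL and credentials are hypothetical
// placeholders.
func exampleHTTPDigestUsage() {
headers := map[string]string{"Accept": "application/json"}
resp, err := HTTPDigest("GET", "https://example.com/api/resource", "user", "secret", headers)
if err != nil || resp == nil {
return
}
defer resp.Body.Close()
}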
|
[
6
] |
package main
import (
"fmt"
"strings"
)
const COLS = "ABCDEFGHJKLMNOPQRST"
func (b Board) print(printGoStrings bool) {
fmt.Printf(" %s\n", COLS[0:b.NumCols])
for row := 1; row <= b.NumRows; row++ {
colStr := ""
for col := 1; col <= b.NumCols; col++ {
stone, exists := b.get(Point{row, col})
if !exists {
colStr += " "
} else if stone.isBlack {
colStr += "x"
} else {
colStr += "o"
}
}
fmt.Printf("%d|%s\n", row, colStr)
}
if printGoStrings {
fmt.Printf("\nGo Strings (%d)\n", len(b.Grid))
for _, gs := range b.Grid {
gs.print()
}
}
}
func (state GameState) Print() {
if state.PreviousState != nil {
fmt.Printf("%s %s\n", state.NextPlayer.other(), state.LastMove.String())
state.Board.print(true)
} else {
fmt.Println("Empty Board")
}
}
func (move Move) String() string {
if move.IsPass {
return "passes"
}
if move.IsResign {
return "resigns"
}
return fmt.Sprintf("plays [%d,%d]", move.Point.Row, move.Point.Col)
}
func (gs GoString) print() {
libCoords := gs.stoneCoords(gs.Liberties)
stoneCoords := gs.stoneCoords(gs.Stones)
fmt.Printf("{Player: %s, Liberties (%d): %s, Stones (%d): %s}\n", gs.Player, len(libCoords), strings.Join(libCoords, ", "), len(stoneCoords), strings.Join(stoneCoords, ", "))
}
func (gs GoString) stoneCoords(input map[int]map[int]bool) []string {
var stoneStr []string
for row, cols := range input {
for col := range cols {
stoneStr = append(stoneStr, fmt.Sprintf("[%d,%d]", row, col))
}
}
return stoneStr
}
func (p Player) String() string {
if p.isBlack {
return "Black"
} else {
return "White"
}
}
|
[
5
] |
package stack
// CalcStack is a small fixed-size integer stack used for calculator-style evaluation.
type CalcStack struct {
reg [10]int
Index int
}
// Result returns the value on top of the stack without removing it.
func (st *CalcStack) Result() int {
return st.reg[st.Index-1]
}
// Pop removes and returns the value on top of the stack.
func (st *CalcStack) Pop() int {
result := st.reg[st.Index-1]
st.Index = st.Index - 1
return result
}
// Push places r on top of the stack.
func (st *CalcStack) Push(r int) {
st.reg[st.Index] = r
st.Index = st.Index + 1
}
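// Illustrative usage sketch (not part of the original source): evaluating
// 3 + 4 in RPN style with the stack.
func exampleCalcStackUsage() {
var st CalcStack
st.Push(3)
st.Push(4)
st.Push(st.Pop() + st.Pop())
_ = st.Result() // 7
}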
|
[
0
] |
package topic_bit_brute_force
import (
"strconv"
"testing"
)
// [ABC079C - Train Ticket](https://atcoder.jp/contests/abc079/tasks/abc079_c)
func AnswerABC079Cその1(ABCD string) string {
// convert to an integer slice for easier handling
var A = make([]int, 4)
for i, s := range ABCD {
a, _ := strconv.Atoi(string(s))
A[i] = a
}
var N = 3
var op string
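// Each of the 2^N bit patterns chooses '+' (bit set) or '-' (bit clear) for each of the N operator positions.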
for bit := 0; bit < (1 << N); bit++ {
a0 := A[0]
sum := a0
statement := strconv.Itoa(a0)
for i := 0; i < N; i++ {
a := A[i+1]
if bit&(1<<i) > 0 {
op = "+"
sum += a
} else {
op = "-"
sum -= a
}
statement += op + strconv.Itoa(a)
}
if sum == 7 {
return statement + "=7"
}
}
return "" // 必ず 7 がつくれるのでここには到達しない
}
func TestAnswerABC079Cその1(t *testing.T) {
tests := []struct {
name string
ABCD string
want string
}{
{"入力例1", "1222", "1+2+2+2=7"},
{"入力例2", "0290", "0-2+9-0=7"}, // "0−2+9+0=7" でもOK
{"入力例3", "3242", "3+2+4-2=7"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := AnswerABC079Cその1(tt.ABCD)
if got != tt.want {
t.Errorf("got %v want %v", got, tt.want)
}
})
}
}
|
[
2
] |
package controller
import (
"time"
"nhlpool.com/service/go/nhlpool/data"
"nhlpool.com/service/go/nhlpool/store"
)
func getMatchup(season *data.Season, id string) *data.Matchup {
matchup, _ := store.GetStore().Matchup().GetMatchup(season.League, season, id)
return matchup
}
// GetMatchup Process the get matchup request
func GetMatchup(leagueID string, year int, ID string) data.GetMatchupReply {
var reply data.GetMatchupReply
league := getLeague(leagueID)
season := getSeason(year, league)
matchup := getMatchup(season, ID)
if matchup == nil {
reply.Result.Code = data.NOTFOUND
reply.Matchup = data.Matchup{}
return reply
}
reply.Result.Code = data.SUCCESS
reply.Matchup = *matchup
return reply
}
// EditMatchup Process the edit matchup request
func EditMatchup(leagueID string, year int, ID string, request data.EditMatchupRequest) data.EditMatchupReply {
var reply data.EditMatchupReply
session := store.GetSessionManager().Get(request.SessionID)
if session == nil {
reply.Result.Code = data.ACCESSDENIED
return reply
}
league := getLeague(leagueID)
season := getSeason(year, league)
matchup := getMatchup(season, ID)
if matchup == nil {
reply.Result.Code = data.NOTFOUND
return reply
}
if !session.Player.Admin {
reply.Result.Code = data.ACCESSDENIED
return reply
}
home := getTeam(request.HomeID, league)
away := getTeam(request.AwayID, league)
start, _ := time.Parse(time.RFC3339, request.Start)
if home != nil {
matchup.Home = *home
}
if away != nil {
matchup.Away = *away
}
matchup.Start = start
matchup.Round = request.Round
err := store.GetStore().Matchup().UpdateMatchup(matchup)
if err != nil {
reply.Result.Code = data.ERROR
reply.Matchup = data.Matchup{}
return reply
}
reply.Result.Code = data.SUCCESS
reply.Matchup = *matchup
return reply
}
// DeleteMatchup Process the delete matchup request
func DeleteMatchup(leagueID string, year int, ID string, request data.DeleteMatchupRequest) data.DeleteMatchupReply {
var reply data.DeleteMatchupReply
session := store.GetSessionManager().Get(request.SessionID)
if session == nil {
reply.Result.Code = data.ACCESSDENIED
return reply
}
league := getLeague(leagueID)
season := getSeason(year, league)
matchup := getMatchup(season, ID)
if matchup == nil {
reply.Result.Code = data.NOTFOUND
return reply
}
if !session.Player.Admin {
reply.Result.Code = data.ACCESSDENIED
return reply
}
store.GetStore().Matchup().DeleteMatchup(matchup)
reply.Result.Code = data.SUCCESS
return reply
}
|
[
2
] |
package config
import (
"fmt"
"io/ioutil"
"regexp"
"time"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/yaml.v2"
)
const (
GaugeValueType = "gauge"
CounterValueType = "counter"
DeviceIDRegexGroup = "deviceid"
MetricNameRegexGroup = "metricname"
)
var MQTTConfigDefaults = MQTTConfig{
Server: "tcp://127.0.0.1:1883",
TopicPath: "v1/devices/me",
DeviceIDRegex: MustNewRegexp(fmt.Sprintf("(.*/)?(?P<%s>.*)", DeviceIDRegexGroup)),
QoS: 0,
}
var CacheConfigDefaults = CacheConfig{
Timeout: 2 * time.Minute,
}
var JsonParsingConfigDefaults = JsonParsingConfig{
Separator: ".",
}
type Regexp struct {
r *regexp.Regexp
pattern string
}
func (rf *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error {
var pattern string
if err := unmarshal(&pattern); err != nil {
return err
}
r, err := regexp.Compile(pattern)
if err != nil {
return err
}
rf.r = r
rf.pattern = pattern
return nil
}
func (rf *Regexp) MarshalYAML() (interface{}, error) {
if rf == nil {
return "", nil
}
return rf.pattern, nil
}
func (rf *Regexp) Match(s string) bool {
return rf == nil || rf.r == nil || rf.r.MatchString(s)
}
// GroupValue returns the value of the given group. If the group is not part of the underlying regexp, returns the empty string.
func (rf *Regexp) GroupValue(s string, groupName string) string {
match := rf.r.FindStringSubmatch(s)
groupValues := make(map[string]string)
for i, name := range rf.r.SubexpNames() {
if len(match) > i && name != "" {
groupValues[name] = match[i]
}
}
return groupValues[groupName]
}
func (rf *Regexp) RegEx() *regexp.Regexp {
return rf.r
}
func MustNewRegexp(pattern string) *Regexp {
return &Regexp{
pattern: pattern,
r: regexp.MustCompile(pattern),
}
}
type Config struct {
JsonParsing *JsonParsingConfig `yaml:"json_parsing,omitempty"`
Metrics []MetricConfig `yaml:"metrics"`
MQTT *MQTTConfig `yaml:"mqtt,omitempty"`
Cache *CacheConfig `yaml:"cache,omitempty"`
}
type CacheConfig struct {
Timeout time.Duration `yaml:"timeout"`
}
type JsonParsingConfig struct {
Separator string `yaml:"separator"`
}
type MQTTConfig struct {
Server string `yaml:"server"`
TopicPath string `yaml:"topic_path"`
DeviceIDRegex *Regexp `yaml:"device_id_regex"`
User string `yaml:"user"`
Password string `yaml:"password"`
QoS byte `yaml:"qos"`
ObjectPerTopicConfig *ObjectPerTopicConfig `yaml:"object_per_topic_config"`
MetricPerTopicConfig *MetricPerTopicConfig `yaml:"metric_per_topic_config"`
CACert string `yaml:"ca_cert"`
ClientCert string `yaml:"client_cert"`
ClientKey string `yaml:"client_key"`
ClientID string `yaml:"client_id"`
}
const EncodingJSON = "JSON"
type ObjectPerTopicConfig struct {
Encoding string `yaml:"encoding"` // Currently only JSON is a valid value
}
type MetricPerTopicConfig struct {
MetricNameRegex *Regexp `yaml:"metric_name_regex"` // Default
}
// MetricConfig is a mapping from a metric sent on MQTT to a Prometheus metric.
type MetricConfig struct {
PrometheusName string `yaml:"prom_name"`
MQTTName string `yaml:"mqtt_name"`
SensorNameFilter Regexp `yaml:"sensor_name_filter"`
Help string `yaml:"help"`
ValueType string `yaml:"type"`
OmitTimestamp bool `yaml:"omit_timestamp"`
ConstantLabels map[string]string `yaml:"const_labels"`
StringValueMapping *StringValueMappingConfig `yaml:"string_value_mapping"`
MQTTValueScale float64 `yaml:"mqtt_value_scale"`
}
// StringValueMappingConfig defines the mapping from string to float
type StringValueMappingConfig struct {
// ErrorValue is used when no mapping is found in Map
ErrorValue *float64 `yaml:"error_value"`
Map map[string]float64 `yaml:"map"`
}
func (mc *MetricConfig) PrometheusDescription() *prometheus.Desc {
return prometheus.NewDesc(
mc.PrometheusName, mc.Help, []string{"sensor", "topic"}, mc.ConstantLabels,
)
}
func (mc *MetricConfig) PrometheusValueType() prometheus.ValueType {
switch mc.ValueType {
case GaugeValueType:
return prometheus.GaugeValue
case CounterValueType:
return prometheus.CounterValue
default:
return prometheus.UntypedValue
}
}
func LoadConfig(configFile string) (Config, error) {
configData, err := ioutil.ReadFile(configFile)
if err != nil {
return Config{}, err
}
var cfg Config
if err = yaml.UnmarshalStrict(configData, &cfg); err != nil {
return cfg, err
}
if cfg.MQTT == nil {
cfg.MQTT = &MQTTConfigDefaults
}
if cfg.Cache == nil {
cfg.Cache = &CacheConfigDefaults
}
if cfg.JsonParsing == nil {
cfg.JsonParsing = &JsonParsingConfigDefaults
}
if cfg.MQTT.DeviceIDRegex == nil {
cfg.MQTT.DeviceIDRegex = MQTTConfigDefaults.DeviceIDRegex
}
var validRegex bool
for _, name := range cfg.MQTT.DeviceIDRegex.RegEx().SubexpNames() {
if name == DeviceIDRegexGroup {
validRegex = true
}
}
if !validRegex {
return Config{}, fmt.Errorf("device id regex %q does not contain required regex group %q", cfg.MQTT.DeviceIDRegex.pattern, DeviceIDRegexGroup)
}
if cfg.MQTT.ObjectPerTopicConfig != nil && cfg.MQTT.MetricPerTopicConfig != nil {
return Config{}, fmt.Errorf("only one of object_per_topic_config and metric_per_topic_config can be specified")
}
if cfg.MQTT.ObjectPerTopicConfig == nil && cfg.MQTT.MetricPerTopicConfig == nil {
cfg.MQTT.ObjectPerTopicConfig = &ObjectPerTopicConfig{
Encoding: EncodingJSON,
}
}
if cfg.MQTT.MetricPerTopicConfig != nil {
validRegex = false
for _, name := range cfg.MQTT.MetricPerTopicConfig.MetricNameRegex.RegEx().SubexpNames() {
if name == MetricNameRegexGroup {
validRegex = true
}
}
if !validRegex {
return Config{}, fmt.Errorf("metric name regex %q does not contain required regex group %q", cfg.MQTT.DeviceIDRegex.pattern, MetricNameRegexGroup)
}
}
return cfg, nil
}
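// Illustrative usage sketch (not part of the original source): loading and
// inspecting a configuration file. The path is a hypothetical placeholder.
func exampleLoadConfigUsage() {
cfg, err := LoadConfig("/etc/mqtt2prometheus/config.yaml")
if err != nil {
panic(err)
}
fmt.Println(cfg.MQTT.Server, cfg.Cache.Timeout)
}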
|
[
6
] |
package main
import (
"log"
lua "github.com/yuin/gopher-lua"
)
// LuaDouble ...
func LuaDouble(L *lua.LState, a lua.LValue) int {
er := L.CallByParam(lua.P{Fn: L.GetGlobal("double"), NRet: 1, Protect: true}, a)
if er != nil {
log.Println(er)
return 1
}
ret := L.Get(-1)
L.Pop(1)
num, ok := ret.(lua.LNumber)
if ok {
return int(num)
}
return -1
}
// LuaMax ...
func LuaMax(L *lua.LState, a, b lua.LValue) int {
er := L.CallByParam(lua.P{Fn: L.GetGlobal("max"), NRet: 1, Protect: true}, a, b)
if er != nil {
log.Println(er)
return 1
}
v := L.Get(-1)
L.Pop(1)
max, ok := v.(lua.LNumber)
if ok {
return int(max)
}
return -1
}
// LuaMin ...
func LuaMin(L *lua.LState, a, b lua.LValue) int {
er := L.CallByParam(lua.P{Fn: L.GetGlobal("min"), NRet: 1, Protect: true}, a, b)
if er != nil {
log.Println(er)
return 1
}
v := L.Get(-1)
L.Pop(1)
min, ok := v.(lua.LNumber)
if ok {
return int(min)
}
return -1
}
// LuaMaxmin ...
func LuaMaxmin(L *lua.LState, a, b lua.LValue) (max int, min int) {
er := L.CallByParam(lua.P{Fn: L.GetGlobal("maxmin"), NRet: 2, Protect: true}, a, b)
if er != nil {
log.Println(er)
return 0, 0
}
n1 := L.Get(-2)
n2 := L.Get(-1)
L.Pop(2)
v1, ok := n1.(lua.LNumber)
if ok {
max = int(v1)
}
v2, ok := n2.(lua.LNumber)
if ok {
min = int(v2)
}
return max, min
}
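// Illustrative usage sketch (not part of the original source): the helpers above
// expect Lua functions named double, max, min and maxmin to be defined in the
// state. The Lua snippet below is a hypothetical example.
func exampleLuaUsage() {
L := lua.NewState()
defer L.Close()
if err := L.DoString(`function double(a) return a * 2 end`); err != nil {
log.Println(err)
return
}
_ = LuaDouble(L, lua.LNumber(21)) // 42
}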
|
[
1,
2,
6
] |
package DriversService
import (
"github.com/spf13/viper"
"google.golang.org/grpc"
"net"
"snap/AuthService/AuthServiceImplementation"
GrpcServices3 "snap/AuthService/GrpcServices"
"snap/DriversService/DriversLocationService"
"snap/DriversService/GrpcServices"
GrpcServices2 "snap/TravelersService/GrpcServices"
"snap/TravelersService/TravelersServiceImplementation"
)
var Configs *viper.Viper
func InitiateGrpcServices() {
listener, err := net.Listen("tcp", Configs.GetString("listen_address")+":"+Configs.GetString("port"))
if err != nil {
panic(err)
}
AuthListener, err := net.Listen("tcp", "192.168.1.200:6167")
if err != nil {
panic(err)
}
server := grpc.NewServer(grpc.UnaryInterceptor(DriversLocationService.Authenticate))
GrpcServices.RegisterDriversLocationReportServer(server, &DriversLocationService.DriversLocationService{})
GrpcServices2.RegisterTravelersServiceServer(server, &TravelersServiceImplementation.TravelersService{})
AuthServer := grpc.NewServer()
GrpcServices3.RegisterAuthServer(AuthServer, &AuthServiceImplementation.AuthServiceImplementation{})
go func() {
if err := server.Serve(listener); err != nil {
panic(err)
}
}()
go func() {
if err := AuthServer.Serve(AuthListener); err != nil {
panic(err)
}
}()
}
|
[
7
] |
package scraper
import (
"context"
"github.com/bowd/quip-exporter/interfaces"
"github.com/bowd/quip-exporter/repositories"
"github.com/bowd/quip-exporter/types"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"path"
)
type CurrentUserNode struct {
*BaseNode
currentUser *types.QuipUser
onlyPrivate bool
}
func NewCurrentUserNode(ctx context.Context, onlyPrivate bool) interfaces.INode {
wg, ctx := errgroup.WithContext(ctx)
return &CurrentUserNode{
BaseNode: &BaseNode{
logger: logrus.WithField("module", types.NodeTypes.CurrentUser),
path: "/",
wg: wg,
ctx: ctx,
},
onlyPrivate: onlyPrivate,
}
}
func (node CurrentUserNode) Type() types.NodeType {
return types.NodeTypes.CurrentUser
}
func (node CurrentUserNode) ID() string {
return "root"
}
func (node CurrentUserNode) Path() string {
return path.Join("data", "root.json")
}
func (node CurrentUserNode) Children() []interfaces.INode {
children := make([]interfaces.INode, 0, 0)
for _, folderID := range node.currentUser.Folders(node.onlyPrivate) {
children = append(children, NewFolderNode(node.ctx, node.path, folderID))
}
return children
}
func (node *CurrentUserNode) Process(repo interfaces.IRepository, quip interfaces.IQuipClient, search interfaces.ISearchIndex) error {
var currentUser *types.QuipUser
var err error
currentUser, err = repo.GetCurrentUser(node)
if err != nil && repositories.IsNotFoundError(err) {
if currentUser, err = quip.GetCurrentUser(); err != nil {
return err
}
if err := repo.SaveNodeJSON(node, currentUser); err != nil {
return err
}
} else if err != nil {
return err
} else {
node.logger.Debugf("loaded from repository")
}
node.currentUser = currentUser
return nil
}
|
[
5
] |
/*
* go-stable-toposort
*
* Copyright (C) 2018 SOFe
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package stableToposort
import (
"sort"
)
type Node interface {
Before(other Node) bool
}
type nodeNumber int
type edge [2]nodeNumber
type edgeNumber int
type edgeIndex struct {
slice []edge // all edges in insertion order, indexed by edgeNumber
index [2]map[nodeNumber]map[nodeNumber]edgeNumber
}
func newEdgeIndex() *edgeIndex {
index := &edgeIndex{
slice: make([]edge, 0),
}
for i := range index.index {
index.index[i] = map[nodeNumber]map[nodeNumber]edgeNumber{}
}
return index
}
func (index *edgeIndex) add(edge edge) edgeNumber {
number := edgeNumber(len(index.slice))
index.slice = append(index.slice, edge)
for pos := 0; pos < 2; pos++ {
if _, exists := index.index[pos][edge[pos]]; !exists {
index.index[pos][edge[pos]] = make(map[nodeNumber]edgeNumber)
}
index.index[pos][edge[pos]][edge[1-pos]] = number
}
return number
}
func (index edgeIndex) removeIndex(edge edge) {
for pos := range [...]int{0, 1} {
delete(index.index[pos][edge[pos]], edge[1-pos])
if len(index.index[pos][edge[pos]]) == 0 {
delete(index.index[pos], edge[pos])
}
}
}
// Sort topologically sorts nodes using Kahn's algorithm with a stable output order; if a cycle is detected it returns a nil output together with the nodes involved in the cycle.
func Sort(nodes []Node) (output []Node, cycle []Node) {
edges := newEdgeIndex()
var i nodeNumber
for i = 0; int(i) < len(nodes); i++ {
var j nodeNumber
for j = i + 1; int(j) < len(nodes); j++ {
ij := nodes[i].Before(nodes[j])
ji := nodes[j].Before(nodes[i])
if ij && ji {
return nil, []Node{nodes[i], nodes[j]}
}
if ij {
edges.add(edge{i, j})
} else if ji {
edges.add(edge{j, i})
}
}
}
output = make([]Node, 0, len(nodes))
roots := make([]nodeNumber, 0, len(nodes))
{
for mInt := range nodes {
m := nodeNumber(mInt)
if _, hasBefore := edges.index[1][m]; !hasBefore {
roots = append(roots, m)
}
}
}
for len(roots) > 0 {
n := roots[0]
roots = roots[1:]
output = append(output, nodes[n])
var mSlice = make([]nodeNumber, 0, len(edges.index[0][n]))
for m := range edges.index[0][n] {
mSlice = append(mSlice, m)
}
sort.SliceStable(mSlice, func(i, j int) bool {
return mSlice[i] < mSlice[j]
}) // stabilize the output because we are using a map
for _, m := range mSlice {
e := edges.index[0][n][m]
edges.removeIndex(edges.slice[e])
if _, hasBefore := edges.index[1][m]; !hasBefore {
roots = append(roots, m)
}
}
}
for pos := 0; pos < 2; pos++ {
if len(edges.index[pos]) > 0 {
cycle = make([]Node, 0, len(edges.index[0]))
for n := range edges.index[pos] {
cycle = append(cycle, nodes[n])
}
return nil, cycle
}
}
return output, nil
}
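// Illustrative usage sketch (not part of the original source): a minimal Node
// implementation and a two-element sort. The exampleTask type is hypothetical.
type exampleTask struct {
name string
deps []string
}
func (t exampleTask) Before(other Node) bool {
o := other.(exampleTask)
for _, d := range o.deps {
if d == t.name {
return true
}
}
return false
}
func exampleSortUsage() {
nodes := []Node{exampleTask{name: "b", deps: []string{"a"}}, exampleTask{name: "a"}}
sorted, cycle := Sort(nodes)
_ = sorted // [a b]
_ = cycle  // nil
}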
|
[
6
] |
package lib
import (
"golang.org/x/crypto/ssh"
"os/user"
"path/filepath"
"io/ioutil"
"log"
"strings"
)
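// LoadSrcSSHConf builds an ssh.ClientConfig for sshUser using the private key at
// keypath ("~" is expanded to the current user's home directory). Note that host
// key verification is disabled via ssh.InsecureIgnoreHostKey.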
func LoadSrcSSHConf(sshUser string, keypath string) *ssh.ClientConfig {
usr, _ := user.Current()
keypath = strings.Replace(keypath, "~", usr.HomeDir, 1)
absKeyPath, _ := filepath.Abs(keypath)
key, err := ioutil.ReadFile(absKeyPath)
if err != nil {
log.Fatalf("unable to read private key: %v", err)
}
signer, err := ssh.ParsePrivateKey(key)
if err != nil {
log.Fatalf("unable to parse private key: %v", err)
}
config := &ssh.ClientConfig{
User: sshUser,
Auth: []ssh.AuthMethod{
ssh.PublicKeys(signer),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
return config
}
|
[
6
] |
package services
import (
"bytes"
"context"
"github.com/reddec/trusted-cgi/api"
"github.com/reddec/trusted-cgi/application"
"github.com/reddec/trusted-cgi/stats"
"github.com/reddec/trusted-cgi/types"
)
func NewLambdaSrv(cases application.Cases, tracker stats.Reader) *lambdaSrv {
return &lambdaSrv{
cases: cases,
tracker: tracker,
}
}
type lambdaSrv struct {
cases application.Cases
tracker stats.Reader
}
func (srv *lambdaSrv) Upload(ctx context.Context, token *api.Token, uid string, tarGz []byte) (bool, error) {
fn, err := srv.cases.Platform().FindByUID(uid)
if err != nil {
return false, err
}
err = fn.Lambda.SetContent(bytes.NewReader(tarGz))
if err != nil {
return false, err
}
return true, nil
}
func (srv *lambdaSrv) Download(ctx context.Context, token *api.Token, uid string) ([]byte, error) {
var out bytes.Buffer
fn, err := srv.cases.Platform().FindByUID(uid)
if err != nil {
return nil, err
}
err = fn.Lambda.Content(&out)
return out.Bytes(), err
}
func (srv *lambdaSrv) Push(ctx context.Context, token *api.Token, uid string, file string, content []byte) (bool, error) {
fn, err := srv.cases.Platform().FindByUID(uid)
if err != nil {
return false, err
}
err = fn.Lambda.WriteFile(file, bytes.NewReader(content))
return err == nil, err
}
func (srv *lambdaSrv) Pull(ctx context.Context, token *api.Token, uid string, file string) ([]byte, error) {
fn, err := srv.cases.Platform().FindByUID(uid)
if err != nil {
return nil, err
}
var out bytes.Buffer
err = fn.Lambda.ReadFile(file, &out)
return out.Bytes(), err
}
func (srv *lambdaSrv) Remove(ctx context.Context, token *api.Token, uid string) (bool, error) {
err := srv.cases.Remove(uid)
return err == nil, err
}
func (srv *lambdaSrv) Files(ctx context.Context, token *api.Token, uid string, dir string) ([]types.File, error) {
fn, err := srv.cases.Platform().FindByUID(uid)
if err != nil {
return nil, err
}
return fn.Lambda.ListFiles(dir)
}
func (srv *lambdaSrv) Info(ctx context.Context, token *api.Token, uid string) (*application.Definition, error) {
fn, err := srv.cases.Platform().FindByUID(uid)
if err != nil {
return nil, err
}
return fn, nil
}
func (srv *lambdaSrv) Update(ctx context.Context, token *api.Token, uid string, manifest types.Manifest) (*application.Definition, error) {
fn, err := srv.cases.Platform().FindByUID(uid)
if err != nil {
return nil, err
}
if err := manifest.Validate(); err != nil {
return nil, err
}
err = fn.Lambda.SetManifest(manifest)
if err != nil {
return nil, err
}
fn.Manifest = manifest
return fn, nil
}
func (srv *lambdaSrv) CreateFile(ctx context.Context, token *api.Token, uid string, path string, dir bool) (bool, error) {
fn, err := srv.cases.Platform().FindByUID(uid)
if err != nil {
return false, err
}
if dir {
err = fn.Lambda.EnsureDir(path)
} else {
err = fn.Lambda.WriteFile(path, bytes.NewBufferString(""))
}
return err == nil, err
}
func (srv *lambdaSrv) RemoveFile(ctx context.Context, token *api.Token, uid string, path string) (bool, error) {
fn, err := srv.cases.Platform().FindByUID(uid)
if err != nil {
return false, err
}
err = fn.Lambda.RemoveFile(path)
return err == nil, err
}
func (srv *lambdaSrv) RenameFile(ctx context.Context, token *api.Token, uid string, oldPath, newPath string) (bool, error) {
fn, err := srv.cases.Platform().FindByUID(uid)
if err != nil {
return false, err
}
err = fn.Lambda.RenameFile(oldPath, newPath)
return err == nil, err
}
func (srv *lambdaSrv) Stats(ctx context.Context, token *api.Token, uid string, limit int) ([]stats.Record, error) {
return srv.tracker.LastByUID(uid, limit)
}
func (srv *lambdaSrv) Actions(ctx context.Context, token *api.Token, uid string) ([]string, error) {
fn, err := srv.cases.Platform().FindByUID(uid)
if err != nil {
return nil, err
}
return fn.Lambda.Actions()
}
func (srv *lambdaSrv) Invoke(ctx context.Context, token *api.Token, uid string, action string) (string, error) {
fn, err := srv.cases.Platform().FindByUID(uid)
if err != nil {
return "", err
}
var out bytes.Buffer
err = srv.cases.Platform().Do(ctx, fn.Lambda, action, 0, &out)
return out.String(), err
}
func (srv *lambdaSrv) Link(ctx context.Context, token *api.Token, uid string, alias string) (*application.Definition, error) {
return srv.cases.Platform().Link(uid, alias)
}
func (srv *lambdaSrv) Unlink(ctx context.Context, token *api.Token, alias string) (*application.Definition, error) {
return srv.cases.Platform().Unlink(alias)
}
|
[
6
] |
package timRestAPI
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"strconv"
"strings"
)
const (
sdkappid = 1400048262
identifier = "admin"
usersig = "eJxlj11LwzAYhe-7K0JvK5qkHxRhF3PMVo0Xad2Y3pTYpO5F*7E0rivD-*6sAwOe2*c5HM7RQQi5Tyy-FGXZfjamMGOnXHSNXOxe-MGuA1kIU-ha-oPq0IFWhaiM0hMkYRhSjG0HpGoMVHA2hKyhsXAv34tp47cfnMpBTCNqK-A2wcclX9ylPFpsdHWVb-eBZFmWP6hArEjcxnPZsdeP9bBpDymvPMrmcLNNa6JGM6xXCS13Owb3L2TwuP-MhefdJrzny3BMcJ9pPptZkwZqdT5EMI5iEoUW3SvdQ9tMAsUnhfr4J67z5XwD2mxccg__"
)
type TimRestAPI struct {
sdkappid int
identifier string
usersig string
}
func CreateRestAPI() *TimRestAPI {
return &TimRestAPI{
sdkappid,
identifier,
usersig,
}
}
func (timRestAPI *TimRestAPI) api(serviceName string, cmdName string, reqData []byte) string {
urlPart := []string{"https://console.tim.qq.com/v4/", serviceName, "/", cmdName, "?usersig=",
timRestAPI.usersig, "&identifier=", timRestAPI.identifier, "&sdkappid=", strconv.Itoa(timRestAPI.sdkappid),
"&random=", strconv.Itoa(int(rand.Int31())), "&contenttype=json"}
url := strings.Join(urlPart, "")
bodyType := "application/json;charset=utf-8"
req := bytes.NewBuffer(reqData)
resp, err := http.Post(url, bodyType, req)
if err != nil {
fmt.Println(err)
return ""
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
fmt.Println(string(body))
return string(body)
}
func (timRestAPI *TimRestAPI) AccountImport(identifier, nick, faceUrl string) {
msg := struct{ Identifier, Nick, FaceUrl string }{identifier, nick, faceUrl}
if reqData, err := json.Marshal(msg); err != nil {
fmt.Println(err)
} else {
timRestAPI.api("im_open_login_svc", "account_import", reqData)
}
}
func (timRestAPI *TimRestAPI) ProfilePortraitSet(accountId, newName string) {
msg := struct {
From_Account string
ProfileItem []struct{ Tag, Value string }
}{accountId, []struct{ Tag, Value string }{{"Tag_Profile_IM_Nick", newName}}}
if reqData, err := json.Marshal(msg); err != nil {
fmt.Println(err)
} else {
timRestAPI.api("profile", "portrait_set", reqData)
}
}
func (timRestAPI *TimRestAPI) GroupCreateGroup(groupType, groupName, ownerId string) {
msg := struct{ Type, Name, Owner_Account string }{groupType, groupName, ownerId}
if reqData, err := json.Marshal(msg); err != nil {
fmt.Println(err)
} else {
timRestAPI.api("group_open_http_svc", "create_group", reqData)
}
}
func (timRestAPI *TimRestAPI) GroupAddGroupMember(groupId, memberId string, silence int) {
msg := struct {
GroupId string
MemberList []struct{ Member_Account string }
Silence int
}{groupId, []struct{ Member_Account string }{{memberId}}, silence}
if reqData, err := json.Marshal(msg); err != nil {
fmt.Println(err)
} else {
timRestAPI.api("group_open_http_svc", "add_group_member", reqData)
}
}
func (timRestAPI *TimRestAPI) GroupDeleteGroupMember(groupId, memberId string, silence int) {
msg := struct {
GroupId string
MemberToDel_Account []string
Silence int
}{groupId, []string{memberId}, silence}
if reqData, err := json.Marshal(msg); err != nil {
fmt.Println(err)
} else {
timRestAPI.api("group_open_http_svc", "delete_group_member", reqData)
}
}
func (timRestAPI *TimRestAPI) GroupDestroyGroup(groupId string) {
msg := struct{ GroupId string }{groupId}
if reqData, err := json.Marshal(msg); err != nil {
fmt.Println(err)
} else {
timRestAPI.api("group_open_http_svc", "destroy_group", reqData)
}
}
func (timRestAPI *TimRestAPI) GroupSendGroupMsg(accountId, groupId, textContent string) {
msg := struct {
GroupId string
From_Account string
Random int32
MsgBody []struct {
MsgType string
MsgContent struct{ Text string }
}
}{groupId, accountId, rand.Int31(), []struct {
MsgType string
MsgContent struct{ Text string }
}{{"TIMTextElem", struct{ Text string }{textContent}}}}
if reqData, err := json.Marshal(msg); err != nil {
fmt.Println(err)
} else {
timRestAPI.api("group_open_http_svc", "send_group_msg", reqData)
}
}
func (timRestAPI *TimRestAPI) GroupSendGroupSystemNotification(groupId, textContent string) {
msg := struct{ GroupId, Content string }{groupId, textContent}
if reqData, err := json.Marshal(msg); err != nil {
fmt.Println(err)
} else {
timRestAPI.api("group_open_http_svc", "send_group_system_notification", reqData)
}
}
type GroupIdList []struct {
GroupId string
Name string
}
func (timRestAPI *TimRestAPI) GroupGetJoinedGroupList(account_id string) GroupIdList {
msg := struct {
Member_Account string
ResponseFilter struct {
GroupBaseInfoFilter []string
}
}{account_id, struct {
GroupBaseInfoFilter []string
}{[]string{"Name"}}}
if reqData, err := json.Marshal(msg); err != nil {
fmt.Println(err)
} else {
ret := timRestAPI.api("group_open_http_svc", "get_joined_group_list", reqData)
type GroupRes struct {
ActionStatus string
ErrorInfo string
ErrorCode int
TotalCount int
GroupIdList []struct {
GroupId string
Name string
}
}
retObj := new(GroupRes)
json.Unmarshal([]byte(ret), retObj)
if retObj.ActionStatus == "OK" {
// groupIDs := []string{}
// for _, groupItem := range retObj.GroupIdList {
// groupIDs = append(groupIDs, groupItem.GroupId)
// }
// return groupIDs
return (GroupIdList)(retObj.GroupIdList)
}
}
return nil
}
func (timRestAPI *TimRestAPI) GroupGetAppidGroupList(limit int) []string {
msg := struct{ Limit int }{limit}
if reqData, err := json.Marshal(msg); err != nil {
fmt.Println(err)
} else {
ret := timRestAPI.api("group_open_http_svc", "get_appid_group_list", reqData)
type GroupRes struct {
ActionStatus string
ErrorInfo string
ErrorCode int
TotalCount int
GroupIdList []struct{ GroupId string }
Next int
}
retObj := new(GroupRes)
json.Unmarshal([]byte(ret), retObj)
if retObj.ActionStatus == "OK" {
groupIDs := []string{}
for _, groupItem := range retObj.GroupIdList {
groupIDs = append(groupIDs, groupItem.GroupId)
}
return groupIDs
}
}
return nil
}
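// Illustrative usage sketch (not part of the original source): importing an
// account and sending a group message. The account and group IDs below are
// hypothetical placeholders.
func exampleTimRestAPIUsage() {
api := CreateRestAPI()
api.AccountImport("user1", "User One", "")
api.GroupSendGroupMsg("user1", "@TGS#1234567", "hello")
}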
|
[
6
] |
package main
import (
"fmt"
"math"
)
func main() {
fmt.Println(reverse(123))
fmt.Println(reverse(-123))
fmt.Println(reverse(120))
fmt.Println(reverse(1534236469))
fmt.Println(reverse(-2147483648))
fmt.Println(math.MaxInt32)
}
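// reverse reverses the decimal digits of a 32-bit signed integer and returns 0 on
// overflow. The range check after the loop is sufficient only because int is
// 64 bits on the target platform, so the intermediate value cannot overflow.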
func reverse(x int) int {
ret := 0
for x != 0 {
a := x % 10
ret = ret*10 + a
x = x / 10
}
if ret > math.MaxInt32 || ret < math.MinInt32 {
return 0
}
return ret
}
|
[
0
] |
package handlers
import (
"net/http"
"strings"
"github.com/denouche/go-api-skeleton/middlewares"
"github.com/denouche/go-api-skeleton/storage/dao"
dbFake "github.com/denouche/go-api-skeleton/storage/dao/fake" // DAO IN MEMORY
dbMock "github.com/denouche/go-api-skeleton/storage/dao/mock"
"github.com/denouche/go-api-skeleton/storage/dao/mongodb" // DAO MONGO
"github.com/denouche/go-api-skeleton/storage/dao/postgresql" // DAO PG
"github.com/denouche/go-api-skeleton/storage/validators"
"github.com/denouche/go-api-skeleton/utils"
"github.com/denouche/go-api-skeleton/utils/httputils"
"github.com/gin-gonic/gin"
"gopkg.in/go-playground/validator.v9"
)
const (
baseURI = ""
)
var (
ApplicationName = ""
ApplicationVersion = "dev"
ApplicationGitHash = ""
ApplicationBuildDate = ""
)
type Config struct {
Mock bool
DBInMemory bool // DAO IN MEMORY
DBInMemoryImportFile string // DAO IN MEMORY
DBConnectionURI string
DBName string
Port int
LogLevel string
LogFormat string
}
type Context struct {
db dao.Database
validator *validator.Validate
}
func NewContext(config *Config) *Context {
hc := &Context{}
if config.Mock {
hc.db = dbMock.NewDatabaseMock()
} else if config.DBInMemory { // DAO IN MEMORY
hc.db = dbFake.NewDatabaseFake(config.DBInMemoryImportFile) // DAO IN MEMORY
} else if strings.HasPrefix(config.DBConnectionURI, "postgresql://") { // DAO PG
hc.db = postgresql.NewDatabasePostgreSQL(config.DBConnectionURI) // DAO PG
} else if strings.HasPrefix(config.DBConnectionURI, "mongodb://") { // DAO MONGO
hc.db = mongodb.NewDatabaseMongoDB(config.DBConnectionURI, config.DBName) // DAO MONGO
} else {
utils.GetLogger().Fatal("no db connection uri given or not handled, and no db in memory mode enabled, exiting")
}
hc.validator = validators.NewValidator()
return hc
}
func NewRouter(hc *Context) *gin.Engine {
gin.SetMode(gin.ReleaseMode)
router := gin.New()
router.HandleMethodNotAllowed = true
router.Use(gin.Recovery())
router.Use(middlewares.GetLoggerMiddleware())
router.Use(middlewares.GetHTTPLoggerMiddleware())
handleAPIRoutes(hc, router)
handleCORSRoutes(hc, router)
return router
}
func handleCORSRoutes(hc *Context, router *gin.Engine) {
public := router.Group(baseURI)
public.Handle(http.MethodOptions, "/_health", hc.GetOptionsHandler(httputils.AllowedHeaders, http.MethodGet))
public.Handle(http.MethodOptions, "/openapi", hc.GetOptionsHandler(httputils.AllowedHeaders, http.MethodGet))
// start: template routes
public.Handle(http.MethodOptions, "/templates", hc.GetOptionsHandler(httputils.AllowedHeaders, http.MethodGet, http.MethodPost))
public.Handle(http.MethodOptions, "/templates/:id", hc.GetOptionsHandler(httputils.AllowedHeaders, http.MethodGet, http.MethodPut, http.MethodDelete))
// end: template routes
}
func handleAPIRoutes(hc *Context, router *gin.Engine) {
public := router.Group(baseURI)
public.Use(middlewares.GetCORSMiddlewareForOthersHTTPMethods())
public.Handle(http.MethodGet, "/_health", hc.GetHealth)
public.Handle(http.MethodGet, "/openapi", hc.GetOpenAPISchema)
if dbInMemory, ok := hc.db.(*dbFake.DatabaseFake); ok { // DAO IN MEMORY
// db in memory mode, add export endpoint // DAO IN MEMORY
public.Handle(http.MethodGet, "/export", func(c *gin.Context) { // DAO IN MEMORY
httputils.JSON(c.Writer, http.StatusOK, dbInMemory.Export()) // DAO IN MEMORY
}) // DAO IN MEMORY
} // DAO IN MEMORY
secured := public.Group("/")
// you can add an authentication middleware here
// start: template routes
secured.Handle(http.MethodGet, "/templates", hc.GetAllTemplates)
secured.Handle(http.MethodPost, "/templates", hc.CreateTemplate)
secured.Handle(http.MethodGet, "/templates/:id", hc.GetTemplate)
secured.Handle(http.MethodPut, "/templates/:id", hc.UpdateTemplate)
secured.Handle(http.MethodDelete, "/templates/:id", hc.DeleteTemplate)
// end: template routes
}
|
[
5
] |
package ilasm
import (
"github.com/strict-lang/sdk/pkg/compiler/grammar/token"
"github.com/strict-lang/sdk/pkg/compiler/grammar/tree"
"github.com/strict-lang/sdk/pkg/compiler/scope"
"github.com/strict-lang/sdk/pkg/compiler/typing"
)
func resolveClassOfExpression(expression tree.Expression) *Class {
return nil
}
func translateClass(class typing.Type) *Class {
return nil
}
func (generation *Generation) EmitIdentifier(identifier *tree.Identifier) {
if field, ok := scope.AsFieldSymbol(identifier.Binding()); ok {
generation.EmitField(field)
}
}
func (generation *Generation) EmitField(field *scope.Field) {
switch field.Kind {
case scope.MemberField:
generation.EmitMemberField(field)
case scope.ConstantField:
generation.EmitConstantField(field)
case scope.VariableField:
generation.emitVariableFieldLoad(field)
case scope.ParameterField:
generation.emitParameterFieldLoad(field)
}
}
func (generation *Generation) emitVariableFieldLoad(field *scope.Field) {
generation.emitLocalFieldLoad(field)
}
func (generation *Generation) emitParameterFieldLoad(field *scope.Field) {
generation.emitLocalFieldLoad(field)
}
func (generation *Generation) emitLocalFieldLoad(field *scope.Field) {
class := translateClass(field.Class.ActualClass)
location := createLocationOfField(class, field)
if err := location.EmitLoad(generation.code); err != nil {
panic("failed to emit variable field")
}
}
func createLocationOfField(class *Class, field *scope.Field) StorageLocation {
return &VariableLocation{
Variable: &VirtualVariable{
Name: field.Name(),
Class: class,
},
Parameter: field.Kind == scope.ParameterField,
}
}
func (generation *Generation) EmitConstantField(field *scope.Field) {
// TODO: Support constant values
panic("Constant values are not supported")
}
func (generation *Generation) EmitMemberField(field *scope.Field) {
location := generation.createMemberFieldLocation(field)
if err := location.EmitLoad(generation.code); err != nil {
panic("failed to load member field")
}
}
func (generation *Generation) createMemberFieldLocation(field *scope.Field) StorageLocation {
valueClass := translateClass(field.Class.ActualClass)
enclosingClass := translateClass(field.EnclosingClass.ActualClass)
return &MemberLocation{
Field: MemberField{
Name: field.Name(),
Class: valueClass,
EnclosingClass: enclosingClass,
},
// TODO: Implement this using FieldSelectExpressions. Currently, we can't figure
// out which StorageLocation the targeted field has, unless it is the own class.
InstanceLocation: createOwnReferenceLocation(generation.currentClass),
}
}
func (generation *Generation) EmitNumberLiteral(number *tree.NumberLiteral) {
if number.IsFloat() {
generation.code.PushNumberConstant(Float, number.Value)
} else {
if constant, ok := number.AsInt(); ok {
generation.code.PushConstantInt(constant)
} else {
generation.code.PushNumberConstant(Int, number.Value)
}
}
}
func (generation *Generation) EmitExpression(expression tree.Expression) {}
func (generation *Generation) EmitStringLiteral(literal *tree.StringLiteral) {
generation.code.PushStringConstant(literal.Value)
}
func (generation *Generation) EmitBinaryExpression(binary *tree.BinaryExpression) {
generation.EmitExpression(binary.LeftOperand)
generation.EmitExpression(binary.RightOperand)
class := resolveClassOfExpression(binary)
generation.EmitBinaryOperation(binaryOperation{
operator: binary.Operator,
operandClass: class,
})
}
type binaryOperation struct {
operator token.Operator
operandClass *Class
}
func (generation *Generation) EmitBinaryOperation(operation binaryOperation) {
if emitter, ok := binaryOperationEmitters[operation.operator]; ok {
emitter(generation.code, operation)
} else {
panic("unsupported binary operation")
}
}
type binaryOperationEmitter func(code *BlockBuilder, operation binaryOperation)
var binaryOperationEmitters = map[token.Operator]binaryOperationEmitter{
token.AddOperator: func(code *BlockBuilder, operation binaryOperation) {
code.EmitAdd(operation.operandClass)
},
token.SubOperator: func(code *BlockBuilder, operation binaryOperation) {
code.EmitSubtraction(operation.operandClass)
},
token.MulOperator: func(code *BlockBuilder, operation binaryOperation) {
code.EmitMultiplication(operation.operandClass)
},
token.DivOperator: func(code *BlockBuilder, operation binaryOperation) {
code.EmitDivision(operation.operandClass)
},
}
|
[
1
] |
package main
import "fmt"
func main() {
var A, B int
fmt.Scan(&A, &B)
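// A and B appear to be the 8% and 10% consumption-tax amounts; the ranges
// below bracket the prices whose truncated tax equals A (resp. B), and the
// answer is the smallest price lying in both ranges.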
low8 := int(float64(A) / 0.08)
high8 := int((float64(A) + 0.9) / 0.08)
low10 := int(float64(B) / 0.1)
high10 := int((float64(B) + 0.9) / 0.1)
if high8 < low10 || high10 < low8 {
fmt.Println(-1)
} else if low8 < low10 {
fmt.Println(low10)
} else {
fmt.Println(low8)
}
// fmt.Println(low8, high8, low10, high10) // debug output, not part of the answer
}
|
[
5
] |
package main
import (
"bytes"
"flag"
"go/ast"
"go/parser"
"go/printer"
"go/token"
"log"
)
var path = flag.String("path", "analyzing.go", "The path to the file to parse and examine")
func funcDeclToString(decl *ast.FuncDecl) string {
var buffer bytes.Buffer
var body *ast.BlockStmt
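// Temporarily detach the body so printer.Fprint emits only the function
// signature, then restore it afterwards.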
body, decl.Body = decl.Body, nil
printer.Fprint(&buffer, token.NewFileSet(), decl)
decl.Body = body
return buffer.String()
}
type ComplexityCalculator struct {
Name string
Complexity int
}
func (cc *ComplexityCalculator) Visit(node ast.Node) ast.Visitor {
switch exp := node.(type) {
case *ast.IfStmt, *ast.CaseClause:
cc.Complexity++
case *ast.BinaryExpr:
switch exp.Op {
case token.LAND, token.LOR:
cc.Complexity++
}
case *ast.ForStmt:
if exp.Cond != nil {
cc.Complexity++
}
}
return cc
}
type FuncVisitor struct {
FuncComplexities []*ComplexityCalculator
}
func (mv *FuncVisitor) Visit(node ast.Node) ast.Visitor {
switch exp := node.(type) {
case *ast.FuncDecl:
cc := &ComplexityCalculator{
Name: funcDeclToString(exp),
Complexity: 1,
}
mv.FuncComplexities = append(mv.FuncComplexities, cc)
ast.Walk(cc, node)
return nil // Return nil to stop this walk.
}
return mv
}
func main() {
flag.Parse()
fset := token.NewFileSet()
f, err := parser.ParseFile(fset, *path, nil, 0)
if err != nil {
log.Fatalf("failed parsing file: %s", err)
}
var mv FuncVisitor
ast.Walk(&mv, f)
for _, mc := range mv.FuncComplexities {
log.Printf("%s has complexity %d", mc.Name, mc.Complexity)
}
}
|
[
7
] |
/*
Use the `html/template` package (§4.6) to replace `printTracks` with a function that displays the tracks as an HTML table.
Use the solution to the previous exercise to arrange that each click on a column head makes an HTTP request to sort the table.
func printTracks(tracks []*Track) {
const format = "%v\t%v\t%v\t%v\t%v\t\n"
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
fmt.Fprintf(tw, format, "Title", "Artist", "Album", "Year", "Length")
fmt.Fprintf(tw, format, "-----", "------", "-----", "----", "------")
for _, t := range tracks {
fmt.Fprintf(tw, format, t.Title, t.Artist, t.Album, t.Year, t.Length)
}
tw.Flush() // calculate column widths and print table
}
*/
package main
import (
"io"
"log"
"net/http"
"sort"
"strings"
"text/template"
"time"
)
type MovieReview struct {
Title string
Rating int
RunningTime time.Duration
}
type Data struct {
Items []MovieReview
}
var reviews = []MovieReview{
{"Soul", 8, time.Hour + (time.Minute * 40)},
{"Tenet", 7, (time.Hour * 2) + (time.Minute * 40)},
{"Gambit", 7, (time.Hour * 1) + (time.Minute * 25)},
{"Bridgerton", 8, (time.Hour * 1) + (time.Minute * 35)},
{"Escape", 9, (time.Hour * 2) + (time.Minute * 15)},
{"Mira", 6, (time.Hour * 2) + (time.Minute * 2)},
}
func main() {
http.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) {
s := strings.TrimSpace(r.FormValue("sort"))
if strings.EqualFold(s, "title") {
clickSimulation("title", reviews)
} else if strings.EqualFold(s, "rating") {
clickSimulation("rating", reviews)
} else if strings.EqualFold(s, "time") {
clickSimulation("runningTime", reviews)
}
printReviews(rw, Data{reviews})
})
log.Fatal(http.ListenAndServe(":8080", nil))
}
func printReviews(wr io.Writer, data Data) {
const tpl = `
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Movie Review</title>
</head>
<body>
<table>
<tr>
<th><a href="/?sort=title">Title</a></th>
<th><a href="/?sort=rating">Rating</a></th>
<th><a href="/?sort=time">Time</a></th>
</tr>
{{range .Items}}
<tr>
<td>{{.Title}}</td>
<td>{{.Rating}}</td>
<td>{{.RunningTime}}</td>
</tr>
{{end}}
</table>
</body>
</html>`
check := func(err error) {
if err != nil {
log.Fatal(err)
}
}
t, err := template.New("webpage").Parse(tpl)
check(err)
err = t.Execute(wr, data)
check(err)
}
var sortOrder = []string{"title", "rating", "runningTime"}
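// clickSimulation re-sorts the reviews as if the user had just clicked the
// given column header: that column becomes the primary sort key, and the
// previously clicked key (tracked in sortOrder) serves as the tie-breaker.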
func clickSimulation(byWhat string, data []MovieReview) {
if !isContain(sortOrder, byWhat) {
// do nothing
} else if strings.EqualFold(byWhat, "title") {
sortOrder = moveToFront(byWhat, sortOrder)
sort.Slice(data, func(i, j int) bool {
return compareTitle(data[i].Title, data[j].Title)
})
} else if strings.EqualFold(byWhat, "rating") {
sortOrder = moveToFront(byWhat, sortOrder)
sort.Slice(data, func(i, j int) bool {
if data[i].Rating == data[j].Rating {
// ratings tie: fall back to the next key in the click order
if sortOrder[1] == "title" {
return compareTitle(data[i].Title, data[j].Title)
}
return compareTime(data[i].RunningTime, data[j].RunningTime)
}
return compareRating(data[i].Rating, data[j].Rating)
})
} else if strings.EqualFold(byWhat, "runningTime") {
sortOrder = moveToFront(byWhat, sortOrder)
sort.Slice(data, func(i, j int) bool {
return compareTime(data[i].RunningTime, data[j].RunningTime)
})
}
}
func compareTitle(t1 string, t2 string) bool {
return strings.Compare(t1, t2) < 0
}
func compareRating(r1 int, r2 int) bool {
return r1 < r2
}
func compareTime(t1 time.Duration, t2 time.Duration) bool {
return t1 < t2
}
func isContain(ss []string, s string) bool {
for _, str := range ss {
if strings.EqualFold(str, s) {
return true
}
}
return false
}
func moveToFront(needle string, haystack []string) []string {
if len(haystack) == 0 || haystack[0] == needle {
return haystack
}
var prev string
for i, elem := range haystack {
switch {
case i == 0:
haystack[0] = needle
prev = elem
case elem == needle:
haystack[i] = prev
return haystack
default:
haystack[i] = prev
prev = elem
}
}
return append(haystack, prev)
}
|
[
5,
6
] |
package main
import (
"net/http"
"encoding/json"
"os"
)
// response struct holds response payload
type response struct {
Success int `json:"success"`
Message interface{} `json:"message"`
}
// respondWithJSON writes the response to the client as JSON
func respondWithJSON(w http.ResponseWriter, httpStatus int, successCode int, payload interface{}) {
formattedResponse := response{successCode, payload}
body, _ := json.Marshal(formattedResponse)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST, GET")
w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, Authorization,Content-Disposition")
w.Header().Set("Access-Control-Allow-Credentials", "true")
w.WriteHeader(httpStatus)
w.Write(body)
}
// getEnvironmentVariable fetches the value for specified key
// in .env file
func getEnvironmentVariable(key string) string {
return os.Getenv(key)
}
|
[
6
] |
package main
import (
"errors"
"math"
)
const (
humanPlayer = 0
cpuPlayer = 1
)
var boardColors [8][8]string = [8][8]string{
{"orange", "blue", "purple", "pink", "yellow", "red", "green", "brown"}, // player 1 (black, CPU)
{"red", "orange", "pink", "green", "blue", "yellow", "brown", "purple"},
{"green", "pink", "orange", "red", "purple", "brown", "yellow", "blue"},
{"pink", "purple", "blue", "orange", "brown", "green", "red", "yellow"},
{"yellow", "red", "green", "brown", "orange", "blue", "purple", "pink"},
{"blue", "yellow", "brown", "purple", "red", "orange", "pink", "green"},
{"purple", "brown", "yellow", "blue", "green", "pink", "orange", "red"},
{"brown", "green", "red", "yellow", "pink", "purple", "blue", "orange"}} // player 0 (white, human)
var N int = len(boardColors)
type coord struct {
i, j int
}
type piece struct {
// fields must be capitalized to be JSON exportable
Player int `json:"player"`
Color string `json:"color"`
}
type state struct {
board [8][8]*piece
playerPieceCoords [2]map[string]coord // int -> color -> coord
}
func (state *state) copy() *state {
// the board grid is getting copied because it's an array,
// but the playerPieceCoords structure must be deeply copied
newState := *state
newState.playerPieceCoords = [2]map[string]coord{}
for player := 0; player < 2; player++ {
newState.playerPieceCoords[player] = make(map[string]coord)
for color, coord := range state.playerPieceCoords[player] {
newState.playerPieceCoords[player][color] = coord
}
}
return &newState
}
// "a1" -> Coord{7, 0}
// "d5" -> Coord{3, 3}
func toCoord(a []string) (coord, error) {
if len(a) != 2 {
return coord{-1, -1}, errors.New(`Coord must have two elements`)
}
// use ascii code conversion
j := int(a[0][0]) - 97 // 'a' -> 0, 'h' -> 7
i := N - (int(a[1][0]) - 48) // '0' -> 0, '7' -> 7
if i < 0 || j < 0 || i >= N || j >= N {
return coord{-1, -1}, errors.New(`Bad coord`)
}
return coord{i, j}, nil
}
func (state *state) movePiece(player int, color string, dst coord) {
src := state.playerPieceCoords[player][color]
piece := state.board[src.i][src.j]
state.board[dst.i][dst.j] = piece
state.board[src.i][src.j] = nil
state.playerPieceCoords[player][color] = coord{dst.i, dst.j}
}
func (state *state) getPossibleMoveCoords(player int, color string) []coord {
incrs := [3]coord{{1, -1}, {1, 0}, {1, 1}}
coords := []coord{}
src := state.playerPieceCoords[player][color]
piece := state.board[src.i][src.j]
m := 1
if piece.Player == humanPlayer {
m = -1 // reverse direction of coord.i component
}
for n := 0; n < 3; n++ { // cycle through 3 i directions
i, j := src.i, src.j
for {
i += incrs[n].i * m
j += incrs[n].j
if i < 0 || i > (N-1) || j < 0 || j > (N-1) || state.board[i][j] != nil {
break
}
coords = append(coords, coord{i, j})
}
}
return coords
}
func (state *state) findBestMoveCoord(player int, color string, depth int) coord {
dstCoords := state.getPossibleMoveCoords(player, color)
var bestCoord coord
bestValue := math.Inf(-1)
for _, dst := range dstCoords {
newState := state.copy()
newState.movePiece(player, color, dst)
nextColor := boardColors[dst.i][dst.j]
v := -negamax(newState, player, player, nextColor, depth)
if v > bestValue {
bestCoord = dst
bestValue = v
}
}
return bestCoord
}
func (state *state) isWinning(player int) bool {
var i int
if player == humanPlayer { // if white, check top row
i = 0
} else {
i = N - 1 // if black, check bottom row
}
for j := 0; j < N; j++ {
piece := state.board[i][j]
// if you find a player's piece in the target row, they won
if piece != nil && piece.Player == player {
return true
}
}
return false
}
func (state *state) value(player int) float64 {
opponent := (player + 1) % 2
pos := state.getNumberOfWinInOnePlayerPieces(player)
neg := state.getNumberOfWinInOnePlayerPieces(opponent)
neg += state.getNumberDistinctColorsForNextMove(opponent)
return float64(pos - neg)
}
func (state *state) getNumberOfWinInOnePlayerPieces(player int) int {
nWinningPieces := 0
var winningRow int
if player == humanPlayer {
winningRow = 0
} else {
winningRow = N - 1
}
for color := range state.playerPieceCoords[player] {
moveCoords := state.getPossibleMoveCoords(player, color)
for _, nextCoord := range moveCoords {
if nextCoord.i == winningRow {
nWinningPieces++
break
}
}
}
return nWinningPieces
}
func (state *state) getNumberDistinctColorsForNextMove(player int) int {
colors := make(map[string]bool)
n := 0
for color := range state.playerPieceCoords[player] {
moveCoords := state.getPossibleMoveCoords(player, color)
for _, nextCoord := range moveCoords {
nextColor := boardColors[nextCoord.i][nextCoord.j]
if _, ok := colors[nextColor]; !ok {
colors[nextColor] = true
n++
break
}
}
}
return n
}
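// negamax evaluates the position from initPlayer's point of view using the
// negamax convention: each recursive call negates the score returned for the
// opposing side. The color argument constrains which piece may move next
// (apparently Kamisado-style rules, where the color of the destination
// square dictates the next piece to play).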
func negamax(state *state, initPlayer int, currPlayer int, color string, depth int) float64 {
nextPlayer := (currPlayer + 1) % 2
m := float64(1)
if currPlayer == initPlayer {
m = float64(-1)
}
if state.isWinning(currPlayer) {
return m * math.Inf(1)
} else if state.isWinning(nextPlayer) {
return m * math.Inf(-1)
} else if depth == 0 {
return m * state.value(initPlayer)
}
dstCoords := state.getPossibleMoveCoords(nextPlayer, color)
bestValue := float64(-1)
foundMove := false
for _, dst := range dstCoords {
nextColor := boardColors[dst.i][dst.j]
newState := state.copy()
newState.movePiece(nextPlayer, nextColor, dst)
v := -negamax(newState, nextPlayer, nextPlayer, nextColor, depth-1)
if v > bestValue {
bestValue = v
foundMove = true
}
}
if foundMove {
return bestValue
} else {
// board stays the same (src == dst), and next color is src's one
src := state.playerPieceCoords[nextPlayer][color]
return negamax(state, initPlayer, nextPlayer, boardColors[src.i][src.j], depth-1)
}
}
func (state *state) isLegalMove(player int, color string, dst coord) bool {
dstCoords := state.getPossibleMoveCoords(player, color)
for _, dstCoord := range dstCoords {
if dst == dstCoord {
return true
}
}
return false
}
func (state *state) isBlocked(player int, color string) bool {
return len(state.getPossibleMoveCoords(player, color)) == 0
}
|
[
5,
6
] |
package config
import (
"github.com/spf13/viper"
"fmt"
)
const PROFILE_KEY = "_profile_"
func LoadConsulConfigWithToken(host string, app string, profile string) (*viper.Viper, error) {
v := viper.New()
v.SetConfigName(app)
v.Set(PROFILE_KEY, profile)
if err := v.AddRemoteProvider("consul", host, fmt.Sprintf("/config/%s::%s/", app, profile)); err != nil {
return nil, err
}
v.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop"
err := v.ReadRemoteConfig()
return v, err
}
|
[
6
] |
package internal
import (
"context"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/evergreen-ci/evergreen"
"github.com/evergreen-ci/evergreen/apimodels"
"github.com/evergreen-ci/evergreen/model"
"github.com/evergreen-ci/evergreen/model/patch"
"github.com/evergreen-ci/evergreen/model/task"
"github.com/evergreen-ci/evergreen/thirdparty"
"github.com/evergreen-ci/evergreen/util"
"github.com/mongodb/grip"
"github.com/pkg/errors"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/baggage"
)
type TaskConfig struct {
Distro *apimodels.DistroView
ProjectRef *model.ProjectRef
Project *model.Project
Task *task.Task
BuildVariant *model.BuildVariant
Expansions *util.Expansions
DynamicExpansions util.Expansions
Redacted map[string]bool
WorkDir string
GithubPatchData thirdparty.GithubPatch
GithubMergeData thirdparty.GithubMergeGroup
Timeout *Timeout
TaskSync evergreen.S3Credentials
EC2Keys []evergreen.EC2Key
ModulePaths map[string]string
CedarTestResultsID string
mu sync.RWMutex
}
type Timeout struct {
IdleTimeoutSecs int
ExecTimeoutSecs int
}
func (t *TaskConfig) SetIdleTimeout(timeout int) {
t.mu.Lock()
defer t.mu.Unlock()
t.Timeout.IdleTimeoutSecs = timeout
}
func (t *TaskConfig) SetExecTimeout(timeout int) {
t.mu.Lock()
defer t.mu.Unlock()
t.Timeout.ExecTimeoutSecs = timeout
}
func (t *TaskConfig) GetIdleTimeout() int {
t.mu.RLock()
defer t.mu.RUnlock()
return t.Timeout.IdleTimeoutSecs
}
func (t *TaskConfig) GetExecTimeout() int {
t.mu.RLock()
defer t.mu.RUnlock()
return t.Timeout.ExecTimeoutSecs
}
func NewTaskConfig(workDir string, d *apimodels.DistroView, p *model.Project, t *task.Task, r *model.ProjectRef, patchDoc *patch.Patch, e util.Expansions) (*TaskConfig, error) {
// check the task first: its fields are referenced in the other error messages
if t == nil {
return nil, errors.Errorf("task cannot be nil")
}
if p == nil {
return nil, errors.Errorf("project '%s' is nil", t.Project)
}
if r == nil {
return nil, errors.Errorf("project ref '%s' is nil", p.Identifier)
}
bv := p.FindBuildVariant(t.BuildVariant)
if bv == nil {
return nil, errors.Errorf("cannot find build variant '%s' for task in project '%s'", t.BuildVariant, t.Project)
}
taskConfig := &TaskConfig{
Distro: d,
ProjectRef: r,
Project: p,
Task: t,
BuildVariant: bv,
Expansions: &e,
DynamicExpansions: util.Expansions{},
WorkDir: workDir,
}
if patchDoc != nil {
taskConfig.GithubPatchData = patchDoc.GithubPatchData
taskConfig.GithubMergeData = patchDoc.GithubMergeData
}
taskConfig.Timeout = &Timeout{}
return taskConfig, nil
}
func (c *TaskConfig) GetWorkingDirectory(dir string) (string, error) {
if dir == "" {
dir = c.WorkDir
} else if strings.HasPrefix(dir, c.WorkDir) {
// pass
} else {
dir = filepath.Join(c.WorkDir, dir)
}
if stat, err := os.Stat(dir); os.IsNotExist(err) {
return "", errors.Errorf("path '%s' does not exist", dir)
} else if err != nil || stat == nil {
return "", errors.Wrapf(err, "retrieving file info for path '%s'", dir)
} else if !stat.IsDir() {
return "", errors.Errorf("path '%s' is not a directory", dir)
}
return dir, nil
}
func (c *TaskConfig) GetCloneMethod() string {
if c.Distro != nil {
return c.Distro.CloneMethod
}
return evergreen.CloneMethodOAuth
}
// GetTaskGroup returns the task group for the given task group name. It may
// return nil if the task group name is empty.
func (tc *TaskConfig) GetTaskGroup(taskGroup string) (*model.TaskGroup, error) {
if err := tc.Validate(); err != nil {
return nil, err
}
if taskGroup == "" {
return nil, nil
}
tg := tc.Project.FindTaskGroup(taskGroup)
if tg == nil {
return nil, errors.Errorf("couldn't find task group '%s' in project '%s'", taskGroup, tc.Project.Identifier)
}
return tg, nil
}
// Validate validates that the task config is populated with the data required
// for a task to run.
func (tc *TaskConfig) Validate() error {
if tc == nil {
return errors.New("unable to get task setup because task config is nil")
}
if tc.Task == nil {
return errors.New("unable to get task setup because task is nil")
}
if tc.Task.Version == "" {
return errors.New("task has no version")
}
if tc.Project == nil {
return errors.New("project is nil")
}
return nil
}
func (tc *TaskConfig) TaskAttributeMap() map[string]string {
return map[string]string{
evergreen.TaskIDOtelAttribute: tc.Task.Id,
evergreen.TaskNameOtelAttribute: tc.Task.DisplayName,
evergreen.TaskExecutionOtelAttribute: strconv.Itoa(tc.Task.Execution),
evergreen.VersionIDOtelAttribute: tc.Task.Version,
evergreen.VersionRequesterOtelAttribute: tc.Task.Requester,
evergreen.BuildIDOtelAttribute: tc.Task.BuildId,
evergreen.BuildNameOtelAttribute: tc.Task.BuildVariant,
evergreen.ProjectIdentifierOtelAttribute: tc.ProjectRef.Identifier,
evergreen.ProjectIDOtelAttribute: tc.ProjectRef.Id,
evergreen.DistroIDOtelAttribute: tc.Task.DistroId,
}
}
func (tc *TaskConfig) AddTaskBaggageToCtx(ctx context.Context) (context.Context, error) {
catcher := grip.NewBasicCatcher()
bag := baggage.FromContext(ctx)
for key, val := range tc.TaskAttributeMap() {
member, err := baggage.NewMember(key, val)
if err != nil {
catcher.Add(errors.Wrapf(err, "making member for key '%s' val '%s'", key, val))
continue
}
bag, err = bag.SetMember(member)
catcher.Add(err)
}
return baggage.ContextWithBaggage(ctx, bag), catcher.Resolve()
}
func (tc *TaskConfig) TaskAttributes() []attribute.KeyValue {
var attributes []attribute.KeyValue
for key, val := range tc.TaskAttributeMap() {
attributes = append(attributes, attribute.String(key, val))
}
return attributes
}
|
[
5
] |
package main
import (
"fmt"
// "os"
)
/**
* Auto-generated code below aims at helping you parse
* the standard input according to the problem statement.
**/
func main() {
var N int
fmt.Scan(&N)
/*
Triangle height = N
Triangle width = 2N - 1
Height = 2N
Width = 4N - 1
Middle = 2N - 1
*/
var line string
for i := 0; i < 2*N; i++ {
line = ""
for j := 0; j < 2 * N + i; j++ {
if j < 2 * N - 1 - i {
line += " "
} else if i < N {
// First triangle
// --------------
line += "*"
} else {
// Others triangles
// ----------------
if j < i {
line += "*"
} else if j < 4 * N - i - 1 {
line += " "
} else {
line += "*"
}
}
}
if i == 0 {
line = "." + line[1:]
}
fmt.Println(line)
}
}
|
[
5
] |
package model
import (
"bytes"
"encoding/base64"
"errors"
"github.com/jinzhu/gorm"
"github.com/satori/go.uuid"
"log"
"time"
)
// Login sessions are valid for 180 days
const LoginPeriod = 180 * 24 * time.Hour
var ErrCSRFTokenMismatch = errors.New("CSRFToken mismatch")
type LoginToken struct {
Token []byte `gorm:"primary_key;type:BINARY(16)"`
// CSRFToken is kept by the frontend and attached to each request to ensure the request comes from the correct site
CSRFToken []byte `gorm:"type:BINARY(16)"`
CreatedAt time.Time
Expires time.Time
UserId uint
User User
}
var DefaultLoginToken = new(LoginToken)
// Check reports whether the token is valid
func (t *LoginToken) Check() (err error) {
CSRFToken := t.CSRFToken
err = db.First(t).Error
if err != nil {
return
}
if !bytes.Equal(t.CSRFToken, CSRFToken) {
return ErrCSRFTokenMismatch
}
return
}
func (t *LoginToken) GetUser() (err error) {
err = db.Model(t).Related(&t.User).Error
return
}
func (t *LoginToken) Login() (err error) {
t.Token = uuid.Must(uuid.NewV4()).Bytes()
t.CSRFToken = uuid.Must(uuid.NewV4()).Bytes()
t.Expires = gorm.NowFunc().Add(LoginPeriod)
err = db.Create(t).Error
return
}
func (t *LoginToken) Logout() (err error) {
err = db.Delete(t).Error
return
}
func (t *LoginToken) LogoutAll(userId uint) (err error) {
err = db.Delete(t, "user_id = ?", userId).Error
return
}
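// GetTokensStr base64-encodes both 16-byte tokens; a 16-byte value always
// encodes to 24 characters ending in "==", so the strings are truncated to
// 22 characters here and the padding is re-appended in ParseTokensStr.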
func (t *LoginToken) GetTokensStr() (token, CSRFToken string) {
token = base64.StdEncoding.EncodeToString(t.Token)[:22]
CSRFToken = base64.StdEncoding.EncodeToString(t.CSRFToken)[:22]
return
}
func (t *LoginToken) ParseTokensStr(token, CSRFToken string) (err error) {
t.Token, err = base64.StdEncoding.DecodeString(token + "==")
if err != nil {
return
}
t.CSRFToken, err = base64.StdEncoding.DecodeString(CSRFToken + "==")
return
}
func (t *LoginToken) ClearUp() (err error) {
err = db.Delete(t, "expires < ?", gorm.NowFunc()).Error
return
}
func (t *LoginToken) PeriodicCleanup() (quit chan struct{}) {
quit = make(chan struct{})
ticker := time.NewTicker(time.Minute * 10)
go func() {
for {
select {
case <-ticker.C:
err := t.ClearUp()
if err != nil {
log.Printf("fail to ClearUp: %v", err)
}
case <-quit:
return
}
}
}()
return
}
|
[
2
] |
package main
import (
"database/sql"
"fmt"
"strings"
"time"
"github.com/pkg/errors"
"cjting.me/apphub/parser"
"github.com/jmoiron/sqlx"
"github.com/kataras/golog"
"github.com/mattn/go-sqlite3"
)
type DB struct {
*sqlx.DB
}
func initDB() {
dsn := fmt.Sprintf("file:%s?_foreign_keys=true", config.DBPath)
sqlDB, err := sqlx.Open("sqlite3", dsn)
if err != nil {
golog.Fatalf("could not open sqlite3 database: %v", err)
}
db = &DB{sqlDB}
}
// null means no package
func (db *DB) getPackage(id string) *Package {
pkg := &Package{}
err := db.Get(pkg, "select * from package where id = $1", id)
if err == sql.ErrNoRows {
return nil
}
if err != nil {
panic(err)
}
return pkg
}
func (db *DB) createPackage(
info *parser.AppInfo, fileName, versionRemark, pkgRemark string,
pkgID, channel, env string,
) (*App, *Version, *Package, error) {
// fetch app
app := &App{}
{
err := db.Get(
app,
`select * from app where bundle_id = $1 and platform = $2`,
info.BundleID,
info.Platform,
)
if err == sql.ErrNoRows {
// create app
app.Name = info.Name
app.Platform = info.Platform
app.BundleID = info.BundleID
if err := db.ensureInsertApp(app); err != nil {
return nil, nil, nil, errors.Wrap(err, "could not insert app")
}
}
}
// fetch version
version := &Version{}
{
err := db.Get(
version,
`select * from version where version = $1 and app_id = $2`,
getFullVersion(info),
app.ID,
)
if err == sql.ErrNoRows {
// create version
version.Version = getFullVersion(info)
version.AppID = app.ID
version.AndroidVersionName = info.AndroidVersionName
version.AndroidVersionCode = info.AndroidVersionCode
version.IOSShortVersion = info.IOSShortVersion
version.IOSBundleVersion = info.IOSBundleVersion
version.SortKey = time.Now().Unix()
version.Remark = versionRemark
if res, err := db.NamedExec(`
insert into version(
version, app_id, android_version_code, android_version_name,
ios_short_version, ios_bundle_version, sort_key, remark
)
values(
:version, :app_id, :android_version_code, :android_version_name,
:ios_short_version, :ios_bundle_version, :sort_key, :remark
)
`, version); err != nil {
return nil, nil, nil, errors.Wrap(err, "could not insert version")
} else {
id, _ := res.LastInsertId()
version.ID = int(id)
}
}
}
// create package
pkg := &Package{}
pkg.ID = pkgID
pkg.VersionID = version.ID
pkg.Name = fileName
pkg.Size = info.Size
pkg.CreatedAt = time.Now()
pkg.Remark = pkgRemark
pkg.IOSPackageType = info.IOSPackageType
pkg.IOSDeviceList = info.IOSDeviceList
pkg.Channel = channel
pkg.Env = env
if _, err := db.NamedExec(`
insert into package(
id, version_id, name, size, created_at, remark, ios_package_type, ios_device_list, channel, env
)
values(
:id, :version_id, :name, :size, :created_at, :remark, :ios_package_type, :ios_device_list, :channel, :env
)
`, pkg); err != nil {
return nil, nil, nil, errors.Wrap(err, "could not insert package")
}
return app, version, pkg, nil
}
// need to assign ID
func (db *DB) insertApp(app *App) error {
res, err := db.NamedExec(`
insert into app(
alias, name, platform, bundle_id
)
values(
:alias, :name, :platform, :bundle_id
)
`, app)
if err == nil {
id, _ := res.LastInsertId()
app.ID = int(id)
}
return err
}
// handle app.alias unique constraint
func (db *DB) ensureInsertApp(app *App) error {
for {
app.Alias = randomStr(4)
err := db.insertApp(app)
if err == nil {
return nil
}
if isAppAliasUniqueError(err) {
continue
} else {
return err
}
}
}
func (db *DB) deletePackage(id string) error {
_, err := db.Exec("delete from package where id = $1", id)
return err
}
// value could be alias(string) or id(int)
func (db *DB) getAppByAliasOrID(value interface{}) *SimpleApp {
app := &SimpleApp{}
var err error
if id, ok := value.(int); ok {
err = db.Get(app, "select * from simple_app where id = $1", id)
} else if alias, ok := value.(string); ok {
err = db.Get(app, "select * from simple_app where alias = $1", alias)
} else {
panic("invalid value for getAppByAliasOrID")
}
if err != nil {
if err == sql.ErrNoRows {
return nil
} else {
panic(err)
}
}
return app
}
func (db *DB) getApps() ([]*SimpleApp, error) {
apps := make([]*SimpleApp, 0)
if err := db.Select(&apps, "select * from simple_app"); err != nil {
return nil, err
}
return apps, nil
}
// return all envs of packages of this app
func (db *DB) getAppEnvs(appID int) ([]string, error) {
result := make([]string, 0)
if err := db.Select(&result, `
select
distinct env
from
package p
left join version v
on p.version_id = v.id where v.app_id = $1;
`, appID); err != nil {
return nil, err
}
return result, nil
}
// return all channels of packages of this app
func (db *DB) getAppChannels(appID int) ([]string, error) {
result := make([]string, 0)
if err := db.Select(&result, `
select
distinct channel
from
package p
left join version v
on p.version_id = v.id where v.app_id = $1;
`, appID); err != nil {
return nil, err
}
return result, nil
}
func (db *DB) getAppAlias(platform, bundleID string) (string, error) {
var result []string
if err := db.Select(&result, `
select alias from app where platform = $1 and bundle_id = $2
`, platform, bundleID); err != nil {
return "", err
}
if len(result) == 0 {
return "", nil
}
return result[0], nil
}
// sort by sort_key desc
func (db *DB) getAppDetailedVersions(appID int) ([]*DetailVersion, error) {
versions := make([]*DetailVersion, 0)
if err := db.Select(&versions, "select * from detail_version where app_id = $1", appID); err != nil {
return nil, err
}
return versions, nil
}
// return null if not exists
func (db *DB) getVersion(id int) *DetailVersion {
ver := &DetailVersion{}
err := db.Get(ver, "select * from detail_version where id = $1", id)
if err != nil {
if err == sql.ErrNoRows {
return nil
} else {
panic(err)
}
}
return ver
}
// return null if not exists
func (db *DB) getVersionByAppAliasAndFullVersion(appAlias, fullVersion string) *DetailVersion {
app := db.getAppByAliasOrID(appAlias)
if app == nil {
return nil
}
ver := &DetailVersion{}
err := db.Get(
ver,
"select * from detail_version where app_id = $1 and version = $2",
app.ID,
fullVersion,
)
if err != nil {
if err == sql.ErrNoRows {
return nil
} else {
panic(err)
}
}
return ver
}
// -1 or empty string means all
func (db *DB) getPackages(
appAlias string,
versionID int,
env string,
channel string,
) ([]*Package, error) {
pkgs := make([]*Package, 0)
sql := `
select
p.*
from package p
left join version v
on p.version_id = v.id
left join app a
on v.app_id = a.id
where
a.alias = $1
`
var params []interface{}
params = append(params, appAlias)
n := 2
if versionID != -1 {
sql += fmt.Sprintf(` and v.id = $%d`, n)
params = append(params, versionID)
n += 1
}
if env != "" {
sql += fmt.Sprintf(` and p.env = $%d`, n)
params = append(params, env)
n += 1
}
if channel != "" {
sql += fmt.Sprintf(` and p.channel = $%d`, n)
params = append(params, channel)
n += 1
}
sql += ` order by v.sort_key desc, p.created_at desc`
if err := db.Select(&pkgs, sql, params...); err != nil {
return nil, err
}
return pkgs, nil
}
// sort by created_at desc
func (db *DB) getVersionPackages(versionID int) ([]*Package, error) {
pkgs := make([]*Package, 0)
if err := db.Select(&pkgs, "select * from package where version_id = $1 order by created_at desc", versionID); err != nil {
return nil, err
}
return pkgs, nil
}
func isAppAliasUniqueError(err error) bool {
if e, ok := err.(sqlite3.Error); ok {
return (e.ExtendedCode == sqlite3.ErrConstraintUnique ||
e.ExtendedCode == sqlite3.ErrConstraintPrimaryKey) &&
strings.Contains(err.Error(), "app.alias")
}
return false
}
|
[
6
] |
package devices
import (
"com.azure.iot/iotcentral/iotcgo/client"
"com.azure.iot/iotcentral/iotcgo/client/operations"
"com.azure.iot/iotcentral/iotcgo/cmd/devicetemplates"
"com.azure.iot/iotcentral/iotcgo/config"
"com.azure.iot/iotcentral/iotcgo/models"
"com.azure.iot/iotcentral/iotcgo/util"
"fmt"
"github.com/jedib0t/go-pretty/table"
"github.com/spf13/cobra"
"os"
"strings"
)
// listCmd represents the devices list command
var listCmd = &cobra.Command{
Use: "list",
Short: "List all the devices in an application",
Long: `List all the devices in an application`,
RunE: func(cmd *cobra.Command, args []string) error {
// read the command line parameters
app, err := cmd.Flags().GetString("app")
if err != nil {
return err
}
format, err := cmd.Flags().GetString("format")
if err != nil {
return err
}
top, err := cmd.Flags().GetInt("top")
if err != nil {
return err
}
deviceTemplateID, err := cmd.Flags().GetString("deviceTemplate")
if err != nil {
return err
}
// create an IoTC API Client to connect to the given app
c, err := client.NewFromToken(app)
if err != nil {
return err
}
// start the spinner
spin := util.NewSpinner(" Downloading devices ...")
// get the list of devices
var devices []*models.Device
var nextLink string
if strings.ToLower(deviceTemplateID) == "all" {
res, err := c.Operations.DevicesList(operations.NewDevicesListParams())
if err != nil {
return err
}
devices = res.Payload.Value
nextLink = res.Payload.NextLink
} else {
p := operations.NewDeviceTemplatesListDevicesParams()
p.DeviceTemplateID = deviceTemplateID
res, err := c.Operations.DeviceTemplatesListDevices(p)
if err != nil {
return err
}
devices = res.Payload.Value
nextLink = res.Payload.NextLink
}
if len(devices) == 0 {
spin.Stop()
fmt.Printf("No devices found in '%s' app\n", app)
return nil
}
// get all device templates look up table so that we can print the template names
spin.Suffix = " Getting device templates"
deviceTemplates, err := devicetemplates.GetDeviceTemplatesLookupTable(c, app)
if err != nil {
return err
}
t := table.NewWriter()
t.SetOutputMirror(os.Stdout)
t.AppendHeader(table.Row{"#", "ID", "Display Name", "Device Template", "Provisioned", "Approved", "Simulated"})
numItem := 1
limitReached := false
moreRowsExist := false
numItem, limitReached, moreRowsExist = addTableRows(t, devices, deviceTemplates, numItem, top)
// loop through and download all the rows one page at a time
for {
if len(nextLink) == 0 || limitReached {
break
}
spin.Suffix = fmt.Sprintf(" Downloaded %v devices, getting more...", numItem-1)
body, err := util.GetContent(app, nextLink)
if err != nil {
return err
}
var dc models.DeviceCollection
if err := dc.UnmarshalBinary(body); err != nil {
return err
}
numItem, limitReached, moreRowsExist = addTableRows(t, dc.Value, deviceTemplates, numItem, top)
nextLink = dc.NextLink
}
spin.Stop()
// write out the table
util.RenderTable(t, format, moreRowsExist || len(nextLink) != 0)
return nil
},
}
func init() {
devicesCmd.AddCommand(listCmd)
listCmd.Flags().StringP("app", "a", "", "name of the IoT Central application")
listCmd.MarkFlagRequired("app")
listCmd.Flags().StringP("format", "f", config.Config.Format, "output formats: pretty, table, csv, markdown, html")
listCmd.Flags().IntP("top", "", config.Config.MaxRows, "list only top N rows")
listCmd.Flags().StringP("deviceTemplate", "", "all", "list devices of the specified device template ID")
}
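// addTableRows appends up to `top` rows in total and returns the next item
// number, whether the row limit was reached, and whether more rows beyond
// the limit exist in the current page of devices.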
func addTableRows(t table.Writer, devices []*models.Device, deviceTemplates map[string]string, numItem int, top int) (int, bool, bool) {
var limitReached = false
var moreRowsExist = false
for i, item := range devices {
t.AppendRow([]interface{}{numItem, item.ID, item.DisplayName, deviceTemplates[item.InstanceOf], *item.Provisioned, item.Approved, item.Simulated})
if numItem == top {
limitReached = true
moreRowsExist = len(devices) != i+1
break
}
numItem++
}
return numItem, limitReached, moreRowsExist
}
|
[
6
] |
package password
import (
"fmt"
"strings"
"github.com/cloudfoundry/cli/cf/configuration/core_config"
"github.com/cloudfoundry/cli/cf/errors"
. "github.com/cloudfoundry/cli/cf/i18n"
"github.com/cloudfoundry/cli/cf/net"
)
//go:generate counterfeiter . PasswordRepository
type PasswordRepository interface {
UpdatePassword(old string, new string) error
}
type CloudControllerPasswordRepository struct {
config core_config.Reader
gateway net.Gateway
}
func NewCloudControllerPasswordRepository(config core_config.Reader, gateway net.Gateway) (repo CloudControllerPasswordRepository) {
repo.config = config
repo.gateway = gateway
return
}
func (repo CloudControllerPasswordRepository) UpdatePassword(old string, new string) error {
uaaEndpoint := repo.config.UaaEndpoint()
if uaaEndpoint == "" {
return errors.New(T("UAA endpoint missing from config file"))
}
url := fmt.Sprintf("/Users/%s/password", repo.config.UserGuid())
body := fmt.Sprintf(`{"password":"%s","oldPassword":"%s"}`, new, old)
return repo.gateway.UpdateResource(uaaEndpoint, url, strings.NewReader(body))
}
|
[
1,
6
] |
package dao
import (
"context"
"errors"
"github.com/krilie/lico_alone/common/com-model"
"github.com/krilie/lico_alone/common/errs"
"github.com/krilie/lico_alone/common/utils/id_util"
"github.com/krilie/lico_alone/module/module-blog-article/model"
"gorm.io/gorm"
"time"
)
type IBlogArticleDao interface {
CreateArticle(ctx context.Context, article *model.Article) error
DeleteArticleById(ctx context.Context, id string) (bool, error)
UpdateArticle(ctx context.Context, article *model.Article) error
UpdateArticleSample(ctx context.Context, article *model.UpdateArticleModel) error
GetArticleById(ctx context.Context, id string) (*model.Article, error)
}
func (b *BlogArticleDao) CreateArticle(ctx context.Context, article *model.Article) error {
if article.Id == "" {
article.Id = id_util.GetUuid()
}
err := b.GetDb(ctx).Model(new(model.Article)).Create(article).Error
return err
}
func (b *BlogArticleDao) DeleteArticleById(ctx context.Context, id string) (bool, error) {
err := b.GetDb(ctx).Delete(&model.Article{
Model: com_model.Model{
Id: id,
},
}).Error
if errors.Is(err, gorm.ErrRecordNotFound) {
return false, nil
} else if err != nil {
return false, err
} else {
return true, nil
}
}
func (b *BlogArticleDao) UpdateArticle(ctx context.Context, article *model.Article) error {
result := b.GetDb(ctx).Model(new(model.Article)).Select("*").Updates(article)
if result.Error != nil {
return result.Error
}
if result.RowsAffected <= 0 {
return errs.NewNotExistsError().WithMsg("no changes were made")
}
return nil
}
func (b *BlogArticleDao) UpdateArticleSample(ctx context.Context, article *model.UpdateArticleModel) error {
result := b.GetDb(ctx).Model(new(model.Article)).Where("id=?", article.Id).
UpdateColumns(map[string]interface{}{
"title": article.Title,
"content": article.Content,
"picture": article.Picture,
"sort": article.Sort,
"description": article.Description,
"updated_at": time.Now(),
})
if result.Error != nil {
return result.Error
}
if result.RowsAffected <= 0 {
return errs.NewNotExistsError().WithMsg("no changes were made")
}
return nil
}
func (b *BlogArticleDao) GetArticleById(ctx context.Context, id string) (article *model.Article, err error) {
article = new(model.Article)
err = b.GetDb(ctx).First(article, "id=?", id).Error
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, err
}
return article, err
}
|
[
5
] |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package model
import (
"context"
"fmt"
"github.com/apache/plc4x/plc4go/spi/utils"
"github.com/pkg/errors"
"github.com/rs/zerolog"
)
// Code generated by code-generation. DO NOT EDIT.
// S7ParameterModeTransition is the corresponding interface of S7ParameterModeTransition
type S7ParameterModeTransition interface {
fmt.Stringer
utils.LengthAware
utils.Serializable
S7Parameter
// GetMethod returns Method (property field)
GetMethod() uint8
// GetCpuFunctionType returns CpuFunctionType (property field)
GetCpuFunctionType() uint8
// GetCpuFunctionGroup returns CpuFunctionGroup (property field)
GetCpuFunctionGroup() uint8
// GetCurrentMode returns CurrentMode (property field)
GetCurrentMode() uint8
// GetSequenceNumber returns SequenceNumber (property field)
GetSequenceNumber() uint8
}
// S7ParameterModeTransitionExactly can be used when we want exactly this type and not a type which fulfills S7ParameterModeTransition.
// This is useful for switch cases.
type S7ParameterModeTransitionExactly interface {
S7ParameterModeTransition
isS7ParameterModeTransition() bool
}
// _S7ParameterModeTransition is the data-structure of this message
type _S7ParameterModeTransition struct {
*_S7Parameter
Method uint8
CpuFunctionType uint8
CpuFunctionGroup uint8
CurrentMode uint8
SequenceNumber uint8
// Reserved Fields
reservedField0 *uint16
}
///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////
/////////////////////// Accessors for discriminator values.
///////////////////////
func (m *_S7ParameterModeTransition) GetParameterType() uint8 {
return 0x01
}
func (m *_S7ParameterModeTransition) GetMessageType() uint8 {
return 0x07
}
///////////////////////
///////////////////////
///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////
func (m *_S7ParameterModeTransition) InitializeParent(parent S7Parameter) {}
func (m *_S7ParameterModeTransition) GetParent() S7Parameter {
return m._S7Parameter
}
///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////
/////////////////////// Accessors for property fields.
///////////////////////
func (m *_S7ParameterModeTransition) GetMethod() uint8 {
return m.Method
}
func (m *_S7ParameterModeTransition) GetCpuFunctionType() uint8 {
return m.CpuFunctionType
}
func (m *_S7ParameterModeTransition) GetCpuFunctionGroup() uint8 {
return m.CpuFunctionGroup
}
func (m *_S7ParameterModeTransition) GetCurrentMode() uint8 {
return m.CurrentMode
}
func (m *_S7ParameterModeTransition) GetSequenceNumber() uint8 {
return m.SequenceNumber
}
///////////////////////
///////////////////////
///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////
// NewS7ParameterModeTransition factory function for _S7ParameterModeTransition
func NewS7ParameterModeTransition(method uint8, cpuFunctionType uint8, cpuFunctionGroup uint8, currentMode uint8, sequenceNumber uint8) *_S7ParameterModeTransition {
_result := &_S7ParameterModeTransition{
Method: method,
CpuFunctionType: cpuFunctionType,
CpuFunctionGroup: cpuFunctionGroup,
CurrentMode: currentMode,
SequenceNumber: sequenceNumber,
_S7Parameter: NewS7Parameter(),
}
_result._S7Parameter._S7ParameterChildRequirements = _result
return _result
}
// Deprecated: use the interface for direct cast
func CastS7ParameterModeTransition(structType any) S7ParameterModeTransition {
if casted, ok := structType.(S7ParameterModeTransition); ok {
return casted
}
if casted, ok := structType.(*S7ParameterModeTransition); ok {
return *casted
}
return nil
}
func (m *_S7ParameterModeTransition) GetTypeName() string {
return "S7ParameterModeTransition"
}
func (m *_S7ParameterModeTransition) GetLengthInBits(ctx context.Context) uint16 {
lengthInBits := uint16(m.GetParentLengthInBits(ctx))
// Reserved Field (reserved)
lengthInBits += 16
// Implicit Field (itemLength)
lengthInBits += 8
// Simple field (method)
lengthInBits += 8
// Simple field (cpuFunctionType)
lengthInBits += 4
// Simple field (cpuFunctionGroup)
lengthInBits += 4
// Simple field (currentMode)
lengthInBits += 8
// Simple field (sequenceNumber)
lengthInBits += 8
return lengthInBits
}
func (m *_S7ParameterModeTransition) GetLengthInBytes(ctx context.Context) uint16 {
return m.GetLengthInBits(ctx) / 8
}
func S7ParameterModeTransitionParse(ctx context.Context, theBytes []byte, messageType uint8) (S7ParameterModeTransition, error) {
return S7ParameterModeTransitionParseWithBuffer(ctx, utils.NewReadBufferByteBased(theBytes), messageType)
}
func S7ParameterModeTransitionParseWithBuffer(ctx context.Context, readBuffer utils.ReadBuffer, messageType uint8) (S7ParameterModeTransition, error) {
positionAware := readBuffer
_ = positionAware
log := zerolog.Ctx(ctx)
_ = log
if pullErr := readBuffer.PullContext("S7ParameterModeTransition"); pullErr != nil {
return nil, errors.Wrap(pullErr, "Error pulling for S7ParameterModeTransition")
}
currentPos := positionAware.GetPos()
_ = currentPos
var reservedField0 *uint16
// Reserved Field (Compartmentalized so the "reserved" variable can't leak)
{
reserved, _err := readBuffer.ReadUint16("reserved", 16)
if _err != nil {
return nil, errors.Wrap(_err, "Error parsing 'reserved' field of S7ParameterModeTransition")
}
if reserved != uint16(0x0010) {
log.Info().Fields(map[string]any{
"expected value": uint16(0x0010),
"got value": reserved,
}).Msg("Got unexpected response for reserved field.")
// We save the value, so it can be re-serialized
reservedField0 = &reserved
}
}
// Implicit Field (itemLength) (Used for parsing, but its value is not stored as it's implicitly given by the object's content)
itemLength, _itemLengthErr := readBuffer.ReadUint8("itemLength", 8)
_ = itemLength
if _itemLengthErr != nil {
return nil, errors.Wrap(_itemLengthErr, "Error parsing 'itemLength' field of S7ParameterModeTransition")
}
// Simple Field (method)
_method, _methodErr := readBuffer.ReadUint8("method", 8)
if _methodErr != nil {
return nil, errors.Wrap(_methodErr, "Error parsing 'method' field of S7ParameterModeTransition")
}
method := _method
// Simple Field (cpuFunctionType)
_cpuFunctionType, _cpuFunctionTypeErr := readBuffer.ReadUint8("cpuFunctionType", 4)
if _cpuFunctionTypeErr != nil {
return nil, errors.Wrap(_cpuFunctionTypeErr, "Error parsing 'cpuFunctionType' field of S7ParameterModeTransition")
}
cpuFunctionType := _cpuFunctionType
// Simple Field (cpuFunctionGroup)
_cpuFunctionGroup, _cpuFunctionGroupErr := readBuffer.ReadUint8("cpuFunctionGroup", 4)
if _cpuFunctionGroupErr != nil {
return nil, errors.Wrap(_cpuFunctionGroupErr, "Error parsing 'cpuFunctionGroup' field of S7ParameterModeTransition")
}
cpuFunctionGroup := _cpuFunctionGroup
// Simple Field (currentMode)
_currentMode, _currentModeErr := readBuffer.ReadUint8("currentMode", 8)
if _currentModeErr != nil {
return nil, errors.Wrap(_currentModeErr, "Error parsing 'currentMode' field of S7ParameterModeTransition")
}
currentMode := _currentMode
// Simple Field (sequenceNumber)
_sequenceNumber, _sequenceNumberErr := readBuffer.ReadUint8("sequenceNumber", 8)
if _sequenceNumberErr != nil {
return nil, errors.Wrap(_sequenceNumberErr, "Error parsing 'sequenceNumber' field of S7ParameterModeTransition")
}
sequenceNumber := _sequenceNumber
if closeErr := readBuffer.CloseContext("S7ParameterModeTransition"); closeErr != nil {
return nil, errors.Wrap(closeErr, "Error closing for S7ParameterModeTransition")
}
// Create a partially initialized instance
_child := &_S7ParameterModeTransition{
_S7Parameter: &_S7Parameter{},
Method: method,
CpuFunctionType: cpuFunctionType,
CpuFunctionGroup: cpuFunctionGroup,
CurrentMode: currentMode,
SequenceNumber: sequenceNumber,
reservedField0: reservedField0,
}
_child._S7Parameter._S7ParameterChildRequirements = _child
return _child, nil
}
func (m *_S7ParameterModeTransition) Serialize() ([]byte, error) {
wb := utils.NewWriteBufferByteBased(utils.WithInitialSizeForByteBasedBuffer(int(m.GetLengthInBytes(context.Background()))))
if err := m.SerializeWithWriteBuffer(context.Background(), wb); err != nil {
return nil, err
}
return wb.GetBytes(), nil
}
func (m *_S7ParameterModeTransition) SerializeWithWriteBuffer(ctx context.Context, writeBuffer utils.WriteBuffer) error {
positionAware := writeBuffer
_ = positionAware
log := zerolog.Ctx(ctx)
_ = log
ser := func() error {
if pushErr := writeBuffer.PushContext("S7ParameterModeTransition"); pushErr != nil {
return errors.Wrap(pushErr, "Error pushing for S7ParameterModeTransition")
}
// Reserved Field (reserved)
{
var reserved uint16 = uint16(0x0010)
if m.reservedField0 != nil {
log.Info().Fields(map[string]any{
"expected value": uint16(0x0010),
"got value": reserved,
}).Msg("Overriding reserved field with unexpected value.")
reserved = *m.reservedField0
}
_err := writeBuffer.WriteUint16("reserved", 16, reserved)
if _err != nil {
return errors.Wrap(_err, "Error serializing 'reserved' field")
}
}
// Implicit Field (itemLength) (Used for parsing, but its value is not stored as it's implicitly given by the object's content)
itemLength := uint8(uint8(uint8(m.GetLengthInBytes(ctx))) - uint8(uint8(2)))
_itemLengthErr := writeBuffer.WriteUint8("itemLength", 8, (itemLength))
if _itemLengthErr != nil {
return errors.Wrap(_itemLengthErr, "Error serializing 'itemLength' field")
}
// Simple Field (method)
method := uint8(m.GetMethod())
_methodErr := writeBuffer.WriteUint8("method", 8, (method))
if _methodErr != nil {
return errors.Wrap(_methodErr, "Error serializing 'method' field")
}
// Simple Field (cpuFunctionType)
cpuFunctionType := uint8(m.GetCpuFunctionType())
_cpuFunctionTypeErr := writeBuffer.WriteUint8("cpuFunctionType", 4, (cpuFunctionType))
if _cpuFunctionTypeErr != nil {
return errors.Wrap(_cpuFunctionTypeErr, "Error serializing 'cpuFunctionType' field")
}
// Simple Field (cpuFunctionGroup)
cpuFunctionGroup := uint8(m.GetCpuFunctionGroup())
_cpuFunctionGroupErr := writeBuffer.WriteUint8("cpuFunctionGroup", 4, (cpuFunctionGroup))
if _cpuFunctionGroupErr != nil {
return errors.Wrap(_cpuFunctionGroupErr, "Error serializing 'cpuFunctionGroup' field")
}
// Simple Field (currentMode)
currentMode := uint8(m.GetCurrentMode())
_currentModeErr := writeBuffer.WriteUint8("currentMode", 8, (currentMode))
if _currentModeErr != nil {
return errors.Wrap(_currentModeErr, "Error serializing 'currentMode' field")
}
// Simple Field (sequenceNumber)
sequenceNumber := uint8(m.GetSequenceNumber())
_sequenceNumberErr := writeBuffer.WriteUint8("sequenceNumber", 8, (sequenceNumber))
if _sequenceNumberErr != nil {
return errors.Wrap(_sequenceNumberErr, "Error serializing 'sequenceNumber' field")
}
if popErr := writeBuffer.PopContext("S7ParameterModeTransition"); popErr != nil {
return errors.Wrap(popErr, "Error popping for S7ParameterModeTransition")
}
return nil
}
return m.SerializeParent(ctx, writeBuffer, m, ser)
}
func (m *_S7ParameterModeTransition) isS7ParameterModeTransition() bool {
return true
}
func (m *_S7ParameterModeTransition) String() string {
if m == nil {
return "<nil>"
}
writeBuffer := utils.NewWriteBufferBoxBasedWithOptions(true, true)
if err := writeBuffer.WriteSerializable(context.Background(), m); err != nil {
return err.Error()
}
return writeBuffer.GetBox().String()
}
|
[
6
] |
package services
import ("../resources"; "../model";
)
func CheckShootedCell(board *model.Board, shootingBoard *model.Board) (bool, int) {
cell := board.GetCell(shootingBoard.CurrentY, shootingBoard.CurrentX)
stayAtCurrentPlayer := true
switch cell {
case resources.NoShipCell:
shootingBoard.Board[shootingBoard.CurrentY][shootingBoard.CurrentX].BoardCell = resources.ShootedNoShipCell
stayAtCurrentPlayer = false
case resources.ShipCell:
shootingBoard.Board[shootingBoard.CurrentY][shootingBoard.CurrentX].BoardCell = resources.ShootedShipCell
stayAtCurrentPlayer = true
default:
stayAtCurrentPlayer = true
}
return stayAtCurrentPlayer, board.Board[shootingBoard.CurrentY][shootingBoard.CurrentX].ShipCell
}
|
[
6
] |
package game
import (
"fmt"
"github.com/damargulis/game/interfaces"
"github.com/damargulis/game/player"
)
type Reversi struct {
board [8][8]string
p1 game.Player
p2 game.Player
pTurn bool
round int
}
type ReversiMove struct {
row, col int
}
func (g Reversi) GetBoardDimensions() (int, int) {
return len(g.board), len(g.board[0])
}
func (g Reversi) BoardString() string {
s := "-----------------\n"
s += " 0 1 2 3 4 5 6 7\n"
for i, row := range g.board {
s += fmt.Sprintf("%v ", i)
for _, p := range row {
s += p
s += " "
}
s += "\n"
}
s += " 0 1 2 3 4 5 6 7\n"
s += "-----------------"
return s
}
func (g Reversi) GetPlayerTurn() game.Player {
if g.pTurn {
return g.p1
} else {
return g.p2
}
}
func (g Reversi) GetHumanInput() game.Move {
spot := readInts("Spot to place: ")
return ReversiMove{row: spot[0], col: spot[1]}
}
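// checkMove reports whether placing the current player's piece at (i, j)
// would flip at least one piece in direction (rowDir, colDir): the adjacent
// cells must hold a contiguous run of opponent pieces terminated by one of
// the current player's own pieces.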
func (g Reversi) checkMove(i, j, rowDir, colDir int) bool {
var target, match string
if g.pTurn {
target, match = "O", "X"
} else {
target, match = "X", "O"
}
if isInside(g, i+rowDir, j+colDir) && g.board[i+rowDir][j+colDir] == target {
rowCheck := i + rowDir
colCheck := j + colDir
for isInside(g, rowCheck, colCheck) && g.board[rowCheck][colCheck] == target {
rowCheck += rowDir
colCheck += colDir
}
if isInside(g, rowCheck, colCheck) && g.board[rowCheck][colCheck] == match {
return true
}
}
return false
}
func (g Reversi) GetPossibleMoves() []game.Move {
var moves []game.Move
for i, row := range g.board {
for j, spot := range row {
if spot == "." {
if g.checkMove(i, j, 0, 1) ||
g.checkMove(i, j, 1, 1) ||
g.checkMove(i, j, 1, 0) ||
g.checkMove(i, j, 1, -1) ||
g.checkMove(i, j, 0, -1) ||
g.checkMove(i, j, -1, -1) ||
g.checkMove(i, j, -1, 0) ||
g.checkMove(i, j, -1, 1) {
moves = append(moves, ReversiMove{
row: i,
col: j,
})
}
}
}
}
return moves
}
func (g Reversi) checkAndFill(i, j, rowDir, colDir int) Reversi {
var target, match string
if g.pTurn {
target, match = "O", "X"
} else {
target, match = "X", "O"
}
if isInside(g, i+rowDir, j+colDir) && g.board[i+rowDir][j+colDir] == target {
rowCheck := i + rowDir
colCheck := j + colDir
for isInside(g, rowCheck, colCheck) && g.board[rowCheck][colCheck] == target {
rowCheck += rowDir
colCheck += colDir
}
if isInside(g, rowCheck, colCheck) && g.board[rowCheck][colCheck] == match {
for r, c := i, j; r != rowCheck || c != colCheck; r, c = r+rowDir, c+colDir {
g.board[r][c] = match
}
}
}
return g
}
func (g Reversi) MakeMove(m game.Move) game.Game {
g.round++
move := m.(ReversiMove)
var match string
if g.pTurn {
match = "X"
} else {
match = "O"
}
g.board[move.row][move.col] = match
g = g.checkAndFill(move.row, move.col, 0, 1)
g = g.checkAndFill(move.row, move.col, 1, 1)
g = g.checkAndFill(move.row, move.col, 1, 0)
g = g.checkAndFill(move.row, move.col, 1, -1)
g = g.checkAndFill(move.row, move.col, 0, -1)
g = g.checkAndFill(move.row, move.col, -1, -1)
g = g.checkAndFill(move.row, move.col, -1, 0)
g = g.checkAndFill(move.row, move.col, -1, 1)
g.pTurn = !g.pTurn
possibleMoves := g.GetPossibleMoves()
if len(possibleMoves) == 0 {
g.pTurn = !g.pTurn
}
return g
}
func (g Reversi) GameOver() (bool, game.Player) {
possibleMoves := g.GetPossibleMoves()
if len(possibleMoves) == 0 {
score := g.CurrentScore(g.p1)
if score > 0 {
return true, g.p1
} else if score < 0 {
return true, g.p2
} else {
return true, player.HumanPlayer{"DRAW"}
}
} else {
return false, player.ComputerPlayer{}
}
}
func (g Reversi) CurrentScore(p game.Player) int {
score := 0
for _, row := range g.board {
for _, spot := range row {
if spot == "X" {
score++
} else if spot == "O" {
score--
}
}
}
if p == g.p1 {
return score
} else {
return -1 * score
}
}
func NewReversi(p1 string, p2 string, depth1 int, depth2 int) *Reversi {
r := new(Reversi)
r.round = 0
r.p1 = getPlayer(p1, "Player 1", depth1)
r.p2 = getPlayer(p2, "Player 2", depth2)
r.pTurn = true
r.board = [8][8]string{
{".", ".", ".", ".", ".", ".", ".", "."},
{".", ".", ".", ".", ".", ".", ".", "."},
{".", ".", ".", ".", ".", ".", ".", "."},
{".", ".", ".", "X", "O", ".", ".", "."},
{".", ".", ".", "O", "X", ".", ".", "."},
{".", ".", ".", ".", ".", ".", ".", "."},
{".", ".", ".", ".", ".", ".", ".", "."},
{".", ".", ".", ".", ".", ".", ".", "."},
}
return r
}
func (g Reversi) GetRound() int {
return g.round
}
|
[
5,
6
] |
package algorithm
import "fmt"
type BallotNumber struct {
proposalId uint64
nodeId uint64
}
func NewBallotNumber(proposalId uint64, nodeId uint64) *BallotNumber {
return &BallotNumber{
proposalId: proposalId,
nodeId: nodeId,
}
}
func (self *BallotNumber) String() string {
return fmt.Sprintf("%d:%d", self.proposalId, self.nodeId)
}
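// Ballots are ordered primarily by proposalId and secondarily by nodeId;
// BE/BT/NE/EQ implement >=, >, != and == under that ordering.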
func (self *BallotNumber) BE(other *BallotNumber) bool {
if self.proposalId == other.proposalId {
return self.nodeId >= other.nodeId
}
return self.proposalId >= other.proposalId
}
func (self *BallotNumber) NE(other *BallotNumber) bool {
return self.proposalId != other.proposalId ||
self.nodeId != other.nodeId
}
func (self *BallotNumber) EQ(other *BallotNumber) bool {
return !self.NE(other)
}
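// BT reports whether self is strictly greater than other.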
func (self *BallotNumber) BT(other *BallotNumber) bool {
if self.proposalId == other.proposalId {
return self.nodeId > other.nodeId
}
return self.proposalId > other.proposalId
}
func (self *BallotNumber) IsNull() bool {
return self.proposalId == 0
}
func (self *BallotNumber) Clone(bn *BallotNumber) {
self.nodeId = bn.nodeId
self.proposalId = bn.proposalId
}
func (self *BallotNumber) Reset() {
self.nodeId = 0
self.proposalId = 0
}
|
[
6
] |
package maxNumberOfBalloons
func maxNumberOfBalloons(text string) int {
// balloon
b, a, l, o, n := 0, 0, 0, 0, 0
ret := 10000
for _, v := range text {
if v == 'b' {
b++
} else if v == 'a' {
a++
} else if v == 'l' {
l++
} else if v == 'o' {
o++
} else if v == 'n' {
n++
}
}
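	// "balloon" needs two 'l's and two 'o's, so only half of those counts are usable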
l, o = l/2, o/2
ret = min(ret, min(b, min(a, min(l, min(o, n)))))
return ret
}
func min(x, y int) int {
if x < y {
return x
}
return y
}
|
[
5
] |
// Package chapter07 contains
// implementations of the algorithms introduced in Chapter 7.
package chapter07
import "fmt"
// FaStringMatcher uses a finite automaton to find occurrences of
// a pattern string in T. This function requires a complete
// nextState from ComputeNextStates function.
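// m is the length of the pattern (its accepting state) and n is the length of T.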
func FaStringMatcher(
T string,
nextState []map[string]int,
m int,
n int,
) {
// Starting from the empty substring of T
state := 0
for i := 1; i <= n; i++ {
// From the current state, what's the next state
// given the new character T[i-1]?
state = nextState[state][string(T[i-1])]
if state == m {
fmt.Printf("The pattern occurs with shift %d.\n", i-m)
}
}
}
|
[
2,
6
] |
package plugin
import (
"errors"
"reflect"
"strings"
"github.com/appscode/searchlight/pkg/controller/host"
)
func GetKubeObjectInfo(hostname string) (objectType string, objectName string, namespace string, err error) {
parts := strings.Split(hostname, "@")
if len(parts) != 2 {
err = errors.New("Invalid icinga host.name")
return
}
name := parts[0]
namespace = parts[1]
objectType = ""
objectName = ""
if name != host.CheckCommandPodExists && name != host.CheckCommandPodStatus {
parts = strings.Split(name, "|")
if len(parts) == 1 {
objectType = host.TypePods
objectName = parts[0]
} else if len(parts) == 2 {
objectType = parts[0]
objectName = parts[1]
} else {
err = errors.New("Invalid icinga host.name")
return
}
}
return
}
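// FillStruct copies map entries into the exported fields of result (a pointer
// to a struct) by field name; it panics if a key has no matching settable
// field or the value's type does not match the field's type.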
func FillStruct(data map[string]interface{}, result interface{}) {
t := reflect.ValueOf(result).Elem()
for k, v := range data {
val := t.FieldByName(k)
val.Set(reflect.ValueOf(v))
}
}
|
[
5,
6
] |
package main
import (
"fmt"
"sync"
"time"
)
// Failure messages for the supported operations
const (
LoopFail = "operation doesn't exist"
AddFail = "user has already existed"
DesFail = "user doesn't exist"
WithdrawFail = "user doesn't exist or value is bigger than balance"
QueryFail = "user doesn't exist"
)
// FailMes maps each operation to its failure message
var FailMes = map[string]string{
"Loop": LoopFail,
"AddUser": AddFail,
"Desposit": DesFail,
"Withdraw": WithdrawFail,
"Query": QueryFail,
}
// Bank holds the savings accounts
type Bank struct {
saving map[string]int
}
// Request describes a banking operation sent to the bank
type Request struct {
op string
name string
value int
retCh chan *Result
}
// Result is the outcome of a request
type Result struct {
status bool
balance int
}
// NewBank creates a bank with an empty savings map
func NewBank() *Bank {
b := &Bank{
saving: make(map[string]int),
}
return b
}
// Loop processes client requests until the request channel is closed
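// Requests are handled sequentially by this single goroutine, so the savings
// map needs no extra locking.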
func (b *Bank) Loop(reqCh chan *Request) {
for req := range reqCh {
switch req.op {
case "Desposit":
b.Desposit(req)
case "Withdraw":
b.Withdraw(req)
case "Query":
b.Query(req)
case "AddUser":
b.AddUser(req)
default:
ret := &Result{
status: false,
balance: 0,
}
req.retCh <- ret
}
}
	// the bank exits once there are no more requests
fmt.Println("Bank exit")
}
// AddUser creates a new account with a zero balance
func (b *Bank) AddUser(req *Request) {
name := req.name
var status bool
if _, ok := b.saving[name]; !ok {
status = true
b.saving[name] = 0
}
ret := &Result{
status: status,
balance: 0,
}
req.retCh <- ret
}
// Desposit adds value to an existing account
func (b *Bank) Desposit(req *Request) {
name := req.name
value := req.value
var (
ok bool
balance int
)
if _, ok = b.saving[name]; ok {
b.saving[name] += value
balance = b.saving[name]
}
ret := &Result{
status: ok,
balance: balance,
}
req.retCh <- ret
}
// Withdraw removes value from an account if the balance allows it
func (b *Bank) Withdraw(req *Request) {
name := req.name
value := req.value
var (
status bool
balance int
)
	if cur, ok := b.saving[name]; ok && cur >= value {
		status = true
		b.saving[name] -= value
		balance = b.saving[name]
	}
ret := &Result{
status: status,
balance: balance,
}
req.retCh <- ret
}
// Query looks up the balance of an account
func (b *Bank) Query(req *Request) {
name := req.name
balance, ok := b.saving[name]
ret := &Result{
status: ok,
balance: balance,
}
req.retCh <- ret
}
// xiaoming runs the operations of client xiaoming
func xiaoming(wg *sync.WaitGroup, reqCh chan<- *Request) {
name := "xiaoming"
retCh := make(chan *Result)
defer func() {
close(retCh)
wg.Done()
}()
addReq := &Request{
op: "AddUser",
name: name,
retCh: retCh,
}
depReq := &Request{
op: "Desposit",
name: name,
value: 100,
retCh: retCh,
}
withdrawReq := &Request{
op: "Withdraw",
name: name,
value: 110,
retCh: retCh,
}
queryReq := &Request{
op: "Query",
name: name,
retCh: retCh,
}
reqs := []*Request{addReq, depReq, withdrawReq, queryReq}
for _, req := range reqs {
reqCh <- req
waitResp(req)
}
}
// xiaogang runs the operations of client xiaogang
func xiaogang(wg *sync.WaitGroup, reqCh chan<- *Request) {
name := "xiaogang"
retCh := make(chan *Result)
defer func() {
close(retCh)
wg.Done()
}()
addReq := &Request{
op: "AddUser",
name: name,
retCh: retCh,
}
depReq := &Request{
op: "Desposit",
name: name,
value: 200,
retCh: retCh,
}
withdrawReq := &Request{
op: "Withdraw",
name: name,
value: 70,
retCh: retCh,
}
queryReq := &Request{
op: "Query",
name: name,
retCh: retCh,
}
reqs := []*Request{addReq, depReq, withdrawReq, queryReq}
for _, req := range reqs {
reqCh <- req
waitResp(req)
}
}
// waitResp waits for the response to req and prints the result
func waitResp(req *Request) {
ret := <-req.retCh
if ret.status {
if req.op == "Desposit" || req.op == "Withdraw" {
fmt.Printf("%s %s %d success, balance = %d.\n", req.name, req.op, req.value, ret.balance)
} else {
fmt.Printf("%s %s success, balance = %d.\n", req.name, req.op, ret.balance)
}
} else {
if req.op == "Desposit" || req.op == "Withdraw" {
fmt.Printf("%s %s %d fail, message = %s.\n", req.name, req.op, req.value, FailMes[req.op])
} else if req.op == "AddUser" || req.op == "Query" {
fmt.Printf("%s %s fail, message = %s.\n", req.name, req.op, FailMes[req.op])
} else {
fmt.Printf("%s %s fail, message = %s.\n", req.name, req.op, LoopFail)
}
}
}
func main() {
	// create the request channel and the bank
reqCh := make(chan *Request, 100)
bank := NewBank()
	// the bank processes requests in its own goroutine
go bank.Loop(reqCh)
	// xiaoming and xiaogang deposit and withdraw concurrently in two goroutines
var wg sync.WaitGroup
wg.Add(2)
go xiaoming(&wg, reqCh)
go xiaogang(&wg, reqCh)
	// wait for both clients to finish
wg.Wait()
close(reqCh)
	// give the bank loop a moment so we can see whether it exits
time.Sleep(time.Second)
}
|
[
5
] |
package main
import (
"fmt"
"math/rand"
"net/http"
"os"
"time"
"github.com/newrelic/go-agent/v3/newrelic"
)
const (
fcRequestIDHeader = "x-fc-request-id"
fcControlPath = "x-fc-control-path"
fcLogInvokeStartPrefix = "FC Invoke Start RequestId: %s"
fcLogInvokeEndPrefix = "FC Invoke End RequestId: %s"
fcLogInitializeStartPrefix = "FC Initialize Start RequestId: %s"
fcLogInitializeEndPrefix = "FC Initialize End RequestId: %s"
fcLogPreFreezeStartPrefix = "FC PreFreeze Start RequestId: %s"
fcLogPreFreezeEndPrefix = "FC PreFreeze End RequestId: %s"
fcLogPreStopStartPrefix = "FC PreStop Start RequestId: %s"
fcLogPreStopEndPrefix = "FC PreStop End RequestId: %s"
)
// The initialize hook is bound to the container lifecycle.
// We use the custom runtime rather than a custom container, which would also require building an image.
func main() {
fmt.Println("FunctionCompute golang runtime inited.")
http.HandleFunc("/", handle)
port := os.Getenv("FC_SERVER_PORT")
if port == "" {
port = "9000"
}
	if err := http.ListenAndServe(":"+port, nil); err != nil {
		fmt.Println("ListenAndServe error:", err)
	}
}
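// handle dispatches requests based on the x-fc-control-path header: the
// lifecycle hooks (/initialize, /pre-freeze, /pre-stop) are handled separately
// from ordinary invocations.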
func handle(w http.ResponseWriter, req *http.Request) {
controlPath := req.Header.Get(fcControlPath)
fmt.Println("controlPath", controlPath)
if controlPath == "/initialize" {
initializeHandler(w, req)
} else if controlPath == "/pre-freeze" {
preFreezeHandler(w, req)
} else if controlPath == "/pre-stop" {
preStopHandler(w, req)
} else {
invokeHandler(w, req)
}
}
var app *newrelic.Application
func initializeHandler(w http.ResponseWriter, req *http.Request) {
requestID := req.Header.Get(fcRequestIDHeader)
fmt.Println(fmt.Sprintf(fcLogInitializeStartPrefix, requestID))
defer func() {
fmt.Println(fmt.Sprintf(fcLogInitializeEndPrefix, requestID))
}()
	application, err := newrelic.NewApplication(
		newrelic.ConfigAppName("newrelic-demo"),
		newrelic.ConfigLicense("97dc68c3ea1926da0d6988fbce83e9652193NRAL"),
		newrelic.ConfigDistributedTracerEnabled(true))
	if err != nil {
		fmt.Println("An error occurred when creating the New Relic application")
		panic(err)
	}
	app = application
w.Write([]byte(""))
}
func preFreezeHandler(w http.ResponseWriter, req *http.Request) {
requestID := req.Header.Get(fcRequestIDHeader)
fmt.Println(fmt.Sprintf(fcLogPreFreezeStartPrefix, requestID))
defer func() {
fmt.Println(fmt.Sprintf(fcLogPreFreezeEndPrefix, requestID))
}()
time.Sleep(2 * time.Second)
w.Write([]byte(""))
}
func preStopHandler(w http.ResponseWriter, req *http.Request) {
requestID := req.Header.Get(fcRequestIDHeader)
fmt.Println(fmt.Sprintf(fcLogPreStopStartPrefix, requestID))
defer func() {
fmt.Println(fmt.Sprintf(fcLogPreStopEndPrefix, requestID))
}()
w.Write([]byte(""))
}
func invokeHandler(w http.ResponseWriter, req *http.Request) {
txn := app.StartTransaction("invoke")
defer txn.End()
requestID := req.Header.Get(fcRequestIDHeader)
fmt.Println(fmt.Sprintf(fcLogInvokeStartPrefix, requestID))
defer func() {
fmt.Println(fmt.Sprintf(fcLogInvokeEndPrefix, requestID))
}()
headerComponent := txn.StartSegment("header")
	n := rand.Intn(20) // n will be between 0 and 19
fmt.Printf("Sleeping %d ms...\n", 20+n)
time.Sleep(time.Duration(20+n) * time.Millisecond)
w.WriteHeader(http.StatusOK)
headerComponent.End()
	bodyComponent := txn.StartSegment("body")
	n = rand.Intn(30) // n will be between 0 and 29
fmt.Printf("Sleeping %d ms...\n", 30+n)
time.Sleep(time.Duration(30+n) * time.Millisecond)
bodyComponent.End()
	w.Write([]byte("Hello, golang http invoke!"))
}
|
[
5
] |
// +build off
package python
import (
"encoding/json"
"path/filepath"
"strings"
"sourcegraph.com/sourcegraph/srclib/graph"
)
func init() {
graph.RegisterMakeSymbolFormatter(DistPackageDisplayName, newSymbolFormatter)
}
func newSymbolFormatter(s *graph.Symbol) graph.SymbolFormatter {
var si symbolData
if len(s.Data) > 0 {
if err := json.Unmarshal(s.Data, &si); err != nil {
panic("unmarshal Python symbol data: " + err.Error())
}
}
return symbolFormatter{s, &si}
}
type symbolFormatter struct {
symbol *graph.Symbol
data *symbolData
}
func (f symbolFormatter) Language() string { return "Python" }
func (f symbolFormatter) DefKeyword() string {
if f.isFunc() {
return "def"
}
if f.data.Kind == "class" {
return "class"
}
if f.data.Kind == "module" {
return "module"
}
if f.data.Kind == "package" {
return "package"
}
return ""
}
func (f symbolFormatter) Kind() string { return f.data.Kind }
func dotted(slashed string) string { return strings.Replace(slashed, "/", ".", -1) }
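// Name returns the symbol's name at the requested qualification level,
// qualifying it relative to the containing package or module when needed.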
func (f symbolFormatter) Name(qual graph.Qualification) string {
if qual == graph.Unqualified {
return f.symbol.Name
}
// Get the name of the containing package or module
var containerName string
if filename := filepath.Base(f.symbol.File); filename == "__init__.py" {
containerName = filepath.Base(filepath.Dir(f.symbol.File))
} else if strings.HasSuffix(filename, ".py") {
containerName = filename[:len(filename)-len(".py")]
} else if strings.HasSuffix(filename, ".c") {
// Special case for Standard Lib C extensions
return dotted(string(f.symbol.TreePath))
} else {
// Should never reach here, but fall back to TreePath if we do
return string(f.symbol.TreePath)
}
// Compute the path relative to the containing package or module
var treePathCmps = strings.Split(string(f.symbol.TreePath), "/")
// Note(kludge): The first occurrence of the container name in the treepath may not be the correct occurrence.
containerCmpIdx := -1
for t, component := range treePathCmps {
if component == containerName {
containerCmpIdx = t
break
}
}
var relTreePath string
if containerCmpIdx != -1 {
relTreePath = strings.Join(treePathCmps[containerCmpIdx+1:], "/")
if relTreePath == "" {
relTreePath = "."
}
} else {
// Should never reach here, but fall back to the unqualified name if we do
relTreePath = f.symbol.Name
}
switch qual {
case graph.ScopeQualified:
return dotted(relTreePath)
case graph.DepQualified:
return dotted(filepath.Join(containerName, relTreePath))
case graph.RepositoryWideQualified:
return dotted(string(f.symbol.TreePath))
case graph.LanguageWideQualified:
return string(f.symbol.Repo) + "/" + f.Name(graph.RepositoryWideQualified)
}
panic("Name: unhandled qual " + string(qual))
}
func (f symbolFormatter) isFunc() bool {
k := f.data.Kind
return k == "function" || k == "method" || k == "constructor"
}
func (f symbolFormatter) NameAndTypeSeparator() string {
if f.isFunc() {
return ""
}
return " "
}
func (f symbolFormatter) Type(qual graph.Qualification) string {
fullSig := f.data.FuncSignature
if strings.Contains(fullSig, ")") { // kludge to get rid of extra type info (very noisy)
return fullSig[:strings.Index(fullSig, ")")+1]
}
return fullSig
}
|
[
5
] |
package api
import (
"encoding/json"
"fmt"
log "github.com/sirupsen/logrus"
"github.com/wneessen/sotbot/httpclient"
"net/http"
)
type Factions struct {
AthenasFortune FactionReputation `json:"AthenasFortune"`
BilgeRats FactionReputation `json:"BilgeRats"`
GoldHoarders FactionReputation `json:"GoldHoarders"`
HuntersCall FactionReputation `json:"HuntersCall"`
MerchantAlliance FactionReputation `json:"MerchantAlliance"`
OrderOfSouls FactionReputation `json:"OrderOfSouls"`
ReapersBones FactionReputation `json:"ReapersBones"`
SeaDogs FactionReputation `json:"SeaDogs"`
}
type FactionReputation struct {
Name string
Motto string `json:"Motto"`
Level int `json:"Level"`
Rank string `json:"Rank"`
Xp int `json:"Xp"`
NextLevel FactionNextLevel `json:"NextCompanyLevel"`
}
type FactionNextLevel struct {
Level int `json:"Level"`
XpRequired int `json:"XpRequiredToAttain"`
}
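// GetFactionReputation fetches the player's reputation from the Sea of Thieves
// API and returns the entry for the requested faction; f must be one of:
// athena, bilge, hoarder, hunter, merchant, order, reaper, seadog.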
func GetFactionReputation(hc *http.Client, rc string, f string) (FactionReputation, error) {
l := log.WithFields(log.Fields{
"action": "sotapi.GetFactionReputation",
})
apiUrl := "https://www.seaofthieves.com/api/profilev2/reputation"
l.Debugf("Fetching user reputation in %v faction from API...", f)
var userReps Factions
httpResp, err := httpclient.HttpReqGet(apiUrl, hc, &rc, nil, false)
if err != nil {
return FactionReputation{}, err
}
if err := json.Unmarshal(httpResp, &userReps); err != nil {
l.Errorf("Failed to unmarshal API response: %v", err)
return FactionReputation{}, err
}
switch f {
case "athena":
		userReps.AthenasFortune.Name = "Athena's Fortune"
return userReps.AthenasFortune, nil
case "bilge":
userReps.BilgeRats.Name = "Bilge Rats"
return userReps.BilgeRats, nil
case "hoarder":
userReps.GoldHoarders.Name = "Gold Hoarders"
return userReps.GoldHoarders, nil
case "hunter":
userReps.HuntersCall.Name = "Hunter's Call"
return userReps.HuntersCall, nil
case "merchant":
userReps.MerchantAlliance.Name = "Merchant Alliance"
return userReps.MerchantAlliance, nil
case "order":
userReps.OrderOfSouls.Name = "Order of Souls"
return userReps.OrderOfSouls, nil
case "reaper":
userReps.ReapersBones.Name = "Reaper's Bones"
return userReps.ReapersBones, nil
case "seadog":
userReps.SeaDogs.Name = "Sea Dogs"
return userReps.SeaDogs, nil
default:
l.Errorf("Wrong faction name provided")
return FactionReputation{}, fmt.Errorf("Unknown faction")
}
}
|
[
6
] |
package worker
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"time"
"github.com/razorpay/metro/internal/subscriber"
"github.com/razorpay/metro/internal/subscription"
"github.com/razorpay/metro/pkg/logger"
metrov1 "github.com/razorpay/metro/rpc/proto/v1"
"golang.org/x/sync/errgroup"
)
// PushStream provides reads from broker and publishes messages for the push subscription
type PushStream struct {
ctx context.Context
cancelFunc func()
nodeID string
subscription *subscription.Model
subscriptionCore subscription.ICore
subscriberCore subscriber.ICore
subs subscriber.ISubscriber
httpClient *http.Client
doneCh chan struct{}
}
// Start reads the messages from the broker and publish them to the subscription endpoint
func (ps *PushStream) Start() error {
defer close(ps.doneCh)
var (
err error
// init these channels and pass to subscriber
// the lifecycle of these channels should be maintained by the user
subscriberRequestCh = make(chan *subscriber.PullRequest)
subscriberAckCh = make(chan *subscriber.AckMessage)
subscriberModAckCh = make(chan *subscriber.ModAckMessage)
)
ps.subs, err = ps.subscriberCore.NewSubscriber(ps.ctx, ps.nodeID, ps.subscription, 100, 50, 0,
subscriberRequestCh, subscriberAckCh, subscriberModAckCh)
if err != nil {
logger.Ctx(ps.ctx).Errorw("worker: error creating subscriber", "subscription", ps.subscription.Name, "error", err.Error())
return err
}
errGrp, gctx := errgroup.WithContext(ps.ctx)
errGrp.Go(func() error {
// Read from broker and publish to response channel in a go routine
for {
select {
case <-gctx.Done():
logger.Ctx(ps.ctx).Infow("worker: subscriber request and response stopped", "subscription", ps.subscription.Name, "subscriberId", ps.subs.GetID())
// close all subscriber channels
close(subscriberRequestCh)
close(subscriberAckCh)
close(subscriberModAckCh)
// stop the subscriber after all the send channels are closed
ps.stopSubscriber()
return gctx.Err()
case err = <-ps.subs.GetErrorChannel():
// if channel is closed, this can return with a nil error value
if err != nil {
logger.Ctx(ps.ctx).Errorw("worker: error from subscriber", "subscription", ps.subscription.Name, "subscriberId", ps.subs.GetID(), "err", err.Error())
workerSubscriberErrors.WithLabelValues(env, ps.subscription.ExtractedTopicName, ps.subscription.Name, err.Error(), ps.subs.GetID()).Inc()
}
default:
logger.Ctx(ps.ctx).Debugw("worker: sending a subscriber pull request", "subscription", ps.subscription.Name, "subscriberId", ps.subs.GetID())
ps.subs.GetRequestChannel() <- &subscriber.PullRequest{MaxNumOfMessages: 10}
logger.Ctx(ps.ctx).Debugw("worker: waiting for subscriber data response", "subscription", ps.subscription.Name, "subscriberId", ps.subs.GetID())
data := <-ps.subs.GetResponseChannel()
if data != nil && data.ReceivedMessages != nil && len(data.ReceivedMessages) > 0 {
logger.Ctx(ps.ctx).Infow("worker: received response data from channel", "subscription", ps.subscription.Name, "subscriberId", ps.subs.GetID())
ps.processPushStreamResponse(ps.ctx, ps.subscription, data)
}
}
}
})
return errGrp.Wait()
}
// Stop is used to terminate the push subscription processing
func (ps *PushStream) Stop() error {
logger.Ctx(ps.ctx).Infow("worker: push stream stop invoked", "subscription", ps.subscription.Name)
// signal to stop all go routines
ps.cancelFunc()
// wait for stop to complete
<-ps.doneCh
return nil
}
func (ps *PushStream) stopSubscriber() {
// stop the subscriber
if ps.subs != nil {
logger.Ctx(ps.ctx).Infow("worker: stopping subscriber", "subscription", ps.subscription.Name, "subcriber_id", ps.subs.GetID())
ps.subs.Stop()
}
}
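// processPushStreamResponse posts each received message to the subscription's
// push endpoint, acking the message on a 2xx response and nacking it otherwise.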
func (ps *PushStream) processPushStreamResponse(ctx context.Context, subModel *subscription.Model, data *metrov1.PullResponse) {
logger.Ctx(ctx).Infow("worker: response", "data", data, "subscription", ps.subscription.Name, "subscriberId", ps.subs.GetID())
for _, message := range data.ReceivedMessages {
logger.Ctx(ps.ctx).Infow("worker: publishing response data to subscription endpoint", "subscription", ps.subscription.Name, "subscriberId", ps.subs.GetID())
if message.AckId == "" {
continue
}
startTime := time.Now()
pushRequest := newPushEndpointRequest(message, subModel.Key())
postBody, _ := json.Marshal(pushRequest)
postData := bytes.NewBuffer(postBody)
		req, err := http.NewRequest("POST", subModel.PushEndpoint, postData)
		if err != nil {
			logger.Ctx(ps.ctx).Errorw("worker: error creating push request", "subscription", ps.subscription.Name, "subscriberId", ps.subs.GetID(), "error", err.Error())
			ps.nack(ctx, message)
			continue
		}
		if subModel.HasCredentials() {
			req.SetBasicAuth(subModel.GetCredentials().GetUsername(), subModel.GetCredentials().GetPassword())
		}
logger.Ctx(ps.ctx).Infow("worker: posting messages to subscription url", "subscription", ps.subscription.Name, "subscriberId", ps.subs.GetID(), "endpoint", subModel.PushEndpoint)
resp, err := ps.httpClient.Do(req)
workerPushEndpointCallsCount.WithLabelValues(env, subModel.ExtractedTopicName, subModel.ExtractedSubscriptionName, subModel.PushEndpoint, ps.subs.GetID()).Inc()
		workerPushEndpointTimeTaken.WithLabelValues(env, subModel.ExtractedTopicName, subModel.ExtractedSubscriptionName, subModel.PushEndpoint).Observe(time.Since(startTime).Seconds())
if err != nil {
logger.Ctx(ps.ctx).Errorw("worker: error posting messages to subscription url", "subscription", ps.subscription.Name, "subscriberId", ps.subs.GetID(), "error", err.Error())
ps.nack(ctx, message)
return
}
logger.Ctx(ps.ctx).Infow("worker: push response received for subscription", "status", resp.StatusCode, "subscription", ps.subscription.Name, "subscriberId", ps.subs.GetID())
workerPushEndpointHTTPStatusCode.WithLabelValues(env, subModel.ExtractedTopicName, subModel.ExtractedSubscriptionName, subModel.PushEndpoint, fmt.Sprintf("%v", resp.StatusCode)).Inc()
if resp.StatusCode >= 200 && resp.StatusCode < 300 {
// Ack
ps.ack(ctx, message)
workerMessagesAckd.WithLabelValues(env, subModel.ExtractedTopicName, subModel.ExtractedSubscriptionName, subModel.PushEndpoint, ps.subs.GetID()).Inc()
} else {
// Nack
ps.nack(ctx, message)
workerMessagesNAckd.WithLabelValues(env, subModel.ExtractedTopicName, subModel.ExtractedSubscriptionName, subModel.PushEndpoint, ps.subs.GetID()).Inc()
}
// discard response.Body after usage and ignore errors
_, err = io.Copy(ioutil.Discard, resp.Body)
err = resp.Body.Close()
if err != nil {
logger.Ctx(ps.ctx).Errorw("worker: push response error on response io close()", "status", resp.StatusCode, "subscription", ps.subscription.Name, "subscriberId", ps.subs.GetID(), "error", err.Error())
}
// TODO: read response body if required by publisher later
}
}
func (ps *PushStream) nack(ctx context.Context, message *metrov1.ReceivedMessage) {
logger.Ctx(ps.ctx).Infow("worker: sending nack request to subscriber", "ackId", message.AckId, "subscription", ps.subscription.Name, "subscriberId", ps.subs.GetID())
ackReq := subscriber.ParseAckID(message.AckId)
// deadline is set to 0 for nack
modackReq := subscriber.NewModAckMessage(ackReq, 0)
// check for closed channel before sending request
if ps.subs.GetModAckChannel() != nil {
ps.subs.GetModAckChannel() <- modackReq
}
}
func (ps *PushStream) ack(ctx context.Context, message *metrov1.ReceivedMessage) {
logger.Ctx(ps.ctx).Infow("worker: sending ack request to subscriber", "ackId", message.AckId, "subscription", ps.subscription.Name, "subscriberId", ps.subs.GetID())
ackReq := subscriber.ParseAckID(message.AckId)
// check for closed channel before sending request
if ps.subs.GetAckChannel() != nil {
ps.subs.GetAckChannel() <- ackReq
}
}
// NewPushStream return a push stream obj which is used for push subscriptions
func NewPushStream(ctx context.Context, nodeID string, subName string, subscriptionCore subscription.ICore, subscriberCore subscriber.ICore, config *HTTPClientConfig) *PushStream {
pushCtx, cancelFunc := context.WithCancel(ctx)
// get subscription Model details
subModel, err := subscriptionCore.Get(pushCtx, subName)
if err != nil {
logger.Ctx(pushCtx).Errorf("error fetching subscription: %s", err.Error())
return nil
}
// set http connection timeout from the subscription
if subModel.AckDeadlineSec != 0 {
// make sure to convert sec to milli-sec
config.ConnectTimeoutMS = int(subModel.AckDeadlineSec) * 1e3
}
httpclient := NewHTTPClientWithConfig(config)
return &PushStream{
ctx: pushCtx,
cancelFunc: cancelFunc,
nodeID: nodeID,
subscription: subModel,
subscriptionCore: subscriptionCore,
subscriberCore: subscriberCore,
doneCh: make(chan struct{}),
httpClient: httpclient,
}
}
// NewHTTPClientWithConfig return a http client
func NewHTTPClientWithConfig(config *HTTPClientConfig) *http.Client {
tr := &http.Transport{
ResponseHeaderTimeout: time.Duration(config.ResponseHeaderTimeoutMS) * time.Millisecond,
DialContext: (&net.Dialer{
KeepAlive: time.Duration(config.ConnKeepAliveMS) * time.Millisecond,
Timeout: time.Duration(config.ConnectTimeoutMS) * time.Millisecond,
}).DialContext,
MaxIdleConns: config.MaxAllIdleConns,
IdleConnTimeout: time.Duration(config.IdleConnTimeoutMS) * time.Millisecond,
TLSHandshakeTimeout: time.Duration(config.TLSHandshakeTimeoutMS) * time.Millisecond,
MaxIdleConnsPerHost: config.MaxHostIdleConns,
ExpectContinueTimeout: time.Duration(config.ExpectContinueTimeoutMS) * time.Millisecond,
}
return &http.Client{Transport: tr}
}
func newPushEndpointRequest(message *metrov1.ReceivedMessage, subscription string) *metrov1.PushEndpointRequest {
return &metrov1.PushEndpointRequest{
Message: message.Message,
Subscription: subscription,
}
}
|
[
6
] |
package main
import (
"bufio"
"fmt"
"io"
"math"
"os"
"strconv"
"strings"
)
// Complete the surfaceArea function below.
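// It sums the top faces (the +1 per cell), the bottom faces (btmTotal) and the
// exposed vertical faces between neighbouring cells and along the grid border,
// in both the row and the column direction.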
func surfaceArea(A [][]int32) int32 {
var prev int32 = 0
var total int32 = 0
var sideTotal int32 = 0
var btmTotal int32 = 0
side := make([]int32, len(A[0]))
for i := range A {
for j := range A[i] {
if A[i][j] != 0 {
btmTotal += 1
}
if j == len(A[i])-1 {
total = total + int32(math.Abs(float64(A[i][j])-float64(prev))) + 1 + A[i][j]
} else if j == 0 {
total = total + A[i][j] + 1
prev = A[i][j]
} else {
total = total + int32(math.Abs(float64(A[i][j])-float64(prev))) + 1
prev = A[i][j]
}
if i == len(A)-1 {
sideTotal = sideTotal + int32(math.Abs(float64(A[i][j])-float64(side[j]))) + A[i][j]
} else {
sideTotal = sideTotal + int32(math.Abs(float64(A[i][j])-float64(side[j])))
}
}
side = A[i]
}
return total + sideTotal + btmTotal
}
func main() {
reader := bufio.NewReaderSize(os.Stdin, 1024*1024)
HW := strings.Split(readLine(reader), " ")
HTemp, err := strconv.ParseInt(HW[0], 10, 64)
checkError(err)
H := int32(HTemp)
WTemp, err := strconv.ParseInt(HW[1], 10, 64)
checkError(err)
W := int32(WTemp)
var A [][]int32
for i := 0; i < int(H); i++ {
ARowTemp := strings.Split(readLine(reader), " ")
var ARow []int32
for _, ARowItem := range ARowTemp {
AItemTemp, err := strconv.ParseInt(ARowItem, 10, 64)
checkError(err)
AItem := int32(AItemTemp)
ARow = append(ARow, AItem)
}
		if len(ARow) != int(W) {
panic("Bad input")
}
A = append(A, ARow)
}
result := surfaceArea(A)
fmt.Println(result)
}
func readLine(reader *bufio.Reader) string {
str, _, err := reader.ReadLine()
if err == io.EOF {
return ""
}
return strings.TrimRight(string(str), "\r\n")
}
func checkError(err error) {
if err != nil {
panic(err)
}
}
|
[
0,
2,
5
] |
// Copyright (c) 2018 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nsplugin
import (
"os"
"syscall"
"github.com/vishvananda/netlink"
"github.com/vishvananda/netns"
)
// SystemAPI defines all methods required for managing operating system, system calls and namespaces on system level
type SystemAPI interface {
OperatingSystem
Syscall
NetNsNamespace
NetlinkNamespace
}
// OperatingSystem defines all methods calling os package
type OperatingSystem interface {
// Open file
OpenFile(name string, flag int, perm os.FileMode) (*os.File, error)
// MkDirAll creates a directory with all parent directories
MkDirAll(path string, perm os.FileMode) error
// Remove removes named file or directory
Remove(name string) error
}
// Syscall defines methods using low-level operating system primitives
type Syscall interface {
// Mount makes resources available
Mount(source string, target string, fsType string, flags uintptr, data string) error
// Unmount resources
Unmount(target string, flags int) (err error)
}
// NetNsNamespace defines method for namespace handling from netns package
type NetNsNamespace interface {
	// NewNetworkNamespace creates a new namespace and returns a handle to manage it further
NewNetworkNamespace() (ns netns.NsHandle, err error)
// GetNamespaceFromName returns namespace handle from its name
GetNamespaceFromName(name string) (ns netns.NsHandle, err error)
// SetNamespace sets the current namespace to the namespace represented by the handle
SetNamespace(ns netns.NsHandle) (err error)
}
// NetlinkNamespace defines method for namespace handling from netlink package
type NetlinkNamespace interface {
// LinkSetNsFd puts the device into a new network namespace.
LinkSetNsFd(link netlink.Link, fd int) (err error)
}
// SystemHandler implements interfaces.
type SystemHandler struct{}
// NewSystemHandler returns new handler.
func NewSystemHandler() *SystemHandler {
return &SystemHandler{}
}
/* Operating system */
// OpenFile implements OperatingSystem.
func (osh *SystemHandler) OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) {
return os.OpenFile(name, flag, perm)
}
// MkDirAll implements OperatingSystem.
func (osh *SystemHandler) MkDirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
// Remove implements OperatingSystem.
func (osh *SystemHandler) Remove(name string) error {
return os.Remove(name)
}
/* Syscall */
// Mount implements Syscall.
func (osh *SystemHandler) Mount(source string, target string, fsType string, flags uintptr, data string) error {
return syscall.Mount(source, target, fsType, flags, data)
}
// Unmount implements Syscall.
func (osh *SystemHandler) Unmount(target string, flags int) error {
return syscall.Unmount(target, flags)
}
/* Netns namespace */
// NewNetworkNamespace implements NetNsNamespace.
func (osh *SystemHandler) NewNetworkNamespace() (ns netns.NsHandle, err error) {
return netns.New()
}
// GetNamespaceFromName implements NetNsNamespace.
func (osh *SystemHandler) GetNamespaceFromName(name string) (ns netns.NsHandle, err error) {
return netns.GetFromName(name)
}
// SetNamespace implements NetNsNamespace.
func (osh *SystemHandler) SetNamespace(ns netns.NsHandle) (err error) {
return netns.Set(ns)
}
/* Netlink namespace */
// LinkSetNsFd implements NetlinkNamespace.
func (osh *SystemHandler) LinkSetNsFd(link netlink.Link, fd int) (err error) {
return netlink.LinkSetNsFd(link, fd)
}
|
[
6
] |
package zfs
// #include <stdlib.h>
// #include <libzfs.h>
// #include "common.h"
// #include "zpool.h"
// #include "zfs.h"
// #include <memory.h>
// #include <string.h>
import "C"
import (
"fmt"
"io/ioutil"
"os"
"path"
"regexp"
"strconv"
"strings"
"time"
"unsafe"
)
func to_boolean_t(a bool) C.boolean_t {
if a {
return 1
}
return 0
}
func to_sendflags_t(flags *SendFlags) (cflags *C.sendflags_t) {
cflags = C.alloc_sendflags()
cflags.verbose = to_boolean_t(flags.Verbose)
cflags.replicate = to_boolean_t(flags.Replicate)
cflags.doall = to_boolean_t(flags.DoAll)
cflags.fromorigin = to_boolean_t(flags.FromOrigin)
cflags.dedup = to_boolean_t(flags.Dedup)
cflags.props = to_boolean_t(flags.Props)
cflags.dryrun = to_boolean_t(flags.DryRun)
cflags.parsable = to_boolean_t(flags.Parsable)
cflags.progress = to_boolean_t(flags.Progress)
cflags.largeblock = to_boolean_t(flags.LargeBlock)
cflags.embed_data = to_boolean_t(flags.EmbedData)
cflags.compress = to_boolean_t(flags.Compress)
if flags.Raw {
C.sendflags_set_raw(cflags)
}
return
}
func to_recvflags_t(flags *RecvFlags) (cflags *C.recvflags_t) {
cflags = C.alloc_recvflags()
cflags.verbose = to_boolean_t(flags.Verbose)
cflags.isprefix = to_boolean_t(flags.IsPrefix)
cflags.istail = to_boolean_t(flags.IsTail)
cflags.dryrun = to_boolean_t(flags.DryRun)
cflags.force = to_boolean_t(flags.Force)
cflags.canmountoff = to_boolean_t(flags.CanmountOff)
cflags.resumable = to_boolean_t(flags.Resumable)
cflags.byteswap = to_boolean_t(flags.ByteSwap)
cflags.nomount = to_boolean_t(flags.NoMount)
return
}
func (d *Dataset) send(FromName string, outf *os.File, flags *SendFlags) (err error) {
var cfromname, ctoname *C.char
var dpath string
var pd *Dataset
if d.Type != DatasetTypeSnapshot || (len(FromName) > 0 && strings.Contains(FromName, "#")) {
err = NewError(ENotsup, "Unsupported method on filesystem or bookmark. Use func SendOne() for that purpose.")
return
}
cflags := to_sendflags_t(flags)
defer C.free(unsafe.Pointer(cflags))
if dpath, err = d.Path(); err != nil {
return
}
sendparams := strings.Split(dpath, "@")
parent := sendparams[0]
if len(FromName) > 0 {
if FromName[0] == '@' {
FromName = FromName[1:]
} else if strings.Contains(FromName, "/") {
from := strings.Split(FromName, "@")
if len(from) > 0 {
FromName = from[1]
}
}
cfromname = C.CString(FromName)
defer C.free(unsafe.Pointer(cfromname))
}
ctoname = C.CString(sendparams[1])
defer C.free(unsafe.Pointer(ctoname))
if pd, err = DatasetOpen(parent); err != nil {
return
}
defer pd.Close()
pd.openHdl()
defer pd.closeHdl()
cerr := C.zfs_send(pd.zh, cfromname, ctoname, cflags, C.int(outf.Fd()), nil, nil, nil)
if cerr != 0 {
err = LastError()
}
return
}
func (d *Dataset) SendOne(FromName string, outf *os.File, flags *SendFlags) (err error) {
var cfromname, ctoname *C.char
var dpath string
if d.Type == DatasetTypeSnapshot || (len(FromName) > 0 && !strings.Contains(FromName, "#")) {
err = NewError(ENotsup, "Unsupported with snapshot. Use func Send() for that purpose.")
return
}
if flags.Replicate || flags.DoAll || flags.Props || flags.Dedup || flags.DryRun {
err = NewError(ENotsup, "Unsupported flag with filesystem or bookmark.")
return
}
cflags := to_sendflags_t(flags)
defer C.free(unsafe.Pointer(cflags))
if dpath, err = d.Path(); err != nil {
return
}
if len(FromName) > 0 {
if FromName[0] == '#' || FromName[0] == '@' {
FromName = dpath + FromName
}
cfromname = C.CString(FromName)
defer C.free(unsafe.Pointer(cfromname))
}
ctoname = C.CString(path.Base(dpath))
defer C.free(unsafe.Pointer(ctoname))
d.openHdl()
defer d.closeHdl()
cerr := C.gozfs_send_one(d.zh, cfromname, C.int(outf.Fd()), cflags, nil)
if cerr != 0 {
err = LastError()
}
return
}
func (d *Dataset) Send(outf *os.File, flags SendFlags) (err error) {
if flags.Replicate {
flags.DoAll = true
}
err = d.send("", outf, &flags)
return
}
func (d *Dataset) SendFrom(FromName string, outf *os.File, flags SendFlags) (err error) {
var porigin PropertyValue
var from, dest []string
if err = d.ReloadProperties(); err != nil {
return
}
porigin, _ = d.GetProperty(DatasetPropOrigin)
if len(porigin.Value) > 0 && porigin.Value == FromName {
FromName = ""
flags.FromOrigin = true
} else {
var dpath string
if dpath, err = d.Path(); err != nil {
return
}
dest = strings.Split(dpath, "@")
from = strings.Split(FromName, "@")
if len(from[0]) > 0 && from[0] != dest[0] {
err = NewError(ENotsup, "incremental source must be in same filesystem.")
return
}
		if len(from) < 2 || strings.Contains(from[1], "@") || strings.Contains(from[1], "/") {
			err = NewError(ENotsup, "invalid incremental source.")
			return
		}
		FromName = "@" + from[1]
	}
	err = d.send(FromName, outf, &flags)
return
}
// SendSize - estimate snapshot size to transfer
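// It runs a dry-run send with parsable output, captures libzfs's stdout
// through a pipe, and parses the reported "size" value.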
func (d *Dataset) SendSize(FromName string, flags SendFlags) (size int64, err error) {
var r, w *os.File
errch := make(chan error)
defer func() {
select {
case <-errch:
default:
}
close(errch)
}()
flags.DryRun = true
flags.Verbose = true
flags.Progress = true
flags.Parsable = true
if r, w, err = os.Pipe(); err != nil {
return
}
defer r.Close()
go func() {
var tmpe error
saveOut := C.redirect_libzfs_stdout(C.int(w.Fd()))
if saveOut < 0 {
tmpe = NewError(ENotsup, fmt.Sprintf("Redirection of zfslib stdout failed %d", saveOut))
} else {
tmpe = d.send(FromName, w, &flags)
C.restore_libzfs_stdout(saveOut)
}
w.Close()
errch <- tmpe
}()
r.SetReadDeadline(time.Now().Add(15 * time.Second))
var data []byte
if data, err = ioutil.ReadAll(r); err != nil {
return
}
// parse size
var sizeRe *regexp.Regexp
if sizeRe, err = regexp.Compile("size[ \t]*([0-9]+)"); err != nil {
return
}
matches := sizeRe.FindAllSubmatch(data, 3)
if len(matches) > 0 && len(matches[0]) > 1 {
if size, err = strconv.ParseInt(
string(matches[0][1]), 10, 64); err != nil {
return
}
}
err = <-errch
return
}
// Receive - receive snapshot stream
func (d *Dataset) Receive(inf *os.File, flags RecvFlags) (err error) {
var dpath string
if dpath, err = d.Path(); err != nil {
return
}
props := C.new_property_nvlist()
if props == nil {
err = NewError(ENomem, "Out of memory func (d *Dataset) Recv()")
return
}
defer C.nvlist_free(props)
cflags := to_recvflags_t(&flags)
defer C.free(unsafe.Pointer(cflags))
dest := C.CString(dpath)
defer C.free(unsafe.Pointer(dest))
ec := C.zfs_receive(C.libzfs_get_handle(), dest, nil, cflags, C.int(inf.Fd()), nil)
if ec != 0 {
err = LastError()
}
return
}
|
[
2
] |
package utils
import (
"math/rand"
"time"
"github.com/iotaledger/hive.go/syncutils"
"github.com/iotaledger/iota.go/consts"
"github.com/iotaledger/iota.go/trinary"
)
var (
seededRand = rand.New(rand.NewSource(time.Now().UnixNano()))
randLock = &syncutils.Mutex{}
charsetTrytes = "ABCDEFGHIJKLMNOPQRSTUVWXYZ9"
)
// RandomInsecure returns a random int in the range min to max, inclusive of max.
// The result is not cryptographically secure.
func RandomInsecure(min int, max int) int {
// Rand needs to be locked: https://github.com/golang/go/issues/3611
randLock.Lock()
defer randLock.Unlock()
return seededRand.Intn(max+1-min) + min
}
// RandomTrytesInsecure returns random Trytes with the given length.
// The result is not cryptographically secure.
// DO NOT USE this function to generate a seed.
func RandomTrytesInsecure(length int) trinary.Trytes {
// Rand needs to be locked: https://github.com/golang/go/issues/3611
randLock.Lock()
defer randLock.Unlock()
trytes := make([]byte, length)
for i := range trytes {
trytes[i] = charsetTrytes[seededRand.Intn(len(charsetTrytes))]
}
return trinary.Trytes(trytes)
}
// RandomKerlHashTrytesInsecure returns random hash trytes.
// Since the result mimics a Kerl hash, the last trit will be zero.
func RandomKerlHashTrytesInsecure() trinary.Hash {
// Rand needs to be locked: https://github.com/golang/go/issues/3611
randLock.Lock()
defer randLock.Unlock()
trits := make(trinary.Trits, consts.HashTrinarySize)
for i := 0; i < consts.HashTrinarySize-1; i++ {
trits[i] = int8(seededRand.Intn(consts.TrinaryRadix) + consts.MinTritValue)
}
return trinary.MustTritsToTrytes(trits)
}
|
[
1,
6
] |
package finder
import (
"bytes"
"context"
"errors"
"fmt"
"strings"
"github.com/lomik/graphite-clickhouse/config"
"github.com/lomik/graphite-clickhouse/helper/clickhouse"
"github.com/lomik/graphite-clickhouse/pkg/scope"
"github.com/lomik/graphite-clickhouse/pkg/where"
)
var ErrNotImplemented = errors.New("not implemented")
type BaseFinder struct {
url string // clickhouse dsn
table string // graphite_tree table
opts clickhouse.Options // timeout, connectTimeout
body []byte // clickhouse response body
}
func NewBase(url string, table string, opts clickhouse.Options) Finder {
return &BaseFinder{
url: url,
table: table,
opts: opts,
}
}
func (b *BaseFinder) where(query string) *where.Where {
level := strings.Count(query, ".") + 1
w := where.New()
w.And(where.Eq("Level", level))
w.And(where.TreeGlob("Path", query))
return w
}
func (b *BaseFinder) Execute(ctx context.Context, config *config.Config, query string, from int64, until int64, stat *FinderStat) (err error) {
w := b.where(query)
b.body, stat.ChReadRows, stat.ChReadBytes, err = clickhouse.Query(
scope.WithTable(ctx, b.table),
b.url,
// TODO: consider consistent query generator
fmt.Sprintf("SELECT Path FROM %s WHERE %s GROUP BY Path FORMAT TabSeparatedRaw", b.table, w),
b.opts,
nil,
)
stat.Table = b.table
stat.ReadBytes = int64(len(b.body))
return
}
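// makeList splits the TabSeparatedRaw response into rows, dropping empty lines
// and, when onlySeries is set, rows that end with '.' (non-leaf paths).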
func (b *BaseFinder) makeList(onlySeries bool) [][]byte {
if b.body == nil {
return [][]byte{}
}
rows := bytes.Split(b.body, []byte{'\n'})
skip := 0
for i := 0; i < len(rows); i++ {
if len(rows[i]) == 0 {
skip++
continue
}
if onlySeries && rows[i][len(rows[i])-1] == '.' {
skip++
continue
}
if skip > 0 {
rows[i-skip] = rows[i]
}
}
rows = rows[:len(rows)-skip]
return rows
}
func (b *BaseFinder) List() [][]byte {
return b.makeList(false)
}
func (b *BaseFinder) Series() [][]byte {
return b.makeList(true)
}
func (b *BaseFinder) Abs(v []byte) []byte {
return v
}
func (b *BaseFinder) Bytes() ([]byte, error) {
return b.body, nil
}
|
[
6
] |
package main
import (
"github.com/massarakhsh/polyforum/api"
"github.com/massarakhsh/polyforum/generate"
"github.com/massarakhsh/polyforum/ruler"
"fmt"
"log"
"net/http"
"os"
"github.com/massarakhsh/lik"
"github.com/massarakhsh/lik/likapi"
"github.com/massarakhsh/polyforum/base"
"github.com/massarakhsh/polyforum/front"
)
var (
HostPort = 80
HostServ = "localhost"
HostBase = "polyforum"
HostUser = "polyforum"
HostPass = "Polyforum17"
)
func main() {
lik.SetLevelInf()
lik.SayError("System started")
if !getArgs() {
return
}
if !base.OpenDB(HostServ, HostBase, HostUser, HostPass) {
return
}
http.HandleFunc("/", routerMain)
if err := http.ListenAndServe(fmt.Sprintf(":%d", HostPort), nil); err != nil {
log.Fatal("ListenAndServe: ", err)
}
}
func getArgs() bool {
args, ok := lik.GetArgs(os.Args[1:])
if val := args.GetInt("port"); val > 0 {
HostPort = val
}
if val := args.GetString("serv"); val != "" {
HostServ = val
}
if val := args.GetString("base"); val != "" {
HostBase = val
}
if val := args.GetString("user"); val != "" {
HostUser = val
}
if val := args.GetString("pass"); val != "" {
HostPass = val
}
if len(HostBase) <= 0 {
fmt.Println("HostBase name must be present")
ok = false
}
if !ok {
fmt.Println("Usage: polyforum [-key val | --key=val]...")
fmt.Println("port - port value (80)")
fmt.Println("serv - Database server")
fmt.Println("base - Database name")
fmt.Println("user - Database user")
fmt.Println("pass - Database pass")
}
return ok
}
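// routerMain serves static assets directly and routes /api, /front and
// /marshal requests to the matching rule builder; requests that fail
// authorisation receive a 401 response.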
func routerMain(w http.ResponseWriter, r *http.Request) {
if r.Method == "PROPFIND" {
return
}
	isapi := lik.RegExCompare(r.RequestURI, "^/api")
	isfront := lik.RegExCompare(r.RequestURI, "^/front")
	ismarshal := lik.RegExCompare(r.RequestURI, "^/marshal")
if match := lik.RegExParse(r.RequestURI, "/ean13/(\\d+)\\.png"); match != nil {
path := generate.DirectEan13(match[1], r.RequestURI)
likapi.ProbeRouteFile(w, r, path)
return
}
if !isfront && !ismarshal &&
lik.RegExCompare(r.RequestURI, "\\.(js|css|htm|html|ico|gif|png|jpg|jpeg|pdf|doc|docx|xls|xlsx)(\\?|$)") {
likapi.ProbeRouteFile(w, r, r.RequestURI)
return
}
var page *ruler.DataPage
if sp := lik.StrToInt(likapi.GetParm(r, "_sp")); sp > 0 {
if pager := likapi.FindPage(sp); pager != nil {
page = pager.(ruler.DataPager).GetItPage()
}
}
if page == nil {
page = ruler.StartPage()
}
var rule ruler.DataRuler
if isapi {
rule = api.BuildRule(page)
} else {
rule = front.BuildRule(page)
}
rule.LoadRequest(r)
if !ismarshal {
rule.RuleLog()
}
if isfront {
json := rule.Execute()
likapi.RouteJson(w, 200, json, false)
} else if ismarshal {
json := rule.Marshal()
likapi.RouteJson(w, 200, json, false)
} else if !rule.Authority() {
likapi.Route401(w, 401, "realm=\"PolyForum\"")
} else if isapi {
json := rule.Execute()
likapi.RouteJson(w, 200, json, false)
} else {
html := rule.ShowPage()
likapi.RouteCookies(w, rule.GetAllCookies())
likapi.RouteHtml(w, 200, html.ToString())
}
}
|
[
5
] |
package astquery
import (
"fmt"
"go/ast"
"go/token"
"github.com/antchfx/xpath"
"golang.org/x/tools/go/ast/inspector"
)
// Evaluator evals and selects AST's nodes by XPath.
type Evaluator struct {
n *NodeNavigator
}
// New creates an Evaluator.
// If the given inspector is not nil, the navigator uses it.
func New(fset *token.FileSet, files []*ast.File, in *inspector.Inspector) *Evaluator {
return &Evaluator{n: NewNodeNavigator(fset, files, in)}
}
// Eval returns the result of the expression.
// The result type of the expression is one of the following: bool, float64, string, []ast.Node.
func (e *Evaluator) Eval(expr string) (interface{}, error) {
n := e.n.Copy()
_expr, err := xpath.Compile(expr)
if err != nil {
return nil, fmt.Errorf("expr cannot compile: %w", err)
}
v := _expr.Evaluate(n)
switch v := v.(type) {
case *xpath.NodeIterator:
ns := nodes(v)
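		// attribute nodes carry a plain value; if every selected node is an
		// attribute, return the values themselves instead of AST nodes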
vs := make([]interface{}, 0, len(ns))
for i := range ns {
switch n := ns[i].(type) {
case attr:
vs = append(vs, n.val)
}
}
if len(vs) == len(ns) {
return vs, nil
}
return ns, nil
}
return v, nil
}
// Select selects a node set which match the XPath expr.
func (e *Evaluator) Select(expr string) ([]ast.Node, error) {
n := e.n.Copy()
_expr, err := xpath.Compile(expr)
if err != nil {
return nil, fmt.Errorf("expr cannot compile: %w", err)
}
return nodes(_expr.Select(n)), nil
}
// SelectOne selects a node set which match the XPath expr and return the first node.
func (e *Evaluator) SelectOne(expr string) (ast.Node, error) {
ns, err := e.Select(expr)
if err != nil {
return nil, err
}
if len(ns) == 0 {
return nil, nil
}
return ns[0], nil
}
|
[
7
] |
// Copyright 2020-2021 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plan
import (
"fmt"
"github.com/dolthub/go-mysql-server/sql/transform"
"github.com/dolthub/go-mysql-server/sql"
)
// QueryProcess represents a running query process node. It will use a callback
// to notify when it has finished running.
// TODO: QueryProcess -> trackedRowIter is required to dispose certain iter caches.
// Make a proper scheduler interface to perform lifecycle management, caching, and
// scan attaching
type QueryProcess struct {
UnaryNode
Notify NotifyFunc
}
var _ sql.Node = (*QueryProcess)(nil)
var _ sql.CollationCoercible = (*QueryProcess)(nil)
// NotifyFunc is a function to notify about some event.
type NotifyFunc func()
// NewQueryProcess creates a new QueryProcess node.
func NewQueryProcess(node sql.Node, notify NotifyFunc) *QueryProcess {
return &QueryProcess{UnaryNode{Child: node}, notify}
}
func (p *QueryProcess) Child() sql.Node {
return p.UnaryNode.Child
}
func (p *QueryProcess) IsReadOnly() bool {
return p.Child().IsReadOnly()
}
// WithChildren implements the Node interface.
func (p *QueryProcess) WithChildren(children ...sql.Node) (sql.Node, error) {
if len(children) != 1 {
return nil, sql.ErrInvalidChildrenNumber.New(p, len(children), 1)
}
return NewQueryProcess(children[0], p.Notify), nil
}
// CheckPrivileges implements the interface sql.Node.
func (p *QueryProcess) CheckPrivileges(ctx *sql.Context, opChecker sql.PrivilegedOperationChecker) bool {
return p.Child().CheckPrivileges(ctx, opChecker)
}
// CollationCoercibility implements the interface sql.CollationCoercible.
func (p *QueryProcess) CollationCoercibility(ctx *sql.Context) (collation sql.CollationID, coercibility byte) {
return sql.GetCoercibility(ctx, p.Child())
}
func (p *QueryProcess) String() string { return p.Child().String() }
func (p *QueryProcess) DebugString() string {
tp := sql.NewTreePrinter()
_ = tp.WriteNode("QueryProcess")
_ = tp.WriteChildren(sql.DebugString(p.Child()))
return tp.String()
}
// ShouldSetFoundRows returns whether the query process should set the FOUND_ROWS query variable. It should do this for
// any select except a Limit with a SQL_CALC_FOUND_ROWS modifier, which is handled in the Limit node itself.
func (p *QueryProcess) ShouldSetFoundRows() bool {
var fromLimit *bool
var fromTopN *bool
transform.Inspect(p.Child(), func(n sql.Node) bool {
switch n := n.(type) {
case *StartTransaction:
return true
case *Limit:
fromLimit = &n.CalcFoundRows
return true
case *TopN:
fromTopN = &n.CalcFoundRows
return true
default:
return true
}
})
if fromLimit == nil && fromTopN == nil {
return true
}
if fromTopN != nil {
return !*fromTopN
}
return !*fromLimit
}
// ProcessIndexableTable is a wrapper for sql.Tables inside a query process
// that support indexing.
// It notifies the process manager about the status of a query when a
// partition is processed.
type ProcessIndexableTable struct {
sql.DriverIndexableTable
OnPartitionDone NamedNotifyFunc
OnPartitionStart NamedNotifyFunc
OnRowNext NamedNotifyFunc
}
func (t *ProcessIndexableTable) DebugString() string {
tp := sql.NewTreePrinter()
_ = tp.WriteNode("ProcessIndexableTable")
_ = tp.WriteChildren(sql.DebugString(t.Underlying()))
return tp.String()
}
// NewProcessIndexableTable returns a new ProcessIndexableTable.
func NewProcessIndexableTable(t sql.DriverIndexableTable, onPartitionDone, onPartitionStart, OnRowNext NamedNotifyFunc) *ProcessIndexableTable {
return &ProcessIndexableTable{t, onPartitionDone, onPartitionStart, OnRowNext}
}
// Underlying implements sql.TableWrapper interface.
func (t *ProcessIndexableTable) Underlying() sql.Table {
return t.DriverIndexableTable
}
// IndexKeyValues implements the sql.IndexableTable interface.
func (t *ProcessIndexableTable) IndexKeyValues(
ctx *sql.Context,
columns []string,
) (sql.PartitionIndexKeyValueIter, error) {
iter, err := t.DriverIndexableTable.IndexKeyValues(ctx, columns)
if err != nil {
return nil, err
}
return &trackedPartitionIndexKeyValueIter{iter, t.OnPartitionDone, t.OnPartitionStart, t.OnRowNext}, nil
}
// PartitionRows implements the sql.Table interface.
func (t *ProcessIndexableTable) PartitionRows(ctx *sql.Context, p sql.Partition) (sql.RowIter, error) {
iter, err := t.DriverIndexableTable.PartitionRows(ctx, p)
if err != nil {
return nil, err
}
return t.newPartIter(p, iter)
}
func (t *ProcessIndexableTable) newPartIter(p sql.Partition, iter sql.RowIter) (sql.RowIter, error) {
partitionName := partitionName(p)
if t.OnPartitionStart != nil {
t.OnPartitionStart(partitionName)
}
var onDone NotifyFunc
if t.OnPartitionDone != nil {
onDone = func() {
t.OnPartitionDone(partitionName)
}
}
var onNext NotifyFunc
if t.OnRowNext != nil {
onNext = func() {
t.OnRowNext(partitionName)
}
}
return NewTrackedRowIter(nil, iter, onNext, onDone), nil
}
var _ sql.DriverIndexableTable = (*ProcessIndexableTable)(nil)
// NamedNotifyFunc is a function to notify about some event with a string argument.
type NamedNotifyFunc func(name string)
// ProcessTable is a wrapper for sql.Tables inside a query process. It
// notifies the process manager about the status of a query when a partition
// is processed.
type ProcessTable struct {
sql.Table
OnPartitionDone NamedNotifyFunc
OnPartitionStart NamedNotifyFunc
OnRowNext NamedNotifyFunc
}
// NewProcessTable returns a new ProcessTable.
func NewProcessTable(t sql.Table, onPartitionDone, onPartitionStart, OnRowNext NamedNotifyFunc) *ProcessTable {
return &ProcessTable{t, onPartitionDone, onPartitionStart, OnRowNext}
}
// Underlying implements sql.TableWrapper interface.
func (t *ProcessTable) Underlying() sql.Table {
return t.Table
}
// PartitionRows implements the sql.Table interface.
func (t *ProcessTable) PartitionRows(ctx *sql.Context, p sql.Partition) (sql.RowIter, error) {
iter, err := t.Table.PartitionRows(ctx, p)
if err != nil {
return nil, err
}
onDone, onNext := t.notifyFuncsForPartition(p)
return NewTrackedRowIter(nil, iter, onNext, onDone), nil
}
// notifyFuncsForPartition returns the OnDone and OnNext NotifyFuncs for the partition given
func (t *ProcessTable) notifyFuncsForPartition(p sql.Partition) (NotifyFunc, NotifyFunc) {
partitionName := partitionName(p)
if t.OnPartitionStart != nil {
t.OnPartitionStart(partitionName)
}
var onDone NotifyFunc
if t.OnPartitionDone != nil {
onDone = func() {
t.OnPartitionDone(partitionName)
}
}
var onNext NotifyFunc
if t.OnRowNext != nil {
onNext = func() {
t.OnRowNext(partitionName)
}
}
return onDone, onNext
}
func GetQueryType(child sql.Node) queryType {
// TODO: behavior of CALL is not specified in the docs. Needs investigation
var queryType queryType = QueryTypeSelect
transform.Inspect(child, func(node sql.Node) bool {
if IsNoRowNode(node) {
queryType = QueryTypeDdl
return false
}
switch node.(type) {
case *Signal:
queryType = QueryTypeDdl
return false
case nil:
return false
case *TriggerExecutor, *InsertInto, *Update, *DeleteFrom, *LoadData:
// TODO: AlterTable belongs here too, but we don't keep track of updated rows there so we can't return an
// accurate ROW_COUNT() anyway.
queryType = QueryTypeUpdate
return false
}
return true
})
return queryType
}
type queryType byte
const (
QueryTypeSelect = iota
QueryTypeDdl
QueryTypeUpdate
)
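// trackedRowIter wraps a row iterator to count returned rows, fire the
// onNext/onDone callbacks, and update session variables such as ROW_COUNT and
// FOUND_ROWS when it is closed.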
type trackedRowIter struct {
node sql.Node
iter sql.RowIter
numRows int64
QueryType queryType
ShouldSetFoundRows bool
onDone NotifyFunc
onNext NotifyFunc
}
func NewTrackedRowIter(
node sql.Node,
iter sql.RowIter,
onNext NotifyFunc,
onDone NotifyFunc,
) *trackedRowIter {
return &trackedRowIter{node: node, iter: iter, onDone: onDone, onNext: onNext}
}
func (i *trackedRowIter) done() {
if i.onDone != nil {
i.onDone()
i.onDone = nil
}
if i.node != nil {
i.Dispose()
i.node = nil
}
}
func disposeNode(n sql.Node) {
transform.Inspect(n, func(node sql.Node) bool {
sql.Dispose(node)
return true
})
transform.InspectExpressions(n, func(e sql.Expression) bool {
sql.Dispose(e)
return true
})
}
func (i *trackedRowIter) Dispose() {
if i.node != nil {
disposeNode(i.node)
}
}
func (i *trackedRowIter) Next(ctx *sql.Context) (sql.Row, error) {
row, err := i.iter.Next(ctx)
if err != nil {
return nil, err
}
i.numRows++
if i.onNext != nil {
i.onNext()
}
return row, nil
}
func (i *trackedRowIter) Close(ctx *sql.Context) error {
err := i.iter.Close(ctx)
i.updateSessionVars(ctx)
i.done()
return err
}
func (i *trackedRowIter) updateSessionVars(ctx *sql.Context) {
switch i.QueryType {
case QueryTypeSelect:
ctx.SetLastQueryInfo(sql.RowCount, -1)
case QueryTypeDdl:
ctx.SetLastQueryInfo(sql.RowCount, 0)
case QueryTypeUpdate:
// This is handled by RowUpdateAccumulator
default:
panic(fmt.Sprintf("Unexpected query type %v", i.QueryType))
}
if i.ShouldSetFoundRows {
ctx.SetLastQueryInfo(sql.FoundRows, i.numRows)
}
}
type trackedPartitionIndexKeyValueIter struct {
sql.PartitionIndexKeyValueIter
OnPartitionDone NamedNotifyFunc
OnPartitionStart NamedNotifyFunc
OnRowNext NamedNotifyFunc
}
func (i *trackedPartitionIndexKeyValueIter) Next(ctx *sql.Context) (sql.Partition, sql.IndexKeyValueIter, error) {
p, iter, err := i.PartitionIndexKeyValueIter.Next(ctx)
if err != nil {
return nil, nil, err
}
partitionName := partitionName(p)
if i.OnPartitionStart != nil {
i.OnPartitionStart(partitionName)
}
var onDone NotifyFunc
if i.OnPartitionDone != nil {
onDone = func() {
i.OnPartitionDone(partitionName)
}
}
var onNext NotifyFunc
if i.OnRowNext != nil {
onNext = func() {
i.OnRowNext(partitionName)
}
}
return p, &trackedIndexKeyValueIter{iter, onDone, onNext}, nil
}
type trackedIndexKeyValueIter struct {
iter sql.IndexKeyValueIter
onDone NotifyFunc
onNext NotifyFunc
}
func (i *trackedIndexKeyValueIter) done() {
if i.onDone != nil {
i.onDone()
i.onDone = nil
}
}
func (i *trackedIndexKeyValueIter) Close(ctx *sql.Context) (err error) {
if i.iter != nil {
err = i.iter.Close(ctx)
}
i.done()
return err
}
func (i *trackedIndexKeyValueIter) Next(ctx *sql.Context) ([]interface{}, []byte, error) {
v, k, err := i.iter.Next(ctx)
if err != nil {
return nil, nil, err
}
if i.onNext != nil {
i.onNext()
}
return v, k, nil
}
func partitionName(p sql.Partition) string {
if n, ok := p.(sql.Nameable); ok {
return n.Name()
}
return string(p.Key())
}
func IsDDLNode(node sql.Node) bool {
switch node.(type) {
case *CreateTable, *DropTable, *Truncate,
*AddColumn, *ModifyColumn, *DropColumn,
*CreateDB, *DropDB, *AlterDB,
*RenameTable, *RenameColumn,
*CreateView, *DropView,
*CreateIndex, *AlterIndex, *DropIndex,
*CreateProcedure, *DropProcedure,
*CreateEvent, *DropEvent,
*CreateForeignKey, *DropForeignKey,
*CreateCheck, *DropCheck,
*CreateTrigger, *DropTrigger, *AlterPK,
*Block: // Block as a top level node wraps a set of ALTER TABLE statements
return true
default:
return false
}
}
func IsShowNode(node sql.Node) bool {
switch node.(type) {
case *ShowTables, *ShowCreateTable,
*ShowTriggers, *ShowCreateTrigger,
*ShowDatabases, *ShowCreateDatabase,
*ShowColumns, *ShowIndexes,
*ShowProcessList, *ShowTableStatus,
*ShowVariables, ShowWarnings,
*ShowEvents, *ShowCreateEvent:
return true
default:
return false
}
}
// IsNoRowNode returns whether this node interacts only with the schema and the catalog, not with
// any table rows.
func IsNoRowNode(node sql.Node) bool {
return IsDDLNode(node) || IsShowNode(node)
}
func IsReadOnly(node sql.Node) bool {
return node.IsReadOnly()
}
|
[
2,
6
] |
package main
import (
"encoding/csv"
"flag"
"fmt"
"io"
"log"
"os"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
godays "github.com/frairon/goka-godays2019"
"github.com/lovoo/goka"
)
const (
readerChannelSize = 100
timeFormat = "2006-01-02 15:04:05"
)
var (
brokers = flag.String("brokers", "localhost:9092", "brokers")
input = flag.String("input", "testdata/taxidata_tiny.csv", "input events file")
timeLapse = flag.Float64("time-lapse", 60, "increase or decrease time. >1.0 -> time runs faster")
licenseFraudRate = flag.Int("license-fraud-rate", 0, "Every nth license is a fraud license")
)
var (
eventsSent int64
)
func main() {
flag.Parse()
f, err := os.Open(*input)
if err != nil {
log.Fatalf("Error opening file %s for reading: %v", *input, err)
}
defer f.Close()
c := make(chan []string, 1000)
// start the emitters
startEmitter, err := goka.NewEmitter(strings.Split(*brokers, ","), godays.TripStartedTopic, new(godays.TripStartedCodec))
if err != nil {
log.Fatalf("error creating emitter: %v", err)
}
defer startEmitter.Finish()
// Assumption: the godays package also provides a TripEndedCodec for TripEnded events; reusing TripStartedCodec here looks like a copy-paste slip.
endEmitter, err := goka.NewEmitter(strings.Split(*brokers, ","), godays.TripEndedTopic, new(godays.TripEndedCodec))
if err != nil {
log.Fatalf("error creating emitter: %v", err)
}
defer endEmitter.Finish()
// read from csv input file and send events to channel
reader := csv.NewReader(f)
var timeRead time.Time
go func() {
for {
record, readErr := reader.Read()
if readErr == io.EOF {
break
}
if readErr != nil {
log.Fatal(readErr)
}
eventTime, err := time.Parse(timeFormat, record[0])
if err != nil {
log.Fatalf("Error parsing event time %s: %v", record[0], err)
}
if timeRead.After(eventTime) {
log.Printf("event misordering")
}
timeRead = eventTime
c <- record
}
close(c)
}()
startTime := time.Now()
firstEvent := <-c
baseTime, err := time.Parse(timeFormat, firstEvent[0])
if err != nil {
log.Fatalf("Error parsing basetime %s: %v", firstEvent[0], err)
}
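// emitEvent converts a CSV record into a trip event and emits it on the started or ended topic; records with zero coordinates are skipped.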
emitEvent := func(eventTime time.Time, record []string) {
event := parseFromCsvRecord(baseTime, startTime, record)
switch ev := event.(type) {
case *godays.TripStarted:
if ev.Latitude == 0 && ev.Longitude == 0 {
return
}
startEmitter.Emit(ev.TaxiID, event)
case *godays.TripEnded:
if ev.Latitude == 0 && ev.Longitude == 0 {
return
}
endEmitter.Emit(ev.TaxiID, event)
default:
log.Fatalf("unhandled event type: %v", event)
}
atomic.AddInt64(&eventsSent, 1)
}
// emit the first event now
emitEvent(baseTime, firstEvent)
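// Replay the remaining events from four workers, pacing each event by its offset from the first event scaled by the time-lapse factor.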
var wg sync.WaitGroup
for i := 0; i < 4; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for {
record, ok := <-c
if !ok {
return
}
eventTime, err := time.Parse(timeFormat, record[0])
if err != nil {
log.Fatalf("Error parsing event time %s: %v", record[0], err)
}
realDiff := time.Since(startTime)
eventDiff := time.Duration(float64(eventTime.Sub(baseTime)) / *timeLapse)
// wait for the event to occur
if eventDiff > realDiff {
time.Sleep(eventDiff - realDiff)
}
emitEvent(eventTime, record)
}
}()
}
go printEventCounter()
wg.Wait()
}
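// parseFromCsvRecord builds a TripStarted or TripEnded event from a CSV record, shifting its timestamp from the recorded
// timeline onto the replay timeline and optionally remapping every nth license to a lower one to simulate duplicate (fraud) licenses.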
func parseFromCsvRecord(baseEventTime time.Time, baseTime time.Time, record []string) interface{} {
eventTime, err := time.Parse(timeFormat, record[0])
if err != nil {
log.Fatalf("Error parsing event time %s: %v", record[0], err)
}
licenseSplit := strings.Split(record[3], "-")
licenseNumber, err := strconv.ParseInt(licenseSplit[1], 10, 64)
if err != nil {
log.Fatalf("Error parsing license ID %s: %v", record[3], err)
}
if *licenseFraudRate > 0 {
if licenseNumber > int64(*licenseFraudRate) && licenseNumber%int64(*licenseFraudRate) == 0 {
licenseNumber = licenseNumber - int64(*licenseFraudRate)
log.Printf("creating duplicate: %d", licenseNumber)
}
}
licenseID := fmt.Sprintf("license-%d", licenseNumber)
realEventTime := baseTime.Add(eventTime.Sub(baseEventTime))
switch record[1] {
case "pickup":
return &godays.TripStarted{
Ts: realEventTime,
TaxiID: record[2],
LicenseID: licenseID,
Latitude: mustParseFloat(record[4]),
Longitude: mustParseFloat(record[5]),
}
case "dropoff":
return &godays.TripEnded{
Ts: realEventTime,
TaxiID: record[2],
LicenseID: licenseID,
Latitude: mustParseFloat(record[4]),
Longitude: mustParseFloat(record[5]),
Charge: mustParseFloat(record[6]),
Tip: mustParseFloat(record[7]),
Duration: time.Duration(mustParseFloat(record[8]) * float64(time.Second)),
Distance: mustParseFloat(record[9]),
}
}
log.Fatalf("Invalid record type: %#v", record)
return nil
}
func mustParseFloat(strVal string) float64 {
floatVal, err := strconv.ParseFloat(strVal, 64)
if err != nil {
log.Fatalf("Error parsing strVal %s: %v", strVal, err)
}
return floatVal
}
func printEventCounter() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
<-ticker.C
log.Printf("sent %d events", atomic.LoadInt64(&eventsSent))
}
}
|
[
0,
6
] |
package services
import (
"errors"
"github.com/dembygenesis/student_enrollment_exam/src/api/domain"
)
type courseService struct {
}
var (
CourseService *courseService
)
func init() {
CourseService = &courseService{}
}
func (s *courseService) GetEnrolledStudents(courseId int) (*[]domain.Student, error) {
// Quick validations to prevent hitting the database
if courseId == 0 {
return nil, errors.New("student_id is invalid")
}
// Validate course_id
isValidCourseId, err := domain.CourseDao.IsValidId(courseId)
if err != nil {
return nil, err
}
if !isValidCourseId {
return nil, errors.New("course_id is invalid")
}
// Perform fetch
return domain.CourseDao.GetEnrolledStudents(courseId)
}
func (s *courseService) Create(name string, professor string, description string) error {
// Quick validations to prevent hitting the database
if name == "" {
return errors.New("name is invalid")
}
if professor == "" {
return errors.New("professor is invalid")
}
if description == "" {
return errors.New("description is invalid")
}
// Perform create
return domain.CourseDao.Create(name, professor, description)
}
func (s *courseService) DeleteCourse(courseId int) error {
// Quick validations to prevent hitting the database
if courseId == 0 {
return errors.New("course_id is invalid")
}
// Validate course_id entry
isValidCourseId, err := domain.CourseDao.IsValidId(courseId)
if err != nil {
return err
}
if !isValidCourseId {
return errors.New("course_id is invalid")
}
// Validate no students enrolled before deleting
hasStudentsEnrolled, err := domain.CourseDao.HasStudentsEnrolled(courseId)
if err != nil {
return err
}
if hasStudentsEnrolled {
return errors.New("cannot delete a course_id with students enrolled")
}
return domain.CourseDao.DeleteCourse(courseId)
}
|
[
6
] |
package lfu
type dbNode struct {
key, value interface{}
prev, next *dbNode
freqNode *freqNode
}
type dbList struct {
head, tail *dbNode
total int
}
type freqNode struct {
freq int
dl *dbList
prev, next *freqNode
}
type freqList struct {
head *freqNode
tail *freqNode
}
func (fl *freqList) removeNode(node *freqNode) {
node.next.prev = node.prev
node.prev.next = node.next
}
func (fl *freqList) lastFreq() *freqNode {
return fl.tail.prev
}
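// addNode inserts a new cache entry into the frequency-1 bucket at the tail end of the frequency list, creating that bucket next to the tail sentinel if it does not exist yet.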
func (fl *freqList) addNode(node *dbNode) {
if fqNode := fl.lastFreq(); fqNode.freq == 1 {
node.freqNode = fqNode
fqNode.dl.addToHead(node)
} else {
newNode := &freqNode{
freq: 1,
dl: initdbList(),
}
node.freqNode = newNode
newNode.dl.addToHead(node)
fqNode.next = newNode
newNode.prev = fqNode
newNode.next = fl.tail
fl.tail.prev = newNode
}
}
func (dbl *dbList) isEmpty() bool {
return dbl.total == 0
}
func (dbl *dbList) GetTotal() int {
return dbl.total
}
func (dbl *dbList) addToHead(node *dbNode) {
node.next = dbl.head.next
node.prev = dbl.head
dbl.head.next.prev = node
dbl.head.next = node
dbl.total++
}
func (dbl *dbList) removeNode(node *dbNode) {
node.next.prev = node.prev
node.prev.next = node.next
dbl.total--
}
func (dbl *dbList) moveToHead(node *dbNode) {
dbl.removeNode(node)
dbl.addToHead(node)
}
func (dbl *dbList) removeTail() *dbNode {
node := dbl.tail.prev
dbl.removeNode(node)
return node
}
func initNode(k, v interface{}) *dbNode {
return &dbNode{
key: k,
value: v,
}
}
func initdbList() *dbList {
l := dbList{
head: initNode(0, 0),
tail: initNode(0, 0),
}
l.head.next = l.tail
l.tail.prev = l.head
return &l
}
type LFUCache struct {
cache map[interface{}]*dbNode
size, capacity int
freqList *freqList
}
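// NewLFU returns an LFUCache with the given capacity.
// A minimal usage sketch (keys and values here are illustrative):
//
//	cache := NewLFU(2)
//	cache.Put("a", 1)
//	cache.Put("b", 2)
//	_ = cache.Get("a") // "a" is now used more frequently than "b"
//	cache.Put("c", 3)  // evicts "b", the least frequently used entry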
func NewLFU(capacity int) LFUCache {
ca := LFUCache{
capacity: capacity,
cache: make(map[interface{}]*dbNode),
}
ca.freqList = &freqList{
head: &freqNode{},
tail: &freqNode{},
}
ca.freqList.head.next = ca.freqList.tail
ca.freqList.tail.prev = ca.freqList.head
return ca
}
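// incrFreq moves the entry into the bucket for freq+1: it reuses the neighbouring bucket when its frequency already matches,
// bumps the current bucket in place when the entry is its only member, and creates a new bucket otherwise; emptied buckets are unlinked.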
func (lfu *LFUCache) incrFreq(node *dbNode) {
curfreqNode := node.freqNode
curdbNode := curfreqNode.dl
if curfreqNode.prev.freq == curfreqNode.freq+1 {
curdbNode.removeNode(node)
curfreqNode.prev.dl.addToHead(node)
node.freqNode = curfreqNode.prev
} else if curdbNode.GetTotal() == 1 {
curfreqNode.freq++
} else {
curdbNode.removeNode(node)
newFreqNode := &freqNode{
freq: curfreqNode.freq + 1,
dl: initdbList(),
}
newFreqNode.dl.addToHead(node)
node.freqNode = newFreqNode
newFreqNode.next = curfreqNode
newFreqNode.prev = curfreqNode.prev
curfreqNode.prev.next = newFreqNode
curfreqNode.prev = newFreqNode
}
if curdbNode.isEmpty() {
lfu.freqList.removeNode(curfreqNode)
}
}
func (lfu *LFUCache) Get(key interface{}) interface{} {
if n, ok := lfu.cache[key]; ok {
lfu.incrFreq(n)
return n.value
}
return -1
}
func (lfu *LFUCache) Put(key interface{}, value interface{}) {
if lfu.capacity == 0 {
return
}
if n, ok := lfu.cache[key]; ok {
n.value = value
lfu.incrFreq(n)
} else {
if lfu.size >= lfu.capacity {
fqNode := lfu.freqList.lastFreq()
node := fqNode.dl.removeTail()
lfu.size--
delete(lfu.cache, node.key)
}
newNode := initNode(key, value)
lfu.cache[key] = newNode
lfu.freqList.addNode(newNode)
lfu.size++
}
}
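// GetIterator returns a closure that yields one entry per call, walking buckets from most to least frequently used and returning nil when exhausted.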
func (lfu *LFUCache) GetIterator() func() *dbNode {
curFreqNode := lfu.freqList.head.next
var dump *dbNode
return func() *dbNode {
for {
// An empty cache leaves curFreqNode at the tail sentinel, which has no list; stop immediately.
if curFreqNode == lfu.freqList.tail {
return nil
}
if dump == nil {
dump = curFreqNode.dl.head.next
}
for {
if dump == curFreqNode.dl.tail {
break
}
ret := dump
dump = dump.next
return ret
}
curFreqNode = curFreqNode.next
if curFreqNode == lfu.freqList.tail {
return nil
}
dump = curFreqNode.dl.head.next
}
}
}
func (lfu *LFUCache) GetAll() []interface{} {
var ret []interface{}
curFreqNode := lfu.freqList.head.next
for {
if curFreqNode == lfu.freqList.tail {
return ret
}
dump := curFreqNode.dl.head.next
for {
if dump == curFreqNode.dl.tail {
break
}
ret = append(ret, dump.value)
dump = dump.next
}
curFreqNode = curFreqNode.next
}
}
|
[
5,
6
] |
package main
import "fmt"
func superAdd(numbers ...int) int {
total := 0
for _, number := range numbers {
total = total + number
}
return total
}
func main() {
result := superAdd(1, 2, 3, 4, 5, 6)
fmt.Println(result)
for i := 0; i < 10; i++ {
fmt.Print(i)
}
}
|
[
0
] |
package main
import (
"fmt"
)
func factorial(n int64) int64 {
if n == 0 {
return 1
} else if n == 1 {
return 1
} else {
return n * factorial(n-1)
}
}
func main() {
fmt.Println("Factorial 16", factorial(16))
}
|
[
5
] |
package session
import (
"io/ioutil"
"net/http"
)
// Reference: go-github/github/github.go
// Client http client
type Client struct {
httpClient *http.Client
}
// NewClient factory Client struct
func NewClient(httpClient *http.Client) *Client {
if httpClient == nil {
httpClient = http.DefaultClient
}
return &Client{
httpClient: httpClient,
}
}
// Get executes an HTTP GET request against url with the given headers and returns the response body.
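// A minimal usage sketch (the URL and header values are illustrative):
//
//	c := NewClient(nil)
//	body, err := c.Get("https://api.example.com/v1/items", map[string]string{"Accept": "application/json"})
//	if err != nil {
//		// handle error
//	}
//	_ = body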
func (c *Client) Get(url string, header map[string]string) ([]byte, error) {
req, err := c.newRequest("GET", url, header)
if err != nil {
return nil, err
}
resp, err := c.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
bytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return bytes, nil
}
// Do sends the request using the underlying HTTP client (only GET is used for now).
func (c *Client) Do(req *http.Request) (*http.Response, error) {
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
return resp, nil
}
func (c *Client) newRequest(method string, url string, header map[string]string) (*http.Request, error) {
req, err := http.NewRequest(method, url, nil)
if err != nil {
return nil, err
}
for k, v := range header {
req.Header.Set(k, v)
}
return req, nil
}
|
[
6
] |
package portforward
import (
"fmt"
"io"
"net/http"
"net/url"
"os"
"strings"
"time"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/portforward"
"k8s.io/client-go/transport/spdy"
)
// ForwardPorts spawns a goroutine that does the equivalent of
// "kubectl port-forward -n <namespace> <podName> [portMapping]".
// The connection will remain open until stopChan is closed. Use errChan for receiving errors from the port-forward
// goroutine.
//
// Example:
//
// stopCh := make(chan struct{}, 1)
// errCh := make(chan error)
// if err = ForwardPorts(conf, "my-ns", "my-pod", []string{"5000:5000"}, stopCh, errCh, time.Minute); err != nil {
// return err
// }
// defer func() {
// close(stopCh)
// close(errCh)
// }()
func ForwardPorts(
conf *rest.Config,
namespace string,
podName string,
portMapping []string,
stopChan <-chan struct{},
errChan chan error,
timeout time.Duration,
) error {
transport, upgrader, err := spdy.RoundTripperFor(conf)
if err != nil {
return fmt.Errorf("error creating roundtripper: %w", err)
}
dialer := spdy.NewDialer(
upgrader,
&http.Client{Transport: transport},
http.MethodPost,
&url.URL{
Scheme: "https",
Path: fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward", namespace, podName),
// Strip the scheme from the API server host; TrimPrefix avoids eating leading host characters the way TrimLeft's cutset would.
Host: strings.TrimPrefix(strings.TrimPrefix(conf.Host, "https://"), "http://"),
},
)
// Create a new port-forwarder with localhost as the listen address. Standard output from the forwarder will be
// discarded, but errors will go to stderr.
readyChan := make(chan struct{})
fw, err := portforward.New(dialer, portMapping, stopChan, readyChan, io.Discard, os.Stderr)
if err != nil {
return fmt.Errorf("error creating port-forwarder: %w", err)
}
// Start the port-forward
go func() {
if err := fw.ForwardPorts(); err != nil {
errChan <- err
}
}()
// Wait for the port-forward to be ready for use before returning
select {
case <-readyChan:
return nil
case <-time.After(timeout):
return fmt.Errorf("timed out after %s waiting for port-forward to be ready", timeout)
case err = <-errChan:
return fmt.Errorf("error from port-forwarder: %w", err)
}
}
|
[
6
] |
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"github.com/toshinarin/go-googlesheets"
"golang.org/x/oauth2/google"
"google.golang.org/api/sheets/v4"
)
func newService(configJsonPath, credentialsFileName string) (*sheets.Service, error) {
b, err := ioutil.ReadFile(configJsonPath)
if err != nil {
return nil, err
}
// If modifying these scopes, delete your previously saved credentials
// at ~/.google_oauth_credentials/{credentialsFileName}
config, err := google.ConfigFromJSON(b, "https://www.googleapis.com/auth/spreadsheets")
if err != nil {
return nil, fmt.Errorf("Unable to parse client secret file to config: %v", err)
}
srv, err := googlesheets.New(config, credentialsFileName)
if err != nil {
return nil, fmt.Errorf("Unable to retrieve Sheets Client %v", err)
}
return srv, nil
}
func importSpreadSheet(srv *sheets.Service, spreadsheetId, spreadsheetRange string) error {
resp, err := srv.Spreadsheets.Values.Get(spreadsheetId, spreadsheetRange).Do()
if err != nil {
return fmt.Errorf("Unable to retrieve data from sheet. %v", err)
}
for i, row := range resp.Values {
fmt.Printf("row[%d]; %s\n", i, row)
}
return nil
}
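// exportToSpreadSheet clears the target range and rewrites it with the given rows, using RAW value input so values are stored as provided rather than parsed by Sheets.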
func exportToSpreadSheet(srv *sheets.Service, spreadsheetId, spreadsheetRange string, rows [][]interface{}) error {
valueRange := sheets.ValueRange{}
for _, r := range rows {
valueRange.Values = append(valueRange.Values, r)
}
clearReq := sheets.ClearValuesRequest{}
clearResp, err := srv.Spreadsheets.Values.Clear(spreadsheetId, spreadsheetRange, &clearReq).Do()
if err != nil {
return fmt.Errorf("failed to clear sheet. error: %v", err)
}
log.Printf("clear response: %v", clearResp)
resp, err := srv.Spreadsheets.Values.Update(spreadsheetId, spreadsheetRange, &valueRange).ValueInputOption("RAW").Do()
if err != nil {
return fmt.Errorf("failed to update sheet. error: %v", err)
}
log.Printf("update response: %v", resp)
return nil
}
func main() {
mode := flag.String("mode", "import", "import or export")
spreadSheetID := flag.String("id", "", "google spread sheet id")
flag.Parse()
if *spreadSheetID == "" {
log.Fatal("option -id: please set spread sheet id")
}
srv, err := newService("client_secret.json", "googlesheets-example.json")
if err != nil {
log.Fatal(err)
}
if *mode == "import" {
if err := importSpreadSheet(srv, *spreadSheetID, "A1:B"); err != nil {
log.Fatal(err)
}
} else if *mode == "export" {
rows := [][]interface{}{{"a1", "b1"}, {"a2", "b2"}}
if err := exportToSpreadSheet(srv, *spreadSheetID, "A1:B", rows); err != nil {
log.Fatal(err)
}
} else {
log.Fatal("option -mode: please set import or export")
}
}
|
[
5
] |
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/cloud-barista/cb-ladybug/src/grpc-api/cbadm/app"
)
type ConfigOptions struct {
*app.Options
Ladybug_Server_addr string
Ladybug_Timeout string
Ladybug_Endpoint string
Ladybug_Service_name string
Ladybug_Sample_rate string
Spider_Server_addr string
Spider_Timeout string
Spider_Endpoint string
Spider_Service_name string
Spider_Sample_rate string
}
func (o *ConfigOptions) writeYaml(in interface{}) {
if b, err := yaml.Marshal(in); err != nil {
o.PrintlnError(err)
} else {
o.WriteBody(b)
}
}
// returns a cobra command
func NewCommandConfig(options *app.Options) *cobra.Command {
o := &ConfigOptions{
Options: options,
}
// root
cmds := &cobra.Command{
Use: "config",
Short: "Configuration command",
Long: "This is a configuration command",
Run: func(c *cobra.Command, args []string) {
c.Help()
},
}
// add-context
cmdC := &cobra.Command{
Use: "add-context (NAME | --name NAME) [options]",
Short: "Add a context",
DisableFlagsInUseLine: true,
Args: app.BindCommandArgs(&o.Name),
Run: func(c *cobra.Command, args []string) {
app.ValidateError(c, func() error {
if len(o.Name) == 0 {
return fmt.Errorf("Name is required.")
}
if _, ok := app.Config.Contexts[o.Name]; ok {
return fmt.Errorf("The context '%s' is alreaday exist", o.Name)
} else {
var sConf *app.CliConfig = new(app.CliConfig)
sConf.ServerAddr = o.Spider_Server_addr
sConf.Timeout = o.Spider_Timeout
sConf.Interceptors.Opentracing.Jaeger.Endpoint = o.Spider_Endpoint
sConf.Interceptors.Opentracing.Jaeger.ServiceName = o.Spider_Service_name
sConf.Interceptors.Opentracing.Jaeger.SampleRate = o.Spider_Sample_rate
var gConf *app.CliConfig = new(app.CliConfig)
gConf.ServerAddr = o.Ladybug_Server_addr
gConf.Timeout = o.Ladybug_Timeout
gConf.Interceptors.Opentracing.Jaeger.Endpoint = o.Ladybug_Endpoint
gConf.Interceptors.Opentracing.Jaeger.ServiceName = o.Ladybug_Service_name
gConf.Interceptors.Opentracing.Jaeger.SampleRate = o.Ladybug_Sample_rate
app.Config.Contexts[o.Name] = &app.ConfigContext{
Name: o.Name,
Namespace: o.Namespace,
Ladybugcli: gConf,
Spidercli: sConf,
}
}
app.Config.WriteConfig()
o.writeYaml(app.Config)
return nil
}())
},
}
cmdC.Flags().StringVarP(&o.Ladybug_Server_addr, "ladybug_server_addr", "", "127.0.0.1:50254", "Server Addr URL")
cmdC.Flags().StringVarP(&o.Ladybug_Timeout, "ladybug_timeout", "", "1000s", "Timeout")
cmdC.Flags().StringVarP(&o.Ladybug_Endpoint, "ladybug_endpoint", "", "localhost:6834", "endpoint URL")
cmdC.Flags().StringVarP(&o.Ladybug_Service_name, "ladybug_service_name", "", "ladybug grpc client", "Service Name")
cmdC.Flags().StringVarP(&o.Ladybug_Sample_rate, "ladybug_sample_rate", "", "1", "sample rate")
cmdC.Flags().StringVarP(&o.Spider_Server_addr, "spider_server_addr", "", "127.0.0.1:2048", "Server Addr URL")
cmdC.Flags().StringVarP(&o.Spider_Timeout, "spider_timeout", "", "1000s", "Timeout")
cmdC.Flags().StringVarP(&o.Spider_Endpoint, "spider_endpoint", "", "localhost:6832", "endpoint URL")
cmdC.Flags().StringVarP(&o.Spider_Service_name, "spider_service_name", "", "spider grpc client", "Service Name")
cmdC.Flags().StringVarP(&o.Spider_Sample_rate, "spider_sample_rate", "", "1", "sample rate")
cmds.AddCommand(cmdC)
// view
cmds.AddCommand(&cobra.Command{
Use: "view",
Short: "Get contexts",
Run: func(c *cobra.Command, args []string) {
app.ValidateError(c, func() error {
o.writeYaml(app.Config)
return nil
}())
},
})
// get context
cmds.AddCommand(&cobra.Command{
Use: "get-context (NAME | --name NAME) [options]",
Short: "Get a context",
Args: app.BindCommandArgs(&o.Name),
Run: func(c *cobra.Command, args []string) {
app.ValidateError(c, func() error {
if o.Name == "" {
for k := range app.Config.Contexts {
o.Println(k)
}
} else {
if app.Config.Contexts[o.Name] != nil {
o.writeYaml(app.Config.Contexts[o.Name])
}
}
return nil
}())
},
})
// set context
cmdS := &cobra.Command{
Use: "set-context (NAME | --name NAME) [options]",
Short: "Set a context",
Args: app.BindCommandArgs(&o.Name),
DisableFlagsInUseLine: true,
Run: func(c *cobra.Command, args []string) {
app.ValidateError(c, func() error {
if o.Name == "" {
c.Help()
} else if app.Config.Contexts[o.Name] != nil {
app.Config.Contexts[o.Name].Name = o.Name
if o.Ladybug_Server_addr != "" {
app.Config.Contexts[o.Name].Ladybugcli.ServerAddr = o.Ladybug_Server_addr
}
if o.Ladybug_Timeout != "" {
app.Config.Contexts[o.Name].Ladybugcli.Timeout = o.Ladybug_Timeout
}
if o.Ladybug_Endpoint != "" {
app.Config.Contexts[o.Name].Ladybugcli.Interceptors.Opentracing.Jaeger.Endpoint = o.Ladybug_Endpoint
}
if o.Ladybug_Service_name != "" {
app.Config.Contexts[o.Name].Ladybugcli.Interceptors.Opentracing.Jaeger.ServiceName = o.Ladybug_Service_name
}
if o.Ladybug_Sample_rate != "" {
app.Config.Contexts[o.Name].Ladybugcli.Interceptors.Opentracing.Jaeger.SampleRate = o.Ladybug_Sample_rate
}
if o.Spider_Server_addr != "" {
app.Config.Contexts[o.Name].Spidercli.ServerAddr = o.Spider_Server_addr
}
if o.Spider_Timeout != "" {
app.Config.Contexts[o.Name].Spidercli.Timeout = o.Spider_Timeout
}
if o.Spider_Endpoint != "" {
app.Config.Contexts[o.Name].Spidercli.Interceptors.Opentracing.Jaeger.Endpoint = o.Spider_Endpoint
}
if o.Spider_Service_name != "" {
app.Config.Contexts[o.Name].Spidercli.Interceptors.Opentracing.Jaeger.ServiceName = o.Spider_Service_name
}
if o.Spider_Sample_rate != "" {
app.Config.Contexts[o.Name].Spidercli.Interceptors.Opentracing.Jaeger.SampleRate = o.Spider_Sample_rate
}
o.writeYaml(app.Config.Contexts[o.Name])
} else {
o.Println("Not found a context (name=%s)", o.Name)
}
return nil
}())
},
}
cmdS.Flags().StringVarP(&o.Ladybug_Server_addr, "ladybug_server_addr", "", "127.0.0.1:50254", "Server Addr URL")
cmdS.Flags().StringVarP(&o.Ladybug_Timeout, "ladybug_timeout", "", "1000s", "Timeout")
cmdS.Flags().StringVarP(&o.Ladybug_Endpoint, "ladybug_endpoint", "", "localhost:6834", "endpoint URL")
cmdS.Flags().StringVarP(&o.Ladybug_Service_name, "ladybug_service_name", "", "ladybug grpc client", "Service Name")
cmdS.Flags().StringVarP(&o.Ladybug_Sample_rate, "ladybug_sample_rate", "", "1", "sample rate")
cmdS.Flags().StringVarP(&o.Spider_Server_addr, "spider_server_addr", "", "127.0.0.1:2048", "Server Addr URL")
cmdS.Flags().StringVarP(&o.Spider_Timeout, "spider_timeout", "", "1000s", "Timeout")
cmdS.Flags().StringVarP(&o.Spider_Endpoint, "spider_endpoint", "", "localhost:6832", "endpoint URL")
cmdS.Flags().StringVarP(&o.Spider_Service_name, "spider_service_name", "", "spider grpc client", "Service Name")
cmdS.Flags().StringVarP(&o.Spider_Sample_rate, "spider_sample_rate", "", "1", "sample rate")
cmds.AddCommand(cmdS)
// current-context (get/set)
cmds.AddCommand(&cobra.Command{
Use: "current-context (NAME | --name NAME) [options]",
Short: "Get/Set a current context",
DisableFlagsInUseLine: true,
Args: app.BindCommandArgs(&o.Name),
Run: func(c *cobra.Command, args []string) {
app.ValidateError(c, func() error {
if len(o.Name) > 0 {
_, ok := app.Config.Contexts[o.Name]
if ok {
app.Config.CurrentContext = o.Name
app.Config.WriteConfig()
} else {
o.Println("context '%s' is not exist\n", o.Name)
}
}
o.writeYaml(app.Config.GetCurrentContext().Name)
return nil
}())
},
})
// delete-context
cmds.AddCommand(&cobra.Command{
Use: "delete-context (NAME | --name NAME) [options]",
Short: "Delete a context",
Args: app.BindCommandArgs(&o.Name),
Run: func(c *cobra.Command, args []string) {
app.ValidateError(c, func() error {
if o.Name == "" {
return fmt.Errorf("Name Required.")
}
conf := app.Config
if len(conf.Contexts) > 1 {
delete(conf.Contexts, o.Name)
if o.Name == conf.CurrentContext {
conf.CurrentContext = func() string {
if len(conf.Contexts) > 0 {
for k := range conf.Contexts {
return k
}
}
return ""
}()
}
conf.WriteConfig()
}
o.writeYaml(conf)
return nil
}())
},
})
// set-namespace
cmds.AddCommand(&cobra.Command{
Use: "set-namespace (NAME | --name NAME) [options]",
Short: "Set a namespace to context",
Args: app.BindCommandArgs(&o.Name),
DisableFlagsInUseLine: true,
Run: func(c *cobra.Command, args []string) {
app.ValidateError(c, func() error {
if o.Name == "" || len(app.Config.GetCurrentContext().Name) == 0 {
c.Help()
} else {
app.Config.GetCurrentContext().Namespace = o.Name
app.Config.WriteConfig()
o.writeYaml(app.Config.GetCurrentContext())
}
return nil
}())
},
})
return cmds
}
|
[
4,
5
] |
package main
func GraphiteSender(AppState *app_state, AppConfig *app_config, TrafficData *traffic_data) {
DebugLogger.Println("Sending to graphite")
}
|
[
2
] |
package s2
import (
"github.com/golang/geo/s2"
)
// The Earth's mean radius in kilometers (according to NASA).
const earthRadiusKm float32 = 6371.01
// TODO: get the best region coverer for a given distance
var km20Coverer = &s2.RegionCoverer{
MinLevel: 8,
MaxCells: 32,
}
func goodCoverer(km float32) *s2.RegionCoverer {
return km20Coverer
}
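// distance approximates the great-circle distance in kilometers between two cell centers by multiplying the angle between them (in radians) by the Earth's mean radius.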
func distance(c1 s2.CellID, c2 s2.CellID) float32 {
return earthRadiusKm * float32(c1.Point().Distance(c2.Point()))
}
|
[
6
] |
package main
import "fmt"
import "bufio"
import "os"
import "strings"
func merge(arr1 []string, arr2 []string) []string {
var arr []string
count1, count2 := 0, 0
for count1 < len(arr1) && count2 < len(arr2) {
if arr1[count1] < arr2[count2] {
arr = append(arr, arr1[count1])
count1++
} else {
arr = append(arr, arr2[count2])
count2++
}
}
for count1 < len(arr1) {
arr = append(arr, arr1[count1])
count1++
}
for count2 < len(arr2) {
arr = append(arr, arr2[count2])
count2++
}
return arr
}
func main() {
scanner := bufio.NewScanner(os.Stdin)
fmt.Println("Enter first sorted array")
scanner.Scan()
str := scanner.Text()
arr1 := strings.Split(str, " ")
fmt.Println("Enter second sorted array")
scanner.Scan()
str = scanner.Text()
arr2 := strings.Split(str, " ")
fmt.Println("Merging...")
arr := merge(arr1, arr2)
fmt.Println("Merged array is : ", arr)
}
|
[
6
] |
/*
An ALBUM_ART resource provider from Last.fm
*/
package resources
import (
"errors"
"github.com/shkh/lastfm-go/lastfm"
"io"
"net/http"
)
const apiKey = "43ffca14ea943af9f30bd147cd03e891"
type LastFMAlbumArtProvider struct {
api *lastfm.Api
}
func (this *LastFMAlbumArtProvider) String() string {
return "Last.fm"
}
func newLastFMAlbumArtProvider() *LastFMAlbumArtProvider {
return &LastFMAlbumArtProvider{
api: lastfm.New(apiKey, ""),
}
}
func (this *LastFMAlbumArtProvider) Type() ResourceType {
return ALBUM_ART
}
func (this *LastFMAlbumArtProvider) GetResource(track *Track) (io.ReadCloser, error) {
switch {
case track.Artist != "" && track.Album != "":
return this.getAlbumImageUrl(track.Artist, track.Album)
default:
return this.getTrackImageUrl(track.Artist, track.Title)
}
}
func (this *LastFMAlbumArtProvider) getAlbumImageUrl(artist string, album string) (stream io.ReadCloser, err error) {
args := map[string]interface{}{
"album": album,
"artist": artist,
}
albumInfo, err := this.api.Album.GetInfo(args)
if err != nil {
return nil, err
}
albumImages := albumInfo.Images
// Guard against responses without artwork; index 2 is assumed to be the "large" image size returned by Last.fm.
if len(albumImages) < 3 {
return nil, errors.New("no album art found for this album")
}
imageUrl := albumImages[2].Url
response, err := http.Get(imageUrl)
if err != nil {
logger.Warn("Last.fm: %s", err)
return nil, err
}
return response.Body, err
}
func (this *LastFMAlbumArtProvider) getTrackImageUrl(artist string, title string) (stream io.ReadCloser, err error) {
args := map[string]interface{}{
"track": title,
"artist": artist,
}
trackInfo, err := this.api.Track.GetInfo(args)
if err != nil {
return nil, err
}
albumImages := trackInfo.Album.Images
// Same assumption as above: index 2 is the "large" image size, when present.
if len(albumImages) < 3 {
return nil, errors.New("no album art found for this track")
}
imageUrl := albumImages[2].Url
response, err := http.Get(imageUrl)
if err != nil {
logger.Warn("Last.fm: %s", err)
return nil, err
}
return response.Body, err
}
|
[
6
] |
package main
import (
"github.com/ajstarks/deck/generate"
"math"
"math/rand"
"os"
"strings"
"time"
)
type pf struct {
function func(float64) float64
xmin, xmax float64
ymin, ymax float64
xint float64
}
type plot struct {
name string
color string
data pf
}
const (
largest = math.MaxFloat64
smallest = -math.MaxFloat64
)
// grid draws a horizontal and vertical grid
func grid(deck *generate.Deck, left, right, top, bottom, xinterval, yinterval, size float64, color string) {
for yp := top; yp >= bottom; yp -= xinterval {
deck.Line(left, yp, right, yp, size, color, 30)
}
for xp := left; xp <= right; xp += yinterval {
deck.Line(xp, top, xp, bottom, size, color, 30)
}
}
// labels makes a multi-line label
func label(deck *generate.Deck, x, y float64, s, font string, size, ls float64, color string) {
lines := strings.Split(s, "\n")
for _, t := range lines {
deck.Text(x, y, t, font, size, color)
y -= ls
}
}
// extrema returns the min and max from a slice of data
func extrema(data []float64) (float64, float64) {
min := largest
max := smallest
for _, d := range data {
if d > max {
max = d
}
if d < min {
min = d
}
}
return min, max
}
// vmap maps one range into another
func vmap(value float64, low1 float64, high1 float64, low2 float64, high2 float64) float64 {
return low2 + (high2-low2)*(value-low1)/(high1-low1)
}
// plotdata plots coordinates stored in x, y,
// mapped to a region defined by left, right, top, bottom
func plotdata(deck *generate.Deck, left, right, top, bottom float64, x []float64, y []float64, size float64, color string) {
if len(x) != len(y) {
return
}
minx, maxx := extrema(x)
miny, maxy := extrema(y)
ix := left
iy := bottom
for i := 0; i < len(x); i++ {
xp := vmap(x[i], minx, maxx, left, right)
yp := vmap(y[i], miny, maxy, bottom, top)
deck.Circle(xp, yp, size, color)
deck.Line(ix, iy, xp, yp, 0.2, color)
ix = xp
iy = yp
}
}
// plotfunc plots data generated from a function defined as y=f(x),
// mapped to a region defined by left, right, top, bottom
func plotfunc(deck *generate.Deck, left, right, top, bottom float64, data pf, size float64, color string) {
ix := left
iy := bottom
for xd := data.xmin; xd <= data.xmax; xd += data.xint {
xp := vmap(xd, data.xmin, data.xmax, left, right)
yp := vmap(data.function(xd), data.ymin, data.ymax, bottom, top)
deck.Line(ix, iy, xp, yp, 0.2, color)
ix = xp
iy = yp
}
}
func randy(x float64) float64 {
return rand.Float64() * math.Cos(x)
}
func flatline(x float64) float64 {
return 0
}
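// blinkalert renders the message on three consecutive slides, dropping the text opacity on the middle slide to simulate blinking.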
func blinkalert(deck *generate.Deck, message, color string) {
var op float64
for i := 1; i <= 3; i++ {
if i == 2 {
op = 20
} else {
op = 100
}
deck.StartSlide("black", "white")
deck.Rect(50, 60, 60, 25, "orangered")
deck.TextMid(50, 63, message, "sans", 5, color, op)
deck.EndSlide()
}
}
func alert(deck *generate.Deck, message, color string) {
deck.StartSlide("black", "white")
deck.Rect(50, 60, 60, 25, "orangered")
deck.TextMid(50, 63, message, "sans", 5, color)
deck.EndSlide()
}
func showfunctions(deck *generate.Deck, top, left, fw, fh float64, fdata []plot) {
gutter := 2.0
ts := fh / 5
w2 := fw / 2
h2 := fh / 2
x := left
y := top
deck.StartSlide("black", "white")
for _, p := range fdata {
deck.Rect(x, y, fw, fh, p.color)
label(deck, (x-w2)+ts, y+(ts/2.5), p.name, "sans", ts, ts*1.8, "white")
grid(deck, x+w2, 80, y+h2, y-h2, 2, 5, 0.1, p.color)
plotfunc(deck, x+w2, 80, y+h2, y-h2, p.data, 0.2, p.color)
y -= fh + gutter
}
deck.EndSlide()
}
func main() {
random := pf{function: randy, xmin: 0, xmax: 50, ymin: -1.5, ymax: 1.5, xint: 0.25}
fdata := []plot{
{"CARDIO\nVASCULAR", "rgb(39,147,172)", random},
{"METABOLIC\nLEVELS", "rgb(31,119,91)", random},
{"CENTRAL\nNERV. SYSTEM", "rgb(54,164,142)", random},
{"PULMONARY\nFUNCTION", "rgb(105,133,174)", random},
{"SYSTEMS\nINTEGRATION", "rgb(82,165,205)", random},
{"LOCOMOTOR\nSYSTEM", "rgb(92,88,28)", random},
}
top, left := 80.0, 25.0
fw, fh := 15.0, 6.0
killorder := []int{1, 4, 0, 3, 5, 2}
rand.Seed(int64(time.Now().Unix()))
deck := generate.NewSlides(os.Stdout, 0, 0)
deck.StartDeck()
showfunctions(deck, top, left, fw, fh, fdata)
alert(deck, "COMPUTER\nMALFUNCTION", "white")
for i := 0; i < len(fdata); i++ {
fdata[killorder[i]].color = "orangered"
fdata[killorder[i]].data.function = flatline
showfunctions(deck, top, left, fw, fh, fdata)
if i == 2 {
alert(deck, "COMPUTER\nMALFUNCTION", "white")
}
if i == 3 {
blinkalert(deck, "LIFE FUNCTIONS\nCRITICAL", "white")
}
}
blinkalert(deck, "LIFE FUNCTIONS\nTERMINATED", "white")
deck.EndDeck()
}
|
[
6
] |