code | labels |
---|---|
package utils
import (
"fmt"
"kurapika/config"
"strconv"
"time"
)
type date struct {
Day int
Week int
Month int
Year string
ActionDate string
}
// ExecQuery prepares and executes the query q with parameters p,
// returning any error to the caller instead of panicking.
func ExecQuery(q string, p []interface{}) error {
db := config.MysqlDB()
defer db.Close()
stmt, err := db.Prepare(q)
if err != nil {
return err
}
defer stmt.Close()
if _, err = stmt.Exec(p...); err != nil {
return err
}
return nil
}
// FetchID runs the single-row query q with the given ID and scans the result,
// returning the error rather than terminating the process.
func FetchID(q, ID string) (resID int, err error) {
db := config.MysqlDB()
defer db.Close()
if err := db.QueryRow(q, ID).Scan(&resID); err != nil {
return 0, err
}
return resID, nil
}
// DateDim calls the dateDimension stored procedure and returns the resulting
// date dimension ID, propagating errors instead of panicking.
func DateDim(dc date) (dateID int, err error) {
db := config.MysqlDB()
defer db.Close()
stmt, err := db.Prepare("CALL dateDimension(?, ?, ?, ?, ?)")
if err != nil {
return 0, err
}
defer stmt.Close()
if _, err = stmt.Exec(dc.Day, dc.Week, dc.Month, dc.Year, dc.ActionDate); err != nil {
return 0, err
}
row, err := db.Query("SELECT @dateId")
if err != nil {
return 0, err
}
defer row.Close()
for row.Next() {
if err := row.Scan(&dateID); err != nil {
return 0, err
}
}
return dateID, nil
}
func Extract(createdAt time.Time) (dc date, err error) {
_, actionWeek := createdAt.ISOWeek()
actionYear := createdAt.Year()
actionMonth := int(createdAt.Month())
_, _, actionDate := createdAt.Date()
return date{
createdAt.YearDay(),
actionWeek,
actionMonth,
strconv.Itoa(actionYear),
fmt.Sprintf("%d-%d-%d", actionYear, actionMonth, actionDate),
}, nil
}
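A minimal usage sketch (not part of the original file) of how Extract and DateDim might be chained when loading a fact row; it assumes config.MysqlDB() is configured and the dateDimension procedure exists.
func loadDateDimension(createdAt time.Time) (int, error) {
    // Extract the calendar fields, then resolve them to a dimension row ID.
    dc, err := Extract(createdAt)
    if err != nil {
        return 0, err
    }
    return DateDim(dc)
}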
| [2] |
package main
import (
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strings"
"github.com/lwithers/htpack/cmd/htpacker/packer"
"github.com/spf13/cobra"
yaml "gopkg.in/yaml.v2"
)
var yamlCmd = &cobra.Command{
Use: "yaml",
Short: "Build YAML spec from list of files/dirs",
Long: `Generates a YAML specification from a list of files and directories.
The specification is suitable for passing to pack.
File names will be mapped as follows:
• if you specify a file, it will be served as "/filename";
• if you specify a directory, its contents will be merged into "/", such that a
directory with contents "a", "b", and "c/d" will cause entries "/a", "/b" and
"/c/d" to be served.
`,
RunE: func(c *cobra.Command, args []string) error {
if len(args) == 0 {
return errors.New("must specify one or more files/directories")
}
// convert "out" to absolute path, in case we need to chdir
out, err := c.Flags().GetString("out")
if err != nil {
return err
}
out, err = filepath.Abs(out)
if err != nil {
return err
}
// chdir if required
chdir, err := c.Flags().GetString("chdir")
if err != nil {
return err
}
if chdir != "" {
if err = os.Chdir(chdir); err != nil {
return err
}
}
if err := MakeYaml(args, out); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
return nil
},
}
func init() {
yamlCmd.Flags().StringP("out", "O", "",
"Output filename")
yamlCmd.MarkFlagRequired("out")
yamlCmd.Flags().StringP("chdir", "C", "",
"Change to directory before searching for input files")
}
func MakeYaml(args []string, out string) error {
ftp, err := filesFromList(args)
if err != nil {
return err
}
raw, err := yaml.Marshal(ftp)
if err != nil {
return fmt.Errorf("failed to marshal %T to YAML: %v", ftp, err)
}
return ioutil.WriteFile(out, raw, 0666)
}
func filesFromList(args []string) (packer.FilesToPack, error) {
ftp := make(packer.FilesToPack)
// NB: we don't use filepath.Walk since:
// (a) we don't care about lexical order; just do it quick
// (b) we want to dereference symlinks
for _, arg := range args {
if err := filesFromListR(arg, arg, ftp); err != nil {
return nil, err
}
}
return ftp, nil
}
func filesFromListR(prefix, arg string, ftp packer.FilesToPack) error {
f, err := os.Open(arg)
if err != nil {
return err
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return err
}
switch {
case fi.Mode().IsDir():
// readdir
fnames, err := f.Readdirnames(0) // 0 ⇒ everything
if err != nil {
return err
}
for _, fname := range fnames {
fullname := filepath.Join(arg, fname)
if err = filesFromListR(prefix, fullname, ftp); err != nil {
return err
}
}
return nil
case fi.Mode().IsRegular():
// sniff content type
buf := make([]byte, 512)
n, err := f.Read(buf)
if err != nil {
return err
}
buf = buf[:n]
ctype := http.DetectContentType(buf)
// augmented rules for JS / CSS / etc.
switch {
case strings.HasPrefix(ctype, "text/plain"):
switch filepath.Ext(arg) {
case ".css":
ctype = "text/css"
case ".js":
ctype = "application/javascript"
case ".json":
ctype = "application/json"
case ".svg":
ctype = "image/svg+xml"
}
case strings.HasPrefix(ctype, "text/xml"):
switch filepath.Ext(arg) {
case ".svg":
ctype = "image/svg+xml"
}
}
// pack
srvName := strings.TrimPrefix(arg, prefix)
if srvName == "" {
srvName = filepath.Base(arg)
}
if !strings.HasPrefix(srvName, "/") {
srvName = "/" + srvName
}
ftp[srvName] = packer.FileToPack{
Filename: arg,
ContentType: ctype,
}
return nil
default:
return fmt.Errorf("%s: not file/dir (mode %x)", arg, fi.Mode())
}
}
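A small, self-contained sketch (hypothetical, same package; sniffContentType is not part of htpacker) of the sniff-then-override rule used in filesFromListR, showing why a .js file detected as text/plain ends up served as application/javascript.
// sniffContentType mirrors the detection rules above for a single file:
// sniff the first bytes, then override text/plain for known web extensions.
func sniffContentType(name string, head []byte) string {
    ctype := http.DetectContentType(head)
    if strings.HasPrefix(ctype, "text/plain") {
        switch filepath.Ext(name) {
        case ".css":
            ctype = "text/css"
        case ".js":
            ctype = "application/javascript"
        case ".json":
            ctype = "application/json"
        }
    }
    return ctype
}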
| [7] |
package Week_08
import "fmt"
func selectSort(nums []int) {
l := len(nums)
for i := 0; i < l-1; i++ {
var min = i
for j := i + 1; j < l; j++ {
if nums[j] < nums[min] {
min = j
}
}
nums[i], nums[min] = nums[min], nums[i]
}
}
func main() {
var nums = []int{9, 8, 7, 6, 5, 4, 3, 2, 1}
selectSort(nums)
fmt.Println(nums)
}
| [1] |
package parser
import (
"github.com/tliron/kutil/ard"
"github.com/tliron/kutil/util"
"gopkg.in/yaml.v3"
)
//
// OrchestrationProvisioningPolicy
//
type OrchestrationProvisioningPolicy struct {
Sites []string
Profile bool
Substitutable bool
Instantiable bool
Virtualizable bool
SubstitutionInputs map[string]interface{}
}
func ParseOrchestrationProvisioningPolicy(value ard.Value) (*OrchestrationProvisioningPolicy, bool) {
properties := ard.NewNode(value)
self := OrchestrationProvisioningPolicy{
SubstitutionInputs: make(map[string]interface{}),
}
var ok bool
if self.Substitutable, ok = properties.Get("substitutable").Boolean(true); !ok {
return nil, false
}
if self.Instantiable, ok = properties.Get("instantiable").Boolean(true); !ok {
return nil, false
}
if self.Virtualizable, ok = properties.Get("virtualizable").Boolean(true); !ok {
return nil, false
}
if sites := properties.Get("sites"); sites != ard.NoNode {
if sites_, ok := sites.List(true); ok {
for _, site := range sites_ {
if site_, ok := site.(string); ok {
self.Sites = append(self.Sites, site_)
} else {
return nil, false
}
}
} else {
return nil, false
}
}
if substitutionInputs := properties.Get("substitutionInputs"); substitutionInputs != ard.NoNode {
if substitutionInputs_, ok := substitutionInputs.Map(true); ok {
for name, input := range substitutionInputs_ {
if name_, ok := name.(string); ok {
self.SubstitutionInputs[name_] = input
} else {
return nil, false
}
}
} else {
return nil, false
}
}
return &self, true
}
//
// OrchestrationPolicies
//
type OrchestrationPolicies map[string][]interface{}
func DecodeOrchestrationPolicies(code string) (OrchestrationPolicies, bool) {
var policies ard.StringMap
if err := yaml.Unmarshal(util.StringToBytes(code), &policies); err == nil {
self := make(OrchestrationPolicies)
for nodeTemplateName, nodePolicies := range policies {
if nodePolicies_, ok := nodePolicies.(ard.List); ok {
var policies []interface{}
for _, policy := range nodePolicies_ {
policy_ := ard.NewNode(policy)
if type_, ok := policy_.Get("type").String(false); ok {
if properties, ok := policy_.Get("properties").Map(true); ok {
switch type_ {
case "provisioning":
if policy__, ok := ParseOrchestrationProvisioningPolicy(properties); ok {
policies = append(policies, policy__)
} else {
return nil, false
}
}
} else {
return nil, false
}
} else {
return nil, false
}
}
if len(policies) > 0 {
self[nodeTemplateName] = policies
}
} else {
return nil, false
}
}
return self, true
} else {
return nil, false
}
}
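A hypothetical usage sketch of DecodeOrchestrationPolicies showing the YAML shape it expects: a map from node template name to a list of typed policies, each with a type and a properties map. examplePolicies, exampleDecode, and the fmt import are illustrative assumptions, not part of the original package.
const examplePolicies = `
web-server:
- type: provisioning
  properties:
    substitutable: true
    instantiable: true
    virtualizable: false
    sites: [ edge-1, edge-2 ]
`

func exampleDecode() {
    // Each decoded entry for "web-server" is an *OrchestrationProvisioningPolicy.
    if policies, ok := DecodeOrchestrationPolicies(examplePolicies); ok {
        fmt.Printf("%+v\n", policies["web-server"])
    }
}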
| [7] |
package admin
import (
"github.com/astaxie/beego"
"go_blog/utils"
)
type AdminController struct {
beego.Controller
}
type AdminDirector struct {
controller *AdminController
modelBuilder AdminModel
current string
}
func (self *AdminDirector) getModel() {
self.modelBuilder.GetUserOrRedirectLogin(self.controller)
self.modelBuilder.RenderLayout(self.controller, self.current)
self.modelBuilder.RenderData(self.controller)
}
func GetBlogDirector(c *AdminController, builder AdminModel) AdminDirector {
return AdminDirector{c, builder, "blog"}
}
func GetCategoryDirector(c *AdminController, builder AdminModel) AdminDirector {
return AdminDirector{c, builder, "category"}
}
type AdminModel interface {
GetUserOrRedirectLogin(c *AdminController)
RenderLayout(c *AdminController, current string)
RenderData(c *AdminController)
}
type Admin struct {
}
func (self *Admin) GetUserOrRedirectLogin(c *AdminController) {
if _, err := GetUser(c); err != nil {
c.Redirect("/login", 302)
}
}
func (self *Admin) RenderLayout(c *AdminController, current string) {
c.Data["current"] = current
tables := []map[string]string{}
tableNames := utils.GetAllTableNames()
for _, table := range tableNames {
if table == current {
tables = append(tables, map[string]string{"name": current, "active": "true"})
} else {
tables = append(tables, map[string]string{"name": table, "active": "false"})
}
}
c.Data["tables"] = tables
c.Layout = "admin.html"
}
func DeleteRecordAndReturnJson(c *AdminController, DeleteFunction func(int) error, errMsg string) {
var recordId int
var ret = 1
var message = ""
_, err := GetUserBySession(c)
if err != nil {
message = utils.USER_NOT_LOGIN
} else {
if err := c.Ctx.Input.Bind(&recordId, "id"); err != nil {
message = utils.ID_NO_FOUND
} else {
if err = DeleteFunction(recordId); err != nil {
message = errMsg
} else {
ret = 0
message = "删除成功" // "deleted successfully"
}
}
}
c.Data["json"] = map[string]interface{}{"ret":ret,"message":message}
c.ServeJSON()
}
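A brief wiring sketch (hypothetical, not part of the original package) of how a concrete model could satisfy AdminModel and be driven through GetBlogDirector; BlogModel and the Get handler are illustrative names only.
// BlogModel reuses Admin's login check and layout rendering and only adds RenderData.
type BlogModel struct {
    Admin
}

func (m *BlogModel) RenderData(c *AdminController) {
    // Load blog rows into c.Data for the template here (omitted).
}

// Get shows how a controller action could drive the director.
func (c *AdminController) Get() {
    director := GetBlogDirector(c, &BlogModel{})
    director.getModel()
}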
| [2] |
package usecases
import (
"github.com/pajarraco93/graphql-test/pkg/library/domain"
"github.com/pajarraco93/graphql-test/pkg/library/domain/entities"
)
type UseCases struct {
repo domain.Repository
}
type UseCasesInterface interface {
CreateGroup(entities.Group) error
CreateAlbum(entities.Album) error
CreateSong(entities.Song) error
AllGroups() ([]entities.Group, error)
GetGroupsByIDs([]int) ([]entities.Group, error)
GetAlbumsByIDs([]int) ([]entities.Album, error)
}
func NewUseCases(repo domain.Repository) UseCasesInterface {
return &UseCases{
repo: repo,
}
}
func (u *UseCases) CreateGroup(group entities.Group) error {
return u.repo.CreateGroup(group)
}
func (u *UseCases) CreateAlbum(album entities.Album) error {
return u.repo.CreateAlbum(album)
}
func (u *UseCases) CreateSong(song entities.Song) error {
return u.repo.CreateSong(song)
}
func (u *UseCases) AllGroups() ([]entities.Group, error) {
return u.repo.AllGroups()
}
func (u *UseCases) GetGroupsByIDs(IDs []int) ([]entities.Group, error) {
return u.repo.GetGroupsByIDs(IDs)
}
func (u *UseCases) GetAlbumsByIDs(IDs []int) ([]entities.Album, error) {
return u.repo.GetAlbumsByIDs(IDs)
}
| [2] |
package main
import "fmt"
func main() {
fmt.Println(smallerNumbersThanCurrent([]int{8, 1, 2, 2, 3}))
fmt.Println(smallerNumbersThanCurrent([]int{6, 5, 4, 8}))
fmt.Println(smallerNumbersThanCurrent([]int{7, 7, 7, 7}))
}
func smallerNumbersThanCurrent(nums []int) []int {
var freqs [101]int
var acc [101]int
var max int
for _, n := range nums {
freqs[n]++
if n > max {
max = n
}
}
var count int
for n, f := range freqs[:max+1] {
acc[n] = count
count += f
}
sol := make([]int, len(nums))
for ix, n := range nums {
sol[ix] = acc[n]
}
return sol
}
| [1] |
package argocd
import (
"context"
"fmt"
"os"
"reflect"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
argoprojv1a1 "github.com/argoproj-labs/argocd-operator/api/v1alpha1"
"github.com/argoproj-labs/argocd-operator/common"
"github.com/argoproj-labs/argocd-operator/controllers/argoutil"
)
// newClusterRoleBinding returns a new ClusterRoleBinding instance.
func newClusterRoleBinding(cr *argoprojv1a1.ArgoCD) *v1.ClusterRoleBinding {
return &v1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: cr.Name,
Labels: argoutil.LabelsForCluster(cr),
Annotations: argoutil.AnnotationsForCluster(cr),
},
}
}
// newClusterRoleBindingWithname creates a new ClusterRoleBinding with the given name for the given ArgoCD.
func newClusterRoleBindingWithname(name string, cr *argoprojv1a1.ArgoCD) *v1.ClusterRoleBinding {
roleBinding := newClusterRoleBinding(cr)
roleBinding.Name = GenerateUniqueResourceName(name, cr)
labels := roleBinding.ObjectMeta.Labels
labels[common.ArgoCDKeyName] = name
roleBinding.ObjectMeta.Labels = labels
return roleBinding
}
// newRoleBinding returns a new RoleBinding instance.
func newRoleBinding(cr *argoprojv1a1.ArgoCD) *v1.RoleBinding {
return &v1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: cr.Name,
Labels: argoutil.LabelsForCluster(cr),
Annotations: argoutil.AnnotationsForCluster(cr),
Namespace: cr.Namespace,
},
}
}
// newRoleBindingForSupportNamespaces returns a new RoleBinding instance.
func newRoleBindingForSupportNamespaces(cr *argoprojv1a1.ArgoCD, namespace string) *v1.RoleBinding {
return &v1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: getRoleBindingNameForSourceNamespaces(cr.Name, cr.Namespace, namespace),
Labels: argoutil.LabelsForCluster(cr),
Annotations: argoutil.AnnotationsForCluster(cr),
Namespace: namespace,
},
}
}
func getRoleBindingNameForSourceNamespaces(argocdName, argocdNamespace, targetNamespace string) string {
return fmt.Sprintf("%s_%s", argocdName, targetNamespace)
}
// newRoleBindingWithname creates a new RoleBinding with the given name for the given ArgoCD.
func newRoleBindingWithname(name string, cr *argoprojv1a1.ArgoCD) *v1.RoleBinding {
roleBinding := newRoleBinding(cr)
roleBinding.ObjectMeta.Name = fmt.Sprintf("%s-%s", cr.Name, name)
labels := roleBinding.ObjectMeta.Labels
labels[common.ArgoCDKeyName] = name
roleBinding.ObjectMeta.Labels = labels
return roleBinding
}
// reconcileRoleBindings will ensure that all ArgoCD RoleBindings are configured.
func (r *ReconcileArgoCD) reconcileRoleBindings(cr *argoprojv1a1.ArgoCD) error {
params := getPolicyRuleList(r.Client)
for _, param := range params {
if err := r.reconcileRoleBinding(param.name, param.policyRule, cr); err != nil {
return fmt.Errorf("error reconciling roleBinding for %q: %w", param.name, err)
}
}
return nil
}
// reconcileRoleBinding creates RoleBindings for every role and associates them with the right ServiceAccount.
// This would create RoleBindings for all the namespaces managed by the ArgoCD instance.
func (r *ReconcileArgoCD) reconcileRoleBinding(name string, rules []v1.PolicyRule, cr *argoprojv1a1.ArgoCD) error {
sa, err := r.reconcileServiceAccount(name, cr)
if err != nil {
return err
}
if _, err = r.reconcileRole(name, rules, cr); err != nil {
return err
}
for _, namespace := range r.ManagedNamespaces.Items {
// If encountering a terminating namespace remove managed-by label from it and skip reconciliation - This should trigger
// clean-up of roles/rolebindings and removal of namespace from cluster secret
if namespace.DeletionTimestamp != nil {
if _, ok := namespace.Labels[common.ArgoCDManagedByLabel]; ok {
delete(namespace.Labels, common.ArgoCDManagedByLabel)
_ = r.Client.Update(context.TODO(), &namespace)
}
continue
}
list := &argoprojv1a1.ArgoCDList{}
listOption := &client.ListOptions{Namespace: namespace.Name}
err := r.Client.List(context.TODO(), list, listOption)
if err != nil {
return err
}
// only skip creation of dex and redisHa rolebindings for namespaces in which no Argo CD instance is deployed
if len(list.Items) < 1 {
// namespace doesn't contain an Argo CD instance, so skip all the ArgoCD internal roles
if cr.ObjectMeta.Namespace != namespace.Name && (name != common.ArgoCDApplicationControllerComponent && name != common.ArgoCDServerComponent) {
continue
}
}
// get expected name
roleBinding := newRoleBindingWithname(name, cr)
roleBinding.Namespace = namespace.Name
// fetch existing rolebinding by name
existingRoleBinding := &v1.RoleBinding{}
err = r.Client.Get(context.TODO(), types.NamespacedName{Name: roleBinding.Name, Namespace: roleBinding.Namespace}, existingRoleBinding)
roleBindingExists := true
if err != nil {
if !errors.IsNotFound(err) {
return fmt.Errorf("failed to get the rolebinding associated with %s : %s", name, err)
}
if name == common.ArgoCDDexServerComponent && !UseDex(cr) {
continue // Dex installation is not requested, do nothing
}
roleBindingExists = false
}
roleBinding.Subjects = []v1.Subject{
{
Kind: v1.ServiceAccountKind,
Name: sa.Name,
Namespace: sa.Namespace,
},
}
customRoleName := getCustomRoleName(name)
if customRoleName != "" {
roleBinding.RoleRef = v1.RoleRef{
APIGroup: v1.GroupName,
Kind: "ClusterRole",
Name: customRoleName,
}
} else {
roleBinding.RoleRef = v1.RoleRef{
APIGroup: v1.GroupName,
Kind: "Role",
Name: generateResourceName(name, cr),
}
}
if roleBindingExists {
if name == common.ArgoCDDexServerComponent && !UseDex(cr) {
// Delete any existing RoleBinding created for Dex since dex uninstallation is requested
log.Info("deleting the existing Dex roleBinding because dex uninstallation is requested")
if err = r.Client.Delete(context.TODO(), existingRoleBinding); err != nil {
return err
}
continue
}
// if the RoleRef changes, delete the existing role binding and create a new one
if !reflect.DeepEqual(roleBinding.RoleRef, existingRoleBinding.RoleRef) {
if err = r.Client.Delete(context.TODO(), existingRoleBinding); err != nil {
return err
}
} else {
// if the Subjects differ, update the role bindings
if !reflect.DeepEqual(roleBinding.Subjects, existingRoleBinding.Subjects) {
existingRoleBinding.Subjects = roleBinding.Subjects
if err = r.Client.Update(context.TODO(), existingRoleBinding); err != nil {
return err
}
}
continue
}
}
// Only set ownerReferences for role bindings in same namespaces as Argo CD CR
if cr.Namespace == roleBinding.Namespace {
if err = controllerutil.SetControllerReference(cr, roleBinding, r.Scheme); err != nil {
return fmt.Errorf("failed to set ArgoCD CR \"%s\" as owner for roleBinding \"%s\": %s", cr.Name, roleBinding.Name, err)
}
}
log.Info(fmt.Sprintf("creating rolebinding %s for Argo CD instance %s in namespace %s", roleBinding.Name, cr.Name, cr.Namespace))
if err = r.Client.Create(context.TODO(), roleBinding); err != nil {
return err
}
}
// reconcile rolebindings only for ArgoCDServerComponent
if name == common.ArgoCDServerComponent {
// reconcile rolebindings for all source namespaces for argocd-server
for _, sourceNamespace := range cr.Spec.SourceNamespaces {
namespace := &corev1.Namespace{}
if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: sourceNamespace}, namespace); err != nil {
return err
}
// do not reconcile rolebindings for namespaces that already have the managed-by label,
// as they already contain rolebindings with permissions to manipulate application resources,
// reconciled during reconciliation of ManagedNamespaces
if value, ok := namespace.Labels[common.ArgoCDManagedByLabel]; ok {
log.Info(fmt.Sprintf("Skipping reconciling resources for namespace %s as it is already managed-by namespace %s.", namespace.Name, value))
continue
}
list := &argoprojv1a1.ArgoCDList{}
listOption := &client.ListOptions{Namespace: namespace.Name}
err := r.Client.List(context.TODO(), list, listOption)
if err != nil {
log.Info(err.Error())
return err
}
// get expected name
roleBinding := newRoleBindingWithNameForApplicationSourceNamespaces(name, namespace.Name, cr)
roleBinding.Namespace = namespace.Name
roleBinding.RoleRef = v1.RoleRef{
APIGroup: v1.GroupName,
Kind: "Role",
Name: getRoleNameForApplicationSourceNamespaces(namespace.Name, cr),
}
// fetch existing rolebinding by name
existingRoleBinding := &v1.RoleBinding{}
err = r.Client.Get(context.TODO(), types.NamespacedName{Name: roleBinding.Name, Namespace: roleBinding.Namespace}, existingRoleBinding)
roleBindingExists := true
if err != nil {
if !errors.IsNotFound(err) {
return fmt.Errorf("failed to get the rolebinding associated with %s : %s", name, err)
}
log.Info(fmt.Sprintf("Existing rolebinding %s", err.Error()))
roleBindingExists = false
}
roleBinding.Subjects = []v1.Subject{
{
Kind: v1.ServiceAccountKind,
Name: getServiceAccountName(cr.Name, common.ArgoCDServerComponent),
Namespace: sa.Namespace,
},
{
Kind: v1.ServiceAccountKind,
Name: getServiceAccountName(cr.Name, common.ArgoCDApplicationControllerComponent),
Namespace: sa.Namespace,
},
}
if roleBindingExists {
// reconcile role bindings for namespaces already containing managed-by-cluster-argocd label only
if n, ok := namespace.Labels[common.ArgoCDManagedByClusterArgoCDLabel]; !ok || n == cr.Namespace {
continue
}
// if the RoleRef changes, delete the existing role binding and create a new one
if !reflect.DeepEqual(roleBinding.RoleRef, existingRoleBinding.RoleRef) {
if err = r.Client.Delete(context.TODO(), existingRoleBinding); err != nil {
return err
}
} else {
// if the Subjects differ, update the role bindings
if !reflect.DeepEqual(roleBinding.Subjects, existingRoleBinding.Subjects) {
existingRoleBinding.Subjects = roleBinding.Subjects
if err = r.Client.Update(context.TODO(), existingRoleBinding); err != nil {
return err
}
}
continue
}
}
log.Info(fmt.Sprintf("creating rolebinding %s for Argo CD instance %s in namespace %s", roleBinding.Name, cr.Name, namespace))
if err = r.Client.Create(context.TODO(), roleBinding); err != nil {
return err
}
}
}
return nil
}
func getCustomRoleName(name string) string {
if name == common.ArgoCDApplicationControllerComponent {
return os.Getenv(common.ArgoCDControllerClusterRoleEnvName)
}
if name == common.ArgoCDServerComponent {
return os.Getenv(common.ArgoCDServerClusterRoleEnvName)
}
return ""
}
// Returns the name of the role for the source namespaces for ArgoCDServer in the format "<argocd-name>_<target-namespace>"
func getRoleNameForApplicationSourceNamespaces(targetNamespace string, cr *argoprojv1a1.ArgoCD) string {
return fmt.Sprintf("%s_%s", cr.Name, targetNamespace)
}
// newRoleBindingWithNameForApplicationSourceNamespaces creates a new RoleBinding with the given name for the source namespaces of ArgoCD Server.
func newRoleBindingWithNameForApplicationSourceNamespaces(name, namespace string, cr *argoprojv1a1.ArgoCD) *v1.RoleBinding {
roleBinding := newRoleBindingForSupportNamespaces(cr, namespace)
labels := roleBinding.ObjectMeta.Labels
labels[common.ArgoCDKeyName] = roleBinding.ObjectMeta.Name
roleBinding.ObjectMeta.Labels = labels
return roleBinding
}
func (r *ReconcileArgoCD) reconcileClusterRoleBinding(name string, role *v1.ClusterRole, cr *argoprojv1a1.ArgoCD) error {
// get expected name
roleBinding := newClusterRoleBindingWithname(name, cr)
// fetch existing rolebinding by name
err := r.Client.Get(context.TODO(), types.NamespacedName{Name: roleBinding.Name}, roleBinding)
roleBindingExists := true
if err != nil {
if !errors.IsNotFound(err) {
return err
}
roleBindingExists = false
roleBinding = newClusterRoleBindingWithname(name, cr)
}
if roleBindingExists && role == nil {
return r.Client.Delete(context.TODO(), roleBinding)
}
if !roleBindingExists && role == nil {
// Do nothing
return nil
}
roleBinding.Subjects = []v1.Subject{
{
Kind: v1.ServiceAccountKind,
Name: generateResourceName(name, cr),
Namespace: cr.Namespace,
},
}
roleBinding.RoleRef = v1.RoleRef{
APIGroup: v1.GroupName,
Kind: "ClusterRole",
Name: GenerateUniqueResourceName(name, cr),
}
if cr.Namespace == roleBinding.Namespace {
if err = controllerutil.SetControllerReference(cr, roleBinding, r.Scheme); err != nil {
return fmt.Errorf("failed to set ArgoCD CR \"%s\" as owner for roleBinding \"%s\": %s", cr.Name, roleBinding.Name, err)
}
}
if roleBindingExists {
return r.Client.Update(context.TODO(), roleBinding)
}
return r.Client.Create(context.TODO(), roleBinding)
}
func deleteClusterRoleBindings(c client.Client, clusterBindingList *v1.ClusterRoleBindingList) error {
for _, clusterBinding := range clusterBindingList.Items {
if err := c.Delete(context.TODO(), &clusterBinding); err != nil {
return fmt.Errorf("failed to delete ClusterRoleBinding %q during cleanup: %w", clusterBinding.Name, err)
}
}
return nil
}
| [1] |
package main
import (
"context"
"errors"
"reflect"
kivik "github.com/go-kivik/kivik/v4"
)
// Method contains the relevant information for a driver method.
type Method struct {
// The method name
Name string
// Accepted values, except for context and options
Accepts []reflect.Type
// Return values, except for error
Returns []reflect.Type
AcceptsContext bool
AcceptsOptions bool
ReturnsError bool
DBMethod bool
}
var (
typeContext = reflect.TypeOf((*context.Context)(nil)).Elem()
typeDriverOptions = reflect.TypeOf(map[string]interface{}{})
typeClientOptions = reflect.TypeOf([]kivik.Options{})
typeError = reflect.TypeOf((*error)(nil)).Elem()
typeString = reflect.TypeOf("")
)
func parseMethods(input interface{}, isClient bool, skip map[string]struct{}) ([]*Method, error) {
var hasReceiver bool
t := reflect.TypeOf(input)
if t.Kind() != reflect.Struct {
return nil, errors.New("input must be struct")
}
if t.NumField() != 1 || t.Field(0).Name != "X" {
return nil, errors.New("wrapper struct must have a single field: X")
}
fType := t.Field(0).Type
if isClient {
if fType.Kind() != reflect.Ptr {
return nil, errors.New("field X must be of type pointer to struct")
}
if fType.Elem().Kind() != reflect.Struct {
return nil, errors.New("field X must be of type pointer to struct")
}
hasReceiver = true
} else {
if fType.Kind() != reflect.Interface {
return nil, errors.New("field X must be of type interface")
}
}
result := make([]*Method, 0, fType.NumMethod())
for i := 0; i < fType.NumMethod(); i++ {
m := fType.Method(i)
if _, ok := skip[m.Name]; ok {
continue
}
dm := &Method{
Name: m.Name,
}
result = append(result, dm)
accepts := make([]reflect.Type, m.Type.NumIn())
for j := 0; j < m.Type.NumIn(); j++ {
accepts[j] = m.Type.In(j)
}
if hasReceiver {
accepts = accepts[1:]
}
if len(accepts) > 0 && accepts[0].Kind() == reflect.Interface && accepts[0].Implements(typeContext) {
dm.AcceptsContext = true
accepts = accepts[1:]
}
if !isClient && len(accepts) > 0 && accepts[len(accepts)-1] == typeDriverOptions {
dm.AcceptsOptions = true
accepts = accepts[:len(accepts)-1]
}
if isClient && m.Type.IsVariadic() && len(accepts) > 0 && accepts[len(accepts)-1].String() == typeClientOptions.String() {
dm.AcceptsOptions = true
accepts = accepts[:len(accepts)-1]
}
if len(accepts) > 0 {
dm.Accepts = accepts
}
returns := make([]reflect.Type, m.Type.NumOut())
for j := 0; j < m.Type.NumOut(); j++ {
returns[j] = m.Type.Out(j)
}
if len(returns) > 0 && returns[len(returns)-1] == typeError {
dm.ReturnsError = true
returns = returns[:len(returns)-1]
}
if len(returns) > 0 {
dm.Returns = returns
}
}
return result, nil
}
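A hypothetical invocation sketch (not in the original file) of parseMethods: it wraps *kivik.Client in the single-field struct the function expects; the "DSN" entry in the skip map is purely illustrative.
func exampleParse() ([]*Method, error) {
    // The wrapper struct must expose exactly one field named X.
    wrapper := struct{ X *kivik.Client }{}
    skip := map[string]struct{}{"DSN": {}} // illustrative method name to skip
    return parseMethods(wrapper, true, skip)
}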
| [4] |
package main
import "encoding/hex"
import "io/ioutil"
import "debug/pe"
import "errors"
import "bytes"
func CreateFileMapping(file string) (bytes.Buffer) {
verbose("Mapping PE File...", "*")
// Open the file as a *pe.File
File, err := pe.Open(file)
ParseError(err,"While opening file for mapping")
progress()
// Open the file as a byte array
RawFile, err2 := ioutil.ReadFile(file)
ParseError(err2,"While reading file content")
progress()
// if File.Machine == 0x8664 {
// _opt := (File.OptionalHeader.(*pe.OptionalHeader64))
// target.opt.Magic = _opt.Magic
// target.opt.Subsystem = _opt.Subsystem
// target.opt.CheckSum = _opt.CheckSum
// target.opt.ImageBase = _opt.ImageBase
// target.opt.AddressOfEntryPoint = _opt.AddressOfEntryPoint
// target.opt.SizeOfImage = _opt.SizeOfImage
// target.opt.SizeOfHeaders = _opt.SizeOfHeaders
// for i:=0; i<16; i++ {
// target.opt.DataDirectory[i].VirtualAddress = _opt.DataDirectory[i].VirtualAddress
// target.opt.DataDirectory[i].Size = _opt.DataDirectory[i].Size
// }
// }else{
// _opt := File.OptionalHeader.((*pe.OptionalHeader32))
// target.opt.Magic = _opt.Magic
// target.opt.Subsystem = _opt.Subsystem
// target.opt.CheckSum = _opt.CheckSum
// target.opt.ImageBase = uint64(_opt.ImageBase)
// target.opt.AddressOfEntryPoint = _opt.AddressOfEntryPoint
// target.opt.SizeOfImage = _opt.SizeOfImage
// target.opt.SizeOfHeaders = _opt.SizeOfHeaders
// for i:=0; i<16; i++ {
// target.opt.DataDirectory[i].VirtualAddress = _opt.DataDirectory[i].VirtualAddress
// target.opt.DataDirectory[i].Size = _opt.DataDirectory[i].Size
// }
// }
// Check if the PE file is 64 bit (Will be removed)
if File.Machine == 0x8664 {
err := errors.New("64 bit files not supported.")
ParseError(err,"Amber currently does not support 64 bit PE files.")
}
var offset uint64 = target.opt.ImageBase
Map := bytes.Buffer{}
Map.Write(RawFile[0:target.opt.SizeOfHeaders])
offset += uint64(target.opt.SizeOfHeaders)
progress()
for i := 0; i < len(File.Sections); i++ {
// Append null bytes if there is a gap between sections or PE header
for offset < (uint64(File.Sections[i].VirtualAddress) + target.opt.ImageBase) {
Map.WriteByte(0x00)
offset++
}
// Map the section
SectionData, err := File.Sections[i].Data()
if err != nil {
err := errors.New("Cannot read section data.")
ParseError(err,"While reading the file section data.")
}
Map.Write(SectionData)
offset += uint64(File.Sections[i].Size)
// Append null bytes until reaching the end of the virtual address of the section
for offset < (uint64(File.Sections[i].VirtualAddress) + uint64(File.Sections[i].VirtualSize) + target.opt.ImageBase) {
Map.WriteByte(0x00)
offset++
}
}
progress()
for (offset - target.opt.ImageBase) < uint64(target.opt.SizeOfImage) {
Map.WriteByte(0x00)
offset++
}
progress()
// Perform integrity checks...
verbose("\n[#] Performing integrity checks on file mapping...\n|", "Y")
if int(target.opt.SizeOfImage) != Map.Len() {
if !target.IgnoreIntegrity {
err := errors.New("Integrity check failed (Mapping size does not match the size of image header)\nTry '-ignore-integrity' parameter.")
ParseError(err,"Integrity check failed (Mapping size does not match the size of image header)")
}
}
verbose("[Image Size]------------> OK", "Y")
for i := 0; i < len(File.Sections); i++ {
for j := 0; j < int(File.Sections[i].Size/10); j++ {
Buffer := Map.Bytes()
if RawFile[int(int(File.Sections[i].Offset)+j)] != Buffer[int(int(File.Sections[i].VirtualAddress)+j)] {
if !target.IgnoreIntegrity {
err := errors.New("Integrity check failed (Broken section alignment)\nTry '-ignore-integrity' parameter.")
ParseError(err,"Integrity check failed (Broken section alignment)")
}
}
}
}
verbose("[Section Alignment]-----> OK\n", "Y")
// Add data directory intervals check !
progress()
return Map
}
func scrape(Map []byte) ([]byte){
verbose("Scraping PE headers...","*")
var scraped string
// if string(Map[:2]) == "MZ" {
// scraped += hex.Dump(Map[:2])
// Map[0] = 0x00
// Map[1] = 0x00
// }
// for i:=0; i<0x1000; i++ {
// if string(Map[i:i+2]) == "PE" {
// scraped += hex.Dump(Map[i:i+2])
// Map[i] = 0x00
// Map[i+1] = 0x00
// }
// }
for i:=0; i<0x1000; i++ {
if string(Map[i:i+39]) == "This program cannot be run in DOS mode." {
scraped += hex.Dump(Map[i:i+39])
for j:=0; j<39; j++ {
Map[i+j] = 0x00
}
}
}
for i:=66; i<0x1000; i++ {
if Map[i] == 0x2e && Map[i+1] < 0x7e && Map[i+1] > 0x21 {
scraped += hex.Dump(Map[i:i+7])
for j:=0; j<7; j++{
Map[i+j] = 0x00
}
}
}
verbose(scraped,"")
verbose("Done scraping headers.","+")
return Map
}
| [2] |
package icarus
import (
"testing"
"github.com/luuphu25/data-sidecar/util"
)
func SuiteTestStore(t *testing.T, x *IcarusStore, cap int) {
metric := util.Metric{Desc: map[string]string{"a": "B", "__name__": "hello"}, Data: util.DataPoint{Val: 1.}}
t.Run("in-out loop", func(t *testing.T) {
x.Insert(metric)
if metric.Data.Val != x.Dump()[0].Data.Val {
t.Error()
}
})
t.Run("aging keeps for a bit", func(t *testing.T) {
x.Roll()
if metric.Data.Val != x.Dump()[0].Data.Val {
t.Error()
}
})
t.Run("not forever", func(t *testing.T) {
for i := 1; i < cap; i++ {
x.Roll()
}
if 0 != len(x.Dump()) {
t.Error()
}
})
}
func TestRollingStore(t *testing.T) {
g := NewRollingStore(2)
SuiteTestStore(t, g, 2)
}
| [1] |
package simple_levenstein
func IsValid(s1, s2 string) bool {
if len(s1)-len(s2) >= 2 || len(s1)-len(s2) <= -2 {
return false
}
flag := false
if len(s1) == len(s2) {
for i := 0; i < len(s1); i++ {
if s1[i] != s2[i] {
if flag {
return false
}
flag = true
}
}
} else {
var min, max string
if len(s1) < len(s2) {
min, max = s1, s2
} else {
min, max = s2, s1
}
if len(min) == 0 {
return true
}
flag := false
for i := 0; i < len(min); {
if flag {
if min[i] != max[i+1] {
return false
}
i++
} else {
if min[i] != max[i] {
flag = true
} else {
i++
}
}
}
}
return true
}
func IsValidRec(s1, s2 string) bool {
if len(s1)-len(s2) >= 2 || len(s1)-len(s2) <= -2 {
return false
}
return validRec(s1, s2, false)
}
func validRec(s1, s2 string, flag bool) bool {
if len(s1) == len(s2) {
for i := 0; i < len(s1); i++ {
if s1[i] != s2[i] {
if flag {
return false
}
flag = true
}
}
} else {
var min, max string
if len(s1) < len(s2) {
min, max = s1, s2
} else {
min, max = s2, s1
}
if len(min) == 0 {
return true
}
flag := false
for i := 0; i < len(min); i++ {
if min[i] != max[i] {
if flag {
return false
}
return validRec(min[i:], max[i+1:], true)
}
}
}
return true
}
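A short, hypothetical usage sketch of the one-edit-distance check above (both the iterative and the recursive variants should agree); it assumes fmt is imported, which the original package does not do.
func exampleLevenshtein() {
    fmt.Println(IsValid("cat", "cut"))      // true: one substitution
    fmt.Println(IsValid("cat", "cats"))     // true: one insertion
    fmt.Println(IsValid("cat", "dog"))      // false: more than one edit
    fmt.Println(IsValidRec("cat", "coats")) // false: lengths differ by 2
}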
| [1] |
package matasano
import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"errors"
"math/big"
mathrand "math/rand"
"time"
)
const bigP = "ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff"
// DiffieHelman provides Diffie-Hellman key exchange
type DiffieHelman struct {
p *big.Int
g *big.Int
a *big.Int
B *big.Int // public key of other party
message []byte
}
// PubKey computes and returns the instance's public key (g**a mod p).
func (dh *DiffieHelman) PubKey() *big.Int {
pubKey := big.NewInt(0)
// We use -1 as a beacon to indicate that we should publish 0 as
// our public key.
if dh.a.Cmp(big.NewInt(-1)) != 0 {
pubKey.Exp(dh.g, dh.a, dh.p)
}
return pubKey
}
// SessionKey performs Diffie-Hellman key exchange using the stored public key
// of the other party, and computes and returns a 256-bit session key.
func (dh *DiffieHelman) SessionKey() ([sha256.Size]byte, error) {
// A nil public key means no key exchange has taken place yet.
if dh.B == nil {
var k [sha256.Size]byte
return k, errors.New("no second public key available")
}
sessionKey := big.NewInt(0)
// Having your secret set to -1 means your public key is just 0.
// (This is not a mathematical fact; we've just used -1 as a beacon here.)
if dh.a.Cmp(big.NewInt(-1)) != 0 {
sessionKey = big.NewInt(0).Exp(dh.B, dh.a, dh.p)
}
sessionKeyBytes := sessionKey.Bytes()
return sha256.Sum256(sessionKeyBytes), nil
}
func (dh *DiffieHelman) SendParameters(dhB *DiffieHelman) {
dhB.ReceiveParameters(dh.p, dh.g, dh.PubKey())
}
func (dh *DiffieHelman) ReceiveParameters(p, g, A *big.Int) (dh_p, dh_g, dh_B *big.Int) {
if (dh.p == nil && dh.g == nil) || (dh.p == p && dh.g == g) {
dh.p = p
dh.g = g
dh.B = A
}
return dh.p, dh.g, dh.B
}
func (dh *DiffieHelman) SendMessage(m []byte, dhB *DiffieHelman) error {
s, err := dh.SessionKey()
if err != nil {
return err
}
key := s[0:16]
iv := make([]byte, 16)
rand.Read(iv)
enc := CbcEncryptor{key, iv}
ct, err := enc.CbcEncrypt(m)
if err != nil {
return err
}
err = dhB.ReceiveMessage(append(iv, ct...))
if err != nil {
return err
}
return nil
}
func (dh *DiffieHelman) ReceiveMessage(m []byte) error {
s, err := dh.SessionKey()
if err != nil {
return err
}
key := s[0:16]
iv := m[0:16]
ct := m[16:]
enc := CbcEncryptor{key, iv}
pt, err := enc.CbcDecrypt(ct)
if err != nil {
return err
}
dh.message = pt
return nil
}
func createDhPair(p, g *big.Int) (dhA, dhB DiffieHelman) {
s := mathrand.NewSource(time.Now().Unix())
r := mathrand.New(s)
a := big.NewInt(0)
b := big.NewInt(0)
a.Rand(r, p)
b.Rand(r, p)
dhA = DiffieHelman{p, g, a, big.NewInt(0), nil}
dhB = DiffieHelman{p, g, b, big.NewInt(0), nil}
return dhA, dhB
}
func Mitm(aToB, bToA []byte) (receivedByA, receivedByB, interceptedForA, interceptedForB []byte) {
pBytes, _ := hex.DecodeString(bigP)
p := big.NewInt(0)
p.SetBytes(pBytes)
g := big.NewInt(2)
dhA, dhB := createDhPair(p, g)
// We create two different instantiations of Mallory, one each for talking to
// A (dhMa) and B (dhMb)
dhMa := DiffieHelman{p, g, big.NewInt(-1), big.NewInt(0), nil}
dhMb := DiffieHelman{p, g, big.NewInt(-1), big.NewInt(0), nil}
// Here's the key exchange with Mallory in the middle
dhA.SendParameters(&dhMb)
dhMb.SendParameters(&dhB)
dhB.SendParameters(&dhMa)
dhMa.SendParameters(&dhA)
dhA.SendMessage(aToB, &dhMb)
dhMb.SendMessage(dhMb.message, &dhB)
dhB.SendMessage(bToA, &dhMa)
dhMa.SendMessage(dhMa.message, &dhA)
return dhA.message, dhB.message, dhMa.message, dhMb.message
}
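A tiny, hypothetical driver for the man-in-the-middle demo above; it only calls Mitm, which is defined in this file, and assumes fmt is imported (it is not in the original import block).
func exampleMitm() {
    recvA, recvB, interceptedForA, interceptedForB := Mitm([]byte("hi Bob"), []byte("hi Alice"))
    fmt.Printf("B received: %s (Mallory saw: %s)\n", recvB, interceptedForB)
    fmt.Printf("A received: %s (Mallory saw: %s)\n", recvA, interceptedForA)
}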
| [2] |
package dna_to_rna
// This is a package with one public function, ToRNA(string),
// which transcribes DNA strands into RNA.
// charToRNA is unexported and cannot be called from outside this package.
func charToRNA(dna_base string) string {
switch dna_base {
case "A":
return "U"
case "T":
return "A"
case "C":
return "G"
case "G":
return "C"
}
return ""
}
func ToRNA(DNA string) string {
var RNA string = ""
for i := 0; i < len(DNA); i++ {
RNA += charToRNA(string(DNA[i]))
}
return RNA
}
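A minimal usage sketch of the transcription above (illustrative; assumes fmt is imported):
func exampleToRNA() {
    fmt.Println(ToRNA("GCTA")) // prints "CGAU"
}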
| [2] |
package controller
import (
"database/sql"
"encoding/json"
"net/http"
"regexp"
"strconv"
"time"
"github.com/gorilla/mux"
_ "github.com/mattn/go-sqlite3"
"github.com/vilisseranen/castellers/common"
"github.com/vilisseranen/castellers/model"
)
// Regex to match any positive number followed by w (week) or d (days)
var intervalRegex = regexp.MustCompile(`^([1-9]\d*)(w|d)$`)
const INTERVAL_DAY_SECOND = 60 * 60 * 24
const INTERVAL_WEEK_SECOND = 60 * 60 * 24 * 7
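A quick, hypothetical illustration of the interval format the regex accepts ("<positive int>d" or "<positive int>w"); the submatches drive the seconds conversion in CreateEvent below. exampleInterval and the fmt import are assumptions, not part of the original controller.
func exampleInterval() {
    m := intervalRegex.FindStringSubmatch("2w")
    fmt.Println(m) // [2w 2 w]: m[1] is the count, m[2] selects day or week
}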
func GetEvent(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
uuid := vars["uuid"]
e := model.Event{UUID: uuid}
if err := e.Get(); err != nil {
switch err {
case sql.ErrNoRows:
RespondWithError(w, http.StatusNotFound, "Event not found")
default:
RespondWithError(w, http.StatusInternalServerError, err.Error())
}
return
}
RespondWithJSON(w, http.StatusOK, e)
}
func GetEvents(w http.ResponseWriter, r *http.Request) {
count, _ := strconv.Atoi(r.FormValue("count"))
start, _ := strconv.Atoi(r.FormValue("start"))
if count < 1 {
count = 100
}
if start < 1 {
start = int(time.Now().Unix())
}
e := model.Event{}
events, err := e.GetAll(start, count)
if err != nil {
switch err {
default:
RespondWithError(w, http.StatusInternalServerError, err.Error())
}
return
}
vars := mux.Vars(r)
var member_uuid string
if vars["member_uuid"] != "" {
member_uuid = vars["member_uuid"]
} else if vars["admin_uuid"] != "" {
member_uuid = vars["admin_uuid"]
}
if member_uuid != "" {
for index, event := range events {
p := model.Participation{EventUUID: event.UUID, MemberUUID: member_uuid}
if err := p.GetParticipation(); err != nil {
switch err {
case sql.ErrNoRows:
continue
default:
RespondWithError(w, http.StatusInternalServerError, err.Error())
}
}
events[index].Participation = p.Answer
}
}
if admin_uuid := vars["admin_uuid"]; admin_uuid != "" {
for index, event := range events {
if err := event.GetAttendance(); err != nil {
switch err {
default:
RespondWithError(w, http.StatusInternalServerError, err.Error())
}
}
events[index].Attendance = event.Attendance
}
}
RespondWithJSON(w, http.StatusOK, events)
}
func CreateEvent(w http.ResponseWriter, r *http.Request) {
// Decode the event
var event model.Event
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&event); err != nil {
RespondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
// Validation on events data
if event.StartDate > event.EndDate ||
event.Name == "" {
RespondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
// Compute all events
var events = make([]model.Event, 0)
if event.Recurring.Interval == "" || event.Recurring.Until == 0 {
event.UUID = common.GenerateUUID()
events = append(events, event)
} else {
interval := intervalRegex.FindStringSubmatch(event.Recurring.Interval)
if len(interval) != 0 && event.Recurring.Until >= event.StartDate {
inter, err := strconv.ParseUint(interval[1], 10, 32)
if err != nil {
RespondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
intervalSeconds := uint(inter)
switch interval[2] {
case "d":
intervalSeconds *= INTERVAL_DAY_SECOND
case "w":
intervalSeconds *= INTERVAL_WEEK_SECOND
}
// Create the recurringEvent
var recurringEvent model.RecurringEvent
recurringEvent.UUID = common.GenerateUUID()
recurringEvent.Name = event.Name
recurringEvent.Description = event.Description
recurringEvent.Interval = event.Recurring.Interval
if err := recurringEvent.CreateRecurringEvent(); err != nil {
RespondWithError(w, http.StatusInternalServerError, err.Error())
return
}
// Compute the list of events
for date := event.StartDate; date <= event.Recurring.Until; date += intervalSeconds {
var anEvent model.Event
anEvent.UUID = common.GenerateUUID()
if event.UUID == "" {
event.UUID = anEvent.UUID
}
anEvent.Name = recurringEvent.Name
anEvent.Description = recurringEvent.Description
anEvent.StartDate = date
anEvent.EndDate = date + event.EndDate - event.StartDate
anEvent.RecurringEvent = recurringEvent.UUID
events = append(events, anEvent)
// Adjust for Daylight Saving Time
var location, err = time.LoadLocation("America/Montreal")
if err != nil {
RespondWithError(w, http.StatusInternalServerError, err.Error())
return
}
// This gives the offset of the current zone in Montreal,
// in Daylight Saving Time or Standard Time according to the time of year.
_, thisEventZoneOffset := time.Unix(int64(date), 0).In(location).Zone()
_, nextEventZoneOffset := time.Unix(int64(date+intervalSeconds), 0).In(location).Zone()
// If the event switches between EST and EDT, the offset adjusts the time
// so that the end user always sees the event at the same time of day.
offset := thisEventZoneOffset - nextEventZoneOffset
date = uint(int(date) + offset)
}
} else {
RespondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
}
// Create the events
for _, event := range events {
if err := event.CreateEvent(); err != nil {
RespondWithError(w, http.StatusInternalServerError, err.Error())
return
}
}
RespondWithJSON(w, http.StatusCreated, event)
}
func UpdateEvent(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
uuid := vars["uuid"]
var e model.Event
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&e); err != nil {
RespondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
e.UUID = uuid
if err := e.UpdateEvent(); err != nil {
RespondWithError(w, http.StatusInternalServerError, err.Error())
return
}
RespondWithJSON(w, http.StatusOK, e)
}
func DeleteEvent(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
uuid := vars["uuid"]
e := model.Event{UUID: uuid}
if err := e.DeleteEvent(); err != nil {
RespondWithError(w, http.StatusInternalServerError, err.Error())
return
}
RespondWithJSON(w, http.StatusOK, nil)
}
| [7] |
package scheduling
import (
"sync"
"time"
)
type Scheduler struct {
Hour, Min, Sec int
Weekday map[time.Weekday]bool
TimeChan chan time.Time
doneChan chan bool
wg sync.WaitGroup
}
func NewScheduler(hour, min, sec int, weekdays []time.Weekday) *Scheduler {
if weekdays == nil {
return nil
}
if hour < 0 || hour > 23 || min < 0 || min > 59 || sec < 0 || sec > 59 || len(weekdays) == 0 {
return nil
}
s := &Scheduler{
Hour: hour,
Min: min,
Sec: sec,
Weekday: make(map[time.Weekday]bool),
TimeChan: make(chan time.Time, 1),
doneChan: make(chan bool, 1),
}
for _, day := range weekdays {
s.Weekday[day] = true
}
return s
}
// Start sends a notification on TimeChan at the scheduled time.
func (s *Scheduler) Start() chan time.Time {
now := time.Now()
t := time.Date(now.Year(), now.Month(), now.Day(), s.Hour, s.Min, s.Sec, 0, now.Location())
if !t.After(now) {
t = time.Date(now.Year(), now.Month(), now.Day()+1, s.Hour, s.Min, s.Sec, 0, now.Location())
}
s.wg.Add(1)
go func(s *Scheduler) {
defer s.wg.Done()
ticker := time.NewTicker(t.Sub(now))
select {
case t := <-ticker.C:
ticker.Stop()
ticker = time.NewTicker(24 * time.Hour)
s.TimeChan <- t
for {
select {
case t := <-ticker.C:
if s.Weekday[t.Weekday()] {
s.TimeChan <- t
}
case <-s.doneChan:
return
}
}
case <-s.doneChan:
ticker.Stop()
return
}
}(s)
return s.TimeChan
}
// Stop stops the notifications.
func (s *Scheduler) Stop() {
s.doneChan <- true
s.wg.Wait()
}
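A minimal usage sketch (hypothetical; assumes fmt is imported, which the original package does not do) of the scheduler above: fire at 09:00:00 on selected weekdays, then stop.
func exampleScheduler() {
    s := NewScheduler(9, 0, 0, []time.Weekday{time.Monday, time.Wednesday, time.Friday})
    if s == nil {
        return // invalid arguments
    }
    ch := s.Start()
    t := <-ch // blocks until the first 09:00:00 after startup
    fmt.Println("fired at", t)
    s.Stop()
}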
| [1] |
package anydiff
import "github.com/unixpickle/anyvec"
type tanhRes struct {
In Res
OutVec anyvec.Vector
}
// Tanh computes the hyperbolic tangent of each component
// of the input.
func Tanh(in Res) Res {
v := in.Output().Copy()
anyvec.Tanh(v)
return &tanhRes{
In: in,
OutVec: v,
}
}
func (t *tanhRes) Output() anyvec.Vector {
return t.OutVec
}
func (t *tanhRes) Vars() VarSet {
return t.In.Vars()
}
func (t *tanhRes) Propagate(u anyvec.Vector, g Grad) {
down := t.OutVec.Copy()
anyvec.Pow(down, t.OutVec.Creator().MakeNumeric(2))
anyvec.Complement(down)
u.Mul(down)
t.In.Propagate(u, g)
}
type sigmoidRes struct {
In Res
OutVec anyvec.Vector
}
// Sigmoid computes the logistic sigmoid of the input.
//
// The sigmoid is defined as:
//
// f(x) = 1 / (1 + exp(-x))
//
func Sigmoid(in Res) Res {
res := in.Output().Copy()
anyvec.Sigmoid(res)
return &sigmoidRes{
In: in,
OutVec: res,
}
}
func (s *sigmoidRes) Output() anyvec.Vector {
return s.OutVec
}
func (s *sigmoidRes) Vars() VarSet {
return s.In.Vars()
}
func (s *sigmoidRes) Propagate(u anyvec.Vector, g Grad) {
comp := s.OutVec.Copy()
u.Mul(comp)
anyvec.Complement(comp)
u.Mul(comp)
s.In.Propagate(u, g)
}
type logSoftmaxRes struct {
In Res
ChunkSize int
OutVec anyvec.Vector
}
// LogSoftmax computes the log of the softmax function for
// each chunk in a packed list of chunks.
// The chunk size must divide the vector length.
// If chunkSize is 0, it will be treated like the full
// length of v.
func LogSoftmax(v Res, chunkSize int) Res {
if chunkSize == 0 {
chunkSize = v.Output().Len()
}
if v.Output().Len()%chunkSize != 0 {
panic("chunk size must divide vector size")
}
out := v.Output().Copy()
anyvec.LogSoftmax(out, chunkSize)
return &logSoftmaxRes{
In: v,
ChunkSize: chunkSize,
OutVec: out,
}
}
func (l *logSoftmaxRes) Output() anyvec.Vector {
return l.OutVec
}
func (l *logSoftmaxRes) Vars() VarSet {
return l.In.Vars()
}
func (l *logSoftmaxRes) Propagate(u anyvec.Vector, g Grad) {
numBatch := u.Len() / l.ChunkSize
batchSums := anyvec.SumCols(u, numBatch)
probs := l.OutVec.Copy()
anyvec.Exp(probs)
anyvec.ScaleChunks(probs, batchSums)
u.Sub(probs)
l.In.Propagate(u, g)
}
// Square squares the vector components.
func Square(v Res) Res {
return Pow(v, v.Output().Creator().MakeNumeric(2))
}
type powRes struct {
In Res
OutVec anyvec.Vector
Power anyvec.Numeric
}
// Pow raises each component of the vector to the given
// scalar power.
func Pow(v Res, s anyvec.Numeric) Res {
out := v.Output().Copy()
anyvec.Pow(out, s)
return &powRes{
In: v,
OutVec: out,
Power: s,
}
}
func (p *powRes) Output() anyvec.Vector {
return p.OutVec
}
func (p *powRes) Vars() VarSet {
return p.In.Vars()
}
func (p *powRes) Propagate(u anyvec.Vector, g Grad) {
temp := u.Creator().MakeVector(1)
temp.AddScalar(p.Power)
temp.AddScalar(temp.Creator().MakeNumeric(-1))
powerMinusOne := anyvec.Sum(temp)
exped := p.In.Output().Copy()
anyvec.Pow(exped, powerMinusOne)
u.Mul(exped)
u.Scale(p.Power)
p.In.Propagate(u, g)
}
type clipPosRes struct {
In Res
OutVec anyvec.Vector
}
// ClipPos clips the values to be non-negative.
// Thus, any negative entries of in are 0 in the result.
func ClipPos(in Res) Res {
out := in.Output().Copy()
anyvec.ClipPos(out)
return &clipPosRes{
In: in,
OutVec: out,
}
}
func (c *clipPosRes) Output() anyvec.Vector {
return c.OutVec
}
func (c *clipPosRes) Vars() VarSet {
return c.In.Vars()
}
func (c *clipPosRes) Propagate(u anyvec.Vector, g Grad) {
mask := c.In.Output().Copy()
anyvec.GreaterThan(mask, mask.Creator().MakeNumeric(0))
u.Mul(mask)
c.In.Propagate(u, g)
}
// ClipRange clips values to be in the (exclusive) range.
func ClipRange(in Res, min, max anyvec.Numeric) Res {
return Pool(in, func(in Res) Res {
highEnough, lowEnough := in.Output().Copy(), in.Output().Copy()
anyvec.GreaterThan(highEnough, min)
anyvec.LessThan(lowEnough, max)
midRange := highEnough.Copy()
midRange.Mul(lowEnough)
middlePart := Mul(in, NewConst(midRange))
anyvec.Complement(lowEnough)
lowEnough.Scale(max)
anyvec.Complement(highEnough)
highEnough.Scale(min)
return Add(
middlePart,
Add(
NewConst(lowEnough),
NewConst(highEnough),
),
)
})
}
type sinRes struct {
In Res
OutVec anyvec.Vector
}
// Sin takes the component-wise sine of the vector, in
// radians.
func Sin(in Res) Res {
out := in.Output().Copy()
anyvec.Sin(out)
return &sinRes{
In: in,
OutVec: out,
}
}
func (s *sinRes) Output() anyvec.Vector {
return s.OutVec
}
func (s *sinRes) Vars() VarSet {
return s.In.Vars()
}
func (s *sinRes) Propagate(u anyvec.Vector, g Grad) {
upScale := s.In.Output().Copy()
anyvec.Cos(upScale)
u.Mul(upScale)
s.In.Propagate(u, g)
}
type cosRes struct {
In Res
OutVec anyvec.Vector
}
// Cos takes the component-wise cosine of the vector, in
// radians.
func Cos(in Res) Res {
out := in.Output().Copy()
anyvec.Cos(out)
return &cosRes{
In: in,
OutVec: out,
}
}
func (s *cosRes) Output() anyvec.Vector {
return s.OutVec
}
func (s *cosRes) Vars() VarSet {
return s.In.Vars()
}
func (s *cosRes) Propagate(u anyvec.Vector, g Grad) {
upScale := s.In.Output().Copy()
anyvec.Sin(upScale)
u.Mul(upScale)
u.Scale(u.Creator().MakeNumeric(-1))
s.In.Propagate(u, g)
}
type expRes struct {
OutVec anyvec.Vector
In Res
}
// Exp exponentiates the vector components.
func Exp(in Res) Res {
expd := in.Output().Copy()
anyvec.Exp(expd)
return &expRes{OutVec: expd, In: in}
}
func (e *expRes) Output() anyvec.Vector {
return e.OutVec
}
func (e *expRes) Vars() VarSet {
return e.In.Vars()
}
func (e *expRes) Propagate(u anyvec.Vector, g Grad) {
u.Mul(e.OutVec)
e.In.Propagate(u, g)
}
type logSigmoidRes struct {
OutVec anyvec.Vector
In Res
}
// LogSigmoid computes the log of the sigmoid of the
// input.
// This may be more numerically stable than first
// computing the sigmoid and then computing the log.
func LogSigmoid(in Res) Res {
c := in.Output().Creator()
idxMap := make([]int, in.Output().Len())
for i := range idxMap {
idxMap[i] = i * 2
}
mapper := c.MakeMapper(len(idxMap)*2, idxMap)
logSumMe := c.MakeVector(len(idxMap) * 2)
mapper.MapTranspose(in.Output(), logSumMe)
logSumMe.Scale(c.MakeNumeric(-1))
sum := anyvec.AddLogs(logSumMe, 2)
sum.Scale(c.MakeNumeric(-1))
return &logSigmoidRes{
OutVec: sum,
In: in,
}
}
func (l *logSigmoidRes) Output() anyvec.Vector {
return l.OutVec
}
func (l *logSigmoidRes) Vars() VarSet {
return l.In.Vars()
}
func (l *logSigmoidRes) Propagate(u anyvec.Vector, g Grad) {
downstream := l.In.Output().Copy()
downstream.Scale(downstream.Creator().MakeNumeric(-1))
anyvec.Sigmoid(downstream)
u.Mul(downstream)
l.In.Propagate(u, g)
}
type complementRes struct {
In Res
OutVec anyvec.Vector
}
// Complement computes (1-x) for every component x.
func Complement(in Res) Res {
compIn := in.Output().Copy()
anyvec.Complement(compIn)
return &complementRes{In: in, OutVec: compIn}
}
func (c *complementRes) Output() anyvec.Vector {
return c.OutVec
}
func (c *complementRes) Vars() VarSet {
return c.In.Vars()
}
func (c *complementRes) Propagate(u anyvec.Vector, g Grad) {
u.Scale(u.Creator().MakeNumeric(-1))
c.In.Propagate(u, g)
}
// Abs computes the component-wise absolute value.
func Abs(in Res) Res {
sign := in.Output().Copy()
anyvec.GreaterThan(sign, sign.Creator().MakeNumeric(0))
sign.Scale(sign.Creator().MakeNumeric(2))
sign.AddScalar(sign.Creator().MakeNumeric(-1))
return Mul(in, NewConst(sign))
}
// ElemMax selects the maximum of a[i] and b[i] for each
// component index i.
func ElemMax(a, b Res) Res {
aMask := a.Output().Copy()
aMask.Sub(b.Output())
anyvec.GreaterThan(aMask, aMask.Creator().MakeNumeric(0))
bMask := aMask.Copy()
anyvec.Complement(bMask)
return Add(Mul(a, NewConst(aMask)), Mul(b, NewConst(bMask)))
}
// ElemMin selects the minimum of a[i] and b[i] for each
// component index i.
func ElemMin(a, b Res) Res {
aMask := a.Output().Copy()
aMask.Sub(b.Output())
anyvec.LessThan(aMask, aMask.Creator().MakeNumeric(0))
bMask := aMask.Copy()
anyvec.Complement(bMask)
return Add(Mul(a, NewConst(aMask)), Mul(b, NewConst(bMask)))
}
| [1] |
// Copyright 2011, Bryan Matsuo. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// See csv_test.go for more information about each test.
package csvutil
import (
"testing"
"os"
"io/ioutil"
"bytes"
)
func cleanTestFile(f string, T *testing.T) {
_, statErr := os.Stat(f)
if statErr == os.ENOENT {
return
}
if statErr != nil {
T.Errorf("Error stat'ing the test file %s; %s\n", f, statErr.String())
}
rmErr := os.Remove(f)
if rmErr != nil {
T.Error("Error removing the test file %s; %s\n", f, rmErr.String())
}
}
// TEST1 - Simple 3x3 matrix w/ comma separators and w/o excess whitespace.
var (
TestIn string = "_test-csvutil-01-i.csv"
TestOut string = "_test-csvutil-01-o.csv"
TestPerm uint32 = 0622
)
func TestWriteFile(T *testing.T) {
var testFilename string = TestOut
defer cleanTestFile(testFilename, T)
mat, str := csvTestInstance1()
nbytes, err := WriteFile(testFilename, TestPerm, mat)
if err != nil {
T.Error(err)
}
if nbytes == 0 {
T.Error("Wrote 0 bytes.")
return
}
T.Logf("Wrote %d bytes.\n", nbytes)
var outputString []byte
outputString, err = ioutil.ReadFile(testFilename)
if err != nil {
T.Errorf("Error reading the test output %s for verification", testFilename)
}
T.Logf("\nExpected:\n'%s'\nReceived:\n'%s'\n\n", outputString, str)
if string(outputString) != str {
T.Error("OUTPUT MISMATCH")
}
}
func TestReadFile(T *testing.T) {
var testFilename string = TestOut
defer cleanTestFile(testFilename, T)
mat, str := csvTestInstance1()
err := ioutil.WriteFile(testFilename, bytes.NewBufferString(str).Bytes(), TestPerm)
if err != nil {
T.Error(err)
}
inputMat, csvErr := ReadFile(testFilename)
if csvErr != nil {
T.Errorf("CSV reading error: %s", err.String())
}
T.Logf("\nExpected;\n'%v'\n Received:\n'%v'\n\n", mat, inputMat)
if len(inputMat) != len(mat) {
T.Fatal("INPUT MISMATCH; number of rows")
}
for i := 0; i < len(mat); i++ {
if len(mat[i]) != len(inputMat[i]) {
T.Errorf("INPUT MISMATCH; row %d", i)
} else {
for j := 0; j < len(mat[i]); j++ {
if mat[i][j] != inputMat[i][j] {
T.Errorf("INPUT MISMATCH; %d, %d", i, j)
}
}
}
}
}
// END TEST1
| [2] |
// Code generated by https://github.com/getbread/breadkit/zeus/tree/master/generators/searcher. DO NOT EDIT.
package search
import (
"fmt"
"strings"
"github.com/getbread/breadkit/zeus/searcher"
zeus "github.com/getbread/breadkit/zeus/types"
)
type PlusGatewayCheckoutSearchField int
const (
PlusGatewayCheckoutSearch_Id PlusGatewayCheckoutSearchField = iota
PlusGatewayCheckoutSearch_CheckoutID
PlusGatewayCheckoutSearch_TransactionID
PlusGatewayCheckoutSearch_CreatedAt
PlusGatewayCheckoutSearch_UpdatedAt
)
func (s PlusGatewayCheckoutSearchField) MarshalText() ([]byte, error) {
var data string
switch s {
case PlusGatewayCheckoutSearch_Id:
data = "id"
case PlusGatewayCheckoutSearch_CheckoutID:
data = "checkoutID"
case PlusGatewayCheckoutSearch_TransactionID:
data = "transactionID"
case PlusGatewayCheckoutSearch_CreatedAt:
data = "created_at"
case PlusGatewayCheckoutSearch_UpdatedAt:
data = "updated_at"
default:
return nil, fmt.Errorf("Cannot marshal text '%v'", s)
}
return []byte(data), nil
}
func (s PlusGatewayCheckoutSearchField) MarshalBinary() ([]byte, error) {
var data string
switch s {
case PlusGatewayCheckoutSearch_Id:
data = "id"
case PlusGatewayCheckoutSearch_CheckoutID:
data = "checkoutID"
case PlusGatewayCheckoutSearch_TransactionID:
data = "transactionID"
case PlusGatewayCheckoutSearch_CreatedAt:
data = "created_at"
case PlusGatewayCheckoutSearch_UpdatedAt:
data = "updated_at"
default:
return nil, fmt.Errorf("Cannot marshal binary '%v'", s)
}
return []byte(data), nil
}
func (s *PlusGatewayCheckoutSearchField) UnmarshalText(b []byte) error {
str := strings.Trim(string(b), `"`)
switch str {
case "id":
*s = PlusGatewayCheckoutSearch_Id
case "checkoutID":
*s = PlusGatewayCheckoutSearch_CheckoutID
case "transactionID":
*s = PlusGatewayCheckoutSearch_TransactionID
case "created_at":
*s = PlusGatewayCheckoutSearch_CreatedAt
case "updated_at":
*s = PlusGatewayCheckoutSearch_UpdatedAt
default:
return fmt.Errorf("Cannot unmarshal text '%s'", str)
}
return nil
}
func (s *PlusGatewayCheckoutSearchField) UnmarshalBinary(b []byte) error {
str := strings.Trim(string(b), `"`)
switch str {
case "id":
*s = PlusGatewayCheckoutSearch_Id
case "checkoutID":
*s = PlusGatewayCheckoutSearch_CheckoutID
case "transactionID":
*s = PlusGatewayCheckoutSearch_TransactionID
case "created_at":
*s = PlusGatewayCheckoutSearch_CreatedAt
case "updated_at":
*s = PlusGatewayCheckoutSearch_UpdatedAt
default:
return fmt.Errorf("Cannot unmarshal binary '%s'", str)
}
return nil
}
func (s PlusGatewayCheckoutSearchField) DbFieldName() string {
switch s {
case PlusGatewayCheckoutSearch_Id:
return "id"
case PlusGatewayCheckoutSearch_CheckoutID:
return "checkout_id"
case PlusGatewayCheckoutSearch_TransactionID:
return "transaction_id"
case PlusGatewayCheckoutSearch_CreatedAt:
return "created_at"
case PlusGatewayCheckoutSearch_UpdatedAt:
return "updated_at"
}
return ""
}
type PlusGatewayCheckoutSearchRequest struct {
searcher.SearchRequestFields
Filters []PlusGatewayCheckoutSearchFilter `json:"filters"`
FilterGroup searcher.FilterGroup `json:"filterGroup"`
OrderBy PlusGatewayCheckoutOrderBy `json:"orderBy"`
OrderBys []PlusGatewayCheckoutOrderBy `json:"orderBys"`
Fields []PlusGatewayCheckoutSearchField `json:"fields"`
IsByID bool `json:"isById"`
}
type PlusGatewayCheckoutSearchFilter struct {
Field PlusGatewayCheckoutSearchField `json:"field"`
Value interface{} `json:"value"`
Operator searcher.FilterOperator `json:"operator"`
Condition searcher.FilterCondition `json:"condition"`
}
type PlusGatewayCheckoutOrderBy struct {
Field PlusGatewayCheckoutSearchField `json:"field"`
Descending bool `json:"desc"`
}
/*
PlusGatewayCheckoutByID constructs a PlusGatewayCheckoutSearchRequest to pull
a PlusGatewayCheckout by its ID.
You can add additional options using functions.
Handlers may choose to return (*PlusGatewayCheckout, error) by checking the
IsSearchByID() function.
*/
func PlusGatewayCheckoutByID(ID zeus.Uuid, options ...func(*PlusGatewayCheckoutSearchRequest)) PlusGatewayCheckoutSearchRequest {
var searchRequest PlusGatewayCheckoutSearchRequest
searchRequest.AddFilter(
PlusGatewayCheckoutSearch_Id,
ID,
searcher.Operator_EQ,
searcher.Condition_AND)
searchRequest.Limit = 1
searchRequest.IsByID = true
for _, f := range options {
f(&searchRequest)
}
return searchRequest
}
// implement searcher.SearchRequest interface
func (sr *PlusGatewayCheckoutSearchRequest) GetTableName() string {
return "shopify_plus_gateway_checkouts"
}
func (sr *PlusGatewayCheckoutSearchRequest) GetFilters() []searcher.Filter {
filters := []searcher.Filter{}
for _, f := range sr.Filters {
filter := searcher.Filter{
Field: f.Field,
Value: f.Value,
Operator: f.Operator,
Condition: f.Condition,
}
filters = append(filters, filter)
}
return filters
}
func (sr *PlusGatewayCheckoutSearchRequest) GetFilterGroup() searcher.FilterGroup {
return sr.FilterGroup
}
func (sr *PlusGatewayCheckoutSearchRequest) GetOrderBy() searcher.OrderBy {
return searcher.OrderBy{
Field: sr.OrderBy.Field,
Descending: sr.OrderBy.Descending,
}
}
func (sr *PlusGatewayCheckoutSearchRequest) GetOrderBys() []searcher.OrderBy {
orderBys := make([]searcher.OrderBy, len(sr.OrderBys))
for i, value := range sr.OrderBys {
orderBys[i] = searcher.OrderBy{
Field: value.Field,
Descending: value.Descending,
}
}
return orderBys
}
func (sr *PlusGatewayCheckoutSearchRequest) GetLimit() int {
return sr.Limit
}
func (sr *PlusGatewayCheckoutSearchRequest) GetOffset() int {
return sr.Offset
}
func (sr *PlusGatewayCheckoutSearchRequest) IsSearchByID() bool {
return sr.IsByID
}
func (sr *PlusGatewayCheckoutSearchRequest) AddFilter(field PlusGatewayCheckoutSearchField, value interface{}, operator searcher.FilterOperator, condition searcher.FilterCondition) {
if len(sr.FilterGroup.Filters) > 0 || len(sr.FilterGroup.FilterGroups) > 0 {
panic("Filters cannot be used with FilterGroups")
}
f := PlusGatewayCheckoutSearchFilter{
Field: field,
Value: value,
Operator: operator,
Condition: condition,
}
sr.Filters = append(sr.Filters, f)
}
func (sr *PlusGatewayCheckoutSearchRequest) SetFilterGroup(fg searcher.FilterGroup) {
if len(sr.Filters) > 0 {
panic("FilterGroups cannot be used with Filters")
}
sr.FilterGroup = fg
}
func (sr *PlusGatewayCheckoutSearchRequest) SetOrderBy(field PlusGatewayCheckoutSearchField, isDescending bool) {
sr.OrderBy = PlusGatewayCheckoutOrderBy{
Field: field,
Descending: isDescending,
}
// Set this primary order by as the first in the slice
sr.OrderBys = []PlusGatewayCheckoutOrderBy{sr.OrderBy}
}
func (sr *PlusGatewayCheckoutSearchRequest) SetOrderBys(orderBys ...PlusGatewayCheckoutOrderBy) {
sr.OrderBys = append([]PlusGatewayCheckoutOrderBy{}, orderBys...)
}
func (sr *PlusGatewayCheckoutSearchRequest) GetAllFields() []string {
return []string{
"id",
"checkout_id",
"transaction_id",
"created_at",
"updated_at",
}
}
func (sr *PlusGatewayCheckoutSearchRequest) GetFields() []string {
fields := []string{}
for _, f := range sr.Fields {
fields = append(fields, f.DbFieldName())
}
return fields
}
|
[
2
] |
package main
import (
"fmt"
"time"
)
func main() {
const size = 1000
start := time.Now()
var a [size][size]int
for i := 0; i < size; i++ {
for j := 0; j < size; j++ {
a[j][i]++
}
}
fmt.Printf("Program runs for : %v\n", time.Since(start))
}
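// For contrast, a minimal sketch (not part of the original benchmark): the same
// fill done in row-major order, a[i][j]++, walks memory contiguously and is
// typically much faster thanks to cache locality.
func rowMajorFill() time.Duration {
const size = 1000
start := time.Now()
var a [size][size]int
for i := 0; i < size; i++ {
for j := 0; j < size; j++ {
a[i][j]++ // contiguous, cache-friendly access
}
}
return time.Since(start)
}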
|
[
1
] |
// Copyright 2020 Douyu
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tstore
import (
"fmt"
"net"
"net/http"
"strings"
"time"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
prome "github.com/douyu/jupiter/pkg/core/metric"
"github.com/douyu/jupiter/pkg/util/xdebug"
"github.com/douyu/jupiter/pkg/util/xstring"
"github.com/douyu/jupiter/pkg/xlog"
"github.com/fatih/color"
)
func newTs(config *Config) *tablestore.TableStoreClient {
tsConfig := &tablestore.TableStoreConfig{
RetryTimes: config.RetryTimes,
MaxRetryTime: config.MaxRetryTime,
HTTPTimeout: tablestore.HTTPTimeout{
ConnectionTimeout: config.ConnectionTimeout,
RequestTimeout: config.RequestTimeout,
},
MaxIdleConnections: config.MaxIdleConnections,
Transport: &TsRoundTripper{
name: config.Name,
config: *config,
Transport: http.Transport{
MaxIdleConnsPerHost: config.MaxIdleConnections,
DialContext: (&net.Dialer{
Timeout: config.ConnectionTimeout,
}).DialContext,
},
},
}
return tablestore.NewClientWithConfig(config.EndPoint, config.Instance, config.AccessKeyId, config.AccessKeySecret, config.SecurityToken, tsConfig)
}
type TsRoundTripper struct {
http.Transport
name string
config Config
}
func (h *TsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
var (
addr = req.URL.String()
hostURL = strings.TrimLeft(req.URL.Path, "/")
method = req.Method
beg = time.Now()
)
resp, err := h.Transport.RoundTrip(req)
var cost = time.Since(beg)
// metrics collection
if err != nil {
prome.ClientHandleCounter.WithLabelValues(prome.TypeTableStore, h.name, method, hostURL, "error").Inc()
} else {
prome.ClientHandleCounter.WithLabelValues(prome.TypeTableStore, h.name, method, hostURL, resp.Status).Inc()
}
prome.ClientHandleHistogram.WithLabelValues(prome.TypeTableStore, h.name, method, hostURL).Observe(cost.Seconds())
statusCode := -1
if resp != nil {
statusCode = resp.StatusCode
}
if xdebug.IsDevelopmentMode() {
prefix := fmt.Sprintf("[%s:%s]", h.name, addr)
fmt.Printf("%-50s => %s\n", color.GreenString(prefix), color.GreenString("Send: "+method+" | "+xstring.PrettyJson(req.Body)))
}
// access log
if err != nil {
xlog.Jupiter().Error("access",
xlog.FieldErr(err),
xlog.FieldMethod(method),
xlog.FieldAddr(addr),
xlog.FieldCode(int32(statusCode)),
xlog.FieldCost(cost),
)
} else {
if h.config.EnableAccessLog {
xlog.Jupiter().Info("access",
xlog.FieldMethod(method),
xlog.FieldAddr(addr),
xlog.FieldCost(cost),
xlog.FieldCode(int32(statusCode)),
)
}
}
if h.config.SlowThreshold > time.Duration(0) {
// slow log
if cost > h.config.SlowThreshold {
xlog.Jupiter().Error("slow",
xlog.FieldErr(errSlowCommand),
xlog.FieldMethod(method),
xlog.FieldCost(cost),
xlog.FieldAddr(addr),
xlog.FieldCode(int32(statusCode)),
)
}
}
return resp, err
}
|
[
4
] |
package MessageSecpkModel
import (
"github.com/gohouse/gorose/v2"
"main.go/tuuz"
"main.go/tuuz/Log"
)
const table = "message_speck"
type Interface struct {
Db gorose.IOrm
}
func (self *Interface) Api_insert(belong_cid, height, message_cid, Version, From, To, Nonce, Value, GasLimit, GasFeeCap, GasPremium, Method, Params, Signature, CID, date interface{}) bool {
db := self.Db.Table(table)
data := map[string]interface{}{
"belong_cid": belong_cid,
"height": height,
"message_cid": message_cid,
"Version": Version,
"From": From,
"To": To,
"Nonce": Nonce,
"Value": Value,
"GasLimit": GasLimit,
"GasFeeCap": GasFeeCap,
"GasPremium": GasPremium,
"Method": Method,
"Params": Params,
"Signature": Signature,
"CID": CID,
"date": date,
}
db.Data(data)
_, err := db.Insert()
if err != nil {
Log.Dbrr(err, tuuz.FUNCTION_ALL())
return false
} else {
return true
}
}
func Api_select(belong_cid interface{}, page, limit int) []gorose.Data {
db := tuuz.Db().Table(table)
where := map[string]interface{}{
"belong_cid": belong_cid,
}
db.Where(where)
db.Order("id desc")
db.Limit(limit)
db.Page(page)
ret, err := db.Get()
if err != nil {
Log.Dbrr(err, tuuz.FUNCTION_ALL())
return nil
} else {
return ret
}
}
|
[
2
] |
/*
https://leetcode.com/problems/serialize-and-deserialize-bst/
449. Serialize and Deserialize BST
Medium
Serialization is converting a data structure or object into a sequence of bits so that it can be stored in a file or memory buffer, or transmitted across a network connection link to be reconstructed later in the same or another computer environment.
Design an algorithm to serialize and deserialize a binary search tree. There is no restriction on how your serialization/deserialization algorithm should work. You need to ensure that a binary search tree can be serialized to a string, and this string can be deserialized to the original tree structure.
The encoded string should be as compact as possible.
Example 1:
Input: root = [2,1,3]
Output: [2,1,3]
Example 2:
Input: root = []
Output: []
Constraints:
The number of nodes in the tree is in the range [0, 10^4].
0 <= Node.val <= 10^4
The input tree is guaranteed to be a binary search tree.
*/
package main
import (
"fmt"
"strconv"
"strings"
)
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
type Codec struct {
}
func Constructor() Codec {
return Codec{}
}
func _to_string(root *TreeNode, r *[]int) {
if root == nil {
return
}
*r = append(*r, root.Val)
_to_string(root.Left, r)
_to_string(root.Right, r)
}
// Serializes a tree to a single string.
func (this *Codec) serialize(root *TreeNode) string {
nodeValues := []int{}
// preorder traversal of the nodes
_to_string(root, &nodeValues)
// join the node values into a single space-separated string
r := ""
for i := 0; i < len(nodeValues); i++ {
r += fmt.Sprintf("%d ", nodeValues[i])
}
return strings.TrimSpace(r)
}
func _to_BinaryTreeNode(q *[]string, min, max int) *TreeNode {
if len(*q) == 0 {
return nil
}
// peek at the value at the front of the queue
frontIntVal, _ := strconv.Atoi((*q)[0])
// the value does not belong in the current subtree
if frontIntVal < min || frontIntVal > max {
return nil
}
// pop the front of the queue
*q = (*q)[1:]
// create the new node
newNode := &TreeNode{
Val: frontIntVal,
}
newNode.Left = _to_BinaryTreeNode(q, min, frontIntVal)
newNode.Right = _to_BinaryTreeNode(q, frontIntVal, max)
return newNode
}
// Deserializes your encoded data to tree.
func (this *Codec) deserialize(data string) *TreeNode {
if len(data) == 0 {
return nil
}
q := strings.Split(data, " ")
if len(q) == 0 {
return nil
}
// value bounds for the root: the full signed 32-bit range
min := -1 << 31
max := 1<<31 - 1
return _to_BinaryTreeNode(&q, min, max)
}
/**
* Your Codec object will be instantiated and called as such:
* ser := Constructor()
* deser := Constructor()
* tree := ser.serialize(root)
* ans := deser.deserialize(tree)
* return ans
*/
func main() {
root := &TreeNode{
Val: 2,
Left: &TreeNode{
Val: 1,
},
Right: &TreeNode{
Val: 3,
},
}
printTreeNodeByDFS(root)
fmt.Println()
ser := Constructor()
treeString := ser.serialize(root)
fmt.Println(treeString)
ans := ser.deserialize(treeString)
// printTreeNodeByBFS(ans)
printTreeNodeByDFS(ans)
fmt.Println()
}
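// TreeNode and printTreeNodeByDFS are referenced above but not defined in this
// snippet; they presumably live in a sibling file. A minimal sketch of plausible
// definitions (an assumption, not the original code):
type TreeNode struct {
Val int
Left *TreeNode
Right *TreeNode
}
// printTreeNodeByDFS prints the tree's values in preorder, separated by spaces.
func printTreeNodeByDFS(root *TreeNode) {
if root == nil {
return
}
fmt.Printf("%d ", root.Val)
printTreeNodeByDFS(root.Left)
printTreeNodeByDFS(root.Right)
}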
|
[
1
] |
package _go
/*
Interview Question 08.05. Recursive Multiplication
Write a recursive function that multiplies two positive integers without using the * operator. Addition, subtraction and bit shifts are allowed, but use them sparingly.
Example 1:
Input: A = 1, B = 10
Output: 10
Example 2:
Input: A = 3, B = 4
Output: 12
Note:
The product is guaranteed not to overflow.
Accepted: 22,334  Submissions: 32,923
*/
// Keep it simple; no clever tricks needed.
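// multiply halves B and doubles A at every step; whenever the low bit of B is
// set, the current A is folded into the result (shift-and-add, also known as
// Russian peasant multiplication).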
func multiply(A, B int) int {
if B == 0 {
return 0
}
if B&1 == 1 {
return A + multiply(A<<1, B>>1)
}
return multiply(A<<1, B>>1)
}
|
[
2
] |
package model
import (
"Server/app/constants"
"Server/app/model/database"
"Server/app/utility"
"context"
"github.com/arangodb/go-driver"
)
type GamesEdge struct {
From driver.DocumentID `json:"_from"`
To driver.DocumentID `json:"_to"`
Type string `json:"type"`
}
func (GE *GamesEdge) find(key string) (map[string]interface{}, error) {
var g map[string]interface{}
ctx := context.Background()
_, err := database.GamesEdge().ReadDocument(ctx, key, &g)
if err != nil {
return g, err
}
return g, nil
}
func (GE *GamesEdge) findAll(count, page int64) ([]map[string]interface{}, error) {
query := `FOR v IN ` + constants.GamesEdge + ` LIMIT @offset, @count RETURN v`
bindVars := map[string]interface{}{
"offset": (page - 1) * count,
"count": count,
}
ctx := context.Background()
var gameEdge []map[string]interface{}
cursor, err := database.DB().Query(ctx, query, bindVars)
if err != nil {
return gameEdge, err
}
defer cursor.Close()
for {
var g map[string]interface{}
_, err := cursor.ReadDocument(ctx, &g)
if driver.IsNoMoreDocuments(err) {
break
} else if err != nil {
utility.CheckErr(err)
}
gameEdge = append(gameEdge, g)
}
return gameEdge, nil
}
func (GE *GamesEdge) create() (driver.DocumentID, error) {
ctx := context.Background()
if meta, err := database.GamesEdge().CreateDocument(ctx, GE); err != nil {
return "", err
} else {
return meta.ID, nil
}
}
func (GE *GamesEdge) destroy(key string) error {
ctx := context.Background()
_, err := database.GamesEdge().RemoveDocument(ctx, key)
if err != nil {
return err
}
return nil
}
func (GE *GamesEdge) update(key string) error {
ctx := context.Background()
_, err := database.GamesEdge().UpdateDocument(ctx, key, GE)
if err != nil {
return err
}
return nil
}
func (GE *GamesEdge) count() (int64, error) {
query := `FOR v IN ` + constants.GamesEdge + ` RETURN v`
ctx := driver.WithQueryCount(context.Background())
cursor, err := database.DB().Query(ctx, query, nil)
if err != nil {
return 0, err
}
defer cursor.Close()
return cursor.Count(), nil
}
|
[
2
] |
package reflect
import (
_ "unsafe" // for go:linkname
)
// This is the same thing as an interface{}.
type Value struct {
typecode Type
value *uint8
}
func Indirect(v Value) Value {
return v
}
func ValueOf(i interface{}) Value
//go:linkname _ValueOf reflect.ValueOf
func _ValueOf(i Value) Value {
return i
}
func (v Value) Interface() interface{}
func (v Value) Type() Type {
return v.typecode
}
func (v Value) Kind() Kind {
return Invalid // TODO
}
func (v Value) IsNil() bool {
panic("unimplemented: (reflect.Value).IsNil()")
}
func (v Value) Pointer() uintptr {
panic("unimplemented: (reflect.Value).Pointer()")
}
func (v Value) IsValid() bool {
panic("unimplemented: (reflect.Value).IsValid()")
}
func (v Value) CanInterface() bool {
panic("unimplemented: (reflect.Value).CanInterface()")
}
func (v Value) CanAddr() bool {
panic("unimplemented: (reflect.Value).CanAddr()")
}
func (v Value) Addr() Value {
panic("unimplemented: (reflect.Value).Addr()")
}
func (v Value) CanSet() bool {
panic("unimplemented: (reflect.Value).CanSet()")
}
func (v Value) Bool() bool {
panic("unimplemented: (reflect.Value).Bool()")
}
func (v Value) Int() int64 {
panic("unimplemented: (reflect.Value).Int()")
}
func (v Value) Uint() uint64 {
panic("unimplemented: (reflect.Value).Uint()")
}
func (v Value) Float() float64 {
panic("unimplemented: (reflect.Value).Float()")
}
func (v Value) Complex() complex128 {
panic("unimplemented: (reflect.Value).Complex()")
}
func (v Value) String() string {
panic("unimplemented: (reflect.Value).String()")
}
func (v Value) Bytes() []byte {
panic("unimplemented: (reflect.Value).Bytes()")
}
func (v Value) Slice(i, j int) Value {
panic("unimplemented: (reflect.Value).Slice()")
}
func (v Value) Len() int {
panic("unimplemented: (reflect.Value).Len()")
}
func (v Value) NumField() int {
panic("unimplemented: (reflect.Value).NumField()")
}
func (v Value) Elem() Value {
panic("unimplemented: (reflect.Value).Elem()")
}
func (v Value) Field(i int) Value {
panic("unimplemented: (reflect.Value).Field()")
}
func (v Value) Index(i int) Value {
panic("unimplemented: (reflect.Value).Index()")
}
func (v Value) MapKeys() []Value {
panic("unimplemented: (reflect.Value).MapKeys()")
}
func (v Value) MapIndex(key Value) Value {
panic("unimplemented: (reflect.Value).MapIndex()")
}
func (v Value) Set(x Value) {
panic("unimplemented: (reflect.Value).Set()")
}
func (v Value) SetBool(x bool) {
panic("unimplemented: (reflect.Value).SetBool()")
}
func (v Value) SetInt(x int64) {
panic("unimplemented: (reflect.Value).SetInt()")
}
func (v Value) SetUint(x uint64) {
panic("unimplemented: (reflect.Value).SetUint()")
}
func (v Value) SetFloat(x float64) {
panic("unimplemented: (reflect.Value).SetFloat()")
}
func (v Value) SetComplex(x complex128) {
panic("unimplemented: (reflect.Value).SetComplex()")
}
func (v Value) SetString(x string) {
panic("unimplemented: (reflect.Value).SetString()")
}
func MakeSlice(typ Type, len, cap int) Value {
panic("unimplemented: reflect.MakeSlice()")
}
|
[
1
] |
package usecases
import (
"errors"
"github.com/kmaguswira/micro-clean/service/account/application/repositories"
"github.com/kmaguswira/micro-clean/service/account/domain"
)
type ICreateRole interface {
Execute(input string) (domain.Role, error)
}
type createRoleUseCase struct {
readWriteRepository repositories.IReadWriteRepository
}
func NewCreateRoleUseCase(ReadWriteRepository repositories.IReadWriteRepository) ICreateRole {
return &createRoleUseCase{
readWriteRepository: ReadWriteRepository,
}
}
func (t *createRoleUseCase) Execute(input string) (domain.Role, error) {
newRole := domain.Role{
Title: input,
}
result, err := t.readWriteRepository.CreateRole(&newRole)
if err == nil {
return *result, nil
}
return domain.Role{}, errors.New("Bad Request")
}
|
[
2
] |
// This file "invite.go" was created by Lincan Li on 5/11/16.
// Copyright © 2016 - Lincan Li. All rights reserved
package model
import (
"github.com/satori/go.uuid"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"time"
)
type InvitationStatus int
const (
InvitationStatusActive InvitationStatus = 1 + iota
InvitationStatusLimited
)
type InvitationType int
const (
InvitationTypeInHouse InvitationType = 1 + iota
InvitationTypeVisitor
InvitationTypeTester
)
// Invitation records an invitation code, its owner and how many times it has been applied.
type Invitation struct {
ID bson.ObjectId `json:"id" bson:"_id,omitempty"`
Code string `bson:"code"`
Owner string `bson:"owner"`
AppliedCount int `bson:"applied"`
Type InvitationType `bson:"type"`
Status InvitationStatus `bson:"status"`
Timestamp time.Time `bson:"timestamp"`
}
// InvitationCollectionName returns the invitation collection name.
func InvitationCollectionName() string {
return "invitation"
}
// InvitationCollection returns the invitation collection.
func InvitationCollection(MDB *mgo.Database) *mgo.Collection {
return MDB.C(InvitationCollectionName())
}
// CollectionName returns the collection name for Invitation.
func (i *Invitation) CollectionName() string {
return "invitation"
}
// Collection returns the collection for Invitation.
func (i *Invitation) Collection(MDB *mgo.Database) *mgo.Collection {
return MDB.C(i.CollectionName())
}
func (i *Invitation) ClaimInvitation(MDB *mgo.Database, uUUID uuid.UUID, iType InvitationType) (*Invitation, error) {
inv := &Invitation{
Code: string(RandomNumber(6)),
Owner: uUUID.String(),
AppliedCount: 0,
Type: iType,
Status: InvitationStatusActive,
Timestamp: time.Now(),
}
if err := i.Collection(MDB).Insert(inv); err != nil {
return nil, err
}
return inv, nil
}
// IncrementApplied increments the applied counter by 1.
func (i *Invitation) IncrementApplied(MDB *mgo.Database) (*Invitation, error) {
change := mgo.Change{
Update: bson.M{"$inc": bson.M{"applied": 1}},
ReturnNew: true,
}
i.Collection(MDB).Find(bson.M{"_id": i.ID}).Apply(change, i)
return i, nil
}
func (i *Invitation) TypeValidation() bool {
switch i.Type {
case InvitationTypeInHouse:
if i.AppliedCount > 5 {
return false
}
tLimit, _ := time.Parse(time.RFC822, "30 Jun 16 12:00 UTC")
if !i.Timestamp.Before(tLimit) {
return false
}
case InvitationTypeVisitor:
tLimit := i.Timestamp.Add(24 * time.Hour)
if !tLimit.Before(time.Now()) {
return false
}
if i.AppliedCount > 1 {
return false
}
case InvitationTypeTester:
tLimit := i.Timestamp.Add(48 * time.Hour)
if !tLimit.Before(time.Now()) {
return false
}
if i.AppliedCount > 1 {
return false
}
}
return true
}
func (i *Invitation) ValidateInvitation(code string) bool {
if i.Status == InvitationStatusLimited {
return false
}
if !i.TypeValidation() {
return false
}
return i.Code == code
}
func FirstInvitation(MDB *mgo.Database, code string) (*Invitation, error) {
var invitation Invitation
if err := InvitationCollection(MDB).Find(bson.M{"code": code}).One(&invitation); err != nil {
if err == mgo.ErrNotFound {
return nil, nil
}
return nil, err
}
return &invitation, nil
}
func ValidateInvitation(MDB *mgo.Database, code string) (bool, error) {
if code == "123456" || code == "111111" {
return true, nil
}
invitation, err := FirstInvitation(MDB, code)
if err != nil {
return false, err
}
if invitation == nil {
return false, nil
}
if !invitation.ValidateInvitation(code) {
return false, nil
}
if _, err = invitation.IncrementApplied(MDB); err != nil {
return true, err
}
return true, nil
}
type InviteUser struct {
UserUUID string `bson:"user_uuid"`
Code string `bson:"code"`
}
// InvitationUserCollectionName returns the invited-user collection name.
func InvitationUserCollectionName() string {
return "invite_user"
}
// InvitationUserCollection returns the invited-user collection.
func InvitationUserCollection(MDB *mgo.Database) *mgo.Collection {
return MDB.C(InvitationUserCollectionName())
}
// CollectionName returns the collection name for InviteUser.
func (iu *InviteUser) CollectionName() string {
return "invite_user"
}
// Collection returns the collection for InviteUser.
func (iu *InviteUser) Collection(MDB *mgo.Database) *mgo.Collection {
return MDB.C(iu.CollectionName())
}
func InsertInvitedUser(MDB *mgo.Database, uUUID uuid.UUID, code string) error {
iu := &InviteUser{
Code: code,
UserUUID: uUUID.String(),
}
if err := InvitationUserCollection(MDB).Insert(iu); err != nil {
return err
}
return nil
}
func FindInvitedUser(MDB *mgo.Database, code string) ([]uuid.UUID, error) {
var ius []*InviteUser
if err := InvitationUserCollection(MDB).Find(bson.M{"code": code}).All(&ius); err != nil {
return nil, err
}
if ius == nil {
return nil, nil
}
var UUIDs []uuid.UUID
for _, i := range ius {
UUID := uuid.FromStringOrNil(i.UserUUID)
if UUID != uuid.Nil {
UUIDs = append(UUIDs, UUID)
}
}
return UUIDs, nil
}
|
[
2
] |
package diff
import (
"archive/tar"
"bytes"
"compress/gzip"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"strings"
"gopkg.in/yaml.v2"
"github.com/kr/pretty"
"github.com/xchapter7x/enaml"
"github.com/xchapter7x/enaml/pull"
)
func NewDiff(cacheDir string) *Diff {
return &Diff{CacheDir: cacheDir}
}
type Diff struct {
CacheDir string
}
func (s *Diff) ReleaseDiff(releaseURLA, releaseURLB string) (diffset []string, err error) {
var filenameA string
var filenameB string
release := pull.NewRelease(s.CacheDir)
if filenameA, err = release.Pull(releaseURLA); err == nil {
if filenameB, err = release.Pull(releaseURLB); err == nil {
objA := GetReleaseManifest(filenameA)
objB := GetReleaseManifest(filenameB)
diffset = pretty.Diff(objA, objB)
}
}
return
}
func (s *Diff) JobDiffBetweenReleases(jobname, releaseURLA, releaseURLB string) (diffset []string, err error) {
var (
jobA *tar.Reader
jobB *tar.Reader
filenameA string
filenameB string
ok bool
)
release := pull.NewRelease(s.CacheDir)
filenameA, err = release.Pull(releaseURLA)
if err != nil {
err = fmt.Errorf("An error occurred downloading %s. %s", releaseURLA, err.Error())
return
}
filenameB, err = release.Pull(releaseURLB)
if err != nil {
err = fmt.Errorf("An error occurred downloading %s. %s", releaseURLB, err.Error())
return
}
if jobA, ok = ProcessReleaseArchive(filenameA)[jobname]; !ok {
err = errors.New(fmt.Sprintf("could not find jobname %s in release A", jobname))
return
}
if jobB, ok = ProcessReleaseArchive(filenameB)[jobname]; !ok {
err = errors.New(fmt.Sprintf("could not find jobname %s in release B", jobname))
return
}
bufA := new(bytes.Buffer)
bufA.ReadFrom(jobA)
bufB := new(bytes.Buffer)
bufB.ReadFrom(jobB)
diffset = JobPropertiesDiff(bufA.Bytes(), bufB.Bytes())
return
}
func GetReleaseManifest(srcFile string) (releaseManifest enaml.ReleaseManifest) {
f, err := os.Open(srcFile)
if err != nil {
fmt.Println(err)
}
defer f.Close()
tarReader := getTarballReader(f)
for {
header, err := tarReader.Next()
if err == io.EOF {
break
}
name := header.Name
switch header.Typeflag {
case tar.TypeReg:
if path.Base(name) == "release.MF" {
if b, err := ioutil.ReadAll(tarReader); err == nil {
releaseManifest = enaml.ReleaseManifest{}
yaml.Unmarshal(b, &releaseManifest)
}
}
}
}
return
}
func ProcessReleaseArchive(srcFile string) (jobs map[string]*tar.Reader) {
jobs = make(map[string]*tar.Reader)
f, err := os.Open(srcFile)
if err != nil {
fmt.Println(err)
}
defer f.Close()
tarReader := getTarballReader(f)
for {
header, err := tarReader.Next()
if err == io.EOF {
break
}
name := header.Name
switch header.Typeflag {
case tar.TypeReg:
if strings.HasPrefix(name, "./jobs/") {
jobTarball := getTarballReader(tarReader)
jobManifest := getJobManifestFromTarball(jobTarball)
jobName := strings.Split(path.Base(name), ".")[0]
jobs[jobName] = jobManifest
}
}
}
return
}
func getTarballReader(reader io.Reader) *tar.Reader {
gzf, err := gzip.NewReader(reader)
if err != nil {
fmt.Println(err)
}
return tar.NewReader(gzf)
}
func getJobManifestFromTarball(jobTarball *tar.Reader) (res *tar.Reader) {
var jobManifestFilename = "./job.MF"
for {
header, err := jobTarball.Next()
if err != nil {
// io.EOF or a read error: job.MF was not found in this tarball
break
}
if header.Name == jobManifestFilename {
res = jobTarball
break
}
}
return
}
func JobPropertiesDiff(a, b []byte) []string {
var objA enaml.JobManifest
var objB enaml.JobManifest
yaml.Unmarshal(a, &objA)
yaml.Unmarshal(b, &objB)
mp := pretty.Diff(objA, objB)
return mp
}
|
[
7
] |
package anticheat
import (
"context"
"net/http"
"strconv"
"time"
"go-common/library/log"
"go-common/library/log/infoc"
bm "go-common/library/net/http/blademaster"
"go-common/library/net/metadata"
)
// AntiCheat sends anti-cheating info to berserker.
type AntiCheat struct {
infoc *infoc.Infoc
}
// New creates a new AntiCheat logger.
func New(c *infoc.Config) (a *AntiCheat) {
return &AntiCheat{infoc: infoc.New(c)}
}
// antiCheat carries as much request information as possible.
type antiCheat struct {
Buvid string
Build string
Client string // for example ClientWeb
IP string
UID string
Aid string
Mid string
Sid string
Refer string
URL string
From string
ItemID string
ItemType string // for example ItemTypeAv
Action string // for example ActionClick
ActionID string
UA string
TS string
Extra string
}
// anti-cheat const.
const (
ClientWeb = "web"
ClientIphone = "iphone"
ClientIpad = "ipad"
ClientAndroid = "android"
// AntiCheat ItemType
ItemTypeAv = "av"
ItemTypeBangumi = "bangumi"
ItemTypeLive = "live"
ItemTypeTopic = "topic"
ItemTypeRank = "rank"
ItemTypeActivity = "activity"
ItemTypeTag = "tag"
ItemTypeAD = "ad"
ItemTypeLV = "lv"
// AntiCheat Action
ActionClick = "click"
ActionPlay = "play"
ActionFav = "fav"
ActionCoin = "coin"
ActionDM = "dm"
ActionToView = "toview"
ActionShare = "share"
ActionSpace = "space"
Actionfollow = "follow"
ActionHeartbeat = "heartbeat"
ActionAnswer = "answer"
)
func (a *antiCheat) toSlice() (as []interface{}) {
as = make([]interface{}, 0, 18)
as = append(as, a.Buvid, a.Build, a.Client, a.IP, a.UID, a.Aid, a.Mid)
as = append(as, a.Sid, a.Refer, a.URL, a.From, a.ItemID, a.ItemType)
as = append(as, a.Action, a.ActionID, a.UA, a.TS, a.Extra)
return
}
// InfoAntiCheat2 for new http framework(bm).
func (a *AntiCheat) InfoAntiCheat2(ctx *bm.Context, uid, aid, mid, itemID, itemType, action, actionID string) error {
return a.infoAntiCheat(ctx, ctx.Request, metadata.String(ctx, metadata.RemoteIP), uid, aid, mid, itemID, itemType, action, actionID)
}
// infoAntiCheat common logic.
func (a *AntiCheat) infoAntiCheat(ctx context.Context, req *http.Request, IP, uid, aid, mid, itemID, itemType, action, actionID string) error {
params := req.Form
ac := &antiCheat{
UID: uid,
Aid: aid,
Mid: mid,
ItemID: itemID,
ItemType: itemType,
Action: action,
ActionID: actionID,
IP: IP,
URL: req.URL.Path,
Refer: req.Header.Get("Referer"),
UA: req.Header.Get("User-Agent"),
TS: strconv.FormatInt(time.Now().Unix(), 10),
}
ac.From = params.Get("from")
if csid, err := req.Cookie("sid"); err == nil {
ac.Sid = csid.Value
}
var cli string
switch {
case len(params.Get("access_key")) == 0:
cli = ClientWeb
if ck, err := req.Cookie("buvid3"); err == nil {
ac.Buvid = ck.Value
}
case params.Get("platform") == "ios":
cli = ClientIphone
if params.Get("device") == "pad" {
cli = ClientIpad
}
case params.Get("platform") == "android":
cli = ClientAndroid
default:
log.Warn("unknown plat(%s)", params.Get("platform"))
}
ac.Client = cli
if cli != ClientWeb {
ac.Buvid = req.Header.Get("buvid")
ac.Build = params.Get("build")
}
return a.infoc.Infov(ctx, ac.toSlice()...)
}
// ServiceAntiCheat common anti-cheat.
func (a *AntiCheat) ServiceAntiCheat(p map[string]string) error {
return a.infoc.Info(convertBase(p)...)
}
// ServiceAntiCheatBus for answer anti-cheat.
func (a *AntiCheat) ServiceAntiCheatBus(p map[string]string, bus []interface{}) error {
ac := append(convertBase(p), bus...)
return a.infoc.Info(ac...)
}
// ServiceAntiCheatv support mirror request
func (a *AntiCheat) ServiceAntiCheatv(ctx context.Context, p map[string]string) error {
return a.infoc.Infov(ctx, convertBase(p)...)
}
// ServiceAntiCheatBusv support mirror request
func (a *AntiCheat) ServiceAntiCheatBusv(ctx context.Context, p map[string]string, bus []interface{}) error {
ac := append(convertBase(p), bus...)
return a.infoc.Infov(ctx, ac...)
}
func convertBase(p map[string]string) (res []interface{}) {
ac := &antiCheat{
ItemType: p["itemType"],
Action: p["action"],
IP: p["ip"],
Mid: p["mid"],
UID: p["fid"],
Aid: p["aid"],
Sid: p["sid"],
UA: p["ua"],
Buvid: p["buvid"],
Refer: p["refer"],
URL: p["url"],
TS: strconv.FormatInt(time.Now().Unix(), 10),
}
res = ac.toSlice()
return
}
|
[
2
] |
// COPYRIGHT (C) 2017 barreiro. All Rights Reserved.
// GoLang solvers for Project Euler problems
package euler
// Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be:
// 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
// By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.
func Solver002() int {
return solver002(4000000)
}
func solver002(N int) int {
previous, last, sum := 1, 2, 2
for ; last <= N; sum += last {
// previous moves to two terms ahead of last. last gets the next after the new previous. it's even by definition!
previous = previous + last + last
last = previous + previous - last
}
// need to remove the last sum, added before breaking the loop
return sum - last
}
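// A minimal sanity-check sketch (not part of the original solution): the
// even-valued Fibonacci terms not exceeding 10 are 2 and 8, so solver002(10)
// should return 10.
func checkSolver002() bool {
return solver002(10) == 10
}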
|
[
2
] |
package main
import (
"bufio"
"fmt"
"io"
"os"
"sort"
"strconv"
"strings"
)
var cache = make(map[string]int)
func sortString(in string) string {
out := strings.Split(in, "")
sort.Strings(out)
return strings.Join(out, "")
}
func solve(N int, s []string) uint64 {
for _, v := range s {
ss := sortString(v)
cache[ss]++
}
return count(cache)
}
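// count returns the number of unordered pairs of anagram-equivalent strings:
// for each group of v identical sorted strings it adds 1+2+...+(v-1), which
// equals v*(v-1)/2.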
func count(cache map[string]int) uint64 {
ret := uint64(0)
for _, v := range cache {
if v >= 2 {
for i := 1; i <= v-1; i++ {
ret += uint64(i)
}
}
}
return ret
}
func nextLine(sc *bufio.Scanner) string {
sc.Scan()
return sc.Text()
}
func newSplitScanner(r io.Reader) *bufio.Scanner {
sc := bufio.NewScanner(r)
sc.Split(bufio.ScanWords)
return sc
}
func nextInt(sc *bufio.Scanner) int {
sc.Scan()
i, e := strconv.Atoi(sc.Text())
if e != nil {
panic(e)
}
return i
}
func main() {
var sc = newSplitScanner(os.Stdin)
n := nextInt(sc)
a := make([]string, n)
for i := 0; i < n; i++ {
a[i] = nextLine(sc)
}
fmt.Println(solve(n, a))
}
|
[
2
] |
package cclua
import (
"sync"
)
// TagLuaPoolInfo .
type TagLuaPoolInfo struct {
m sync.Mutex
data []*TagLuaInfo
}
// Get .
func (p *TagLuaPoolInfo) Get() *TagLuaInfo {
p.m.Lock()
defer p.m.Unlock()
n := len(p.data)
if n == 0 {
return newLua()
}
x := p.data[n-1]
p.data = p.data[0 : n-1]
return x
}
// Put .
func (p *TagLuaPoolInfo) Put(L *TagLuaInfo) {
p.m.Lock()
defer p.m.Unlock()
p.data = append(p.data, L)
}
// Close .
func (p *TagLuaPoolInfo) Close() {
for _, L := range p.data {
L.Close()
}
}
// Count .
func (p *TagLuaPoolInfo) Count() int {
p.m.Lock()
defer p.m.Unlock()
return len(p.data)
}
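// Typical usage sketch (assumes TagLuaInfo and newLua, which are defined
// elsewhere in this package):
//
// var pool TagLuaPoolInfo
// L := pool.Get() // reuse an idle Lua state, or create a fresh one
// defer pool.Put(L) // hand it back when done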
|
[
2
] |
package lox
import (
"errors"
"fmt"
)
type RuntimeError struct {
error
Token *Token
}
func NewRuntimeError(token *Token, msg string) RuntimeError {
return RuntimeError{error: errors.New(msg), Token: token}
}
type Interpreter struct {
globals *Environment
environment *Environment
locals map[Expr]int
}
func NewInterpreter() *Interpreter {
globals := NewEnvironment(nil)
globals.define("clock", Clock{})
return &Interpreter{
globals: globals,
environment: globals,
locals: make(map[Expr]int),
}
}
func (i *Interpreter) Interpret(statements []Stmt) {
defer func() {
if r := recover(); r != nil {
if e, ok := r.(RuntimeError); ok {
ReportRuntimeError(e)
return
}
panic(r)
}
}()
for _, s := range statements {
i.execute(s)
}
}
func (i *Interpreter) execute(stmt Stmt) {
stmt.accept(i)
}
func (i *Interpreter) resolve(expr Expr, depth int) {
i.locals[expr] = depth
}
func (i *Interpreter) executeBlock(statements []Stmt, environment *Environment) {
previous := i.environment
i.environment = environment
defer func() { i.environment = previous }()
for _, statement := range statements {
i.execute(statement)
}
}
func (i *Interpreter) visitBlockStmt(stmt *Block) interface{} {
i.executeBlock(stmt.statements, NewEnvironment(i.environment))
return nil
}
func (i *Interpreter) visitExpressionStmt(stmt *Expression) interface{} {
i.evaluate(stmt.expression)
return nil
}
func (i *Interpreter) visitFunctionStmt(stmt *Function) interface{} {
function := NewLoxFunction(stmt, i.environment)
i.environment.define(stmt.name.lexeme, function)
return nil
}
func (i *Interpreter) visitIfStmt(stmt *If) interface{} {
if isTruthy(i.evaluate(stmt.condition)) {
i.execute(stmt.thenBranch)
} else if stmt.elseBranch != nil {
i.execute(stmt.elseBranch)
}
return nil
}
func (i *Interpreter) visitPrintStmt(stmt *Print) interface{} {
value := i.evaluate(stmt.expression)
fmt.Printf("%s\n", stringify(value))
return nil
}
func (i *Interpreter) visitReturnStmt(stmt *Return) interface{} {
var value interface{}
if stmt.value != nil {
value = i.evaluate(stmt.value)
}
panic(returnSignal{value})
}
func (i *Interpreter) visitVarStmt(stmt *Var) interface{} {
var value interface{}
if stmt.initializer != nil {
value = i.evaluate(stmt.initializer)
}
i.environment.define(stmt.name.lexeme, value)
return nil
}
func (i *Interpreter) visitWhileStmt(stmt *While) interface{} {
for isTruthy(i.evaluate(stmt.condition)) {
i.execute(stmt.body)
}
return nil
}
func (i *Interpreter) visitAssignExpr(expr *Assign) interface{} {
value := i.evaluate(expr.value)
if distance, ok := i.locals[expr]; ok {
i.environment.assignAt(distance, expr.name, value)
} else {
i.globals.assign(expr.name, value)
}
return value
}
func (i *Interpreter) visitBinaryExpr(b *Binary) interface{} {
left := i.evaluate(b.left)
right := i.evaluate(b.right)
switch b.operator.kind {
case GREATER:
left, right := checkNumbers(b.operator, left, right)
return left > right
case GREATER_EQUAL:
left, right := checkNumbers(b.operator, left, right)
return left >= right
case LESS:
left, right := checkNumbers(b.operator, left, right)
return left < right
case LESS_EQUAL:
left, right := checkNumbers(b.operator, left, right)
return left <= right
case BANG_EQUAL:
return !isEqual(left, right)
case EQUAL_EQUAL:
return isEqual(left, right)
case MINUS:
left, right := checkNumbers(b.operator, left, right)
return left - right
case PLUS:
if left, ok := left.(float64); ok {
if right, ok := right.(float64); ok {
return left + right
}
}
if left, ok := left.(string); ok {
if right, ok := right.(string); ok {
return left + right
}
}
panic(NewRuntimeError(b.operator, "Operands must be two numbers or two strings."))
case SLASH:
left, right := checkNumbers(b.operator, left, right)
return left / right
case STAR:
left, right := checkNumbers(b.operator, left, right)
return left * right
}
panic("unreachable")
}
func (i *Interpreter) visitCallExpr(expr *Call) interface{} {
callee := i.evaluate(expr.callee)
var arguments []interface{}
for _, argument := range expr.arguments {
arguments = append(arguments, i.evaluate(argument))
}
if function, ok := callee.(LoxCallable); ok {
if len(arguments) != function.Arity() {
panic(NewRuntimeError(expr.paren, fmt.Sprintf("Expected %d arguments but got %d.", function.Arity(), len(arguments))))
}
return function.Call(i, arguments)
}
panic(NewRuntimeError(expr.paren, "Can only call functions and classes."))
}
func (i *Interpreter) visitGroupingExpr(g *Grouping) interface{} {
return i.evaluate(g.expression)
}
func (i *Interpreter) visitLiteralExpr(l *Literal) interface{} {
return l.value
}
func (i *Interpreter) visitLogicalExpr(expr *Logical) interface{} {
left := i.evaluate(expr.left)
if expr.operator.kind == OR {
if isTruthy(left) {
return left
}
} else { // AND
if !isTruthy(left) {
return left
}
}
return i.evaluate(expr.right)
}
func (i *Interpreter) visitUnaryExpr(u *Unary) interface{} {
right := i.evaluate(u.right)
switch u.operator.kind {
case BANG:
return !isTruthy(right)
case MINUS:
return -checkNumber(u.operator, right)
}
panic("unreachable")
}
func (i *Interpreter) visitVariableExpr(expr *Variable) interface{} {
return i.lookUpVariable(expr.name, expr)
}
func (i *Interpreter) lookUpVariable(name *Token, expr Expr) interface{} {
if distance, ok := i.locals[expr]; ok {
return i.environment.getAt(distance, name.lexeme)
}
return i.globals.get(name)
}
func (i *Interpreter) evaluate(expr Expr) interface{} {
return expr.accept(i)
}
func isTruthy(object interface{}) bool {
if object == nil {
return false
}
switch object := object.(type) {
case bool:
return object
}
return true
}
func isEqual(a, b interface{}) bool {
if a == nil && b == nil {
return true
}
if a == nil {
return false
}
return a == b
}
func checkNumber(op *Token, x interface{}) float64 {
if x, ok := x.(float64); ok {
return x
}
panic(NewRuntimeError(op, "Operand must be a number."))
}
func checkNumbers(op *Token, left, right interface{}) (float64, float64) {
if left, ok := left.(float64); ok {
if right, ok := right.(float64); ok {
return left, right
}
}
panic(NewRuntimeError(op, "Operands must be numbers."))
}
func stringify(object interface{}) string {
if object == nil {
return "nil"
}
if s, ok := object.(string); ok {
return s
}
return fmt.Sprintf("%v", object)
}
|
[
7
] |
package raft
import (
"6.824/src/labrpc"
)
func (rf *Raft) Candidate(me int, peers []*labrpc.ClientEnd) {
term := rf.currentTerm
args := RequestVoteArgs{
Term: rf.currentTerm,
CandidateId: me,
LastLogIndex: rf.getLastLogIndex(),
LastLogTerm: rf.getLastLogTerm(),
}
DPrintln("candidate ", me, " start to hold election , has term ", term)
cnt := 1
for peerId, peer := range peers {
if peerId == me {
continue
}
go func(peerId int, peer *labrpc.ClientEnd) {
reply := &RequestVoteReply{}
suc := peer.Call("Raft.RequestVote", &args, reply)
if !suc {
return
}
rf.mu.Lock()
defer rf.mu.Unlock()
currentState := rf.state
currentTerm := rf.currentTerm
if currentTerm != args.Term {
DPrintln("candidate ", me, "'s request vote sent to ", peerId,
" had old term", args.Term, " but candidate now is ", currentState, " have term ", currentTerm)
return
}
if currentState != candidate {
DPrintln("candidate ", me, "'s request vote sent to ", peerId,
" but candidate now is no longer candidate ,but is", currentState)
return
}
if reply.Term > currentTerm {
rf.turnFollower(reply.Term, -1)
} else {
if reply.VoteGranted {
cnt++
if cnt == len(peers)/2+1 {
rf.turnLeader(me, len(peers))
rf.chSender(rf.voteCh)
}
}
}
}(peerId, peer)
}
}
|
[
4
] |
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"encoding/json"
"reflect"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/go-logr/logr"
"github.com/RHEcosystemAppEng/dbaas-operator/api/v1alpha1"
)
const (
testProviderName = "mongodb-atlas"
testInventoryKind = "MongoDBAtlasInventory"
testConnectionKind = "MongoDBAtlasConnection"
)
var defaultProvider = &v1alpha1.DBaaSProvider{
ObjectMeta: metav1.ObjectMeta{
Name: testProviderName,
},
Spec: v1alpha1.DBaaSProviderSpec{
Provider: v1alpha1.DatabaseProvider{
Name: testProviderName,
},
InventoryKind: testInventoryKind,
ConnectionKind: testConnectionKind,
CredentialFields: []v1alpha1.CredentialField{},
},
}
var defaultTenant = &v1alpha1.DBaaSTenant{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster",
},
Spec: v1alpha1.DBaaSTenantSpec{
InventoryNamespace: testNamespace,
Authz: v1alpha1.DBaasAuthz{
Developer: v1alpha1.DBaasUsersGroups{
Groups: []string{"system:authenticated"},
},
},
},
}
func assertResourceCreationIfNotExists(object client.Object) func() {
return func() {
By("checking the resource exists")
if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(object), object); err != nil {
if errors.IsNotFound(err) {
assertResourceCreation(object)()
} else {
Fail(err.Error())
}
}
}
}
func assertResourceCreation(object client.Object) func() {
return func() {
By("creating resource")
object.SetResourceVersion("")
Expect(k8sClient.Create(ctx, object)).Should(Succeed())
By("checking the resource created")
Eventually(func() bool {
if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(object), object); err != nil {
return false
}
return true
}, timeout, interval).Should(BeTrue())
}
}
func assertResourceDeletion(object client.Object) func() {
return func() {
By("deleting resource")
Expect(k8sClient.Delete(ctx, object)).Should(Succeed())
By("checking the resource deleted")
Eventually(func() bool {
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(object), object)
if err != nil && errors.IsNotFound(err) {
return true
}
return false
}, timeout, interval).Should(BeTrue())
}
}
func assertProviderResourceCreated(object client.Object, providerResourceKind string, DBaaSResourceSpec interface{}) func() {
return func() {
By("checking a provider resource created")
objectKey := client.ObjectKeyFromObject(object)
providerResource := &unstructured.Unstructured{}
providerResource.SetGroupVersionKind(schema.GroupVersionKind{
Group: v1alpha1.GroupVersion.Group,
Version: v1alpha1.GroupVersion.Version,
Kind: providerResourceKind,
})
Eventually(func() bool {
if err := k8sClient.Get(ctx, objectKey, providerResource); err != nil {
return false
}
return true
}, timeout, interval).Should(BeTrue())
By("checking the provider resource spec is correct")
bytes, err := providerResource.MarshalJSON()
Expect(err).NotTo(HaveOccurred())
switch v := object.(type) {
case *v1alpha1.DBaaSInventory:
providerInventory := &v1alpha1.DBaaSProviderInventory{}
err := json.Unmarshal(bytes, providerInventory)
Expect(err).NotTo(HaveOccurred())
Expect(&providerInventory.Spec).Should(Equal(DBaaSResourceSpec))
Expect(len(providerInventory.GetOwnerReferences())).Should(Equal(1))
Expect(providerInventory.GetOwnerReferences()[0].Name).Should(Equal(object.GetName()))
case *v1alpha1.DBaaSConnection:
providerConnection := &v1alpha1.DBaaSProviderConnection{}
err := json.Unmarshal(bytes, providerConnection)
Expect(err).NotTo(HaveOccurred())
Expect(&providerConnection.Spec).Should(Equal(DBaaSResourceSpec))
Expect(len(providerConnection.GetOwnerReferences())).Should(Equal(1))
Expect(providerConnection.GetOwnerReferences()[0].Name).Should(Equal(object.GetName()))
default:
_ = v.GetName() // reference v so the switch variable is not left unused
Fail("invalid test object")
}
}
}
func assertDBaaSResourceStatusUpdated(object client.Object, providerResourceKind string, providerResourceStatus interface{}) func() {
return func() {
By("checking the DBaaS resource status has no conditions")
objectKey := client.ObjectKeyFromObject(object)
Consistently(func() (int, error) {
err := k8sClient.Get(ctx, objectKey, object)
if err != nil {
return -1, err
}
switch v := object.(type) {
case *v1alpha1.DBaaSInventory:
return len(v.Status.Conditions), nil
case *v1alpha1.DBaaSConnection:
return len(v.Status.Conditions), nil
default:
Fail("invalid test object")
return -1, err
}
}, duration, interval).Should(Equal(0))
By("getting the provider resource")
providerResource := &unstructured.Unstructured{}
providerResource.SetGroupVersionKind(schema.GroupVersionKind{
Group: v1alpha1.GroupVersion.Group,
Version: v1alpha1.GroupVersion.Version,
Kind: providerResourceKind,
})
Eventually(func() bool {
err := k8sClient.Get(ctx, objectKey, providerResource)
if err != nil {
if errors.IsNotFound(err) {
return false
}
Expect(err).NotTo(HaveOccurred())
}
By("updating the provider resource status")
providerResource.UnstructuredContent()["status"] = providerResourceStatus
err = k8sClient.Status().Update(ctx, providerResource)
if err != nil {
if errors.IsConflict(err) {
return false
}
Expect(err).NotTo(HaveOccurred())
}
return true
}, timeout, interval).Should(BeTrue())
By("checking the DBaaS resource status updated")
Eventually(func() (int, error) {
err := k8sClient.Get(ctx, objectKey, object)
if err != nil {
return -1, err
}
switch v := object.(type) {
case *v1alpha1.DBaaSInventory:
return len(v.Status.Conditions), nil
case *v1alpha1.DBaaSConnection:
return len(v.Status.Conditions), nil
default:
Fail("invalid test object")
return -1, err
}
}, timeout, interval).Should(Equal(1))
switch v := object.(type) {
case *v1alpha1.DBaaSInventory:
Expect(&v.Status).Should(Equal(providerResourceStatus))
case *v1alpha1.DBaaSConnection:
Expect(&v.Status).Should(Equal(providerResourceStatus))
default:
Fail("invalid test object")
}
}
}
func assertProviderResourceSpecUpdated(object client.Object, providerResourceKind string, DBaaSResourceSpec interface{}) func() {
return func() {
By("updating the DBaaS resource spec")
objectKey := client.ObjectKeyFromObject(object)
Eventually(func() bool {
err := k8sClient.Get(ctx, objectKey, object)
Expect(err).NotTo(HaveOccurred())
switch v := object.(type) {
case *v1alpha1.DBaaSInventory:
v.Spec.DBaaSInventorySpec = *DBaaSResourceSpec.(*v1alpha1.DBaaSInventorySpec)
case *v1alpha1.DBaaSConnection:
v.Spec = *DBaaSResourceSpec.(*v1alpha1.DBaaSConnectionSpec)
default:
Fail("invalid test object")
}
err = k8sClient.Update(ctx, object)
if err != nil {
if errors.IsConflict(err) {
return false
}
Expect(err).NotTo(HaveOccurred())
}
return true
}, timeout, interval).Should(BeTrue())
By("checking the provider resource status updated")
providerResource := &unstructured.Unstructured{}
providerResource.SetGroupVersionKind(schema.GroupVersionKind{
Group: v1alpha1.GroupVersion.Group,
Version: v1alpha1.GroupVersion.Version,
Kind: providerResourceKind,
})
Eventually(func() bool {
err := k8sClient.Get(ctx, objectKey, providerResource)
if err != nil {
return false
}
bytes, err := providerResource.MarshalJSON()
Expect(err).NotTo(HaveOccurred())
switch v := object.(type) {
case *v1alpha1.DBaaSInventory:
providerInventory := &v1alpha1.DBaaSProviderInventory{}
err := json.Unmarshal(bytes, providerInventory)
Expect(err).NotTo(HaveOccurred())
return reflect.DeepEqual(&providerInventory.Spec, DBaaSResourceSpec)
case *v1alpha1.DBaaSConnection:
providerConnection := &v1alpha1.DBaaSProviderConnection{}
err := json.Unmarshal(bytes, providerConnection)
Expect(err).NotTo(HaveOccurred())
return reflect.DeepEqual(&providerConnection.Spec, DBaaSResourceSpec)
default:
_ = v.GetName() // reference v so the switch variable is not left unused
Fail("invalid test object")
return false
}
}, timeout, interval).Should(BeTrue())
}
}
type SpyController struct {
controller.Controller
source chan client.Object
owner chan runtime.Object
}
func (c *SpyController) Watch(src source.Source, evthdler handler.EventHandler, prct ...predicate.Predicate) error {
c.reset()
switch s := src.(type) {
case *source.Kind:
c.source <- s.Type
default:
Fail("unexpected source type")
}
switch h := evthdler.(type) {
case *handler.EnqueueRequestForOwner:
c.owner <- h.OwnerType
default:
Fail("unexpected handler type")
}
if c.Controller != nil {
return c.Controller.Watch(src, evthdler, prct...)
} else {
return nil
}
}
func (c *SpyController) Start(ctx context.Context) error {
if c.Controller != nil {
return c.Controller.Start(ctx)
} else {
return nil
}
}
func (c *SpyController) GetLogger() logr.Logger {
if c.Controller != nil {
return c.Controller.GetLogger()
} else {
return nil
}
}
func (c *SpyController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
if c.Controller != nil {
return c.Controller.Reconcile(ctx, req)
} else {
return reconcile.Result{}, nil
}
}
func (c *SpyController) reset() {
if len(c.source) > 0 {
<-c.source
}
if len(c.owner) > 0 {
<-c.owner
}
}
func newSpyController(ctrl controller.Controller) *SpyController {
return &SpyController{
Controller: ctrl,
source: make(chan client.Object, 1),
owner: make(chan runtime.Object, 1),
}
}
|
[
2
] |
package delete
import (
"errorhandlers"
"fmt"
"net/http"
"storage/deletefromdb"
"strconv"
)
func User(w http.ResponseWriter, r *http.Request) {
id, err := strconv.ParseInt(r.PostFormValue("id"), 10, 64)
if err != nil {
errorhandlers.LogError(err, "Error deleting user")
fmt.Fprint(w, "Error deleting user")
return
}
if delErr := deletefromdb.DeleteUser(int(id)); delErr != nil {
errorhandlers.LogError(delErr, "Error deleting user")
fmt.Fprint(w, "Error deleting user")
return
}
fmt.Fprint(w, "OK")
}
|
[
1
] |
package foodchain
import (
"fmt"
"strings"
)
var TestVersion = 1
var data = []struct{ name, line, extra string }{
{},
{"fly", "I don't know why she swallowed the fly. Perhaps she'll die.", ""},
{"spider", "It wriggled and jiggled and tickled inside her.", " that wriggled and jiggled and tickled inside her"},
{"bird", "How absurd to swallow a bird!", ""},
{"cat", "Imagine that, to swallow a cat!", ""},
{"dog", "What a hog, to swallow a dog!", ""},
{"goat", "Just opened her throat and swallowed a goat!", ""},
{"cow", "I don't know how she swallowed a cow!", ""},
}
func Verse(v int) string {
if v == 8 {
return "I know an old lady who swallowed a horse.\nShe's dead, of course!"
}
lines := []string{
fmt.Sprintf("I know an old lady who swallowed a %s.", data[v].name),
data[v].line,
}
for j := v; j > 1; j-- {
lines = append(lines, fmt.Sprintf("She swallowed the %s to catch the %s%s.", data[j].name, data[j-1].name, data[j-1].extra))
}
if v != 1 {
lines = append(lines, data[1].line)
}
return strings.Join(lines, "\n")
}
func Verses(min, max int) string {
var verses []string
for j := min; j <= max; j++ {
verses = append(verses, Verse(j))
}
return strings.Join(verses, "\n\n")
}
func Song() string {
return Verses(1, 8)
}
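// Example derived from the data table above: Verse(1) yields
//
// I know an old lady who swallowed a fly.
// I don't know why she swallowed the fly. Perhaps she'll die.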
|
[
1
] |
package scene
import (
"fmt"
"math"
"strconv"
"time"
"github.com/spoof/go-flappybird/scene/gameobj"
"github.com/veandco/go-sdl2/img"
"github.com/veandco/go-sdl2/sdl"
"github.com/veandco/go-sdl2/ttf"
)
const (
distanceBetweenPipes = 300
birdX = 200
)
// Game is game scene
type Game struct {
width int
height int
bg *sdl.Texture
bird *gameobj.Bird
scoreFont *ttf.Font
pipeTexture *sdl.Texture
pipeWidth int
pipePairs []*gameobj.PipePair
score int
bestScore int
isGameOver bool
}
// NewGame creates new Game scene
func NewGame(r *sdl.Renderer, width, height int) (*Game, error) {
bg, err := img.LoadTexture(r, "res/imgs/background.png")
if err != nil {
return nil, fmt.Errorf("could not load background image: %v", err)
}
bird, err := gameobj.NewBird(r, birdX, height/2)
if err != nil {
return nil, fmt.Errorf("could not create bird: %v", err)
}
pipe, err := img.LoadTexture(r, "res/imgs/pipe.png")
if err != nil {
return nil, fmt.Errorf("could not load pipe image: %v", err)
}
_, _, pipeWidth, _, err := pipe.Query()
if err != nil {
return nil, fmt.Errorf("could not get pipe width: %v", err)
}
scoreFont, err := ttf.OpenFont("res/fonts/flappy.ttf", 42)
if err != nil {
return nil, fmt.Errorf("could not load font: %v", err)
}
return &Game{
width: width,
height: height,
bg: bg,
bird: bird,
pipeTexture: pipe,
pipeWidth: int(pipeWidth),
scoreFont: scoreFont,
}, nil
}
// Run runs the game scene.
func (g *Game) Run(in <-chan sdl.Event, r *sdl.Renderer) <-chan Event {
out := make(chan Event)
go func() {
defer close(out)
g.reset()
tick := time.Tick(10 * time.Millisecond)
for {
select {
case event, ok := <-in:
if !ok {
return
}
g.handleEvent(event)
case <-tick:
if g.hasCollisions() {
g.isGameOver = true
}
if !g.isGameOver {
g.generatePipes()
g.moveScene()
g.updateScore()
g.deleteHiddenPipes()
} else {
g.bird.Fall()
}
g.moveBird()
if err := g.paint(r); err != nil {
out <- &ErrorEvent{Err: err}
return
}
if g.doesBirdHitsGround() && g.isGameOver {
out <- &EndGameEvent{Score: g.score, BestScore: g.bestScore}
return
}
}
}
}()
return out
}
// Destroy frees all resources
func (g *Game) Destroy() {
g.bg.Destroy()
g.bird.Destroy()
g.pipeTexture.Destroy()
}
func (g *Game) reset() {
g.score = 0
g.bird.ResetPosition()
g.pipePairs = nil
g.isGameOver = false
}
func (g *Game) hasCollisions() bool {
if g.bird.Y <= 0 {
return true
}
if g.doesBirdHitsGround() {
return true
}
for _, pp := range g.pipePairs {
if pp.Hits(g.bird) {
return true
}
}
return false
}
func (g *Game) doesBirdHitsGround() bool {
if g.bird.Y+g.bird.Height >= g.height {
return true
}
return false
}
func (g *Game) handleEvent(event sdl.Event) {
switch e := event.(type) {
case *sdl.MouseButtonEvent:
if e.Type != sdl.MOUSEBUTTONDOWN {
return
}
if !g.isGameOver {
g.bird.Jump()
}
}
}
func (g *Game) generatePipes() {
needNewPipe := false
if len(g.pipePairs) == 0 {
needNewPipe = true
} else {
lastPipe := g.pipePairs[len(g.pipePairs)-1]
if g.width-(lastPipe.X+lastPipe.Width) >= distanceBetweenPipes {
needNewPipe = true
}
}
if needNewPipe {
x := g.width
pipes := gameobj.NewPipePair(g.pipeTexture, x, int(g.pipeWidth), g.height)
g.pipePairs = append(g.pipePairs, pipes)
}
}
func (g *Game) moveBird() {
if g.bird.Y+g.bird.Height <= g.height {
g.bird.Move()
}
}
func (g *Game) moveScene() {
for _, pp := range g.pipePairs {
pp.Move(-2)
}
}
func (g *Game) updateScore() {
for _, pp := range g.pipePairs {
if !pp.Counted && pp.X+pp.Width < g.bird.X {
pp.Counted = true
g.score++
g.bestScore = int(math.Max(float64(g.score), float64(g.bestScore)))
}
}
}
func (g *Game) deleteHiddenPipes() {
pipes := []*gameobj.PipePair{}
for _, pp := range g.pipePairs {
if pp.X+pp.Width >= 0 {
pipes = append(pipes, pp)
}
}
g.pipePairs = pipes
}
func (g *Game) paint(renderer *sdl.Renderer) error {
renderer.Clear()
if err := renderer.Copy(g.bg, nil, nil); err != nil {
return fmt.Errorf("could not copy background: %v", err)
}
drawOutline := false
if err := g.bird.Paint(renderer, drawOutline); err != nil {
return fmt.Errorf("could not paint bird: %v", err)
}
for _, p := range g.pipePairs {
if err := p.Paint(renderer); err != nil {
return fmt.Errorf("could not paint pipe: %v", err)
}
}
if err := g.paintScore(renderer); err != nil {
return fmt.Errorf("could not paint score: %v", err)
}
renderer.Present()
return nil
}
func (g *Game) paintScore(renderer *sdl.Renderer) error {
white := sdl.Color{R: 255, G: 255, B: 255, A: 255}
text, err := g.scoreFont.RenderUTF8_Solid(strconv.Itoa(g.score), white)
if err != nil {
return fmt.Errorf("could not render score: %v", err)
}
defer text.Free()
t, err := renderer.CreateTextureFromSurface(text)
if err != nil {
return fmt.Errorf("cound not create texture: %v", err)
}
defer t.Destroy()
var clipRect sdl.Rect
text.GetClipRect(&clipRect)
rect := &sdl.Rect{X: int32(g.width)/2 - clipRect.W/2, Y: 60, W: clipRect.W, H: clipRect.H}
if err := renderer.Copy(t, nil, rect); err != nil {
return fmt.Errorf("cound not copy texture: %v", err)
}
return nil
}
|
[
7
] |
// sample_factory project main.go
package main
import (
"fmt"
)
type Operation interface {
getResult() float64
setNumA(float64)
setNumB(float64)
}
type BaseOperation struct {
numberA float64
numberB float64
}
func (Operation *BaseOperation) setNumA(numA float64) {
Operation.numberA = numA
}
func (Operation *BaseOperation) setNumB(numB float64) {
Operation.numberB = numB
}
type OperationAdd struct {
BaseOperation
}
func (this *OperationAdd) getResult() float64 {
return this.numberA + this.numberB
}
type OperationSub struct {
BaseOperation
}
func (this *OperationSub) getResult() float64 {
return this.numberA - this.numberB
}
type OperationFactory struct {
}
func (this OperationFactory) createOperation(operator string) (operation Operation) {
switch operator {
case "+":
operation = new(OperationAdd)
case "-":
operation = new(OperationSub)
default:
panic("error")
}
return
}
func main() {
defer func() {
if err := recover(); err != nil {
fmt.Println(err)
}
}()
var fac OperationFactory
oper := fac.createOperation("+")
oper.setNumA(4.0)
oper.setNumB(5.0)
oper1 := fac.createOperation("-")
oper1.setNumA(120)
oper1.setNumB(90.0)
fmt.Println(oper.getResult())
fmt.Println(oper1.getResult())
}
|
[
2
] |
package fakes
import (
"github.com/AusDTO/pe-rds-broker/awsrds"
)
type FakeDBCluster struct {
DescribeCalled bool
DescribeID string
DescribeDBClusterDetails awsrds.DBClusterDetails
DescribeError error
CreateCalled bool
CreateID string
CreateDBClusterDetails awsrds.DBClusterDetails
CreateError error
ModifyCalled bool
ModifyID string
ModifyDBClusterDetails awsrds.DBClusterDetails
ModifyApplyImmediately bool
ModifyError error
DeleteCalled bool
DeleteID string
DeleteSkipFinalSnapshot bool
DeleteError error
}
func (f *FakeDBCluster) Describe(ID string) (awsrds.DBClusterDetails, error) {
f.DescribeCalled = true
f.DescribeID = ID
return f.DescribeDBClusterDetails, f.DescribeError
}
func (f *FakeDBCluster) Create(ID string, dbClusterDetails awsrds.DBClusterDetails) error {
f.CreateCalled = true
f.CreateID = ID
f.CreateDBClusterDetails = dbClusterDetails
return f.CreateError
}
func (f *FakeDBCluster) Modify(ID string, dbClusterDetails awsrds.DBClusterDetails, applyImmediately bool) error {
f.ModifyCalled = true
f.ModifyID = ID
f.ModifyDBClusterDetails = dbClusterDetails
f.ModifyApplyImmediately = applyImmediately
return f.ModifyError
}
func (f *FakeDBCluster) Delete(ID string, skipFinalSnapshot bool) error {
f.DeleteCalled = true
f.DeleteID = ID
f.DeleteSkipFinalSnapshot = skipFinalSnapshot
return f.DeleteError
}
|
[
2
] |
package main
import (
"fmt"
"os"
"bufio"
"reflect"
"strconv"
"strings"
"errors"
)
func P(t interface{}) {
fmt.Println(reflect.TypeOf(t))
}
func StrStdin() (res string){
stdin := bufio.NewScanner(os.Stdin)
stdin.Scan()
text := stdin.Text()
return text
}
func IntLength(n int) (int){
return len(strconv.Itoa(n))
}
func IntStdin() (int, error) {
input := StrStdin()
return strconv.Atoi(strings.TrimSpace(input))
}
func ArrayStdin() ([]string) {
var input string = StrStdin()
var slice []string = strings.Fields(input)
return slice
}
func IntArrayStdin() ([]int, error) {
input := ArrayStdin()
var output []int
for _, v := range input {
n, _ := strconv.Atoi(v)
if len(v) > 10 || n < 0 {
return output, errors.New("try again.")
}
output = append(output, n)
}
return output, nil
}
func MaxArrayNum(arr []int) (int) {
var max int = 0
var idx int
for i, v := range arr {
if max < v {
max = v
idx = i
}
}
return idx
}
func RemoveSlice(slice []int, s int) ([]int) {
return append(slice[:s], slice[s+1:]...)
}
func main() {
n, err := IntStdin()
if err != nil {
fmt.Println(err)
return
} else if len(strconv.Itoa(n)) > 5 || n < 1{
fmt.Println("dame")
return
}
arr, err := IntArrayStdin()
if err != nil {
return
}
fmt.Println(len(arr))
if len(arr) != (2*n) {
fmt.Println("please tyr again.")
return
}
var ans int = 0
for i := 0; i < n; i++ {
idx := MaxArrayNum(arr)
ans += arr[idx]
arr = RemoveSlice(arr, idx)
idx = len(arr) / 2
arr = RemoveSlice(arr, idx)
}
fmt.Printf("%v\n",ans)
}
|
[
1
] |
package pkg
import (
"sync"
"time"
)
// RollingCounter is a sliding-window counter.
type RollingCounter struct {
	buckets  map[int64]*Bucket // per-second buckets, keyed by Unix timestamp
	interval int64             // window length in seconds
	mux      sync.RWMutex      // guards buckets; use with care: sync.RWMutex must not be copied, see https://www.haohongfan.com/post/2020-12-22-struct-sync/
}
// Bucket is a single per-second counting bucket.
type Bucket struct {
Count int64
}
// NewRollingCounter creates a sliding-window counter covering the last interval seconds.
func NewRollingCounter(interval int64) *RollingCounter {
return &RollingCounter{
buckets: make(map[int64]*Bucket),
interval: interval,
}
}
// getCurrentBucket returns the bucket for the current second, creating it if needed.
func (rc *RollingCounter) getCurrentBucket() *Bucket {
now := time.Now().Unix()
	// if a bucket already exists for the current second, return it directly
if b, ok := rc.buckets[now]; ok {
return b
}
	// otherwise, create a new bucket
b := new(Bucket)
rc.buckets[now] = b
return b
}
func (rc *RollingCounter) removeOldBuckets() {
t := time.Now().Unix() - rc.interval
for timestamp := range rc.buckets {
if timestamp <= t {
delete(rc.buckets, timestamp)
}
}
}
// Incr increments the counter by one.
func (rc *RollingCounter) Incr() {
rc.IncrN(1)
}
// IncrN increments the counter by i.
func (rc *RollingCounter) IncrN(i int64) {
rc.mux.Lock()
defer rc.mux.Unlock()
bucket := rc.getCurrentBucket()
bucket.Count += i
rc.removeOldBuckets()
}
// Sum adds up the counts of all buckets within the last interval seconds.
func (rc *RollingCounter) Sum() int64 {
var sum int64
t := time.Now().Unix() - rc.interval
rc.mux.RLock()
defer rc.mux.RUnlock()
for timestamp, bucket := range rc.buckets {
if timestamp >= t {
sum += bucket.Count
}
}
return sum
}
// Max returns the largest per-second count within the last interval seconds.
func (rc *RollingCounter) Max() int64 {
var max int64
t := time.Now().Unix() - rc.interval
rc.mux.RLock()
defer rc.mux.RUnlock()
for timestamp, bucket := range rc.buckets {
if timestamp >= t {
if bucket.Count > max {
max = bucket.Count
}
}
}
return max
}
// Min returns the smallest per-second count within the last interval seconds (0 if the window is empty).
func (rc *RollingCounter) Min() int64 {
var min int64
t := time.Now().Unix() - rc.interval
rc.mux.RLock()
defer rc.mux.RUnlock()
for timestamp, bucket := range rc.buckets {
if timestamp >= t {
if min == 0 {
min = bucket.Count
continue
}
if bucket.Count < min {
min = bucket.Count
}
}
}
return min
}
// Avg returns the average count per second over the window.
func (rc *RollingCounter) Avg() float64 {
return float64(rc.Sum()) / float64(rc.interval)
}
// Stats returns the underlying buckets (note: not guarded by the mutex).
func (rc *RollingCounter) Stats() map[int64]*Bucket {
return rc.buckets
}
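// exampleRollingCounterUsage is an illustrative sketch and not part of the
// original code; it assumes the interval passed to NewRollingCounter is a
// number of seconds.
func exampleRollingCounterUsage() int64 {
	rc := NewRollingCounter(10) // keep counts for the last 10 seconds
	rc.Incr()                   // record one event
	rc.IncrN(5)                 // record five more events
	_ = rc.Max()                // busiest second within the window
	return rc.Sum()             // total events recorded within the window
}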
|
[
1
] |
package leetcode
import "strings"
func removeDuplicates(S string) string {
	res := make([]string, 0)
	for _, v := range S {
		if len(res) != 0 && res[len(res)-1] == string(v) {
			res = res[:len(res)-1] // adjacent duplicate: pop the previous character
		} else {
			res = append(res, string(v))
		}
	}
	return strings.Join(res, "")
}
|
[
2
] |
package log_test
import (
"bytes"
"fmt"
"github.com/marcusva/docproc/common/log"
"github.com/marcusva/docproc/common/testing/assert"
"io/ioutil"
"os"
"strings"
"testing"
)
func TestPackage(t *testing.T) {
if log.Logger() == nil {
t.Error("_log is nil, although a package initialization was done")
}
// None of those should cause a panic
log.Alert("test")
log.Alertf("test")
log.Critical("test")
log.Criticalf("test")
log.Debug("test")
log.Debugf("test")
log.Info("test")
log.Infof("test")
log.Notice("test")
log.Noticef("test")
log.Error("test")
log.Errorf("test")
log.Warning("test")
log.Warningf("test")
log.Emergency("test")
log.Emergencyf("test")
}
func TestLogger(t *testing.T) {
logger := log.Logger()
assert.NotNil(t, logger)
var buf bytes.Buffer
log.Init(&buf, log.LevelDebug, true)
logger2 := log.Logger()
assert.NotEqual(t, logger, logger2)
}
func TestInitFile(t *testing.T) {
fp, err := ioutil.TempFile(os.TempDir(), "docproc-logtest")
assert.NoErr(t, err)
fname := fp.Name()
fp.Close()
err = log.InitFile(fname, log.LevelDebug, false)
assert.NoErr(t, err)
log.Init(os.Stdout, log.LevelDebug, false)
assert.NoErr(t, os.Remove(fname))
err = log.InitFile("", log.LevelDebug, false)
assert.Err(t, err)
}
func TestGetLogLevel(t *testing.T) {
levelsInt := []string{"0", "1", "2", "3", "4", "5", "6", "7"}
levelsTxt := []string{
"Emergency", "Alert", "Critical", "Error", "Warning", "Notice", "Info",
"Debug",
}
for idx, v := range levelsInt {
if v1, err := log.GetLogLevel(v); err != nil {
t.Error(err)
} else {
if v2, err := log.GetLogLevel(levelsTxt[idx]); err != nil {
t.Error(err)
} else {
if v1 != v2 {
t.Errorf("Log level mismatch: '%s' - '%s'",
v, levelsTxt[idx])
}
}
}
}
levelsInvalid := []string{"", "10", "SomeText"}
for _, v := range levelsInvalid {
if v1, err := log.GetLogLevel(""); err == nil || v1 != -1 {
t.Errorf("invalid level '%s' was accepted", v)
}
}
}
func TestLog(t *testing.T) {
callbacks := map[string]func(...interface{}){
"DEBUG": log.Debug,
"INFO": log.Info,
"NOTICE": log.Notice,
"WARNING": log.Warning,
"ERROR": log.Error,
"CRITICAL": log.Critical,
"ALERT": log.Alert,
"EMERGENCY": log.Emergency,
}
var buf bytes.Buffer
log.Init(&buf, log.LevelDebug, true)
for prefix, cb := range callbacks {
cb("Test")
result := string(buf.Bytes())
assert.FailIfNot(t, strings.Contains(result, prefix),
"'%s' not found in %s", prefix, result)
assert.FailIfNot(t, strings.Contains(result, "Test"))
buf.Reset()
}
}
func TestLogf(t *testing.T) {
callbacks := map[string]func(f string, args ...interface{}){
"DEBUG": log.Debugf,
"INFO": log.Infof,
"NOTICE": log.Noticef,
"WARNING": log.Warningf,
"ERROR": log.Errorf,
"CRITICAL": log.Criticalf,
"ALERT": log.Alertf,
"EMERGENCY": log.Emergencyf,
}
var buf bytes.Buffer
log.Init(&buf, log.LevelDebug, true)
fmtstring := "Formatted result: '%s'"
for prefix, cb := range callbacks {
fmtresult := fmt.Sprintf(fmtstring, "TestLogf")
cb(fmtstring, "TestLogf")
result := string(buf.Bytes())
assert.FailIfNot(t, strings.Contains(result, prefix),
"'%s' not found in %s", prefix, result)
assert.FailIfNot(t, strings.Contains(result, fmtresult))
buf.Reset()
}
}
func TestLogLevel(t *testing.T) {
levels := []log.Level{
log.LevelDebug, log.LevelInfo, log.LevelNotice, log.LevelWarning,
log.LevelError, log.LevelAlert, log.LevelCritical, log.LevelEmergency,
}
for _, level := range levels {
var buf bytes.Buffer
log.Init(&buf, level, true)
assert.Equal(t, level, log.CurrentLevel())
}
}
|
[
4
] |
package base
import (
"context"
"fmt"
"github.com/odpf/optimus/models"
pbp "github.com/odpf/optimus/api/proto/odpf/optimus/plugins"
)
// GRPCServer will be used by plugins, this is working as proto adapter
type GRPCServer struct {
// This is the real implementation coming from plugin
Impl models.BasePlugin
pbp.UnimplementedBaseServer
}
func (s *GRPCServer) PluginInfo(ctx context.Context, req *pbp.PluginInfoRequest) (*pbp.PluginInfoResponse, error) {
n, err := s.Impl.PluginInfo()
if err != nil {
return nil, err
}
ptype := pbp.PluginType_PluginType_HOOK
switch n.PluginType {
case models.PluginTypeTask:
ptype = pbp.PluginType_PluginType_TASK
}
var mtype []pbp.PluginMod
for _, mod := range n.PluginMods {
switch mod {
case models.ModTypeCLI:
mtype = append(mtype, pbp.PluginMod_PluginMod_CLI)
case models.ModTypeDependencyResolver:
mtype = append(mtype, pbp.PluginMod_PluginMod_DEPENDENCYRESOLVER)
default:
return nil, fmt.Errorf("plugin mod is of unknown type: %s", mod)
}
}
htype := pbp.HookType_HookType_UNKNOWN
switch n.HookType {
case models.HookTypePre:
htype = pbp.HookType_HookType_PRE
case models.HookTypePost:
htype = pbp.HookType_HookType_POST
case models.HookTypeFail:
htype = pbp.HookType_HookType_FAIL
}
return &pbp.PluginInfoResponse{
Name: n.Name,
PluginType: ptype,
PluginMods: mtype,
PluginVersion: n.PluginVersion,
ApiVersion: n.APIVersion,
Description: n.Description,
Image: n.Image,
DependsOn: n.DependsOn,
HookType: htype,
SecretPath: n.SecretPath,
}, nil
}
|
[
7
] |
package main
import "fmt"
func main() {
defer finish()
// woow complex numbers!
var f complex64 = 1 + 2i
var g complex64 = 2 - 5i
fmt.Println(f / g)
// foreach like
numbers := generateRange(1, 30)
for _, number := range numbers {
fmt.Printf("\nCollatz row for %v:\n", number)
collatzConjecture(number)
}
panic("Program killed")
}
func collatzConjecture(x int) (y int) {
fmt.Printf("%v -> ", x)
if x == 1 {
return 1
}
if x%2 == 1 {
return collatzConjecture(x*3 + 1)
}
if x%2 == 0 {
return collatzConjecture(x / 2)
}
return 1
}
func generateRange(min, max int) []int {
a := make([]int, max-min+1)
for i := range a {
a[i] = min + i
}
return a
}
func finish() {
fmt.Println("\n\nLast breath of a program...")
}
|
[
1
] |
package main
import (
"bufio"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"sort"
"strings"
"text/template"
)
func readdir(dir string, check func(os.FileInfo) bool, suffix string) []string {
var list []string
d, err := os.Open(dir)
if err != nil {
return list
}
defer d.Close()
fi, err := d.Readdir(-1)
if err != nil {
return list
}
for _, f := range fi {
if check(f) {
if suffix != "" && !strings.HasSuffix(f.Name(), suffix) {
continue
}
list = append(list, strings.TrimSuffix(f.Name(), suffix))
}
}
sort.Strings(list)
return list
}
func checkDir(f os.FileInfo) bool {
	return f.Mode().IsDir()
}
func checkFile(f os.FileInfo) bool {
	return f.Mode().IsRegular()
}
func readFile(file string) []string {
var lines []string
f, err := os.Open(file)
if err != nil {
log.Println(err)
return lines
}
defer f.Close()
scanner := bufio.NewScanner(f)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
return lines
}
func runAnsible(inventory, playbook, remote string) (string, error) {
os.Mkdir("jobs", 0700)
jobdir, err := ioutil.TempDir("jobs", "")
if err != nil {
return "", err
}
err = ioutil.WriteFile(jobdir+"/inventory", []byte(inventory), 0444)
if err != nil {
return "", err
}
err = ioutil.WriteFile(jobdir+"/playbook.yml", []byte(playbook), 0444)
if err != nil {
return "", err
}
ioutil.WriteFile(jobdir+"/remote", []byte("job started by "+remote), 0444)
logfname := jobdir + "/log"
f, err := os.OpenFile(logfname, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0444)
if err != nil {
return "", err
}
defer f.Close()
curdir, err := os.Getwd()
if err != nil {
return "", err
}
os.Symlink(curdir+"/roles", jobdir+"/roles")
cmd := exec.Command("ansible-playbook", "-i", jobdir+"/inventory", jobdir+"/playbook.yml")
cmd.Stdout = f
cmd.Stderr = f
cmd.Dir = curdir
/*
* ansible is made from python, which wants to buffer output.
* disable that here by setting the environment variable.
*/
env := os.Environ()
env = append(env, "PYTHONUNBUFFERED=1")
cmd.Env = env
err = cmd.Start()
if err != nil {
return "", fmt.Errorf("command Start: %v", err)
}
go func() {
var output string
err := cmd.Wait()
if err != nil {
output = fmt.Sprintf("%s finished with error status: %v\n", jobdir, err)
} else {
output = fmt.Sprintf("%s finished with no errors\n", jobdir)
}
ioutil.WriteFile(jobdir+"/exitstatus", []byte(output), 0400)
}()
return logfname, nil
}
func requestHandler(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "POST":
r.ParseForm()
inventory := r.PostFormValue("inventory")
playbook := r.PostFormValue("playbook")
remote := r.RemoteAddr
logfname, err := runAnsible(inventory, playbook, remote)
if err != nil {
http.Error(w, "internal error", http.StatusInternalServerError)
log.Println(err)
return
}
logfile_details := &struct {
Playbook string
Logfile string
}{
Playbook: r.PostFormValue("playbook_selection"),
Logfile: logfname,
}
t, err := template.ParseFiles("templates/logfile.html")
if err != nil {
http.Error(w, "internal error", http.StatusInternalServerError)
log.Println(err)
return
}
err = t.Execute(w, logfile_details)
if err != nil {
http.Error(w, "internal error", http.StatusInternalServerError)
log.Println(err)
return
}
case "GET":
t, err := template.ParseFiles("templates/ansible.html")
if err != nil {
http.Error(w, "internal error", http.StatusInternalServerError)
log.Println(err)
}
ansible_details := &struct {
Machines []string
Playbooks []string
Roles []string
}{
Machines: readFile("machines"),
Playbooks: readdir("playbooks", checkFile, ".yml"),
Roles: readdir("roles", checkDir, ""),
}
err = t.Execute(w, ansible_details)
if err != nil {
http.Error(w, "internal error", http.StatusInternalServerError)
log.Println(err)
}
}
}
func serveAssets(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, r.URL.Path[1:])
}
func serveStatus(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "templates/status.html")
}
type httperror struct {
Code int `json:"code"`
Id string `json:"id"`
Message string `json:"message"`
Detail string `json:"detail"`
}
type job struct {
Job string `json:"job"`
Link string `json:"link"`
}
func serveAPI(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
if r.Method != "GET" {
w.Header().Set("Allow", "GET")
h := &httperror{
Code: http.StatusMethodNotAllowed,
Id: "notallowed",
Message: "method not allowed",
Detail: "Only GET supported on this service",
}
b, _ := json.MarshalIndent(h, "", "\t")
http.Error(w, string(b), http.StatusMethodNotAllowed)
return
}
switch r.URL.Path {
case "/api/v1/jobs":
jobsdirs := readdir("jobs", checkDir, "")
jobslist := make([]job, 0)
for _, j := range jobsdirs {
entry := job{
Job: j,
Link: "http://" + r.Host + "/jobs/" + j + "/",
}
jobslist = append(jobslist, entry)
}
b, err := json.MarshalIndent(jobslist, "", "\t")
if err != nil {
h := &httperror{
Code: http.StatusInternalServerError,
Id: "internalerror",
Message: "internal error",
Detail: "Unable to Marshal jobs list",
}
b, _ := json.MarshalIndent(h, "", "\t")
http.Error(w, string(b), http.StatusInternalServerError)
return
}
		w.Write(b)
return
}
http.NotFound(w, r)
}
func main() {
var port = flag.String("port", "", "HTTP service address (.e.g. 8080)")
flag.Parse()
if *port == "" {
flag.Usage()
return
}
http.HandleFunc("/", requestHandler)
http.HandleFunc("/api/", serveAPI)
http.HandleFunc("/assets/", serveAssets)
http.HandleFunc("/jobs/", serveAssets)
http.HandleFunc("/playbooks/", serveAssets)
http.HandleFunc("/status", serveStatus)
log.Fatal(http.ListenAndServe(":"+*port, nil))
}
|
[
7
] |
// Code generated by mockery v2.30.1. DO NOT EDIT.
package mocks
import (
dynamic "k8s.io/client-go/dynamic"
kubernetes "k8s.io/client-go/kubernetes"
mock "github.com/stretchr/testify/mock"
pflag "github.com/spf13/pflag"
pkgclient "sigs.k8s.io/controller-runtime/pkg/client"
rest "k8s.io/client-go/rest"
versioned "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned"
)
// Factory is an autogenerated mock type for the Factory type
type Factory struct {
mock.Mock
}
// BindFlags provides a mock function with given fields: flags
func (_m *Factory) BindFlags(flags *pflag.FlagSet) {
_m.Called(flags)
}
// Client provides a mock function with given fields:
func (_m *Factory) Client() (versioned.Interface, error) {
ret := _m.Called()
var r0 versioned.Interface
var r1 error
if rf, ok := ret.Get(0).(func() (versioned.Interface, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() versioned.Interface); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(versioned.Interface)
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ClientConfig provides a mock function with given fields:
func (_m *Factory) ClientConfig() (*rest.Config, error) {
ret := _m.Called()
var r0 *rest.Config
var r1 error
if rf, ok := ret.Get(0).(func() (*rest.Config, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() *rest.Config); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*rest.Config)
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DynamicClient provides a mock function with given fields:
func (_m *Factory) DynamicClient() (dynamic.Interface, error) {
ret := _m.Called()
var r0 dynamic.Interface
var r1 error
if rf, ok := ret.Get(0).(func() (dynamic.Interface, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() dynamic.Interface); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(dynamic.Interface)
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// KubeClient provides a mock function with given fields:
func (_m *Factory) KubeClient() (kubernetes.Interface, error) {
ret := _m.Called()
var r0 kubernetes.Interface
var r1 error
if rf, ok := ret.Get(0).(func() (kubernetes.Interface, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() kubernetes.Interface); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(kubernetes.Interface)
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// KubebuilderClient provides a mock function with given fields:
func (_m *Factory) KubebuilderClient() (pkgclient.Client, error) {
ret := _m.Called()
var r0 pkgclient.Client
var r1 error
if rf, ok := ret.Get(0).(func() (pkgclient.Client, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() pkgclient.Client); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(pkgclient.Client)
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// KubebuilderWatchClient provides a mock function with given fields:
func (_m *Factory) KubebuilderWatchClient() (pkgclient.WithWatch, error) {
ret := _m.Called()
var r0 pkgclient.WithWatch
var r1 error
if rf, ok := ret.Get(0).(func() (pkgclient.WithWatch, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() pkgclient.WithWatch); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(pkgclient.WithWatch)
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Namespace provides a mock function with given fields:
func (_m *Factory) Namespace() string {
ret := _m.Called()
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// SetBasename provides a mock function with given fields: _a0
func (_m *Factory) SetBasename(_a0 string) {
_m.Called(_a0)
}
// SetClientBurst provides a mock function with given fields: _a0
func (_m *Factory) SetClientBurst(_a0 int) {
_m.Called(_a0)
}
// SetClientQPS provides a mock function with given fields: _a0
func (_m *Factory) SetClientQPS(_a0 float32) {
_m.Called(_a0)
}
// NewFactory creates a new instance of Factory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewFactory(t interface {
mock.TestingT
Cleanup(func())
}) *Factory {
mock := &Factory{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
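// Usage sketch (illustrative, not generated by mockery): a test typically
// constructs the mock with NewFactory and stubs calls through the embedded
// testify mock; the namespace value below is an arbitrary example.
//
//	f := NewFactory(t)
//	f.On("Namespace").Return("velero")
//	ns := f.Namespace() // returns "velero"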
|
[
4
] |
package main
import (
"fmt"
)
// DP solution
/*func longestPalindrome(str string) string {
if len(str) == 0 {
return ""
} else if len(str) == 1 {
return str
}
dp := make([][]bool, len(str))
for i := range dp {
dp[i] = make([]bool, len(str))
}
var maxPalindpom string
var maxLength int
// case for length of 1
for i := range str {
dp[i][i] = true
}
maxPalindpom = str[:1]
maxLength = 1
// case for length of 2
for i := 1; i < len(str); i++ {
if str[i] == str[i-1] {
maxPalindpom = str[i-1 : i+1]
maxLength = 2
dp[i-1][i] = true
}
}
// case for length of greater than 2
for i := 2; i < len(str); i++ {
for j := 0; j <= i-2; j++ {
if str[i] == str[j] && dp[j+1][i-1] {
if maxLength < i-j+1 {
maxPalindpom = str[j : i+1]
maxLength = i - j + 1
}
dp[j][i] = true
}
}
}
return maxPalindpom
}*/
// Expand solution
func longestPalindrome(str string) string {
var max string
for i := 0; i < len(str); i++ {
expand(i, i, str, &max)
expand(i, i+1, str, &max)
}
return max
}
func expand(left, right int, s string, max *string) {
for left >= 0 && right < len(s) && s[left] == s[right] {
if len(*max) < right-left+1 {
*max = s[left : right+1]
}
left--
right++
}
}
func main() {
fmt.Println(longestPalindrome("cababac"))
}
|
[
1
] |
package proxy
import (
"fmt"
"html"
"io"
"log"
"net/http"
"github.com/Bearnie-H/easy-tls/client"
"github.com/Bearnie-H/easy-tls/header"
"github.com/Bearnie-H/easy-tls/server"
)
// NotFoundHandlerProxyOverride will override the NotFound handler of the
// Server with a reverse proxy lookup function. This will allow the server
// to attempt to re-route requests it doesn't have a defined route for, while
// still falling back to a "NotFound" 404 response if there is
// no defined place to route to.
func NotFoundHandlerProxyOverride(S *server.SimpleServer, c *client.SimpleClient, RouteMatcher ReverseProxyRouterFunc, logger *log.Logger) {
var err error
if logger == nil {
logger = S.Logger()
}
if c == nil {
c, err = client.NewClientHTTPS(S.TLSBundle())
if err != nil {
panic(err)
}
c.SetLogger(logger)
}
S.Router().NotFoundHandler = DoReverseProxy(c, RouteMatcher, logger)
}
// ConfigureReverseProxy will convert a freshly created SimpleServer
// into a ReverseProxy. This will use the provided SimpleClient
// (or a default HTTP SimpleClient) to perform the requests.
// The ReverseProxyRouterFunc defines HOW the routing will be performed, and
// must map individual requests to URLs to forward to.
// The PathPrefix defines the base path to proxy from, with a default of "/"
// indicating that ALL incoming requests should be proxied.
// If No Server or Client are provided, default instances will be generated.
func ConfigureReverseProxy(S *server.SimpleServer, Client *client.SimpleClient, logger *log.Logger, RouteMatcher ReverseProxyRouterFunc, PathPrefix string) *server.SimpleServer {
// If No server is provided, create a default HTTP Server.
var err error
if S == nil {
S = server.NewServerHTTP()
}
// Assert a non-empty path prefix to proxy on
if PathPrefix == "" {
PathPrefix = "/"
}
// If there's no logger provided, use the one from the server
if logger == nil {
logger = S.Logger()
}
// If no client is given, attempt to create one, using any TLS resources the potential server had.
if Client == nil {
Client, err = client.NewClientHTTPS(S.TLSBundle())
if err != nil {
panic(err)
}
Client.SetLogger(logger)
}
S.AddSubrouter(
S.Router(),
PathPrefix,
server.NewSimpleHandler(
DoReverseProxy(
Client,
RouteMatcher,
logger,
),
PathPrefix,
),
)
return S
}
// DoReverseProxy is the backbone of this package, and the reverse
// Proxy behaviour in general.
//
// This is the http.HandlerFunc which is called on ALL incoming requests
// to the reverse proxy. At a high level this function:
//
// 1) Determines the forward host, from the incoming request
// 2) Creates a NEW request, performing a deep copy of the original, including the body
// 3) Performs this new request, using the provided (or default) SimpleClient to the new Host.
// 4) Receives the corresponding response, and deep copies it back to the original requester.
//
func DoReverseProxy(C *client.SimpleClient, Matcher ReverseProxyRouterFunc, logger *log.Logger) http.HandlerFunc {
// Anonymous function to be returned, and is what is actually called when requests come in.
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
// Create the new URL to use, based on the TLS settings of the Client, and the incoming request.
proxyURL, err := Matcher(r)
switch err {
case nil:
case ErrRouteNotFound:
logger.Printf("Failed to find destination host:port for URL [ %s ] from %s - %s", r.URL.String(), r.RemoteAddr, err)
w.WriteHeader(http.StatusNotFound)
return
case ErrForbiddenRoute:
logger.Printf("Cannot forward request for URL [ %s ] from %s - %s", r.URL.String(), r.RemoteAddr, err)
w.WriteHeader(http.StatusForbidden)
return
default:
logger.Printf("Failed to format proxy forwarding for URL [ %s ] from %s - %s", r.URL.String(), r.RemoteAddr, err)
w.WriteHeader(http.StatusInternalServerError)
return
}
// Create the new Request to send
proxyReq, err := client.NewRequest(r.Method, proxyURL.String(), r.Header, r.Body)
if err != nil {
logger.Printf("Failed to create proxy forwarding request for URL [ %s ] from %s - %s", r.URL.String(), r.RemoteAddr, err)
w.WriteHeader(http.StatusInternalServerError)
return
}
// Add in some proxy-specific headers
proxyHeaders := http.Header{
"Host": []string{r.Host},
"X-Forwarded-For": []string{r.RemoteAddr},
}
header.Merge(&(proxyReq.Header), &proxyHeaders)
logger.Printf("Forwarding [ %s [ %s ] ] from [ %s ] to [ %s ]", r.URL.String(), r.Method, r.RemoteAddr, proxyURL.String())
// Perform the full proxy request
proxyResp, err := C.Do(proxyReq)
if err != nil {
logger.Printf("Failed to perform proxy request for URL [ %s ] from %s - %s", r.URL.String(), r.RemoteAddr, err)
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(html.EscapeString(fmt.Sprintf("Failed to perform proxy request for URL [ %s ] - %s.\n", r.URL.String(), err))))
return
}
// Write the response fields out to the original requester
responseHeader := w.Header()
header.Merge(&responseHeader, &(proxyResp.Header))
// Write back the status code
w.WriteHeader(proxyResp.StatusCode)
// Write back the response body
if _, err := io.Copy(w, proxyResp.Body); err != nil {
logger.Printf("Failed to write back proxy response for URL [ %s ] from %s - %s", r.URL.String(), r.RemoteAddr, err)
return
}
})
}
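// Illustrative sketch (assumptions: ReverseProxyRouterFunc receives the
// incoming *http.Request and returns the destination *url.URL plus an error
// such as ErrRouteNotFound or ErrForbiddenRoute, as consumed by DoReverseProxy
// above; "backend.internal:8080" is a placeholder host):
//
//	matcher := func(r *http.Request) (*url.URL, error) {
//		return url.Parse("http://backend.internal:8080" + r.URL.Path)
//	}
//	NotFoundHandlerProxyOverride(srv, nil, matcher, nil)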
|
[
2
] |
/*
func
method
interface
package
*/
package main
import (
"fmt"
"time"
pkg1 "./foo"
)
// func
func func1() {
fmt.Println("func1")
}
func func2(x, y int) {
fmt.Println("func2", x, y)
}
func func3(x, y int) int {
return x + y
}
func func4(x, y int) (add, minus int) {
add = x + y
minus = x - y
	return // with named result parameters, a bare return is allowed
}
func func5(x, y int) (minus int, error error) {
minus = x - y
if minus >= 0 {
return minus, nil
} else {
return 0, fmt.Errorf("Error: %s", "x 必须大于或等于 y")
}
}
func func6() func() int { // func6 returns another function, of type func() int
	var x int
	return func() int { // anonymous function (a closure over x)
		x++
		return x * x // square
}
}
func func7(values ...int) int { // variadic function
sum := 0
for _, val := range values {
sum += val
}
return sum
}
func func8() (string, int) { // deferred functions
foo := 100
fmt.Println("func8 start", foo)
	defer func() { // defer is commonly used for error handling, releasing resources, cleanup and logging; deferred calls run even if the function panics
time.Sleep(3 * time.Second)
fmt.Println("func8 defer 1", foo)
}()
	defer func() { // deferred calls run in LIFO (last in, first out) order
foo = 200
fmt.Println("func8 defer 2", foo)
}()
fmt.Println("func8 middle", foo)
return "func8 return", foo // defer 在声明时不会立即执行,而是在函数 return 执行前
}
func func9() {
defer func() {
		if p := recover(); p != nil { // recover catches the panic
			err := fmt.Errorf("func9 error: %v", p)
			fmt.Println(err)
		}
	}()
	panic(fmt.Sprintf("panic thrown %q", "func9")) // panic raises the error
}
// method
type St1 struct {
x, y int
}
func (s St1) Method1(z int) int { // Go has a single visibility mechanism: identifiers starting with an upper-case letter are exported from their package, lower-case ones are not
	sum := s.x + s.y + z
	return sum // a method with a result must return a value
}
type St2 struct {
x, y int
}
func (s *St2) method2(z int) int { // a struct's fields are visible to all code in the same package, whether written in a function or a method
sum := s.x + s.y + z
return sum
}
func main() {
// func
fmt.Println("--------------", "func")
func1()
func2(1, 2)
fmt.Println("func3", func3(1, 2))
fmt.Println(func4(1, 2))
func5Val, func5Err := func5(1, 2)
if func5Err != nil {
fmt.Println("func5", func5Err)
} else {
fmt.Println("func5", func5Val)
}
f6 := func6()
fmt.Println("func6", f6())
fmt.Println("func6", f6())
fmt.Println("func6", f6())
fmt.Println("func7", func7())
fmt.Println("func7", func7(3))
fmt.Println("func7", func7(1, 2, 3))
fmt.Println(func8())
	// func9() // enable this to test panic and recover
// method
fmt.Println("--------------", "method")
s1 := St1{1, 2}
fmt.Println("method1", s1.Method1(3))
s2 := &St2{1, 2}
fmt.Println("method2", s2.method2(3))
fmt.Println("method3", OtherMethod1()) // go run .
// package
fmt.Println("--------------", "package")
fmt.Println("pkg1", pkg1.ReturnStr())
}
|
[
1
] |
package mypaint
import (
"github.com/llgcode/draw2d"
"github.com/llgcode/draw2d/draw2dimg"
"image/color"
"image/draw"
)
type Filler struct {
GC draw2d.GraphicContext
CurFillColor color.RGBA
CurStrokeColor color.RGBA
*Path
}
func NewFiller(img draw.Image) *Filler {
return &Filler{
GC: draw2dimg.NewGraphicContext(img),
Path: NewPath(),
}
}
func (f *Filler) SetLineWidth(x float64) {
f.GC.SetLineWidth(x)
}
func (f *Filler) SetFillRuleWinding(x bool) {
if x {
f.GC.SetFillRule(draw2d.FillRuleWinding)
} else {
f.GC.SetFillRule(draw2d.FillRuleEvenOdd)
}
}
func (f *Filler) Clear() {
f.Path = NewPath()
}
func (f *Filler) TakePath(p *Path) {
f.Path = p
}
func (f *Filler) Scale(sx, sy float64) {
f.GC.Scale(sx, sy)
}
func (f *Filler) Translate(sx, sy float64) {
f.GC.Translate(sx, sy)
}
func (f *Filler) Fill(mypaths ...*Path) {
if len(mypaths) == 0 {
f.GC.Fill(f.Path.Path)
f.Clear()
} else {
f.GC.Fill(ConvertPaths(mypaths)...)
}
}
func (f *Filler) Stroke(mypaths ...*Path) {
if len(mypaths) == 0 {
f.GC.Stroke(f.Path.Path)
f.Clear()
} else {
f.GC.Stroke(ConvertPaths(mypaths)...)
}
}
func (f *Filler) FillStroke(mypaths ...*Path) {
if len(mypaths) == 0 {
f.GC.FillStroke(f.Path.Path)
f.Clear()
} else {
f.GC.FillStroke(ConvertPaths(mypaths)...)
}
}
func (p *Filler) SetFillColor(R, G, B, A uint8) {
c := color.RGBA{R, G, B, A}
p.CurFillColor = c
p.GC.SetFillColor(c)
}
func (p *Filler) SetStrokeColor(R, G, B, A uint8) {
c := color.RGBA{R, G, B, A}
p.CurStrokeColor = c
p.GC.SetStrokeColor(c)
}
func (p *Filler) SetFillC(name string) {
c := Pallate(name)
p.CurFillColor = c
p.GC.SetFillColor(c)
}
func (p *Filler) SetStrokeC(name string) {
c := Pallate(name)
p.CurStrokeColor = c
p.GC.SetStrokeColor(c)
}
func (p *Filler) SetFillCL(name string, s float64) {
c := Pallate(name)
p.CurFillColor = c
p.GC.SetFillColor(Lighter(c, s))
}
func (p *Filler) SetStrokeCL(name string, s float64) {
c := Pallate(name)
p.CurStrokeColor = c
p.GC.SetStrokeColor(Lighter(c, s))
}
func (p *Filler) SetFillLight(s float64) {
c := Lighter(p.CurFillColor, s)
p.GC.SetFillColor(c)
}
func (p *Filler) SetStrokeLight(s float64) {
c := Lighter(p.CurStrokeColor, s)
p.GC.SetStrokeColor(c)
}
func (f *Filler) Pencil() {
f.SetLineWidth(1)
f.SetStrokeC("black")
}
func (f *Filler) IsFillWhite() bool {
return f.CurFillColor == color.RGBA{0xff, 0xff, 0xff, 40}
}
func (f *Filler) IsFillBlack() bool {
return f.CurFillColor == color.RGBA{0, 0, 0, 255}
}
|
[
2
] |
package main
import (
"fmt"
"os"
"path"
"strings"
)
func main() {
conDir := "/Users/alizohrevand/homebrew/Cellar/mosquitto/1.4.14_2"
confFile := "test2"
confAddr := path.Join(conDir, confFile)
d1 := []byte("hello\ngo\n")
/* err := ioutil.WriteFile(confAddr, d1, 0644)
fmt.Println(err)*/
	fo, err := os.OpenFile(confAddr, os.O_RDWR|os.O_CREATE, 0644) // os.Open is read-only; open read-write so the Write below can succeed
	if err != nil {
		fmt.Println(err)
		return
	}
	defer fo.Close()
fo.Write(d1)
buffer := make([]byte, 10)
fo.Read(buffer)
fmt.Println(string(buffer))
temp := string(buffer)
if strings.Contains(temp, "go") {
temp = strings.Replace(temp, "\ngo", "bye", -1)
} else {
temp = temp + "\n" + "bye"
}
	b := []byte(temp)
	_ = b // the modified content is built but never written back
fmt.Println("last ", err)
}
func createFile(Addr string) (file *os.File, err error) {
file, err = os.Open(Addr)
if err != nil {
file, err = os.Create(Addr)
}
return
}
|
[
2
] |
package dotpath
import (
"fmt"
"reflect"
"strconv"
"strings"
"github.com/pkg/errors"
)
func get_from_slice_by_path(IDX int, paths []string, data []interface{}) (interface{}, error) {
if data == nil {
return nil, errors.New("input data is nil")
}
header := fmt.Sprintf("get_from_slice_by_path: %d %v in:\n%+v\n\n", IDX, paths, data)
// fmt.Println(header)
logger.Debug(header)
if IDX >= len(paths) || len(paths) == 0 {
return nil, nil
}
subs := []interface{}{}
P := paths[IDX]
lpos_index := strings.Index(P, "[")
rpos_index := strings.LastIndex(P, "]")
pos_colon := strings.Index(P, ":")
has_listing := strings.Contains(P, ",")
// pos_regex := strings.Index(P, "regex")
has_eq := strings.Contains(P, "==")
// has_ne := strings.Contains(P, "!=")
inner := ""
if lpos_index > -1 {
inner = P[lpos_index+1 : rpos_index]
}
// fmt.Printf(" slice inner: %d %q %q\n", IDX, inner, P)
// handle indexing here
if inner != "" {
inner := P[lpos_index+1 : rpos_index]
// fmt.Printf("index: %q [%d:%d]\n", inner, lpos_index+1, rpos_index)
// handle slicing
if pos_colon > -1 {
elems, err := extract_from_slice_with_splice(inner, data)
if err != nil {
return nil, errors.Wrap(err, "while extracting splice in dotpath case []interface{}")
}
return elems, nil
}
// handle equality
if has_eq {
fields := strings.Split(inner, "==")
if len(fields) != 2 {
return nil, errors.New("Found not 2 fields in equality in: " + P)
}
elems, err := extract_from_slice_with_field(fields[0], fields[1], data)
if err != nil {
return nil, errors.Wrap(err, "while extracting has_eq in dotpath case []interface{}")
}
return elems, nil
}
// handle listing
if has_listing {
elems, err := extract_from_slice_with_name(inner, data)
if err != nil {
return nil, errors.Wrap(err, "while extracting listing in dotpath case []interface{}")
}
return elems, nil
}
// handle int
i_val, ierr := strconv.Atoi(inner)
if ierr == nil {
if i_val < 0 || i_val >= len(data) {
str := "index out of range: " + inner
return str, errors.New(str)
}
return data[i_val], nil
}
// default is single string for name, then field
elems, err := extract_from_slice_with_name(inner, data)
if err != nil {
return nil, errors.Wrap(err, "while extracting name/field in dotpath case []interface{}")
}
return elems, nil
} else {
// No inner indexing
for _, elem := range data {
logger.Debug(" - elem", "elem", elem, "paths", paths, "P", P)
switch V := elem.(type) {
case map[string]interface{}:
logger.Debug(" map[string]")
val, err := get_from_smap_by_path(IDX, paths, V)
if err != nil {
logger.Debug("could not find '" + P + "' in object")
continue
}
logger.Debug("Adding val", "val", val)
subs = append(subs, val)
case map[interface{}]interface{}:
logger.Debug(" map[iface]")
val, err := get_from_imap_by_path(IDX, paths, V)
if err != nil {
logger.Debug("could not find '" + P + "' in object")
continue
}
logger.Debug("Adding val", "val", val)
subs = append(subs, val)
default:
str := fmt.Sprintf("%+v", reflect.TypeOf(V))
return nil, errors.New("element not an object type: " + str)
}
}
}
if len(subs) == 1 {
return subs[0], nil
}
return subs, nil
}
|
[
2
] |
package ef
import (
"fmt"
"math/rand"
"testing"
)
func TestIterator(t *testing.T) {
for i, tc := range testCases {
d, err := From(tc.in)
if err != nil {
t.Error(err)
}
it := d.Iterator()
for j := range tc.in {
got, gok := it.Next()
want, wok := d.Value(j)
if got != want || gok != wok {
t.Errorf(
"tc: %d, t: %d, got: %d, want: %d, gok: %v, wok: %v\n",
i, j, got, want, gok, wok,
)
}
}
for j := range tc.in {
got, gok := it.Value(j)
want, wok := d.Value(j)
if got != want || gok != wok {
t.Errorf(
"tc: %d, t: %d, got: %d, want: %d, gok: %v, wok: %v\n",
i, j, got, want, gok, wok,
)
}
for k := j + 1; k < len(tc.in); k++ {
got, gok := it.Next()
want, wok := d.Value(k)
if got != want || gok != wok {
t.Errorf(
"tc: %d, t: %d, k: %d, got: %d, want: %d, gok: %v, wok: %v\n",
i, j, k, got, want, gok, wok,
)
}
}
}
}
}
func BenchmarkIterator(b *testing.B) {
const (
n = 1_000_000
max = 100
)
in := make([]uint, n)
rand.Seed(18)
var prev uint
for i := range in {
prev += uint(rand.Intn(max))
in[i] = prev
}
d, err := From(in)
if err != nil {
b.Error(err)
}
it := d.Iterator()
b.Run(fmt.Sprintf("Next([%d])", n), func(b *testing.B) {
for i := 0; i < b.N; i++ {
v, ok := it.Value(0)
for i := 0; i < n; i++ {
v, ok = it.Next()
}
_ = v
_ = ok
}
})
}
|
[
1
] |
package adv
type RmTask struct {
Id int64 `json:"id" name:"id" title:"评论ID"`
Channel string `json:"channel" name:"channel" title:"频道"`
Position int32 `json:"position" name:"position" title:"广告组位置"`
}
func (T *RmTask) GetName() string {
return "rm.json"
}
func (T *RmTask) GetTitle() string {
return "删除广告"
}
|
[
2
] |
package cluster
import (
"context"
"encoding/json"
errors2 "errors"
"fmt"
"reflect"
"github.com/rancher/rancher/pkg/kontainer-engine/logstream"
"github.com/rancher/rancher/pkg/kontainer-engine/types"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
)
const (
PreCreating = "Pre-Creating"
Creating = "Creating"
PostCheck = "Post-Checking"
Running = "Running"
Error = "Error"
Updating = "Updating"
Init = "Init"
)
var (
	// ErrClusterExists is checked by Rancher; don't change the error string
ErrClusterExists = errors2.New("cluster already exists")
)
// Cluster represents a kubernetes cluster
type Cluster struct {
// The cluster driver to provision cluster
Driver types.CloseableDriver `json:"-"`
// The name of the cluster driver
DriverName string `json:"driverName,omitempty" yaml:"driver_name,omitempty"`
// The name of the cluster
Name string `json:"name,omitempty" yaml:"name,omitempty"`
// The status of the cluster
Status string `json:"status,omitempty" yaml:"status,omitempty"`
// specific info about kubernetes cluster
// Kubernetes cluster version
Version string `json:"version,omitempty" yaml:"version,omitempty"`
// Service account token to access kubernetes API
ServiceAccountToken string `json:"serviceAccountToken,omitempty" yaml:"service_account_token,omitempty"`
// Kubernetes API master endpoint
Endpoint string `json:"endpoint,omitempty" yaml:"endpoint,omitempty"`
// Username for http basic authentication
Username string `json:"username,omitempty" yaml:"username,omitempty"`
// Password for http basic authentication
Password string `json:"password,omitempty" yaml:"password,omitempty"`
// Root CaCertificate for API server(base64 encoded)
RootCACert string `json:"rootCACert,omitempty" yaml:"root_ca_cert,omitempty"`
// Client Certificate(base64 encoded)
ClientCertificate string `json:"clientCertificate,omitempty" yaml:"client_certificate,omitempty"`
// Client private key(base64 encoded)
ClientKey string `json:"clientKey,omitempty" yaml:"client_key,omitempty"`
// Node count in the cluster
NodeCount int64 `json:"nodeCount,omitempty" yaml:"node_count,omitempty"`
// Metadata store specific driver options per cloud provider
Metadata map[string]string `json:"metadata,omitempty" yaml:"metadata,omitempty"`
PersistStore PersistentStore `json:"-" yaml:"-"`
ConfigGetter ConfigGetter `json:"-" yaml:"-"`
Logger logstream.Logger `json:"-" yaml:"-"`
}
// PersistentStore defines the interface for persist options like check and store
type PersistentStore interface {
GetStatus(name string) (string, error)
Get(name string) (Cluster, error)
Remove(name string) error
Store(cluster Cluster) error
PersistStatus(cluster Cluster, status string) error
}
// ConfigGetter defines the interface for getting the driver options.
type ConfigGetter interface {
GetConfig() (types.DriverOptions, error)
}
// Create creates a cluster
func (c *Cluster) Create(ctx context.Context) error {
if c.RootCACert != "" && c.Status == "" {
c.PersistStore.PersistStatus(*c, Init)
}
err := c.createInner(ctx)
if err != nil {
if err == ErrClusterExists {
c.PersistStore.PersistStatus(*c, Running)
} else {
c.PersistStore.PersistStatus(*c, Error)
}
return err
}
return c.PersistStore.PersistStatus(*c, Running)
}
func (c *Cluster) create(ctx context.Context, clusterInfo *types.ClusterInfo) error {
if c.Status == PostCheck {
return nil
}
if err := c.PersistStore.PersistStatus(*c, PreCreating); err != nil {
return err
}
// get cluster config from cli flags or json config
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
// also set metadata value to retrieve the cluster info
for k, v := range c.Metadata {
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Creating); err != nil {
return err
}
// create cluster
info, err := c.Driver.Create(ctx, &driverOpts, clusterInfo)
if info != nil {
transformClusterInfo(c, info)
}
return err
}
func (c *Cluster) PostCheck(ctx context.Context) error {
if err := c.PersistStore.PersistStatus(*c, PostCheck); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) GenerateServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) RemoveLegacyServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
return c.Driver.RemoveLegacyServiceAccount(ctx, toInfo(c))
}
func (c *Cluster) createInner(ctx context.Context) error {
// check if it is already created
c.restore()
var info *types.ClusterInfo
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
info = toInfo(c)
}
if c.Status == Updating || c.Status == Running || c.Status == PostCheck || c.Status == Init {
logrus.Infof("Cluster %s already exists.", c.Name)
return ErrClusterExists
}
if err := c.create(ctx, info); err != nil {
return err
}
return c.PostCheck(ctx)
}
// Update updates a cluster
func (c *Cluster) Update(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
return c.Create(ctx)
}
if c.Status == PreCreating || c.Status == Creating {
logrus.Errorf("Cluster %s has not been created.", c.Name)
return fmt.Errorf("cluster %s has not been created", c.Name)
}
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
driverOpts.StringOptions["name"] = c.Name
for k, v := range c.Metadata {
if k == "state" {
state := make(map[string]interface{})
if err := json.Unmarshal([]byte(v), &state); err == nil {
flattenIfNotExist(state, &driverOpts)
}
continue
}
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Updating); err != nil {
return err
}
info := toInfo(c)
info, err = c.Driver.Update(ctx, info, &driverOpts)
if err != nil {
return err
}
transformClusterInfo(c, info)
return c.PostCheck(ctx)
}
func (c *Cluster) GetVersion(ctx context.Context) (*types.KubernetesVersion, error) {
return c.Driver.GetVersion(ctx, toInfo(c))
}
func (c *Cluster) SetVersion(ctx context.Context, version *types.KubernetesVersion) error {
return c.Driver.SetVersion(ctx, toInfo(c), version)
}
func (c *Cluster) GetClusterSize(ctx context.Context) (*types.NodeCount, error) {
return c.Driver.GetClusterSize(ctx, toInfo(c))
}
func (c *Cluster) SetClusterSize(ctx context.Context, count *types.NodeCount) error {
return c.Driver.SetClusterSize(ctx, toInfo(c), count)
}
func transformClusterInfo(c *Cluster, clusterInfo *types.ClusterInfo) {
c.ClientCertificate = clusterInfo.ClientCertificate
c.ClientKey = clusterInfo.ClientKey
c.RootCACert = clusterInfo.RootCaCertificate
c.Username = clusterInfo.Username
c.Password = clusterInfo.Password
c.Version = clusterInfo.Version
c.Endpoint = clusterInfo.Endpoint
c.NodeCount = clusterInfo.NodeCount
c.Metadata = clusterInfo.Metadata
c.ServiceAccountToken = clusterInfo.ServiceAccountToken
c.Status = clusterInfo.Status
}
func toInfo(c *Cluster) *types.ClusterInfo {
return &types.ClusterInfo{
ClientCertificate: c.ClientCertificate,
ClientKey: c.ClientKey,
RootCaCertificate: c.RootCACert,
Username: c.Username,
Password: c.Password,
Version: c.Version,
Endpoint: c.Endpoint,
NodeCount: c.NodeCount,
Metadata: c.Metadata,
ServiceAccountToken: c.ServiceAccountToken,
Status: c.Status,
}
}
// Remove removes a cluster
func (c *Cluster) Remove(ctx context.Context, forceRemove bool) error {
if err := c.restore(); errors.IsNotFound(err) {
return nil
} else if err != nil {
return err
}
if err := c.Driver.Remove(ctx, toInfo(c)); err != nil {
// Persist store removal must take place despite error to prevent cluster from being stuck in remove state
// TODO: We should add a "forceRemove" action to cluster and then revert this to return an error, so that
// the user can see the problem and take appropriate action
if !forceRemove {
return fmt.Errorf("Error removing cluster [%s] with driver [%s]: %v", c.Name, c.DriverName, err)
}
logrus.Errorf("Error removing cluster [%s] with driver [%s]. Check for stray resources on cloud provider: %v", c.Name, c.DriverName, err)
}
return c.PersistStore.Remove(c.Name)
}
func (c *Cluster) GetCapabilities(ctx context.Context) (*types.Capabilities, error) {
return c.Driver.GetCapabilities(ctx)
}
func (c *Cluster) GetK8SCapabilities(ctx context.Context) (*types.K8SCapabilities, error) {
options, err := c.ConfigGetter.GetConfig()
if err != nil {
return nil, err
}
return c.Driver.GetK8SCapabilities(ctx, &options)
}
func (c *Cluster) GetDriverCreateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverCreateOptions(ctx)
}
func (c *Cluster) GetDriverUpdateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverUpdateOptions(ctx)
}
// Store persists cluster information
func (c *Cluster) Store() error {
return c.PersistStore.Store(*c)
}
func (c *Cluster) restore() error {
cluster, err := c.PersistStore.Get(c.Name)
if err != nil {
return err
}
info := toInfo(&cluster)
transformClusterInfo(c, info)
return nil
}
// NewCluster create a cluster interface to do operations
func NewCluster(driverName, name, addr string, configGetter ConfigGetter, persistStore PersistentStore) (*Cluster, error) {
rpcClient, err := types.NewClient(driverName, addr)
if err != nil {
return nil, err
}
return &Cluster{
Driver: rpcClient,
DriverName: driverName,
Name: name,
ConfigGetter: configGetter,
PersistStore: persistStore,
}, nil
}
func FromCluster(cluster *Cluster, addr string, configGetter ConfigGetter, persistStore PersistentStore) (*Cluster, error) {
rpcClient, err := types.NewClient(cluster.DriverName, addr)
if err != nil {
return nil, err
}
cluster.Driver = rpcClient
cluster.ConfigGetter = configGetter
cluster.PersistStore = persistStore
return cluster, nil
}
// flattenIfNotExist flattens a map into driverOptions, setting only keys that do not already exist
func flattenIfNotExist(data map[string]interface{}, driverOptions *types.DriverOptions) {
for k, v := range data {
switch v.(type) {
case float64:
if _, exist := driverOptions.IntOptions[k]; !exist {
driverOptions.IntOptions[k] = int64(v.(float64))
}
case string:
if _, exist := driverOptions.StringOptions[k]; !exist {
driverOptions.StringOptions[k] = v.(string)
}
case bool:
if _, exist := driverOptions.BoolOptions[k]; !exist {
driverOptions.BoolOptions[k] = v.(bool)
}
case []interface{}:
// lists of strings come across as lists of interfaces, have to convert them manually
var stringArray []string
for _, stringInterface := range v.([]interface{}) {
switch stringInterface.(type) {
case string:
stringArray = append(stringArray, stringInterface.(string))
}
}
// if the length is 0 then it must not have been an array of strings
if len(stringArray) != 0 {
if _, exist := driverOptions.StringSliceOptions[k]; !exist {
driverOptions.StringSliceOptions[k] = &types.StringSlice{Value: stringArray}
}
}
case []string:
if _, exist := driverOptions.StringSliceOptions[k]; !exist {
driverOptions.StringSliceOptions[k] = &types.StringSlice{Value: v.([]string)}
}
case map[string]interface{}:
// hack for labels
if k == "tags" {
r := make([]string, 0, 4)
for key1, value1 := range v.(map[string]interface{}) {
r = append(r, fmt.Sprintf("%v=%v", key1, value1))
}
if _, exist := driverOptions.StringSliceOptions[k]; !exist {
driverOptions.StringSliceOptions[k] = &types.StringSlice{Value: r}
}
} else {
flattenIfNotExist(v.(map[string]interface{}), driverOptions)
}
case nil:
logrus.Debugf("could not convert %v because value is nil %v=%v", reflect.TypeOf(v), k, v)
default:
logrus.Warnf("could not convert %v %v=%v", reflect.TypeOf(v), k, v)
}
}
}
|
[
7
] |
package rtsp
import (
"bytes"
"encoding/json"
"io/ioutil"
"log"
"net/http"
"strings"
)
type WebHookActionType string
const (
ON_PLAY WebHookActionType = "on_play"
ON_STOP WebHookActionType = "on_stop"
ON_PUBLISH WebHookActionType = "on_publish"
ON_TEARDOWN WebHookActionType = "on_teardown"
)
type WebHookInfo struct {
ID string `json:"clientId"`
SessionType string `json:"sessionType"`
TransType string `json:"transType"`
URL string `json:"url"`
Path string `json:"path"`
SDP string `json:"sdp"`
ActionType WebHookActionType `json:"actionType"`
ClientAddr string `json:"clientAddr"`
logger *log.Logger
}
func NewWebHookInfo(ActionType WebHookActionType, ID string, sessionType SessionType, transType TransType, url, path, sdp, clientAddr string, logger *log.Logger) (webHook *WebHookInfo) {
webHook = &WebHookInfo{
ActionType: ActionType,
ID: ID,
SessionType: sessionType.String(),
TransType: transType.String(),
URL: url,
Path: path,
SDP: sdp,
ClientAddr: strings.Split(clientAddr, ":")[0],
logger: logger,
}
return
}
func (session *Session) ToCloseWebHookInfo() (webHook *WebHookInfo) {
var ActionType WebHookActionType
if session.Type == SESSEION_TYPE_PLAYER {
ActionType = ON_STOP
} else {
ActionType = ON_TEARDOWN
}
return session.ToWebHookInfo(ActionType)
}
func (session *Session) ToWebHookInfo(ActionType WebHookActionType) (webHook *WebHookInfo) {
webHook = &WebHookInfo{
ActionType: ActionType,
ID: session.ID,
SessionType: session.Type.String(),
TransType: session.TransType.String(),
URL: session.URL,
Path: session.Path,
SDP: session.SDPRaw,
ClientAddr: strings.Split(session.Conn.RemoteAddr().String(), ":")[0],
logger: session.logger,
}
return
}
func (webHook WebHookInfo) ExecuteWebHookNotify() (success bool) {
server := GetServer()
success = true
var webHookUrls []string
switch webHook.ActionType {
case ON_PLAY:
webHookUrls = server.onPlay
success = false
case ON_STOP:
webHookUrls = server.onStop
case ON_PUBLISH:
webHookUrls = server.onPublish
success = false
case ON_TEARDOWN:
webHookUrls = server.onTeardown
}
if len(webHookUrls) == 0 {
return true
}
jsonBytes, _ := json.Marshal(webHook)
for _, url := range webHookUrls {
response, err := http.Post(url, "application/json", bytes.NewReader(jsonBytes))
if err != nil {
server.logger.Printf("request web hook [%s] error:%v", url, err)
if response != nil && response.Body != nil {
response.Body.Close()
}
continue
} else if response.StatusCode != 200 {
if response != nil && response.Body != nil {
response.Body.Close()
}
continue
}
resultBytes, err := ioutil.ReadAll(response.Body)
if err != nil {
server.logger.Printf("request web hook [%s] error:%v", url, err)
if response != nil && response.Body != nil {
response.Body.Close()
}
continue
}
if result := string(resultBytes); result != "0" {
server.logger.Printf("request web hook [%s] return error:%v", url, result)
if response != nil && response.Body != nil {
response.Body.Close()
}
continue
}
response.Body.Close()
return true
}
return
}
|
[
2
] |
package http
import (
"github.com/julienschmidt/httprouter"
"github.com/tebben/marvin/go/marvin/models"
"log"
netHTTP "net/http"
"strconv"
)
// MarvinServer is the type that contains all of the relevant information to set
// up the Marvin HTTP Server
type MarvinServer struct {
marvin *models.Marvin
host string // Hostname for example "localhost" or "192.168.1.14"
port int // Port number where you want to run your http server on
endpoints []models.MarvinEndpoint // Configured endpoints for Marvin HTTP
}
// CreateServer initialises a new Marvin HTTPServer based on the given parameters
func CreateServer(marvin *models.Marvin, host string, port int, endpoints []models.MarvinEndpoint) models.HTTPServer {
return &MarvinServer{
marvin: marvin,
host: host,
port: port,
endpoints: endpoints,
}
}
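// Usage sketch (illustrative, not part of the original code; it assumes the
// models.HTTPServer interface returned by CreateServer exposes Start, and that
// a *models.Marvin value plus endpoint list are built elsewhere):
//
//	srv := CreateServer(marvin, "localhost", 8080, endpoints)
//	srv.Start() // blocks serving HTTP; websockets are served separately on :9000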
// Start starts the Marvin HTTP server
func (ms *MarvinServer) Start() {
go startWebsockets()
log.Printf("Started Marvin HTTP Server on %v:%v\n", ms.host, ms.port)
router := createRouter(ms)
httpError := netHTTP.ListenAndServe(ms.host+":"+strconv.Itoa(ms.port), router)
if httpError != nil {
log.Fatal(httpError)
return
}
}
func startWebsockets() {
go hub.run()
netHTTP.HandleFunc("/ws", serveWs)
err := netHTTP.ListenAndServe(":9000", nil)
if err != nil {
log.Fatal("ListenAndServe: ", err)
}
}
// Stop command to stop the Marvin HTTP server, currently not supported
func (ms *MarvinServer) Stop() {
}
func createRouter(ms *MarvinServer) *httprouter.Router {
router := httprouter.New()
for _, endpoint := range ms.endpoints {
ep := endpoint
for _, op := range ep.GetOperations() {
operation := op
if operation.Handler == nil {
continue
}
switch operation.OperationType {
case models.HTTPOperationGet:
{
router.GET(operation.Path, func(w netHTTP.ResponseWriter, r *netHTTP.Request, p httprouter.Params) {
operation.Handler(w, r, ms.marvin)
})
}
}
}
}
router.ServeFiles("/app/*filepath", netHTTP.Dir("client/app"))
return router
}
|
[
7
] |
package effect
import (
"korok.io/korok/gfx"
"korok.io/korok/math"
"korok.io/korok/math/f32"
)
// SnowSimulator simulates a snow effect.
type SnowSimulator struct {
Pool
RateController
LifeController
VisualController
velocity channel_v2
rot channel_f32
rotDelta channel_f32
// Configuration.
Config struct {
Duration, Rate float32
Life Var
Size Var
Color f32.Vec4
Position [2]Var
Velocity [2]Var
Rotation Var
}
}
func NewSnowSimulator(cap int, w, h float32) *SnowSimulator {
sim := SnowSimulator{Pool: Pool{cap: cap}}
sim.AddChan(Life, Size)
sim.AddChan(Position, Velocity)
sim.AddChan(Color)
sim.AddChan(Rotation, RotationDelta)
// config
sim.Config.Duration = math.MaxFloat32
sim.Config.Rate = 60
sim.Config.Life = Var{10, 4}
sim.Config.Color = f32.Vec4{1, 0, 0, 1}
sim.Config.Size = Var{6, 6}
sim.Config.Position[0] = Var{0, w}
sim.Config.Position[1] = Var{h, 0}
sim.Config.Velocity[0] = Var{-10, 20}
sim.Config.Velocity[1] = Var{-50, 20}
sim.Config.Rotation = Var{0, 360}
return &sim
}
func (sim *SnowSimulator) Initialize() {
sim.Pool.Initialize()
sim.life = sim.Field(Life).(channel_f32)
sim.size = sim.Field(Size).(channel_f32)
sim.pose = sim.Field(Position).(channel_v2)
sim.velocity = sim.Field(Velocity).(channel_v2)
sim.color = sim.Field(Color).(channel_v4)
sim.rot = sim.Field(Rotation).(channel_f32)
sim.rotDelta = sim.Field(RotationDelta).(channel_f32)
sim.RateController.Initialize(sim.Config.Duration, sim.Config.Rate)
}
func (sim *SnowSimulator) Simulate(dt float32) {
if new := sim.Rate(dt); new > 0 {
sim.NewParticle(new)
}
n := int32(sim.live)
// update old particle
sim.life.Sub(n, dt)
// position integrate: p' = p + v * t
sim.pose.Integrate(n, sim.velocity, dt)
sim.rot.Integrate(n, sim.rotDelta, dt)
// GC
sim.GC(&sim.Pool)
}
func (sim *SnowSimulator) Size() (live, cap int) {
return int(sim.live), sim.cap
}
func (sim *SnowSimulator) NewParticle(new int) {
if (sim.live + new) > sim.cap {
return
}
start := sim.live
sim.live += new
rot := Range{Var{0, 10}, Var{1, 10}}
for i := start; i < sim.live; i++ {
sim.life[i] = sim.Config.Life.Random()
sim.color[i] = sim.Config.Color
sim.size[i] = sim.Config.Size.Random()
sim.rot[i], sim.rotDelta[i] = rot.RangeInit(1 / sim.life[i])
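		// Alpha scales with the particle's size relative to (Size.Base + Size.Var),
		// so smaller flakes are rendered more transparent.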
f := sim.size[i] / (sim.Config.Size.Base + sim.Config.Size.Var)
sim.color[i][3] = f
px := sim.Config.Position[0].Random()
py := sim.Config.Position[1].Random()
sim.pose[i] = f32.Vec2{px, py}
dx := sim.Config.Velocity[0].Random()
dy := sim.Config.Velocity[1].Random()
sim.velocity[i] = f32.Vec2{dx, dy}
}
}
func (sim *SnowSimulator) Visualize(buf []gfx.PosTexColorVertex, tex gfx.Tex2D) {
size := sim.size
pose := sim.pose
rotate := sim.rot
// compute vbo
for i := 0; i < sim.live; i++ {
vi := i << 2
h_size := size[i] / 2
var (
r = math.Clamp(sim.color[i][0], 0, 1)
g_ = math.Clamp(sim.color[i][1], 0, 1)
b = math.Clamp(sim.color[i][2], 0, 1)
a = math.Clamp(sim.color[i][3], 0, 1)
)
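		// Pack the clamped RGBA components into one uint32 (R in the low byte,
		// A in the high byte) as expected by the vertex RGBA field.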
c := uint32(a*255)<<24 + uint32(b*255)<<16 + uint32(g_*255)<<8 + uint32(r*255)
rg := tex.Region()
rot := float32(0)
if len(rotate) > i {
rot = rotate[i]
}
// bottom-left
buf[vi+0].X, buf[vi+0].Y = math.Rotate(pose[i][0]-h_size, pose[i][1]-h_size, pose[i][0], pose[i][1], rot)
buf[vi+0].U = rg.X1
buf[vi+0].V = rg.Y1
buf[vi+0].RGBA = c
// bottom-right
buf[vi+1].X, buf[vi+1].Y = math.Rotate(pose[i][0]+h_size, pose[i][1]-h_size, pose[i][0], pose[i][1], rot)
buf[vi+1].U = rg.X2
buf[vi+1].V = rg.Y1
buf[vi+1].RGBA = c
// top-right
buf[vi+2].X, buf[vi+2].Y = math.Rotate(pose[i][0]+h_size, pose[i][1]+h_size, pose[i][0], pose[i][1], rot)
buf[vi+2].U = rg.X2
buf[vi+2].V = rg.Y2
buf[vi+2].RGBA = c
// top-left
buf[vi+3].X, buf[vi+3].Y = math.Rotate(pose[i][0]-h_size, pose[i][1]+h_size, pose[i][0], pose[i][1], rot)
buf[vi+3].U = rg.X1
buf[vi+3].V = rg.Y2
buf[vi+3].RGBA = c
}
}
|
[
1
] |
package Codity
// timeout 50%
func Solution(A []int) int {
count := 0
	for i := 0; i < len(A); i++ {
		for j := i + 1; j < len(A); j++ {
			// fmt.Println(A[i], A[j])
if A[i] == 0 {
if A[j] == 1 {
count++
}
}
}
}
if count > 1000000000 {
return -1
} else {
return count
}
}
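// A linear-time sketch of the same count (not part of the original submission):
// every 1 pairs with all 0s seen before it, so a single pass with a running
// count of zeros avoids the nested loop that causes the timeout.
func SolutionLinear(A []int) int {
	zeros := 0
	count := 0
	for _, v := range A {
		if v == 0 {
			zeros++
		} else {
			count += zeros
			if count > 1000000000 {
				return -1
			}
		}
	}
	return count
}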
|
[
2
] |
package gaia
import (
"fmt"
"github.com/globalsign/mgo/bson"
"github.com/mitchellh/copystructure"
"go.aporeto.io/elemental"
)
// OAUTHKeyIdentity represents the Identity of the object.
var OAUTHKeyIdentity = elemental.Identity{
Name: "oauthkey",
Category: "oauthkeys",
Package: "cactuar",
Private: false,
}
// OAUTHKeysList represents a list of OAUTHKeys
type OAUTHKeysList []*OAUTHKey
// Identity returns the identity of the objects in the list.
func (o OAUTHKeysList) Identity() elemental.Identity {
return OAUTHKeyIdentity
}
// Copy returns a pointer to a copy of the OAUTHKeysList.
func (o OAUTHKeysList) Copy() elemental.Identifiables {
	copy := append(OAUTHKeysList{}, o...)
	return &copy
}
// Append appends the objects to a new copy of the OAUTHKeysList.
func (o OAUTHKeysList) Append(objects ...elemental.Identifiable) elemental.Identifiables {
out := append(OAUTHKeysList{}, o...)
for _, obj := range objects {
out = append(out, obj.(*OAUTHKey))
}
return out
}
// List converts the object to an elemental.IdentifiablesList.
func (o OAUTHKeysList) List() elemental.IdentifiablesList {
out := make(elemental.IdentifiablesList, len(o))
for i := 0; i < len(o); i++ {
out[i] = o[i]
}
return out
}
// DefaultOrder returns the default ordering fields of the content.
func (o OAUTHKeysList) DefaultOrder() []string {
return []string{}
}
// ToSparse returns the OAUTHKeysList converted to SparseOAUTHKeysList.
// Objects in the list will only contain the given fields. No field means entire field set.
func (o OAUTHKeysList) ToSparse(fields ...string) elemental.Identifiables {
out := make(SparseOAUTHKeysList, len(o))
for i := 0; i < len(o); i++ {
out[i] = o[i].ToSparse(fields...).(*SparseOAUTHKey)
}
return out
}
// Version returns the version of the content.
func (o OAUTHKeysList) Version() int {
return 1
}
// OAUTHKey represents the model of an oauthkey
type OAUTHKey struct {
// KeyString is the JWKS key response for an OAUTH verifier. It provides the OAUTH
// compatible signing keys.
KeyString string `json:"keyString" msgpack:"keyString" bson:"-" mapstructure:"keyString,omitempty"`
ModelVersion int `json:"-" msgpack:"-" bson:"_modelversion"`
}
// NewOAUTHKey returns a new *OAUTHKey
func NewOAUTHKey() *OAUTHKey {
return &OAUTHKey{
ModelVersion: 1,
}
}
// Identity returns the Identity of the object.
func (o *OAUTHKey) Identity() elemental.Identity {
return OAUTHKeyIdentity
}
// Identifier returns the value of the object's unique identifier.
func (o *OAUTHKey) Identifier() string {
return ""
}
// SetIdentifier sets the value of the object's unique identifier.
func (o *OAUTHKey) SetIdentifier(id string) {
}
// GetBSON implements the bson marshaling interface.
// This is used to transparently convert ID to MongoDBID as ObjectID.
func (o *OAUTHKey) GetBSON() (interface{}, error) {
if o == nil {
return nil, nil
}
s := &mongoAttributesOAUTHKey{}
return s, nil
}
// SetBSON implements the bson marshaling interface.
// This is used to transparently convert ID to MongoDBID as ObjectID.
func (o *OAUTHKey) SetBSON(raw bson.Raw) error {
if o == nil {
return nil
}
s := &mongoAttributesOAUTHKey{}
if err := raw.Unmarshal(s); err != nil {
return err
}
return nil
}
// Version returns the hardcoded version of the model.
func (o *OAUTHKey) Version() int {
return 1
}
// BleveType implements the bleve.Classifier Interface.
func (o *OAUTHKey) BleveType() string {
return "oauthkey"
}
// DefaultOrder returns the list of default ordering fields.
func (o *OAUTHKey) DefaultOrder() []string {
return []string{}
}
// Doc returns the documentation for the object
func (o *OAUTHKey) Doc() string {
return `OAUTHInfo provides the information for an OAUTH server to retrieve the secrets
that can validate a JWT token issued by us.`
}
func (o *OAUTHKey) String() string {
return fmt.Sprintf("<%s:%s>", o.Identity().Name, o.Identifier())
}
// ToSparse returns the sparse version of the model.
// The returned object will only contain the given fields. No field means entire field set.
func (o *OAUTHKey) ToSparse(fields ...string) elemental.SparseIdentifiable {
if len(fields) == 0 {
// nolint: goimports
return &SparseOAUTHKey{
KeyString: &o.KeyString,
}
}
sp := &SparseOAUTHKey{}
for _, f := range fields {
switch f {
case "keyString":
sp.KeyString = &(o.KeyString)
}
}
return sp
}
// Patch applies the non-nil values of a *SparseOAUTHKey to the object.
func (o *OAUTHKey) Patch(sparse elemental.SparseIdentifiable) {
if !sparse.Identity().IsEqual(o.Identity()) {
panic("cannot patch from a parse with different identity")
}
so := sparse.(*SparseOAUTHKey)
if so.KeyString != nil {
o.KeyString = *so.KeyString
}
}
// DeepCopy returns a deep copy of the OAUTHKey.
func (o *OAUTHKey) DeepCopy() *OAUTHKey {
if o == nil {
return nil
}
out := &OAUTHKey{}
o.DeepCopyInto(out)
return out
}
// DeepCopyInto copies the receiver into the given *OAUTHKey.
func (o *OAUTHKey) DeepCopyInto(out *OAUTHKey) {
target, err := copystructure.Copy(o)
if err != nil {
panic(fmt.Sprintf("Unable to deepcopy OAUTHKey: %s", err))
}
*out = *target.(*OAUTHKey)
}
// Validate validates the current information stored in the structure.
func (o *OAUTHKey) Validate() error {
errors := elemental.Errors{}
requiredErrors := elemental.Errors{}
if len(requiredErrors) > 0 {
return requiredErrors
}
if len(errors) > 0 {
return errors
}
return nil
}
// SpecificationForAttribute returns the AttributeSpecification for the given attribute name key.
func (*OAUTHKey) SpecificationForAttribute(name string) elemental.AttributeSpecification {
if v, ok := OAUTHKeyAttributesMap[name]; ok {
return v
}
// We could not find it, so let's check on the lower case indexed spec map
return OAUTHKeyLowerCaseAttributesMap[name]
}
// AttributeSpecifications returns the full attribute specifications map.
func (*OAUTHKey) AttributeSpecifications() map[string]elemental.AttributeSpecification {
return OAUTHKeyAttributesMap
}
// ValueForAttribute returns the value for the given attribute.
// This is a very advanced function that you should not need but in some
// very specific use cases.
func (o *OAUTHKey) ValueForAttribute(name string) interface{} {
switch name {
case "keyString":
return o.KeyString
}
return nil
}
// OAUTHKeyAttributesMap represents the map of attributes for OAUTHKey.
var OAUTHKeyAttributesMap = map[string]elemental.AttributeSpecification{
"KeyString": {
AllowedChoices: []string{},
Autogenerated: true,
ConvertedName: "KeyString",
Description: `KeyString is the JWKS key response for an OAUTH verifier. It provides the OAUTH
compatible signing keys.`,
Exposed: true,
Name: "keyString",
ReadOnly: true,
Type: "string",
},
}
// OAUTHKeyLowerCaseAttributesMap represents the map of attributes for OAUTHKey.
var OAUTHKeyLowerCaseAttributesMap = map[string]elemental.AttributeSpecification{
"keystring": {
AllowedChoices: []string{},
Autogenerated: true,
ConvertedName: "KeyString",
Description: `KeyString is the JWKS key response for an OAUTH verifier. It provides the OAUTH
compatible signing keys.`,
Exposed: true,
Name: "keyString",
ReadOnly: true,
Type: "string",
},
}
// SparseOAUTHKeysList represents a list of SparseOAUTHKeys
type SparseOAUTHKeysList []*SparseOAUTHKey
// Identity returns the identity of the objects in the list.
func (o SparseOAUTHKeysList) Identity() elemental.Identity {
return OAUTHKeyIdentity
}
// Copy returns a pointer to a copy of the SparseOAUTHKeysList.
func (o SparseOAUTHKeysList) Copy() elemental.Identifiables {
	copy := append(SparseOAUTHKeysList{}, o...)
	return &copy
}
// Append appends the objects to a new copy of the SparseOAUTHKeysList.
func (o SparseOAUTHKeysList) Append(objects ...elemental.Identifiable) elemental.Identifiables {
out := append(SparseOAUTHKeysList{}, o...)
for _, obj := range objects {
out = append(out, obj.(*SparseOAUTHKey))
}
return out
}
// List converts the object to an elemental.IdentifiablesList.
func (o SparseOAUTHKeysList) List() elemental.IdentifiablesList {
out := make(elemental.IdentifiablesList, len(o))
for i := 0; i < len(o); i++ {
out[i] = o[i]
}
return out
}
// DefaultOrder returns the default ordering fields of the content.
func (o SparseOAUTHKeysList) DefaultOrder() []string {
return []string{}
}
// ToPlain returns the SparseOAUTHKeysList converted to OAUTHKeysList.
func (o SparseOAUTHKeysList) ToPlain() elemental.IdentifiablesList {
out := make(elemental.IdentifiablesList, len(o))
for i := 0; i < len(o); i++ {
out[i] = o[i].ToPlain()
}
return out
}
// Version returns the version of the content.
func (o SparseOAUTHKeysList) Version() int {
return 1
}
// SparseOAUTHKey represents the sparse version of an oauthkey.
type SparseOAUTHKey struct {
// KeyString is the JWKS key response for an OAUTH verifier. It provides the OAUTH
// compatible signing keys.
KeyString *string `json:"keyString,omitempty" msgpack:"keyString,omitempty" bson:"-" mapstructure:"keyString,omitempty"`
ModelVersion int `json:"-" msgpack:"-" bson:"_modelversion"`
}
// NewSparseOAUTHKey returns a new SparseOAUTHKey.
func NewSparseOAUTHKey() *SparseOAUTHKey {
return &SparseOAUTHKey{}
}
// Identity returns the Identity of the sparse object.
func (o *SparseOAUTHKey) Identity() elemental.Identity {
return OAUTHKeyIdentity
}
// Identifier returns the value of the sparse object's unique identifier.
func (o *SparseOAUTHKey) Identifier() string {
return ""
}
// SetIdentifier sets the value of the sparse object's unique identifier.
func (o *SparseOAUTHKey) SetIdentifier(id string) {
}
// GetBSON implements the bson marshaling interface.
// This is used to transparently convert ID to MongoDBID as ObjectID.
func (o *SparseOAUTHKey) GetBSON() (interface{}, error) {
if o == nil {
return nil, nil
}
s := &mongoAttributesSparseOAUTHKey{}
return s, nil
}
// SetBSON implements the bson marshaling interface.
// This is used to transparently convert ID to MongoDBID as ObjectID.
func (o *SparseOAUTHKey) SetBSON(raw bson.Raw) error {
if o == nil {
return nil
}
s := &mongoAttributesSparseOAUTHKey{}
if err := raw.Unmarshal(s); err != nil {
return err
}
return nil
}
// Version returns the hardcoded version of the model.
func (o *SparseOAUTHKey) Version() int {
return 1
}
// ToPlain returns the plain version of the sparse model.
func (o *SparseOAUTHKey) ToPlain() elemental.PlainIdentifiable {
out := NewOAUTHKey()
if o.KeyString != nil {
out.KeyString = *o.KeyString
}
return out
}
// DeepCopy returns a deep copy of the SparseOAUTHKey.
func (o *SparseOAUTHKey) DeepCopy() *SparseOAUTHKey {
if o == nil {
return nil
}
out := &SparseOAUTHKey{}
o.DeepCopyInto(out)
return out
}
// DeepCopyInto copies the receiver into the given *SparseOAUTHKey.
func (o *SparseOAUTHKey) DeepCopyInto(out *SparseOAUTHKey) {
target, err := copystructure.Copy(o)
if err != nil {
panic(fmt.Sprintf("Unable to deepcopy SparseOAUTHKey: %s", err))
}
*out = *target.(*SparseOAUTHKey)
}
type mongoAttributesOAUTHKey struct {
}
type mongoAttributesSparseOAUTHKey struct {
}
|
[
7
] |
// Copyright 2012 The Probab Authors. All rights reserved. See the LICENSE file.
package dst
// Wishart distribution.
// A generalization to multiple dimensions of the chi-squared distribution, or, in the case of non-integer degrees of freedom, of the gamma distribution.
// Wishart, J. (1928). "The generalised product moment distribution in samples from a normal multivariate population". Biometrika 20A (1-2): 32–52. doi:10.1093/biomet/20A.1-2.32.
// Parameters:
// n > p-1 degrees of freedom (real)
// V > 0 pxp scale matrix (positive definite, real)
//
// Support:
// X pxp positive definite, real
import (
m "github.com/skelterjohn/go.matrix"
)
// WishartPDF returns the PDF of the Wishart distribution.
func WishartPDF(n int, V *m.DenseMatrix) func(W *m.DenseMatrix) float64 {
p := V.Rows()
Vdet := V.Det()
Vinv, _ := V.Inverse()
normalization := pow(2, -0.5*float64(n*p)) *
pow(Vdet, -0.5*float64(n)) /
Γ(0.5*float64(n))
return func(W *m.DenseMatrix) float64 {
VinvW, _ := Vinv.Times(W)
return normalization * pow(W.Det(), 0.5*float64(n-p-1)) *
exp(-0.5*VinvW.Trace())
}
}
// WishartLnPDF returns the natural logarithm of the PDF of the Wishart distribution.
func WishartLnPDF(n int, V *m.DenseMatrix) func(W *m.DenseMatrix) float64 {
p := V.Rows()
Vdet := V.Det()
Vinv, _ := V.Inverse()
normalization := log(2)*(-0.5*float64(n*p)) +
log(Vdet)*(-0.5*float64(n)) -
LnΓ(0.5*float64(n))
return func(W *m.DenseMatrix) float64 {
VinvW, _ := Vinv.Times(W)
return normalization +
log(W.Det())*0.5*float64(n-p-1) -
0.5*VinvW.Trace()
}
}
// WishartNext returns random number drawn from the Wishart distribution.
func WishartNext(n int, V *m.DenseMatrix) *m.DenseMatrix {
return Wishart(n, V)()
}
// Wishart returns the random number generator with Wishart distribution.
func Wishart(n int, V *m.DenseMatrix) func() *m.DenseMatrix {
p := V.Rows()
zeros := m.Zeros(p, 1)
rowGen := MVNormal(zeros, V)
return func() *m.DenseMatrix {
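		// Stack n independent draws from N(0, V) into an n×p matrix X; the
		// scatter matrix Xᵀ·X then follows a Wishart(n, V) distribution.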
x := make([][]float64, n)
for i := 0; i < n; i++ {
x[i] = rowGen().Array()
}
X := m.MakeDenseMatrixStacked(x)
S, _ := X.Transpose().TimesDense(X)
return S
}
}
|
[
2
] |
package main
import (
"bytes"
"encoding/csv"
"fmt"
"io"
"os"
"strconv"
)
type RateArea struct {
State string
Num string
}
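// RateAreas maps a zipcode to the set of rate areas it spans; Plans maps a
// rate area to the rates of every silver plan offered in it.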
var RateAreas = make(map[string]map[RateArea]struct{}, 0)
var Plans = make(map[RateArea][]float64, 0)
func getSLCSP(zip string) []byte {
// If a zipcode has more than one rate area, it's impossible to determine
// the SLCSP from the zipcode alone, as each rate area has a unique SLCSP.
if len(RateAreas[zip]) != 1 {
return nil
}
// Get rate area for the zipcode. This will only loop once.
rateArea := RateArea{}
	for ra := range RateAreas[zip] {
rateArea = ra
}
// Impossible to determine SLCSP if the zipcode's rate area has no silver
// plans.
if len(Plans[rateArea]) == 0 {
return nil
}
	var (
		rate bytes.Buffer
		min  float64 // lowest cost silver plan rate
		mid  float64 // second lowest (distinct) cost silver plan rate
	)
	for _, r := range Plans[rateArea] {
		switch {
		case min == 0 || r < min:
			// A new lowest rate pushes the previous lowest down to second place.
			min, mid = r, min
		case r > min && (mid == 0 || r < mid):
			mid = r
		}
	}
	// With fewer than two distinct silver rates the SLCSP cannot be determined.
	if mid == 0 {
		return nil
	}
rate.WriteString(fmt.Sprintf("%.2f", mid))
return rate.Bytes()
}
// Load and populate RateAreas and Plans from plans.csv and zips.csv.
func init() {
plansFile, err := os.Open("plans.csv")
if err != nil {
panic(err)
}
plans := csv.NewReader(plansFile)
// Discard the header line.
plans.Read()
var (
record []string
rate float64
rateArea = RateArea{}
)
for {
if record, err = plans.Read(); err != nil {
if err == io.EOF {
break
}
panic(err)
}
// Only silver plans are used to determine SLCSPs.
if record[2] != "Silver" {
continue
}
// Get the rate area this silver plan belongs to.
rateArea = RateArea{
State: record[1],
Num: record[4],
}
// Get the rate of this silver plan.
		if rate, err = strconv.ParseFloat(record[3], 64); err != nil {
panic(err)
}
// Add this silver plan to the rate area's collection.
Plans[rateArea] = append(
Plans[rateArea],
rate,
)
rateArea = RateArea{}
}
zipsFile, err := os.Open("zips.csv")
if err != nil {
panic(err)
}
zips := csv.NewReader(zipsFile)
// Discard the header line.
zips.Read()
for {
if record, err = zips.Read(); err != nil {
if err == io.EOF {
break
}
panic(err)
}
// Get a rate area of this zip.
rateArea = RateArea{
State: record[1],
Num: record[4],
}
if _, ok := RateAreas[record[0]]; !ok {
RateAreas[record[0]] = make(map[RateArea]struct{}, 0)
}
// Update the zipcode's list of rate areas.
RateAreas[record[0]][rateArea] = struct{}{}
rateArea = RateArea{}
}
}
func main() {
slcspFile, err := os.Open("slcsp.csv")
if err != nil {
panic(err)
}
slcsp := csv.NewReader(slcspFile)
outFile, err := os.Create("slcsp_full.csv")
if err != nil {
panic(err)
}
var (
row bytes.Buffer
record []string
i int
offset int64 // current byte position in the file
)
for {
if record, err = slcsp.Read(); err != nil {
if err == io.EOF {
break
}
panic(err)
}
row.WriteString(record[0])
row.WriteString(",")
// If current line is the header, don't attempt to get the SLCSP.
if i == 0 {
row.WriteString(record[1])
} else {
row.Write(getSLCSP(record[0]))
}
row.WriteString("\n")
outFile.WriteAt(row.Bytes(), offset)
offset += int64(row.Len())
row.Reset()
i++
}
outFile.Sync()
}
|
[
1
] |
package main
import sf "github.com/zyedidia/sfml/v2.3/sfml"
import "runtime"
const (
screenWidth = 800
screenHeight = 600
)
var textures map[string]*sf.Texture
func LoadTexture(filename string) {
texture := sf.NewTexture(filename)
textures[filename] = texture
}
func main() {
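	// SFML windowing/rendering must stay on the OS thread that created the
	// window, so pin this goroutine before creating it.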
runtime.LockOSThread()
textures = make(map[string]*sf.Texture)
window := sf.NewRenderWindow(sf.VideoMode{screenWidth, screenHeight, 32}, "Rectangle", sf.StyleDefault, nil)
window.SetVerticalSyncEnabled(true)
window.SetFramerateLimit(60)
LoadTexture("playerShip1_blue.png")
sprite := sf.NewSprite(textures["playerShip1_blue.png"])
size := sprite.GetGlobalBounds()
sprite.SetOrigin(sf.Vector2f{size.Width / 2, size.Height / 2})
sprite.SetPosition(sf.Vector2f{400, 300})
soundBuf := sf.NewSoundBuffer("sfx_laser1.ogg")
sound := sf.NewSound(soundBuf)
sound.Play()
for window.IsOpen() {
if event := window.PollEvent(); event != nil {
switch event.Type {
case sf.EventClosed:
window.Close()
}
}
sprite.Rotate(3)
window.Clear(sf.ColorWhite)
window.Draw(sprite)
window.Display()
}
}
|
[
7
] |
package web
import (
"encoding/json"
"errors"
"sync"
)
// Message define one single event message
type Message struct {
Level string `json:"level"`
Time string `json:"time"`
Msg string `json:"msg"`
Result bool `json:"result"`
}
// GlobalMessages define the Messages Manager
type GlobalMessages struct {
locker sync.RWMutex
MaxSize int
Events []Message `json:"events"`
}
// MessagesHub for global message hub
var MessagesHub *GlobalMessages
func init() {
MessagesHub = NewGloabalMessages(50)
SetupLogger()
}
// Len return current message length
func (g *GlobalMessages) Len() int {
g.locker.RLock()
defer g.locker.RUnlock()
return len(g.Events)
}
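// Write implements io.Writer: each call is expected to carry one JSON-encoded
// Message, which allows a JSON-emitting logger to be pointed directly at the hub.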
func (g *GlobalMessages) Write(p []byte) (n int, err error) {
g.locker.Lock()
defer g.locker.Unlock()
event := Message{}
if err := json.Unmarshal(p, &event); err != nil {
return 0, err
}
if event.Msg == "" {
return 0, errors.New("Empty Messages")
}
	g.Events = append(g.Events, event)
	// Enforce the configured cap so the buffer cannot grow without bound between reads.
	if g.MaxSize > 0 && len(g.Events) > g.MaxSize {
		g.Events = g.Events[len(g.Events)-g.MaxSize:]
	}
	return len(p), nil
}
// Get get current message and delete it from messages manager
func (g *GlobalMessages) Get() ([]byte, error) {
g.locker.Lock()
defer g.locker.Unlock()
if len(g.Events) == 0 {
return []byte{}, nil
}
result, err := json.Marshal(g.Events)
if err != nil {
return nil, err
}
g.Events = nil
return result, nil
}
// NewGloabalMessages creates a new messages manager
func NewGloabalMessages(max int) *GlobalMessages {
events := make([]Message, 0)
return &GlobalMessages{
MaxSize: max,
Events: events,
}
}
|
[
1
] |
package main
import "fmt"
func main() {
nums := []int{100, 4, 200, 1, 3, 2}
fmt.Println(longestConsecutive(nums))
}
/**
Approach: hash map.
1. Put every number into a hash map.
2. For each number, extend to the previous and next numbers while they exist in the map, deleting visited entries.
3. Keep track of the longest run found.
Time complexity: O(n)
Space complexity: O(n)
*/
func longestConsecutive(nums []int) int {
var hashmap = make(map[int]int)
for i := 0; i < len(nums); i++ {
hashmap[nums[i]] = i
}
var max int
	for v := range hashmap {
delete(hashmap, v)
// pre
pre := v - 1
for {
if _, ok := hashmap[pre]; ok {
delete(hashmap, pre)
pre--
} else {
break
}
}
// next
next := v + 1
for {
if _, ok := hashmap[next]; ok {
delete(hashmap, next)
next++
} else {
break
}
}
if next-pre-1 > max {
max = next - pre - 1
}
}
return max
}
|
[
1
] |
package main
// https://leetcode-cn.com/problems/richest-customer-wealth
func maximumWealth(accounts [][]int) int {
var max int
for _, account := range accounts {
var cur int
for _, v := range account {
cur += v
}
if max < cur {
max = cur
}
}
return max
}
|
[
1
] |
package main
import (
"encoding/json"
. "fmt"
"math/rand"
"net/http"
"strconv"
"strings"
"time"
)
type Alumno struct {
ID uint64
Nombre string `json:"nombre"`
Calif map[uint64]float64
}
type Materia struct {
ID uint64
Nombre string `json:"nombre"`
}
type Calif struct {
Al uint64 `json:"id-al"`
Ma uint64 `json:"id-ma"`
Cali float64 `json:"calif"`
}
type Clase struct {
Alumnos map[uint64]*Alumno
Materias map[uint64]*Materia
}
func getID() uint64 {
s1 := rand.NewSource(time.Now().UnixNano())
r1 := rand.New(s1)
return r1.Uint64()
}
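// clase is the in-memory store shared by every handler. Plain Go maps are not
// safe for concurrent writes, so a real deployment would need to guard it with a mutex.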
var clase Clase
func Add_Alumno(alumno Alumno) []byte {
jsonData := []byte(`{"code": "ok"}`)
clase.Alumnos[alumno.ID] = &alumno
return jsonData
}
func Add_Materia(materia Materia) []byte {
jsonData := []byte(`{"code": "ok"}`)
clase.Materias[materia.ID] = &materia
return jsonData
}
func Add_Calif(calif Calif) []byte {
	if _, ok := clase.Alumnos[calif.Al]; !ok {
		return []byte(`{"code": "noexistealumno"}`)
	}
	if _, ok := clase.Materias[calif.Ma]; !ok {
		return []byte(`{"code": "noexistemateria"}`)
	}
	if _, ok := clase.Alumnos[calif.Al].Calif[calif.Ma]; ok {
		return []byte(`{"code": "yatienecalif"}`)
	}
clase.Alumnos[calif.Al].Calif[calif.Ma] = calif.Cali
return []byte(`{"code": "ok"}`)
}
func Delete_Alumno(id uint64) []byte {
	if _, ok := clase.Alumnos[id]; !ok {
		return []byte(`{"code": "noexiste"}`)
	}
delete(clase.Alumnos, id)
return []byte(`{"code": "ok"}`)
}
func Update_Calif(calif Calif) []byte {
	if _, ok := clase.Alumnos[calif.Al]; !ok {
		return []byte(`{"code": "noexistealumno"}`)
	}
	if _, ok := clase.Alumnos[calif.Al].Calif[calif.Ma]; !ok {
		return []byte(`{"code": "noexistemateria"}`)
	}
clase.Alumnos[calif.Al].Calif[calif.Ma] = calif.Cali
return []byte(`{"code": "ok"}`)
}
func Get() ([]byte, error) {
	jsonData, err := json.MarshalIndent(clase, "", " ")
	if err != nil {
		return nil, err
	}
	return jsonData, nil
}
func GetAlumno(id uint64) ([]byte, error) {
jsonData := []byte(`{}`)
	al, ok := clase.Alumnos[id]
	if !ok {
		return jsonData, nil
	}
jsonData, err := json.MarshalIndent(al, "", " ")
if err != nil {
return jsonData, err
}
return jsonData, nil
}
func alumnos(res http.ResponseWriter, req *http.Request) {
Println(req.Method)
switch req.Method {
case "POST":
var alumno Alumno
err := json.NewDecoder(req.Body).Decode(&alumno)
if err != nil {
http.Error(res, err.Error(), http.StatusInternalServerError)
return
}
alumno.ID = getID()
alumno.Calif = make(map[uint64]float64)
res_json := Add_Alumno(alumno)
res.Header().Set(
"Content-Type",
"application/json",
)
res.Write(res_json)
}
}
func materias(res http.ResponseWriter, req *http.Request) {
Println(req.Method)
switch req.Method {
case "POST":
var materia Materia
err := json.NewDecoder(req.Body).Decode(&materia)
if err != nil {
http.Error(res, err.Error(), http.StatusInternalServerError)
return
}
materia.ID = getID()
res_json := Add_Materia(materia)
res.Header().Set(
"Content-Type",
"application/json",
)
res.Write(res_json)
}
}
func alumnoID(res http.ResponseWriter, req *http.Request) {
id, err := strconv.ParseUint(strings.TrimPrefix(req.URL.Path, "/clase/alumnos/"), 10, 64)
if err != nil {
http.Error(res, err.Error(), http.StatusInternalServerError)
return
}
Println(req.Method, id)
switch req.Method {
case "GET":
res_json, err := GetAlumno(id)
		if err != nil {
			http.Error(res, err.Error(), http.StatusInternalServerError)
			return
		}
res.Header().Set(
"Content-Type",
"application/json",
)
res.Write(res_json)
case "DELETE":
res_json := Delete_Alumno(id)
res.Header().Set(
"Content-Type",
"application/json",
)
res.Write(res_json)
}
}
func miclase(res http.ResponseWriter, req *http.Request) {
Println(req.Method)
switch req.Method {
case "GET":
res_json, err := Get()
		if err != nil {
			http.Error(res, err.Error(), http.StatusInternalServerError)
			return
		}
res.Header().Set(
"Content-Type",
"application/json",
)
res.Write(res_json)
}
}
func calif(res http.ResponseWriter, req *http.Request) {
Println(req.Method)
switch req.Method {
case "POST":
var calif Calif
err := json.NewDecoder(req.Body).Decode(&calif)
if err != nil {
http.Error(res, err.Error(), http.StatusInternalServerError)
return
}
res_json := Add_Calif(calif)
res.Header().Set(
"Content-Type",
"application/json",
)
res.Write(res_json)
case "PUT":
var calif Calif
err := json.NewDecoder(req.Body).Decode(&calif)
if err != nil {
http.Error(res, err.Error(), http.StatusInternalServerError)
return
}
res_json := Update_Calif(calif)
res.Header().Set(
"Content-Type",
"application/json",
)
		res.Write(res_json) // return the JSON response to the client
}
}
func main() {
clase = Clase{
Alumnos: map[uint64]*Alumno{},
Materias: map[uint64]*Materia{},
}
http.HandleFunc("/clase/alumnos", alumnos)
http.HandleFunc("/clase/alumnos/", alumnoID)
http.HandleFunc("/clase/materias", materias)
http.HandleFunc("/clase", miclase)
http.HandleFunc("/clase/", miclase)
http.HandleFunc("/clase/calif", calif)
Println("Corriendo RESTful API...")
http.ListenAndServe(":9000", nil)
}
|
[
7
] |
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
// Run make gen at FeG to re-generate
package mocks
import mock "github.com/stretchr/testify/mock"
// ServerConnectionInterface is an autogenerated mock type for the ServerConnectionInterface type
type ServerConnectionInterface struct {
mock.Mock
}
// AcceptConn provides a mock function with given fields:
func (_m *ServerConnectionInterface) AcceptConn() error {
ret := _m.Called()
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// CloseConn provides a mock function with given fields:
func (_m *ServerConnectionInterface) CloseConn() error {
ret := _m.Called()
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// CloseListener provides a mock function with given fields:
func (_m *ServerConnectionInterface) CloseListener() error {
ret := _m.Called()
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// ConnectionEstablished provides a mock function with given fields:
func (_m *ServerConnectionInterface) ConnectionEstablished() bool {
ret := _m.Called()
var r0 bool
if rf, ok := ret.Get(0).(func() bool); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// ReceiveThroughListener provides a mock function with given fields:
func (_m *ServerConnectionInterface) ReceiveThroughListener() ([]byte, error) {
ret := _m.Called()
var r0 []byte
if rf, ok := ret.Get(0).(func() []byte); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]byte)
}
}
var r1 error
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// SendFromServer provides a mock function with given fields: _a0
func (_m *ServerConnectionInterface) SendFromServer(_a0 []byte) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func([]byte) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}
// StartListener provides a mock function with given fields: ipAddr, port
func (_m *ServerConnectionInterface) StartListener(ipAddr string, port int) (int, error) {
ret := _m.Called(ipAddr, port)
var r0 int
if rf, ok := ret.Get(0).(func(string, int) int); ok {
r0 = rf(ipAddr, port)
} else {
r0 = ret.Get(0).(int)
}
var r1 error
if rf, ok := ret.Get(1).(func(string, int) error); ok {
r1 = rf(ipAddr, port)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
|
[
4
] |
// session
package netlib
import (
"fmt"
"net"
"runtime"
"strconv"
"time"
"github.com/idealeak/goserver/core/logger"
"github.com/idealeak/goserver/core/utils"
)
var (
SendRoutinePoison *packet = nil
)
type TcpSession struct {
Session
conn net.Conn
}
func newTcpSession(id int, conn net.Conn, sc *SessionConfig, scl SessionCloseListener) *TcpSession {
s := &TcpSession{
conn: conn,
}
s.Session.Id = id
s.Session.sc = sc
s.Session.scl = scl
s.Session.createTime = time.Now()
s.Session.waitor = utils.NewWaitor("netlib.TcpSession")
s.Session.impl = s
s.init()
return s
}
func (s *TcpSession) init() {
s.Session.init()
}
func (s *TcpSession) LocalAddr() string {
return s.conn.LocalAddr().String()
}
func (s *TcpSession) RemoteAddr() string {
return s.conn.RemoteAddr().String()
}
func (s *TcpSession) start() {
s.lastRcvTime = time.Now()
go s.recvRoutine()
go s.sendRoutine()
}
func (s *TcpSession) sendRoutine() {
name := fmt.Sprintf("TcpSession.sendRoutine(%v_%v)", s.sc.Name, s.Id)
s.waitor.Add(name, 1)
defer func() {
if err := recover(); err != nil {
if !s.sc.IsClient && s.sc.IsInnerLink {
logger.Logger.Warn(s.Id, " ->close: TcpSession.sendRoutine err: ", err)
} else {
logger.Logger.Trace(s.Id, " ->close: TcpSession.sendRoutine err: ", err)
}
}
s.sc.encoder.FinishEncode(&s.Session)
s.shutWrite()
s.shutRead()
s.Close()
s.waitor.Done(name)
}()
var (
err error
data []byte
)
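	// Keep draining the send buffer until a quit has been requested and no
	// queued packets remain.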
for !s.quit || len(s.sendBuffer) != 0 {
if s.PendingSnd {
runtime.Gosched()
continue
}
select {
case packet, ok := <-s.sendBuffer:
if !ok {
panic("[comm expt]sendBuffer chan closed")
}
if packet == nil {
panic("[comm expt]normal close send")
}
if s.sc.IsInnerLink {
var timeZero time.Time
s.conn.SetWriteDeadline(timeZero)
} else {
if s.sc.WriteTimeout != 0 {
s.conn.SetWriteDeadline(time.Now().Add(s.sc.WriteTimeout))
}
}
data, err = s.sc.encoder.Encode(&s.Session, packet.packetid, packet.logicno, packet.data, s.conn)
if err != nil {
logger.Logger.Trace("s.sc.encoder.Encode err", err)
if s.sc.IsInnerLink == false {
FreePacket(packet)
panic(err)
}
}
			// Fire the sent event before the packet is returned to the pool.
			s.FirePacketSent(packet.packetid, packet.logicno, data)
			FreePacket(packet)
s.lastSndTime = time.Now()
}
}
}
func (s *TcpSession) recvRoutine() {
name := fmt.Sprintf("TcpSession.recvRoutine(%v_%v)", s.sc.Name, s.Id)
s.waitor.Add(name, 1)
defer func() {
if err := recover(); err != nil {
if !s.sc.IsClient && s.sc.IsInnerLink {
logger.Logger.Warn(s.Id, " ->close: TcpSession.recvRoutine err: ", err)
} else {
logger.Logger.Trace(s.Id, " ->close: TcpSession.recvRoutine err: ", err)
}
}
s.sc.decoder.FinishDecode(&s.Session)
s.shutRead()
s.Close()
s.waitor.Done(name)
}()
var (
err error
pck interface{}
packetid int
logicNo uint32
raw []byte
)
for {
if s.PendingRcv {
runtime.Gosched()
continue
}
if s.sc.IsInnerLink {
var timeZero time.Time
s.conn.SetReadDeadline(timeZero)
} else {
if s.sc.ReadTimeout != 0 {
s.conn.SetReadDeadline(time.Now().Add(s.sc.ReadTimeout))
}
}
packetid, logicNo, pck, err, raw = s.sc.decoder.Decode(&s.Session, s.conn)
if err != nil {
bUnproc := true
bPackErr := false
if _, ok := err.(*UnparsePacketTypeErr); ok {
bPackErr = true
if s.sc.eph != nil && s.sc.eph.OnErrorPacket(&s.Session, packetid, logicNo, raw) {
bUnproc = false
}
}
if bUnproc {
logger.Logger.Tracef("s.sc.decoder.Decode(packetid:%v) err:%v ", packetid, err)
if s.sc.IsInnerLink == false {
panic(err)
} else if !bPackErr {
panic(err)
}
}
}
if pck != nil {
if s.FirePacketReceived(packetid, logicNo, pck) {
act := AllocAction()
act.s = &s.Session
act.p = pck
act.packid = packetid
act.logicNo = logicNo
act.n = "packet:" + strconv.Itoa(packetid)
s.recvBuffer <- act
}
}
s.lastRcvTime = time.Now()
}
}
func (s *TcpSession) shutRead() {
if s.shutRecv {
return
}
logger.Logger.Trace(s.Id, " shutRead")
s.shutRecv = true
if tcpconn, ok := s.conn.(*net.TCPConn); ok {
tcpconn.CloseRead()
}
}
func (s *TcpSession) shutWrite() {
if s.shutSend {
return
}
logger.Logger.Trace(s.Id, " shutWrite")
rest := len(s.sendBuffer)
for rest > 0 {
packet := <-s.sendBuffer
if packet != nil {
FreePacket(packet)
}
rest--
}
s.shutSend = true
if tcpconn, ok := s.conn.(*net.TCPConn); ok {
tcpconn.CloseWrite()
}
}
func (s *TcpSession) canShutdown() bool {
return s.shutRecv && s.shutSend
}
|
[
4
] |
package compiler
import (
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/Spriithy/rosa/pkg/compiler/fragments"
"github.com/Spriithy/rosa/pkg/compiler/text"
)
type Scanner struct {
path string
source []rune
tokens []text.Token
start int
current int
line int
lastNewline int
Logs []Log
openComments int
parens stack
tokenData strings.Builder
}
type tokenStack []text.Token
type stack interface {
push(text.Token)
peek() text.Token
pop() text.Token
isEmpty() bool
}
func (s *tokenStack) push(token text.Token) {
*s = append(*s, token)
}
func (s *tokenStack) pop() (token text.Token) {
if s.isEmpty() {
return
}
last := len(*s) - 1
token = (*s)[last]
*s = (*s)[:last]
return
}
func (s *tokenStack) peek() (token text.Token) {
if s.isEmpty() {
return
}
last := len(*s) - 1
token = (*s)[last]
return
}
func (s *tokenStack) isEmpty() bool {
return len(*s) == 0
}
func fileExists(path string) bool {
info, err := os.Stat(path)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
func NewScanner(path string) (scanner *Scanner) {
if !fileExists(path) {
fmt.Printf("error: %s doesn't exist\n", path)
return
}
source, err := ioutil.ReadFile(path)
if err != nil {
fmt.Printf("error: failed to open %s\n", path)
return
}
scanner = &Scanner{
path: path,
line: 1,
source: []rune(string(source)),
parens: new(tokenStack),
}
return
}
////////////////////////////////////////////////////////////////////////////////
func (s *Scanner) error(pos text.Pos, message string, args ...interface{}) {
s.Logs = append(s.Logs, Log{
Path: s.path,
Level: LogError,
Pos: pos,
Message: fmt.Sprintf(message, args...),
})
}
func (s *Scanner) syntaxError(pos text.Pos, message string, args ...interface{}) {
s.Logs = append(s.Logs, Log{
Path: s.path,
Level: LogSyntaxError,
Pos: pos,
Message: fmt.Sprintf(message, args...),
})
}
func (s *Scanner) eof() bool {
return s.current >= len(s.source)
}
func (s *Scanner) col() int {
return s.start - s.lastNewline + 1
}
func (s *Scanner) currentCol() int {
return s.current - s.lastNewline + 1
}
func (s *Scanner) pos() text.Pos {
return text.Pos{
FileName: s.path,
Line: s.line,
Column: s.col(),
}
}
func (s *Scanner) currentPos() text.Pos {
return text.Pos{
FileName: s.path,
Line: s.line,
Column: s.currentCol(),
}
}
func (s *Scanner) peek() rune {
if s.eof() {
return text.SU
}
return s.source[s.current]
}
func (s *Scanner) advance() rune {
if s.peek() == '\n' {
s.lastNewline = s.current + 1
s.line++
}
s.ingest(s.source[s.current])
s.current++
return s.source[s.current-1]
}
func (s *Scanner) skipRune() {
if s.peek() == '\n' {
s.lastNewline = s.current + 1
s.line++
}
s.current++
}
func (s *Scanner) accept(expected ...rune) bool {
if s.eof() {
return false
}
for _, r := range expected {
if s.peek() == r {
s.advance()
return true
}
}
return false
}
func (s *Scanner) acceptIf(fs ...fragments.Fragment) bool {
if s.eof() {
return false
}
for _, f := range fs {
if f(s.peek()) {
s.advance()
return true
}
}
return false
}
func (s *Scanner) match(accepted ...rune) bool {
if s.eof() {
return false
}
for _, r := range accepted {
if s.peek() == r {
return true
}
}
return false
}
func (s *Scanner) matchIf(fs ...fragments.Fragment) bool {
if s.eof() {
return false
}
for _, f := range fs {
if f(s.peek()) {
return true
}
}
return false
}
func (s *Scanner) many(f fragments.Fragment) bool {
for s.acceptIf(f) {
}
return true
}
func (s *Scanner) atLeastOne(f fragments.Fragment) bool {
if s.acceptIf(f) {
for s.acceptIf(f) {
}
return true
}
return false
}
func (s *Scanner) text() string {
return string(s.source[s.start:s.current])
}
func (s *Scanner) data() string {
return s.tokenData.String()
}
func (s *Scanner) ingest(r rune) {
s.tokenData.WriteRune(r)
}
func (s *Scanner) tokenType() text.TokenType {
return text.TypeOfToken(s.data())
}
func (s *Scanner) wrapToken() text.Token {
return text.Token{
Text: s.data(),
Type: s.tokenType(),
Pos: s.pos(),
Spans: s.start + len(s.text()),
}
}
func (s *Scanner) wrapTokenAs(typ text.TokenType) text.Token {
return text.Token{
Text: s.data(),
Type: typ,
Pos: s.pos(),
Spans: s.start + len(s.text()),
}
}
func (s *Scanner) wrapTokenWith(typ text.TokenType, data string) text.Token {
return text.Token{
Text: data,
Type: typ,
Pos: s.pos(),
Spans: s.start + len(s.text()),
}
}
func (s *Scanner) Scan() (token text.Token) {
if s.eof() {
// report all unmatched parens (, [, {
for !s.parens.isEmpty() {
paren := s.parens.pop()
s.syntaxError(paren.Pos, "unmatched %s", paren.Type)
}
token = text.Token{
Type: text.EOF,
Pos: s.currentPos(),
}
return
}
token = s.next()
s.tokens = append(s.tokens, token)
return
}
func (s *Scanner) next() (token text.Token) {
s.tokenData.Reset()
s.start = s.current // reset token pos
switch {
case s.eof():
token = s.wrapTokenWith(text.EOF, s.text())
case s.match(' ', '\t', text.CR, text.LF, text.FF):
s.skipRune()
token = s.next()
case s.acceptIf(text.IdentStart):
s.identRest()
token = s.wrapToken()
case s.match('/'):
s.skipRune()
if s.skipComment() {
token = s.next()
} else {
s.ingest('/')
s.operatorRest()
token = s.wrapToken()
}
case s.acceptIf(text.IsOperatorPart):
s.operatorRest()
token = s.wrapToken()
case s.accept('0'):
switch {
case s.accept('b', 'B'):
token = s.binary()
case s.accept('x', 'X'):
token = s.hexadecimal()
		default:
			token = s.number()
		}
case s.acceptIf(text.NonZeroDigit):
token = s.number()
case s.acceptIf(text.IsSeparator):
token = s.wrapToken()
switch {
case text.Lpar(token), text.Lbrk(token), text.Lbrc(token):
s.parens.push(token)
case text.Rpar(token), text.Rbrk(token), text.Rbrc(token):
switch {
case s.parens.isEmpty():
s.syntaxError(s.pos(), "%s unexpected", token.Type)
case text.IsParenMatch(s.parens.peek(), token):
s.parens.pop()
default:
s.syntaxError(s.pos(), "%s unexpected", token.Type)
}
}
case s.match('"'):
s.skipRune()
if s.match('"') {
s.skipRune()
if s.match('"') {
// s.rawString()
}
} else {
s.stringLit()
}
token = s.wrapTokenAs(text.StringLit)
case s.match('\''):
s.charLit()
token = s.wrapTokenAs(text.CharLit)
default:
s.advance()
token = s.wrapTokenWith(text.ErrorType, s.text())
}
return
}
////////////////////////////////////////////////////////////////////////////////
// Comments
func (s *Scanner) skipComment() bool {
switch ch := s.peek(); {
case s.match('/', '*'):
s.skipRune()
s.skipCommentToEnd(ch == '/')
return true
}
return false
}
func (s *Scanner) skipCommentToEnd(isLineComment bool) {
if isLineComment {
s.skipLineComment()
} else {
s.openComments = 1
s.skipNestedComments()
}
}
func (s *Scanner) skipLineComment() {
for !s.match(text.SU, text.CR, text.LF) {
s.skipRune()
}
}
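// skipNestedComments consumes a block comment, using openComments as a nesting
// counter so that /* ... /* ... */ ... */ closes only at the outermost level.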
func (s *Scanner) skipNestedComments() {
switch s.peek() {
case '/':
s.maybeOpen()
s.skipNestedComments()
case '*':
if !s.maybeClose() {
s.skipNestedComments()
}
case text.SU:
s.error(s.currentPos(), "unclosed multiline comment")
default:
s.skipRune()
s.skipNestedComments()
}
}
func (s *Scanner) maybeOpen() {
s.skipRune()
if s.match('*') {
s.skipRune()
s.openComments++
}
}
func (s *Scanner) maybeClose() bool {
s.skipRune()
if s.match('/') {
s.skipRune()
s.openComments--
}
return s.openComments == 0
}
////////////////////////////////////////////////////////////////////////////////
// Identifiers & Operators
func (s *Scanner) identRest() {
switch {
case s.acceptIf(text.IdentRest):
s.identRest()
case s.accept('_'):
s.identOrOperatorRest()
case s.acceptIf(text.IsIdentifierPart):
s.identRest()
}
}
func (s *Scanner) identOrOperatorRest() {
switch {
case s.matchIf(text.IsIdentifierPart):
s.identRest()
case s.matchIf(text.IsOperatorPart):
s.operatorRest()
}
}
func (s *Scanner) operatorRest() {
switch {
case s.accept('/'):
if !s.skipComment() {
s.ingest('/')
}
case s.acceptIf(text.IsOperatorPart):
s.operatorRest()
case s.acceptIf(text.IsSpecial):
s.operatorRest()
}
}
////////////////////////////////////////////////////////////////////////////////
// Numbers
func (s *Scanner) base(digits fragments.Fragment, baseName string) (token text.Token) {
	if !s.atLeastOne(digits) {
s.syntaxError(s.currentPos(), "expected at least one digit in %s integer literal", baseName)
token = s.wrapTokenAs(text.IntegerLit)
return
}
content := s.text()
if offset := strings.IndexFunc(content[2:], fragments.Not(digits)); offset >= 0 {
pos := s.pos()
pos.Column += offset
s.syntaxError(pos, "unexpected digit in %s literal: '%c'", baseName, content[2:][offset])
}
token = s.wrapTokenAs(text.IntegerLit)
return
}
func (s *Scanner) binary() (token text.Token) {
return s.base(text.BinaryDigit, "binary")
}
func (s *Scanner) octal() (token text.Token) {
return s.base(text.OctalDigit, "octal")
}
func (s *Scanner) decimal() (token text.Token) {
return s.base(text.Digit, "decimal")
}
func (s *Scanner) hexadecimal() (token text.Token) {
return s.base(text.HexDigit, "hexadecimal")
}
func (s *Scanner) exponent() (token text.Token) {
if s.acceptIf(text.Exponent) {
s.accept('+', '-') // optional
if !s.atLeastOne(text.Digit) {
s.syntaxError(s.currentPos(), "expected at least one exponent digit in float literal")
content := s.data()
cut := strings.LastIndexFunc(content, text.Exponent)
token = s.wrapTokenWith(text.FloatLit, content[:cut])
return
}
}
token = s.wrapTokenAs(text.FloatLit)
return
}
func (s *Scanner) decimalPart() (token text.Token) {
if !s.atLeastOne(text.Digit) {
s.syntaxError(s.currentPos(), "expected at least one digit after decimal point in float literal, found '%c'", s.peek())
}
token = s.exponent()
return
}
func (s *Scanner) number() (token text.Token) {
switch {
case s.acceptIf(text.Digit):
token = s.number()
case s.accept('.'):
token = s.decimalPart()
case s.matchIf(text.Exponent):
token = s.exponent()
default:
token = s.wrapTokenWith(text.IntegerLit, s.text())
}
return
}
////////////////////////////////////////////////////////////////////////////////
// String, Char & escapes
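// escape reads the given number of digits accepted by `digits` (e.g. two hex
// digits for \xNN, four for \uNNNN) and ingests the decoded rune.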
func (s *Scanner) escape(digits fragments.Fragment, expected int) {
seq := text.EscapeBuffer(expected)
for n := 0; n < expected; n++ {
switch {
case s.matchIf(digits):
seq[n] = s.peek()
s.skipRune()
default:
s.syntaxError(s.currentPos(), "invalid character in escape sequence (found %q, expected hexadecimal digit)", s.peek())
return
}
}
s.ingest(seq.Rune())
}
func (s *Scanner) invalidEscape() {
s.syntaxError(s.currentPos(), "invalid escape character")
s.advance()
}
func (s *Scanner) litRune() {
switch {
case s.match('\\'):
s.skipRune()
switch s.peek() {
case 'b':
s.ingest('\b')
case 't':
s.ingest('\t')
case 'n':
s.ingest('\n')
case 'f':
s.ingest('\f')
case 'r':
s.ingest('\r')
case '"':
s.ingest('"')
case '\'':
s.ingest('\'')
case '\\':
s.ingest('\\')
case 'x', 'X':
s.skipRune()
s.escape(text.HexDigit, 2)
return
case 'u', 'U':
s.skipRune()
s.escape(text.HexDigit, 4)
return
default:
s.invalidEscape()
}
s.skipRune()
default:
s.advance()
}
}
func (s *Scanner) litRunes(del rune) {
for !s.match(del) && !s.eof() && !s.match(text.SU, text.CR, text.LF) {
s.litRune()
}
}
func (s *Scanner) stringLit() {
s.litRunes('"')
if s.match('"') {
s.skipRune()
} else {
s.syntaxError(s.currentPos(), "unclosed stringLit literal")
}
}
func (s *Scanner) charLit() {
s.skipRune()
switch {
case s.matchIf(text.IsIdentifierStart):
s.charLitOr(s.identRest)
case s.matchIf(text.IsOperatorPart) && !s.match('\\'):
s.charLitOr(s.operatorRest)
case !s.eof() && !s.match(text.SU, text.CR, text.LF):
emptyCharLit := s.match('\'')
s.litRune()
switch {
case s.match('\''):
if emptyCharLit {
s.syntaxError(s.pos(), "empty character literal (use '\\'' for single quote)")
} else {
s.skipRune()
}
case emptyCharLit:
s.syntaxError(s.pos(), "empty character literal")
default:
s.syntaxError(s.currentPos(), "unclosed character literal")
}
default:
s.syntaxError(s.currentPos(), "unclosed character literal")
}
}
func (s *Scanner) charLitOr(op func()) {
}
|
[
7
] |
// Code generated by mockery v2.10.0. DO NOT EDIT.
package mocks
import (
context "context"
mock "github.com/stretchr/testify/mock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/api/core/v1"
)
// Manager is an autogenerated mock type for the Manager type
type Manager struct {
mock.Mock
}
// Create provides a mock function with given fields: ctx, secret, options
func (_m *Manager) Create(ctx context.Context, secret *v1.Secret, options metav1.CreateOptions) (*v1.Secret, error) {
ret := _m.Called(ctx, secret, options)
var r0 *v1.Secret
if rf, ok := ret.Get(0).(func(context.Context, *v1.Secret, metav1.CreateOptions) *v1.Secret); ok {
r0 = rf(ctx, secret, options)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.Secret)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *v1.Secret, metav1.CreateOptions) error); ok {
r1 = rf(ctx, secret, options)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Delete provides a mock function with given fields: ctx, name, options
func (_m *Manager) Delete(ctx context.Context, name string, options metav1.DeleteOptions) error {
ret := _m.Called(ctx, name, options)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string, metav1.DeleteOptions) error); ok {
r0 = rf(ctx, name, options)
} else {
r0 = ret.Error(0)
}
return r0
}
// Get provides a mock function with given fields: ctx, name, options
func (_m *Manager) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Secret, error) {
ret := _m.Called(ctx, name, options)
var r0 *v1.Secret
if rf, ok := ret.Get(0).(func(context.Context, string, metav1.GetOptions) *v1.Secret); ok {
r0 = rf(ctx, name, options)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.Secret)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string, metav1.GetOptions) error); ok {
r1 = rf(ctx, name, options)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Update provides a mock function with given fields: ctx, secret, options
func (_m *Manager) Update(ctx context.Context, secret *v1.Secret, options metav1.UpdateOptions) (*v1.Secret, error) {
ret := _m.Called(ctx, secret, options)
var r0 *v1.Secret
if rf, ok := ret.Get(0).(func(context.Context, *v1.Secret, metav1.UpdateOptions) *v1.Secret); ok {
r0 = rf(ctx, secret, options)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.Secret)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *v1.Secret, metav1.UpdateOptions) error); ok {
r1 = rf(ctx, secret, options)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
|
[
4
] |
// Code generated by github.com/rogpeppe/gogen-avro/v7. DO NOT EDIT.
/*
* SOURCE:
* namespace.avsc
*/
package avro
import (
"github.com/rogpeppe/gogen-avro/v7/compiler"
"github.com/rogpeppe/gogen-avro/v7/vm"
"github.com/rogpeppe/gogen-avro/v7/vm/types"
"io"
)
// A Universally Unique Identifier, in canonical form in lowercase. Example: de305d54-75b4-431b-adb2-eb6b9e546014
type BodyworksDatatypeUUID struct {
Uuid string
}
func NewBodyworksDatatypeUUID() *BodyworksDatatypeUUID {
return &BodyworksDatatypeUUID{}
}
func DeserializeBodyworksDatatypeUUID(r io.Reader) (*BodyworksDatatypeUUID, error) {
t := NewBodyworksDatatypeUUID()
deser, err := compiler.CompileSchemaBytes([]byte(t.Schema()), []byte(t.Schema()))
if err != nil {
return nil, err
}
err = vm.Eval(r, deser, t)
if err != nil {
return nil, err
}
return t, err
}
func DeserializeBodyworksDatatypeUUIDFromSchema(r io.Reader, schema string) (*BodyworksDatatypeUUID, error) {
t := NewBodyworksDatatypeUUID()
deser, err := compiler.CompileSchemaBytes([]byte(schema), []byte(t.Schema()))
if err != nil {
return nil, err
}
err = vm.Eval(r, deser, t)
if err != nil {
return nil, err
}
return t, err
}
func writeBodyworksDatatypeUUID(r *BodyworksDatatypeUUID, w io.Writer) error {
var err error
err = vm.WriteString(r.Uuid, w)
if err != nil {
return err
}
return err
}
func (r *BodyworksDatatypeUUID) Serialize(w io.Writer) error {
return writeBodyworksDatatypeUUID(r, w)
}
func (r *BodyworksDatatypeUUID) Schema() string {
return "{\"doc\":\"A Universally Unique Identifier, in canonical form in lowercase. Example: de305d54-75b4-431b-adb2-eb6b9e546014\",\"fields\":[{\"default\":\"\",\"name\":\"uuid\",\"type\":\"string\"}],\"name\":\"UUID\",\"namespace\":\"bodyworks.datatype\",\"type\":\"record\"}"
}
func (r *BodyworksDatatypeUUID) SchemaName() string {
return "bodyworks.datatype.UUID"
}
func (_ *BodyworksDatatypeUUID) SetBoolean(v bool) { panic("Unsupported operation") }
func (_ *BodyworksDatatypeUUID) SetInt(v int32) { panic("Unsupported operation") }
func (_ *BodyworksDatatypeUUID) SetLong(v int64) { panic("Unsupported operation") }
func (_ *BodyworksDatatypeUUID) SetFloat(v float32) { panic("Unsupported operation") }
func (_ *BodyworksDatatypeUUID) SetDouble(v float64) { panic("Unsupported operation") }
func (_ *BodyworksDatatypeUUID) SetBytes(v []byte) { panic("Unsupported operation") }
func (_ *BodyworksDatatypeUUID) SetString(v string) { panic("Unsupported operation") }
func (_ *BodyworksDatatypeUUID) SetUnionElem(v int64) { panic("Unsupported operation") }
func (r *BodyworksDatatypeUUID) Get(i int) types.Field {
switch i {
case 0:
return (*types.String)(&r.Uuid)
}
panic("Unknown field index")
}
func (r *BodyworksDatatypeUUID) SetDefault(i int) {
switch i {
case 0:
r.Uuid = ""
return
}
panic("Unknown field index")
}
func (_ *BodyworksDatatypeUUID) AppendMap(key string) types.Field { panic("Unsupported operation") }
func (_ *BodyworksDatatypeUUID) AppendArray() types.Field { panic("Unsupported operation") }
func (_ *BodyworksDatatypeUUID) Finalize() {}
|
[
7
] |
// Program ictest confirms that all ICs and communications buses are working
package main
import (
"bitbucket.org/NickRyder/goipbus/ipbus"
"bitbucket.org/solidexperiment/readout-software/frontend"
"bitbucket.org/solidexperiment/readout-software/frontend/comms"
"bitbucket.org/solidexperiment/readout-software/frontend/ics"
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"os/user"
"strconv"
"strings"
"time"
)
type Result string
const (
ResSuccess Result = "success"
ResFailure Result = "failure"
ResWarning Result = "warning"
)
type Report struct {
Now time.Time
ID frontend.PlaneID
Address string
Tester string
Results map[string]Result
}
func New() Report {
res := make(map[string]Result)
u, err := user.Current()
user := ""
if err == nil {
user = u.Username
}
return Report{Now: time.Now(), Results:res, Tester: user}
}
var tests []string = []string{
"Contact",
"UniqueID",
"DigitalI2C",
"PowerChips",
"Voltages",
"Currents",
"LM82Temp",
"InDetectorI2C",
"Temperature",
"ADCControl",
"Analog0I2C",
"Analog1I2C",
"ClockSA"}
func (r Report) Write() error {
now := r.Now.Format("02Jan06_1504")
msg := fmt.Sprintf("Time: %s\nTester: %s\n", now, r.Tester)
msg += fmt.Sprintf("Board: %012x\n", r.ID)
msg += fmt.Sprintf("Address: %s\nTests:\n", r.Address)
fail := false
nwarn := 0
for _, test := range tests {
res := r.Results[test]
msg += fmt.Sprintf("\t%s:\t%s\n", test, string(res))
if res == ResFailure {
fail = true
}
if res == ResWarning {
nwarn++
}
}
data := []byte(msg)
fn := fmt.Sprintf("reports/report_ictest_%012x_%s.txt", r.ID, now)
fmt.Println(msg)
fmt.Printf("Writing report to %s\n", fn)
if nwarn > 0 {
fmt.Printf("\n\nBoard %012x has %d warnings\n", r.ID, nwarn)
} else {
if !fail {
fmt.Printf("\n\nBoard %012x: PASS\n", r.ID)
}
}
if fail {
fmt.Printf("\n\nBoard %012x: FAIL\n", r.ID)
}
return ioutil.WriteFile(fn, data, 0644)
}
var report Report
func Write() {
report.Write()
}
func main() {
// Parse command line to get IP
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
fmt.Fprintf(os.Stderr, "%s <last byte of IP address>\n", os.Args[0])
flag.PrintDefaults()
fmt.Fprintln(os.Stderr, "see X for instructions.")
}
notemp := flag.Bool("notemp", false, "Flag that there are no external temperature sensors")
flag.Parse()
if len(flag.Args()) != 1 {
flag.Usage()
os.Exit(1)
}
report = New()
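	// The deferred Write runs even if a later check panics, so results recorded
	// before the failure still make it into the report file.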
defer Write()
// Create target given IP address
u, err := strconv.ParseUint(flag.Args()[0], 0, 8)
if err != nil {
panic(err)
}
addr := fmt.Sprintf("192.168.235.%d:50001", u)
conn, err := net.Dial("udp", addr)
if err != nil {
report.Results["Contact"] = ResFailure
panic(err)
}
fmt.Printf("Using device with address = %s\n", addr)
t, err := ipbus.New("dut", "addr_tablev12/top.xml", conn)
if err != nil {
report.Results["Contact"] = ResFailure
panic(err)
}
digi, err := frontend.New64chDigital(&t, "dut")
if err != nil {
// Need to check what other errors they could be
// since creating front end already tries to
// use various components
report.Results["Contact"] = ResFailure
}
fw, err := digi.FirmwareVersion()
if err != nil {
panic(err)
}
if fw != 13 {
err := fmt.Errorf("Invalid firmware version: %d not 13", fw)
panic(err)
}
report.Results["Contact"] = ResSuccess
// ID chip, if New64chDigital worked then it is correct
report.ID = digi.ID
report.Address = addr
report.Results["UniqueID"] = ResSuccess
report.Results["DigitalI2C"] = ResSuccess
fmt.Printf("Board unique ID = %012x\n", digi.ID)
// Power
power, err := digi.Power.ReadPower()
if err != nil {
report.Results["PowerChips"] = ResFailure
panic(err)
}
report.Results["PowerChips"] = ResSuccess
fmt.Printf("%v\n", power)
// Check voltages
dv := (power.V5_3v3 - 5.0) / 5.0
warn := false
if dv > 0.1 || dv < -0.1 {
report.Results["Voltages"] = ResFailure
err := fmt.Errorf("5.0 -> 3.3 V regulator input out of range: %0.2f V, %.02f", power.V5_3v3, dv)
panic(err)
	} else if dv > 0.05 || dv < -0.05 { // warning band assumed at ±5% of nominal
		warn = true
		fmt.Printf("Warning: 5.0 -> 3.3 V regulator input near edge of range: %0.2f V\n", power.V5_3v3)
}
dv = (power.V5_2v5 - 5.0) / 5.0
if dv > 0.1 || dv < -0.1 {
report.Results["Voltages"] = ResFailure
err := fmt.Errorf("5.0 -> 3.3 V regulator input out of range: %0.2f V", power.V5_2v5)
panic(err)
} else if dv > 0.1 || dv < -0.1 {
warn = true
fmt.Printf("Warning: 5.0 -> 2.5 V regulator input out near edge range: %0.2f V", power.V5_2v5)
}
dv = (power.V3v3 - 3.3) / 3.3
if dv > 0.1 || dv < -0.1 {
report.Results["Voltages"] = ResFailure
err := fmt.Errorf("3.3 V regulator out of range: %0.2f V", power.V3v3)
panic(err)
	} else if dv > 0.05 || dv < -0.05 { // warning band assumed at ±5% of nominal
		warn = true
		fmt.Printf("Warning: 3.3 V regulator near edge of range: %0.2f V\n", power.V3v3)
}
dv = (power.V1v8 - 1.8) / 1.8
if dv > 0.1 || dv < -0.1 {
report.Results["Voltages"] = ResFailure
err := fmt.Errorf("1.8 V regulator out of range: %0.2f V", power.V1v8)
panic(err)
} else if dv > 0.1 || dv < -0.1 {
warn = true
fmt.Printf("Warning: 1.8 V regulator near edge of range: %0.2f V", power.V1v8)
}
if warn {
report.Results["Voltages"] = ResWarning
} else {
report.Results["Voltages"] = ResSuccess
}
// Check currents
if power.I3v3 > 1.0 || power.I3v3 < 0.5 {
report.Results["Currents"] = ResFailure
err := fmt.Errorf("3.3 V current out of range: %0.2f V", power.I3v3)
panic(err)
}
if power.I2v5 > 0.2 {
report.Results["Currents"] = ResFailure
err := fmt.Errorf("2.5 V current out of range: %0.2f V", power.I3v3)
panic(err)
}
if power.I1v8 > 2.5 || power.I1v8 < 1.5 {
report.Results["Currents"] = ResFailure
err := fmt.Errorf("1.8 V current out of range: %0.2f V", power.I3v3)
panic(err)
}
report.Results["Currents"] = ResSuccess
// Check digital board temperature
alarmtemp := float32(80.0)
digitali2c := digi.I2CBuses[1]
digitaladdr := uint8(0x18)
indeti2c := make([]comms.I2C, 0, 2)
indeti2c = append(indeti2c, digi.I2CBuses[3])
indetaddress := []uint8{0x48}
tempmon, err := frontend.NewTempMonitor(alarmtemp, digitali2c, digitaladdr, []comms.I2C{}, uint8(0), indeti2c, indetaddress)
if err != nil {
if strings.Contains(err.Error(), "LM82") {
report.Results["LM82Temp"] = ResFailure
panic(err)
} else if strings.Contains(err.Error(), "AT30") {
report.Results["LM82Temp"] = ResSuccess
if !*notemp {
report.Results["InDetectorI2C"] = ResFailure
} else {
report.Results["InDetectorI2C"] = ResWarning
fmt.Printf("Warning: In detector I2C bus not tested\n")
}
tempmon, err = frontend.NewTempMonitor(alarmtemp, digitali2c, digitaladdr, []comms.I2C{}, uint8(0), []comms.I2C{}, []uint8{})
if err != nil {
panic(err)
}
}
}
temps, err := tempmon.Temperatures()
if err != nil {
report.Results["LM82Temp"] = ResFailure
report.Results["InDetectorI2C"] = ResFailure
panic(err)
}
report.Results["LM82Temp"] = ResSuccess
if _, ok := report.Results["InDetectorI2C"]; !ok {
report.Results["InDetectorI2C"] = ResSuccess
}
digitemp := temps[0]
fmt.Printf("Digital board temperature %0.0f C\n", digitemp)
if len(temps) > 1 {
indettemp := temps[1]
fmt.Printf("External temperature %0.0f C\n", indettemp)
}
if digitemp > 60.0 {
report.Results["Temperature"] = ResFailure
panic(fmt.Errorf("Digital board temperature too high: %0.1f", digitemp))
}
report.Results["Temperature"] = ResSuccess
// Check ADCs working
	if err := digi.ConfigureADCs(); err != nil {
		report.Results["ADCControl"] = ResFailure
	} else {
		report.Results["ADCControl"] = ResSuccess
	}
// Check comms with analog boards
i2canalog0 := digi.I2CBuses[2]
i2canalog1 := digi.I2CBuses[3]
hvaddr := uint8(0x61)
hvdac := ics.NewMCP4725(i2canalog0, hvaddr, 4.6)
	if _, err := hvdac.Read(); err != nil {
		report.Results["Analog0I2C"] = ResFailure
	} else {
		report.Results["Analog0I2C"] = ResSuccess
	}
hvdac = ics.NewMCP4725(i2canalog1, hvaddr, 4.6)
	if _, err := hvdac.Read(); err != nil {
		report.Results["Analog1I2C"] = ResFailure
	} else {
		report.Results["Analog1I2C"] = ResSuccess
	}
// Check clock chip
if err := digi.Reset("Si5345-RevB-SOL64CSA-Registers.txt"); err != nil {
report.Results["ClockSA"] = ResFailure
panic(err)
}
report.Results["ClockSA"] = ResSuccess
}
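
// withinTolerance is an illustrative sketch (not part of the original tester)
// of the relative-deviation check applied to each regulator above:
// |v - nominal| / nominal must stay within tol (0.1 for a failure).
func withinTolerance(v, nominal, tol float64) bool {
	dv := (v - nominal) / nominal
	return dv <= tol && dv >= -tol
}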
|
[
4
] |
package main
import (
"log"
"net"
"runtime"
"strconv"
"time"
)
func main() {
runtime.GOMAXPROCS(3)
const max = 10
ch := make(chan int, max)
for i := 0; i < max; i++ {
go exec(ch, i)
}
for i := 0; i < max; i++ {
<-ch
}
}
func exec(ch chan<- int, pos int) {
var rlen int
var err error
defer func() {
ch <- pos
}()
remote, err := net.ResolveUDPAddr("udp", "localhost:8888")
if err != nil {
log.Fatalf("%v\n", err)
}
conn, err := net.DialUDP("udp", nil, remote)
if err != nil {
log.Fatalf("%v\n", err)
}
log.Printf("Connect[%d]: %v\n", pos, remote)
conn.SetDeadline(time.Now().Add(5 * time.Second))
defer conn.Close()
s := "user" + strconv.Itoa(pos)
rlen, err = conn.Write([]byte(s))
if err != nil {
log.Printf("Send Error: %v\n", err)
return
}
log.Printf("Send[%d]: %v\n", pos, s)
buf := make([]byte, 1024)
rlen, err = conn.Read(buf)
if err != nil {
log.Printf("Receive Error: %v\n", err)
return
}
log.Printf("Receive[%d]: %v\n", pos, string(buf[:rlen]))
}
|
[
1
] |
package types
// Set is a threadsafe map any->bool
type Set struct {
cache map[I]bool
lock Mutex
}
// NewSet returns an empty Set
func NewSet() *Set {
return &Set{
cache: make(map[I]bool),
}
}
// Has returns whether the set has the key present
func (set *Set) Has(i I) bool {
	set.lock.Lock()
	defer set.lock.Unlock()
	return set.cache[i]
}
// Add saves an element to this Set
func (set *Set) Add(i I) {
set.lock.Lock()
set.cache[i] = true
set.lock.Unlock()
}
// Remove deletes an element from this Set
func (set *Set) Remove(i I) {
set.lock.Lock()
delete(set.cache, i)
set.lock.Unlock()
}
// SetString is a threadsafe map string->bool
type SetString struct {
cache map[string]bool
lock Mutex
}
// NewSetString returns an empty SetString
func NewSetString() *SetString {
return &SetString{
cache: make(map[string]bool),
}
}
// Add saves an element to this Set
func (set *SetString) Add(string string) {
set.lock.Lock()
set.cache[string] = true
set.lock.Unlock()
}
// Has returns whether the set has the string present
func (set *SetString) Has(string string) bool {
	set.lock.Lock()
	defer set.lock.Unlock()
	return set.cache[string]
}
// Slice returns a new SliceString with all elements randomly ordered
func (set *SetString) Slice() SliceString {
set.lock.Lock()
keys := make(SliceString, len(set.cache))
i := 0
	for k := range set.cache {
		keys[i] = k
		i++
	}
set.lock.Unlock()
return keys
}
// Remove deletes an element from this Set
func (set *SetString) Remove(string string) {
set.lock.Lock()
delete(set.cache, string)
set.lock.Unlock()
}
// SetUI is a threadsafe map uint->bool
type SetUI struct {
cache map[uint]bool
lock Mutex
}
// NewSetUI returns an empty SetUI
func NewSetUI() *SetUI {
return &SetUI{
cache: make(map[uint]bool),
}
}
// Add saves an uint to this Set
func (set *SetUI) Add(uint uint) {
set.lock.Lock()
set.cache[uint] = true
set.lock.Unlock()
}
// Has returns whether the set has the uint present
func (set *SetUI) Has(uint uint) bool {
	set.lock.Lock()
	defer set.lock.Unlock()
	return set.cache[uint]
}
// Slice returns a new SliceUI with all elements randomly ordered
func (set *SetUI) Slice() SliceUI {
set.lock.Lock()
keys := make(SliceUI, len(set.cache))
i := 0
	for k := range set.cache {
		keys[i] = k
		i++
	}
set.lock.Unlock()
return keys
}
// Remove deletes an uint from this Set
func (set *SetUI) Remove(uint uint) {
set.lock.Lock()
delete(set.cache, uint)
set.lock.Unlock()
}
|
[
1
] |
// Code generated by mockery v1.0.0. DO NOT EDIT.
package stdcli
import (
io "io"
mock "github.com/stretchr/testify/mock"
)
// Executor is an autogenerated mock type for the Executor type
type Executor struct {
mock.Mock
}
// Execute provides a mock function with given fields: cmd, args
func (_m *Executor) Execute(cmd string, args ...string) ([]byte, error) {
_va := make([]interface{}, len(args))
for _i := range args {
_va[_i] = args[_i]
}
var _ca []interface{}
_ca = append(_ca, cmd)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 []byte
if rf, ok := ret.Get(0).(func(string, ...string) []byte); ok {
r0 = rf(cmd, args...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]byte)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string, ...string) error); ok {
r1 = rf(cmd, args...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Run provides a mock function with given fields: w, cmd, args
func (_m *Executor) Run(w io.Writer, cmd string, args ...string) error {
_va := make([]interface{}, len(args))
for _i := range args {
_va[_i] = args[_i]
}
var _ca []interface{}
_ca = append(_ca, w, cmd)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 error
if rf, ok := ret.Get(0).(func(io.Writer, string, ...string) error); ok {
r0 = rf(w, cmd, args...)
} else {
r0 = ret.Error(0)
}
return r0
}
// Terminal provides a mock function with given fields: cmd, args
func (_m *Executor) Terminal(cmd string, args ...string) error {
_va := make([]interface{}, len(args))
for _i := range args {
_va[_i] = args[_i]
}
var _ca []interface{}
_ca = append(_ca, cmd)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 error
if rf, ok := ret.Get(0).(func(string, ...string) error); ok {
r0 = rf(cmd, args...)
} else {
r0 = ret.Error(0)
}
return r0
}
|
[
4
] |
package generators
import (
"math"
"math/rand"
"time"
)
type IntArrProps struct {
Min int32
Max int32 // Exclusive
Size uint32
}
func IntArr(p *IntArrProps) []int32 {
var min int32 = math.MinInt32
var max int32 = math.MaxInt32
var size uint32 = 20
if p.Min != 0 {
min = p.Min
}
if p.Max != 0 {
max = p.Max
}
	if p.Size != 0 {
size = p.Size
}
rand.Seed(time.Now().Unix())
randArr := make([]int32, size)
diff := uint32(max - min)
	for i := range randArr {
r := rand.Uint32()
add := r % diff
randArr[i] = min + int32(add)
}
return randArr
}
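
// exampleIntArr is an illustrative usage sketch (not part of the original
// package): 10 values in [1, 100). Note that zero-valued Min, Max or Size
// fall back to the defaults above, so 0 cannot be requested explicitly.
func exampleIntArr() []int32 {
	return IntArr(&IntArrProps{Min: 1, Max: 100, Size: 10})
}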
|
[
1
] |
// Code generated by mockery 2.9.4. DO NOT EDIT.
package dependencies
import (
discounts "discounts-applier/internal/discounts"
mock "github.com/stretchr/testify/mock"
)
// MockDependencies is an autogenerated mock type for the Dependencies type
type MockDependencies struct {
mock.Mock
}
// GetDiscountsManager provides a mock function with given fields:
func (_m *MockDependencies) GetDiscountsManager() (discounts.Manager, error) {
ret := _m.Called()
var r0 discounts.Manager
if rf, ok := ret.Get(0).(func() discounts.Manager); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(discounts.Manager)
}
}
var r1 error
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
|
[
4
] |
package main
import (
"fmt"
"log"
)
// Expression represents all the different forms of expressions possible
// inside an XML protocol description file. It has also received a few custom
// addenda to make applying special functions (like padding) easier.
type Expression interface {
// Concrete determines whether this particular expression can be computed
// to some constant value inside xgbgen. (The alternative is that the
// expression can only be computed with values at run time of the
// generated code.)
Concrete() bool
// Eval evaluates a concrete expression. It is an error to call Eval
// on any expression that is not concrete (or contains any sub-expression
// that is not concrete).
Eval() int
// Reduce attempts to evaluate any concrete sub-expressions.
	// e.g., (1 + 2 * (5 + 1 + someSizeOfStruct)) reduces to
	// (1 + 2 * (6 + someSizeOfStruct)).
	// 'prefix' is prepended to any field reference name.
Reduce(prefix string) string
// String is an alias for Reduce("")
String() string
// Initialize makes sure all names in this expression and any subexpressions
// have been translated to Go source names.
Initialize(p *Protocol)
}
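
// exampleReduce is an illustrative sketch (not part of xgbgen itself) showing
// how Reduce folds concrete sub-expressions while leaving field references
// symbolic: the expression below should reduce to "(6 + int(someLen))".
func exampleReduce() string {
	e := newBinaryOp("+",
		&BinaryOp{Op: "*", Expr1: &Value{v: 2}, Expr2: &Value{v: 3}},
		&FieldRef{Name: "someLen"})
	return e.Reduce("")
}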
// Function is a custom expression not found in the XML. It's simply used
// to apply a function named in 'Name' to the Expr expression.
type Function struct {
Name string
Expr Expression
}
func (e *Function) Concrete() bool {
return false
}
func (e *Function) Eval() int {
log.Fatalf("Cannot evaluate a 'Function'. It is not concrete.")
panic("unreachable")
}
func (e *Function) Reduce(prefix string) string {
return fmt.Sprintf("%s(%s)", e.Name, e.Expr.Reduce(prefix))
}
func (e *Function) String() string {
return e.Reduce("")
}
func (e *Function) Initialize(p *Protocol) {
e.Expr.Initialize(p)
}
// BinaryOp is an expression that performs some operation (defined in the XML
// file) with Expr1 and Expr2 as operands.
type BinaryOp struct {
Op string
Expr1 Expression
Expr2 Expression
}
// newBinaryOp constructs a new binary expression when both expr1 and expr2
// are not nil. If one or both are nil, then the non-nil expression is
// returned unchanged or nil is returned.
func newBinaryOp(op string, expr1, expr2 Expression) Expression {
switch {
case expr1 != nil && expr2 != nil:
return &BinaryOp{
Op: op,
Expr1: expr1,
Expr2: expr2,
}
case expr1 != nil && expr2 == nil:
return expr1
case expr1 == nil && expr2 != nil:
return expr2
case expr1 == nil && expr2 == nil:
return nil
}
panic("unreachable")
}
func (e *BinaryOp) Concrete() bool {
return e.Expr1.Concrete() && e.Expr2.Concrete()
}
func (e *BinaryOp) Eval() int {
switch e.Op {
case "+":
return e.Expr1.Eval() + e.Expr2.Eval()
case "-":
return e.Expr1.Eval() - e.Expr2.Eval()
case "*":
return e.Expr1.Eval() * e.Expr2.Eval()
case "/":
return e.Expr1.Eval() / e.Expr2.Eval()
case "&":
return e.Expr1.Eval() & e.Expr2.Eval()
case "<<":
return int(uint(e.Expr1.Eval()) << uint(e.Expr2.Eval()))
}
log.Fatalf("Invalid binary operator '%s' for expression.", e.Op)
panic("unreachable")
}
func (e *BinaryOp) Reduce(prefix string) string {
if e.Concrete() {
return fmt.Sprintf("%d", e.Eval())
}
// An incredibly dirty hack to make sure any time we perform an operation
// on a field, we're dealing with ints...
expr1, expr2 := e.Expr1, e.Expr2
switch expr1.(type) {
case *FieldRef:
expr1 = &Function{
Name: "int",
Expr: expr1,
}
}
switch expr2.(type) {
case *FieldRef:
expr2 = &Function{
Name: "int",
Expr: expr2,
}
}
return fmt.Sprintf("(%s %s %s)",
expr1.Reduce(prefix), e.Op, expr2.Reduce(prefix))
}
func (e *BinaryOp) String() string {
return e.Reduce("")
}
func (e *BinaryOp) Initialize(p *Protocol) {
e.Expr1.Initialize(p)
e.Expr2.Initialize(p)
}
// UnaryOp is the same as BinaryOp, except it's a unary operator with only
// one sub-expression.
type UnaryOp struct {
Op string
Expr Expression
}
func (e *UnaryOp) Concrete() bool {
return e.Expr.Concrete()
}
func (e *UnaryOp) Eval() int {
switch e.Op {
case "~":
return ^e.Expr.Eval()
}
log.Fatalf("Invalid unary operator '%s' for expression.", e.Op)
panic("unreachable")
}
func (e *UnaryOp) Reduce(prefix string) string {
if e.Concrete() {
return fmt.Sprintf("%d", e.Eval())
}
return fmt.Sprintf("(%s (%s))", e.Op, e.Expr.Reduce(prefix))
}
func (e *UnaryOp) String() string {
return e.Reduce("")
}
func (e *UnaryOp) Initialize(p *Protocol) {
e.Expr.Initialize(p)
}
// Padding represents the application of the 'pad' function to some
// sub-expression.
type Padding struct {
Expr Expression
}
func (e *Padding) Concrete() bool {
return e.Expr.Concrete()
}
func (e *Padding) Eval() int {
return pad(e.Expr.Eval())
}
func (e *Padding) Reduce(prefix string) string {
if e.Concrete() {
return fmt.Sprintf("%d", e.Eval())
}
return fmt.Sprintf("xgb.Pad(%s)", e.Expr.Reduce(prefix))
}
func (e *Padding) String() string {
return e.Reduce("")
}
func (e *Padding) Initialize(p *Protocol) {
e.Expr.Initialize(p)
}
// PopCount represents the application of the 'PopCount' function to
// some sub-expression.
type PopCount struct {
Expr Expression
}
func (e *PopCount) Concrete() bool {
return e.Expr.Concrete()
}
func (e *PopCount) Eval() int {
return int(popCount(uint(e.Expr.Eval())))
}
func (e *PopCount) Reduce(prefix string) string {
if e.Concrete() {
return fmt.Sprintf("%d", e.Eval())
}
return fmt.Sprintf("xgb.PopCount(%s)", e.Expr.Reduce(prefix))
}
func (e *PopCount) String() string {
return e.Reduce("")
}
func (e *PopCount) Initialize(p *Protocol) {
e.Expr.Initialize(p)
}
// Value represents some constant integer.
type Value struct {
v int
}
func (e *Value) Concrete() bool {
return true
}
func (e *Value) Eval() int {
return e.v
}
func (e *Value) Reduce(prefix string) string {
return fmt.Sprintf("%d", e.v)
}
func (e *Value) String() string {
return e.Reduce("")
}
func (e *Value) Initialize(p *Protocol) {}
// Bit represents some bit whose value is computed by '1 << bit'.
type Bit struct {
b int
}
func (e *Bit) Concrete() bool {
return true
}
func (e *Bit) Eval() int {
return int(1 << uint(e.b))
}
func (e *Bit) Reduce(prefix string) string {
return fmt.Sprintf("%d", e.Eval())
}
func (e *Bit) String() string {
return e.Reduce("")
}
func (e *Bit) Initialize(p *Protocol) {}
// FieldRef represents a reference to some variable in the generated code
// with name Name.
type FieldRef struct {
Name string
}
func (e *FieldRef) Concrete() bool {
return false
}
func (e *FieldRef) Eval() int {
log.Fatalf("Cannot evaluate a 'FieldRef'. It is not concrete.")
panic("unreachable")
}
func (e *FieldRef) Reduce(prefix string) string {
val := e.Name
if len(prefix) > 0 {
val = fmt.Sprintf("%s%s", prefix, val)
}
return val
}
func (e *FieldRef) String() string {
return e.Reduce("")
}
func (e *FieldRef) Initialize(p *Protocol) {
e.Name = SrcName(p, e.Name)
}
// EnumRef represents a reference to some enumeration field.
// EnumKind is the "group" an EnumItem is the name of the specific enumeration
// value inside that group.
type EnumRef struct {
EnumKind Type
EnumItem string
}
func (e *EnumRef) Concrete() bool {
return false
}
func (e *EnumRef) Eval() int {
log.Fatalf("Cannot evaluate an 'EnumRef'. It is not concrete.")
panic("unreachable")
}
func (e *EnumRef) Reduce(prefix string) string {
return fmt.Sprintf("%s%s", e.EnumKind, e.EnumItem)
}
func (e *EnumRef) String() string {
return e.Reduce("")
}
func (e *EnumRef) Initialize(p *Protocol) {
e.EnumKind = e.EnumKind.(*Translation).RealType(p)
e.EnumItem = SrcName(p, e.EnumItem)
}
// SumOf represents a summation of the variable in the generated code named by
// Name. It is not currently used. (It's XKB voodoo.)
type SumOf struct {
Name string
}
func (e *SumOf) Concrete() bool {
return false
}
func (e *SumOf) Eval() int {
log.Fatalf("Cannot evaluate a 'SumOf'. It is not concrete.")
panic("unreachable")
}
func (e *SumOf) Reduce(prefix string) string {
if len(prefix) > 0 {
return fmt.Sprintf("sum(%s%s)", prefix, e.Name)
}
return fmt.Sprintf("sum(%s)", e.Name)
}
func (e *SumOf) String() string {
return e.Reduce("")
}
func (e *SumOf) Initialize(p *Protocol) {
e.Name = SrcName(p, e.Name)
}
|
[
7
] |
package main
import "code.google.com/p/goprotobuf/proto"
import "github.com/msparks/iq/ircconnection"
import "github.com/msparks/iq/public"
func ConnReactor(ns *NamedSession, evs *EventServer) {
notifiee := ns.Conn.NewNotifiee()
defer ns.Conn.CloseNotifiee(notifiee)
for {
v := <-notifiee
switch v := v.(type) {
case ircconnection.IncomingMessageNotification:
ev := &public.Event{
IrcMessage: &public.IrcMessage{
Handle: proto.String(ns.Handle),
Message: v.Message,
},
}
			evs.Event <- ev
}
}
}
|
[
7
] |
/*
* Copyright 2011-2012 Branimir Karadzic. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
package lz4
import (
"errors"
"sync"
)
const (
minMatch = 4
hashLog = 16
hashTableSize = 1 << hashLog
hashShift = (minMatch * 8) - hashLog
incompressible uint32 = 128
uninitHash = 0x88888888
mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes.
	// MaxInputSize is the largest buffer that can be compressed in a single block
MaxInputSize = 0x7E000000
)
var (
// ErrTooLarge indicates the input buffer was too large
ErrTooLarge = errors.New("input too large")
ErrEncodeTooSmall = errors.New("encode buffer too small")
hashPool = sync.Pool{
New: func() interface{} {
return make([]uint32, hashTableSize)
},
}
)
type encoder struct {
src []byte
dst []byte
hashTable []uint32
pos uint32
anchor uint32
dpos uint32
}
// CompressBound returns the maximum length of a lz4 block
func CompressBound(isize int) int {
if isize > MaxInputSize {
return 0
}
return isize + ((isize) / 255) + 16
}
func (e *encoder) writeLiterals(length, mlLen, pos uint32) {
ln := length
var code byte
if ln > runMask-1 {
code = runMask
} else {
code = byte(ln)
}
if mlLen > mlMask-1 {
e.dst[e.dpos] = (code << mlBits) + byte(mlMask)
} else {
e.dst[e.dpos] = (code << mlBits) + byte(mlLen)
}
e.dpos++
if code == runMask {
ln -= runMask
for ; ln > 254; ln -= 255 {
e.dst[e.dpos] = 255
e.dpos++
}
e.dst[e.dpos] = byte(ln)
e.dpos++
}
for ii := uint32(0); ii < length; ii++ {
e.dst[e.dpos+ii] = e.src[pos+ii]
}
e.dpos += length
}
// Encode compresses src into dst and returns the number of compressed bytes
// written to dst. dst must be at least CompressBound(len(src)) bytes long.
func Encode(dst, src []byte) (compressedSize int, error error) {
if len(src) >= MaxInputSize {
return 0, ErrTooLarge
}
if n := CompressBound(len(src)); len(dst) < n {
return 0, ErrEncodeTooSmall
}
hashTable := hashPool.Get().([]uint32)
for i := range hashTable {
hashTable[i] = 0
}
e := encoder{src: src, dst: dst, hashTable: hashTable}
defer func() {
hashPool.Put(hashTable)
}()
// binary.LittleEndian.PutUint32(dst, uint32(len(src)))
// e.dpos = 0
var (
step uint32 = 1
limit = incompressible
)
for {
if int(e.pos)+12 >= len(e.src) {
e.writeLiterals(uint32(len(e.src))-e.anchor, 0, e.anchor)
return int(e.dpos), nil
}
sequence := uint32(e.src[e.pos+3])<<24 | uint32(e.src[e.pos+2])<<16 | uint32(e.src[e.pos+1])<<8 | uint32(e.src[e.pos+0])
hash := (sequence * 2654435761) >> hashShift
ref := e.hashTable[hash] + uninitHash
e.hashTable[hash] = e.pos - uninitHash
if ((e.pos-ref)>>16) != 0 || uint32(e.src[ref+3])<<24|uint32(e.src[ref+2])<<16|uint32(e.src[ref+1])<<8|uint32(e.src[ref+0]) != sequence {
if e.pos-e.anchor > limit {
limit <<= 1
step += 1 + (step >> 2)
}
e.pos += step
continue
}
if step > 1 {
e.hashTable[hash] = ref - uninitHash
e.pos -= step - 1
step = 1
continue
}
limit = incompressible
ln := e.pos - e.anchor
back := e.pos - ref
anchor := e.anchor
e.pos += minMatch
ref += minMatch
e.anchor = e.pos
for int(e.pos) < len(e.src)-5 && e.src[e.pos] == e.src[ref] {
e.pos++
ref++
}
mlLen := e.pos - e.anchor
e.writeLiterals(ln, mlLen, anchor)
e.dst[e.dpos] = uint8(back)
e.dst[e.dpos+1] = uint8(back >> 8)
e.dpos += 2
if mlLen > mlMask-1 {
mlLen -= mlMask
for mlLen > 254 {
mlLen -= 255
e.dst[e.dpos] = 255
e.dpos++
}
e.dst[e.dpos] = byte(mlLen)
e.dpos++
}
e.anchor = e.pos
}
}
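
// exampleEncode is an illustrative usage sketch (not part of the original
// package): allocate dst with CompressBound and keep only the bytes that
// Encode reports as written.
func exampleEncode(src []byte) ([]byte, error) {
	dst := make([]byte, CompressBound(len(src)))
	n, err := Encode(dst, src)
	if err != nil {
		return nil, err
	}
	return dst[:n], nil
}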
|
[
1
] |
package manuscript
import (
"github.com/quii/go-piggy"
"strconv"
)
type Manuscript struct {
EntityID string
Title, Abstract string
Authors []string
Version int
Published bool
}
func (m *Manuscript) InsertAuthorIn(index int, name string) {
if m.Authors == nil {
m.Authors = make([]string, 0)
}
authorsArraySize := len(m.Authors)
if (index + 1) > authorsArraySize {
authorsArraySize = index + 1
}
newArray := make([]string, authorsArraySize)
copy(newArray, m.Authors)
newArray[index] = name
m.Authors = newArray
}
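
// exampleInsertAuthor is an illustrative sketch (not part of the original
// package) showing that InsertAuthorIn grows the Authors slice as needed.
func exampleInsertAuthor() []string {
	m := Manuscript{}
	m.InsertAuthorIn(2, "C. Author") // Authors: ["", "", "C. Author"]
	m.InsertAuthorIn(0, "A. Author") // Authors: ["A. Author", "", "C. Author"]
	return m.Authors
}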
func (m *Manuscript) Update(facts []go_piggy.Fact) Manuscript {
newVersion := *m
newVersion.Version++
for _, f := range facts {
switch f.Op {
case "SET":
switch f.Key {
case "Title":
newVersion.Title = f.Value
case "Abstract":
newVersion.Abstract = f.Value
}
if authorsRegex.MatchString(f.Key) {
extractedIndex := authorIndexRegex.FindString(f.Key)
i, _ := strconv.Atoi(extractedIndex)
newVersion.InsertAuthorIn(i, f.Value)
}
}
}
return newVersion
}
|
[
7
] |
package main
import (
"fmt"
"math"
)
// The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
// Find the sum of all the primes below two million.
// Let's make a prime sieve.
// We want to represent the set of all numbers not divisible by any smaller prime.
// One option is a cascading sequence of channels, each filtering out multiples of the nth prime.
const (
maxPrime = 2e6
)
// Let's not use the intentionally slow channel sieve; mark composites in a boolean slice instead.
// checkPrimes returns the non-prime statuses of all numbers up to max.
func checkPrimes(max int) (s []bool) {
s = make([]bool, max+1)
s[0] = true
s[1] = true
s[2] = false
for i := 4; i <= max; i += 2 {
s[i] = true
}
limit := int(math.Sqrt(float64(max))) + 1
for i := 3; i < limit; i += 2 {
if !s[i] {
			for j := i * i; j <= max; j += i {
s[j] = true
}
}
}
return s
}
// sumPrimes returns the sum of all primes less than max
func sumPrimes(max int) (sum int) {
s := checkPrimes(max)
	for i := 0; i < max; i++ {
if !s[i] {
sum += i
}
}
return sum
}
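
// exampleSum is an illustrative sketch (not part of the original file)
// mirroring the opening comment: the primes below 10 (2, 3, 5, 7) sum to 17.
func exampleSum() bool {
	return sumPrimes(10) == 17
}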
func main() {
sum := sumPrimes(maxPrime)
fmt.Println(sum)
}
|
[
1
] |
package airtable
import (
"context"
"fmt"
"sync"
"github.com/CAVaccineInventory/airtable-export/pipeline/pkg/filter"
"github.com/CAVaccineInventory/airtable-export/pipeline/pkg/types"
beeline "github.com/honeycombio/beeline-go"
)
type tableFetchResults struct {
table types.TableContent
err error
}
// Tables allows just-in-time table fetching and caching from Airtable.
// It is not intended for long-term use, as data is fetched and cached exactly once.
type Tables struct {
mainLock sync.RWMutex // mainLock protects tableLocks.
tableLocks map[string]*sync.Mutex // tableLocks contains a lock for each table, to prevent races to populate a table.
tables map[string]tableFetchResults // Tables contains a map of table name to (table content or error).
fetcher fetcher
}
type fetcher interface {
Download(context.Context, string) (types.TableContent, error)
}
func NewTables(secret string) *Tables {
return &Tables{
mainLock: sync.RWMutex{},
tableLocks: map[string]*sync.Mutex{},
tables: map[string]tableFetchResults{},
fetcher: newAirtable(secret),
}
}
func (t *Tables) GetCounties(ctx context.Context) (types.TableContent, error) {
return t.getTable(ctx, "Counties", filter.WithMunger(dropEmpty))
}
func (t *Tables) GetProviders(ctx context.Context) (types.TableContent, error) {
return t.getTable(ctx, "Provider networks", filter.WithMunger(dropEmpty))
}
func dropEmpty(row map[string]interface{}) (map[string]interface{}, error) {
// Real rows from airtable will have at least 2 fields. The synthetic
// record id field and one real field.
if len(row) < 2 {
return nil, nil
}
return row, nil
}
func hideNotes(row map[string]interface{}) (map[string]interface{}, error) {
// Because this function is used as part of the input processing, which only
// happens once and inside a lock, it directly modifies the input row.
if v, ok := row["Latest report yes?"].(float64); !ok || v != 1 {
row["Latest report notes"] = ""
}
return row, nil
}
func dropSoftDeleted(row map[string]interface{}) (map[string]interface{}, error) {
if v, ok := row["is_soft_deleted"].(bool); ok && v {
return nil, nil
}
return row, nil
}
func useCountyURL(ctx context.Context, t *Tables) (func(row map[string]interface{}) (map[string]interface{}, error), error) {
cs, err := t.GetCounties(ctx)
if err != nil {
return nil, fmt.Errorf("GetCounties: %v", err)
}
urls := make(map[string]string)
for _, c := range cs {
var n, u string
var ok bool
if n, ok = c["County"].(string); !ok {
continue
}
if u, ok = c["County vaccination reservations URL"].(string); ok {
urls[n] = u
}
}
return func(row map[string]interface{}) (map[string]interface{}, error) {
if county, ok := row["County"].(string); ok {
if inst, ok := row["Appointment scheduling instructions"].(string); ok {
if inst == "Uses county scheduling system" {
if u, ok := urls[county]; ok {
row["Appointment scheduling instructions"] = u
}
}
}
}
return row, nil
}, nil
}
func (t *Tables) GetLocations(ctx context.Context) (types.TableContent, error) {
cm, err := useCountyURL(ctx, t)
if err != nil {
return nil, fmt.Errorf("Can't setup useCountyURL: %v", err)
}
return t.getTable(ctx, "Locations", filter.WithMunger(dropEmpty), filter.WithMunger(hideNotes), filter.WithMunger(dropSoftDeleted), filter.WithMunger(cm))
}
// getTable does a thread-safe, just-in-time fetch of a table.
// The result is cached for the lifetime of the Tables object.
func (t *Tables) getTable(ctx context.Context, tableName string, xfOpts ...filter.XformOpt) (types.TableContent, error) {
ctx, span := beeline.StartSpan(ctx, "airtable.getTable")
defer span.Send()
beeline.AddField(ctx, "table", tableName)
// Acquire the lock for the table in question, in order to fetch exactly once or wait for that fetch.
tableLock := t.getTableLock(tableName)
tableLock.Lock()
defer tableLock.Unlock()
if fetchResult, found := t.tables[tableName]; found {
beeline.AddField(ctx, "fetched", 0)
return fetchResult.table, fetchResult.err
}
beeline.AddField(ctx, "fetched", 1)
table, err := t.fetcher.Download(ctx, tableName)
if err != nil {
beeline.AddField(ctx, "error", err)
} else {
if len(xfOpts) > 0 {
table, err = filter.Transform(table, xfOpts...)
if err != nil {
err = fmt.Errorf("Transform failed: %v", err)
beeline.AddField(ctx, "error", err)
}
}
}
t.tables[tableName] = tableFetchResults{
table: table,
err: err,
}
return table, err
}
// Returns the lock for the specified table.
// Creates it if it doesn't exist.
func (t *Tables) getTableLock(tableName string) *sync.Mutex {
t.mainLock.Lock()
defer t.mainLock.Unlock()
lock, found := t.tableLocks[tableName]
if found {
return lock
}
lock = &sync.Mutex{}
t.tableLocks[tableName] = lock
return lock
}
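
// staticFetcher is an illustrative sketch (not part of the original package)
// showing how the fetcher interface can be satisfied by canned data in tests,
// so getTable's caching and locking can be exercised without hitting Airtable.
type staticFetcher struct {
	content types.TableContent
}

func (f staticFetcher) Download(_ context.Context, _ string) (types.TableContent, error) {
	return f.content, nil
}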
|
[
4
] |
package test
import (
"bufio"
"os"
)
// LoadTestFile loads a test file into a two-dimensional slice of bytes, one line per entry
func LoadTestFile(path string) [][]byte {
file, err := os.Open(path)
if err != nil {
panic("Couldn't open " + path)
}
defer file.Close()
var words [][]byte
reader := bufio.NewReader(file)
	for {
		line, err := reader.ReadBytes('\n')
		if len(line) > 0 {
			// Strip the trailing newline if present.
			if line[len(line)-1] == '\n' {
				line = line[:len(line)-1]
			}
			words = append(words, line)
		}
		if err != nil {
			// EOF (or read error): a final line without a trailing newline
			// has already been kept above.
			break
		}
	}
return words
}
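
// exampleLoad is an illustrative usage sketch (not part of the original
// package); the path is hypothetical.
func exampleLoad() int {
	words := LoadTestFile("testdata/words.txt")
	return len(words) // number of lines read
}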
|
[
4
] |
package attest
import (
"errors"
"os"
)
// FileState ensures that the file is in the specified state of existence: it
// returns an error if exists is true and the file is missing, or if exists is
// false and the file is present.
func FileState(name string, exists bool) error {
if _, err := os.Stat(name); err != nil {
if !os.IsNotExist(err) {
return err
}
if exists {
return errors.New("file does not exist")
}
} else {
if !exists {
return errors.New("file exists")
}
}
return nil
}
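
// exampleFileState is an illustrative usage sketch (not part of the original
// package); the file names are hypothetical.
func exampleFileState() error {
	if err := FileState("output.txt", true); err != nil { // must exist
		return err
	}
	return FileState("stale.lock", false) // must be absent
}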
|
[
4
] |
package clock
import "fmt"
const testVersion = 4
const minutesInDay = 24 * 60
type Clock int
func New(hour, min int) Clock {
minutes := min + 60*hour
return Minutize(Clock(minutes))
}
func (c Clock) String() string {
hours := c / 60
minutes := c % 60
return fmt.Sprintf("%02d:%02d", hours, minutes)
}
func (c Clock) Add(min int) Clock {
return Minutize(c + Clock(min))
}
func Minutize(c Clock) Clock {
for c < 0 {
c += minutesInDay
}
return Clock(c % minutesInDay)
}
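
// exampleClock is an illustrative sketch (not part of the original package)
// showing the wrap-around behaviour of New and Add: 23:30 plus 45 minutes
// rolls over past midnight.
func exampleClock() string {
	c := New(23, 30) // "23:30"
	c = c.Add(45)    // wraps past midnight
	return c.String() // "00:15"
}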
|
[
1
] |
// Copyright (C) 2019-2020, Xiongfa Li.
// @author xiongfa.li
// @version V1.0
// Description:
package lru
import "container/list"
type QueueElem list.Element
type QueueListener interface {
	// Called after an element is touched (hit) and moved to the front of the queue
PostTouch(v interface{})
	// Called after a new element is inserted at the front of the queue
PostInsert(v interface{})
	// Called after an element is removed from the queue
PostDelete(v interface{})
}
type Queue interface {
AddListener(listener QueueListener)
Touch(elem *QueueElem)
Move(other *LruQueue, elem *QueueElem, notify bool) *QueueElem
Insert(v interface{}) *QueueElem
Delete(elem *QueueElem)
}
type LruQueue struct {
list *list.List
cap int
listeners []QueueListener
}
func NewLruElement(v interface{}) *QueueElem {
return &QueueElem{
Value: v,
}
}
func NewLruQueue(cap int) *LruQueue {
return &LruQueue{
list: list.New(),
cap: cap,
listeners: []QueueListener{},
}
}
func (q *LruQueue) AddListener(listener QueueListener) {
q.listeners = append(q.listeners, listener)
}
func (q *LruQueue) Touch(elem *QueueElem) {
if elem != nil {
q.list.MoveToFront((*list.Element)(elem))
for _, l := range q.listeners {
l.PostTouch(elem.Value)
}
}
}
func (q *LruQueue) Move(other *LruQueue, elem *QueueElem, notify bool) *QueueElem {
if other == nil || elem == nil {
return nil
}
v := q.list.Remove((*list.Element)(elem))
if notify {
for _, l := range q.listeners {
l.PostDelete(elem.Value)
}
}
elem = (*QueueElem)(other.list.PushFront(v))
if notify {
for _, l := range q.listeners {
l.PostInsert(elem.Value)
}
}
return elem
}
func (q *LruQueue) Insert(v interface{}) *QueueElem {
if q.list.Len() == q.cap {
e := q.list.Back()
if e != nil {
q.Delete((*QueueElem)(e))
}
}
e := q.list.PushFront(v)
for _, l := range q.listeners {
l.PostInsert(v)
}
return (*QueueElem)(e)
}
func (q *LruQueue) Delete(elem *QueueElem) {
v := q.list.Remove((*list.Element)(elem))
for _, l := range q.listeners {
l.PostDelete(v)
}
}
type dummyListener struct{}
func (l *dummyListener) PostTouch(v interface{}) {
}
func (l *dummyListener) PostInsert(v interface{}) {
}
func (l *dummyListener) PostDelete(v interface{}) {
}
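
// countingListener is an illustrative sketch (not part of the original file)
// showing how the QueueListener hooks can observe queue activity, e.g. to keep
// hit/insert/eviction counters for a cache built on LruQueue.
type countingListener struct {
	touches, inserts, deletes int
}

func (l *countingListener) PostTouch(v interface{})  { l.touches++ }
func (l *countingListener) PostInsert(v interface{}) { l.inserts++ }
func (l *countingListener) PostDelete(v interface{}) { l.deletes++ }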
|
[
1
] |
package css_parser
import (
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/logger"
)
const (
borderRadiusTopLeft = iota
borderRadiusTopRight
borderRadiusBottomRight
borderRadiusBottomLeft
)
type borderRadiusCorner struct {
firstToken css_ast.Token
secondToken css_ast.Token
unitSafety unitSafetyTracker
ruleIndex uint32 // The index of the originating rule in the rules array
wasSingleRule bool // True if the originating rule was just for this side
}
type borderRadiusTracker struct {
corners [4]borderRadiusCorner
important bool // True if all active rules were flagged as "!important"
}
func (borderRadius *borderRadiusTracker) updateCorner(rules []css_ast.Rule, corner int, new borderRadiusCorner) {
if old := borderRadius.corners[corner]; old.firstToken.Kind != css_lexer.TEndOfFile &&
(!new.wasSingleRule || old.wasSingleRule) &&
old.unitSafety.status == unitSafe && new.unitSafety.status == unitSafe {
rules[old.ruleIndex] = css_ast.Rule{}
}
borderRadius.corners[corner] = new
}
func (borderRadius *borderRadiusTracker) mangleCorners(rules []css_ast.Rule, decl *css_ast.RDeclaration, minifyWhitespace bool) {
// Reset if we see a change in the "!important" flag
if borderRadius.important != decl.Important {
borderRadius.corners = [4]borderRadiusCorner{}
borderRadius.important = decl.Important
}
tokens := decl.Value
beforeSplit := len(tokens)
afterSplit := len(tokens)
// Search for the single slash if present
for i, t := range tokens {
if t.Kind == css_lexer.TDelimSlash {
if beforeSplit == len(tokens) {
beforeSplit = i
afterSplit = i + 1
} else {
// Multiple slashes are an error
borderRadius.corners = [4]borderRadiusCorner{}
return
}
}
}
// Use a single tracker for the whole rule
unitSafety := unitSafetyTracker{}
for _, t := range tokens[:beforeSplit] {
unitSafety.includeUnitOf(t)
}
for _, t := range tokens[afterSplit:] {
unitSafety.includeUnitOf(t)
}
firstRadii, firstRadiiOk := expandTokenQuad(tokens[:beforeSplit], "")
lastRadii, lastRadiiOk := expandTokenQuad(tokens[afterSplit:], "")
// Stop now if the pattern wasn't matched
if !firstRadiiOk || (beforeSplit < afterSplit && !lastRadiiOk) {
borderRadius.corners = [4]borderRadiusCorner{}
return
}
// Handle the first radii
for corner, t := range firstRadii {
if unitSafety.status == unitSafe {
t.TurnLengthIntoNumberIfZero()
}
borderRadius.updateCorner(rules, corner, borderRadiusCorner{
firstToken: t,
secondToken: t,
unitSafety: unitSafety,
ruleIndex: uint32(len(rules) - 1),
})
}
// Handle the last radii
if lastRadiiOk {
for corner, t := range lastRadii {
if unitSafety.status == unitSafe {
t.TurnLengthIntoNumberIfZero()
}
borderRadius.corners[corner].secondToken = t
}
}
// Success
borderRadius.compactRules(rules, decl.KeyRange, minifyWhitespace)
}
func (borderRadius *borderRadiusTracker) mangleCorner(rules []css_ast.Rule, decl *css_ast.RDeclaration, minifyWhitespace bool, corner int) {
// Reset if we see a change in the "!important" flag
if borderRadius.important != decl.Important {
borderRadius.corners = [4]borderRadiusCorner{}
borderRadius.important = decl.Important
}
if tokens := decl.Value; (len(tokens) == 1 && tokens[0].Kind.IsNumeric()) ||
(len(tokens) == 2 && tokens[0].Kind.IsNumeric() && tokens[1].Kind.IsNumeric()) {
firstToken := tokens[0]
secondToken := firstToken
if len(tokens) == 2 {
secondToken = tokens[1]
}
// Check to see if these units are safe to use in every browser
unitSafety := unitSafetyTracker{}
unitSafety.includeUnitOf(firstToken)
unitSafety.includeUnitOf(secondToken)
// Only collapse "0unit" into "0" if the unit is safe
if unitSafety.status == unitSafe && firstToken.TurnLengthIntoNumberIfZero() {
tokens[0] = firstToken
}
if len(tokens) == 2 {
if unitSafety.status == unitSafe && secondToken.TurnLengthIntoNumberIfZero() {
tokens[1] = secondToken
}
// If both tokens are equal, merge them into one
if firstToken.EqualIgnoringWhitespace(secondToken) {
tokens[0].Whitespace &= ^css_ast.WhitespaceAfter
decl.Value = tokens[:1]
}
}
borderRadius.updateCorner(rules, corner, borderRadiusCorner{
firstToken: firstToken,
secondToken: secondToken,
unitSafety: unitSafety,
ruleIndex: uint32(len(rules) - 1),
wasSingleRule: true,
})
borderRadius.compactRules(rules, decl.KeyRange, minifyWhitespace)
} else {
borderRadius.corners = [4]borderRadiusCorner{}
}
}
func (borderRadius *borderRadiusTracker) compactRules(rules []css_ast.Rule, keyRange logger.Range, minifyWhitespace bool) {
// All tokens must be present
if eof := css_lexer.TEndOfFile; borderRadius.corners[0].firstToken.Kind == eof || borderRadius.corners[1].firstToken.Kind == eof ||
borderRadius.corners[2].firstToken.Kind == eof || borderRadius.corners[3].firstToken.Kind == eof {
return
}
// All tokens must have the same unit
for _, side := range borderRadius.corners[1:] {
if !side.unitSafety.isSafeWith(borderRadius.corners[0].unitSafety) {
return
}
}
// Generate the most minimal representation
tokens := compactTokenQuad(
borderRadius.corners[0].firstToken,
borderRadius.corners[1].firstToken,
borderRadius.corners[2].firstToken,
borderRadius.corners[3].firstToken,
minifyWhitespace,
)
secondTokens := compactTokenQuad(
borderRadius.corners[0].secondToken,
borderRadius.corners[1].secondToken,
borderRadius.corners[2].secondToken,
borderRadius.corners[3].secondToken,
minifyWhitespace,
)
if !css_ast.TokensEqualIgnoringWhitespace(tokens, secondTokens) {
var whitespace css_ast.WhitespaceFlags
if !minifyWhitespace {
whitespace = css_ast.WhitespaceBefore | css_ast.WhitespaceAfter
}
tokens = append(tokens, css_ast.Token{
Loc: tokens[len(tokens)-1].Loc,
Kind: css_lexer.TDelimSlash,
Text: "/",
Whitespace: whitespace,
})
tokens = append(tokens, secondTokens...)
}
// Remove all of the existing declarations
var minLoc logger.Loc
for i, corner := range borderRadius.corners {
if loc := rules[corner.ruleIndex].Loc; i == 0 || loc.Start < minLoc.Start {
minLoc = loc
}
rules[corner.ruleIndex] = css_ast.Rule{}
}
// Insert the combined declaration where the last rule was
rules[borderRadius.corners[3].ruleIndex] = css_ast.Rule{Loc: minLoc, Data: &css_ast.RDeclaration{
Key: css_ast.DBorderRadius,
KeyText: "border-radius",
Value: tokens,
KeyRange: keyRange,
Important: borderRadius.important,
}}
}
|
[
1
] |
package signalfx
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"golang.org/x/net/context"
"zvelo.io/go-signalfx/sfxproto"
)
const (
// TokenHeader is the header on which SignalFx looks for the api token
TokenHeader = "X-SF-TOKEN"
)
// A Client is used to send datapoints to SignalFx
type Client struct {
config *Config
tr http.RoundTripper
client *http.Client
}
// NewClient returns a new Client. config is copied, so future changes to the
// external config object are not reflected within the client.
func NewClient(config *Config) *Client {
tr := config.Transport()
return &Client{
config: config.Clone(),
tr: tr,
client: &http.Client{Transport: tr},
}
}
// Submit forwards raw datapoints to SignalFx
func (c *Client) Submit(ctx context.Context, pdps *sfxproto.DataPoints) error {
if ctx == nil {
ctx = context.Background()
} else if ctx.Err() != nil {
return ErrContext(ctx.Err())
}
jsonBytes, err := pdps.Marshal()
if err != nil {
return ErrMarshal(err)
}
req, _ := http.NewRequest("POST", c.config.URL, bytes.NewBuffer(jsonBytes))
req.Header = http.Header{
TokenHeader: {c.config.AuthToken},
"User-Agent": {c.config.UserAgent},
"Connection": {"Keep-Alive"},
"Content-Type": {"application/x-protobuf"},
}
var resp *http.Response
done := make(chan interface{}, 1)
go func() {
resp, err = c.client.Do(req)
done <- true
}()
select {
case <-ctx.Done():
if tr, ok := c.tr.(*http.Transport); ok {
tr.CancelRequest(req)
<-done // wait for the request to be canceled
} else {
if c.config.Logger != nil {
fmt.Fprintf(c.config.Logger, "tried to cancel non-cancellable transport %T", tr)
}
}
return ErrContext(ctx.Err())
case <-done:
if err != nil {
return ErrPost(err)
}
}
defer resp.Body.Close()
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return ErrResponse(err)
}
if resp.StatusCode != 200 {
return &ErrStatus{respBody, resp.StatusCode}
}
var body string
if err = json.Unmarshal(respBody, &body); err != nil {
return &ErrJSON{respBody}
}
if body != "OK" {
return &ErrInvalidBody{body}
}
return nil
}
|
[
4
] |
package aws
import (
"fmt"
"log"
"reflect"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsNetworkInterfaceSGAttachment() *schema.Resource {
return &schema.Resource{
Create: resourceAwsNetworkInterfaceSGAttachmentCreate,
Read: resourceAwsNetworkInterfaceSGAttachmentRead,
Delete: resourceAwsNetworkInterfaceSGAttachmentDelete,
Schema: map[string]*schema.Schema{
"security_group_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"network_interface_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
},
}
}
func resourceAwsNetworkInterfaceSGAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
mk := "network_interface_sg_attachment_" + d.Get("network_interface_id").(string)
awsMutexKV.Lock(mk)
defer awsMutexKV.Unlock(mk)
sgID := d.Get("security_group_id").(string)
interfaceID := d.Get("network_interface_id").(string)
conn := meta.(*AWSClient).ec2conn
// Fetch the network interface we will be working with.
iface, err := fetchNetworkInterface(conn, interfaceID)
if err != nil {
return err
}
// Add the security group to the network interface.
log.Printf("[DEBUG] Attaching security group %s to network interface ID %s", sgID, interfaceID)
if sgExistsInENI(sgID, iface) {
return fmt.Errorf("security group %s already attached to interface ID %s", sgID, *iface.NetworkInterfaceId)
}
var groupIDs []string
for _, v := range iface.Groups {
groupIDs = append(groupIDs, *v.GroupId)
}
groupIDs = append(groupIDs, sgID)
params := &ec2.ModifyNetworkInterfaceAttributeInput{
NetworkInterfaceId: iface.NetworkInterfaceId,
Groups: aws.StringSlice(groupIDs),
}
_, err = conn.ModifyNetworkInterfaceAttribute(params)
if err != nil {
return err
}
log.Printf("[DEBUG] Successful attachment of security group %s to network interface ID %s", sgID, interfaceID)
return resourceAwsNetworkInterfaceSGAttachmentRead(d, meta)
}
func resourceAwsNetworkInterfaceSGAttachmentRead(d *schema.ResourceData, meta interface{}) error {
sgID := d.Get("security_group_id").(string)
interfaceID := d.Get("network_interface_id").(string)
log.Printf("[DEBUG] Checking association of security group %s to network interface ID %s", sgID, interfaceID)
conn := meta.(*AWSClient).ec2conn
iface, err := fetchNetworkInterface(conn, interfaceID)
if err != nil {
return err
}
if sgExistsInENI(sgID, iface) {
d.SetId(fmt.Sprintf("%s_%s", sgID, interfaceID))
} else {
// The association does not exist when it should, taint this resource.
log.Printf("[WARN] Security group %s not associated with network interface ID %s, tainting", sgID, interfaceID)
d.SetId("")
}
return nil
}
func resourceAwsNetworkInterfaceSGAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
mk := "network_interface_sg_attachment_" + d.Get("network_interface_id").(string)
awsMutexKV.Lock(mk)
defer awsMutexKV.Unlock(mk)
sgID := d.Get("security_group_id").(string)
interfaceID := d.Get("network_interface_id").(string)
log.Printf("[DEBUG] Removing security group %s from interface ID %s", sgID, interfaceID)
conn := meta.(*AWSClient).ec2conn
iface, err := fetchNetworkInterface(conn, interfaceID)
if err != nil {
return err
}
if err := delSGFromENI(conn, sgID, iface); err != nil {
return err
}
d.SetId("")
return nil
}
// fetchNetworkInterface is a utility function used by Read and Delete to fetch
// the full ENI details for a specific interface ID.
func fetchNetworkInterface(conn *ec2.EC2, ifaceID string) (*ec2.NetworkInterface, error) {
log.Printf("[DEBUG] Fetching information for interface ID %s", ifaceID)
dniParams := &ec2.DescribeNetworkInterfacesInput{
NetworkInterfaceIds: aws.StringSlice([]string{ifaceID}),
}
dniResp, err := conn.DescribeNetworkInterfaces(dniParams)
if err != nil {
return nil, err
}
return dniResp.NetworkInterfaces[0], nil
}
func delSGFromENI(conn *ec2.EC2, sgID string, iface *ec2.NetworkInterface) error {
old := iface.Groups
var new []*string
for _, v := range iface.Groups {
if *v.GroupId == sgID {
continue
}
new = append(new, v.GroupId)
}
if reflect.DeepEqual(old, new) {
// The interface already didn't have the security group, nothing to do
return nil
}
params := &ec2.ModifyNetworkInterfaceAttributeInput{
NetworkInterfaceId: iface.NetworkInterfaceId,
Groups: new,
}
_, err := conn.ModifyNetworkInterfaceAttribute(params)
return err
}
// sgExistsInENI is a utility function that can be used to quickly check to
// see if a security group exists in an *ec2.NetworkInterface.
func sgExistsInENI(sgID string, iface *ec2.NetworkInterface) bool {
for _, v := range iface.Groups {
if *v.GroupId == sgID {
return true
}
}
return false
}
|
[
1
] |
package install
import (
"reflect"
"strings"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
configapiinstall "github.com/openshift/origin/pkg/cmd/server/apis/config/install"
)
func TestDescriptions(t *testing.T) {
scheme := runtime.NewScheme()
InstallInternalOpenShift(scheme)
InstallInternalKube(scheme)
for _, version := range scheme.PrioritizedVersionsAllGroups() {
seen := map[reflect.Type]bool{}
for _, apiType := range scheme.KnownTypes(version) {
checkDescriptions(apiType, &seen, t)
}
}
}
func checkDescriptions(objType reflect.Type, seen *map[reflect.Type]bool, t *testing.T) {
if _, exists := (*seen)[objType]; exists {
return
}
(*seen)[objType] = true
if !strings.Contains(objType.PkgPath(), "github.com/openshift/origin/pkg") {
return
}
for i := 0; i < objType.NumField(); i++ {
structField := objType.FieldByIndex([]int{i})
// these fields don't need descriptions
if structField.Name == "TypeMeta" || structField.Name == "ObjectMeta" || structField.Name == "ListMeta" {
continue
}
if structField.Type == reflect.TypeOf(metav1.Time{}) || structField.Type == reflect.TypeOf(time.Time{}) || structField.Type == reflect.TypeOf(runtime.RawExtension{}) {
continue
}
descriptionTag := structField.Tag.Get("description")
if len(descriptionTag) > 0 {
t.Errorf("%v", structField.Tag)
t.Errorf("%v.%v should not have a description tag", objType, structField.Name)
}
switch structField.Type.Kind() {
case reflect.Struct:
checkDescriptions(structField.Type, seen, t)
}
}
}
func TestInternalJsonTags(t *testing.T) {
scheme := runtime.NewScheme()
InstallInternalOpenShift(scheme)
InstallInternalKube(scheme)
configapiinstall.InstallLegacyInternal(scheme)
seen := map[reflect.Type]bool{}
seenGroups := sets.String{}
for _, version := range scheme.PrioritizedVersionsAllGroups() {
if seenGroups.Has(version.Group) {
continue
}
seenGroups.Insert(version.Group)
internalVersion := schema.GroupVersion{Group: version.Group, Version: runtime.APIVersionInternal}
for _, apiType := range scheme.KnownTypes(internalVersion) {
checkInternalJsonTags(apiType, &seen, t)
}
}
}
// internalTypesWithAllowedJsonTags is the list of special structs that have a particular need to have json tags on their
// internal types. Do not add to this list without having your paperwork checked in triplicate.
var internalTypesWithAllowedJsonTags = sets.NewString("DockerConfig", "DockerImage")
func checkInternalJsonTags(objType reflect.Type, seen *map[reflect.Type]bool, t *testing.T) {
if objType.Kind() != reflect.Struct {
return
}
if _, exists := (*seen)[objType]; exists {
return
}
(*seen)[objType] = true
if !strings.Contains(objType.PkgPath(), "github.com/openshift/origin/pkg") {
return
}
if internalTypesWithAllowedJsonTags.Has(objType.Name()) {
return
}
if objType.Kind() != reflect.Struct {
return
}
for i := 0; i < objType.NumField(); i++ {
structField := objType.FieldByIndex([]int{i})
jsonTag := structField.Tag.Get("json")
if len(jsonTag) != 0 {
t.Errorf("%v.%v should not have a json tag", objType, structField.Name)
}
switch structField.Type.Kind() {
case reflect.Struct:
checkInternalJsonTags(structField.Type, seen, t)
case reflect.Ptr:
checkInternalJsonTags(structField.Type.Elem(), seen, t)
}
}
}
func TestExternalJsonTags(t *testing.T) {
scheme := runtime.NewScheme()
InstallInternalOpenShift(scheme)
InstallInternalKube(scheme)
configapiinstall.InstallLegacyInternal(scheme)
seen := map[reflect.Type]bool{}
for _, version := range scheme.PrioritizedVersionsAllGroups() {
for _, apiType := range scheme.KnownTypes(version) {
checkExternalJsonTags(apiType, &seen, t)
}
}
}
func checkExternalJsonTags(objType reflect.Type, seen *map[reflect.Type]bool, t *testing.T) {
if objType.Kind() != reflect.Struct {
return
}
if _, exists := (*seen)[objType]; exists {
return
}
(*seen)[objType] = true
if !strings.Contains(objType.PkgPath(), "github.com/openshift/origin/pkg") {
return
}
if objType.Kind() != reflect.Struct {
return
}
for i := 0; i < objType.NumField(); i++ {
structField := objType.FieldByIndex([]int{i})
jsonTag := structField.Tag.Get("json")
if len(jsonTag) == 0 {
t.Errorf("%v.%v should have a json tag", objType, structField.Name)
}
switch structField.Type.Kind() {
case reflect.Struct:
checkExternalJsonTags(structField.Type, seen, t)
case reflect.Ptr:
checkExternalJsonTags(structField.Type.Elem(), seen, t)
}
}
}
|
[
7
] |
package mockDevice
import (
"fmt"
"io"
"log"
"net"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
)
func newSFTP(_ Handler) Server {
// An SSH server is represented by a ServerConfig, which holds
// certificate details and handles authentication of ServerConns.
srv := &sftp_server{
config: &ssh.ServerConfig{PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
// Should use constant-time compare (or better, salt+hash) in
// a production setting.
if c.User() == "testuser" && string(pass) == "tiger" {
return nil, nil
}
return nil, fmt.Errorf("password rejected for %q", c.User())
},
},
}
return srv
}
type sftp_server struct {
config *ssh.ServerConfig
nConn net.Conn
}
func (s *sftp_server) Serve(l net.Listener) (err error) {
private, err := ssh.ParsePrivateKey(privKey)
if err != nil {
log.Fatal("Failed to parse private key", err)
}
s.config.AddHostKey(private)
s.nConn, err = l.Accept()
if err != nil {
return err
}
// Before use, a handshake must be performed on the incoming
// net.Conn.
_, chans, reqs, err := ssh.NewServerConn(s.nConn, s.config)
if err != nil {
return err
}
// The incoming Request channel must be serviced.
go ssh.DiscardRequests(reqs)
// Service the incoming Channel channel.
for newChannel := range chans {
// Channels have a type, depending on the application level
// protocol intended. In the case of an SFTP session, this is "subsystem"
// with a payload string of "<length=4>sftp"
if newChannel.ChannelType() != "session" {
newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
continue
}
channel, requests, err := newChannel.Accept()
if err != nil {
log.Fatal("could not accept channel.", err)
}
// Sessions have out-of-band requests such as "shell",
// "pty-req" and "env". Here we handle only the
// "subsystem" request.
go func(in <-chan *ssh.Request) {
for req := range in {
ok := false
switch req.Type {
case "subsystem":
if string(req.Payload[4:]) == "sftp" {
ok = true
}
}
req.Reply(ok, nil)
}
}(requests)
server, err := sftp.NewServer(
channel,
)
if err != nil {
log.Fatal(err)
}
if err := server.Serve(); err == io.EOF {
server.Close()
} else if err != nil {
log.Fatal("sftp server completed with error:", err)
}
}
return nil
}
func (s *sftp_server) Close() error {
return nil
}
var privKey = []byte(`-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn
NhAAAAAwEAAQAAAQEAuI8PoR/XRVcd8wCXKxgZPNLvE03W/BwoR8/Sdn2cs3/VMUmpM+rA
zSLUKW/+4fWLIPEzlCwe0AMs4MsD3QGf121pxpgFfC09FZiN2VNK2+FGj//g3DMLMe5GOK
z5ZxLH2wqI2YtJqaTl4qUOQBgSg+NGSzH7N0JH3aYPHRDb3S1CUy41y+v8arnQt7vh/+9e
UFeLBJaVGoDukVutbzcm1ElzKFeB6SUOauZnaRSRSR1FD6it+0T18FBPZa/Rbr8WT1J7+d
8RRr4xEKbWAmJGZMX1uTQweZ36bqSLYRfDgnTEmDOKmSQfGwc+evGAiabMp9AYzXvoRNTn
SKWpX2rrkwAAA9DPHXxmzx18ZgAAAAdzc2gtcnNhAAABAQC4jw+hH9dFVx3zAJcrGBk80u
8TTdb8HChHz9J2fZyzf9UxSakz6sDNItQpb/7h9Ysg8TOULB7QAyzgywPdAZ/XbWnGmAV8
LT0VmI3ZU0rb4UaP/+DcMwsx7kY4rPlnEsfbCojZi0mppOXipQ5AGBKD40ZLMfs3Qkfdpg
8dENvdLUJTLjXL6/xqudC3u+H/715QV4sElpUagO6RW61vNybUSXMoV4HpJQ5q5mdpFJFJ
HUUPqK37RPXwUE9lr9FuvxZPUnv53xFGvjEQptYCYkZkxfW5NDB5nfpupIthF8OCdMSYM4
qZJB8bBz568YCJpsyn0BjNe+hE1OdIpalfauuTAAAAAwEAAQAAAQBF+ukAPWSRBFF03Np1
GrQnHgxNE4zbF4omgKTbDRIn9ebOw5GHABKPNg+gjrjk0QgqO4tFOd2NHkccDZ6vZHhJZV
FgXjBmP3kUAT54E18lNKxe2bVXiXtLOYAi6WPAM5zYb4wogOozizUn1VIr93S90aXLyW3q
LBW388lzSfs0R9ow/BGljWCjQ7cNnA0aaZVBzPMheZMrc875ScFcEy6JQe4IFzUCPSGttM
Gm0/90v5vcF7nuBKt5WH8PVx7GgxZTupZZXfmSmO8xSOv8Exz/aOH/vR8UjaGVYyIEIgQ8
TL9ovrWH3IsSQvQ1VmPMJRgpWzttl9LkZ6/sohRMsEUxAAAAgQDA1XtCuRxEveY5nH5WGS
PEGwSVPz9qGUDTjIfkLyaQFJR8h0Poety085a4Iqag49xiyEFMEvC6CKd49eq9LjziGTM+
GY7BRdHp/4AR9AgcSdw07fWQXMuOdghnU+oY8RFtTYoDp5Z9nJyrGn4TmHO+vJVo/Ape7P
mc0WTD22ABEAAAAIEA2gENTgkzyj8/2ND5VFY37PTXagnE89YvXdqTtthK3t3rFUAmb3uG
3mgrEsNwy9Fu0FAP2SE1ZlSghRAt/CZ748w3pr0chAgIZNnZL6UKWmEiC7i6acbIG/M0cx
qewiKjkiFnb1fwrHFqNRdjtgXMrY77uCEdEa03exMxIM3VyIUAAACBANi5vdDk/k7ieWNg
eYwiLcsc92TC5LPHaM1BuXOdM5V9QXeev2P2YiD7ZHOo5hIlgNVXXhcdnSd/dcQyIP6zCC
jHYDRdX0YYQjFlbZpyCe0Zz57VXIK1wPnAOdoEyboABVPJC7T+N8PaZwzjdHpnFIeSxKDd
nu5d0NiPWUkvHKs3AAAAFWpzaW1vbmV0dGlAanNpbW9uZXR0aQECAwQF
-----END OPENSSH PRIVATE KEY-----`)
|
[
7
] |
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
package mocks
import (
entity "go-resepee-api/entity"
mock "github.com/stretchr/testify/mock"
)
// RecipeMaterialRepositoryInterface is an autogenerated mock type for the RecipeMaterialRepositoryInterface type
type RecipeMaterialRepositoryInterface struct {
mock.Mock
}
// FindByRecipeID provides a mock function with given fields: recipeID
func (_m *RecipeMaterialRepositoryInterface) FindByRecipeID(recipeID int) ([]entity.RecipeMaterial, error) {
ret := _m.Called(recipeID)
var r0 []entity.RecipeMaterial
if rf, ok := ret.Get(0).(func(int) []entity.RecipeMaterial); ok {
r0 = rf(recipeID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]entity.RecipeMaterial)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(int) error); ok {
r1 = rf(recipeID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Store provides a mock function with given fields: recipeMaterial
func (_m *RecipeMaterialRepositoryInterface) Store(recipeMaterial *entity.RecipeMaterial) (entity.RecipeMaterial, error) {
ret := _m.Called(recipeMaterial)
var r0 entity.RecipeMaterial
if rf, ok := ret.Get(0).(func(*entity.RecipeMaterial) entity.RecipeMaterial); ok {
r0 = rf(recipeMaterial)
} else {
r0 = ret.Get(0).(entity.RecipeMaterial)
}
var r1 error
if rf, ok := ret.Get(1).(func(*entity.RecipeMaterial) error); ok {
r1 = rf(recipeMaterial)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
|
[
4
] |
package test
import (
"testing"
sqlmock "github.com/DATA-DOG/go-sqlmock"
"github.com/flanksource/canary-checker/cmd"
"github.com/flanksource/canary-checker/pkg"
)
type args struct {
config pkg.Config
}
type test struct {
name string
args args
want []pkg.CheckResult // each config can result in multiple checks
}
func TestRunChecks(t *testing.T) {
tests := []test{
{
name: "http_pass",
args: args{
pkg.ParseConfig("../fixtures/http_pass.yaml"),
},
want: []pkg.CheckResult{
{
Pass: true,
Invalid: false,
Endpoint: "https://httpstat.us/200",
Metrics: []pkg.Metric{},
},
},
},
{
name: "http_fail",
args: args{
pkg.ParseConfig("../fixtures/http_fail.yaml"),
},
want: []pkg.CheckResult{
{
Pass: false,
Invalid: true,
Endpoint: "https://ttpstat.us/500",
Metrics: []pkg.Metric{},
},
{
Pass: false,
Invalid: false,
Endpoint: "https://httpstat.us/500",
Metrics: []pkg.Metric{},
},
},
},
{
name: "postgres_fail",
args: args{
pkg.ParseConfig("../fixtures/postgres_fail.yaml"),
},
want: []pkg.CheckResult{
{
Pass: false,
Invalid: false,
Endpoint: "user=pqgotest dbname=pqgotest sslmode=verify-full",
Metrics: []pkg.Metric{},
},
},
},
{
name: "dns_fail",
args: args{
pkg.ParseConfig("../fixtures/dns_fail.yaml"),
},
want: []pkg.CheckResult{
{
Pass: false,
Invalid: false,
Endpoint: "8.8.8.8:53",
Metrics: []pkg.Metric{},
Message: "Check failed: A flanksource.com on 8.8.8.8. Got [34.65.228.161], expected [8.8.8.8]",
},
{
Pass: false,
Invalid: false,
Endpoint: "8.8.8.8:53",
Metrics: []pkg.Metric{},
Message: "Check failed: PTR 8.8.8.8 on 8.8.8.8. Records count is less then minrecords",
},
{
Pass: false,
Invalid: false,
Endpoint: "8.8.8.8:53",
Metrics: []pkg.Metric{},
Message: "Check failed: CNAME dns.google on 8.8.8.8. Got [dns.google.], expected [wrong.google.]",
},
{
Pass: false,
Invalid: false,
Endpoint: "8.8.8.8:53",
Metrics: []pkg.Metric{},
Message: "Check failed: MX flanksource.com on 8.8.8.8. Got [alt1.aspmx.l.google.com. 5 alt2.aspmx.l.google.com. 5 aspmx.l.google.com. 1 aspmx2.googlemail.com. 10 aspmx3.googlemail.com. 10], expected [alt1.aspmx.l.google.com. 5 alt2.aspmx.l.google.com. 5 aspmx.l.google.com. 1]",
},
{
Pass: false,
Invalid: false,
Endpoint: "8.8.8.8:53",
Metrics: []pkg.Metric{},
Message: "Check failed: TXT flanksource.com on 8.8.8.8. Records count is less then minrecords",
},
{
Pass: false,
Invalid: false,
Endpoint: "8.8.8.8:53",
Metrics: []pkg.Metric{},
Message: "Check failed: NS flanksource.com on 8.8.8.8. Got [ns-1450.awsdns-53.org. ns-1896.awsdns-45.co.uk. ns-908.awsdns-49.net. ns-91.awsdns-11.com.], expected [ns-91.awsdns-11.com.]",
},
},
},
{
name: "dns_pass",
args: args{
pkg.ParseConfig("../fixtures/dns_pass.yaml"),
},
want: []pkg.CheckResult{
{
Pass: true,
Invalid: false,
Endpoint: "8.8.8.8:53",
Metrics: []pkg.Metric{},
Message: "Successful check on 8.8.8.8. Got [34.65.228.161]",
},
{
Pass: true,
Invalid: false,
Endpoint: "8.8.8.8:53",
Metrics: []pkg.Metric{},
Message: "Successful check on 8.8.8.8. Got [dns.google.]",
},
{
Pass: true,
Invalid: false,
Endpoint: "8.8.8.8:53",
Metrics: []pkg.Metric{},
Message: "Successful check on 8.8.8.8. Got [dns.google.]",
},
{
Pass: true,
Invalid: false,
Endpoint: "8.8.8.8:53",
Metrics: []pkg.Metric{},
Message: "Successful check on 8.8.8.8. Got [alt1.aspmx.l.google.com. 5 alt2.aspmx.l.google.com. 5 aspmx.l.google.com. 1 aspmx2.googlemail.com. 10 aspmx3.googlemail.com. 10]",
},
{
Pass: true,
Invalid: false,
Endpoint: "8.8.8.8:53",
Metrics: []pkg.Metric{},
Message: "Successful check on 8.8.8.8. Got [google-site-verification=IIE1aJuvqseLUKSXSIhu2O2lgdU_d8csfJjjIQVc-q0]",
},
{
Pass: true,
Invalid: false,
Endpoint: "8.8.8.8:53",
Metrics: []pkg.Metric{},
Message: "Successful check on 8.8.8.8. Got [ns-1450.awsdns-53.org. ns-1896.awsdns-45.co.uk. ns-908.awsdns-49.net. ns-91.awsdns-11.com.]",
},
},
},
}
runTests(t, tests)
}
// TestPostgresCheckWithDbMock verifies connectivity against a mock DB
func TestPostgresCheckWithDbMock(t *testing.T) {
// create a mock db
db, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
}
defer db.Close()
// This is the result we expect
rows := sqlmock.NewRows([]string{"column"}).
AddRow(1)
// declare our expectation
mock.ExpectQuery("^SELECT 1$").WillReturnRows(rows)
config := pkg.ParseConfig("../fixtures/postgres_succeed.yaml")
results := cmd.RunChecks(config)
foundResults := make([]*pkg.CheckResult, 0)
for result := range results {
foundResults = append(foundResults, result)
}
expectationErr := mock.ExpectationsWereMet()
if expectationErr != nil {
t.Errorf("Test %s failed. Expected queries not made: %v", "postgres_succeed", expectationErr)
}
for _, result := range foundResults {
if result.Invalid {
t.Errorf("Test %s failed. Expected valid result, but found %v", "postgres_succeed", result.Invalid)
}
if !result.Pass {
t.Errorf("Test %s failed. Expected PASS result, but found %v", "postgres_succeed", result.Pass)
}
}
}
func runTests(t *testing.T, tests []test) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
checkResults := cmd.RunChecks(tt.args.config)
i := 0
foundResults := make([]*pkg.CheckResult, 0)
for res := range checkResults {
// check if this result is extra
if i > len(tt.want)-1 {
t.Errorf("Test %s failed. Found unexpected extra result is %v", tt.name, res)
} else {
// Not checking durations, since we don't require strict equality there.
if res.Invalid != tt.want[i].Invalid ||
res.Pass != tt.want[i].Pass ||
(tt.want[i].Endpoint != "" && res.Endpoint != tt.want[i].Endpoint) ||
(tt.want[i].Message != "" && res.Message != tt.want[i].Message) {
t.Errorf("Test %s failed. Expected result is %v, but found %v", tt.name, tt.want, res)
}
}
foundResults = append(foundResults, res)
i++
}
// check if we have more expected results than were found
if len(tt.want) > len(foundResults) {
t.Errorf("Test %s failed. Expected %d results, but found %d ", tt.name, len(tt.want), len(foundResults))
for i := len(foundResults); i <= len(tt.want)-1; i++ {
t.Errorf("Did not find %s %v", tt.name, tt.want[i])
}
}
})
}
}
|
[
4
] |
package flags
import (
"errors"
"flag"
"os"
"reflect"
"strings"
)
// cmds is used for tracking the 'commands' defined in the user provided struct
//
// TODO: avoid package level scoped variables by injecting as a dependency.
//
var cmds = make(map[string]bool)
var (
ErrNoArgs = errors.New("no flags or commands provided")
ErrWrongType = errors.New("expected a pointer to a struct for the schema")
)
func Parse(s interface{}) error {
args := os.Args[1:]
if len(args) == 0 {
return ErrNoArgs
}
// ValueOf() returns the concrete struct value (e.g. &{...})
// Indirect() returns the value that is pointed to (e.g. the actual struct)
//
v := reflect.Indirect(reflect.ValueOf(s))
// we acquire the type of the value (e.g. main.Schema)
//
// NOTE: we could have done this with reflect.TypeOf(s).Elem(), but calling
// Type() on the actual value reads a bit cleaner.
//
st := v.Type()
// we code defensively and ensure a struct was provided; otherwise we return an
// error now to avoid panics later on in code that presumes a struct was given.
//
if v.Kind() != reflect.Struct {
return ErrWrongType
}
// TODO: redesign the IterFields function.
//
// it works, but it's ugly: having to thread a control var like `recurse`
// through is awkward.
//
recurse := false
// iterate over the top level fields of the user provided struct,
// and create the required flags.
//
IterFields(recurse, st, v, func(field reflect.Value, sf reflect.StructField, cmd ...string) {
switch field.Kind() {
case reflect.Bool:
var v bool
flag.BoolVar(&v, strings.ToLower(sf.Name), false, sf.Tag.Get("usage"))
flag.BoolVar(&v, sf.Tag.Get("short"), false, sf.Tag.Get("usage")+" (shorthand)")
case reflect.Int:
var v int
flag.IntVar(&v, strings.ToLower(sf.Name), 0, sf.Tag.Get("usage"))
flag.IntVar(&v, sf.Tag.Get("short"), 0, sf.Tag.Get("usage")+" (shorthand)")
case reflect.String:
var v string
flag.StringVar(&v, strings.ToLower(sf.Name), "", sf.Tag.Get("usage"))
flag.StringVar(&v, sf.Tag.Get("short"), "", sf.Tag.Get("usage")+" (shorthand)")
}
})
flag.Parse()
// iterate over the top level fields of the user provided struct,
// and populate the fields with the parsed flag values.
//
IterFields(recurse, st, v, func(field reflect.Value, sf reflect.StructField, cmd ...string) {
flag.Visit(func(f *flag.Flag) {
// annoyingly you can't get to the flag's concrete value, so we have to
// first type assert it to a flag.Getter which then gives us an interface
// (e.g. Get()) for accessing the internal value which we finally can
// type assert into the correct value type (and thus we can assign that
// to our struct field).
//
getter, ok := f.Value.(flag.Getter)
if ok {
if f.Name == strings.ToLower(sf.Name) || f.Name == sf.Tag.Get("short") {
switch field.Kind() {
case reflect.Bool:
if b, ok := getter.Get().(bool); ok {
field.Set(reflect.ValueOf(b))
}
case reflect.Int:
if i, ok := getter.Get().(int); ok {
field.Set(reflect.ValueOf(i))
}
case reflect.String:
if s, ok := getter.Get().(string); ok {
field.Set(reflect.ValueOf(s))
}
}
}
}
})
})
cmd := IdentifyCommand(cmds, args)
cmdFlags := CommandFlags(cmd, flag.Args())
cfs := CommandFlagSet(cmd, cmdFlags, st, v)
err := cfs.Parse(cmdFlags)
if err != nil {
return err
}
recurse = true
// iterate over the command fields of the user provided struct,
// and populate the fields with the parsed flagset values.
//
IterFields(recurse, st, v, func(field reflect.Value, sf reflect.StructField, cmd ...string) {
cfs.Visit(func(f *flag.Flag) {
// annoyingly you can't get to the flag's concrete value, so we have to
// first type assert it to a flag.Getter which then gives us an interface
// (e.g. Get()) for accessing the internal value which we finally can
// type assert into the correct value type (and thus we can assign that
// to our struct field).
//
getter, ok := f.Value.(flag.Getter)
if ok {
if f.Name == strings.ToLower(sf.Name) || f.Name == sf.Tag.Get("short") {
switch field.Kind() {
case reflect.Bool:
if b, ok := getter.Get().(bool); ok {
field.Set(reflect.ValueOf(b))
}
case reflect.Int:
if i, ok := getter.Get().(int); ok {
field.Set(reflect.ValueOf(i))
}
case reflect.String:
if s, ok := getter.Get().(string); ok {
field.Set(reflect.ValueOf(s))
}
}
}
}
})
})
return nil
}
// IterFields iterates over all fields of a struct, including nested structs,
// and processes their individual fields by passing them into a callback.
//
func IterFields(recurse bool, st reflect.Type, v reflect.Value, callback func(f reflect.Value, sf reflect.StructField, cmd ...string)) {
// NOTE: if we're passed something that isn't a struct, then the program will
// panic when we call NumField() as this is the reality of using reflection.
//
// we are relying on the consumer of this package to follow the instructions
// given and to provide us with what we are expecting.
//
// so if we're not careful, then we violate the language type safety.
// but we protect against this in the calling function by checking for a
// struct before calling IterFields.
//
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
// we call Field() on the struct type so we can get a StructField type,
// which we have to do in order to access the struct 'tags' on the field.
//
// it also gives us access to the field name so we can create the various
// flags necessary (as well as determine the command that a user runs).
//
sf := st.Field(i)
if field.Kind() == reflect.Struct {
// when we see a struct we expect by convention for this to be a
// 'command' that will have its own set of flags.
//
cmd := strings.ToLower(sf.Name)
if _, ok := cmds[cmd]; !ok {
cmds[cmd] = true
}
// we use CanInterface() because otherwise if we were to call Interface()
// on a field that was unexported, then the program would panic.
//
if recurse && field.CanInterface() {
// we use Interface() to get the nested struct value as an interface{}.
// this is done because if we called TypeOf on the field variable, then
// we would end up with reflect.Value when really we need the nested
// struct's concrete type definition (e.g. struct {...}).
//
st := reflect.TypeOf(field.Interface())
for i := 0; i < field.NumField(); i++ {
// again, we get the field from the nested struct, as well as acquire
// its StructField type for purposes already explained above.
//
field := field.Field(i)
st := st.Field(i)
// because our callback function is going to attempt to set values on
// these struct fields, we need to be sure they are 'settable' first.
//
if field.CanSet() {
callback(field, st, cmd)
}
}
}
} else {
// we check if recurse is false because we don't want our nested commands
// to accidentally add the top-level fields into our command flagset and
// thus -h/--help would show the top-level fields in the help output.
//
// also, because our callback function is going to attempt to set values
// on these struct fields, we need to be sure they are 'settable' first.
//
if !recurse && field.CanSet() {
callback(field, sf)
}
}
}
}
// IdentifyCommand parses the arguments provided looking for a 'command'.
//
// this implementation presumes that the format of the arguments will be...
//
// <program> <flag(s)> <command> <flag(s) for command>
//
func IdentifyCommand(cmds map[string]bool, args []string) string {
commandIndex := 0
commandSeen := false
for _, arg := range args {
if commandSeen {
break
}
if strings.HasPrefix(arg, "-") {
commandIndex++
continue
}
for cmd := range cmds {
if arg == cmd {
commandSeen = true
break
}
}
if !commandSeen {
commandIndex++
}
}
if !commandSeen {
return ""
}
return args[commandIndex]
}
// CommandFlags parses the flags that are provided after the 'command'.
//
func CommandFlags(cmd string, args []string) []string {
for i, v := range args {
if v == cmd {
return args[i+1:]
}
}
return []string{}
}
// CommandFlagSet defines flags for the command as a FlagSet.
//
func CommandFlagSet(cmd string, cmdFlags []string, st reflect.Type, v reflect.Value) *flag.FlagSet {
cfs := flag.NewFlagSet(cmd, flag.ExitOnError)
recurse := true
// iterate over the nested fields of the user provided struct,
// and create the required flagset flags.
//
IterFields(recurse, st, v, func(field reflect.Value, sf reflect.StructField, currentCmd ...string) {
// we're overloading variadic arguments so that some iterations over our struct
// can pass a cmd while others (that aren't commands) pass none.
//
// this means when we explicitly access the first index, there isn't ever
// any expectation for there to be more than one command passed through.
//
if currentCmd[0] == cmd {
switch field.Kind() {
case reflect.Bool:
var v bool
cfs.BoolVar(&v, strings.ToLower(sf.Name), false, sf.Tag.Get("usage"))
cfs.BoolVar(&v, sf.Tag.Get("short"), false, sf.Tag.Get("usage")+" (shorthand)")
case reflect.Int:
var v int
cfs.IntVar(&v, strings.ToLower(sf.Name), 0, sf.Tag.Get("usage"))
cfs.IntVar(&v, sf.Tag.Get("short"), 0, sf.Tag.Get("usage")+" (shorthand)")
case reflect.String:
var v string
cfs.StringVar(&v, strings.ToLower(sf.Name), "", sf.Tag.Get("usage"))
cfs.StringVar(&v, sf.Tag.Get("short"), "", sf.Tag.Get("usage")+" (shorthand)")
}
}
})
return cfs
}
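A minimal usage sketch for the flags package above; the Schema struct, its tag values, the import path, and the example command line are illustrative assumptions rather than part of the package.
package main

import (
	"fmt"
	"log"

	flags "example.com/flags" // assumed import path for the package above
)

type Schema struct {
	Debug bool `short:"d" usage:"enable debug output"`
	Greet struct {
		Name string `short:"n" usage:"name to greet"`
	}
}

func main() {
	var s Schema
	// e.g. `prog -debug greet -n Alice` sets s.Debug and s.Greet.Name.
	if err := flags.Parse(&s); err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.Debug, s.Greet.Name)
}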
|
[
4
] |
package utils
import (
"encoding/json"
"math/rand"
"net/http"
"os"
"strconv"
"time"
)
// Message - util
func Message(status bool, message string) map[string]interface{} {
return map[string]interface{}{"status": status, "message": message}
}
// Respond - util
func Respond(w http.ResponseWriter, data map[string]interface{}) {
w.Header().Add("Content-Type", "application/json")
json.NewEncoder(w).Encode(data)
}
// MultipleRespond - util
func MultipleRespond(w http.ResponseWriter, data []map[string]interface{}) {
w.Header().Add("Content-Type", "application/json")
json.NewEncoder(w).Encode(data)
}
// randomInt returns an int in [min, max)
func randomInt(min, max int) int {
return min + rand.Intn(max-min)
}
// RandomAvatarUrl returns a random avatar URL (icon_1..icon_6) built from the api_icon env var
func RandomAvatarUrl() string {
rand.Seed(time.Now().UnixNano())
number := randomInt(1, 7)
url := os.Getenv("api_icon") + "icon_" + strconv.Itoa(number) + ".png"
return url
}
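A minimal sketch of wiring these helpers into an HTTP handler; the route name and message text are illustrative assumptions, shown as if defined alongside the package above.
func healthHandler(w http.ResponseWriter, r *http.Request) {
	// Reply with {"status": true, "message": "ok"} as JSON.
	Respond(w, Message(true, "ok"))
}

func registerRoutes(mux *http.ServeMux) {
	mux.HandleFunc("/health", healthHandler)
}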
|
[
1
] |