code
stringlengths 67
15.9k
| labels
listlengths 1
4
|
---|---|
package down
import (
"github.com/tiptok/gonat/model"
)
//DOWN_EXG_MSG_APPLY_FOR_MONITOR_END_ACK is the downlink ACK (message id 9208)
//for a request to cancel the exchange of a designated vehicle's positioning
//information.
type DOWN_EXG_MSG_APPLY_FOR_MONITOR_END_ACK struct {
	model.EntityBase
	Vehicle_No string //vehicle plate number
	Vehicle_Color byte //vehicle plate color
	/*
	/// Result of cancelling the positioning-information exchange:
	/// 0: cancellation succeeded
	/// 1: no matching prior request exists
	/// 2: other
	*/
	Result byte
}
// GetMsgId returns the protocol message-id constant this entity is carried
// under (the downstream-link dynamic information exchange id).
func (e *DOWN_EXG_MSG_APPLY_FOR_MONITOR_END_ACK) GetMsgId() interface{} {
	return model.J从链路动态信息交换
}
// GetEntityBase exposes the embedded EntityBase (common header fields).
func (e *DOWN_EXG_MSG_APPLY_FOR_MONITOR_END_ACK) GetEntityBase() *model.EntityBase {
	return e.EntityBase.GetEntityBase()
}
// GetDBSql returns the SQL used to persist this message; empty means this
// message type is not stored.
func (e *DOWN_EXG_MSG_APPLY_FOR_MONITOR_END_ACK) GetDBSql() string {
	return ""
}
|
[
3
] |
package compiler
import (
"strconv"
"github.com/Armienn/MiniatureEngine/machine"
)
// constructor accumulates assembler-construction state: a symbol table of
// aliases/line tags, the emitted operations, and the first error encountered
// (processing stops contributing once err is non-nil).
type constructor struct {
	alias map[string]int
	operations []machine.Operation
	err error
}
// newConstructor returns a constructor with an empty alias table and
// capacity pre-reserved for 256 operations.
func newConstructor() *constructor {
	return &constructor{
		alias:      make(map[string]int),
		operations: make([]machine.Operation, 0, 256),
	}
}
//ConstructProgram constructs a list of operations based on a list of
//assembler commands. The first error encountered stops further processing
//and is returned alongside whatever operations were built before it.
func ConstructProgram(lines []Line) ([]machine.Operation, error) {
	c := newConstructor()
	for _, l := range lines {
		c.processLine(l)
	}
	return c.operations, c.err
}
// processLine dispatches one parsed line to the handler for its command
// type. It is a no-op once an earlier line has recorded an error.
func (c *constructor) processLine(line Line) {
	if c.err != nil {
		return
	}
	switch line.CommandType {
	case Alias:
		c.processAlias(line)
	case LineTag:
		c.processLineTag(line)
	case Command:
		c.processCommand(line)
	}
}
// processAlias records Op1 as an alias for the integer literal in Op2.
// A malformed number is recorded as the constructor's error.
func (c *constructor) processAlias(line Line) {
	v, err := strconv.Atoi(line.Op2)
	if err != nil {
		c.err = err
		return
	}
	c.alias[line.Op1] = v
}
// processLineTag aliases Op1 to the current operation count, i.e. the
// address of the next operation to be emitted (a jump target).
func (c *constructor) processLineTag(line Line) {
	c.alias[line.Op1] = len(c.operations)
}
// processCommand appends one machine operation built from the line's
// command and resolved operand values.
func (c *constructor) processCommand(line Line) {
	first, second := c.getValues(line)
	op := machine.Operation{Type: line.Command}
	op.FirstOperand = first
	op.SecondOperand = second
	c.operations = append(c.operations, op)
}
// getValues resolves the line's operands (alias lookup, else integer parse)
// into two operand bytes. If Op1 alone exceeds one byte it is split into a
// high/low byte pair; otherwise each operand is truncated to one byte.
//
// Bug fix: previously an error from resolving Op1 was overwritten when Op2
// resolved successfully, so the bad operand was silently accepted. Each
// resolution error is now recorded immediately.
func (constructor *constructor) getValues(line Line) (byte, byte) {
	var op1, op2 int
	if line.Op1 != "" {
		v, err := constructor.getValue(line.Op1)
		if err != nil {
			constructor.err = err
			return 0, 0
		}
		op1 = v
	}
	if line.Op2 != "" {
		v, err := constructor.getValue(line.Op2)
		if err != nil {
			constructor.err = err
			return 0, 0
		}
		op2 = v
	}
	if op1 > 255 {
		// Wide first operand: emit it as a 16-bit big-endian pair.
		return byte(op1 / 256), byte(op1 % 256)
	}
	return byte(op1 % 256), byte(op2 % 256)
}
// getValue resolves source: a known alias wins, otherwise it must parse
// as a decimal integer.
func (c *constructor) getValue(source string) (int, error) {
	if v, ok := c.alias[source]; ok {
		return v, nil
	}
	return strconv.Atoi(source)
}
|
[
3
] |
package repositories
import "github.com/NicolasDeveloper/tracker-microservices/internal/trip/models"
//ITripRepository repository abstraction for persisting trips and their tracks.
type ITripRepository interface {
	// Save persists a trip.
	Save(trip models.Trip) error
	// GetOpenTrip returns the user's open trip (presumably at most one per
	// user — confirm against the implementation).
	GetOpenTrip(userID string) (models.Trip, error)
	// GetTripsByUser returns all trips recorded for the user.
	GetTripsByUser(userID string) ([]models.Trip, error)
	// CloseTrip closes the trip, attaching the given tracks.
	CloseTrip(trip models.Trip, tracks []models.Track) error
	// UpdateTracks updates the track list associated with the trip.
	UpdateTracks(trip models.Trip, tracks []models.Track) error
}
|
[
3
] |
package main
import (
"fmt"
"github.com/boj/redistore"
"github.com/gorilla/sessions"
"github.com/labstack/echo-contrib/session"
"net/http"
"github.com/flosch/pongo2"
"github.com/jmoiron/sqlx"
"github.com/labstack/echo"
"golang.org/x/crypto/bcrypt"
"log"
)
var (
	// db is the shared database handle, assigned in main.
	db DB
	// Page templates are parsed once at startup; pongo2.Must panics if a
	// template file is missing or malformed.
	templateSignup = pongo2.Must(pongo2.FromFile("template/signup.html"))
	templatelogin = pongo2.Must(pongo2.FromFile("template/login.html"))
	templateIndex = pongo2.Must(pongo2.FromFile("template/index.html"))
)
// DB embeds *sqlx.DB so query helpers (InsertUser, FetchUsers,
// FetchUserByID — defined elsewhere in the package) hang off one type.
type DB struct {
	*sqlx.DB
}
// handlerIndex renders the index page, exposing the logged-in userID
// (if any) from the session to the template.
func handlerIndex(c echo.Context) error {
	sess, err := session.Get("session", c)
	if err != nil {
		// Bug fix: previously execution continued after a session error and
		// dereferenced sess.Values, risking a nil-pointer panic.
		log.Printf("session error:%v\n", err)
		return c.String(http.StatusInternalServerError, err.Error())
	}
	body, err := templateIndex.Execute(
		pongo2.Context{
			"userID": sess.Values["userID"],
		},
	)
	if err != nil {
		log.Printf("pongo2 error:%v\n", err)
		return c.String(http.StatusInternalServerError, err.Error())
	}
	return c.HTML(http.StatusOK, body)
}
// handlerGetSingUp renders the signup form.
func handlerGetSingUp(c echo.Context) error {
	body, err := templateSignup.Execute(
		pongo2.Context{},
	)
	if err != nil {
		// Bug fix: the 500 response was written without returning, so a
		// 200 with an empty body was also sent afterwards.
		return c.String(http.StatusInternalServerError, err.Error())
	}
	return c.HTML(http.StatusOK, body)
}
// handlerPostSignUp creates a user from the posted form values, storing a
// bcrypt hash of the password.
func handlerPostSignUp(c echo.Context) error {
	userID := c.FormValue("userID")
	userName := c.FormValue("userName")
	password, err := bcrypt.GenerateFromPassword([]byte(c.FormValue("password")), bcrypt.DefaultCost)
	if err != nil {
		// Bug fix: previously the handler logged and carried on, inserting
		// a user with an empty password hash.
		log.Printf("password hash error:%v\n", err)
		return c.String(http.StatusInternalServerError, err.Error())
	}
	IDNumber, err := db.InsertUser(userID, userName, string(password))
	if err != nil {
		log.Printf("insert error:%v\n", err)
		return c.String(http.StatusInternalServerError, err.Error())
	}
	fmt.Printf("insert number %d\n", IDNumber)
	db.FetchUsers()
	return c.String(http.StatusOK, "Hello, World!")
}
// handlerGetLogin renders the login form.
func handlerGetLogin(c echo.Context) error {
	body, err := templatelogin.Execute(
		pongo2.Context{},
	)
	if err != nil {
		// Bug fix: the 500 response was written without returning, so a
		// 200 with an empty body was also sent afterwards.
		return c.String(http.StatusInternalServerError, err.Error())
	}
	return c.HTML(http.StatusOK, body)
}
// handlerPostLogin authenticates the posted credentials. On success it
// stores userID in a week-long, HTTP-only session cookie and redirects to
// "/"; otherwise it re-renders the login form with a flash message and 401.
func handlerPostLogin(c echo.Context) error {
	userID := c.FormValue("userID")
	password := c.FormValue("password")
	if authentication(userID, password) {
		sess, err := session.Get("session", c)
		if err != nil {
			// Bug fix: previously execution continued after a session error
			// and dereferenced a possibly-nil session.
			log.Printf("session get error:%v\n", err)
			return c.String(http.StatusInternalServerError, err.Error())
		}
		sess.Options = &sessions.Options{
			Path:     "/",
			MaxAge:   86400 * 7, // one week
			HttpOnly: true,
		}
		sess.Values["userID"] = userID
		if err := sess.Save(c.Request(), c.Response()); err != nil {
			log.Printf("session save error:%v\n", err)
		}
		return c.Redirect(http.StatusFound, "/")
	}
	body, err := templatelogin.Execute(
		pongo2.Context{
			"flash":  "ログイン失敗",
			"userID": userID,
		},
	)
	if err != nil {
		// Bug fix: the 500 response was written without returning.
		return c.String(http.StatusInternalServerError, err.Error())
	}
	return c.HTML(http.StatusUnauthorized, body)
}
// handlerLogout expires the session cookie (MaxAge -1) and responds 200.
func handlerLogout(c echo.Context) error {
	sess, err := session.Get("session", c)
	if err != nil {
		// Bug fix: previously execution continued after a session error and
		// dereferenced a possibly-nil session.
		log.Printf("session error:%v\n", err)
		return c.String(http.StatusInternalServerError, err.Error())
	}
	sess.Options = &sessions.Options{
		MaxAge: -1,
		Path:   "/",
	}
	// Bug fix: the Save error was silently discarded.
	if err := sess.Save(c.Request(), c.Response()); err != nil {
		log.Printf("session save error:%v\n", err)
	}
	return c.String(http.StatusOK, "Hello, World!")
}
// authentication reports whether password matches the stored bcrypt hash
// for userID. Unknown users and hash mismatches both yield false.
func authentication(userID, password string) bool {
	user, err := db.FetchUserByID(userID)
	if err != nil {
		// Bug fix: previously the fetch failure was only logged and the
		// hash comparison ran against a zero-value user anyway.
		log.Printf("userInfo fetch error:%v\n", err)
		return false
	}
	if err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password)); err != nil {
		log.Printf("authenticattion error:%v\n", err)
		return false
	}
	return true
}
// main wires up the DB, the Redis-backed session store, and the Echo routes,
// then serves on :1323.
func main() {
	// DB connection
	// NOTE(review): sqlx.Open("mysql", ...) requires the MySQL driver to be
	// registered (blank import of github.com/go-sql-driver/mysql) — confirm
	// it is imported elsewhere in this package, otherwise Open fails at
	// runtime with "unknown driver".
	sqlxdb, err := sqlx.Open("mysql", fmt.Sprintf(
		"%s:%s@tcp(%s:%s)/%s",
		"local_user",
		"local_password",
		"localhost",
		"3306",
		"site",
	))
	if err != nil {
		log.Fatalf("DB Connection Error: %v", err)
		return
	}
	db = DB{sqlxdb}
	// Sessions are stored in Redis on localhost:6379.
	store, err := redistore.NewRediStore(10, "tcp", ":6379", "", []byte("secret-key"))
	if err != nil {
		panic(err)
	}
	defer store.Close()
	// Echo instance
	e := echo.New()
	e.Use(session.Middleware(store))
	// Routes
	e.GET("/", handlerIndex)
	e.GET("/signup", handlerGetSingUp)
	e.POST("/signup", handlerPostSignUp)
	e.GET("/login", handlerGetLogin)
	e.POST("/login", handlerPostLogin)
	e.DELETE("/logout", handlerLogout)
	// Start server
	e.Logger.Fatal(e.Start(":1323"))
}
|
[
3
] |
package main
import (
"context"
"net"
"sync"
plogger "github.com/heytribe/go-plogger"
"github.com/heytribe/live-webrtcsignaling/gst"
"github.com/heytribe/live-webrtcsignaling/srtp"
)
// WebRTC pipeline
// rtpbin name=rtpbin rtp-profile=4 \
// udpsrc port=10001 caps="application/x-rtp,media=(string)video,payload=(int)96,clock-rate=(int)90000,encoding-name=(string)VP8" ! rtpbin.recv_rtp_sink_0 \
// rtpbin. ! rtpvp8depay ! vp8dec ! x264enc ! queue ! mpegtsmux name=tsmux ! queue ! filesink location=prout.ts \
// udpsrc port=10002 ! rtpbin.recv_rtcp_sink_0 \
// udpsrc port=10003 caps="application/x-rtp,media=(string)audio,payload=(int)111,clock-rate=(int)48000,encoding-name=(string)OPUS" ! rtpbin.recv_rtp_sink_1 \
// rtpbin. ! rtpopusdepay ! opusdec ! voaacenc ! queue ! tsmux. \
// udpsrc port=10004 ! rtpbin.recv_rtcp_sink_1
// GstSession holds the state of one GStreamer-backed media session: buffered
// RTP/RTCP payload channels, encoder/decoder session links, and bitrate
// bookkeeping. A session acts either as a decoder (with attached Encoders)
// or as an encoder (referencing its decoder) — confirm against the rest of
// the package.
type GstSession struct {
	id int
	ctx context.Context
	elements *ProtectedMap
	callbackCtx *gst.CallbackCtx
	callbackCtxEncoder *gst.CallbackCtx
	AudioBufferList chan []byte
	AudioDataStartFeeding chan bool
	AudioDataStarted bool
	VideoBufferList chan []byte
	VideoDataStartFeeding chan bool
	VideoDataStarted bool
	AudioRtcpBufferList chan []byte
	VideoRtcpBufferList chan []byte
	// EncodersMutex guards the Encoders slice.
	EncodersMutex sync.RWMutex
	Encoders []*GstSession
	decoder *GstSession
	audioReceived bool
	videoReceived bool
	WebrtcUpCh chan bool
	WebrtcUpSignalSent bool
	audioBitrate int
	videoBitrate int
	// maxVideoBitrate is the hard ceiling; limitVideoBitrate is the current
	// (adjustable) cap; videoBitrate is the value in effect.
	maxVideoBitrate int
	limitVideoBitrate int
	audio chan *srtp.PacketRTP
	video chan *srtp.PacketRTP
	RawAudioSampleList chan *gst.GstSample
	RawVideoSampleList chan *gst.GstSample
	/*VideoRtcpBufferList chan []byte
	AudioRtcpBufferList chan []byte*/
	CodecOption CodecOptions
	HardwareCodecUsed bool
	lastJitters []uint32
}
// gstSessionId is a package-level session counter (int32 — presumably bumped
// atomically where ids are assigned; it is not referenced in this chunk).
var gstSessionId int32 = 0
// NewGstSession builds a GstSession with all channels allocated, bitrates
// seeded from config and maxVideoBitrate (video starts at half the max),
// and a "GST"-prefixed logger context.
//
// NOTE(review): the parameters c, rAddr, vSsrcId and aSsrcId are accepted
// but unused in this function — confirm whether callers rely on the
// signature or whether they are vestigial.
func NewGstSession(ctx context.Context, audio chan *srtp.PacketRTP, video chan *srtp.PacketRTP, c *connectionUdp, rAddr *net.UDPAddr, vSsrcId uint32, aSsrcId uint32, codecOption CodecOptions, maxVideoBitrate int) (s *GstSession) {
	log := plogger.FromContextSafe(ctx)
	s = new(GstSession)
	s.elements = NewProtectedMap()
	s.AudioDataStartFeeding = make(chan bool)
	s.VideoDataStartFeeding = make(chan bool)
	s.AudioBufferList = make(chan []byte, 1000)
	s.VideoBufferList = make(chan []byte, 1000)
	s.AudioRtcpBufferList = make(chan []byte, 1000)
	s.VideoRtcpBufferList = make(chan []byte, 1000)
	s.AudioDataStarted = false
	s.VideoDataStarted = false
	s.audioBitrate = config.Bitrates.Audio.Max
	log.Warnf("NEWGSTSESSION WITH maxVideoBitrate = %d", maxVideoBitrate)
	// Start video at half the allowed maximum; the cap starts at the max.
	s.videoBitrate = maxVideoBitrate / 2
	s.limitVideoBitrate = maxVideoBitrate
	s.maxVideoBitrate = maxVideoBitrate
	s.WebrtcUpCh = make(chan bool)
	s.WebrtcUpSignalSent = false
	s.audioReceived = false
	s.videoReceived = false
	s.audio = audio
	s.video = video
	s.RawAudioSampleList = make(chan *gst.GstSample, 10000)
	s.RawVideoSampleList = make(chan *gst.GstSample, 10000)
	// add prefix to logger
	s.ctx = plogger.NewContextAddPrefix(ctx, "GST")
	s.CodecOption = codecOption
	return
}
// CodecOptions selects the video codec used by a session.
type CodecOptions int
const (
	CodecNone CodecOptions = iota
	CodecVP8
	CodecH264
)
// GetAudioBitrate returns the session's current audio bitrate (same units
// as config.Bitrates.Audio.Max — presumably bps, confirm).
func (s *GstSession) GetAudioBitrate() int {
	return s.audioBitrate
}
// GetVideoBitrate returns the session's current video bitrate.
func (s *GstSession) GetVideoBitrate() int {
	return s.videoBitrate
}
// AdjustEncodersBitrate clamps every encoder attached to this (decoder)
// session to the given decoding bitrate: encoders above it are lowered
// immediately; encoders below it only get their cap raised.
//
// NOTE(review): the ctx parameter is unused (logging uses s.ctx). Also,
// encoder fields are mutated while holding only the read lock — safe only
// if EncodersMutex guards just the slice itself, not element state; confirm.
func (s *GstSession) AdjustEncodersBitrate(ctx context.Context, bitrate uint32) {
	log := plogger.FromContextSafe(s.ctx)
	s.EncodersMutex.RLock()
	defer s.EncodersMutex.RUnlock()
	// search index
	for _, sEnc := range s.Encoders {
		if uint32(sEnc.videoBitrate) > bitrate {
			log.Warnf("DOWN ENCODING BITRATE %d -> %d because DECODING bitrate is %d\n", sEnc.videoBitrate, bitrate, bitrate)
			sEnc.SetEncodingVideoBitrate(int(bitrate))
			sEnc.limitVideoBitrate = int(bitrate)
		} else {
			if uint32(sEnc.videoBitrate) < bitrate && sEnc.limitVideoBitrate < int(bitrate) {
				log.Warnf("UP ENCODING BITRATE %d -> %d because DECODING bitrate is now higher %d", sEnc.videoBitrate, bitrate, bitrate)
				sEnc.limitVideoBitrate = int(bitrate)
				//sEnc.SetEncodingVideoBitrate(int(bitrate))
			}
		}
	}
}
|
[
3,
4,
6
] |
package test
import (
"fmt"
"math/big"
"testing"
)
// TestBigintToHex prints 10^28 (10,000,000,000 * 1e18) in hex, then the
// decimal value of hex string "5fc51700" — a scratch test for big.Int
// formatting and parsing.
func TestBigintToHex(t *testing.T) {
	// 10000000000000000000000000000
	reward := new(big.Int).Mul(big.NewInt(10000000000), big.NewInt(1e+18))
	fmt.Printf("0x%x\n", reward)
	parsed := big.NewInt(0)
	parsed.SetString("5fc51700", 16)
	fmt.Println(parsed)
}
|
[
3
] |
package main
import (
"encoding/csv"
"fmt"
"image"
"image/color"
"math"
"os"
"path/filepath"
"strconv"
"time"
"golang.org/x/image/bmp"
)
// rgb holds one pixel's 8-bit color channels.
type rgb struct {
	r uint8
	g uint8
	b uint8
}
const (
	// N_MUESTRAS is the number of timed repetitions (samples) per run.
	N_MUESTRAS = 100
)
var (
	// Run identifiers taken from the command line (set in main).
	pcVersion string
	t string
	imgVersion string
	// Path templates; main substitutes imgVersion into the %v slots.
	inputImgPath = filepath.FromSlash("./img/%v.bmp")
	outputImgPath = filepath.FromSlash("./img/inverted_%v.bmp")
	outputFileName = "pc%v-go-%v-version%v-tratamiento%s.txt"
	csvFile = "apilados.csv"
	metricasFile = "metricas.csv"
	// confianza is the z-value for a 95% confidence interval; errMax the
	// accepted error, used for the required-sample-size estimate.
	confianza = 1.96
	errMax = 0.25
)
// invert decodes the BMP at in, builds two pixel matrices (original and
// work copy) and delegates to writeImg, which benchmarks the inversion and
// writes the output image.
//
// NOTE(review): the out parameter is unused — writeImg writes to the global
// outputImgPath instead; confirm that is intended.
func invert(t int, in, out string) (err error) {
	dat, err := os.Open(in)
	if err != nil {
		return err
	}
	defer func() {
		// Bug fix: err must be a named result for this assignment to reach
		// the caller; previously the Close error was assigned to a local
		// and silently lost.
		if errClose := dat.Close(); err == nil {
			err = errClose
		}
	}()
	img, err := bmp.Decode(dat)
	if err != nil {
		return err
	}
	width := img.Bounds().Dx()
	height := img.Bounds().Dy()
	rgbArr0 := makeArray(height, width, img)
	rgbArr := makeArray(height, width, img)
	return writeImg(t, height, width, rgbArr0, rgbArr)
}
// makeArray copies img into a height x width matrix of 8-bit rgb pixels.
func makeArray(height, width int, img image.Image) [][]rgb {
	pixels := make([][]rgb, 0, height)
	for y := 0; y < height; y++ {
		row := make([]rgb, 0, width)
		for x := 0; x < width; x++ {
			// RGBA yields 16-bit channels; shift down to 8 bits per channel.
			red, green, blue, _ := img.At(x, y).RGBA()
			row = append(row, rgb{
				r: uint8(red >> 8),
				g: uint8(green >> 8),
				b: uint8(blue >> 8),
			})
		}
		pixels = append(pixels, row)
	}
	return pixels
}
// writeImg runs the selected inversion variant (version 1-5) N_MUESTRAS
// times, timing each pass, writes the per-sample normalized times to a text
// file and a semicolon-separated CSV, appends aggregate metrics (mean,
// variance, std-dev, required sample size) to a metrics CSV, and finally
// writes the inverted image to the global outputImgPath.
//
// NOTE(review): the three files opened here are not closed on the early
// error-return paths below (handle leak); the csv.Writer is rebuilt every
// iteration; and case 5 indexes r+1/c+1, which goes out of range for odd
// width or height — confirm inputs are always even-dimensioned.
func writeImg(version, height, width int, rgbArr0, rgbArr [][]rgb) error {
	f, err := os.Create(filepath.Join("data", fmt.Sprintf(outputFileName, pcVersion, imgVersion, version, t)))
	if err != nil {
		return err
	}
	fCsv, err := os.OpenFile(csvFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		return err
	}
	fMetricas, err := os.OpenFile(metricasFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		return err
	}
	var datos []float64
	for n := 0; n < N_MUESTRAS; n++ {
		start := time.Now()
		// Each case inverts the image with a different loop layout/traversal
		// order; the work done per pixel is identical.
		switch version {
		case 1:
			// Row-major, all three channels in one pass.
			for r := 0; r < height; r++ {
				for c := 0; c < width; c++ {
					rgbArr[r][c].r = 255 - rgbArr0[r][c].r
					rgbArr[r][c].g = 255 - rgbArr0[r][c].g
					rgbArr[r][c].b = 255 - rgbArr0[r][c].b
				}
			}
		case 2:
			// Row-major, one full pass per channel.
			for r := 0; r < height; r++ {
				for c := 0; c < width; c++ {
					rgbArr[r][c].r = 255 - rgbArr0[r][c].r
				}
			}
			for r := 0; r < height; r++ {
				for c := 0; c < width; c++ {
					rgbArr[r][c].g = 255 - rgbArr0[r][c].g
				}
			}
			for r := 0; r < height; r++ {
				for c := 0; c < width; c++ {
					rgbArr[r][c].b = 255 - rgbArr0[r][c].b
				}
			}
		case 3:
			// Column-major, all three channels in one pass.
			for c := 0; c < width; c++ {
				for r := 0; r < height; r++ {
					rgbArr[r][c].r = 255 - rgbArr0[r][c].r
					rgbArr[r][c].g = 255 - rgbArr0[r][c].g
					rgbArr[r][c].b = 255 - rgbArr0[r][c].b
				}
			}
		case 4:
			// Row-major, red in one pass, then green+blue together.
			for r := 0; r < height; r++ {
				for c := 0; c < width; c++ {
					rgbArr[r][c].r = 255 - rgbArr0[r][c].r
				}
			}
			for r := 0; r < height; r++ {
				for c := 0; c < width; c++ {
					rgbArr[r][c].g = 255 - rgbArr0[r][c].g
					rgbArr[r][c].b = 255 - rgbArr0[r][c].b
				}
			}
		case 5:
			// 2x2-unrolled row-major pass (requires even width and height).
			for r := 0; r < height; r += 2 {
				for c := 0; c < width; c += 2 {
					rgbArr[r][c].r = 255 - rgbArr0[r][c].r
					rgbArr[r][c].g = 255 - rgbArr0[r][c].g
					rgbArr[r][c].b = 255 - rgbArr0[r][c].b
					rgbArr[r][c+1].g = 255 - rgbArr0[r][c+1].g
					rgbArr[r][c+1].r = 255 - rgbArr0[r][c+1].r
					rgbArr[r][c+1].b = 255 - rgbArr0[r][c+1].b
					rgbArr[r+1][c].r = 255 - rgbArr0[r+1][c].r
					rgbArr[r+1][c].g = 255 - rgbArr0[r+1][c].g
					rgbArr[r+1][c].b = 255 - rgbArr0[r+1][c].b
					rgbArr[r+1][c+1].r = 255 - rgbArr0[r+1][c+1].r
					rgbArr[r+1][c+1].g = 255 - rgbArr0[r+1][c+1].g
					rgbArr[r+1][c+1].b = 255 - rgbArr0[r+1][c+1].b
				}
			}
		}
		stop := time.Now()
		elapsed := stop.Sub(start).Nanoseconds()
		// Normalize to nanoseconds per pixel.
		normalized := float64(elapsed) / float64(width*height)
		datos = append(datos, normalized)
		row := []string{pcVersion, imgVersion, strconv.FormatInt(int64(version), 10), "go", t, strconv.FormatFloat(normalized, 'f', 3, 64)}
		_, err = f.WriteString(strconv.FormatFloat(normalized, 'f', 3, 64) + "\n")
		if err != nil {
			return err
		}
		writerFullCSVFile := csv.NewWriter(fCsv)
		writerFullCSVFile.Comma = ';'
		err = writerFullCSVFile.Write(row)
		if err != nil {
			return err
		}
		writerFullCSVFile.Flush()
	}
	// Aggregate statistics: mean, sample variance, std-dev, and the sample
	// size needed for the configured confidence level and max error.
	sum := 0.0
	for _, dato := range datos {
		sum += dato
	}
	media := sum / float64(len(datos))
	varianza := 0.0
	for _, dato := range datos {
		varianza += (dato - media) * (dato - media)
	}
	varianza /= float64(len(datos) - 1)
	desv := math.Sqrt(varianza)
	tamMuestra := (confianza * desv / errMax) * (confianza * desv / errMax)
	row := []string{pcVersion, imgVersion, strconv.FormatInt(int64(version), 10), "go", t, strconv.FormatFloat(media, 'f', 3, 64), strconv.FormatFloat(varianza, 'f', 3, 64), strconv.FormatFloat(desv, 'f', 3, 64), strconv.FormatFloat(tamMuestra, 'f', 3, 64)}
	writerFullCSVFile := csv.NewWriter(fMetricas)
	writerFullCSVFile.Comma = ';'
	err = writerFullCSVFile.Write(row)
	if err != nil {
		return err
	}
	writerFullCSVFile.Flush()
	err = f.Close()
	if err != nil {
		return err
	}
	err = fCsv.Close()
	if err != nil {
		return err
	}
	err = fMetricas.Close()
	if err != nil {
		return err
	}
	//Write new img
	fImg, err := os.Create(outputImgPath)
	if err != nil {
		return err
	}
	defer fImg.Close()
	upLeft := image.Point{0, 0}
	upRight := image.Point{width, height}
	img := image.NewRGBA(image.Rectangle{upLeft, upRight})
	for r := 0; r < height; r++ {
		for c := 0; c < width; c++ {
			color := color.RGBA{rgbArr[r][c].r, rgbArr[r][c].g, rgbArr[r][c].b, 255}
			img.Set(c, r, color)
		}
	}
	return bmp.Encode(fImg, img)
}
// main parses the CLI arguments — <pcVersion> <version 1-5> <tratamiento>
// <imgVersion> — substitutes imgVersion into the image path templates, and
// runs the inversion benchmark.
func main() {
	args := os.Args
	// Bug fix: indexing os.Args without a length check panicked with a raw
	// "index out of range" on missing arguments.
	if len(args) < 5 {
		fmt.Println("usage: <pcVersion> <version> <tratamiento> <imgVersion>")
		os.Exit(1)
	}
	pcVersion = args[1]
	t = args[3]
	imgVersion = args[4]
	inputImgPath = fmt.Sprintf(inputImgPath, imgVersion)
	outputImgPath = fmt.Sprintf(outputImgPath, imgVersion)
	version, err := strconv.Atoi(args[2])
	if err != nil {
		panic(err)
	}
	err = invert(version, inputImgPath, outputImgPath)
	if err != nil {
		fmt.Println(err)
	}
}
|
[
3
] |
package lightning
import (
"encoding/json"
"fmt"
"log"
"time"
"github.com/tidwall/gjson"
)
// InvoiceListeningTimeout bounds each waitanyinvoice RPC call.
var InvoiceListeningTimeout = time.Minute * 150
// WaitSendPayTimeout bounds waitsendpay; effectively "wait forever" (24h).
var WaitSendPayTimeout = time.Hour * 24
// WaitPaymentMaxAttempts is exported configuration; not referenced in this chunk.
var WaitPaymentMaxAttempts = 60
// Client is a handle to a c-lightning node RPC endpoint.
type Client struct {
	Path string // RPC socket path (presumably lightning-rpc — confirm)
	PaymentHandler func(gjson.Result) // invoked per received payment; nil disables listening
	LastInvoiceIndex int // pay_index cursor passed to waitanyinvoice
}
// ListenForInvoices starts a goroutine that will repeatedly call waitanyinvoice.
// Each payment received will be fed into the client.PaymentHandler function.
// You can change that function in the meantime.
// Or you can set it to nil if you want to stop listening for invoices.
//
// NOTE(review): PaymentHandler and LastInvoiceIndex are read/written from
// this goroutine without synchronization — confirm callers only mutate them
// from a single goroutine.
func (ln *Client) ListenForInvoices() {
	go func() {
		for {
			if ln.PaymentHandler == nil {
				log.Print("won't listen for invoices: no PaymentHandler.")
				return
			}
			res, err := ln.CallWithCustomTimeout(InvoiceListeningTimeout,
				"waitanyinvoice", ln.LastInvoiceIndex)
			if err != nil {
				if _, ok := err.(ErrorTimeout); ok {
					// Expected when no invoice arrives within the window;
					// back off briefly and re-issue the wait.
					time.Sleep(time.Minute)
				} else {
					log.Printf("error waiting for invoice %d: %s", ln.LastInvoiceIndex, err.Error())
					time.Sleep(5 * time.Second)
				}
				continue
			}
			// Advance the cursor so the next wait returns only newer payments.
			index := res.Get("pay_index").Int()
			ln.LastInvoiceIndex = int(index)
			ln.PaymentHandler(res)
		}
	}()
}
// PayAndWaitUntilResolution implements its 'pay' logic, querying and retrying routes.
// It's like the default 'pay' plugin, but it blocks until a final success or failure is achieved.
// After it returns you can be sure a failed payment will not succeed anymore.
// Any value in params will be passed to 'getroute' or 'sendpay' or smart defaults will be used.
// This includes values from the default 'pay' plugin.
func (ln *Client) PayAndWaitUntilResolution(
	bolt11 string,
	params map[string]interface{},
) (success bool, payment gjson.Result, tries []Try, err error) {
	decoded, err := ln.Call("decodepay", bolt11)
	if err != nil {
		return false, payment, tries, err
	}
	hash := decoded.Get("payment_hash").String()
	// Placeholder returned on failure so callers always get the hash back.
	fakePayment := gjson.Parse(`{"payment_hash": "` + hash + `"}`)
	exclude := []string{}
	payee := decoded.Get("payee").String()
	delayFinalHop := decoded.Get("min_final_cltv_expiry").Int()
	// msatoshi: explicit param wins over the invoice amount.
	// NOTE(review): a toFloat conversion error silently leaves msatoshi at 0.
	var msatoshi float64
	if imsatoshi, ok := params["msatoshi"]; ok {
		if converted, err := toFloat(imsatoshi); err == nil {
			msatoshi = converted
		}
	} else {
		msatoshi = decoded.Get("msatoshi").Float()
	}
	// Defaults mirroring the stock 'pay' plugin.
	riskfactor, ok := params["riskfactor"]
	if !ok {
		riskfactor = 10
	}
	label, ok := params["label"]
	if !ok {
		label = ""
	}
	maxfeepercent := 0.5
	if imaxfeepercent, ok := params["maxfeepercent"]; ok {
		if converted, err := toFloat(imaxfeepercent); err == nil {
			maxfeepercent = converted
		}
	}
	exemptfee := 5000.0
	if iexemptfee, ok := params["exemptfee"]; ok {
		if converted, err := toFloat(iexemptfee); err == nil {
			exemptfee = converted
		}
	}
	// With route hints, try each hinted entry point in turn; otherwise route
	// straight to the payee.
	routehints := decoded.Get("routes").Array()
	if len(routehints) > 0 {
		for _, rh := range routehints {
			done, payment := tryPayment(ln, &tries, bolt11,
				payee, msatoshi, hash, label, &exclude,
				delayFinalHop, riskfactor, maxfeepercent, exemptfee, &rh)
			if done {
				return true, payment, tries, nil
			}
		}
	} else {
		done, payment := tryPayment(ln, &tries, bolt11,
			payee, msatoshi, hash, label, &exclude,
			delayFinalHop, riskfactor, maxfeepercent, exemptfee, nil)
		if done {
			return true, payment, tries, nil
		}
	}
	return false, fakePayment, tries, nil
}
// tryPayment makes up to 30 attempts to route and send the payment, growing
// the exclude list as channels prove too expensive or erring. When hint is
// non-nil the route is queried up to the hint's entry node and then extended
// with the hinted hops. Returns paid=true with the resolved payment on
// success; otherwise false (each failed attempt is recorded in tries).
func tryPayment(
	ln *Client,
	tries *[]Try,
	bolt11 string,
	payee string,
	msatoshi float64,
	hash string,
	label interface{},
	exclude *[]string,
	delayFinalHop int64,
	riskfactor interface{},
	maxfeepercent float64,
	exemptfee float64,
	hint *gjson.Result,
) (paid bool, payment gjson.Result) {
	for try := 0; try < 30; try++ {
		// Route either to the payee directly, or to the hint's entry node.
		target := payee
		if hint != nil {
			target = hint.Get("0.pubkey").String()
		}
		res, err := ln.CallNamed("getroute",
			"id", target,
			"riskfactor", riskfactor,
			"cltv", delayFinalHop,
			"msatoshi", msatoshi,
			"fuzzpercent", 0,
			"exclude", *exclude,
		)
		if err != nil {
			// no route or invalid parameters, call it a simple failure
			return
		}
		if !res.Get("route").Exists() {
			continue
		}
		route := res.Get("route")
		// if we're using a route hint, increment the queried route with the missing parts
		if hint != nil {
			route = addHintToRoute(route, *hint, payee, delayFinalHop)
		}
		// inspect route, it shouldn't be too expensive
		if route.Get("0.msatoshi").Float()/msatoshi > (1 + 1/maxfeepercent) {
			// too expensive, but we'll still accept it if the payment is small
			if msatoshi > exemptfee {
				// otherwise try the next route
				// we force that by excluding a channel
				*exclude = append(*exclude, getWorstChannel(route))
				continue
			}
		}
		// ignore returned value here as we'll get it from waitsendpay below
		_, err = ln.CallNamed("sendpay",
			"route", route.Value(),
			"payment_hash", hash,
			"label", label,
			"bolt11", bolt11,
		)
		if err != nil {
			// the command may return an error and we don't care
			if _, ok := err.(ErrorCommand); ok {
				// we don't care because we'll see this in the next call
			} else {
				// otherwise it's a different odd error, stop
				return
			}
		}
		// this should wait indefinitely, but 24h is enough
		payment, err = ln.CallWithCustomTimeout(WaitSendPayTimeout, "waitsendpay", hash)
		if err != nil {
			if cmderr, ok := err.(ErrorCommand); ok {
				*tries = append(*tries, Try{route.Value(), &cmderr, false})
				switch cmderr.Code {
				case 200, 202:
					// try again
					continue
				case 204:
					// error in route, eliminate erring channel and try again
					data, ok0 := cmderr.Data.(map[string]interface{})
					ichannel, ok1 := data["erring_channel"]
					channel, ok2 := ichannel.(string)
					if !ok0 || !ok1 || !ok2 {
						// should never happen
						return
					}
					// if erring channel is in the route hint just stop altogether
					if hint != nil {
						for _, hhop := range hint.Array() {
							if hhop.Get("short_channel_id").String() == channel {
								return
							}
						}
					}
					// get erring channel a direction by inspecting the route
					var direction int64
					for _, hop := range route.Array() {
						if hop.Get("channel").String() == channel {
							direction = hop.Get("direction").Int()
							goto gotdirection
						}
					}
					// we should never get here
					return
				gotdirection:
					*exclude = append(*exclude, fmt.Sprintf("%s/%d", channel, direction))
					continue
				}
			}
			// a different error, call it a complete failure
			return
		}
		// payment suceeded
		*tries = append(*tries, Try{route.Value(), nil, true})
		return true, payment
	}
	// stop trying
	return
}
// getWorstChannel returns the "channel/direction" string of the hop that
// charges the highest fee in route, where a hop's fee is the msatoshi drop
// to the following hop. A single-hop route returns that hop directly.
func getWorstChannel(route gjson.Result) (worstChannel string) {
	hops := route.Array()
	if len(hops) == 1 {
		return hops[0].Get("channel").String() + "/" + hops[0].Get("direction").String()
	}
	var worstFee int64
	for i := 1; i < len(hops); i++ {
		prev := hops[i-1]
		fee := prev.Get("msatoshi").Int() - hops[i].Get("msatoshi").Int()
		if fee > worstFee {
			worstFee = fee
			worstChannel = prev.Get("channel").String() + "/" + prev.Get("direction").String()
		}
	}
	return worstChannel
}
// addHintToRoute extends a publicly-queried route with the private hops of
// a BOLT11 route hint: the hinted hops are appended (built back-to-front),
// and the fees/delays they require are folded back into every public hop.
func addHintToRoute(
	route gjson.Result, hint gjson.Result,
	finalPeer string, finalHopDelay int64,
) gjson.Result {
	var extrafees int64 = 0 // these extra fees will be added to the public part
	var extradelay int64 = 0 // this extra delay will be added to the public part
	// we know exactly the length of our new route
	npublichops := route.Get("#").Int()
	nhinthops := hint.Get("#").Int()
	newroute := make([]map[string]interface{}, npublichops+nhinthops)
	// so we can start adding the last hops (from the last and backwards)
	r := len(newroute) - 1
	lastPublicHop := route.Array()[npublichops-1]
	hhops := hint.Array()
	for h := len(hhops) - 1; h >= 0; h-- {
		hhop := hhops[h]
		nextdelay, delaydelta, nextmsat, fees, nextpeer := grabParameters(
			hint,
			newroute,
			lastPublicHop,
			finalPeer,
			finalHopDelay,
			r,
			h,
		)
		// delay for this hop is anything in the next hop plus the delta
		delay := nextdelay + delaydelta
		// calculate this channel direction
		// (direction is 1 when this node's pubkey sorts before the next
		// peer's — the lexicographic convention for channel endpoints)
		var direction int
		if hhop.Get("pubkey").String() < nextpeer {
			direction = 1
		} else {
			direction = 0
		}
		newroute[r] = map[string]interface{}{
			"id": nextpeer,
			"channel": hhop.Get("short_channel_id").Value(),
			"direction": direction,
			"msatoshi": int64(nextmsat) + fees,
			"delay": delay,
		}
		// bump extra stuff for the public part
		extrafees += fees
		extradelay += delaydelta
		r--
	}
	// since these parameters are always based on the 'next' part of the route, we need
	// to run a fake thing here with the hint channel at index -1 so we'll get the parameters
	// for actually index 0 -- this is not to add them to the actual route, but only to
	// grab the 'extra' fees/delay we need to apply to the public part of the route
	_, delaydelta, _, fees, _ := grabParameters(
		hint,
		newroute,
		lastPublicHop,
		finalPeer,
		finalHopDelay,
		r,
		-1,
	)
	extrafees += fees
	extradelay += delaydelta
	// ~
	// now we start from the beggining with the public part of the route
	r = 0
	route.ForEach(func(_, hop gjson.Result) bool {
		newroute[r] = map[string]interface{}{
			"id": hop.Get("id").Value(),
			"channel": hop.Get("channel").Value(),
			"direction": hop.Get("direction").Value(),
			"delay": hop.Get("delay").Int() + extradelay,
			"msatoshi": hop.Get("msatoshi").Int() + extrafees,
		}
		r++
		return true
	})
	// turn it into a gjson.Result
	newroutejsonstr, _ := json.Marshal(newroute)
	newroutegjson := gjson.ParseBytes(newroutejsonstr)
	return newroutegjson
}
// grabParameters computes, for the hint hop at index h (built back-to-front
// into fullNewRoute at index r), the values of the hop AFTER it: its delay,
// delay delta, msatoshi amount, the fee this hop must add, and the next
// peer's id. h == last hint index means "the final hop"; h == -1 is a probe
// used only to collect the extra fee/delay owed by the public route part.
func grabParameters(
	fullHint gjson.Result,
	fullNewRoute []map[string]interface{},
	lastPublicHop gjson.Result,
	finalPeer string,
	finalHopDelay int64,
	r int, // the full route hop index we're working on
	h int, // the hint part channel index we're working on
) (
	nextdelay int64, // delay amount for the hop after this or the final node's cltv
	delaydelta int64, // delaydelta is given by the next hop hint or 0
	nextmsat int64, // msatoshi amount for the hop after this (or the final amount)
	fees int64, // fees are zero in the last hop, or a crazy calculation otherwise
	nextpeer string, // next node id (or the final node)
) {
	if int64(h) == fullHint.Get("#").Int()-1 {
		// this is the first iteration, means it's the last hint channel/hop
		nextmsat = lastPublicHop.Get("msatoshi").Int() // this is the final amount, yes it is.
		nextdelay = finalHopDelay
		nextpeer = finalPeer
		delaydelta = 0
		fees = 0
	} else {
		// now we'll get the value of a hop we've just calculated/iterated over
		nextHintHop := fullNewRoute[r+1]
		nextmsat = nextHintHop["msatoshi"].(int64)
		nextdelay = nextHintHop["delay"].(int64)
		nextHintChannel := fullHint.Array()[h+1]
		nextpeer = nextHintChannel.Get("pubkey").String()
		delaydelta = nextHintChannel.Get("cltv_expiry_delta").Int()
		// fees for this hop are based on the next
		fees = nextHintChannel.Get("fee_base_msat").Int() +
			int64(
				(float64(nextmsat)/1000)*nextHintChannel.Get("fee_proportional_millionths").Float()/1000,
			)
	}
	return
}
// Try records one payment attempt: the route used, the command error (nil
// on success), and whether the attempt succeeded.
type Try struct {
	Route interface{} `json:"route"`
	Error *ErrorCommand `json:"error"`
	Success bool `json:"success"`
}
|
[
6
] |
package http
import (
"strconv"
"go-common/app/interface/main/creative/model/academy"
"go-common/app/interface/main/creative/model/archive"
whmdl "go-common/app/interface/main/creative/model/weeklyhonor"
"go-common/library/ecode"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
"go-common/library/net/metadata"
"go-common/library/sync/errgroup"
"go-common/library/xstr"
)
// appH5ArcTags suggests checked tags for an archive based on its metadata
// (type id, title, filename, desc, cover) for the APP H5 page.
func appH5ArcTags(c *bm.Context) {
	params := c.Request.Form
	tidStr := params.Get("typeid")
	title := params.Get("title")
	filename := params.Get("filename")
	desc := params.Get("desc")
	cover := params.Get("cover")
	// Bug fix: the int64 type assertion previously ran BEFORE the ok check
	// and would panic on a missing "mid"; assert with comma-ok after
	// verifying presence, matching the other handlers in this file.
	midI, ok := c.Get("mid")
	if !ok {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	mid, ok := midI.(int64)
	if !ok {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	tid, _ := strconv.ParseInt(tidStr, 10, 16)
	if tid <= 0 {
		tid = 0
	}
	tags, _ := dataSvc.TagsWithChecked(c, mid, uint16(tid), title, filename, desc, cover, archive.TagPredictFromAPP)
	c.JSON(tags, nil)
}
// appH5ArcTagInfo validates a single tag name for the user and returns the
// check's code/msg pair.
func appH5ArcTagInfo(c *bm.Context) {
	params := c.Request.Form
	tagNameStr := params.Get("tag_name")
	// Bug fix: the int64 type assertion previously ran BEFORE the ok check
	// and would panic on a missing "mid".
	midI, ok := c.Get("mid")
	if !ok {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	mid, ok := midI.(int64)
	if !ok {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	if len(tagNameStr) == 0 {
		log.Error("tagNameStr len zero (%s)", tagNameStr)
		c.JSON(nil, ecode.RequestErr)
		return
	}
	code, msg := arcSvc.TagCheck(c, mid, tagNameStr)
	c.JSON(map[string]interface{}{
		"code": code,
		"msg": msg,
	}, nil)
}
// appH5Pre returns pre-submit data (activities and fav state) for the APP
// H5 page.
func appH5Pre(c *bm.Context) {
	midI, ok := c.Get("mid")
	if !ok {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	// Consistency/robustness: use a comma-ok assertion like the other
	// handlers in this file so an unexpected value type cannot panic.
	mid, ok := midI.(int64)
	if !ok {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	c.JSON(map[string]interface{}{
		"activities": arcSvc.Activities(c),
		"fav": arcSvc.Fav(c, mid),
	}, nil)
}
// appH5MissionByType returns the online missions for the given type id;
// requires a logged-in user (mid present in the context).
func appH5MissionByType(c *bm.Context) {
	if _, ok := c.Get("mid"); !ok {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	tidStr := c.Request.Form.Get("tid")
	tid, _ := strconv.Atoi(tidStr)
	actWithTP, _ := arcSvc.MissionOnlineByTid(c, int16(tid), 1)
	c.JSON(actWithTP, nil)
}
// toInt parses s as a decimal int; an empty string counts as 0. A parse
// failure is logged and surfaced as ecode.RequestErr.
func toInt(s string) (i int, err error) {
	if s == "" {
		return 0, nil
	}
	n, convErr := strconv.Atoi(s)
	if convErr != nil {
		log.Error("strconv.Atoi s(%s) error(%v)", s, convErr)
		return n, ecode.RequestErr
	}
	return n, nil
}
// toInt64 parses s as a decimal int64; an empty string counts as 0. A parse
// failure is logged and surfaced as ecode.RequestErr.
func toInt64(s string) (i int64, err error) {
	if s == "" {
		return 0, nil
	}
	i, err = strconv.ParseInt(s, 10, 64)
	if err != nil {
		// Bug fix: the log line previously claimed strconv.Atoi although
		// this function calls strconv.ParseInt.
		log.Error("strconv.ParseInt s(%s) error(%v)", s, err)
		err = ecode.RequestErr
	}
	return
}
// h5ViewPlay records/returns a view of academy item aid for the given
// business type, for the logged-in user.
func h5ViewPlay(c *bm.Context) {
	params := c.Request.Form
	aidStr := params.Get("aid")
	busStr := params.Get("business")
	// check user
	midI, ok := c.Get("mid")
	if !ok {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	mid, ok := midI.(int64)
	if !ok || mid == 0 {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	// NOTE(review): when aid <= 0 with a nil err this responds c.JSON(nil, nil)
	// rather than an explicit RequestErr — confirm intended.
	aid, err := toInt64(aidStr)
	if err != nil || aid <= 0 {
		c.JSON(nil, err)
		return
	}
	bus, err := toInt(busStr)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	// NOTE(review): aid == 0 is unreachable here (aid <= 0 returned above).
	if aid == 0 || bus == 0 {
		c.JSON(nil, ecode.RequestErr)
		return
	}
	pl, err := acaSvc.ViewPlay(c, mid, aid, int8(bus))
	if err != nil {
		c.JSON(nil, err)
		return
	}
	c.JSON(pl, nil)
}
// h5AddPlay adds a play/watch record for academy item aid (business type and
// watch flag from the form) and returns the new record id.
func h5AddPlay(c *bm.Context) {
	params := c.Request.Form
	aidStr := params.Get("aid")
	busStr := params.Get("business")
	watchStr := params.Get("watch")
	// check user
	midI, ok := c.Get("mid")
	if !ok {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	mid, ok := midI.(int64)
	if !ok || mid == 0 {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	aid, err := toInt64(aidStr)
	if err != nil || aid <= 0 {
		c.JSON(nil, err)
		return
	}
	bus, err := toInt(busStr)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	watch, err := toInt(watchStr)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	// NOTE(review): aid == 0 is unreachable here (aid <= 0 returned above).
	if aid == 0 || bus == 0 || watch == 0 {
		c.JSON(nil, ecode.RequestErr)
		return
	}
	id, err := acaSvc.PlayAdd(c, mid, aid, int8(bus), int8(watch))
	if err != nil {
		c.JSON(nil, err)
		return
	}
	c.JSON(map[string]interface{}{
		"id": id,
	}, nil)
}
// h5DelPlay deletes the user's play record for academy item aid under the
// given business type and returns the affected id.
func h5DelPlay(c *bm.Context) {
	params := c.Request.Form
	aidStr := params.Get("aid")
	busStr := params.Get("business")
	// check user
	midI, ok := c.Get("mid")
	if !ok {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	mid, ok := midI.(int64)
	if !ok || mid == 0 {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	aid, err := toInt64(aidStr)
	if err != nil || aid <= 0 {
		c.JSON(nil, err)
		return
	}
	bus, err := toInt(busStr)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	// NOTE(review): aid == 0 is unreachable here (aid <= 0 returned above).
	if aid == 0 || bus == 0 {
		c.JSON(nil, ecode.RequestErr)
		return
	}
	id, err := acaSvc.PlayDel(c, mid, aid, int8(bus))
	if err != nil {
		c.JSON(nil, err)
		return
	}
	c.JSON(map[string]interface{}{
		"id": id,
	}, nil)
}
// h5PlayList returns the user's play list, paginated (pn defaults to 1,
// ps clamped to 1..20).
func h5PlayList(c *bm.Context) {
	params := c.Request.Form
	pnStr := params.Get("pn")
	psStr := params.Get("ps")
	// check user
	midStr, ok := c.Get("mid")
	if !ok {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	mid, ok := midStr.(int64)
	if !ok || mid == 0 {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	pn, err := toInt(pnStr)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	ps, err := toInt(psStr)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	if pn <= 0 {
		pn = 1
	}
	if ps > 20 || ps <= 0 {
		ps = 20
	}
	arcs, err := acaSvc.PlayList(c, mid, pn, ps)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	c.JSON(arcs, nil)
}
// h5ThemeDir returns the academy occupation (theme) directory.
func h5ThemeDir(c *bm.Context) {
	if occs, err := acaSvc.Occupations(c); err != nil {
		c.JSON(nil, err)
	} else {
		c.JSON(occs, nil)
	}
}
// h5NewbCourse returns the newbie course list for the academy H5 page.
func h5NewbCourse(c *bm.Context) {
	if nc, err := acaSvc.NewbCourse(c); err != nil {
		c.JSON(nil, err)
	} else {
		c.JSON(nc, nil)
	}
}
// h5Tags returns the academy tag list; Tags itself reports no error.
func h5Tags(c *bm.Context) {
	c.JSON(acaSvc.Tags(c), nil)
}
// h5Archive searches academy archives with optional filters.
//
// Form params: tids (comma-separated tag ids), business, pn/ps (paging,
// ps clamped to 1..20), keyword, order and duration. Responds with the
// matching archive list or an error code.
func h5Archive(c *bm.Context) {
	params := c.Request.Form
	tidsStr := params.Get("tids")
	bsStr := params.Get("business")
	pageStr := params.Get("pn")
	psStr := params.Get("ps")
	keyword := params.Get("keyword")
	order := params.Get("order")
	drStr := params.Get("duration")
	ip := metadata.String(c, metadata.RemoteIP)
	var (
		tids []int64
		err  error
	)
	// check params: tids is optional; when present it must parse as a
	// comma-separated int64 list.
	if tidsStr != "" {
		if tids, err = xstr.SplitInts(tidsStr); err != nil {
			log.Error("strconv.ParseInt(%s) error(%v)", tidsStr, err)
			c.JSON(nil, ecode.RequestErr)
			return
		}
	}
	bs, err := toInt(bsStr)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	dr, err := toInt(drStr)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	pn, err := toInt(pageStr)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	ps, err := toInt(psStr)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	// Normalize paging: default to the first page, cap page size at 20.
	if pn <= 0 {
		pn = 1
	}
	if ps > 20 || ps <= 0 {
		ps = 20
	}
	aca := &academy.EsParam{
		Tid:      tids,
		Business: bs,
		Pn:       pn,
		Ps:       ps,
		Keyword:  keyword,
		Order:    order,
		IP:       ip,
		Duration: dr,
	}
	var arcs *academy.ArchiveList
	arcs, err = acaSvc.Archives(c, aca)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	c.JSON(arcs, nil)
}
// h5Feature returns the featured lists for the academy H5 landing page:
// the official course archives and the editor-choice archives, fetched
// concurrently. Per-fetch errors are deliberately dropped, so either list
// may be nil in the response.
func h5Feature(c *bm.Context) {
	ip := metadata.String(c, metadata.RemoteIP)
	// check params: both queries use a fixed first page of up to 50 items.
	aca := &academy.EsParam{
		Pn:      1,
		Ps:      50,
		Keyword: "",
		Order:   "",
		IP:      ip,
	}
	// Separate param struct for the second goroutine so the two queries do
	// not race on the shared Tid field.
	aca2 := &academy.EsParam{
		Pn:      1,
		Ps:      50,
		Keyword: "",
		Order:   "",
		IP:      ip,
	}
	var (
		g               = &errgroup.Group{}
		offArcs, chArcs *academy.ArchiveList
	)
	g.Go(func() error {
		aca.Tid = []int64{acaSvc.OfficialID} // official courses
		offArcs, _ = acaSvc.ArchivesWithES(c, aca)
		return nil
	})
	g.Go(func() error {
		aca2.Tid = []int64{acaSvc.EditorChoiceID} // editor's choice
		chArcs, _ = acaSvc.ArchivesWithES(c, aca2)
		return nil
	})
	// Both closures always return nil, so Wait's error is ignored.
	g.Wait()
	c.JSON(map[string]interface{}{
		"official_course": offArcs,
		"editor_choice":   chArcs,
	}, nil)
}
// weeklyHonor returns a weekly-honor record. The target is either the
// logged-in user (session mid) or an explicitly addressed user via the
// uid and token form params; at least one of mid/uid must be non-zero.
func weeklyHonor(c *bm.Context) {
	var mid int64
	if v, _ := c.Get("mid"); v != nil {
		if id, ok := v.(int64); ok {
			mid = id
		}
	}
	arg := new(struct {
		UID   int64  `form:"uid"`
		Token string `form:"token"`
	})
	if err := c.Bind(arg); err != nil {
		return
	}
	if mid == 0 && arg.UID == 0 {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	rec, err := honorSvc.WeeklyHonor(c, mid, arg.UID, arg.Token)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	c.JSON(rec, nil)
}
// weeklyHonorSubSwitch toggles the weekly-honor subscription state for the
// logged-in user.
//
// Form param: state — must equal whmdl.HonorSub or whmdl.HonorUnSub.
// Responds CreativeNotLogin when the session mid is missing or invalid and
// ReqParamErr when state is absent or out of range.
func weeklyHonorSubSwitch(c *bm.Context) {
	midI, _ := c.Get("mid")
	// Fix: the original asserted midI.(int64) unchecked, which panics when
	// the auth middleware did not set "mid"; use the comma-ok form.
	mid, ok := midI.(int64)
	if !ok || mid <= 0 {
		c.JSON(nil, ecode.CreativeNotLogin)
		return
	}
	stateStr := c.Request.Form.Get("state")
	st, err := strconv.Atoi(stateStr)
	state := uint8(st)
	if err != nil || (state != whmdl.HonorSub && state != whmdl.HonorUnSub) {
		c.JSON(nil, ecode.ReqParamErr)
		// Fix: the original fell through here and still called
		// ChangeSubState with an invalid state after writing the error.
		return
	}
	c.JSON(nil, honorSvc.ChangeSubState(c, mid, state))
}
// h5RecommendV2 returns academy recommendations; mid stays zero for
// anonymous visitors, which the service accepts.
func h5RecommendV2(c *bm.Context) {
	var mid int64
	if v, _ := c.Get("mid"); v != nil {
		if id, ok := v.(int64); ok {
			mid = id
		}
	}
	rec, err := acaSvc.RecommendV2(c, mid)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	c.JSON(rec, nil)
}
// h5ThemeCousreV2 lists theme-course archives filtered by profession (pid),
// skill (skid) and sub-skill (sid) id lists, paginated.
//
// Form params: pn (page, defaults to 1), ps (page size, clamped to 1..20),
// pid/skid/sid — optional comma-separated int64 lists.
func h5ThemeCousreV2(c *bm.Context) {
	params := c.Request.Form
	pnStr := params.Get("pn")
	psStr := params.Get("ps")
	pidStr := params.Get("pid")
	skidStr := params.Get("skid")
	sidStr := params.Get("sid")
	pn, err := toInt(pnStr)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	ps, err := toInt(psStr)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	// Normalize paging.
	if pn <= 0 {
		pn = 1
	}
	if ps > 20 || ps <= 0 {
		ps = 20
	}
	var pids, skids, sids []int64
	// Each id-list param is optional; when present it must parse cleanly.
	if pidStr != "" {
		if pids, err = xstr.SplitInts(pidStr); err != nil {
			log.Error("strconv.ParseInt pidStr(%s) error(%v)", pidStr, err)
			c.JSON(nil, ecode.RequestErr)
			return
		}
	}
	if skidStr != "" {
		if skids, err = xstr.SplitInts(skidStr); err != nil {
			log.Error("strconv.ParseInt skidStr(%s) error(%v)", skidStr, err)
			c.JSON(nil, ecode.RequestErr)
			return
		}
	}
	if sidStr != "" {
		if sids, err = xstr.SplitInts(sidStr); err != nil {
			log.Error("strconv.ParseInt sidStr(%s) error(%v)", sidStr, err)
			c.JSON(nil, ecode.RequestErr)
			return
		}
	}
	arcs, err := acaSvc.ProfessionSkill(c, pids, skids, sids, pn, ps, false)
	if err != nil {
		c.JSON(nil, err)
		return
	}
	c.JSON(arcs, nil)
}
// h5Keywords returns the academy search keywords; Keywords reports no error.
func h5Keywords(c *bm.Context) {
	c.JSON(acaSvc.Keywords(c), nil)
}
|
[
3
] |
package serializer
import (
"encoding/json"
"errors"
"github.com/pquerna/ffjson/ffjson"
"github.com/tidwall/gjson"
//"strings"
//"reflect"
//"egov/json"
"strings"
//"strconv"
//"fmt"
)
// jsonDialect is the JSON flavour of TransDialect: path lookups via gjson,
// decoding via encoding/json and encoding via ffjson.
type jsonDialect struct {
	TransDialect
	// ContentType is the MIME type this dialect serves.
	ContentType string
}
// GetValue extracts the value at the given gjson path from a JSON buffer and
// returns its string form. It returns a non-nil error when the path is not
// present in the document.
func (jd *jsonDialect) GetValue(inBuf []byte, path string) (string, error) {
	result := gjson.GetBytes(inBuf, path)
	// Fix: the original treated any empty string result as "path missing",
	// so a present-but-empty value ("" or an empty field) raised an error.
	// Result.Exists distinguishes a missing path from an empty value.
	if !result.Exists() {
		return "", errors.New("cant not get " + path)
	}
	return result.String(), nil
}
// Unmarshal decodes a JSON buffer into inObject using encoding/json.
// inObject must point at a decodable target, as with json.Unmarshal.
func (jd *jsonDialect) Unmarshal(inBuf []byte, inObject interface{}) error {
	decoder := json.NewDecoder(strings.NewReader(string(inBuf)))
	// Fix: the original buried blocks of commented-out experiments
	// (UseNumber, ffjson, reflection helpers) and an err variable that was
	// checked and then discarded; return the decode error directly.
	return decoder.Decode(&inObject)
}
// Marshal encodes inObject to JSON using ffjson (a faster drop-in
// replacement for encoding/json's Marshal).
func (jd *jsonDialect) Marshal(inObject interface{}) ([]byte, error) {
	outBuf, err := ffjson.Marshal(inObject)
	return outBuf, err
}
//func (jd *jsonDialect) ConvertIntToBool(obj map[string]interface{}) (int64, map[string]interface{}) {
// converted := int64(0)
// for key, kind := range core.ColumnTypes {
// if kind == reflect.Bool {
// if x, ok := obj[key]; ok {
// if reflect.ValueOf(x).Kind() != reflect.Bool {
// obj[key] = x.(float64) > 0
// converted++
// }
// }
// }
// continue
// }
// return converted, obj
//}
//
//func (jd *jsonDialect) ConvertIntToBoolInObject(inBuf []byte) ([]byte, error, int64) {
// converted := int64(0)
// if jd.IntConvertToBool {
// res := gjson.GetBytes(inBuf, "object")
// if res.Raw == "" {
// return inBuf, nil, 0
// }
// var mpobject map[string]interface{}
// err := ffjson.Unmarshal([]byte(res.Raw), &mpobject)
// if err != nil {
// return nil, NewError(0, err.Error()), 0
// }
// converted, mpobject = jd.ConvertIntToBool(mpobject)
// mpobyte, e := ffjson.Marshal(mpobject)
// if e != nil {
// return nil, NewError(0, e.Error()), 0
// }
// inBuf = bytes.Replace(inBuf, []byte(res.Raw), mpobyte, len([]byte(res.Raw)))
// }
// return inBuf, nil, converted
//}
//
//func (jd *jsonDialect) ConvertBooltoIntInResult(inBuf []byte) ([]byte, error) {
// if jd.BoolValueReturnInt {
// inBuf = bytes.Replace(inBuf, []byte(":true"), []byte(":1"), -1)
// inBuf = bytes.Replace(inBuf, []byte(":false"), []byte(":0"), -1)
// }
// return inBuf, nil
//}
|
[
3
] |
//go:generate goagen bootstrap -d github.com/localghost/my-clippings/design
package main
import (
"github.com/goadesign/goa"
"github.com/goadesign/goa/middleware"
"github.com/localghost/my-clippings/app"
)
// main wires up and starts the my-clippings goa service on port 8111.
func main() {
	// Create the service instance.
	svc := goa.New("my-clippings")

	// Standard middleware stack.
	svc.Use(middleware.RequestID())
	svc.Use(middleware.LogRequest(true))
	svc.Use(middleware.ErrorHandler(svc, true))
	svc.Use(middleware.Recover())

	// Mount the clippings controller.
	app.MountClippingsController(svc, NewClippingsController(svc))

	// Block serving requests; log the reason if the listener stops.
	if err := svc.ListenAndServe(":8111"); err != nil {
		svc.LogError("startup", "err", err)
	}
}
|
[
3
] |
package main
import (
"bufio"
"fmt"
"os"
"runtime"
"strconv"
"strings"
)
const MAX_RANGE = 100000000
//const MAX_RANGE = 100000
var sols []string
// almostPrime will be used to store all the precalculated almost prime numbers
// from 0 to MAX_RANGE
var almostPrime []int
// isPrime reports whether n is prime, using trial division by 2 and 3 and
// then by candidates of the form 6k±1 up to √n.
func isPrime(n int) bool {
	// Fix: the original returned true for every n < 2 that was not
	// divisible by 2 or 3 (e.g. isPrime(1) == true). Numbers below 2 are
	// not prime. The caller loops from 2 upward, so this guard also makes
	// the function safe for general reuse.
	if n < 2 {
		return false
	}
	if n == 2 || n == 3 {
		return true
	}
	if n%2 == 0 || n%3 == 0 {
		return false
	}
	// Candidates 5, 7, 11, 13, ... — alternate steps of 2 and 4 skip all
	// multiples of 2 and 3.
	i := 5
	w := 2
	for i*i <= n {
		if n%i == 0 {
			return false
		}
		i += w
		w = 6 - w
	}
	return true
}
// calculateAlmostPrimes thinking on the worst of the cases (that I suppose you
// will use for the submission), I'll calculate all the almost primes from 0 to
// 10^8, thanks to this pre-processor, we will be able to use binary search on
// the solveProblem method with O(log n)
//
// Side effects: fills the package-level almostPrime slice in increasing
// order ("almost prime" = product of exactly two primes, counting
// multiplicity); primes found along the way are kept in a local slice.
func calculateAlmostPrimes() {
	primes := []int{}
	almostPrime = []int{}
	// Using the method for factorization explained here:
	// - http://www.calculatorsoup.com/calculators/math/prime-factors.php
	// We will locate all the numbers between zero and the max range that
	// divided by a prime number results as another prime number
	searchLoop: for n := 2; n <= MAX_RANGE; n++ {
		if !isPrime(n) {
			// primes is in increasing order, so the first prime
			// dividing n is its smallest prime factor.
			for _, p := range primes {
				// If the number can be divided by a prime
				// number, and the result if another prime
				// number, this number can only have these two
				// factors
				if n % p == 0 {
					if isPrime(n / p) {
						almostPrime = append(almostPrime, n)
					}
					// Smallest factor found; move to the next n.
					continue searchLoop
				}
			}
		} else {
			primes = append(primes, n)
		}
	}
}
// getCeilPos Binary search that returns the position of the element under the ceil specified
//
// Reads the package-level almostPrime slice (must be sorted ascending and
// non-empty). NOTE(review): the post-loop adjustments below encode subtle
// boundary behavior (e.g. returning left-1 on an exact match) that
// solveProblem compensates for — verify against solveProblem before reuse.
func getCeilPos(top int) int {
	left := 0
	right := len(almostPrime)
	// Classic bisection: invariant is almostPrime[left] <= top (once
	// entered) and almostPrime[right] > top (or right == len).
	for right - left > 1 {
		pos := ((right - left) / 2) + left
		if almostPrime[pos] > top {
			right = pos
		} else {
			left = pos
		}
	}
	if right < len(almostPrime) && almostPrime[right] < top {
		return right
	}
	// Exact hit: step back one; the caller re-adjusts for inclusiveness.
	if almostPrime[left] == top {
		return left-1
	}
	return left
}
// solveProblem Using Binary Search over the almostPrime sorted array, we can
// find the range of almost prime numbers in O(log n)
//
// Counts almost primes in the inclusive range [from, to] and records the
// answer for problem index pos via addSolution, which signals c when done.
// Runs as a goroutine per input line.
func solveProblem(from, to int, pos int, c chan bool) {
	fromPos := getCeilPos(from)
	toPos := getCeilPos(to)
	// getCeilPos returns the position at-or-below the bound; bump the
	// lower index forward when it fell short of `from`.
	if almostPrime[fromPos] < from {
		fromPos += 1
	}
	// Include `to` itself when it is exactly an almost prime (getCeilPos
	// stepped back one on an exact match).
	if toPos+1 < len(almostPrime) && almostPrime[toPos+1] == to {
		toPos += 1
	}
	// Empty range: no almost prime falls inside [from, to].
	if fromPos == toPos && almostPrime[fromPos] != from {
		addSolution(pos, "0", c)
		return
	}
	//fmt.Println("Search", from, to, fromPos, toPos, almostPrime[fromPos:toPos+1])
	addSolution(pos, fmt.Sprintf("%d", len(almostPrime[fromPos:toPos+1])), c)
}
// main reads the problem count and then one "<from> <to>" pair per line from
// stdin, solves each range concurrently and prints the answers in input order.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	// Precompute every almost prime up to MAX_RANGE once; all queries
	// binary-search this shared slice.
	calculateAlmostPrimes()
	reader := bufio.NewReader(os.Stdin)
	text, _ := reader.ReadString('\n')
	// Trim the trailing newline before parsing. NOTE(review): input parse
	// errors are ignored throughout; malformed input yields zero values.
	problems, _ := strconv.ParseInt(text[:len(text)-1], 10, 64)
	sols = make([]string, problems)
	solsChan := make(chan bool)
	for p := 0; p < int(problems); p++ {
		rangeStr, _ := reader.ReadString('\n')
		rangeParts := strings.Split(rangeStr[:len(rangeStr)-1], " ")
		from, _ := strconv.ParseInt(rangeParts[0], 10, 64)
		to, _ := strconv.ParseInt(rangeParts[1], 10, 64)
		go solveProblem(int(from), int(to), p, solsChan)
	}
	// Wait for one completion signal per problem before printing, so the
	// sols slice is fully populated.
	for p := 0; p < int(problems); p++ {
		<-solsChan
	}
	for p := 0; p < int(problems); p++ {
		fmt.Println(sols[p])
	}
}
// -- Functions used to create the input and manage the channel
// filePutContents I'll use this method just to follow the progress of the
// program without need to use the standar output
func filePutContents(filename string, content []byte) error {
fp, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, os.ModePerm)
if err != nil {
return err
}
defer fp.Close()
_, err = fp.Write(content)
return err
}
// addSolution stores the answer for problem index pos in the shared sols
// slice, signals completion on c, and mirrors the answer to a progress file.
// Each goroutine writes a distinct index, so sols needs no locking.
func addSolution(pos int, sol string, c chan bool) {
	sols[pos] = sol
	c <- true
	// Using /tmp/prob_sols as secondary output in order to follow the
	// progress of the program in real time
	filePutContents("/tmp/prob_sols", []byte(fmt.Sprintf("%s\n", sols[pos])))
}
|
[
3,
6
] |
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"io"
"log"
)
// FlakeTestResult holds the flaky regions found by FlakeTest.
// subdivide appends the most recent ranges first, so All is ordered from
// newest region to oldest.
type FlakeTestResult struct {
	All []FlakeRegion
}
// FlakeRegion describes a contiguous span of commits whose failures are
// consistent with a single Bernoulli failure probability.
type FlakeRegion struct {
	// Times gives the times of all failures in this region, in
	// increasing order.
	//
	// TODO: Remove some of the redundant fields?
	Times []int

	// First and Last are the indexes of the first and last
	// failures in this flaky region. These are equivalent to
	// Times[0] and Times[len(Times)-1], respectively.
	First, Last int

	// Failures is the number of failures in the region. This is
	// equivalent to len(Times).
	Failures int

	// FailureProbability is the fraction of builds in this region
	// that failed.
	FailureProbability float64

	// GoodnessOfFit is the goodness of fit test for this region
	// against the maximum likelihood estimate geometric
	// distribution for these failures. This is primarily for
	// debugging. It may be nil (see subdivide).
	GoodnessOfFit *AndersonDarlingTestResult
}
// FlakeTest finds ranges of commits over which the failure
// probability of a test is fairly consistent. The failures argument
// gives the indexes of commits with failing tests.
//
// This works by assuming flaky tests are a Bernoulli process. That
// is, they fail with some probability and each failure is independent
// of other failures. Using this assumption, it subdivides the failure
// events to find subranges where the distribution of times between
// failures is very similar to a geometric distribution (determined
// using an Anderson-Darling goodness-of-fit test).
func FlakeTest(failures []int) *FlakeTestResult {
	result := &FlakeTestResult{}
	// subdivide recursively partitions failures and fills result.All.
	result.subdivide(failures)
	return result
}
// subdivide adds events to the flake test result if it has a strongly
// geometric interarrival distribution. Otherwise, it recursively
// subdivides events on the longest gap.
//
// events must be strictly monotonically increasing.
func (r *FlakeTestResult) subdivide(events []int) {
	if len(events) == 1 {
		// Isolated failure. Fields (positional): Times, First, Last,
		// Failures, FailureProbability=1, GoodnessOfFit=nil.
		region := FlakeRegion{events, events[0], events[0], 1, 1, nil}
		r.All = append(r.All, region)
		return
	}
	mle, ad := interarrivalAnalysis(events)
	// ad is nil when every event is adjacent (no gaps); treat that as a
	// single consistent region too.
	if ad == nil || ad.P >= 0.05 {
		// We failed to reject the null hypothesis that this
		// isn't geometrically distributed. That's about as
		// close as we're going to get to calling it
		// geometrically distributed.
		region := FlakeRegion{events, events[0], events[len(events)-1], len(events), mle.P, ad}
		r.All = append(r.All, region)
		return
	}
	// We reject the null hypothesis and accept the alternate
	// hypothesis that this range of events is not a Bernoulli
	// process. Subdivide on the longest gap, which is the least
	// likely event in this range.
	longestIndex, longestVal := 0, events[1]-events[0]
	for i := 0; i < len(events)-1; i++ {
		val := events[i+1] - events[i]
		if val > longestVal {
			longestIndex, longestVal = i, val
		}
	}
	//fmt.Fprintln(os.Stderr, "subdividing", events[:longestIndex+1], events[longestIndex+1:], mle.P, ad.P)

	// Find the more recent ranges first.
	r.subdivide(events[longestIndex+1:])
	r.subdivide(events[:longestIndex+1])
}
// interarrivalAnalysis returns the maximum likelihood estimated
// distribution for the times between events and the Anderson-Darling
// test for how closely the data matches this distribution. ad will be
// nil if there is no time between any of the events.
//
// events must be strictly monotonically increasing.
func interarrivalAnalysis(events []int) (mle *GeometricDist, ad *AndersonDarlingTestResult) {
	interarrivalTimes := make([]int, len(events)-1)
	sum := 0
	for i := 0; i < len(events)-1; i++ {
		// Gap of 0 means the events were in consecutive builds.
		delta := events[i+1] - events[i] - 1
		interarrivalTimes[i] = delta
		sum += delta
	}

	// Compute maximum likelihood estimate of geometric
	// distribution underlying interarrivalTimes.
	mle = &GeometricDist{P: float64(len(interarrivalTimes)) / float64(len(interarrivalTimes)+sum)}

	if mle.P == 1 {
		// This happens if there are no gaps between events.
		// In this case Anderson-Darling is undefined because
		// the CDF is 1.
		return
	}

	// Compute Anderson-Darling goodness-of-fit for the observed
	// distribution against the theoretical distribution.
	var err error
	ad, err = AndersonDarlingTest(interarrivalTimes, mle)
	if err != nil {
		log.Fatal("Anderson-Darling test failed: ", err)
	}
	return
}
// Dump writes each region to w in chronological order as step data: a zero
// line at First, the failure probability and goodness-of-fit across
// [First, Last], then back to zero at Last.
func (r *FlakeTestResult) Dump(w io.Writer) {
	// r.All is ordered most-recent-first, so walk it backwards.
	for i := len(r.All) - 1; i >= 0; i-- {
		reg := &r.All[i]
		gof := 0.0
		if reg.GoodnessOfFit != nil {
			gof = reg.GoodnessOfFit.P
		}
		fmt.Fprintln(w, reg.First, 0, 0)
		fmt.Fprintln(w, reg.First, reg.FailureProbability, gof)
		fmt.Fprintln(w, reg.Last, reg.FailureProbability, gof)
		fmt.Fprintln(w, reg.Last, 0, 0)
	}
}
// StillHappening returns the probability that the flake is still
// happening as of time t.
func (r *FlakeRegion) StillHappening(t int) float64 {
	if t < r.First {
		// The region has not started yet at time t.
		return 0
	}
	// Probability that the next failure lies beyond t, given failures
	// stopped being observed after Last.
	dist := GeometricDist{P: r.FailureProbability, Start: r.Last + 1}
	return 1 - dist.CDF(t)
}
// Bounds returns the time at which the probability that the failure
// started rises above p and the time at which the probability that
// the failure stopped falls below p. Note that this has no idea of
// the "current" time, so stop may be "in the future."
func (r *FlakeRegion) Bounds(p float64) (start, stop int) {
	dist := GeometricDist{P: r.FailureProbability}
	// The geometric tail is symmetric around both edges of the region,
	// so a single offset widens it on each side.
	delta := dist.InvCDF(1 - p)
	return r.First - delta, r.Last + delta
}
// StartedAtOrBefore returns the probability that the failure start at
// or before time t.
func (r *FlakeRegion) StartedAtOrBefore(t int) float64 {
	if t > r.First {
		// The first observed failure is at First, so the flake
		// certainly started by then.
		return 1
	}
	dist := GeometricDist{P: r.FailureProbability}
	return 1 - dist.CDF(r.First-t-1)
}
// StartedAt returns the probability that the failure started exactly at
// time t (t <= r.First), from the region's geometric model.
func (r *FlakeRegion) StartedAt(t int) float64 {
	dist := GeometricDist{P: r.FailureProbability}
	return dist.PMF(r.First - t)
}
// Culprit gives the probability P that the event at time T was
// responsible for a failure.
type Culprit struct {
	// P is the probability mass assigned to time T.
	P float64
	// T is the candidate event time (commit index).
	T int
}
// Culprits returns the possible culprits for this failure up to a
// cumulative probability of cumProb or at most limit events, ordered from
// most likely culprit (the first observed failure) to least likely.
func (r *FlakeRegion) Culprits(cumProb float64, limit int) []Culprit {
	out := []Culprit{}
	accum := 0.0
	// Walk backwards in time from the first failure, never past time 0
	// and never more than limit events.
	for t := r.First; t >= 0 && t > r.First-limit; t-- {
		p := r.StartedAt(t)
		out = append(out, Culprit{P: p, T: t})
		if accum += p; accum > cumProb {
			break
		}
	}
	return out
}
|
[
3
] |
package main
import (
"fmt"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/auth/aksk"
"github.com/gophercloud/gophercloud/openstack"
"github.com/gophercloud/gophercloud/openstack/vpc/v1/privateips"
)
// main demonstrates the private-IP API: it authenticates with AK/SK
// credentials, builds a VPC v1 service client and runs the four CRUD
// examples below. Placeholder values must be replaced before running.
func main() {
	fmt.Println("main start...")
	//AKSK authentication, initialization authentication parameters
	opts := aksk.AKSKOptions{
		IdentityEndpoint: "https://iam.xxx.yyy.com/v3",
		ProjectID:        "{ProjectID}",
		AccessKey:        "your AK string",
		SecretKey:        "your SK string",
		Domain:           "yyy.com",
		Region:           "xxx",
		DomainID:         "{domainID}",
	}

	//Initialization provider client
	provider, err := openstack.AuthenticatedClient(opts)
	if err != nil {
		fmt.Println("get provider client failed")
		// UnifiedError carries the service error code and message.
		if ue, ok := err.(*gophercloud.UnifiedError); ok {
			fmt.Println("ErrCode:", ue.ErrorCode())
			fmt.Println("Message:", ue.Message())
		}
		return
	}

	//Initialization service client
	sc, err := openstack.NewVPCV1(provider, gophercloud.EndpointOpts{})
	if err != nil {
		fmt.Println("get network client failed")
		if ue, ok := err.(*gophercloud.UnifiedError); ok {
			fmt.Println("ErrCode:", ue.ErrorCode())
			fmt.Println("Message:", ue.Message())
		}
		return
	}

	// Run the examples; each prints its own results and errors.
	CreatePrivateIp(sc)
	GetPrivateIp(sc)
	ListPrivateIp(sc)
	DeletePrivateIp(sc)
	fmt.Println("main end...")
}
// CreatePrivateIp requests one auto-assigned private IP in a hard-coded
// subnet and prints the fields of every IP returned.
func CreatePrivateIp(client *gophercloud.ServiceClient) {
	result, err := privateips.Create(client, privateips.CreateOpts{
		Privateips: []privateips.PrivateIpCreate{
			{
				SubnetId: "008ce66f-ff4a-430c-ae7f-d9959ebcde00",
				// Leave IpAddress unset to let the service pick one.
				//IpAddress: "192.168.0.232",
			},
		},
	}).Extract()
	if err != nil {
		fmt.Println(err)
		if ue, ok := err.(*gophercloud.UnifiedError); ok {
			fmt.Println("ErrCode:", ue.ErrorCode())
			fmt.Println("Message:", ue.Message())
		}
		return
	}
	fmt.Printf("privateips: %+v\r\n", result)
	for _, resp := range *result {
		fmt.Println("PrivateIps Id is:", resp.ID)
		fmt.Println("PrivateIps Status is:", resp.Status)
		fmt.Println("PrivateIps DeviceOwner is:", resp.DeviceOwner)
		fmt.Println("PrivateIps IpAddress is:", resp.IpAddress)
		fmt.Println("PrivateIps SubnetId is:", resp.SubnetId)
		fmt.Println("PrivateIps TenantId is:", resp.TenantId)
	}
}
// GetPrivateIp fetches a single private IP by its (hard-coded example) ID
// and prints all of its fields.
func GetPrivateIp(client *gophercloud.ServiceClient) {
	ip, err := privateips.Get(client, "56559f35-f2ef-42d0-8931-11cc62249b48").Extract()
	if err != nil {
		fmt.Println(err)
		if ue, ok := err.(*gophercloud.UnifiedError); ok {
			fmt.Println("ErrCode:", ue.ErrorCode())
			fmt.Println("Message:", ue.Message())
		}
		return
	}
	fmt.Printf("privateips: %+v\r\n", ip)
	fmt.Println("PrivateIps Id is:", ip.ID)
	fmt.Println("PrivateIps Status is:", ip.Status)
	fmt.Println("PrivateIps DeviceOwner is:", ip.DeviceOwner)
	fmt.Println("PrivateIps IpAddress is:", ip.IpAddress)
	fmt.Println("PrivateIps SubnetId is:", ip.SubnetId)
	fmt.Println("PrivateIps TenantId is:", ip.TenantId)
	fmt.Println("Get success!")
}
// ListPrivateIp pages through the private IPs of a hard-coded subnet
// (two per page) and prints each one.
func ListPrivateIp(client *gophercloud.ServiceClient) {
	subnetID := "008ce66f-ff4a-430c-ae7f-d9959ebcde00"
	allPages, err := privateips.List(client, subnetID, privateips.ListOpts{
		Limit: 2,
	}).AllPages()
	if err != nil {
		fmt.Println(err)
		if ue, ok := err.(*gophercloud.UnifiedError); ok {
			fmt.Println("ErrCode:", ue.ErrorCode())
			fmt.Println("Message:", ue.Message())
		}
		return
	}
	result, err := privateips.ExtractPrivateIps(allPages)
	// Fix: the original ignored this error and printed a possibly
	// incomplete/zero result followed by "List success!".
	if err != nil {
		fmt.Println(err)
		if ue, ok := err.(*gophercloud.UnifiedError); ok {
			fmt.Println("ErrCode:", ue.ErrorCode())
			fmt.Println("Message:", ue.Message())
		}
		return
	}
	fmt.Printf("privateips: %+v\r\n", result)
	for _, resp := range result {
		fmt.Println("PrivateIps Id is:", resp.ID)
		fmt.Println("PrivateIps Status is:", resp.Status)
		fmt.Println("PrivateIps DeviceOwner is:", resp.DeviceOwner)
		fmt.Println("PrivateIps IpAddress is:", resp.IpAddress)
		fmt.Println("PrivateIps SubnetId is:", resp.SubnetId)
		fmt.Println("PrivateIps TenantId is:", resp.TenantId)
	}
	fmt.Println("List success!")
}
// DeletePrivateIp releases the private IP with the (hard-coded example) ID
// and reports success or the service error.
func DeletePrivateIp(client *gophercloud.ServiceClient) {
	if err := privateips.Delete(client, "8ba7458d-af89-47e6-a04a-0b9e2b0c8404").ExtractErr(); err != nil {
		if ue, ok := err.(*gophercloud.UnifiedError); ok {
			fmt.Println("ErrCode:", ue.ErrorCode())
			fmt.Println("Message:", ue.Message())
		}
		return
	}
	fmt.Println("Delete success!")
}
|
[
3
] |
package utwil
import (
"log"
"os"
"testing"
"time"
)
var (
AccountSID = os.Getenv("TWILIO_ACCOUNT_SID")
AuthToken = os.Getenv("TWILIO_AUTH_TOKEN")
ToPhoneNumber = os.Getenv("TWILIO_DEFAULT_TO")
FromPhoneNumber = os.Getenv("TWILIO_DEFAULT_FROM")
TestClient = NewClient(AccountSID, AuthToken)
)
// init aborts the test binary early with a clear message when any of the
// required Twilio environment variables is unset.
func init() {
	if AccountSID == "" {
		log.Fatalf("Testing env var TWILIO_ACCOUNT_SID is unset")
	} else if AuthToken == "" {
		log.Fatalf("Testing env var TWILIO_AUTH_TOKEN is unset")
	} else if ToPhoneNumber == "" {
		log.Fatalf("Testing env var TWILIO_DEFAULT_TO is unset")
	} else if FromPhoneNumber == "" {
		log.Fatalf("Testing env var TWILIO_DEFAULT_FROM is unset")
	}
}
// Iterate (and paginate) through all the calls
func TestListCalls(t *testing.T) {
iter := TestClient.Calls().Iter()
callCount := 0
var call Call
for iter.Next(&call) {
callCount++
}
if iter.Err() != nil {
t.Fatalf("error: %s", iter.Err().Error())
}
t.Logf("Calls total: %d\n", callCount)
}
// TestQueryCalls iterates (and paginates) through all calls from
// FromPhoneNumber started within the last week and logs the total.
func TestQueryCalls(t *testing.T) {
	weekAgo := time.Now().Add(-7 * 24 * time.Hour)
	// Filter server-side by caller and start date (YMD granularity).
	iter := TestClient.Calls(
		From(FromPhoneNumber),
		StartedAfterYMD(weekAgo)).Iter()
	callCount := 0
	var call Call
	for iter.Next(&call) {
		callCount++
	}
	if iter.Err() != nil {
		t.Fatalf("error: %s", iter.Err().Error())
	}
	t.Logf("Within-one-week calls total: %d\n", callCount)
}
// Iterate (and paginate) through all the messages
func TestListMessages(t *testing.T) {
iter := TestClient.Messages().Iter()
msgCount := 0
var msg Message
for iter.Next(&msg) {
msgCount++
}
if iter.Err() != nil {
t.Fatalf("error: %s\n", iter.Err().Error())
}
t.Logf("Messages total: %d\n", msgCount)
}
// TestQueryMessages iterates (and paginates) through all messages from
// FromPhoneNumber sent within the last week and logs the total.
func TestQueryMessages(t *testing.T) {
	weekAgo := time.Now().Add(-7 * 24 * time.Hour)
	// Filter server-side by sender and send date (YMD granularity).
	iter := TestClient.Messages(
		From(FromPhoneNumber),
		SentAfterYMD(weekAgo)).Iter()
	msgCount := 0
	var msg Message
	for iter.Next(&msg) {
		msgCount++
	}
	if iter.Err() != nil {
		t.Fatalf("error: %s\n", iter.Err().Error())
	}
	t.Logf("With-one-week Messages total: %d\n", msgCount)
}
|
[
5
] |
// Copyright 2014 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package docker
import (
"encoding/json"
"fmt"
"github.com/tsuru/docker-cluster/cluster"
"github.com/tsuru/tsuru/action"
"github.com/tsuru/tsuru/app"
"github.com/tsuru/tsuru/log"
"github.com/tsuru/tsuru/provision"
"labix.org/v2/mgo/bson"
"math"
"sync"
)
var (
appDBMutex sync.Mutex
logMutex sync.Mutex
)
type progressLog struct {
Message string
}
// logProgress writes one formatted progress message as a JSON object on the
// shared encoder. logMutex serializes concurrent movers so their JSON
// objects do not interleave on the stream.
func logProgress(encoder *json.Encoder, format string, params ...interface{}) {
	logMutex.Lock()
	defer logMutex.Unlock()
	encoder.Encode(progressLog{Message: fmt.Sprintf(format, params...)})
}
func moveOneContainerInDB(a *app.App, oldContainer container, newUnit provision.Unit) error {
appDBMutex.Lock()
defer appDBMutex.Unlock()
err := a.AddUnitsToDB([]provision.Unit{newUnit})
if err != nil {
return err
}
return a.RemoveUnitFromDB(oldContainer.ID)
}
// moveOneContainer migrates a single container to toHost: it runs the
// add-new/remove-old provisioning pipeline and then updates the app's unit
// records. Any failure is sent on errors; wg is decremented on all paths.
// Designed to run as a goroutine (see moveContainers).
func moveOneContainer(c container, toHost string, errors chan error, wg *sync.WaitGroup, encoder *json.Encoder) {
	a, err := app.GetByName(c.AppName)
	defer wg.Done()
	if err != nil {
		errors <- err
		return
	}
	logProgress(encoder, "Moving unit %s for %q: %s -> %s...", c.ID, c.AppName, c.HostAddr, toHost)
	// Two-step pipeline: create the replacement unit on the target host,
	// then tear down the old one; the pipeline handles rollback semantics.
	pipeline := action.NewPipeline(
		&provisionAddUnitToHost,
		&provisionRemoveOldUnit,
	)
	err = pipeline.Execute(a, toHost, c)
	if err != nil {
		errors <- err
		return
	}
	logProgress(encoder, "Finished moving unit %s for %q.", c.ID, c.AppName)
	// The pipeline result is the newly provisioned unit.
	addedUnit := pipeline.Result().(provision.Unit)
	err = moveOneContainerInDB(a, c, addedUnit)
	if err != nil {
		errors <- err
		return
	}
	logProgress(encoder, "Moved unit %s -> %s for %s in DB.", c.ID, addedUnit.Name, c.AppName)
}
// moveContainer migrates the container matching the (possibly partial) id
// contId to toHost, reporting progress on encoder.
func moveContainer(contId string, toHost string, encoder *json.Encoder) error {
	cont, err := getContainerPartialId(contId)
	if err != nil {
		return err
	}
	wg := sync.WaitGroup{}
	wg.Add(1)
	// Buffered so the synchronous moveOneContainer call can send its
	// error without a receiver being ready.
	moveErrors := make(chan error, 1)
	moveOneContainer(*cont, toHost, moveErrors, &wg, encoder)
	close(moveErrors)
	// Receiving from the closed, possibly empty channel yields nil on
	// success, so this covers both outcomes.
	if err = <-moveErrors; err != nil {
		log.Errorf("Error moving container - %s", err)
		return err
	}
	return nil
}
// moveContainers migrates every container on fromHost to toHost
// concurrently, reporting progress on encoder. All moves are attempted; the
// last error observed (if any) is returned.
func moveContainers(fromHost, toHost string, encoder *json.Encoder) error {
	containers, err := listContainersByHost(fromHost)
	if err != nil {
		return err
	}
	numberContainers := len(containers)
	if numberContainers == 0 {
		logProgress(encoder, "No units to move in %s.", fromHost)
		return nil
	}
	logProgress(encoder, "Moving %d units...", numberContainers)
	// Buffer one slot per mover so no goroutine blocks on error send.
	moveErrors := make(chan error, numberContainers)
	wg := sync.WaitGroup{}
	wg.Add(numberContainers)
	for _, c := range containers {
		go moveOneContainer(c, toHost, moveErrors, &wg, encoder)
	}
	// Close the error channel once every mover has finished so the range
	// below terminates.
	go func() {
		wg.Wait()
		close(moveErrors)
	}()
	var lastError error = nil
	for err := range moveErrors {
		log.Errorf("Error moving container - %s", err)
		lastError = err
	}
	return lastError
}
// hostWithContainers is the aggregation result of the rebalance pipeline:
// one docker host with its container count and containers.
type hostWithContainers struct {
	// HostAddr is the host address (the aggregation group key).
	HostAddr   string `bson:"_id"`
	// Count is the number of containers currently on this host.
	Count      int
	Containers []container
}
// minHost returns the destination host with the fewest containers among
// possibleDests, looked up in the hosts map by address.
// NOTE(review): assumes every destination address is present in hosts
// (rebalanceContainers seeds missing nodes); a missing entry would nil-panic.
func minHost(hosts map[string]*hostWithContainers, possibleDests []cluster.Node) *hostWithContainers {
	var minHost *hostWithContainers
	minCount := math.MaxInt32
	for _, dest := range possibleDests {
		hostAddr := urlToHost(dest.Address)
		host := hosts[hostAddr]
		if host.Count < minCount {
			minCount = host.Count
			minHost = host
		}
	}
	return minHost
}
// rebalanceContainers evens out containers across all cluster nodes: hosts
// holding more than ceil(total/nodes) containers have their excess moved to
// the least-loaded eligible hosts. With dryRun true it only logs the moves
// it would make. Progress and errors are reported via encoder/logs.
func rebalanceContainers(encoder *json.Encoder, dryRun bool) error {
	coll := collection()
	defer coll.Close()
	// Group containers by host address, collecting each host's container
	// documents and count in a single aggregation.
	pipe := coll.Pipe([]bson.M{
		{"$match": bson.M{"hostaddr": bson.M{"$ne": ""}}},
		{"$group": bson.M{
			"_id":   "$hostaddr",
			"count": bson.M{"$sum": 1},
			"containers": bson.M{"$push": bson.M{
				// Could use $$ROOT instead of repeating fields but only in Mongo 2.6+.
				"_id":      "$_id",
				"id":       "$id",
				"name":     "$name",
				"appname":  "$appname",
				"type":     "$type",
				"ip":       "$ip",
				"image":    "$image",
				"hostaddr": "$hostaddr",
				"hostport": "$hostport",
				"status":   "$status",
				"version":  "$version",
			}}}},
	})
	var hosts []hostWithContainers
	hostsMap := make(map[string]*hostWithContainers)
	err := pipe.All(&hosts)
	if err != nil {
		return err
	}
	totalCount := 0
	// Index hosts by address (pointing into the slice so Count updates
	// below are shared) and total up the containers.
	for i, host := range hosts {
		hostsMap[host.HostAddr] = &hosts[i]
		totalCount += host.Count
	}
	cluster := dockerCluster()
	allNodes, err := cluster.Nodes()
	if err != nil {
		return err
	}
	// Seed empty entries for nodes that currently have no containers so
	// minHost can consider them as destinations.
	for _, node := range allNodes {
		hostAddr := urlToHost(node.Address)
		_, present := hostsMap[hostAddr]
		if !present {
			hosts = append(hosts, hostWithContainers{HostAddr: hostAddr})
			hostsMap[hostAddr] = &hosts[len(hosts)-1]
		}
	}
	numberOfNodes := len(allNodes)
	// Target ceiling: no host should keep more than this many containers.
	maxContsPerUnit := int(math.Ceil(float64(totalCount) / float64(numberOfNodes)))
	for _, host := range hosts {
		toMoveCount := host.Count - maxContsPerUnit
		if toMoveCount <= 0 {
			continue
		}
		logProgress(encoder, "Trying to move %d units from %s...", toMoveCount, host.HostAddr)
		wg := sync.WaitGroup{}
		moveErrors := make(chan error, toMoveCount)
		for _, cont := range host.Containers {
			// Destinations are restricted per app (scheduler options).
			possibleDests, err := cluster.NodesForOptions(cont.AppName)
			if err != nil {
				return err
			}
			minDest := minHost(hostsMap, possibleDests)
			if minDest.Count < maxContsPerUnit {
				toMoveCount--
				// Reserve the slot up front so later picks see it.
				minDest.Count++
				if dryRun {
					logProgress(encoder, "Would move unit %s for %q: %s -> %s...", cont.ID, cont.AppName, cont.HostAddr, minDest.HostAddr)
				} else {
					wg.Add(1)
					go moveOneContainer(cont, minDest.HostAddr, moveErrors, &wg, encoder)
				}
			}
			if toMoveCount == 0 {
				break
			}
		}
		go func() {
			wg.Wait()
			close(moveErrors)
		}()
		// First error aborts the rebalance for the remaining hosts.
		for err := range moveErrors {
			log.Errorf("Error moving container - %s", err)
			return err
		}
	}
	return nil
}
|
[
6
] |
package qp
import (
	"context"
	"errors"
	"fmt"
	"os"
	"os/signal"
	"reflect"
	"strings"
	"sync"
	"syscall"
)
var ModuleNotExist = errors.New("模块不存在")
// appBase is the application container that registers, runs and stops
// modules.
type appBase struct {
	moduleSchema sync.Map // map[string]reflect.Type — keeps every module's type so modules can be restarted
	current      sync.Map // map[string]canceledModule — the modules currently running, with their cancel funcs
	rootCtx      context.Context
	rootCancel   context.CancelFunc
	// Discover is the client-side service discovery module.
	Discover Discover
}
// New creates an appBase with a cancellable root context that all module
// contexts derive from.
// NOTE(review): the name parameter is currently unused — confirm whether it
// should be stored or the parameter dropped.
func New(name string) *appBase {
	a := &appBase{}
	a.rootCtx, a.rootCancel = context.WithCancel(context.Background())
	return a
}
// canceledModule pairs a running module with the cancel func of the context
// it was started with, so Stop can cancel it individually.
type canceledModule struct {
	m      Module
	cancel context.CancelFunc
}
// Get returns the running module registered under name, or nil when it is
// not running.
func (a *appBase) Get(name string) Module {
	v, ok := a.current.Load(name)
	if !ok {
		return nil
	}
	// Fix: Run stores canceledModule wrappers in a.current, so the
	// original unconditional v.(Module) assertion panicked for every
	// entry stored by Run. Unwrap the wrapper (and keep accepting a bare
	// Module for compatibility).
	switch m := v.(type) {
	case canceledModule:
		return m.m
	case Module:
		return m
	}
	return nil
}
// Run starts all the given modules: after validating names it calls every
// module's Create concurrently, waits, then launches each module's Start in
// its own goroutine with a context derived from the root context. Run blocks
// until all modules have stopped. Panics and Start errors are funneled to
// the module's Stop via recoverStop.
func (a *appBase) Run(ms ...Module) {
	if err := a.checkModuleName(ms); err != nil {
		panic(err)
	}
	var wg sync.WaitGroup
	// Phase 1: create all modules concurrently and record their concrete
	// types in moduleSchema (for later restarts).
	for _, mod := range ms {
		m := mod
		m.setApp(a)
		wg.Add(1)
		go func() {
			defer wg.Done()
			m.Create()
		}()
		typ := reflect.TypeOf(m)
		if typ.Kind() == reflect.Ptr {
			typ = typ.Elem()
		}
		a.moduleSchema.Store(m.Name(), typ)
	}
	wg.Wait()
	// Phase 2: start every module with its own cancellable context.
	for _, mod := range ms {
		m := mod
		wg.Add(1)
		ctx, cancel := context.WithCancel(a.rootCtx)
		a.current.Store(m.Name(), canceledModule{m, cancel})
		go func() {
			var startErr error
			// On exit — panic or normal return — invoke the
			// module's Stop with whatever error occurred.
			defer func() {
				if err := recover(); err != nil {
					// todo logger
					err = fmt.Errorf("%v", err)
					recoverStop(m.Stop, err.(error))
				} else {
					err = startErr
					if err == nil {
						recoverStop(m.Stop, nil)
					} else {
						recoverStop(m.Stop, err.(error))
					}
				}
				wg.Done()
			}()
			startErr = m.Start(ctx, &wg)
			return
		}()
	}
	// Listen for OS signals to shut everything down.
	go a.shutdownListen()
	wg.Wait()
}
// recoverStop calls a module's Stop function with err, swallowing any panic
// raised by Stop itself so one misbehaving module cannot take down the app.
func recoverStop(f func(err error), err error) {
	defer func() {
		if err := recover(); err != nil {
			fmt.Println("module stop error!")
		}
	}()
	f(err)
}
// shutdownListen blocks until the process receives an interrupt or TERM
// signal, then stops every running module via Stop("").
func (a *appBase) shutdownListen() {
	ch := make(chan os.Signal, 1)
	// Fix: the original called signal.Notify(ch) with no signal list,
	// which subscribes to ALL incoming signals — including SIGURG, which
	// the Go runtime raises routinely for goroutine preemption — so the
	// whole app could shut down spuriously. Listen only for the
	// conventional shutdown signals.
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
	<-ch
	_ = a.Stop("")
	//appBase.Kill(nil)
}
// Stop stops the named module; an empty name stops every module by
// cancelling the root context. It returns ModuleNotExist for an unknown
// module and an error when the module is already stopped.
func (app *appBase) Stop(module string) error {
	if module == "" {
		app.rootCancel()
		return nil
	}
	if _, known := app.moduleSchema.Load(module); !known {
		return ModuleNotExist
	}
	mod, running := app.current.Load(module)
	if !running {
		return errors.New("该模块已经停止")
	}
	// Drop it from the running set, then cancel its context.
	app.current.Delete(module)
	mod.(canceledModule).cancel()
	return nil
}
// checkModuleName validates that every module name is non-empty, carries no
// surrounding whitespace, and is unique within the given slice.
func (a *appBase) checkModuleName(ms []Module) error {
	seen := make(map[string]struct{}, len(ms))
	for _, m := range ms {
		name := m.Name()
		if name != strings.TrimSpace(name) {
			return fmt.Errorf("模块%v的名字不能包含空格", name)
		}
		if name == "" {
			return errors.New("模块名字不能为空")
		}
		if _, dup := seen[name]; dup {
			return fmt.Errorf("模块%v名字重复", name)
		}
		seen[name] = struct{}{}
	}
	return nil
}
|
[
3
] |
package app
import (
"Ingress/src/db"
"Ingress/src/models"
"Ingress/src/rest"
"log"
"github.com/gin-gonic/gin"
)
//App - struct initalizer for ingress
type App struct {
Router *gin.Engine
DB *db.Session
IP string
Port int
}
//Initialize - copies the database address and listen port out of the startup
//configuration into the App.
func (a *App) Initialize(config *models.StartupConfiguration) {
	a.IP = config.DBIp
	a.Port = config.Port
}
//Run - connects to the database, builds the REST router and serves until the
//router exits. The DB session is closed when Run returns.
func (a *App) Run() {
	var err error
	a.DB, err = db.InitDB(a.IP)
	if err != nil {
		log.Fatalf("DB connection failed: %s", err)
	}
	defer a.DB.Close()
	a.Router = rest.NewRouter(false, a.DB)
	// Fix: Router.Run's error was silently discarded; log it so a failed
	// listen/serve is visible. Not Fatalf, so the deferred DB.Close still runs.
	if err := a.Router.Run(); err != nil {
		log.Printf("HTTP server stopped: %s", err)
	}
}
|
[
3
] |
package eventManager
import (
"../config"
"../hw"
"../queue"
"time"
)
// Channels bundles the channels the elevator state machine uses to talk to
// the hardware and timer goroutines.
type Channels struct {
	NewOrder chan bool       // a new order has been queued
	DoorLamp chan bool       // door lamp on/off command
	ReachedFloor chan int    // floor sensor reports arrival at a floor
	MotorDir chan int        // motor direction command
	DoorTimeout chan bool    // door-open timer expired
	DoorTimerReset chan bool // (re)start the door-open timer
}

// Shared elevator state, owned by the Run event loop.
var floor int // last floor reached
var dir int   // current travel direction
var state int // one of config.Idle / config.Moving / config.DoorOpen

// Init resets the state machine: idle, stopped, at floor 0.
func Init() {
	state = config.Idle
	dir = config.DIR_STOP
	floor = 0
}

// GetFloorDirState returns the current floor, direction and FSM state.
func GetFloorDirState() (int, int, int) {
	return floor, dir, state
}
// Run is the elevator's main event loop: it dispatches new orders, floor
// sensor events and door-timer expiries to the state-machine handlers.
// Blocks forever.
func Run(ch Channels) {
	for {
		select {
		case <-ch.NewOrder:
			handleNewOrder(ch)
		// The local floor shadows the package-level variable on purpose;
		// handleReachedFloor is what stores it into the package state.
		case floor := <-ch.ReachedFloor:
			handleReachedFloor(ch, floor)
		case <-ch.DoorTimeout:
			handleDoorClosing(ch)
		}
	}
}
// handleNewOrder reacts to a newly queued order based on the FSM state:
// an idle elevator either opens the door (order at the current floor) or
// starts moving; a moving elevator ignores the event (it re-evaluates on the
// next floor); with the door open, an order at this floor re-arms the timer.
func handleNewOrder(ch Channels) {
	switch state {
	case config.Idle:
		dir = queue.ChooseDirection(floor, dir)
		if queue.ShouldStop(dir, floor) {
			ch.DoorTimerReset <- true
			state = config.DoorOpen
			queue.RemoveOrder(floor)
			ch.DoorLamp <- true
		} else {
			ch.MotorDir <- dir
			state = config.Moving
		}
	case config.Moving:
		//Ignore
	case config.DoorOpen:
		if queue.ShouldStop(dir, floor) {
			ch.DoorTimerReset <- true
			queue.RemoveOrder(floor)
		}
	}
}
// handleReachedFloor records the new floor and, if the queue says to stop
// here while moving, halts the motor, opens the door and serves the order.
func handleReachedFloor(ch Channels, newFloor int) {
	floor = newFloor
	switch state {
	case config.Idle:
		//Ignore
	case config.Moving:
		if queue.ShouldStop(dir, floor) {
			dir = config.DIR_STOP
			ch.MotorDir <- dir
			state = config.DoorOpen
			queue.RemoveOrder(floor)
			ch.DoorLamp <- true
			ch.DoorTimerReset <- true
		}
	}
}
// handleDoorClosing turns the door lamp off and either parks the elevator
// (no pending orders) or resumes travel toward the next order.
//
// Change: queue.ChooseDirection was previously evaluated twice with the same
// arguments on the non-stop path; compute it once and branch on the result.
func handleDoorClosing(ch Channels) {
	ch.DoorLamp <- false
	next := queue.ChooseDirection(floor, dir)
	dir = next
	ch.MotorDir <- dir
	if next == config.DIR_STOP {
		state = config.Idle
	} else {
		state = config.Moving
	}
}
func OpenDoor(doorTimeout chan<- bool, resetTimer <-chan bool) {
timer := time.NewTimer(0)
timer.Stop()
for {
select {
case <-resetTimer:
timer.Reset(3 * time.Second)
case <-timer.C:
timer.Stop()
doorTimeout <- true
}
}
}
// PollFloors polls the floor sensor every 100 ms and forwards each newly
// detected floor on temp, updating the hardware floor indicator as a side
// effect. The between-floors reading (-1) is ignored. Blocks forever.
func PollFloors(temp chan int) {
	oldFloor := hw.GetFloorSensorSignal()
	for {
		newFloor := hw.GetFloorSensorSignal()
		if newFloor != oldFloor && newFloor != -1 {
			hw.SetFloorIndicator(newFloor)
			temp <- newFloor
		}
		oldFloor = newFloor
		time.Sleep(time.Millisecond * 100)
	}
}
// PollButtons polls every button every 100 ms, emitting an OrderInfo and
// lighting the button lamp for each new press. The pressed matrix makes this
// edge-triggered: a held button fires only once. Buttons that do not exist
// (down at the bottom floor, up at the top floor) are skipped. Blocks forever.
func PollButtons(temp chan config.OrderInfo) {
	var pressed [config.N_FLOORS][config.N_BUTTONS]bool
	for {
		for floor := 0; floor < config.N_FLOORS; floor++ {
			for button := 0; button < config.N_BUTTONS; button++ {
				if (floor == 0 && button == config.BUTTON_DOWN) || (floor == config.N_FLOORS-1 && button == config.BUTTON_UP) {
					continue
				}
				if hw.GetButtonSignal(button, floor) {
					if !pressed[floor][button] {
						pressed[floor][button] = true
						temp <- config.OrderInfo{Button: button, Floor: floor}
						hw.SetButtonLamp(button, floor, true)
					}
				} else {
					pressed[floor][button] = false
				}
			}
		}
		time.Sleep(time.Millisecond * 100)
	}
}
|
[
3
] |
package controller
import (
"github.com/goinggo/mapstructure"
"go.uber.org/zap"
"slgserver/config"
"slgserver/constant"
"slgserver/log"
"slgserver/middleware"
"slgserver/net"
chat_proto "slgserver/server/chatserver/proto"
"slgserver/server/slgserver/proto"
"strings"
"sync"
)
// GHandle is the package-level gateway handler instance.
var GHandle = Handle{
	proxys: make(map[string]map[int64]*net.ProxyClient),
}

// Handle routes client messages to the appropriate backend (login / chat /
// slg), keeping one proxy client per (backend, client connection) pair.
type Handle struct {
	proxyMutex sync.Mutex // guards proxys
	proxys map[string]map[int64]*net.ProxyClient // backend address -> connection id -> proxy client
	slgProxy string   // slg backend address
	chatProxy string  // chat backend address
	loginProxy string // login backend address
}
// isAccount reports whether msgName belongs to the account service, i.e. has
// the exact two-segment form "account.<method>".
func isAccount(msgName string) bool {
	parts := strings.Split(msgName, ".")
	return len(parts) == 2 && parts[0] == "account"
}
// isChat reports whether msgName belongs to the chat service, i.e. has the
// exact two-segment form "chat.<method>".
func isChat(msgName string) bool {
	parts := strings.Split(msgName, ".")
	return len(parts) == 2 && parts[0] == "chat"
}
// InitRouter loads the backend addresses and registers a catch-all route that
// forwards every message through this handler, with timing and log middleware.
func (this*Handle) InitRouter(r *net.Router) {
	this.init()
	g := r.Group("*").Use(middleware.ElapsedTime(), middleware.Log())
	g.AddRouter("*", this.all)
}

// init reads the backend proxy addresses from the gateserver config section,
// falling back to localhost defaults.
func (this*Handle) init() {
	this.slgProxy = config.File.MustValue("gateserver", "slg_proxy", "ws://127.0.0.1:8001")
	this.chatProxy = config.File.MustValue("gateserver", "chat_proxy", "ws://127.0.0.1:8002")
	this.loginProxy = config.File.MustValue("gateserver", "login_proxy", "ws://127.0.0.1:8003")
}
// onPush relays a push message arriving on a backend connection back to the
// originating gateway websocket connection.
func (this*Handle) onPush(conn *net.ClientConn, body *net.RspBody) {
	gc, err := conn.GetProperty("gateConn")
	if err != nil{
		return
	}
	// NOTE(review): this assertion panics if "gateConn" ever holds anything
	// other than net.WSConn — confirm every writer stores that exact type.
	gateConn := gc.(net.WSConn)
	gateConn.Push(body.Name, body.Msg)
}
// onProxyClose removes a closed backend connection from the proxy cache,
// keyed by the backend address and client connection id it had stored as
// properties when it was created (see deal).
func (this*Handle) onProxyClose(conn *net.ClientConn) {
	p, err := conn.GetProperty("proxy")
	if err == nil {
		proxyStr := p.(string)
		this.proxyMutex.Lock()
		_, ok := this.proxys[proxyStr]
		if ok {
			c, err := conn.GetProperty("cid")
			if err == nil{
				cid := c.(int64)
				delete(this.proxys[proxyStr], cid)
			}
		}
		this.proxyMutex.Unlock()
	}
}
// OnServerConnClose is invoked when a gateway client connection closes: it
// removes all of that client's backend proxies from the cache and then closes
// them. The Close calls happen after the mutex is released on purpose, so a
// slow close cannot block other connections.
func (this*Handle) OnServerConnClose (conn net.WSConn){
	c, err := conn.GetProperty("cid")
	arr := make([]*net.ProxyClient, 0)
	if err == nil{
		cid := c.(int64)
		this.proxyMutex.Lock()
		for _, m := range this.proxys {
			proxy, ok := m[cid]
			if ok {
				arr = append(arr, proxy)
			}
			delete(m, cid)
		}
		this.proxyMutex.Unlock()
	}
	for _, client := range arr {
		client.Close()
	}
}
// all is the catch-all route handler: it forwards the request to the proper
// backend and, when a role.enterServer succeeds, follows up by logging the
// role into the chat server over the same gateway connection.
func (this*Handle) all(req *net.WsMsgReq, rsp *net.WsMsgRsp) {
	log.DefaultLog.Info("gateserver handle all begin",
		zap.String("proxyStr", req.Body.Proxy),
		zap.String("msgName", req.Body.Name))
	this.deal(req, rsp)
	if req.Body.Name == "role.enterServer" && rsp.Body.Code == constant.OK {
		// log the role into the chat server
		rspObj := &proto.EnterServerRsp{}
		// NOTE(review): Decode's error is ignored; a decode failure would send
		// a chat login with zero-valued fields — confirm that is acceptable.
		mapstructure.Decode(rsp.Body.Msg, rspObj)
		r := &chat_proto.LoginReq{RId: rspObj.Role.RId, NickName: rspObj.Role.NickName, Token: rspObj.Token}
		reqBody := &net.ReqBody{Seq: 0, Name: "chat.login", Msg: r, Proxy: ""}
		rspBody := &net.RspBody{Seq: 0, Name: "chat.login", Msg: r, Code: 0}
		this.deal(&net.WsMsgReq{Body: reqBody, Conn:req.Conn}, &net.WsMsgRsp{Body: rspBody})
	}
	log.DefaultLog.Info("gateserver handle all end",
		zap.String("proxyStr", req.Body.Proxy),
		zap.String("msgName", req.Body.Name))
}
// deal forwards a request to the backend selected by its message-name prefix
// (account.* -> login, chat.* -> chat, everything else -> slg), creating and
// caching a proxy client per (backend, client connection id) on first use,
// then relays the backend's response into rsp.
func (this*Handle) deal(req *net.WsMsgReq, rsp *net.WsMsgRsp) {
	// protocol forwarding
	// NOTE(review): this initial value is dead — the if/else chain below
	// always overwrites proxyStr, so req.Body.Proxy is never actually used.
	proxyStr := req.Body.Proxy
	if isAccount(req.Body.Name){
		proxyStr = this.loginProxy
	}else if isChat(req.Body.Name){
		proxyStr = this.chatProxy
	} else{
		proxyStr = this.slgProxy
	}
	if proxyStr == ""{
		rsp.Body.Code = constant.ProxyNotInConnect
		return
	}
	this.proxyMutex.Lock()
	_, ok := this.proxys[proxyStr]
	if ok == false {
		this.proxys[proxyStr] = make(map[int64]*net.ProxyClient)
	}
	var err error
	var proxy *net.ProxyClient
	d, _ := req.Conn.GetProperty("cid")
	cid := d.(int64)
	proxy, ok = this.proxys[proxyStr][cid]
	this.proxyMutex.Unlock()
	// NOTE(review): there is a check-then-create window here — two concurrent
	// requests for the same cid can both miss the cache and each dial a
	// proxy; the second Store overwrites (and leaks) the first. Confirm
	// whether requests for one connection are serialized upstream.
	if ok == false {
		proxy = net.NewProxyClient(proxyStr)
		this.proxyMutex.Lock()
		this.proxys[proxyStr][cid] = proxy
		this.proxyMutex.Unlock()
		// Dialing blocks, so it is done outside the lock on purpose.
		err = proxy.Connect()
		if err == nil{
			proxy.SetProperty("cid", cid)
			proxy.SetProperty("proxy", proxyStr)
			proxy.SetProperty("gateConn", req.Conn)
			proxy.SetOnPush(this.onPush)
			proxy.SetOnClose(this.onProxyClose)
		}
	}
	if err != nil {
		this.proxyMutex.Lock()
		delete(this.proxys[proxyStr], cid)
		this.proxyMutex.Unlock()
		rsp.Body.Code = constant.ProxyConnectError
		return
	}
	rsp.Body.Seq = req.Body.Seq
	rsp.Body.Name = req.Body.Name
	r, err := proxy.Send(req.Body.Name, req.Body.Msg)
	if err == nil{
		rsp.Body.Code = r.Code
		rsp.Body.Msg = r.Msg
	}else{
		rsp.Body.Code = constant.ProxyConnectError
		rsp.Body.Msg = nil
	}
}
|
[
3,
5
] |
package imgresizer
import (
"fmt"
"image"
"image/jpeg"
_ "image/png"
"os"
"path/filepath"
"golang.org/x/image/draw"
)
func saveImg(src image.Image, filepath string, Quality int, force ...bool) error {
_, err := os.Stat(filepath)
if os.IsExist(err) && len(force) > 0 && force[0] {
return fmt.Errorf("该文件已存在")
}
dst, err := os.Create(filepath)
defer func() {
err = dst.Close()
}()
if err != nil {
return err
}
if err = jpeg.Encode(dst, src, &jpeg.Options{Quality: Quality}); err != nil {
return err
}
return err
}
// scaleTo resizes src by ratio percent (clamped to the range 1..100) using
// the supplied scaler and returns the result as a fresh RGBA image.
func scaleTo(src image.Image, scale draw.Scaler, ratio int) image.Image {
	switch {
	case ratio > 100:
		ratio = 100
	case ratio < 1:
		ratio = 1
	}
	srcBounds := src.Bounds()
	target := image.Rect(0, 0, srcBounds.Max.X*ratio/100, srcBounds.Max.Y*ratio/100)
	out := image.NewRGBA(target)
	scale.Scale(out, target, src, srcBounds, draw.Over, nil)
	return out
}
// openImg opens and decodes the image at filepath using whatever formats are
// registered with the image package, returning the decoded image.
func openImg(filepath string) (image.Image, error) {
	f, err := os.Open(filepath)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	img, _, decodeErr := image.Decode(f)
	if decodeErr != nil {
		return nil, decodeErr
	}
	return img, nil
}
// Resize opens filename, scales it by ratio percent with the given scaler,
// and saves the JPEG result under the same base name inside dstDir,
// overwriting an existing file (force=true).
//
// Change: the openImg error is now checked immediately, before the target
// path is derived — previously the path work ran first, which was harmless
// but misleading to read.
func Resize(filename, dstDir string, Quality int, ratio int, scaler draw.Interpolator) error {
	img, err := openImg(filename)
	if err != nil {
		return err
	}
	_, pureName := filepath.Split(filename)
	targetFile := filepath.Join(dstDir, pureName)
	dst := scaleTo(img, scaler, ratio)
	return saveImg(dst, targetFile, Quality, true)
}
|
[
2,
6
] |
package finallyIndexAddOne
// FinallyIndexAddOne adds one to the number represented by the digit slice
// intS (most-significant digit first) and returns the resulting digits. An
// empty input is treated as zero, yielding []int{1}.
//
// Fixes over the previous version:
//   - the carry test used num > 10, so 9+1 produced the invalid digit 10
//     instead of carrying;
//   - the loop that should have copied/propagated through the remaining
//     digits had an empty body and skipped index 0, dropping all higher
//     digits from the result;
//   - a final carry (e.g. 99+1) never produced the extra leading digit;
//   - empty input previously panicked on intS[len-1].
func FinallyIndexAddOne(intS []int) []int {
	if len(intS) == 0 {
		return []int{1}
	}
	out := make([]int, len(intS))
	copy(out, intS)
	carry := 1
	for i := len(out) - 1; i >= 0 && carry > 0; i-- {
		out[i] += carry
		if out[i] >= 10 {
			out[i] -= 10
			carry = 1
		} else {
			carry = 0
		}
	}
	if carry > 0 {
		out = append([]int{1}, out...)
	}
	return out
}
|
[
1
] |
// Code generated by go-swagger; DO NOT EDIT.
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"strconv"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// NfsExportMapAll nfs export map all
// swagger:model NfsExportMapAll
type NfsExportMapAll struct {
// True if the user mapping is applied.
Enabled bool `json:"enabled,omitempty"`
// Specifies properties for a persona, which consists of either a 'type' and a 'name' or an 'ID'.
PrimaryGroup *GroupMember `json:"primary_group,omitempty"`
// Specifies persona properties for the secondary user group. A persona consists of either a type and name, or an ID.
SecondaryGroups []*NfsExportMapAllSecondaryGroupsItems0 `json:"secondary_groups"`
// Specifies properties for a persona, which consists of either a 'type' and a 'name' or an 'ID'.
User *GroupMember `json:"user,omitempty"`
}
// Validate validates this nfs export map all
func (m *NfsExportMapAll) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validatePrimaryGroup(formats); err != nil {
res = append(res, err)
}
if err := m.validateSecondaryGroups(formats); err != nil {
res = append(res, err)
}
if err := m.validateUser(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *NfsExportMapAll) validatePrimaryGroup(formats strfmt.Registry) error {
if swag.IsZero(m.PrimaryGroup) { // not required
return nil
}
if m.PrimaryGroup != nil {
if err := m.PrimaryGroup.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("primary_group")
}
return err
}
}
return nil
}
func (m *NfsExportMapAll) validateSecondaryGroups(formats strfmt.Registry) error {
if swag.IsZero(m.SecondaryGroups) { // not required
return nil
}
for i := 0; i < len(m.SecondaryGroups); i++ {
if swag.IsZero(m.SecondaryGroups[i]) { // not required
continue
}
if m.SecondaryGroups[i] != nil {
if err := m.SecondaryGroups[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("secondary_groups" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *NfsExportMapAll) validateUser(formats strfmt.Registry) error {
if swag.IsZero(m.User) { // not required
return nil
}
if m.User != nil {
if err := m.User.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("user")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *NfsExportMapAll) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *NfsExportMapAll) UnmarshalBinary(b []byte) error {
var res NfsExportMapAll
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// NfsExportMapAllSecondaryGroupsItems0 Specifies properties for a persona, which consists of either a 'type' and a 'name' or an 'ID'.
// swagger:model NfsExportMapAllSecondaryGroupsItems0
type NfsExportMapAllSecondaryGroupsItems0 struct {
// Specifies the serialized form of a persona, which can be 'UID:0', 'USER:name', 'GID:0', 'GROUP:wheel', or 'SID:S-1-1'.
ID string `json:"id,omitempty"`
// Specifies the persona name, which must be combined with a type.
Name string `json:"name,omitempty"`
// Specifies the type of persona, which must be combined with a name.
// Enum: [user group wellknown]
Type string `json:"type,omitempty"`
}
// Validate validates this nfs export map all secondary groups items0
func (m *NfsExportMapAllSecondaryGroupsItems0) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateType(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var nfsExportMapAllSecondaryGroupsItems0TypeTypePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["user","group","wellknown"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
nfsExportMapAllSecondaryGroupsItems0TypeTypePropEnum = append(nfsExportMapAllSecondaryGroupsItems0TypeTypePropEnum, v)
}
}
const (
// NfsExportMapAllSecondaryGroupsItems0TypeUser captures enum value "user"
NfsExportMapAllSecondaryGroupsItems0TypeUser string = "user"
// NfsExportMapAllSecondaryGroupsItems0TypeGroup captures enum value "group"
NfsExportMapAllSecondaryGroupsItems0TypeGroup string = "group"
// NfsExportMapAllSecondaryGroupsItems0TypeWellknown captures enum value "wellknown"
NfsExportMapAllSecondaryGroupsItems0TypeWellknown string = "wellknown"
)
// prop value enum
func (m *NfsExportMapAllSecondaryGroupsItems0) validateTypeEnum(path, location string, value string) error {
if err := validate.Enum(path, location, value, nfsExportMapAllSecondaryGroupsItems0TypeTypePropEnum); err != nil {
return err
}
return nil
}
func (m *NfsExportMapAllSecondaryGroupsItems0) validateType(formats strfmt.Registry) error {
if swag.IsZero(m.Type) { // not required
return nil
}
// value enum
if err := m.validateTypeEnum("type", "body", m.Type); err != nil {
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *NfsExportMapAllSecondaryGroupsItems0) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *NfsExportMapAllSecondaryGroupsItems0) UnmarshalBinary(b []byte) error {
var res NfsExportMapAllSecondaryGroupsItems0
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
|
[
6
] |
package crypto
import (
"bytes"
"fmt"
"io"
pb "gx/ipfs/QmNiJiXwWE3kRhZrC5ej3kSjWHm337pYfhjLGSCDNKJP2s/go-libp2p-crypto/pb"
"gx/ipfs/QmW7VUmSvhvSGbYbdsh7uRjhGmsYkc9fL8aJ5CorxxrU5N/go-crypto/ed25519"
)
// Ed25519PrivateKey is an ed25519 private key
type Ed25519PrivateKey struct {
k ed25519.PrivateKey
}
// Ed25519PublicKey is an ed25519 public key
type Ed25519PublicKey struct {
k ed25519.PublicKey
}
// GenerateEd25519Key generate a new ed25519 private and public key pair
func GenerateEd25519Key(src io.Reader) (PrivKey, PubKey, error) {
pub, priv, err := ed25519.GenerateKey(src)
if err != nil {
return nil, nil, err
}
return &Ed25519PrivateKey{
k: priv,
},
&Ed25519PublicKey{
k: pub,
},
nil
}
func (k *Ed25519PrivateKey) Type() pb.KeyType {
return pb.KeyType_Ed25519
}
// Bytes marshals an ed25519 private key to protobuf bytes
func (k *Ed25519PrivateKey) Bytes() ([]byte, error) {
return MarshalPrivateKey(k)
}
func (k *Ed25519PrivateKey) Raw() ([]byte, error) {
// Intentionally redundant for backwards compatibility.
// Issue: #36
// TODO: Remove the second copy of the public key at some point in the future.
buf := make([]byte, len(k.k)+ed25519.PublicKeySize)
copy(buf, k.k)
copy(buf[len(k.k):], k.pubKeyBytes())
return buf, nil
}
func (k *Ed25519PrivateKey) pubKeyBytes() []byte {
return k.k[ed25519.PrivateKeySize-ed25519.PublicKeySize:]
}
// Equals compares two ed25519 private keys
func (k *Ed25519PrivateKey) Equals(o Key) bool {
edk, ok := o.(*Ed25519PrivateKey)
if !ok {
return false
}
return bytes.Equal(k.k, edk.k)
}
// GetPublic returns an ed25519 public key from a private key
func (k *Ed25519PrivateKey) GetPublic() PubKey {
return &Ed25519PublicKey{k: k.pubKeyBytes()}
}
// Sign returns a signature from an input message
func (k *Ed25519PrivateKey) Sign(msg []byte) ([]byte, error) {
return ed25519.Sign(k.k, msg), nil
}
func (k *Ed25519PublicKey) Type() pb.KeyType {
return pb.KeyType_Ed25519
}
// Bytes returns a ed25519 public key as protobuf bytes
func (k *Ed25519PublicKey) Bytes() ([]byte, error) {
return MarshalPublicKey(k)
}
func (k *Ed25519PublicKey) Raw() ([]byte, error) {
return k.k, nil
}
// Equals compares two ed25519 public keys
func (k *Ed25519PublicKey) Equals(o Key) bool {
edk, ok := o.(*Ed25519PublicKey)
if !ok {
return false
}
return bytes.Equal(k.k, edk.k)
}
// Verify checks a signature agains the input data
func (k *Ed25519PublicKey) Verify(data []byte, sig []byte) (bool, error) {
return ed25519.Verify(k.k, data, sig), nil
}
// UnmarshalEd25519PublicKey returns a public key from input bytes. The input
// must be exactly ed25519.PublicKeySize (32) bytes long.
//
// Change: the magic number 32 is replaced with the named constant so the
// check and message cannot drift from the library's definition.
func UnmarshalEd25519PublicKey(data []byte) (PubKey, error) {
	if len(data) != ed25519.PublicKeySize {
		return nil, fmt.Errorf("expect ed25519 public key data size to be %d", ed25519.PublicKeySize)
	}
	return &Ed25519PublicKey{
		k: ed25519.PublicKey(data),
	}, nil
}
// UnmarshalEd25519PrivateKey returns a private key from input bytes. It
// accepts either a bare 64-byte private key or the legacy 96-byte form that
// appends a redundant copy of the public key (see issue #36), which must
// match the public half embedded in the private key.
//
// Fix: the error message's second accepted size was computed as
// PublicKeySize+PublicKeySize (64) instead of PrivateKeySize+PublicKeySize
// (96), misreporting the legacy format's length.
func UnmarshalEd25519PrivateKey(data []byte) (PrivKey, error) {
	switch len(data) {
	case ed25519.PrivateKeySize + ed25519.PublicKeySize:
		// Remove the redundant public key. See issue #36.
		redundantPk := data[ed25519.PrivateKeySize:]
		if !bytes.Equal(data[len(data)-ed25519.PublicKeySize:], redundantPk) {
			return nil, fmt.Errorf("expected redundant ed25519 public key to be redundant")
		}
		// No point in storing the extra data.
		newKey := make([]byte, ed25519.PrivateKeySize)
		copy(newKey, data[:ed25519.PrivateKeySize])
		data = newKey
	case ed25519.PrivateKeySize:
	default:
		return nil, fmt.Errorf(
			"expected ed25519 data size to be %d or %d",
			ed25519.PrivateKeySize,
			ed25519.PrivateKeySize+ed25519.PublicKeySize,
		)
	}
	return &Ed25519PrivateKey{
		k: ed25519.PrivateKey(data),
	}, nil
}
|
[
6
] |
package app
import (
"sort"
"gopkg.in/urfave/cli.v1"
)
//NewCliApp ...
func NewCliApp() *cli.App {
app := cli.NewApp()
app.Name = "erygo"
app.Version = "0.1.0"
opts := NewErygoCmdOptions()
opts.AddFlags(app)
app.Action = func(c *cli.Context) error {
proc := NewErygoApp()
return proc.Run(opts)
}
// sort flags by name
sort.Sort(cli.FlagsByName(app.Flags))
return app
}
|
[
3
] |
/*******************************************************************************
* Copyright 2020 Samsung Electronics All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*******************************************************************************/
package connectionutil
import (
"errors"
"net"
"github.com/lf-edge/edge-home-orchestration-go/src/restinterface/tls"
rafftls "github.com/raff/tls-ext"
"github.com/raff/tls-psk"
)
type networkUtilImpl struct{}
var networkUtilIns networkUtilImpl
// init is intentionally empty: networkUtilIns is a stateless zero-size value
// and needs no setup.
func init() {
}
//NetworkUtil interface declares the network methods
type NetworkUtil interface {
ConnectToHost(string, string, bool) (net.Conn, error)
WriteTo(net.Conn, []byte) error
ReadFrom(net.Conn) (int, []byte, error)
ListenIP(address string, isSecure bool) (net.Listener, error)
}
//GetInstance returns the NetworkUtil instance
func GetInstance() NetworkUtil {
return networkUtilIns
}
//ConnectToHost connects to a tcp host at ip:port. When isSecure is true the
//connection is wrapped in TLS-PSK using the orchestration key/identity
//callbacks; otherwise a plain TCP dial is used.
//
//NOTE(review): this PSK config is duplicated verbatim in ListenIP — consider
//extracting a shared constructor so the two cannot drift.
func (networkUtilImpl) ConnectToHost(ip string, port string, isSecure bool) (net.Conn, error) {
	if !isSecure {
		conn, err := net.Dial("tcp", ip+":"+port)
		return conn, err
	}
	var config = &rafftls.Config{
		CipherSuites: []uint16{psk.TLS_PSK_WITH_AES_128_CBC_SHA},
		Certificates: []rafftls.Certificate{rafftls.Certificate{}},
		Extra: psk.PSKConfig{
			GetKey:      tls.GetKey,
			GetIdentity: tls.GetIdentity,
		},
	}
	conn, err := rafftls.Dial("tcp", ip+":"+port, config)
	return conn, err
}
//WriteTo writes data to conn, returning the write error, or an error when
//conn is nil.
func (networkUtilImpl) WriteTo(conn net.Conn, data []byte) error {
	if conn == nil {
		return errors.New("Connection is nil")
	}
	_, err := conn.Write(data)
	return err
}
//ReadFrom performs a single read of up to 1024 bytes from conn, returning the
//byte count, the buffer, and the read error. With a nil conn it returns
//(-1, empty, error).
//
//NOTE(review): the full 1024-byte buffer is returned, not buf[:n] — callers
//must slice by the returned count themselves; confirm they all do.
func (networkUtilImpl) ReadFrom(conn net.Conn) (int, []byte, error) {
	if conn != nil {
		buf := make([]byte, 1024)
		n, err := conn.Read(buf)
		return n, buf, err
	}
	return -1, []byte(""), errors.New("Connection is nil")
}
//ListenIP starts a tcp server at the given address. When isSecure is true the
//listener is wrapped in TLS-PSK with the same key/identity callbacks used by
//ConnectToHost (the config is duplicated there — see the note on that method).
func (networkUtilImpl) ListenIP(address string, isSecure bool) (net.Listener, error) {
	if !isSecure {
		listener, err := net.Listen("tcp", address)
		return listener, err
	}
	var config = &rafftls.Config{
		CipherSuites: []uint16{psk.TLS_PSK_WITH_AES_128_CBC_SHA},
		Certificates: []rafftls.Certificate{rafftls.Certificate{}},
		Extra: psk.PSKConfig{
			GetKey:      tls.GetKey,
			GetIdentity: tls.GetIdentity,
		},
	}
	listener, err := rafftls.Listen("tcp", address, config)
	return listener, err
}
|
[
3,
6
] |
package handlers
import (
"net"
"time"
"code.cloudfoundry.org/clock"
"github.com/cloudfoundry/bosh-utils/logger"
"github.com/miekg/dns"
"bosh-dns/dns/server"
"bosh-dns/dns/server/handlers/internal"
"bosh-dns/dns/server/records/dnsresolver"
)
// ForwardHandler serves DNS requests by forwarding them to a pool of
// upstream recursors, logging each exchange with its duration.
type ForwardHandler struct {
	clock            clock.Clock
	recursors        RecursorPool     // upstream resolvers, tried per PerformStrategically
	exchangerFactory ExchangerFactory // builds a network-specific (udp/tcp) exchanger
	logger           logger.Logger
	logTag           string
	truncater        dnsresolver.ResponseTruncater
}

//counterfeiter:generate . Exchanger

// Exchanger performs one DNS exchange against a single upstream address.
type Exchanger interface {
	Exchange(*dns.Msg, string) (*dns.Msg, time.Duration, error)
}

// Cache is a read/write DNS answer cache keyed by request.
type Cache interface {
	Get(req *dns.Msg) *dns.Msg
	Write(req, answer *dns.Msg)
	GetExpired(*dns.Msg) *dns.Msg
}
// NewForwardHandler builds a ForwardHandler with the fixed log tag
// "ForwardHandler".
func NewForwardHandler(
	recursors RecursorPool,
	exchangerFactory ExchangerFactory,
	clock clock.Clock,
	logger logger.Logger,
	truncater dnsresolver.ResponseTruncater,
) ForwardHandler {
	return ForwardHandler{
		recursors:        recursors,
		exchangerFactory: exchangerFactory,
		clock:            clock,
		logger:           logger,
		logTag:           "ForwardHandler",
		truncater:        truncater,
	}
}
// ServeDNS forwards the request to the recursor pool over the same transport
// the client used (udp/tcp). Each recursor attempt that fails — transport
// error or non-success Rcode — returns an error so the pool can try the next
// one; the first success truncates (if needed), logs and writes the answer.
// If every recursor fails, an error response is synthesized and written.
func (r ForwardHandler) ServeDNS(responseWriter dns.ResponseWriter, request *dns.Msg) {
	internal.LogReceivedRequest(r.logger, r, r.logTag, request)
	before := r.clock.Now()
	if len(request.Question) == 0 {
		r.writeEmptyMessage(responseWriter, request)
		return
	}
	network := r.network(responseWriter)
	client := r.exchangerFactory(network)
	err := r.recursors.PerformStrategically(func(recursor string) error {
		exchangeAnswer, _, err := client.Exchange(request, recursor)
		if err != nil {
			question := request.Question[0].Name
			r.logger.Error(r.logTag, "error recursing for %s to %q: %s", question, recursor, err.Error())
		}
		// A non-success Rcode also counts as failure for this recursor;
		// NXDOMAIN is logged at debug since it is an expected outcome.
		if exchangeAnswer != nil && exchangeAnswer.MsgHdr.Rcode != dns.RcodeSuccess {
			question := request.Question[0].Name
			err = server.NewDnsError(exchangeAnswer.MsgHdr.Rcode, question, recursor)
			if exchangeAnswer.MsgHdr.Rcode == dns.RcodeNameError {
				r.logger.Debug(r.logTag, "error recursing to %q: %s", recursor, err.Error())
			} else {
				r.logger.Error(r.logTag, "error recursing to %q: %s", recursor, err.Error())
			}
		}
		if err != nil {
			return err
		}
		// Success: truncate for the client's transport, log, and reply.
		r.truncater.TruncateIfNeeded(responseWriter, request, exchangeAnswer)
		r.logRecursor(before, request, exchangeAnswer, "recursor="+recursor)
		if writeErr := responseWriter.WriteMsg(exchangeAnswer); writeErr != nil {
			r.logger.Error(r.logTag, "error writing response: %s", writeErr.Error())
		}
		return nil
	})
	if err != nil {
		responseMessage := r.createResponseFromError(request, err)
		r.logRecursor(before, request, responseMessage, "error=["+err.Error()+"]")
		if err := responseWriter.WriteMsg(responseMessage); err != nil {
			r.logger.Error(r.logTag, "error writing response: %s", err.Error())
		}
	}
}
// logRecursor logs one upstream exchange along with its elapsed time in
// nanoseconds.
func (r ForwardHandler) logRecursor(before time.Time, request *dns.Msg, response *dns.Msg, recursor string) {
	elapsed := r.clock.Now().Sub(before)
	internal.LogRequest(r.logger, r, r.logTag, elapsed.Nanoseconds(), request, response, recursor)
}
// network reports "tcp" when the client connected over TCP, otherwise "udp".
func (ForwardHandler) network(responseWriter dns.ResponseWriter) string {
	if _, isTCP := responseWriter.RemoteAddr().(*net.TCPAddr); isTCP {
		return "tcp"
	}
	return "udp"
}
// createResponseFromError converts a recursion error into a DNS response:
// network failures map to SERVFAIL, upstream DnsErrors preserve SERVFAIL or
// degrade to NXDOMAIN, and anything else becomes NXDOMAIN.
//
// Change: the redundant trailing break statements (and their //nolint
// suppressions) are removed — Go switch cases do not fall through.
func (r ForwardHandler) createResponseFromError(req *dns.Msg, err error) *dns.Msg {
	responseMessage := &dns.Msg{}
	responseMessage.SetReply(req)
	switch err := err.(type) {
	case net.Error:
		responseMessage.SetRcode(req, dns.RcodeServerFailure)
	case server.DnsError:
		if err.Rcode() == dns.RcodeServerFailure {
			responseMessage.SetRcode(req, dns.RcodeServerFailure)
		} else {
			responseMessage.SetRcode(req, dns.RcodeNameError)
		}
	default:
		responseMessage.SetRcode(req, dns.RcodeNameError)
	}
	return responseMessage
}
// writeEmptyMessage answers a question-less request with an empty,
// authoritative SUCCESS response.
func (r ForwardHandler) writeEmptyMessage(responseWriter dns.ResponseWriter, req *dns.Msg) {
	emptyMessage := &dns.Msg{}
	r.logger.Debug(r.logTag, "received a request with no questions")
	emptyMessage.Authoritative = true
	emptyMessage.SetRcode(req, dns.RcodeSuccess)
	if err := responseWriter.WriteMsg(emptyMessage); err != nil {
		r.logger.Error(r.logTag, "error writing response: %s", err.Error())
	}
}
|
[
6
] |
package mqtts
import (
"encoding/json"
mqtt "github.com/eclipse/paho.mqtt.golang"
"github.com/muesli/cache2go"
"github.com/qingcloudhx/core/activity"
"github.com/qingcloudhx/core/data/coerce"
"github.com/qingcloudhx/core/data/metadata"
"github.com/qingcloudhx/core/support/log"
"github.com/qingcloudhx/core/support/ssl"
"strings"
"time"
)
var activityMd = activity.ToMetadata(&Settings{}, &Input{}, &Output{})
const (
	IOT_DEVICE_STATUS_END  = "iote-global-onoffline-end"  // on/offline topic for end devices
	IOT_DEVICE_STATUS_EDGE = "iote-global-onoffline-edge" // on/offline topic for edge devices
	DEVICE_STATUS_ONLINE   = "online"  // device online
	DEVICE_STATUS_OFFLINE  = "offline" // device offline
)
func init() {
_ = activity.Register(&Activity{}, New)
}
// New builds the MQTT publisher activity: it maps the raw settings, enables
// TLS for ssl:// brokers (with permissive defaults when no SSL config was
// provided), connects the client, and — when no topic was configured —
// derives one from the device/thing ids embedded in the password token.
func New(ctx activity.InitContext) (activity.Activity, error) {
	settings := &Settings{}
	err := metadata.MapToStruct(ctx.Settings(), settings, true)
	if err != nil {
		return nil, err
	}
	ctx.Logger().Infof("activity init setting:%+v", settings)
	options := initClientOption(ctx.Logger(), settings)
	if strings.HasPrefix(settings.Broker, "ssl") {
		cfg := &ssl.Config{}
		if len(settings.SSLConfig) != 0 {
			err := cfg.FromMap(settings.SSLConfig)
			if err != nil {
				return nil, err
			}
			// Unset options default to the permissive choice.
			if _, set := settings.SSLConfig["skipVerify"]; !set {
				cfg.SkipVerify = true
			}
			if _, set := settings.SSLConfig["useSystemCert"]; !set {
				cfg.UseSystemCert = true
			}
		} else {
			//using ssl but not configured, use defaults
			cfg.SkipVerify = true
			cfg.UseSystemCert = true
		}
		tlsConfig, err := ssl.NewClientTLSConfig(cfg)
		if err != nil {
			return nil, err
		}
		options.SetTLSConfig(tlsConfig)
	}
	mqttClient := mqtt.NewClient(options)
	if token := mqttClient.Connect(); token.Wait() && token.Error() != nil {
		ctx.Logger().Error(token.Error())
		return nil, token.Error()
	}
	// No explicit topic: derive it from the device/thing ids in the token.
	if settings.Topic == "" {
		if deviceId, thingId, err := parseToken(settings.Password); err != nil {
			return nil, err
		} else {
			settings.Topic = buildUpTopic(deviceId, thingId)
			ctx.Logger().Infof("mqtt topic:%s", settings.Topic)
		}
	}
	act := &Activity{client: mqttClient, settings: settings}
	return act, nil
}
// Activity republishes incoming messages over MQTT and tracks device
// online/offline status via an expiring cache (see Eval).
type Activity struct {
	settings *Settings
	client   mqtt.Client
}

// Metadata returns the activity's settings/input/output schema.
func (a *Activity) Metadata() *activity.Metadata {
	return activityMd
}
// Eval processes each entry of the input. For heartbeat inputs it (re)arms a
// 15-second expiry entry per device; when the entry expires without another
// heartbeat, an offline status is published (retained) to the device's topic.
// Non-heartbeat entries are republished to their topic after the configured
// delay.
//
// NOTE(review): the two heartbeat branches register identical expiry
// callbacks — only the known/unknown-device bookkeeping differs; they look
// like candidates for folding into one. Also, the closures capture the outer
// err variable for their Debugf calls, so they may log a stale/nil error
// rather than token.Error() — confirm intended.
func (a *Activity) Eval(ctx activity.Context) (done bool, err error) {
	input := &Input{}
	err = ctx.GetInputObject(input)
	if err != nil {
		return true, err
	}
	ctx.Logger().Infof("eval start:%+v", input)
	//heartbeat string
	//data map
	for _, v := range input.Data {
		object, _ := coerce.ToObject(v)
		topic := object["topic"].(string)
		if input.Type == "heartbeat" {
			message := object["message"].(string)
			data := &DeviceUpStatusMsg{}
			//decodeBytes, err := base64.StdEncoding.DecodeString(input.Message.(string))
			err = json.Unmarshal([]byte(message), data)
			if err != nil {
				ctx.Logger().Errorf("Unmarshal error Message: %s", message)
				return false, nil
			}
			// Unknown device: start tracking it with an offline-on-expiry callback.
			if _, err := get(data.DeviceId); err != nil {
				add(data.DeviceId, 15*time.Second, data, func(item *cache2go.CacheItem) {
					data.Status = DEVICE_STATUS_OFFLINE
					out, _ := json.Marshal(data)
					if token := a.client.Publish(topic, byte(a.settings.Qos), true, out); token.Wait() && token.Error() != nil {
						ctx.Logger().Debugf("Error in publishing: %v", err)
					} else {
						ctx.Logger().Debugf("Published Topic:%s, Message: %s", topic, string(out))
					}
				})
			} else {
				// Known device: re-arm the 15 s expiry window.
				add(data.DeviceId, 15*time.Second, data, func(item *cache2go.CacheItem) {
					data.Status = DEVICE_STATUS_OFFLINE
					out, _ := json.Marshal(data)
					if token := a.client.Publish(topic, byte(a.settings.Qos), true, out); token.Wait() && token.Error() != nil {
						ctx.Logger().Debugf("Error in publishing: %v", err)
					} else {
						ctx.Logger().Debugf("Published Topic:%s, Message: %s", topic, string(out))
					}
				})
				ctx.Logger().Debugf("Recv Heartbeat Topic:%s,Message:%s", topic, message)
				return true, nil
			}
			//add(data.DeviceId,15 * time.Second,data,func(key interface{}){})
		} else {
			if topic == "" {
				ctx.Logger().Infof("filter message")
				return true, nil
			}
		}
		// Republish the payload after the configured delay, asynchronously.
		go func() {
			time.Sleep(time.Duration(a.settings.Delay) * time.Millisecond)
			message, _ := json.Marshal(object["message"])
			ctx.Logger().Infof("[Activity] Eval Topic:%s,Message:%s", topic, message)
			if token := a.client.Publish(topic, byte(a.settings.Qos), true, message); token.Wait() && token.Error() != nil {
				ctx.Logger().Debugf("Error in publishing: %v", err)
				return
			}
			ctx.Logger().Infof("Published Message Success: %s", message)
		}()
	}
	return true, nil
}
// initClientOption builds the MQTT client options from the activity settings,
// attaching a file-backed message store when one is configured.
func initClientOption(logger log.Logger, settings *Settings) *mqtt.ClientOptions {
	opts := mqtt.NewClientOptions().
		AddBroker(settings.Broker).
		SetClientID(settings.Id).
		SetUsername(settings.Username).
		SetPassword(settings.Password).
		SetCleanSession(settings.CleanSession)
	if settings.Store != "" && settings.Store != ":memory:" {
		logger.Debugf("Using file store: %s", settings.Store)
		opts.SetStore(mqtt.NewFileStore(settings.Store))
	}
	return opts
}
|
[
3,
4
] |
package metrix
import (
"time"
)
type Stats struct {
ContainerId string
Image string
MemoryUsage uint64 //in bytes
CPUUnitCost string
MemoryUnitCost string
AllocatedMemory int64
AllocatedCpu int64
CPUStats CPUStats //in percentage of total cpu used
PreCPUStats CPUStats
NetworkIn uint64
NetworkOut uint64
AccountId string
AssemblyId string
QuotaId string
AssemblyName string
AssembliesId string
Status string
AuditPeriod time.Time
}
// CPUStats holds the raw CPU usage counters for one sample.
// NOTE(review): field semantics mirror the Docker stats API naming —
// confirm against the producer of this data.
type CPUStats struct {
	PercpuUsage       []uint64 // one entry per CPU
	UsageInUsermode   uint64
	TotalUsage        uint64
	UsageInKernelmode uint64
	SystemCPUUsage    uint64
}
|
[
3
] |
package target
import (
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strings"
"github.com/upsight/ron/color"
"github.com/upsight/ron/execute"
yaml "gopkg.in/yaml.v2"
)
// Configs is a mapping of filename to target file.
type Configs struct {
	RemoteEnv   string               // The remote hosts to run the command on. This is (file):env
	RemoteHosts []*execute.SSHConfig // a list of remote hosts to execute on.
	Files       []*File              // parsed target files, in load order
	StdOut      io.Writer
	StdErr      io.Writer
}
// NewConfigs takes a default set of yaml in config format and then
// overrides them with a new set of config target replacements.
// stdOut/stdErr default to os.Stdout/os.Stderr when nil. Envs merge
// top-down: each file inherits the env of the highest-priority ron.yaml
// seen so far. When remoteEnv is set, the matching Remotes entry is
// resolved into RemoteHosts.
func NewConfigs(configs []*RawConfig, remoteEnv string, stdOut io.Writer, stdErr io.Writer) (*Configs, error) {
	if stdOut == nil {
		stdOut = os.Stdout
	}
	if stdErr == nil {
		stdErr = os.Stderr
	}
	confs := &Configs{
		RemoteEnv: remoteEnv,
		Files:     []*File{},
		StdOut:    stdOut,
		StdErr:    stdErr,
	}
	osEnvs := ParseOSEnvs(os.Environ())
	// parentFile here is the highest priority ron.yaml file.
	var parentFile *File
	for _, config := range configs {
		var targets map[string]*Target
		if err := yaml.Unmarshal([]byte(config.Targets), &targets); err != nil {
			return nil, err
		}
		var remotes Remotes
		if err := yaml.Unmarshal([]byte(config.Remotes), &remotes); err != nil {
			return nil, err
		}
		// initialize io for each target.
		for name, target := range targets {
			target.W = stdOut
			target.WErr = stdErr
			if target.Name == "" {
				target.Name = name
			}
			target.targetConfigs = confs
		}
		f := &File{
			rawConfig: config,
			Filepath:  config.Filepath,
			Targets:   targets,
			Remotes:   remotes,
		}
		// back-link each target to its file.
		for _, t := range targets {
			t.File = f
		}
		e, err := NewEnv(parentFile, config, osEnvs, stdOut)
		if err != nil {
			return nil, err
		}
		f.Env = e
		if parentFile != nil {
			parentFile.Env.MergeTo(f.Env)
		}
		confs.Files = append(confs.Files, f)
		// a ron.yaml becomes the parent (env source) for later files.
		if strings.HasSuffix(config.Filepath, ConfigFileName) {
			parentFile = f
		}
	}
	if remoteEnv != "" {
		// find any remote hosts if set.
		filePrefix, env := splitTarget(remoteEnv)
		for _, tf := range confs.Files {
			if filePrefix != "" && tf.Basename() != filePrefix {
				continue
			}
			if r, ok := tf.Remotes[env]; ok {
				confs.RemoteHosts = r
				break
			}
		}
	}
	return confs, nil
}
// List prints out each target and its before and after targets.
// fuzzy may be "file:pattern": the file part restricts which config files
// are listed and pattern is a filepath.Match glob over target names.
func (tc *Configs) List(verbose bool, fuzzy string) {
	filePrefix, fuzzyTarget := splitTarget(fuzzy)
LOOP_FILES:
	for _, tf := range tc.Files {
		// If a file prefix is provided check this file matches.
		if filePrefix != "" && tf.Basename() != filePrefix {
			continue LOOP_FILES
		}
		targetNameWidth := 0 // widest name, used for column alignment
		targetNames := []string{}
	LOOP_TARGETS:
		for k := range tf.Targets {
			if len(k) > targetNameWidth {
				targetNameWidth = len(k)
			}
			if fuzzyTarget != "" {
				if ok, _ := filepath.Match(fuzzyTarget, k); !ok {
					continue LOOP_TARGETS
				}
			}
			targetNames = append(targetNames, k)
		}
		sort.Strings(targetNames)
		basename := tf.Basename()
		tc.StdOut.Write([]byte(color.Green(fmt.Sprintf("(%s) %s\n", basename, tf.Filepath))))
		for _, targetName := range targetNames {
			if target, ok := tc.Target(basename + ":" + targetName); ok {
				target.List(verbose, targetNameWidth)
			}
		}
		tc.StdOut.Write([]byte(color.Green("---\n\n")))
	}
}
// ListClean will print out a full list of targets suitable for bash completion.
// Each entry is "file:target", the whole set sorted and space-separated.
func (tc *Configs) ListClean() {
	var names []string
	for _, tf := range tc.Files {
		prefix := tf.Basename() + ":"
		for target := range tf.Targets {
			names = append(names, prefix+target)
		}
	}
	sort.Strings(names)
	for _, name := range names {
		tc.StdOut.Write([]byte(name + " "))
	}
}
// Target retrieves the named target from config. If it doesn't
// exist a bool false will be returned along with nil. If the name
// contains a file prefix such as "default:mytarget", it will only
// search within that configuration file.
func (tc *Configs) Target(name string) (*Target, bool) {
	// targetName was previously also called "target" and then shadowed by
	// the map-lookup result; renamed for clarity.
	filePrefix, targetName := splitTarget(name)
	for _, tf := range tc.Files {
		if filePrefix != "" && tf.Basename() != filePrefix {
			continue
		}
		if target, ok := tf.Targets[targetName]; ok {
			return target, true
		}
	}
	return nil, false
}
// GetEnv will return the targets associated environment variables to
// use when running the target.
// Note: with no file prefix in name, this returns the env of the FIRST
// file unconditionally — the loop body returns on its first pass.
func (tc *Configs) GetEnv(name string) MSS {
	filePrefix, _ := splitTarget(name)
	for _, tf := range tc.Files {
		if filePrefix != "" && tf.Basename() != filePrefix {
			continue
		}
		envs, _ := tf.Env.Config() // error deliberately dropped — TODO confirm
		return envs
	}
	return nil
}
// ListEnvs will print out the list of file envs.
// One header per file followed by that file's env listing.
func (tc *Configs) ListEnvs() error {
	for _, tf := range tc.Files {
		tc.StdOut.Write([]byte(color.Green(fmt.Sprintf("(%s) %s\n", tf.Basename(), tf.Filepath))))
		tf.Env.List()
		tc.StdOut.Write([]byte(color.Green("---\n\n")))
	}
	return nil
}
// ListRemotes will print out the list of remote envs set in each file.
// Listing errors are reported to StdErr but do not stop the iteration.
func (tc *Configs) ListRemotes() error {
	for _, tf := range tc.Files {
		tc.StdOut.Write([]byte(color.Green(fmt.Sprintf("(%s) %s\n", tf.Basename(), tf.Filepath))))
		if err := tf.Remotes.List(tc.StdOut); err != nil {
			tc.StdErr.Write([]byte(color.Red(err.Error())))
		}
		tc.StdOut.Write([]byte(color.Green("---\n\n")))
	}
	return nil
}
|
[
6
] |
package main
import (
"fmt"
)
// main runs the demo: prints a fixed slice, sorts it via mergeKLists, and
// prints the result.
func main() {
	fmt.Println("hello world;")
	lists := []int{2, 1, 4, 3, 6, 4, 10, 9, 2}
	fmt.Println(lists)
	res := mergeKLists(lists)
	fmt.Println(res)
}
// mergeKLists returns the elements of lists in ascending order. Each pass
// calls consHeap, which moves the minimum of the remaining slice to index
// 0; that minimum is appended to the result and dropped. O(n^2) overall.
func mergeKLists(lists []int) []int {
	res := make([]int, 0, len(lists)) // preallocate: final length is known
	for len(lists) != 0 {
		lists = consHeap(lists)
		// (a leftover fmt.Println(res) debug print per iteration was removed)
		res = append(res, lists[0])
		lists = lists[1:]
	}
	return res
}
// consHeap makes one bottom-up pass over the implicit binary tree stored in
// lists, swapping each parent with a smaller child (right child first, then
// left). Because parents are visited in decreasing index order, the global
// minimum bubbles all the way to index 0 — the only property callers rely on.
func consHeap(lists []int) []int {
	last := len(lists) - 1
	for parent := last / 2; parent >= 0; parent-- {
		right := 2*parent + 2
		left := 2*parent + 1
		if right <= last && lists[parent] > lists[right] {
			// the last internal node may have a right child
			lists[parent], lists[right] = lists[right], lists[parent]
		}
		if left <= last && lists[parent] > lists[left] {
			// every internal node has a left child
			lists[parent], lists[left] = lists[left], lists[parent]
		}
	}
	return lists
}
|
[
3
] |
package db
import (
"context"
"time"
"github.com/go-redis/redis/v8"
)
// redisDB implements the KV interface on top of a go-redis client.
type redisDB struct {
	client *redis.Client
	ctx    context.Context // applied to every command; see WithContext
}
//NewRedisDB create redis client
// address is host:port; an empty password means no auth. Uses DB 0 and a
// background context until WithContext is called. err is always nil here;
// it exists to satisfy the constructor signature.
func NewRedisDB(address, password string) (kv KV, err error) {
	rdb := redis.NewClient(&redis.Options{
		Addr:     address,
		Password: password, // no password set
		DB:       0,        // use default DB
	})
	kv = &redisDB{
		client: rdb,
		ctx:    context.TODO(),
	}
	return
}
// WithContext replaces the context used for subsequent commands.
// NOTE(review): this mutates the receiver and returns it (not a copy), so
// the new context is shared by every holder of this instance — confirm
// callers expect that.
func (m *redisDB) WithContext(ctx context.Context) KV {
	m.ctx = ctx
	return m
}
// Get the item with the provided key.
func (m *redisDB) Get(key string) (val []byte, err error) {
	val, err = m.client.Get(m.ctx, key).Bytes()
	return
}

// Add writes the given item, if no value already exists for its key.
// Note: the bool result (whether the key was actually set) is discarded,
// so a pre-existing key is NOT reported as an error.
func (m *redisDB) Add(key string, val []byte, expiration time.Duration) (err error) {
	//NX -- Only set the key if it does not already exist.
	_, err = m.client.SetNX(m.ctx, key, string(val), expiration).Result()
	return
}

// Set writes the given item, unconditionally.
func (m *redisDB) Set(key string, val []byte, expiration time.Duration) (err error) {
	return m.client.Set(m.ctx, key, string(val), expiration).Err()
}

// Incr increment key
func (m *redisDB) Incr(key string) (err error) {
	return m.client.Incr(m.ctx, key).Err()
}

// IncrBy increment key
// A zero delta is a no-op and skips the round trip.
func (m *redisDB) IncrBy(key string, val int64) (err error) {
	if val == 0 {
		return nil
	}
	return m.client.IncrBy(m.ctx, key, val).Err()
}

// Delete deletes the item with the provided key.
func (m *redisDB) Delete(key string) (err error) {
	err = m.client.Del(m.ctx, key).Err()
	return
}
|
[
3
] |
package main
import (
"fmt"
"math"
)
// main drops a particle from (10,30) under gravity and steps it for 150
// frames of 1/32 s, checking collisions against two line segments; each
// frame's state is printed as a CSV row.
func main() {
	fmt.Println("PS")
	p := MkParticle(10, 30, 0, 0, 9.8, 0.0)
	f1 := false // once true, lineHit skips the bounce response for l
	f2 := false // same, for l2
	l := MkLine(-5, 10, 15, -10, 0.8)
	l2 := MkLine(20, -15, 50, -20, 0.8)
	fmt.Printf("%4.4f, %4.4f, %4.4f, %4.4f, %4.4f, %4.4f, %4.4f\n",
		p.T, p.Px, p.Py, p.Vx, p.Vy, l.X1, l.Y1)
	fmt.Printf("%4.4f, %4.4f, %4.4f, %4.4f, %4.4f, %4.4f, %4.4f\n",
		p.T, p.Px, p.Py, p.Vx, p.Vy, l.X2, l.Y2)
	fmt.Printf("%4.4f, %4.4f, %4.4f, %4.4f, %4.4f, %4.4f, %4.4f\n",
		p.T, p.Px, p.Py, p.Vx, p.Vy, l2.X1, l2.Y1)
	fmt.Printf("%4.4f, %4.4f, %4.4f, %4.4f, %4.4f, %4.4f, %4.4f\n",
		p.T, p.Px, p.Py, p.Vx, p.Vy, l2.X2, l2.Y2)
	p.Show()
	for i := 0; i < 150; i++ {
		np := MkZParticle() // placeholder; immediately overwritten below
		np, f1 = lineHit(l, 0.03125, p, f1)
		if !f1 {
			np, f2 = lineHit(l2, 0.03125, p, f2)
		}
		p = np
		p.Showh()
	}
}
// Particle is a point mass: position (Px,Py), velocity (Vx,Vy), gravity
// constant G, and simulation time T.
type Particle struct {
	Px, Py, Vx, Vy, G, T float64
}

// Line is a segment from (X1,Y1) to (X2,Y2). K looks like a restitution
// factor but is never read in this file — confirm.
type Line struct {
	X1, Y1, X2, Y2, K float64
}
// HitLine reports whether point (px,py) lies inside the triangle formed by
// segment l and the extra vertex (x3,y3), by testing the sign pattern of
// the three edge cross products (see BrokenLine).
func HitLine(l Line, x3, y3, px, py float64) bool {
	// edge vectors of the triangle, in order
	vx1 := l.X2 - l.X1
	vy1 := l.Y2 - l.Y1
	vx2 := x3 - l.X2
	vy2 := y3 - l.Y2
	vx3 := l.X1 - x3
	vy3 := l.Y1 - y3
	// vectors from each triangle vertex to the query point
	pvx1 := px - l.X1
	pvy1 := py - l.Y1
	pvx2 := px - l.X2
	pvy2 := py - l.Y2
	pvx3 := px - x3
	pvy3 := py - y3
	// 2D cross products edge × to-point; same sign on all three edges
	// means the point is on one side of every edge, i.e. inside.
	return BrokenLine(
		vx1*pvy1-pvx1*vy1,
		vx2*pvy2-pvx2*vy2,
		vx3*pvy3-pvx3*vy3)
}
// BrokenLine reports whether the three signed values have the "inside"
// sign pattern: v1 negative with v2, v3 positive, or the exact mirror.
// Any zero makes the result false.
func BrokenLine(v1, v2, v3 float64) bool {
	switch {
	case v1 < 0 && 0 < v2 && 0 < v3:
		return true
	case 0 < v1 && v2 < 0 && v3 < 0:
		return true
	default:
		return false
	}
}
// lineHit advances p by time step t; if the move crosses segment l (and the
// flag does not suppress it), it bisects to the impact point and reflects
// the velocity by rotating it by pi - qsita(...). Returns the new particle
// state and whether a crossing was detected.
func lineHit(l Line, t float64, p Particle, flag bool) (Particle, bool) {
	np := Renew(t, p)
	hit := HitLine(l, p.Px, p.Py, np.Px, np.Py)
	if flag {
		// already handled previously: skip the bounce response
	} else if hit {
		np = BinHitLine(l, 8, p, np)
		sita := math.Pi - qsita(l, p, np)
		// Rotate using the PRE-rotation components for both axes; the
		// original reused the already-updated Vx when computing Vy.
		vx, vy := np.Vx, np.Vy
		np.Vx = vx*math.Cos(sita) - vy*math.Sin(sita)
		np.Vy = vx*math.Sin(sita) + vy*math.Cos(sita)
	}
	return np, hit
}
// qsita returns acos of a normalized product of the line direction and the
// particle's displacement from p to np.
// NOTE(review): inner is lvx*pvy + lvy*pvx, which is neither the dot
// product (lvx*pvx + lvy*pvy) nor the cross product — confirm intended.
func qsita(l Line, p, np Particle) float64 {
	lvx := l.X2 - l.X1
	lvy := l.Y2 - l.Y1
	_, _, lvs := uv(lvx, lvy) // only the magnitude is used
	pvx := np.Px - p.Px
	pvy := np.Py - p.Py
	_, _, pvs := uv(pvx, pvy)
	inner := lvx*pvy + lvy*pvx
	inner /= lvs
	inner /= pvs
	return math.Acos(inner)
}
// uv returns the unit vector of (xs,ys) and the vector's length.
// A zero vector divides by zero, producing NaNs (matches original behavior).
func uv(xs, ys float64) (float64, float64, float64) {
	length := math.Sqrt(xs*xs + ys*ys)
	return xs / length, ys / length, length
}
// BinHitLine bisects `time` times over the interval [p, np] to approximate
// the moment the trajectory crosses segment l, returning the midpoint state
// of the final interval.
func BinHitLine(l Line, time int, p, np Particle) Particle {
	dt := np.T - p.T
	cp := Renew(0.5*dt, p)
	if 0 < time {
		// Test the FIRST half-segment p→cp. The original tested the whole
		// span p→np, which is invariant across the recursion and always
		// collapsed the interval toward p.
		if HitLine(l, p.Px, p.Py, cp.Px, cp.Py) {
			return BinHitLine(l, time-1, p, cp)
		}
		return BinHitLine(l, time-1, cp, np)
	}
	return cp
}
// yukabehit advances p by t and resolves a collision with either the floor
// at height h (reflecting Vy) or the wall at x = v (reflecting Vx), damped
// by restitution k.
func yukabehit(t, h, v, k float64, p Particle) Particle {
	np := Renew(t, p)
	if np.Py < h {
		np = BinHitYuka(h, 8, p, np)
		np.Vy *= -k
	} else if v < np.Px {
		// Bisect on X with BinHitKabe; the original mistakenly bisected on
		// Y via BinHitYuka for the wall case.
		np = BinHitKabe(v, 8, p, np)
		np.Vx *= -k
	}
	return np
}
// yukahit advances p by t and, if it fell below floor height h, bisects to
// the impact point and scales Vy by k.
// NOTE(review): unlike yukabehit this multiplies by k, not -k, so the
// velocity is damped but not reflected — confirm intended.
func yukahit(t, h, k float64, p Particle) Particle {
	np := Renew(t, p)
	if np.Py < h {
		np = BinHitYuka(h, 4, p, np)
		np.Vy *= k
	}
	return np
}
// BinHitKabe bisects `time` times over [p, np] to approximate the moment
// the particle crosses the wall at x = v, returning the midpoint state of
// the final interval.
func BinHitKabe(v float64, time int, p, np Particle) Particle {
	dt := np.T - p.T
	cp := Renew(0.5*dt, p)
	if 0 < time {
		// Recurse into BinHitKabe itself; the original recursed into
		// BinHitYuka, silently switching to the floor test on the Y axis.
		if v < cp.Px {
			return BinHitKabe(v, time-1, p, cp)
		}
		return BinHitKabe(v, time-1, cp, np)
	}
	return cp
}
// BinHitYuka bisects `time` times over the interval [p, np] to approximate
// the moment the particle crosses floor height h, returning the midpoint
// state of the final interval.
func BinHitYuka(h float64, time int, p, np Particle) Particle {
	dt := np.T - p.T
	cp := Renew(0.5*dt, p) // midpoint state of the current interval
	if 0 < time {
		if cp.Py < h {
			// crossed in the first half
			return BinHitYuka(h, time-1, p, cp)
		} else {
			return BinHitYuka(h, time-1, cp, np)
		}
	} else {
		return cp
	}
}
// MkLine builds a Line from its two endpoints and factor k.
func MkLine(x1, y1, x2, y2, k float64) Line {
	return Line{X1: x1, Y1: y1, X2: x2, Y2: y2, K: k}
}
// MkParticle builds a Particle from position, velocity, gravity, and time.
func MkParticle(px, py, vx, vy, g, t float64) Particle {
	return Particle{Px: px, Py: py, Vx: vx, Vy: vy, G: g, T: t}
}
// MkZParticle returns a particle at the origin, at rest, at t=0, with
// gravity 9.8.
func MkZParticle() Particle {
	return MkParticle(0, 0, 0, 0, 9.8, 0)
}
// ShowPos prints the particle's time and position.
// (Receiver renamed from the non-idiomatic `this` to `p`.)
func (p *Particle) ShowPos() {
	fmt.Println(p.T, p.Px, p.Py)
}

// Show prints time, position, and velocity, space-separated.
func (p *Particle) Show() {
	fmt.Println(p.T, p.Px, p.Py, p.Vx, p.Vy)
}

// Showh prints time, position, and velocity as a CSV row with 2 decimals.
func (p *Particle) Showh() {
	fmt.Printf("%4.2f, %4.2f, %4.2f, %4.2f, %4.2f\n", p.T, p.Px, p.Py, p.Vx, p.Vy)
}
// Renew advances the particle by time step t with explicit Euler
// integration: position moves by current velocity, Vy loses G*t.
func Renew(t float64, p Particle) Particle {
	res := MkParticle(
		p.Px+p.Vx*t,
		p.Py+p.Vy*t,
		p.Vx,
		p.Vy-p.G*t,
		p.G, p.T+t)
	return res
}
|
[
5
] |
package add
import (
"testing"
)
// assertEquals fails the test when result differs from expected.
func assertEquals(t *testing.T, expected int, result int) {
	t.Helper() // report the caller's line on failure, not this helper's
	if result != expected {
		// replaced the joke message with a conventional one
		t.Errorf("expected %d but got %d", expected, result)
	}
}
// TestAdd_AddsTwoInts checks the two-argument case of the variadic Add.
func TestAdd_AddsTwoInts(t *testing.T) {
	assertEquals(t, 30, Add(10, 20))
}

// TestAdd_AddsThreeInts checks the three-argument case.
func TestAdd_AddsThreeInts(t *testing.T) {
	assertEquals(t, 60, Add(10, 20, 30))
}

// TestAdd_AddsTenInts checks a longer argument list.
func TestAdd_AddsTenInts(t *testing.T) {
	assertEquals(t, 400, Add(10, 20, 30, 40, 10, 20, 30, 40, 100, 100))
}
|
[
6
] |
package handler
import (
"fgame/fgame/game/charge/charge"
chargetemplate "fgame/fgame/game/charge/template"
"fgame/fgame/game/player/dao"
"fgame/fgame/game/remote/cmd"
cmdpb "fgame/fgame/game/remote/cmd/pb"
"fmt"
log "github.com/Sirupsen/logrus"
"github.com/golang/protobuf/proto"
)
// init registers handleCharge for the charge command type with the remote
// command dispatcher.
func init() {
	cmd.RegisterCmdHandler(cmd.CmdType(cmdpb.CmdType_CMD_CHARGE_TYPE), cmd.CmdHandlerFunc(handleCharge))
}
// handleCharge processes a remote charge command: it unpacks order id,
// player id, charge id and amount, delegates to orderCharge, and logs the
// outcome either way. The type assertion panics on an unexpected message
// type — TODO confirm the dispatcher guarantees the type.
func handleCharge(msg proto.Message) (err error) {
	log.Info("cmd:请求充值")
	cmdCharge := msg.(*cmdpb.CmdCharge)
	orderId := cmdCharge.GetOrderId()
	playerId := cmdCharge.GetPlayerId()
	chargeId := cmdCharge.GetChargeId()
	money := cmdCharge.GetMoney()
	err = orderCharge(orderId, playerId, chargeId, money)
	if err != nil {
		log.WithFields(
			log.Fields{
				"orderId":  orderId,
				"playerId": playerId,
				"chargeId": chargeId,
				"money":    money,
				"err":      err,
			}).Error("cmd:请求充值,错误")
		return
	}
	log.WithFields(
		log.Fields{
			"orderId":  orderId,
			"playerId": playerId,
			"chargeId": chargeId,
			"money":    money,
		}).Info("cmd:请求充值,成功")
	return
}
// orderCharge validates that the player exists and the charge template is
// known, then performs the charge. A false result from Charge without an
// error is treated as a broken invariant and panics.
func orderCharge(orderId string, playerId int64, chargeId int32, money int32) (err error) {
	pe, err := dao.GetPlayerDao().QueryById(playerId)
	if err != nil {
		return
	}
	if pe == nil {
		return cmd.ErrorCodeCommonPlayerNoExist
	}
	//验证其它参数
	chargeTemplate := chargetemplate.GetChargeTemplateService().GetChargeTemplate(chargeId)
	if chargeTemplate == nil {
		err = cmd.ErrorCodeCommonArgumentInvalid
		return
	}
	flag, err := charge.GetChargeService().Charge(orderId, playerId, chargeId, money)
	if err != nil {
		return
	}
	if !flag {
		panic(fmt.Errorf("充值应该成功"))
	}
	return
}
|
[
3,
6
] |
package models
import (
"strings"
"github.com/Devil39/enigma/pkg/entities"
)
//UserModel represents user model struct
// Fields map 1:1 to DB columns via the `db` tags. SolvedQuestions and
// HintsUsed are comma-separated lists (see ToEntity).
type UserModel struct {
	UUID            string `db:"uid"`
	EmailID         string `db:"emailid"`
	Password        string `db:"password"`
	SolvedQuestions string `db:"questionssolved"`
	HintsUsed       string `db:"hintsused"`
}
//ToEntity converts given user model to user entity
// Password is deliberately not copied. The CSV fields are split via
// splitCSV, which fixes the strings.Split gotcha where splitting "" yields
// a one-element slice containing "".
func (userModel *UserModel) ToEntity() entities.User {
	return entities.User{
		UUID:            userModel.UUID,
		EmailID:         userModel.EmailID,
		SolvedQuestions: splitCSV(userModel.SolvedQuestions),
		HintsUsed:       splitCSV(userModel.HintsUsed),
	}
}

// splitCSV splits a comma-separated list, returning nil for empty input
// instead of []string{""}.
func splitCSV(s string) []string {
	if s == "" {
		return nil
	}
	return strings.Split(s, ",")
}
|
[
3
] |
/**
* @author Luis Gerardo Leon Ortega
Escribir un programa que llene un vector de tamaño n llena con números aleatorios del 0
al 100. El programa debe calcular:
a. La media de los datos
b. La desviación estandar
c. La varianza
*/
package main
import "fmt"
// main reads `size` integers from stdin, then prints their sample variance,
// mean, and standard deviation.
func main() {
	/* entradas */
	const size = 6
	var n [size]int
	var media float64
	var varianza float64
	var deviacionEstandar float64
	//Get values
	for i := 0; i < size; i++ {
		fmt.Scan(&n[i])
		media = media + float64(n[i])
	}
	media /= size
	varianza = recursiveSum(n, size-1, media)
	varianza /= size - 1 // sample variance (Bessel's correction)
	deviacionEstandar = sqrt(varianza)
	fmt.Println("La variaza es: ", varianza, " La media es: ", media, " La desviacion estandar es: ", deviacionEstandar)
}
// sqrt computes the square root of entrada with Newton's method.
// The original had a dead branch dividing by zero (`if t == 0`) and
// terminated on exact float equality, which can oscillate forever; this
// version terminates as soon as an iteration stops decreasing.
// Behavior for negative input is undefined (as in the original).
func sqrt(entrada float64) float64 {
	if entrada == 0 {
		return 0
	}
	raizCuadrada := entrada / 2
	// One forced Newton step: by AM-GM, (x + a/x)/2 >= sqrt(a), so from
	// here the sequence decreases monotonically toward the root.
	raizCuadrada = (raizCuadrada + entrada/raizCuadrada) / 2
	for {
		siguiente := (raizCuadrada + entrada/raizCuadrada) / 2
		if siguiente >= raizCuadrada {
			// no further progress possible in float64: converged
			return raizCuadrada
		}
		raizCuadrada = siguiente
	}
}
// alCuadrado returns val squared.
func alCuadrado(val float64) float64 {
	cuadrado := val * val
	return cuadrado
}
// recursiveSum returns the sum of squared deviations from media over
// n[0..size], recursing from index size down to 0.
// Note: size is a float64 used as an index; callers must pass whole
// numbers, or the base case `size == 0` is never reached.
func recursiveSum(n [6]int, size float64, media float64) float64 {
	if size == 0 {
		return alCuadrado(float64(n[int(size)]) - media)
	}
	return alCuadrado(float64(n[int(size)])-media) + recursiveSum(n, size-1, media)
}
/*
QA Reviso: Carlos Chan
Entradas: 1,2,3,4,5,6
Salidas:
La variaza es: 3.5
La media es: 3.5
La desviacion estandar es: 1.8708286933869707
El programa funciona correcto, pero se supone que debia ser para un vector de largo N con valores aleatorios,
no para un vector de largo 6 con valores introducidos por el usuario
*/
|
[
0,
3,
6
] |
package controllers
import (
"audrop-api/models"
"audrop-api/services/dao"
"encoding/json"
"fmt"
"log"
"net/http"
"strconv"
"github.com/gin-gonic/gin"
)
// CreateArtist POST /
// CreateArtistCtrl decodes the posted artist and inserts it, replying 201
// on success. The original replied 201 when the insert FAILED (the success
// check was inverted) and silently ignored bind errors.
func CreateArtistCtrl(c *gin.Context) {
	// Let gin do the parsing of the body, by using a binding.
	var artist models.Artist
	if err := c.ShouldBindJSON(&artist); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"response": err.Error()})
		return
	}
	fmt.Println(artist)                 // debug trace of the decoded payload
	success := dao.CreateArtist(artist) // Inserts the artist to the DB
	if success {
		c.JSON(http.StatusCreated, gin.H{"response": "OK"})
		return
	}
	c.JSON(http.StatusInternalServerError, gin.H{"response": "could not create artist"})
}
// GetArtists Controller
// GetArtistsCtrl returns all artists as JSON. The previous
// json.Marshal/Unmarshal round trip (flagged by its own TODO) was a no-op —
// gin serializes the value directly.
func GetArtistsCtrl(c *gin.Context) {
	artists := dao.GetArtists()
	c.JSON(http.StatusOK, gin.H{"statusCode": http.StatusOK, "response": artists})
}
// GetArtist Controller by artistid
// GetArtistCtrl looks up one artist by the :id path parameter. An
// unparseable id is logged and falls through with id == 0 — TODO consider
// replying 400 instead.
func GetArtistCtrl(c *gin.Context) {
	artistId := c.Param("id")
	id, err := strconv.Atoi(artistId)
	if err != nil {
		// Log the offending raw value; the old message printed the zero
		// int result instead of the string that failed to parse.
		log.Printf("invalid artist id %q: %v", artistId, err)
	}
	artist := dao.GetArtist(id)
	log.Printf("%+v", artist)
	//TODO: Look at refactoring the json.Marshal & Unmarshal [DRY]
	response, err := json.Marshal(artist)
	if err != nil {
		panic(err)
	}
	err = json.Unmarshal(response, &artist)
	if err != nil {
		panic(err)
	}
	c.JSON(http.StatusOK, gin.H{"statusCode": http.StatusOK, "response": artist})
}
|
[
3
] |
package main
import "fmt"
// looping demonstrates Go's three for-loop forms: the classic three-clause
// loop, the while-style condition loop, and the infinite loop with break.
// After the second loop a == 10, so the third loop counts it back down to 0.
func looping() {
	//For Looping
	for i := 0; i <= 10; i++ {
		fmt.Println("Perulangan for ke", i, "Cek")
	}
	//For looping with variable
	var a = 0
	for a < 10 {
		fmt.Println("Perulangan for dengan Variable ", a)
		a++
	}
	// For loop without initial condition
	for {
		fmt.Println("Perulangan A", a)
		a--
		if a == 0 {
			break
		}
	}
}
|
[
3
] |
// http-server
package main
import (
"bytes"
"encoding/json"
"log"
"net/http"
)
// answerSystemsHTTP POSTs m as JSON to url and decodes the JSON reply back
// into m, which is returned. Errors are routed through checkError.
func answerSystemsHTTP(url string, m Message) Message {
	b, err := json.Marshal(m)
	checkError(err, "marshalling")
	log.Println(string(b))
	req, err := http.NewRequest("POST", url, bytes.NewReader(b))
	// The original never checked this error; a bad URL left req nil and
	// panicked on the Header calls below.
	checkError(err, "building request")
	req.Header.Set("X-Custom-Header", "fromServer")
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	checkError(err, "client")
	// Close as soon as we own the body, before any decode can bail out.
	defer resp.Body.Close()
	decoder := json.NewDecoder(resp.Body)
	err = decoder.Decode(&m)
	checkError(err, "decoding JSON")
	return m
}
// handleJSON is the single HTTP endpoint. It decodes a Message and routes
// it by sender: system "A" registers a new action (deduplicating by
// operation id and action) and forwards it to B; system "B" reports
// completion, which is recorded. The possibly-rewritten message is echoed
// back as JSON with status 201.
func handleJSON(rw http.ResponseWriter, request *http.Request) {
	decoder := json.NewDecoder(request.Body)
	var m Message
	err := decoder.Decode(&m)
	checkError(err, "decoding JSON")
	if m.System == "A" {
		// findAId: 1 = duplicate, 0 = new, anything else = error
		fl := findAId(m.OperationID, m.Action)
		if fl == 1 {
			log.Println("Already in the table!")
			m.System = "SRV"
			m.OperationID = 0
			m.Action = "Already in the table!"
		} else if fl == 0 {
			insertNewAction(m.OperationID, m.Action)
			log.Println("Sending to B...")
			m = answerSystemsHTTP("http://localhost:8083/b", m)
		} else {
			log.Println("Error!")
			m.System = "SRV"
			m.OperationID = 0
			m.Action = "Error!"
		}
		showAllRows()
	}
	// Note: not an else-if — a reply from B above can flow into this branch.
	if m.System == "B" {
		fl := updateBId(m.OperationID, m.Action, false)
		log.Println("Sending to A...")
		if fl == true {
			log.Println("Success!")
			m.System = "SRV"
			m.OperationID = 0
			m.Action = "Success!"
		} else {
			log.Println("Error!")
			m.System = "SRV"
			m.OperationID = 0
			m.Action = "Error!"
		}
		showAllRows()
	}
	rw.Header().Set("Content-Type", "application/json")
	rw.WriteHeader(http.StatusCreated)
	json.NewEncoder(rw).Encode(m)
}
func serveHTTP() {
log.Println("Staring HTTP Server..")
r := http.NewServeMux()
r.HandleFunc("/", handleJSON)
log.Fatal(http.ListenAndServe(":8080", r))
}
|
[
5
] |
package gen_mmo
import (
"errors"
)
// BoundingBox is an axis-aligned box defined by its min and max corner
// vectors.
type BoundingBox struct {
	max *Vector
	min *Vector
}
// NewBoundingBox builds a box from its min and max corners; the corners
// are stored as given, without validation (see IsValid).
func NewBoundingBox(min, max *Vector) *BoundingBox {
	b := new(BoundingBox)
	b.min = min
	b.max = max
	return b
}
// NewBoundingBoxFromPoints returns the smallest box containing all points.
// An error is returned for a nil or empty argument list.
func NewBoundingBoxFromPoints(points ...*Vector) (*BoundingBox, error) {
	if points == nil {
		return nil, errors.New("the points can not be nil!")
	}
	if len(points) <= 0 {
		return nil, errors.New("the points' len can not be zero!")
	}
	// Seed both extremes from the first point; the original read
	// points[1], which panics for a single-point call.
	min := points[0]
	max := points[0]
	for _, point := range points[1:] {
		min = VMin(min, point)
		max = VMax(max, point)
	}
	return NewBoundingBox(min, max), nil
}
// Max returns the maximum corner.
func (b *BoundingBox) Max() *Vector {
	return b.max
}

// Min returns the minimum corner.
func (b *BoundingBox) Min() *Vector {
	return b.min
}

// SetMax replaces the maximum corner.
func (b *BoundingBox) SetMax(max *Vector) {
	b.max = max
}

// SetMin replaces the minimum corner.
func (b *BoundingBox) SetMin(min *Vector) {
	b.min = min
}
// Size returns the per-axis extent of the box (max - min).
func (b *BoundingBox) Size() *Vector {
	return VSubtract(b.Max(), b.Min())
}
// Contains reports whether point lies inside the box, bounds inclusive.
// De Morgan of the original `(...) == false` form; identical truth table.
func (b *BoundingBox) Contains(point *Vector) bool {
	return !(point.X() < b.Min().X() || point.X() > b.Max().X() ||
		point.Y() < b.Min().Y() || point.Y() > b.Max().Y() ||
		point.Z() < b.Min().Z() || point.Z() > b.Max().Z())
}
// Contains2d reports whether point lies inside the box in X and Y only,
// bounds inclusive. De Morgan of the original `(...) == false` form.
func (b *BoundingBox) Contains2d(point *Vector) bool {
	return !(point.X() < b.Min().X() || point.X() > b.Max().X() ||
		point.Y() < b.Min().Y() || point.Y() > b.Max().Y())
}
// IntersectWith returns the overlap of the two boxes; for disjoint boxes
// the result has max < min on some axis (check with IsValid).
func (b *BoundingBox) IntersectWith(other *BoundingBox) *BoundingBox {
	return NewBoundingBox(VMax(b.Min(), other.Min()), VMin(b.Max(), other.Max()))
}

// UnionWith returns the smallest box containing both boxes.
func (b *BoundingBox) UnionWith(other *BoundingBox) *BoundingBox {
	return NewBoundingBox(VMin(b.Min(), other.Min()), VMax(b.Max(), other.Max()))
}
// IsValid reports whether min <= max on every axis.
// De Morgan of the original `(...) == false` form; identical truth table.
func (b *BoundingBox) IsValid() bool {
	return !(b.Max().X() < b.Min().X() || b.Max().Y() < b.Min().Y() || b.Max().Z() < b.Min().Z())
}
|
[
1
] |
/*
Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spi
import (
"encoding/base64"
"fmt"
"strconv"
"strings"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/codes"
"github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/status"
corev1 "k8s.io/api/core/v1"
api "github.com/gardener/machine-controller-manager-provider-alicloud/pkg/alicloud/apis"
)
// Secret keys and identifiers used when reading Alibaba Cloud credentials
// and machine user data from a Kubernetes secret.
const (
	// AlicloudAccessKeyID is a constant for a key name that is part of the Alibaba cloud credentials.
	AlicloudAccessKeyID string = "alicloudAccessKeyID"
	// AlicloudAccessKeySecret is a constant for a key name that is part of the Alibaba cloud credentials.
	AlicloudAccessKeySecret string = "alicloudAccessKeySecret"
	// AlicloudAlternativeAccessKeyID is a constant for a key name of a secret containing the Alibaba cloud
	// credentials (access key id).
	AlicloudAlternativeAccessKeyID = "accessKeyID"
	// AlicloudAlternativeAccessKeySecret is a constant for a key name of a secret containing the Alibaba cloud
	// credentials (access key secret).
	AlicloudAlternativeAccessKeySecret = "accessKeySecret"
	// AlicloudUserData is a constant for user data
	AlicloudUserData string = "userData"
	// AlicloudDriverName is the name of the CSI driver for Alibaba Cloud
	AlicloudDriverName = "diskplugin.csi.alibabacloud.com"
)
// ECSClient provides an interface
// over the subset of the Alibaba ECS SDK used by this provider, so cloud
// calls can be mocked in tests.
type ECSClient interface {
	RunInstances(request *ecs.RunInstancesRequest) (*ecs.RunInstancesResponse, error)
	DescribeInstances(request *ecs.DescribeInstancesRequest) (*ecs.DescribeInstancesResponse, error)
	DeleteInstance(request *ecs.DeleteInstanceRequest) (*ecs.DeleteInstanceResponse, error)
}
// PluginSPI provides an interface to deal with cloud provider session
// You can optionally enhance this interface to add interface methods here
// You can use it to mock cloud provider calls
type PluginSPI interface {
	NewECSClient(secret *corev1.Secret, region string) (ECSClient, error)
	NewRunInstancesRequest(providerSpec *api.ProviderSpec, machineName string, userData []byte) (*ecs.RunInstancesRequest, error)
	NewDescribeInstancesRequest(machineName, instanceID string, tags map[string]string) (*ecs.DescribeInstancesRequest, error)
	NewDeleteInstanceRequest(instanceID string, force bool) (*ecs.DeleteInstanceRequest, error)
	NewInstanceDataDisks(disks []api.AlicloudDataDisk, machineName string) []ecs.RunInstancesDataDisk
	NewRunInstanceTags(tags map[string]string) ([]ecs.RunInstancesTag, error)
}
// PluginSPIImpl is the real implementation of SPI interface that makes the calls to the provider SDK.
// It is stateless; all inputs come in through the method arguments.
type PluginSPIImpl struct{}
// NewECSClient returns a new instance of the ECS client.
// Credentials are read from the secret, preferring the canonical key names
// and falling back to the alternative ones.
func (pluginSPI *PluginSPIImpl) NewECSClient(secret *corev1.Secret, region string) (ECSClient, error) {
	accessKeyID := extractCredentialsFromData(secret.Data, AlicloudAccessKeyID, AlicloudAlternativeAccessKeyID)
	accessKeySecret := extractCredentialsFromData(secret.Data, AlicloudAccessKeySecret, AlicloudAlternativeAccessKeySecret)
	ecsClient, err := ecs.NewClientWithAccessKey(region, accessKeyID, accessKeySecret)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	return ecsClient, err
}
// NewRunInstancesRequest returns a new request of run instance.
// It translates the provider spec field-by-field into an ECS RunInstances
// request; user data is base64-encoded and a fresh UUID is set as the
// client token.
func (pluginSPI *PluginSPIImpl) NewRunInstancesRequest(providerSpec *api.ProviderSpec, machineName string, userData []byte) (*ecs.RunInstancesRequest, error) {
	request := ecs.CreateRunInstancesRequest()
	request.ImageId = providerSpec.ImageID
	request.InstanceType = providerSpec.InstanceType
	request.RegionId = providerSpec.Region
	request.ZoneId = providerSpec.ZoneID
	request.SecurityGroupId = providerSpec.SecurityGroupID
	request.VSwitchId = providerSpec.VSwitchID
	request.PrivateIpAddress = providerSpec.PrivateIPAddress
	request.InstanceChargeType = providerSpec.InstanceChargeType
	request.InternetChargeType = providerSpec.InternetChargeType
	request.SpotStrategy = providerSpec.SpotStrategy
	request.IoOptimized = providerSpec.IoOptimized
	request.KeyPairName = providerSpec.KeyPairName
	// optional numeric fields: only set when present in the spec
	if providerSpec.InternetMaxBandwidthIn != nil {
		request.InternetMaxBandwidthIn = requests.NewInteger(int(*providerSpec.InternetMaxBandwidthIn))
	}
	if providerSpec.InternetMaxBandwidthOut != nil {
		request.InternetMaxBandwidthOut = requests.NewInteger(int(*providerSpec.InternetMaxBandwidthOut))
	}
	if providerSpec.DataDisks != nil && len(providerSpec.DataDisks) > 0 {
		dataDisks := pluginSPI.NewInstanceDataDisks(providerSpec.DataDisks, machineName)
		request.DataDisk = &dataDisks
	}
	if providerSpec.SystemDisk != nil {
		request.SystemDiskCategory = providerSpec.SystemDisk.Category
		request.SystemDiskSize = fmt.Sprintf("%d", providerSpec.SystemDisk.Size)
	}
	// tags are validated (cluster + role required) by NewRunInstanceTags
	tags, err := pluginSPI.NewRunInstanceTags(providerSpec.Tags)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	request.Tag = &tags
	request.InstanceName = machineName
	request.ClientToken = utils.GetUUIDV4()
	request.UserData = base64.StdEncoding.EncodeToString(userData)
	return request, nil
}
// NewDescribeInstancesRequest returns a new request of describe instance.
// Lookup priority: explicit instanceID, then machineName, then at least two
// kubernetes.io/cluster|role tag filters; with none of those an error is
// returned.
func (pluginSPI *PluginSPIImpl) NewDescribeInstancesRequest(machineName, instanceID string, tags map[string]string) (*ecs.DescribeInstancesRequest, error) {
	request := ecs.CreateDescribeInstancesRequest()
	if instanceID != "" {
		request.InstanceIds = "[\"" + instanceID + "\"]"
	} else if machineName != "" {
		request.InstanceName = machineName
	} else {
		searchFilters := make(map[string]string)
		for k, v := range tags {
			if strings.Contains(k, "kubernetes.io/cluster/") || strings.Contains(k, "kubernetes.io/role/") {
				searchFilters[k] = v
			}
		}
		if len(searchFilters) < 2 {
			return nil, fmt.Errorf("Can't find VMs with none of machineID/Tag[kubernetes.io/cluster/*]/Tag[kubernetes.io/role/*]")
		}
		// renamed from "tags", which shadowed the parameter above
		var describeTags []ecs.DescribeInstancesTag
		for k, v := range searchFilters {
			describeTags = append(describeTags, ecs.DescribeInstancesTag{
				Key:   k,
				Value: v,
			})
		}
		request.Tag = &describeTags
	}
	return request, nil
}
// NewDeleteInstanceRequest returns a new request of delete instance.
// force is forwarded to the API as the Force flag.
func (pluginSPI *PluginSPIImpl) NewDeleteInstanceRequest(instanceID string, force bool) (*ecs.DeleteInstanceRequest, error) {
	request := ecs.CreateDeleteInstanceRequest()
	request.InstanceId = instanceID
	request.Force = requests.NewBoolean(force)
	return request, nil
}
// NewInstanceDataDisks returns instances data disks.
// Each disk is named "<machine>-<disk>-data-disk"; DeleteWithInstance
// defaults to true when unset and is cleared for the DiskEphemeralSSD
// category (presumably not accepted by the API for it — confirm).
func (pluginSPI *PluginSPIImpl) NewInstanceDataDisks(disks []api.AlicloudDataDisk, machineName string) []ecs.RunInstancesDataDisk {
	var instanceDataDisks []ecs.RunInstancesDataDisk
	for _, disk := range disks {
		instanceDataDisk := ecs.RunInstancesDataDisk{
			Category:    disk.Category,
			Encrypted:   strconv.FormatBool(disk.Encrypted),
			DiskName:    fmt.Sprintf("%s-%s-data-disk", machineName, disk.Name),
			Description: disk.Description,
			Size:        fmt.Sprintf("%d", disk.Size),
		}
		if disk.DeleteWithInstance != nil {
			instanceDataDisk.DeleteWithInstance = strconv.FormatBool(*disk.DeleteWithInstance)
		} else {
			instanceDataDisk.DeleteWithInstance = strconv.FormatBool(true)
		}
		if disk.Category == "DiskEphemeralSSD" {
			instanceDataDisk.DeleteWithInstance = ""
		}
		instanceDataDisks = append(instanceDataDisks, instanceDataDisk)
	}
	return instanceDataDisks
}
// NewRunInstanceTags returns tags of Running Instances.
// All tags are converted; the map must include at least one
// kubernetes.io/cluster/* and one kubernetes.io/role/* key, otherwise an
// error is returned.
func (pluginSPI *PluginSPIImpl) NewRunInstanceTags(tags map[string]string) ([]ecs.RunInstancesTag, error) {
	runInstancesTags := make([]ecs.RunInstancesTag, 0, 2)
	hasCluster, hasRole := false, false
	for k, v := range tags {
		if strings.Contains(k, "kubernetes.io/cluster/") {
			hasCluster = true
		} else if strings.Contains(k, "kubernetes.io/role/") {
			hasRole = true
		}
		runInstancesTags = append(runInstancesTags, ecs.RunInstancesTag{Key: k, Value: v})
	}
	if !hasCluster || !hasRole {
		err := fmt.Errorf("Tags should at least contains 2 keys, which are prefixed with kubernetes.io/cluster and kubernetes.io/role")
		return nil, err
	}
	return runInstancesTags, nil
}
// extractCredentialsFromData returns the trimmed value of the first key
// found in data, trying the keys in order; "" when none exist.
func extractCredentialsFromData(data map[string][]byte, keys ...string) string {
	for _, candidate := range keys {
		value, present := data[candidate]
		if !present {
			continue
		}
		return strings.TrimSpace(string(value))
	}
	return ""
}
|
[
5
] |
package main
import (
"bytes"
"fmt"
"net/http"
"os"
"sync"
"time"
"github.com/cube2222/Blog/NATS/MasterWorker"
"github.com/golang/protobuf/proto"
"github.com/nats-io/nats"
"github.com/satori/go.uuid"
)
// Tasks is the shared task list, guarded by TaskMutex. Task states used in
// this file: 0 pending, 1 dispatched, 2 finished.
var Tasks []Transport.Task
var TaskMutex sync.Mutex

// oldestFinishedTaskPointer marks where the still-interesting tail of
// Tasks begins; everything before it is finished (see getNextTask).
var oldestFinishedTaskPointer int

// nc is the shared NATS connection, set up in main.
var nc *nats.Conn
// main connects to the NATS server named by the sole CLI argument, seeds
// the task list, and serves two subjects: Work.TaskToDo hands a pending
// task to a requesting worker; Work.TaskFinished records a completed one.
// Blocks forever on the final empty select.
func main() {
	if len(os.Args) != 2 {
		fmt.Println("Wrong number of arguments. Need NATS server address.")
		return
	}
	var err error
	nc, err = nats.Connect(os.Args[1])
	if err != nil {
		// NOTE(review): only logged — execution continues with a nil
		// connection; confirm this is intended.
		fmt.Println(err)
	}
	Tasks = make([]Transport.Task, 0, 20)
	TaskMutex = sync.Mutex{}
	oldestFinishedTaskPointer = 0
	initTestTasks()
	nc.Subscribe("Work.TaskToDo", func(m *nats.Msg) {
		myTaskPointer, ok := getNextTask()
		if ok {
			data, err := proto.Marshal(myTaskPointer)
			if err == nil {
				nc.Publish(m.Reply, data)
			}
		}
	})
	nc.Subscribe("Work.TaskFinished", func(m *nats.Msg) {
		myTask := Transport.Task{}
		err := proto.Unmarshal(m.Data, &myTask)
		if err == nil {
			TaskMutex.Lock()
			Tasks[myTask.Id].State = 2
			Tasks[myTask.Id].Finisheduuid = myTask.Finisheduuid
			TaskMutex.Unlock()
		}
	})
	select {}
}
// getNextTask finds the first pending task, marks it dispatched (state 1),
// and schedules a timeout reset. Leading finished tasks advance
// oldestFinishedTaskPointer so future scans skip them. Returns (nil,
// false) when nothing is pending.
func getNextTask() (*Transport.Task, bool) {
	TaskMutex.Lock()
	defer TaskMutex.Unlock()
	for i := oldestFinishedTaskPointer; i < len(Tasks); i++ {
		if i == oldestFinishedTaskPointer && Tasks[i].State == 2 {
			oldestFinishedTaskPointer++
		} else {
			if Tasks[i].State == 0 {
				Tasks[i].State = 1
				go resetTaskIfNotFinished(i)
				return &Tasks[i], true
			}
		}
	}
	return nil, false
}
// resetTaskIfNotFinished returns a dispatched task to the pending state if
// no worker has finished it within two minutes, so it can be handed out
// again.
func resetTaskIfNotFinished(i int) {
	time.Sleep(2 * time.Minute)
	TaskMutex.Lock()
	// The original never unlocked, permanently deadlocking every other
	// user of TaskMutex after the first timeout fired.
	defer TaskMutex.Unlock()
	if Tasks[i].State != 2 {
		Tasks[i].State = 0
	}
}
// initTestTasks seeds 20 test tasks: each gets a fresh UUID, its payload is
// POSTed to a file server discovered over NATS, and the task is appended to
// the global Tasks list. Tasks whose discovery or upload fails are skipped.
func initTestTasks() {
	for i := 0; i < 20; i++ {
		newTask := Transport.Task{Uuid: uuid.NewV4().String(), State: 0}
		fileServerAddressTransport := Transport.DiscoverableServiceTransport{}
		msg, err := nc.Request("Discovery.FileServer", nil, 1000*time.Millisecond)
		if err == nil && msg != nil {
			err := proto.Unmarshal(msg.Data, &fileServerAddressTransport)
			if err != nil {
				continue
			}
		}
		if err != nil {
			continue
		}
		fileServerAddress := fileServerAddressTransport.Address
		data := make([]byte, 0, 1024)
		buf := bytes.NewBuffer(data)
		fmt.Fprint(buf, "get,my,data,my,get,get,have")
		r, err := http.Post(fileServerAddress+"/"+newTask.Uuid, "", buf)
		if err != nil {
			continue
		}
		// Close the response body so the HTTP transport can reuse the
		// connection; the original leaked one connection per task.
		r.Body.Close()
		if r.StatusCode != http.StatusOK {
			continue
		}
		newTask.Id = int32(len(Tasks))
		Tasks = append(Tasks, newTask)
	}
}
|
[
4
] |
package cmd
import (
"context"
"fmt"
"github.com/spf13/cobra"
"github.com/wenwenxiong/go-kubesphere/kubesphere"
)
// NewAppCommand builds the root "app" cobra command and attaches its
// "get" and "update" subcommands. The root command itself is a no-op.
func NewAppCommand(ctx context.Context) *cobra.Command {
	var command = &cobra.Command{
		Use: "app [OPTIONS] [COMMANDS]",
		Short: "op for app .",
		Long: `get update app in kubesphere appstore.`,
		Run: func(cmd *cobra.Command, args []string) {
		},
	}
	command.AddCommand(NewAppGetCommand(ctx),
		NewAppUpdateCommand(ctx))
	return command
}
// NewAppGetCommand builds the "app get" subcommand, which lists apps from the
// kubesphere app store, optionally filtered by --appName.
func NewAppGetCommand(ctx context.Context) *cobra.Command {
	var appName string
	var command = &cobra.Command{
		Use:   "get [OPTIONS] ",
		Short: "get app ",
		Long:  `get app in kubesphere store`,
		Run: func(cmd *cobra.Command, args []string) {
			accessToke := GetAccessToken(apigateway)
			client := GetClient(apigateway)
			var c *kubesphere.App
			if appName != "" {
				c = &kubesphere.App{
					Name: String(appName),
				}
			}
			r, _, err := client.Openpitrixs.GetApp(ctx, c, accessToke)
			if _, ok := err.(*kubesphere.TwoFactorAuthError); ok {
				// The prompt previously said "GitHub OTP"; this command talks
				// to kubesphere (compare NewAppUpdateCommand).
				fmt.Print("\nkubesphere OTP: ")
			}
			if err != nil {
				fmt.Printf("\nerror: %v\n", err)
				return
			}
			if *r.TotalCount > 0 {
				fmt.Printf("get app result total: \n%v\n", *r.TotalCount)
				for i, v := range r.Items {
					fmt.Printf("\tget app at %d result:\n", i+1)
					fmt.Printf("\t\tapp name: %s\n", *v.Name)
					fmt.Printf("\t\tapp id: %s\n", *v.AppId)
					//fmt.Printf("\t\tapp category id: %s\n", *v.CategoryId)
				}
			} else {
				fmt.Printf("get app result: \n%v\n", *r.TotalCount)
			}
		},
	}
	command.Flags().StringVarP(&appName, "appName", "n", "", "app name")
	return command
}
// NewAppUpdateCommand builds the "app update" subcommand, which updates the
// name, description and/or category of an existing app identified by the
// required --appId flag. Only flags that were supplied are sent.
func NewAppUpdateCommand(ctx context.Context) *cobra.Command {
	var appId string
	var appNewName string
	var appNewDescription string
	var appNewCategoryId string
	var command = &cobra.Command{
		Use: "update [OPTIONS] ",
		Short: "update app ",
		Long: `update app in kubesphere store`,
		Run: func(cmd *cobra.Command, args []string) {
			accessToke := GetAccessToken(apigateway)
			client := GetClient(apigateway)
			c := &kubesphere.App{
				AppId: String(appId),
			}
			// Patch semantics: only populate fields the user asked to change.
			if appNewName != "" {
				c.SetName(&appNewName)
			}
			if appNewDescription != "" {
				c.SetDescription(&appNewDescription)
			}
			if appNewCategoryId != "" {
				c.SetCategoryId(&appNewCategoryId)
			}
			r, _, err :=client.Openpitrixs.UpdateApp(ctx, c, accessToke)
			if _, ok := err.(*kubesphere.TwoFactorAuthError); ok {
				fmt.Print("\nkubesphere OTP: ")
			}
			if err != nil {
				fmt.Printf("\nerror: %v\n", err)
				return
			}
			if r != nil {
				fmt.Printf("update app id: %s, result %s\n", appId, *r.Message)
			}
		},
	}
	command.Flags().StringVarP(&appId, "appId", "i", "", "app id")
	command.MarkFlagRequired("appId")
	command.Flags().StringVarP(&appNewName, "appNewName", "n", "", "app new name")
	command.Flags().StringVarP(&appNewDescription, "appNewDescription", "d", "", "app new description")
	command.Flags().StringVarP(&appNewCategoryId, "appNewCategoryId", "c", "", "app new category Id")
	return command
}
|
[
3
] |
package sls
import (
"github.com/hdksky/aliyungo/common"
"testing"
"github.com/golang/protobuf/proto"
"time"
)
// Test fixtures. Credentials are intentionally blank; fill them in to run the
// tests against a real SLS endpoint.
const (
	AccessKeyId = ""
	AccessKeySecret = ""
	Region = common.Hangzhou
	TestProjectName = "test-project123"
	TestLogstoreName = "test-logstore"
)
// DefaultProject returns a test project with a default logstore, creating
// both if necessary. "Already exists" errors are tolerated so the helper is
// idempotent across test runs; any other error fails the test.
func DefaultProject(t *testing.T) *Project {
	client := NewClient(Region, false, AccessKeyId, AccessKeySecret)
	err := client.CreateProject(TestProjectName, "description")
	if err != nil {
		if e, ok := err.(*Error); ok && e.Code != "ProjectAlreadyExist" {
			t.Fatalf("create project fail: %s", err.Error())
		}
	}
	p, err := client.Project(TestProjectName)
	if err != nil {
		t.Fatalf("get project fail: %s", err.Error())
	}
	//Create default logstore
	logstore := &Logstore{
		TTL: 2,
		Shard: 3,
		Name: TestLogstoreName,
	}
	err = p.CreateLogstore(logstore)
	if err != nil {
		if e, ok := err.(*Error); ok && e.Code != "LogStoreAlreadyExist" {
			t.Fatalf("create logstore fail: %s", err.Error())
		}
	}
	return p
}
// TestClient_PutLogs writes a single log record with two key/value contents
// to a hard-coded Beijing project/logstore and fails on any API error.
// NOTE(review): uses its own region/project rather than the shared fixtures
// above — presumably intentional; confirm before unifying.
func TestClient_PutLogs(t *testing.T) {
	region := common.Beijing
	project := "testych"
	logStore := "test1"
	client := NewClient(region, false, AccessKeyId, AccessKeySecret)
	contents := []*Log_Content{}
	key := "log1"
	value := "value1"
	contents = append( contents, &Log_Content{
		Key: &key,
		Value: &value,
	})
	key2 := "log2"
	value2 := "value2"
	contents = append( contents, &Log_Content{
		Key: &key2,
		Value: &value2,
	})
	logs := []*Log{}
	logs = append(logs, &Log{
		// Timestamp is the current time in seconds, as the protobuf expects.
		Time: proto.Uint32(uint32(time.Now().Unix())),
		Contents: contents,
	})
	request := &PutLogsRequest{
		Project : project,
		LogStore: logStore,
		LogItems : LogGroup{
			Logs: logs,
		},
	}
	err:=client.PutLogs( request )
	if err!= nil {
		t.Errorf( "get the error %v", err )
	}
}
|
[
3
] |
package merger
import (
"reflect"
"time"
)
// tryMergeAll attempts, in order, a time-string merge, a numeric merge, and
// finally the custom TypeConverters, reporting whether any of them assigned
// srcval into dstval.
func tryMergeAll(dstval reflect.Value, srcval reflect.Value, tc TypeConverters) bool {
	if tryMergeTimeString(dstval, srcval) {
		return true
	}
	if tryMergeNumeric(dstval, srcval) {
		return true
	}
	// if tc != nil {
	// NOTE(review): the nil guard above is commented out; if TypeConverters
	// is a pointer or interface type, a nil tc would panic here — confirm.
	if tc.TrySet(dstval, srcval) {
		return true
	}
	// }
	return false
}
// tryMergeTimeString handles the special case of assigning a string source
// into a time.Time destination by parsing the string as a JSON time value
// (RFC 3339). It reports whether the assignment happened.
func tryMergeTimeString(dstval reflect.Value, srcval reflect.Value) bool {
	// not matching types
	srcval = getRealValue(srcval)
	// try to convert
	if srcval.Kind() == reflect.String && dstval.Kind() == reflect.Struct {
		// maybe it's time
		vvv := dstval.Interface()
		if _, ok := vvv.(time.Time); ok {
			t0 := time.Now()
			t1 := &t0
			// time.Time.UnmarshalJSON expects a quoted string, hence the
			// added quotes around the source value.
			if t1.UnmarshalJSON([]byte("\""+srcval.String()+"\"")) == nil {
				// set
				t0 = *t1
				newsrc := reflect.ValueOf(t0)
				dstval.Set(newsrc)
				return true
			}
		}
	}
	return false
}
// tryMergeNumeric merges numeric values of differing kinds. Currently only
// signed-integer destinations are supported; it reports whether the merge
// happened.
func tryMergeNumeric(dstval reflect.Value, srcval reflect.Value) bool {
	// not matching types
	srcval = getRealValue(srcval)
	switch dstval.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return _mNumericInt(dstval, srcval)
	//TODO: floats, uints
	}
	return false
}
func _mNumericInt(dstval reflect.Value, srcval reflect.Value) bool {
switch srcval.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
ii0 := srcval.Int()
dstval.SetInt(ii0)
return true
case reflect.Float32, reflect.Float64:
ff0 := srcval.Float()
ii0 := int64(ff0)
dstval.SetInt(ii0)
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
ui0 := srcval.Uint()
ii0 := int64(ui0)
dstval.SetInt(ii0)
return true
}
return false
}
func getRealValue(v reflect.Value) reflect.Value {
n := 25
kk := v.Kind()
for (kk == reflect.Interface || kk == reflect.Ptr) && n > 0 {
v = v.Elem()
kk = v.Kind()
n--
}
return v
}
|
[
6
] |
// This file implements a basic linear pattern infill.
package clip
import (
"GoSlice/data"
"fmt"
clipper "github.com/aligator/go.clipper"
)
// linear provides an infill which consists of simple parallel lines.
// The direction of the lines is switching for each layer by 90°..
type linear struct {
	// pre-generated vertical candidate lines spanning the bounding box
	verticalPaths clipper.Paths
	// pre-generated horizontal candidate lines spanning the bounding box
	horizontalPaths clipper.Paths
	// spacing between neighbouring infill lines
	lineWidth data.Micrometer
}
// NewLinearPattern provides a simple linear infill pattern consisting of simple parallel lines.
// The direction of the lines is switching for each layer by 90°.
// The lines cover the whole bounding box [min, max]; they are later clipped
// against the actual part outline in getInfill.
func NewLinearPattern(min data.MicroPoint, max data.MicroPoint, lineWidth data.Micrometer) Pattern {
	// Generate the vertical lines, one per lineWidth along X, each spanning
	// the full Y range.
	verticalLines := clipper.Paths{}
	for x := min.X(); x <= max.X(); x += lineWidth {
		verticalLines = append(verticalLines, clipper.Path{
			&clipper.IntPoint{
				X: clipper.CInt(x),
				Y: clipper.CInt(max.Y()),
			},
			&clipper.IntPoint{
				X: clipper.CInt(x),
				Y: clipper.CInt(min.Y()),
			},
		})
	}
	// Generate the horizontal lines analogously along Y.
	horizontalLines := clipper.Paths{}
	for y := min.Y(); y <= max.Y(); y += lineWidth {
		horizontalLines = append(horizontalLines, clipper.Path{
			&clipper.IntPoint{
				X: clipper.CInt(max.X()),
				Y: clipper.CInt(y),
			},
			&clipper.IntPoint{
				X: clipper.CInt(min.X()),
				Y: clipper.CInt(y),
			},
		})
	}
	// The original maintained numLine counters that were incremented but
	// never read; they have been removed.
	return linear{
		verticalPaths:   verticalLines,
		horizontalPaths: horizontalLines,
		lineWidth:       lineWidth,
	}
}
// Fill implements the Pattern interface by using simple linear lines as infill.
// It clips the pre-generated lines against the part's outline and holes and
// returns them in an order optimized for short travel moves.
func (p linear) Fill(layerNr int, part data.LayerPart) data.Paths {
	resultInfill := p.getInfill(layerNr, clipperPath(part.Outline()), clipperPaths(part.Holes()), 0)
	return p.sortInfill(microPaths(resultInfill, false))
}
// sortInfill optimizes the order of the infill lines.
// Greedy nearest-neighbour ordering: starting from the first line, it always
// appends the unused line whose same-side endpoint is closest to the current
// position, flipping line direction after each hop so travel stays short.
func (p linear) sortInfill(unsorted data.Paths) data.Paths {
	if len(unsorted) == 0 {
		return unsorted
	}
	// Save all sorted paths here.
	sorted := data.Paths{unsorted[0]}
	// Saves already used indices.
	isUsed := make([]bool, len(unsorted))
	isUsed[0] = true
	// Saves the last path to know where to continue.
	lastindex := 0
	// Save if the first or second point from the lastPath was the last point.
	lastPoint := 0
	for len(sorted) < len(unsorted) {
		point := unsorted[lastindex][lastPoint]
		bestIndex := -1
		bestDiff := data.Micrometer(-1)
		// get the line with the nearest point (of the same side)
		for i, line := range unsorted {
			if isUsed[i] {
				continue
			}
			point2 := line[lastPoint]
			differenceVec := point.Sub(point2)
			// bestDiff == -1 means "no candidate yet", so the first unused
			// line always becomes the initial best.
			if bestDiff == -1 || differenceVec.ShorterThanOrEqual(bestDiff) {
				bestIndex = i
				bestDiff = differenceVec.Size()
				continue
			}
		}
		if bestIndex > -1 {
			lastindex = bestIndex
			sorted = append(sorted, unsorted[lastindex])
			isUsed[bestIndex] = true
			// Alternate which endpoint we continue from (zig-zag traversal).
			lastPoint = 1 - lastPoint
		} else {
			// NOTE(review): with no unused candidate this re-appends the
			// current line; confirm this branch is actually reachable.
			sorted = append(sorted, unsorted[lastindex])
			isUsed[lastindex] = true
		}
		// When continuing from the second endpoint, reverse the stored line
		// so it is printed in travel order.
		if lastPoint == 1 {
			sorted[len(sorted)-1] = []data.MicroPoint{
				sorted[len(sorted)-1][1],
				sorted[len(sorted)-1][0],
			}
		}
	}
	if len(sorted) < len(unsorted) {
		panic("the sorted lines should have the same amount as the unsorted lines")
	}
	return sorted
}
// getInfill fills a polygon (with holes)
// It clips this pattern's pre-generated lines (vertical on even layers,
// horizontal on odd ones) against the outline minus the holes, optionally
// insetting/outsetting both by overlap first.
func (p linear) getInfill(layerNr int, outline clipper.Path, holes clipper.Paths, overlap float32) clipper.Paths {
	var result clipper.Paths
	// clip the paths with the lines using intersection
	exset := clipper.Paths{outline}
	co := clipper.NewClipperOffset()
	cl := clipper.NewClipper(clipper.IoNone)
	// generate the ex-set for the overlap (only if needed)
	if overlap != 0 {
		// Shrink the outline and grow the holes so the infill overlaps the
		// perimeter by the requested amount.
		co.AddPaths(exset, clipper.JtSquare, clipper.EtClosedPolygon)
		co.MiterLimit = 2
		exset = co.Execute(float64(-overlap))
		co.Clear()
		co.AddPaths(holes, clipper.JtSquare, clipper.EtClosedPolygon)
		co.MiterLimit = 2
		holes = co.Execute(float64(overlap))
	}
	// clip the lines by the outline and holes
	cl.AddPaths(exset, clipper.PtClip, true)
	cl.AddPaths(holes, clipper.PtClip, true)
	// Alternate the line direction by 90° every layer.
	if layerNr%2 == 0 {
		cl.AddPaths(p.verticalPaths, clipper.PtSubject, false)
	} else {
		cl.AddPaths(p.horizontalPaths, clipper.PtSubject, false)
	}
	tree, ok := cl.Execute2(clipper.CtIntersection, clipper.PftEvenOdd, clipper.PftEvenOdd)
	if !ok {
		fmt.Println("getLinearFill failed")
		return nil
	}
	for _, c := range tree.Childs() {
		result = append(result, c.Contour())
	}
	return result
}
|
[
1,
6
] |
package kluge
import (
"image"
"math"
"math/rand"
"github.com/fogleman/gg"
)
// Point couples a pixel coordinate with the RGB color sampled at that
// position in the source image.
type Point struct {
	X int
	Y int
	R uint8
	G uint8
	B uint8
}

// newPoint builds a Point from a coordinate and its color channels.
func newPoint(x, y int, r, g, b uint8) *Point {
	p := Point{X: x, Y: y, R: r, G: g, B: b}
	return &p
}
// randFloats returns a pseudo-random float64 uniformly drawn from [min, max).
func randFloats(min, max float64) float64 {
	span := max - min
	return min + span*rand.Float64()
}
// GeneratePoints run through the image creating random points.
// Every pixel that is not near-white (all channels <= 250) is kept with
// probability threshold/100.
func GeneratePoints(img image.Image, threshold float64) []*Point {
	points := make([]*Point, 0)
	for x := 0; x < img.Bounds().Max.X; x++ {
		for y := 0; y < img.Bounds().Max.Y; y++ {
			r, g, b, _ := img.At(x, y).RGBA()
			// Color.RGBA returns 16-bit channels (0-65535). The original used
			// uint8(r), which keeps only the low byte and yields essentially
			// arbitrary values; shifting right by 8 gives the real 8-bit channel.
			r8, g8, b8 := uint8(r>>8), uint8(g>>8), uint8(b>>8)
			if r8 <= 250 && g8 <= 250 && b8 <= 250 {
				val := randFloats(0, 100)
				if val < threshold {
					points = append(points, newPoint(x, y, r8, g8, b8))
				}
			}
		}
	}
	return points
}
// BuildLines create a new image and draw lines between points with
// a minimum distance
// Each qualifying pair (Euclidean distance < minDist) is stroked using the
// color of the earlier point in the slice. O(n²) over the points.
func BuildLines(w, h int, points []*Point, minDist float64) *gg.Context {
	// create empty context
	ctx := gg.NewContext(w, h)
	// iterate over points and draw a line if they
	// have a mininum distance
	for i, p := range points {
		// Only look at earlier indices so each pair is drawn once.
		for j := 0; j < i; j++ {
			if math.Hypot(float64(p.X-points[j].X), float64(p.Y-points[j].Y)) < minDist {
				ctx.MoveTo(float64(p.X), float64(p.Y))
				ctx.LineTo(float64(points[j].X), float64(points[j].Y))
				// ctx.SetLineWidth(0.65)
				ctx.SetRGB255(int(points[j].R), int(points[j].G), int(points[j].B))
				ctx.Stroke()
			}
		}
	}
	return ctx
}
|
[
1
] |
package controllers
import (
"fmt"
"net/http"
"strconv"
"github.com/stevobengtson/budgetme/models"
"github.com/stevobengtson/budgetme/repository"
"github.com/gin-gonic/gin"
)
//GetUsers ... Get all users
func GetUsers(c *gin.Context) {
var users []models.User
pagination := models.GeneratePaginationFromRequest(c)
err := repository.GetAllUsersPaged(&users, &pagination)
if err != nil {
c.AbortWithStatus(http.StatusNotFound)
} else {
c.JSON(http.StatusOK, users)
}
}
//CreateUser ... Create User
// Decodes the JSON body into a User, persists it, and echoes the stored user.
func CreateUser(c *gin.Context) {
	var user models.User
	// Reject malformed request bodies instead of silently persisting a
	// zero-valued user (the original ignored the bind error).
	if err := c.BindJSON(&user); err != nil {
		c.AbortWithStatus(http.StatusBadRequest)
		return
	}
	err := repository.CreateUser(&user)
	if err != nil {
		c.AbortWithStatus(http.StatusNotFound)
	} else {
		c.JSON(http.StatusOK, user)
	}
}
//GetUserByID ... Get the user by id
// The id comes from the ":id" path parameter; lookup failures map to 404.
func GetUserByID(c *gin.Context) {
	user, err := getUserFromParams(c)
	if err != nil {
		c.AbortWithStatus(http.StatusNotFound)
	} else {
		c.JSON(http.StatusOK, user)
	}
}
//UpdateUser ... Update the user information
// Only the authenticated user may update their own record; anyone else gets
// 401. The JSON body is merged over the stored user before saving.
func UpdateUser(c *gin.Context) {
	user, err := getUserFromParams(c)
	if err != nil {
		c.AbortWithStatus(http.StatusNotFound)
		return
	}
	// NOTE(review): this type assertion panics if "currentUser" is absent or
	// of another type — confirm middleware always sets it.
	if currentUser := c.Value("currentUser").(models.User); currentUser.ID != user.ID {
		c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "unable to update other users"})
		return
	}
	c.BindJSON(&user)
	err = repository.UpdateUser(&user, fmt.Sprint(user.ID))
	if err != nil {
		c.AbortWithStatus(http.StatusNotFound)
	} else {
		c.JSON(http.StatusOK, user)
	}
}
//DeleteUser ... Delete the user
// Only the authenticated user may delete their own record; anyone else gets
// 401. Repository failures map to 404.
func DeleteUser(c *gin.Context) {
	user, err := getUserFromParams(c)
	if err != nil {
		c.AbortWithStatus(http.StatusNotFound)
		return
	}
	// NOTE(review): this type assertion panics if "currentUser" is absent or
	// of another type — confirm middleware always sets it.
	if currentUser := c.Value("currentUser").(models.User); currentUser.ID != user.ID {
		c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "unable to delete other users"})
		return
	}
	err = repository.DeleteUser(&user, fmt.Sprint(user.ID))
	if err != nil {
		c.AbortWithStatus(http.StatusNotFound)
	} else {
		c.JSON(http.StatusOK, gin.H{"result": "user is deleted"})
	}
}
// getUserFromParams parses the ":id" path parameter as an unsigned integer
// and loads the matching user from the repository. It returns an error if the
// id is not numeric or the user does not exist.
func getUserFromParams(c *gin.Context) (models.User, error) {
	var user models.User
	var userId uint64
	var err error
	userId, err = strconv.ParseUint(c.Params.ByName("id"), 10, 64)
	if err != nil {
		return user, err
	}
	err = repository.GetUserByID(&user, uint(userId))
	if err != nil {
		return user, err
	}
	return user, nil
}
|
[
3
] |
// LLAMA Scraper pulls stats from Collectors and then writes them to the indicated database.
package llama
import (
"errors"
"fmt"
influxdb_client "github.com/influxdata/influxdb1-client/v2"
"log"
"sync"
"time"
)
// Set default timeout for writes to 5 seconds
// This may be worth adding as a parameter in the future
const DefaultTimeout = time.Second * 5
// NewInfluxDbWriter provides a client for writing LLAMA datapoints to InfluxDB.
// host/port form the HTTP endpoint; user/pass authenticate; db names the
// target database. A zero-valued writer is returned alongside any error.
func NewInfluxDbWriter(host string, port string, user string, pass string, db string) (*InfluxDbWriter, error) {
	// Create the InfluxDB writer
	url := fmt.Sprintf("http://%v:%v", host, port)
	log.Println("Creating InfluxDB writer for", url)
	ifdbc, err := influxdb_client.NewHTTPClient(influxdb_client.HTTPConfig{
		Addr: url,
		Username: user,
		Password: pass,
		Timeout: DefaultTimeout,
	})
	if err != nil {
		return &InfluxDbWriter{}, err
	}
	writer := &InfluxDbWriter{
		client: ifdbc,
		db: db,
	}
	return writer, nil
}
// InfluxDbWriter is used for writing datapoints to an InfluxDB instance
type InfluxDbWriter struct {
	// client is the underlying InfluxDB HTTP client.
	client influxdb_client.Client
	// db is the database every batch is written to.
	db string
}
// Close will close the InfluxDB client connection and release any associated resources
func (w *InfluxDbWriter) Close() error {
	log.Println("Closing InfluxDB client connection")
	return w.client.Close()
}
// Write will commit the batched points to the database.
// The elapsed wall time is logged for both successes and failures.
func (w *InfluxDbWriter) Write(batch influxdb_client.BatchPoints) error {
	// Write to the DB
	start := time.Now()
	err := w.client.Write(batch)
	elapsed := time.Since(start).Seconds()
	if err != nil {
		log.Println("DB write failed after:", elapsed, "seconds")
		return err
	}
	// Only track write delay for successes
	log.Println("DB write completed in:", elapsed, "seconds")
	// TODO(dmar): Log rate of `db_write_delay`
	return nil
}
// Batch will group the points into a batch for writing to the database.
// Each datapoint's fields are converted to float64 before being added.
func (w *InfluxDbWriter) Batch(points Points) (influxdb_client.BatchPoints, error) {
	// Create batch
	bp, err := influxdb_client.NewBatchPoints(influxdb_client.BatchPointsConfig{
		Database: w.db,
		Precision: "s", // Write as second precision to improve perf, since we don't need it more granular
	})
	if err != nil {
		return nil, err
	}
	// Convert all points and add to batch
	for _, dp := range points {
		// This is needed because the the InfluxDB client expects the interface type
		// More casting weirdness :/
		// TODO(dmar): Reevaluate this in the future, since this was likely a mistake
		// initially and isn't necessary.
		newFields := make(map[string]interface{})
		for key, value := range dp.Fields {
			newFields[key] = float64(value)
		}
		pt, err := influxdb_client.NewPoint(
			dp.Measurement,
			dp.Tags,
			newFields,
			dp.Time,
		)
		if err != nil {
			return nil, err
		}
		bp.AddPoint(pt)
	}
	return bp, nil
}
// BatchWrite will group and write the indicated points to the associated
// InfluxDB host.
func (w *InfluxDbWriter) BatchWrite(points Points) error {
	batch, err := w.Batch(points)
	if err != nil {
		// fmt.Errorf with %w keeps the cause inspectable via errors.Is/As and
		// avoids the trailing newline errors.New(fmt.Sprintln(...)) embedded.
		return fmt.Errorf("failed to create batch from points: %w", err)
	}
	if err := w.Write(batch); err != nil {
		return fmt.Errorf("failed to write batch: %w", err)
	}
	return nil
}
// Scraper pulls stats from collectors and writes them to a backend
type Scraper struct {
	// writer is the InfluxDB sink for collected points.
	writer *InfluxDbWriter
	// collectors are the clients to pull stats from each cycle.
	collectors []Client
	// port is the collector port all clients were created with.
	port string
}
// NewScraper creates and initializes a means of collecting stats and writing them to a database.
// One Client is created per collector hostname, all on cPort; the remaining
// parameters configure the InfluxDB writer.
func NewScraper(collectors []string, cPort string, dbHost string, dbPort string, dbUser string, dbPass string, dbName string) (*Scraper, error) {
	var clients []Client
	for _, collector := range collectors {
		c := NewClient(collector, cPort)
		clients = append(clients, c)
	}
	w, err := NewInfluxDbWriter(dbHost, dbPort, dbUser, dbPass, dbName)
	if err != nil {
		return &Scraper{}, err
	}
	s := &Scraper{
		writer: w,
		collectors: clients,
		port: cPort,
	}
	return s, nil
}
// Run performs collections for all assocated collectors.
// Collections run concurrently, one goroutine per collector, and Run blocks
// until all of them finish. Individual failures are handled as minor errors.
func (s *Scraper) Run() {
	log.Println("Collection cycle starting")
	// Make sure we don't leave DB connections hanging open
	// NOTE(review): this closes the writer at the end of every Run; confirm
	// Run is meant to be called once per writer rather than periodically.
	defer s.writer.Close()
	var wg sync.WaitGroup
	// For each collector
	for _, collector := range s.collectors {
		wg.Add(1)
		go func(c Client) {
			defer wg.Done()
			err := s.run(c)
			HandleMinorError(err)
		}(collector)
	}
	wg.Wait()
	log.Println("Collection cycle complete")
}
// run performs a single collection cycle for one collector: it pulls the
// collector's datapoints and batch-writes them to InfluxDB, logging progress
// at each step. The first failing step aborts the cycle and is returned.
func (s *Scraper) run(collector Client) error {
	log.Println(collector.Hostname(), "- Collection cycle started")
	// Pull stats
	points, err := collector.GetPoints()
	numPoints := float64(len(points))
	if err != nil {
		log.Println(collector.Hostname(), "- Collection failed:", err)
		// TODO(dmar): Log rate of `failed_collections`
		return err
	}
	log.Println(collector.Hostname(), "- Pulled datapoints:", numPoints)
	// TODO(dmar): Log rate of `pulled_points`
	// Write them to the client
	err = s.writer.BatchWrite(points)
	if err != nil {
		log.Println(collector.Hostname(), "- Collection failed:", err)
		// TODO(dmar): Log rate of `failed_collections`
		return err
	}
	log.Println(collector.Hostname(), "- Wrote datapoints")
	// TODO(dmar): Log rate of `written_points`
	log.Println(collector.Hostname(), "- Collection cycle completed")
	// TODO(dmar): Log rate of `successful_collections`
	return nil
}
|
[
6
] |
package main
import (
"net/http"
"log"
"os/exec"
"strings"
"regexp"
"strconv"
"math"
"html/template"
simplejson "github.com/bitly/go-simplejson"
"github.com/chrisport/slotprovider"
"sync"
"encoding/json"
"os"
"io/ioutil"
"time"
)
//TODO proper project setup, modular separation
var (
	//TODO use file templates
	// bootstrap is the shared <head> CSS include for all served pages.
	bootstrap = `<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous">
	<style> body {margin: 1em;}</style>`
	// inProgressMessage auto-refreshes every 2s while recognition runs.
	inProgressMessage = "<html><head>" + bootstrap + "<meta http-equiv=\"refresh\" content=\"2;\"></head><body>recognition in progress... you will be redirected automatically</body></html>"
	// resultPage is the html/template rendered with a Result value.
	resultPage = `<html><head>` + bootstrap + `</head><body>
	{{.ErrorMessage}}<h1>{{if .Artist}}{{.Artist}} - {{.TrackName}}{{end}}<h1>
	<button type="submit" class="btn btn-primary" onClick="window.history.go(-1); return false;">Search more</button>
	</body></html>`
	// Regexes for parsing "1h2m3s"-style timestamps and trimming query strings.
	hourRegex = regexp.MustCompile("([0-9]+)h")
	minuteRegex = regexp.MustCompile("([0-9]+)m")
	secondRegex = regexp.MustCompile("([0-9]+)s")
	cleanUrlRegex = regexp.MustCompile("(.*)\\?")
	notRecognisedMessage = "Track could not be recognised."
)
// main loads the persisted result cache, starts a goroutine that snapshots
// the cache to disk every 20 seconds, and then serves HTTP forever.
func main() {
	loadCacheFromDisc()
	go func() {
		for {
			time.Sleep(20 * time.Second)
			saveCacheToDisc()
		}
	}()
	Serve()
}
// Result is the outcome of one recognition attempt. Exactly one of
// ErrorMessage or Artist/TrackName is normally set; RawBody, when non-empty,
// is served verbatim instead of rendering the result template.
type Result struct {
	ErrorMessage string `json:"error,omitempty"`
	Artist string `json:"artist,omitempty"`
	TrackName string `json:"trackName,omitempty"`
	RawBody string `json:"rawBody,omitempty"`
}
// Serve registers the HTTP handlers — static frontend at "/", a JSON cache
// dump at "/stats", and the throttled recognition endpoint at
// "/api/recognise?url=...&t=..." — and blocks on ListenAndServe(:3000).
func Serve() {
	fs := http.FileServer(http.Dir("frontend"))
	http.Handle("/", fs)
	http.HandleFunc("/stats", func(rw http.ResponseWriter, req *http.Request) {
		dump, err := dumpCache()
		if err != nil {
			rw.Write([]byte(err.Error()))
		}
		rw.Write(dump)
	})
	throttledRecogniser := newThrottledRecogniser()
	http.HandleFunc("/api/recognise", func(rw http.ResponseWriter, req *http.Request) {
		q := req.URL.Query()
		// NOTE(review): q["url"][0] / q["t"][0] panic when the parameter is
		// absent — confirm the frontend always supplies both.
		songUrl := q["url"][0]
		songUrl = cleanUrl(songUrl)
		ts := q["t"][0]
		result := throttledRecogniser(songUrl, ts)
		// A RawBody result (e.g. the in-progress page) is served verbatim.
		if result.RawBody != "" {
			rw.Write([]byte(result.RawBody))
			return
		}
		t := template.New("some template") // Create a template.
		//t2, err := t.ParseFiles("./frontend/result_page.html") // Parse template file.
		t2, err := t.Parse(resultPage) // Parse template file.
		if err != nil {
			panic(err)
		}
		t2.Execute(rw, *result)
	})
	log.Fatal(http.ListenAndServe(":3000", nil))
}
// newThrottledRecogniser returns a closure that serves recognition requests:
// results are cached per "<url>#t=<seconds>s" key (with the timestamp snapped
// to 30s intervals), at most 5 recognitions run concurrently, and callers get
// an auto-refreshing "in progress" page while work is pending.
func newThrottledRecogniser() func(songUrl string, ts string) *Result {
	recognitionSP := slotprovider.New(5)
	return func(songUrl, ts string) *Result {
		timeInSeconds, err := extractTimeInSeconds(ts)
		if err != nil {
			return &Result{ErrorMessage:err.Error()}
		}
		// Snap to 30s buckets so nearby timestamps share one cache entry.
		timeInSeconds = floorToInterval(timeInSeconds, 30)
		fullUrl := songUrl + "#t=" + strconv.Itoa(timeInSeconds) + "s"
		initialized, res := getFromCache(fullUrl)
		if res != nil {
			log.Printf("Responding to %v with cached result", fullUrl)
			return res
		} else if initialized {
			// this item is processing currently
			log.Printf("Responding to %v with 'in progress'", fullUrl)
			return &Result{RawBody:inProgressMessage}
		}
		//else start recognition
		isSlotAcquired, releaseSlot := recognitionSP.AcquireSlot()
		if !isSlotAcquired {
			log.Printf("Responding to %v with 'no free slots'", fullUrl)
			return &Result{ErrorMessage:"Request limit reached. We are not able to recognize more songs at the moment. Please try later."}
		}
		// Reserve the cache entry so concurrent requests see "in progress".
		reserveCache(fullUrl)
		go func() {
			defer releaseSlot()
			log.Printf("Start recognition of %v", fullUrl)
			result := RecogniseSong(songUrl, timeInSeconds)
			res := parseResult(result)
			putResultToCache(fullUrl, res)
		}()
		log.Printf("Responding to %v with 'in progress' and start recognition", fullUrl)
		return &Result{RawBody:inProgressMessage}
	}
}
// floorToInterval snaps time to the nearest multiple of intervall: values
// less than halfway into an interval round down, the rest round up.
// (Despite the name it rounds to nearest rather than flooring.)
func floorToInterval(time int, intervall int) int {
	whole := time / intervall
	frac := float32(time)/float32(intervall) - float32(whole)
	if frac >= 0.5 {
		whole++
	}
	return whole * intervall
}
// parseResult converts the raw JSON returned by the ACR recognition script
// into a Result: either artist/track on success, a "not recognised" message,
// or a generic error when the payload cannot be parsed.
func parseResult(result string) *Result {
	sj, err := simplejson.NewJson([]byte(result))
	if err != nil {
		return &Result{ErrorMessage:"Error occurred"}
	}
	// The service signals a miss via status.msg == "No result".
	// NOTE(review): the err from this Get is deliberately unchecked — a
	// missing status path just leaves noResult empty.
	noResult, err := (*sj).GetPath("status", "msg").String()
	if noResult == "No result" {
		return &Result{ErrorMessage:notRecognisedMessage}
	}
	artist, err := (*sj).GetPath("metadata", "music").GetIndex(0).Get("artists").GetIndex(0).Get("name").String()
	if err != nil {
		return &Result{ErrorMessage:"Error occurred"}
	}
	trackName, err := (*sj).GetPath("metadata", "music").GetIndex(0).Get("title").String()
	if err != nil {
		return &Result{ErrorMessage:"Error occurred"}
	} else {
		return &Result{Artist:artist, TrackName:trackName}
	}
}
// RecogniseSong downloads the song behind songUrl and sends the sample at
// timeInSeconds to the ACR service, returning the raw recognition output or
// a human-readable error string.
func RecogniseSong(songUrl string, timeInSeconds int) (string) {
	filePath, err := downloadSong(songUrl)
	if err != nil {
		return "Error while downloading: " + err.Error()
	}
	result, err := sendSongToACR(filePath, timeInSeconds)
	if err != nil {
		return "Error while recognizing: " + err.Error()
	}
	return result
}
// downloadSong fetches the audio behind songUrl via the matching helper
// script (YouTube or SoundCloud) and returns the local file path, taken from
// the script's last output line.
// NOTE(review): URLs that are neither youtube nor soundcloud fall through and
// return ("", nil) — confirm callers tolerate an empty path.
func downloadSong(songUrl string) (string, error) {
	var fileName string
	var err error
	if strings.Contains(songUrl, "youtube") {
		fileName, err = executeAndGetLastLine("./download_youtube.sh", songUrl)
		if err != nil {
			return "", err
		}
	} else if strings.Contains(songUrl, "soundcloud") {
		fileName, err = executeAndGetLastLine("./download_soundcloud.sh", songUrl)
		if err != nil {
			return "", err
		}
	}
	log.Println("Downloaded song to file: ", fileName, songUrl)
	return fileName, nil
}
// sendSongToACR runs the recognition script against the downloaded file,
// passing the sample offset in seconds, and returns its last output line.
func sendSongToACR(filePath string, timeInSeconds int) (string, error) {
	return executeAndGetLastLine("./recognise.sh", filePath, strconv.Itoa(timeInSeconds))
}
// executeAndGetLastLine runs the given script with opts as arguments and
// returns the last non-empty line of its combined stdout output.
func executeAndGetLastLine(script string, opts... string) (string, error) {
	out, err := exec.Command(script, opts...).Output()
	if err != nil {
		log.Printf("[ERROR] Script %v failed with %v\n", script, err)
		return "", err
	}
	log.Printf("[SUCCESS] Script %v finished\n", script)
	return getLastLine(string(out)), nil
}
// ### HELPER ####
// getLastLine returns the last non-empty line of input, or a generic error
// message when every line is empty.
func getLastLine(input string) string {
	lines := strings.Split(input, "\n")
	for i := len(lines) - 1; i >= 0; i-- {
		if lines[i] != "" {
			return lines[i]
		}
	}
	return "unknown error occurred"
}
// extractTimeInSeconds parses a timestamp in either "1h2m3s" style or
// colon-separated "h:mm:ss" style and returns the total number of seconds.
// An empty timestamp yields 0.
func extractTimeInSeconds(timestamp string) (int, error) {
	if timestamp == "" {
		return 0, nil
	}
	if strings.ContainsAny(timestamp, "hms") {
		return extractFromHMSFormat(timestamp)
	}
	return extractFromCOLONFormat(timestamp)
}
// extractFromCOLONFormat parses "hh:mm:ss" (or "mm:ss", or plain seconds)
// into a total number of seconds. Any non-numeric component is an error.
func extractFromCOLONFormat(timestamp string) (int, error) {
	parts := strings.Split(timestamp, ":")
	// The leftmost component has weight 60^(k-1) for k components.
	factor := int(math.Pow(60, float64(len(parts)-1)))
	total := 0
	for _, part := range parts {
		n, err := strconv.Atoi(part)
		if err != nil {
			return 0, err
		}
		total += n * factor
		factor /= 60
	}
	return total, nil
}
// extractFromHMSFormat parses timestamps like "1h2m3s" (any component may be
// omitted) and returns the total number of seconds.
func extractFromHMSFormat(timestamp string) (int, error) {
	t := 0
	match := hourRegex.FindStringSubmatch(timestamp)
	if len(match) > 0 {
		hrs, err := strconv.Atoi(match[1])
		if err != nil {
			return 0, err
		}
		// One hour is 3600 seconds; the original multiplied by 1200 and
		// undercounted every timestamp containing hours.
		t += hrs * 3600
	}
	match = minuteRegex.FindStringSubmatch(timestamp)
	if len(match) > 0 {
		min, err := strconv.Atoi(match[1])
		if err != nil {
			return 0, err
		}
		t += min * 60
	}
	match = secondRegex.FindStringSubmatch(timestamp)
	if len(match) > 0 {
		sec, err := strconv.Atoi(match[1])
		if err != nil {
			return 0, err
		}
		t += sec
	}
	return t, nil
}
// cleanUrl strips the query string from soundcloud URLs; all other URLs are
// returned unchanged.
func cleanUrl(url string) string {
	if strings.Contains(url, "soundcloud") && strings.Contains(url, "?") {
		// cleanUrlRegex matches everything up to and including the "?";
		// the trailing "?" is then dropped.
		url = cleanUrlRegex.FindStringSubmatch(url)[0]
		url = url[:len(url) - 1]
	}
	return url
}
// ############ Cache ############
var (
	// cache maps "<url>#t=<seconds>s" keys to recognition results.
	cache = make(map[string]*Result)
	// cacheMux guards all access to cache.
	cacheMux = sync.Mutex{}
	// InitialResult is the sentinel stored while recognition is in progress.
	InitialResult = &Result{ErrorMessage:"Processing"}
)
// reserveCache marks id as in-progress by storing the InitialResult sentinel.
func reserveCache(id string) {
	cacheMux.Lock()
	defer cacheMux.Unlock()
	cache[id] = InitialResult
}
// putResultToCache stores the finished result for id, replacing the sentinel.
// A missing prior reservation is logged but tolerated.
func putResultToCache(id string, result *Result) {
	cacheMux.Lock()
	defer cacheMux.Unlock()
	if cache[id] == nil {
		log.Printf("Warning: Result for %v has been stored in cache without prior reservation\n", id)
	}
	cache[id] = result
}
// dumpCache serializes the whole cache to JSON under the lock.
func dumpCache() ([]byte, error) {
	cacheMux.Lock()
	defer cacheMux.Unlock()
	return json.Marshal(cache)
}
// cacheDumpFilePath is where the cache snapshot is persisted between runs.
const cacheDumpFilePath = "cachedump.json"
// loadCacheFromDisc restores the cache from cacheDumpFilePath if it exists.
// Entries holding transient errors are discarded so they get retried; only
// successful results and definitive "not recognised" entries are kept.
func loadCacheFromDisc() {
	if content, err := ioutil.ReadFile(cacheDumpFilePath); err == nil {
		var savedCache map[string]*Result
		err := json.Unmarshal(content, &savedCache)
		if err != nil {
			log.Println(err)
			return
		}
		keysToDelete := make([]string, 0)
		for k, v := range savedCache {
			if v.ErrorMessage != "" && v.ErrorMessage != notRecognisedMessage {
				keysToDelete = append(keysToDelete, k)
			}
		}
		for _, k := range keysToDelete {
			delete(savedCache, k)
		}
		cacheMux.Lock()
		defer cacheMux.Unlock()
		cache = savedCache
	} else if !os.IsNotExist(err) {
		// A missing dump file is normal on first run; anything else is logged.
		log.Println("Could not load dump", err)
	}
}
// saveCacheToDisc serializes the in-memory result cache to cacheDumpFilePath,
// truncating any previous dump. Failures are logged and the dump is skipped.
func saveCacheToDisc() {
	log.Println("Saving result dump to disk")
	f, err := os.OpenFile(cacheDumpFilePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0666)
	if err != nil {
		// The original deferred f.Close() and wrote to f without checking
		// this error, silently dropping the dump when the open failed.
		log.Println(err)
		return
	}
	defer f.Close()
	bytes, err := dumpCache()
	if err != nil {
		log.Println(err)
		return
	}
	if _, err := f.Write(bytes); err != nil {
		log.Println(err)
	}
}
// getFromCache looks up id. initialized reports whether any entry exists at
// all; result is non-nil only when recognition has completed (i.e. the entry
// is not the InitialResult sentinel).
func getFromCache(id string) (initialized bool, result *Result) {
	cacheMux.Lock()
	defer cacheMux.Unlock()
	existing := cache[id]
	if existing == nil {
		return false, nil
	}
	if existing == InitialResult {
		return true, nil
	}
	return true, existing
}
|
[
0,
3,
5,
6
] |
package data
import (
"database/sql"
"time"
"github.com/sirupsen/logrus"
"fmt"
)
// PreparationRecord mirrors one row of the "preparation" table. All fields
// are stored as strings, matching the table's VARCHAR columns.
type PreparationRecord struct {
	// Uid uniquely identifies the record (UNIQUE column).
	Uid string
	ProgramType string
	Process string
	Port string
	// Status is e.g. "Running"; see QueryRunningPreByTypeAndProcess.
	Status string
	Error string
	CreateTime string
	UpdateTime string
}
// PreparationSource abstracts CRUD access to the preparation table.
type PreparationSource interface {
	// CheckAndInitPreTable creates the table on first use, exiting on failure.
	CheckAndInitPreTable()
	// InitPreparationTable when first executed
	InitPreparationTable() error
	// PreparationTableExists return true if preparation exists, otherwise return false or error if execute sql exception
	PreparationTableExists() (bool, error)
	// InsertPreparationRecord stores a new record.
	InsertPreparationRecord(record *PreparationRecord) error
	// QueryPreparationByUid returns the record with the given uid, or nil.
	QueryPreparationByUid(uid string) (*PreparationRecord, error)
	// QueryRunningPreByTypeAndProcess returns the first Running record for
	// the given type (and process, when non-empty), or nil.
	QueryRunningPreByTypeAndProcess(programType string, process string) (*PreparationRecord, error)
	// ListPreparationRecords returns all records.
	ListPreparationRecords() ([]*PreparationRecord, error)
	// UpdatePreparationRecordByUid updates status and error for a record.
	UpdatePreparationRecordByUid(uid, status, errMsg string) error
}
// preparationTableDDL creates the preparation table (SQLite syntax); the
// IF NOT EXISTS guard makes it safe to run repeatedly.
const preparationTableDDL = `CREATE TABLE IF NOT EXISTS preparation (
	id INTEGER PRIMARY KEY AUTOINCREMENT,
	uid VARCHAR(32) UNIQUE,
	program_type VARCHAR NOT NULL,
	process VARCHAR,
	port VARCHAR,
	status VARCHAR,
	error VARCHAR,
	create_time VARCHAR,
	update_time VARCHAR
)`
// preIndexDDL creates the secondary indexes used by the preparation queries.
var preIndexDDL = []string{
	`CREATE INDEX pre_uid_uidx ON preparation (uid)`,
	// The status index originally indexed the uid column a second time;
	// queries filter on status, so index that column instead.
	`CREATE INDEX pre_status_idx ON preparation (status)`,
	`CREATE INDEX pre_type_process_idx ON preparation (program_type, process)`,
}
// insertPreDML is the parameterized insert used by InsertPreparationRecord;
// argument order must match the column list.
var insertPreDML = `INSERT INTO
	preparation (uid, program_type, process, port, status, error, create_time, update_time)
	VALUES (?, ?, ?, ?, ?, ?, ?, ?)
`
// CheckAndInitPreTable creates the preparation table if it does not yet
// exist. Any failure is fatal: the process exits via logrus.Fatalf.
func (s *Source) CheckAndInitPreTable() {
	exists, err := s.PreparationTableExists()
	if err != nil {
		logrus.Fatalf(err.Error())
	}
	if !exists {
		err = s.InitPreparationTable()
		if err != nil {
			logrus.Fatalf(err.Error())
		}
	}
}
// InitPreparationTable creates the preparation table and its indexes.
// Index-creation errors are deliberately ignored (the indexes may already
// exist); only a table-creation failure is returned.
func (s *Source) InitPreparationTable() error {
	_, err := s.DB.Exec(preparationTableDDL)
	if err != nil {
		return fmt.Errorf("create preparation table err, %s", err)
	}
	for _, sql := range preIndexDDL {
		s.DB.Exec(sql)
	}
	return nil
}
// PreparationTableExists return true if preparation exists, otherwise return false or error if execute sql exception
func (s *Source) PreparationTableExists() (bool, error) {
	stmt, err := s.DB.Prepare(tableExistsDQL)
	if err != nil {
		return false, fmt.Errorf("select preparation table exists err when invoke db prepare, %s", err)
	}
	defer stmt.Close()
	rows, err := stmt.Query("preparation")
	if err != nil {
		return false, fmt.Errorf("select preparation table exists or not err, %s", err)
	}
	defer rows.Close()
	// The query returns a single count; a non-zero value means the table exists.
	var c int
	for rows.Next() {
		rows.Scan(&c)
		break
	}
	return c != 0, nil
}
// InsertPreparationRecord persists a single preparation record.
func (s *Source) InsertPreparationRecord(record *PreparationRecord) error {
	stmt, err := s.DB.Prepare(insertPreDML)
	if err != nil {
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec(record.Uid, record.ProgramType, record.Process,
		record.Port, record.Status, record.Error,
		record.CreateTime, record.UpdateTime)
	return err
}
// QueryPreparationByUid returns the record identified by uid, or nil when
// no matching record exists.
func (s *Source) QueryPreparationByUid(uid string) (*PreparationRecord, error) {
	stmt, err := s.DB.Prepare(`SELECT * FROM preparation WHERE uid = ?`)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(uid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	matches, err := getPreparationRecordFrom(rows)
	switch {
	case err != nil:
		return nil, err
	case len(matches) == 0:
		return nil, nil
	default:
		return matches[0], nil
	}
}
// QueryRunningPreByTypeAndProcess finds the "Running" preparation record
// for a program type, optionally narrowed to a process. A nil record is
// returned when nothing matches.
func (s *Source) QueryRunningPreByTypeAndProcess(programType string, process string) (*PreparationRecord, error) {
	query := `SELECT * FROM preparation WHERE program_type = ? and process = ? and status = "Running"`
	args := []interface{}{programType, process}
	if process == "" {
		query = `SELECT * FROM preparation WHERE program_type = ? and status = "Running"`
		args = []interface{}{programType}
	}
	stmt, err := s.DB.Prepare(query)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	matches, err := getPreparationRecordFrom(rows)
	switch {
	case err != nil:
		return nil, err
	case len(matches) == 0:
		return nil, nil
	default:
		return matches[0], nil
	}
}
// ListPreparationRecords returns every row of the preparation table.
func (s *Source) ListPreparationRecords() ([]*PreparationRecord, error) {
	stmt, err := s.DB.Prepare(`SELECT * FROM preparation`)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()
	rows, err := stmt.Query()
	if err != nil {
		// BUG FIX: rows.Close() was previously deferred before this check,
		// which panics on a nil rows when the query fails.
		return nil, err
	}
	defer rows.Close()
	return getPreparationRecordFrom(rows)
}
// getPreparationRecordFrom scans all rows into PreparationRecord values.
// The column order must match preparationTableDDL:
// id, uid, program_type, process, port, status, error, create_time, update_time.
func getPreparationRecordFrom(rows *sql.Rows) ([]*PreparationRecord, error) {
	records := make([]*PreparationRecord, 0)
	for rows.Next() {
		var id int
		// Renamed from `error`, which shadowed the predeclared identifier.
		var uid, programType, process, port, status, errMsg, createTime, updateTime string
		if err := rows.Scan(&id, &uid, &programType, &process, &port, &status, &errMsg, &createTime, &updateTime); err != nil {
			return nil, err
		}
		records = append(records, &PreparationRecord{
			Uid:         uid,
			ProgramType: programType,
			Process:     process,
			Port:        port,
			Status:      status,
			Error:       errMsg,
			CreateTime:  createTime,
			UpdateTime:  updateTime,
		})
	}
	// Surface iteration errors that rows.Next() reports only by returning
	// false; previously they were silently dropped.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return records, nil
}
// UpdatePreparationRecordByUid sets status and error message for the record
// identified by uid, stamping update_time with the current time (RFC3339Nano).
func (s *Source) UpdatePreparationRecordByUid(uid, status, errMsg string) error {
	stmt, err := s.DB.Prepare(`UPDATE preparation
	SET status = ?, error = ?, update_time = ?
	WHERE uid = ?
`)
	if err != nil {
		return err
	}
	defer stmt.Close()
	now := time.Now().Format(time.RFC3339Nano)
	_, err = stmt.Exec(status, errMsg, now, uid)
	return err
}
|
[
1,
6
] |
package kubelet
import (
"fmt"
"github.com/golang/glog"
"net/http"
"net/url"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/turbonomic/kubeturbo/pkg/discovery/util/httputil"
netutil "k8s.io/apimachinery/pkg/util/net"
"k8s.io/client-go/rest"
"k8s.io/client-go/transport"
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
)
// Paths and defaults for talking to the kubelet API.
const (
	// summaryPath serves node/pod/container stats.
	summaryPath string = "/stats/summary/"
	// specPath serves the cadvisor machine spec.
	specPath string = "/spec"
	DefaultKubeletPort = 10255
	DefaultKubeletHttps = false
	defaultConnTimeOut = 20 * time.Second
	defaultTLSHandShakeTimeout = 10 * time.Second
)
// Since http.Client is thread safe (https://golang.org/src/net/http/client.go)
// KubeletClient is also thread-safe if concurrent goroutines won't change the fields.
type KubeletClient struct {
	// client performs all HTTP requests and is shared across calls.
	client *http.Client
	// scheme is "http" or "https", chosen when the client is created.
	scheme string
	// port is the kubelet port used for every node.
	port int
}
// GetSummary fetches the kubelet stats summary endpoint of the given host
// and decodes the response into a stats.Summary.
func (kc *KubeletClient) GetSummary(host string) (*stats.Summary, error) {
	target := url.URL{
		Scheme: kc.scheme,
		Host:   fmt.Sprintf("%s:%d", host, kc.port),
		Path:   summaryPath,
	}
	req, err := http.NewRequest("GET", target.String(), nil)
	if err != nil {
		return nil, err
	}
	summary := &stats.Summary{}
	err = httputil.PostRequestAndGetValue(kc.client, req, summary)
	return summary, err
}
// GetMachineInfo fetches the kubelet /spec endpoint of the given host and
// decodes the response into a cadvisor MachineInfo.
func (kc *KubeletClient) GetMachineInfo(host string) (*cadvisorapi.MachineInfo, error) {
	target := url.URL{
		Scheme: kc.scheme,
		Host:   fmt.Sprintf("%s:%d", host, kc.port),
		Path:   specPath,
	}
	req, err := http.NewRequest("GET", target.String(), nil)
	if err != nil {
		return nil, err
	}
	minfo := &cadvisorapi.MachineInfo{}
	err = httputil.PostRequestAndGetValue(kc.client, req, minfo)
	return minfo, err
}
// get machine single-core Frequency, in Khz
// GetMachineCpuFrequency returns the CpuFrequency reported by the host's
// cadvisor machine spec; failures are logged and returned.
func (kc *KubeletClient) GetMachineCpuFrequency(host string) (uint64, error) {
	minfo, err := kc.GetMachineInfo(host)
	if err != nil {
		glog.Errorf("failed to get machine[%s] cpu.frequency: %v", host, err)
		return 0, err
	}
	return minfo.CpuFrequency, nil
}
//----------------- kubeletConfig -----------------------------------
// KubeletConfig carries the settings used to build a KubeletClient.
type KubeletConfig struct {
	kubeConfig *rest.Config
	enableHttps bool
	port int
	timeout time.Duration // timeout when fetching information from kubelet;
	tlsTimeOut time.Duration // TLS handshake timeout used by the transport.
}
// NewKubeletConfig returns a KubeletConfig for kubeConfig with the default
// port, scheme and timeout settings applied.
func NewKubeletConfig(kubeConfig *rest.Config) *KubeletConfig {
	cfg := &KubeletConfig{
		kubeConfig:  kubeConfig,
		enableHttps: DefaultKubeletHttps,
		port:        DefaultKubeletPort,
		timeout:     defaultConnTimeOut,
		tlsTimeOut:  defaultTLSHandShakeTimeout,
	}
	return cfg
}
// WithPort overrides the kubelet port (default DefaultKubeletPort).
func (kc *KubeletConfig) WithPort(port int) *KubeletConfig {
	kc.port = port
	return kc
}
// EnableHttps toggles the https scheme for kubelet requests.
func (kc *KubeletConfig) EnableHttps(enable bool) *KubeletConfig {
	kc.enableHttps = enable
	return kc
}
// Timeout sets the whole-request timeout, in seconds.
func (kc *KubeletConfig) Timeout(timeout int) *KubeletConfig {
	kc.timeout = time.Duration(timeout) * time.Second
	return kc
}
// Create builds the KubeletClient described by this config: an HTTP client
// with the configured transport and timeout, plus the matching scheme/port.
func (kc *KubeletConfig) Create() (*KubeletClient, error) {
	rt, err := makeTransport(kc.kubeConfig, kc.enableHttps, kc.tlsTimeOut)
	if err != nil {
		return nil, err
	}
	scheme := "http"
	if kc.enableHttps {
		scheme = "https"
	}
	return &KubeletClient{
		client: &http.Client{
			Transport: rt,
			Timeout:   kc.timeout,
		},
		scheme: scheme,
		port:   kc.port,
	}, nil
}
//------------Generate a http.Transport based on rest.Config-------------------
// Note: Following code is copied from Heapster
// https://github.com/kubernetes/heapster/blob/d2a1cf189921a68edd025d034ebdb348d7587509/metrics/sources/kubelet/util/kubelet_client.go#L48
// The reason to copy the code from Heapster, instead of using kubernetes/pkg/kubelet/client.MakeTransport(), is that
// Depending on Kubernetes will make it difficult to maintain the package dependency.
// So I copied this code, which only depending on "k8s.io/client-go".
// makeTransport returns a RoundTripper honoring the TLS settings derived from
// config; timeout bounds only the TLS handshake.
func makeTransport(config *rest.Config, enableHttps bool, timeout time.Duration) (http.RoundTripper, error) {
	//1. get transport.config
	cfg := transportConfig(config, enableHttps)
	tlsConfig, err := transport.TLSConfigFor(cfg)
	if err != nil {
		glog.Errorf("failed to get TLSConfig: %v", err)
		return nil, err
	}
	if tlsConfig == nil {
		glog.Warningf("tlsConfig is nil.")
	}
	//2. http client
	// Fall back to the default transport when no TLS config was produced.
	rt := http.DefaultTransport
	if tlsConfig != nil {
		rt = netutil.SetOldTransportDefaults(&http.Transport{
			TLSClientConfig: tlsConfig,
			TLSHandshakeTimeout: timeout,
		})
	}
	return transport.HTTPWrappersForConfig(cfg, rt)
}
// transportConfig translates a rest.Config into a transport.Config, forcing
// insecure TLS when https is requested but no CA material is configured.
func transportConfig(config *rest.Config, enableHttps bool) *transport.Config {
	cfg := &transport.Config{
		TLS: transport.TLSConfig{
			CAFile: config.CAFile,
			CAData: config.CAData,
			CertFile: config.CertFile,
			CertData: config.CertData,
			KeyFile: config.KeyFile,
			KeyData: config.KeyData,
		},
		BearerToken: config.BearerToken,
	}
	if enableHttps && !cfg.HasCA() {
		// Without a CA the kubelet certificate cannot be verified.
		cfg.TLS.Insecure = true
		glog.Warning("insecure TLS transport.")
	}
	return cfg
}
|
[
3
] |
package framework
import (
"time"
v2vv1alpha1 "github.com/kubevirt/vm-import-operator/pkg/apis/v2v/v1alpha1"
"github.com/kubevirt/vm-import-operator/pkg/conditions"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
)
// EnsureVMIDoesNotExist blocks until VM import with given name does not exist
// in the cluster, polling every 2s for up to one minute.
func (f *Framework) EnsureVMIDoesNotExist(vmiName string) error {
	check := func() (bool, error) {
		_, err := f.VMImportClient.V2vV1alpha1().VirtualMachineImports(f.Namespace.Name).Get(vmiName, metav1.GetOptions{})
		if errors.IsNotFound(err) {
			// Gone — done waiting.
			return true, nil
		}
		// nil err: still present, keep polling; other errors abort the wait.
		return false, err
	}
	return wait.PollImmediate(2*time.Second, 1*time.Minute, check)
}
// WaitForVMImportConditionInStatus blocks until the named VM import carries
// the given condition type with the given status, polling at pollInterval
// for at most timeout.
func (f *Framework) WaitForVMImportConditionInStatus(pollInterval time.Duration, timeout time.Duration, vmiName string, conditionType v2vv1alpha1.VirtualMachineImportConditionType, status corev1.ConditionStatus) error {
	return wait.PollImmediate(pollInterval, timeout, func() (bool, error) {
		vmImport, err := f.VMImportClient.V2vV1alpha1().VirtualMachineImports(f.Namespace.Name).Get(vmiName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		cond := conditions.FindConditionOfType(vmImport.Status.Conditions, conditionType)
		return cond != nil && cond.Status == status, nil
	})
}
// WaitForVMToBeProcessing blocks until VM import with given name is in Processing state
// (polls every 2 seconds for up to one minute).
func (f *Framework) WaitForVMToBeProcessing(vmiName string) error {
	return f.WaitForVMImportConditionInStatus(2*time.Second, time.Minute, vmiName, v2vv1alpha1.Processing, corev1.ConditionTrue)
}
|
[
6
] |
package raftbadger
import (
"errors"
"github.com/dgraph-io/badger"
"github.com/hashicorp/raft"
)
var (
	// ErrKeyNotFound is an error indicating a given key does not exist
	ErrKeyNotFound = errors.New("not found")
)
// BadgerStore provides access to Badger for Raft to store and retrieve
// log entries. It also provides key/value storage, and can be used as
// a LogStore and StableStore.
type BadgerStore struct {
	// conn is the underlying handle to the db.
	conn *badger.DB
	// The path to the Badger database directory.
	path string
}
// Options contains all the configuration used to open the Badger db
type Options struct {
	// Path is the directory path to the Badger db to use.
	Path string
	// BadgerOptions contains any specific Badger options you might
	// want to specify.
	BadgerOptions *badger.Options
	// NoSync causes the database to skip fsync calls after each
	// write to the log. This is unsafe, so it should be used
	// with caution.
	NoSync bool
}
// NewBadgerStore takes a file path and returns a connected Raft backend.
// It is a convenience wrapper over New with default Options.
func NewBadgerStore(path string) (*BadgerStore, error) {
	return New(Options{Path: path})
}
// func NewDefaultStableStore(path string) (*BadgerStore, error) {
// 	opts := badger.DefaultOptions
// 	opts.MaxLevels = 2
// 	return New(Options{Path: path, BadgerOptions: &opts})
// }
// New uses the supplied options to open the Badger db and prepare it for
// use as a raft backend. When no BadgerOptions are given, badger defaults
// are used; Dir/ValueDir are always forced to options.Path and SyncWrites
// reflects the NoSync flag.
func New(options Options) (*BadgerStore, error) {
	opts := options.BadgerOptions
	if opts == nil {
		defaults := badger.DefaultOptions
		opts = &defaults
	}
	opts.Dir = options.Path
	opts.ValueDir = options.Path
	opts.SyncWrites = !options.NoSync
	handle, err := badger.Open(*opts)
	if err != nil {
		return nil, err
	}
	return &BadgerStore{conn: handle, path: options.Path}, nil
}
// Close is used to gracefully close the DB connection.
func (b *BadgerStore) Close() error {
	return b.conn.Close()
}
// FirstIndex returns the first known index from the Raft log.
func (b *BadgerStore) FirstIndex() (uint64, error) {
	return b.firstIndex(false)
}
// LastIndex returns the last known index from the Raft log.
// (firstIndex with reverse=true starts the iteration at the highest key.)
func (b *BadgerStore) LastIndex() (uint64, error) {
	return b.firstIndex(true)
}
// firstIndex returns the key at one end of the log: the lowest index when
// reverse is false, the highest when reverse is true. An empty log yields 0.
func (b *BadgerStore) firstIndex(reverse bool) (uint64, error) {
	var idx uint64
	viewErr := b.conn.View(func(txn *badger.Txn) error {
		opts := badger.IteratorOptions{
			PrefetchValues: false,
			Reverse:        reverse,
		}
		it := txn.NewIterator(opts)
		defer it.Close()
		it.Rewind()
		if it.Valid() {
			idx = bytesToUint64(it.Item().Key())
		}
		return nil
	})
	if viewErr != nil {
		return 0, viewErr
	}
	return idx, nil
}
// GetLog gets a log entry from Badger at a given index, decoding it into
// log. A missing index is reported as raft.ErrLogNotFound.
func (b *BadgerStore) GetLog(index uint64, log *raft.Log) error {
	return b.conn.View(func(txn *badger.Txn) error {
		item, err := txn.Get(uint64ToBytes(index))
		if err == badger.ErrKeyNotFound {
			return raft.ErrLogNotFound
		}
		if err != nil {
			return err
		}
		val, err := item.Value()
		if err != nil {
			return err
		}
		return decodeMsgPack(val, log)
	})
}
// StoreLog stores a single raft log (delegates to the batch StoreLogs).
func (b *BadgerStore) StoreLog(log *raft.Log) error {
	return b.StoreLogs([]*raft.Log{log})
}
// StoreLogs stores a set of raft logs in a single update transaction,
// keyed by each entry's index.
func (b *BadgerStore) StoreLogs(logs []*raft.Log) error {
	return b.conn.Update(func(txn *badger.Txn) error {
		for _, entry := range logs {
			encoded, err := encodeMsgPack(entry)
			if err != nil {
				return err
			}
			if err := txn.Set(uint64ToBytes(entry.Index), encoded.Bytes()); err != nil {
				return err
			}
		}
		return nil
	})
}
// DeleteRange deletes logs within a given range inclusively.
//
// The transaction is managed manually so an ErrTxnTooBig from badger can be
// handled by committing the deletions so far and recursing on the rest of
// the range.
func (b *BadgerStore) DeleteRange(min, max uint64) error {
	// we manage the transaction manually in order to avoid ErrTxnTooBig errors
	txn := b.conn.NewTransaction(true)
	// Discard is a no-op after a successful Commit, so deferring it here
	// guarantees the transaction is released on every error path.
	defer txn.Discard()
	it := txn.NewIterator(badger.IteratorOptions{
		PrefetchValues: false,
		Reverse:        false,
	})
	for it.Seek(uint64ToBytes(min)); it.Valid(); it.Next() {
		key := make([]byte, 8)
		it.Item().KeyCopy(key)
		// Handle out-of-range log index
		if bytesToUint64(key) > max {
			break
		}
		// Delete in-range log index
		if err := txn.Delete(key); err != nil {
			if err == badger.ErrTxnTooBig {
				// Commit what fit, then retry from the current key.
				it.Close()
				if err = txn.Commit(nil); err != nil {
					return err
				}
				return b.DeleteRange(bytesToUint64(key), max)
			}
			// BUG FIX: this path previously returned without closing the
			// iterator or discarding the transaction, leaking both.
			it.Close()
			return err
		}
	}
	it.Close()
	return txn.Commit(nil)
}
// Set is used to set a key/value set outside of the raft log.
func (b *BadgerStore) Set(key []byte, val []byte) error {
	return b.conn.Update(func(txn *badger.Txn) error {
		return txn.Set(key, val)
	})
}
// Get is used to retrieve a value from the k/v store by key. A missing key
// is reported as ErrKeyNotFound.
func (b *BadgerStore) Get(key []byte) ([]byte, error) {
	var value []byte
	viewErr := b.conn.View(func(txn *badger.Txn) error {
		item, err := txn.Get(key)
		if err == badger.ErrKeyNotFound {
			return ErrKeyNotFound
		}
		if err != nil {
			return err
		}
		value, err = item.ValueCopy(value)
		return err
	})
	if viewErr != nil {
		return nil, viewErr
	}
	return value, nil
}
// SetUint64 is like Set, but handles uint64 values
func (b *BadgerStore) SetUint64(key []byte, val uint64) error {
	return b.Set(key, uint64ToBytes(val))
}
// GetUint64 is like Get, but handles uint64 values
func (b *BadgerStore) GetUint64(key []byte) (uint64, error) {
	val, err := b.Get(key)
	if err != nil {
		return 0, err
	}
	return bytesToUint64(val), nil
}
|
[
1,
6
] |
package main
import (
"fmt"
"sort"
)
// main exercises Solution with a small slice of consecutive integers and
// prints the resulting difference.
func main() {
	lista := []int{-5, -4, -3, -2, -1, 0, 1, 2}
	diferenca := Solution(lista)
	fmt.Println(diferenca)
}
// Solution sorts A and returns the value produced by buscaBinaria for a
// target of half the Somatorio over the smallest and largest elements.
// NOTE(review): `ordenada := A` copies only the slice header, so sort.Ints
// mutates the caller's slice — confirm that side effect is acceptable.
// NOTE(review): Somatorio is driven by element VALUES; this only matches a
// "sum of the range" reading when A holds consecutive integers (as in
// main's example) — verify intent for arbitrary input.
func Solution(A []int) int {
	ordenada := A
	sort.Ints(ordenada)
	alvo := Somatorio(ordenada[0], ordenada[len(ordenada) - 1]) / 2
	dif := buscaBinaria(ordenada, alvo)
	//esquerda, direita := Divide(ordenada)
	//sumEsq := Somatorio(esquerda[0], esquerda[len(esquerda) - 1])
	//sumDir := Somatorio(direita[0], direita[len(direita) - 1])
	//dif := sumEsq - sumDir
	//if dif < 0 {dif *= -1}
	return dif
}
// buscaBinaria binary-searches seq for a position whose Somatorio (over
// element values) straddles alvo, returning the gap between the two
// candidate sums, or 0 when no such position exists.
// NOTE(review): Somatorio(seq[0], seq[meio] + 1) increments the VALUE at
// meio, not the index — confirm this is intended for inputs that are not
// consecutive integers.
func buscaBinaria(seq []int, alvo int) int {
	E := 0
	D := len(seq) - 1
	for E <= D {
		meio := (E + D) / 2
		somaMeio := Somatorio(seq[0], seq[meio])
		somaMeioMais := Somatorio(seq[0], seq[meio] + 1)
		if somaMeio < alvo && somaMeioMais > alvo {
			return somaMeioMais - somaMeio
		} else if somaMeio < alvo {
			E = meio + 1
		} else {
			D = meio - 1
		}
	}
	return 0
}
// Somatorio returns partial(fim) - partial(inicio), where partial(v) is
// v*(v-1)/2 for negative v and v*(v+1)/2 otherwise.
// NOTE(review): for inicio >= 1 this excludes inicio itself from the
// resulting "range sum" while for inicio <= 0 it is included — confirm
// whether that asymmetry is intended.
func Somatorio(inicio int, fim int) int {
	partial := func(v int) int {
		sign := 1
		if v < 0 {
			sign = -1
		}
		return v * (v + sign) / 2
	}
	return partial(fim) - partial(inicio)
}
|
[
2,
3,
5,
6
] |
package tree
import "container/list"
// CollectTheCoins returns twice the number of tree edges that must still be
// traversed after pruning, computed in three visible phases:
//  1. repeatedly remove coinless leaves (they never need visiting);
//  2. remove two further leaf layers;
//  3. count each edge whose two endpoints both survived, at a cost of 2
//     (walked once out and once back).
func CollectTheCoins(coins []int, edges [][]int) int {
	var l1 int = len(coins)
	// Adjacency sets: graph[i] maps neighbor -> true.
	var graph []map[int]bool = make([]map[int]bool, l1)
	for i := 0; i < l1; i++ {
		graph[i] = make(map[int]bool)
	}
	for _, edge := range edges {
		graph[edge[0]][edge[1]] = true
		graph[edge[1]][edge[0]] = true
	}
	// Phase 1: BFS-prune leaves that carry no coin.
	var q1 list.List
	var delete_node map[int]bool = make(map[int]bool)
	for i := 0; i < l1; i++ {
		if len(graph[i]) == 1 && coins[i] == 0 {
			q1.PushBack(i)
		}
	}
	for q1.Len() > 0 {
		var cur_len int = q1.Len()
		for i := 0; i < cur_len; i++ {
			var cur int = q1.Front().Value.(int)
			delete_node[cur] = true
			q1.Remove(q1.Front())
			for next, _ := range graph[cur] {
				delete(graph[next], cur)
				if _, ok := delete_node[next]; ok {
					continue
				}
				if coins[next] == 1 {
					continue
				}
				// Neighbor became a coinless leaf: prune it next round.
				if len(graph[next]) == 1 {
					q1.PushBack(next)
				}
			}
			graph[cur] = make(map[int]bool)
		}
	}
	// Phase 2: prune exactly two more leaf layers (coins within distance 2
	// of a surviving node are collected remotely).
	var q2 list.List
	for i := 0; i < l1; i++ {
		if len(graph[i]) == 1 {
			q2.PushBack(i)
			//delete_node[i] = true
		}
	}
	var steps int = 2
	for steps > 0 {
		var cur_len int = q2.Len()
		for i := 0; i < cur_len; i++ {
			var cur int = q2.Front().Value.(int)
			q2.Remove(q2.Front())
			delete_node[cur] = true
			for next, _ := range graph[cur] {
				delete(graph[next], cur)
				if _, ok := delete_node[next]; ok {
					continue
				}
				if len(graph[next]) == 1 {
					q2.PushBack(next)
				}
			}
			//graph[cur] = make(map[int]bool)
		}
		steps--
	}
	// Phase 3: each edge between two surviving nodes costs 2 traversals.
	var res int = 0
	for _, edge := range edges {
		node1 := edge[0]
		node2 := edge[1]
		if _, ok1 := delete_node[node1]; !ok1 {
			if _, ok2 := delete_node[node2]; !ok2 {
				res += 2
			}
		}
	}
	return res
}
|
[
3
] |
package context
import (
"github.com/stretchr/testify/assert"
"testing"
)
// testApplicationContextEvent asserts the invariants shared by all
// application-context events: the event and parent ids match the expected
// ones, the timestamp is set, and both source and context are non-nil.
func testApplicationContextEvent(t *testing.T, event ApplicationContextEvent, eventId ApplicationEventId, parentEventId ApplicationEventId) {
	assert.Equal(t, eventId, event.GetEventId())
	assert.Equal(t, parentEventId, event.GetParentEventId())
	assert.NotEqual(t, 0, event.GetTimestamp())
	assert.NotNil(t, event.GetSource())
	assert.NotNil(t, event.GetApplicationContext())
}
// TestApplicationContextStartedEvent verifies the started-event constructor.
func TestApplicationContextStartedEvent(t *testing.T) {
	context := &testContext{}
	event := NewApplicationContextStartedEvent(context)
	testApplicationContextEvent(t, event, ApplicationContextStartedEventId(), ApplicationContextEventId())
}
// TestApplicationContextRefreshedEvent verifies the refreshed-event constructor.
func TestApplicationContextRefreshedEvent(t *testing.T) {
	context := &testContext{}
	event := NewApplicationContextRefreshedEvent(context)
	testApplicationContextEvent(t, event, ApplicationContextRefreshedEventId(), ApplicationContextEventId())
}
// TestApplicationContextStoppedEvent verifies the stopped-event constructor.
func TestApplicationContextStoppedEvent(t *testing.T) {
	context := &testContext{}
	event := NewApplicationContextStoppedEvent(context)
	testApplicationContextEvent(t, event, ApplicationContextStoppedEventId(), ApplicationContextEventId())
}
// TestApplicationContextClosedEvent verifies the closed-event constructor.
func TestApplicationContextClosedEvent(t *testing.T) {
	context := &testContext{}
	event := NewApplicationContextClosedEvent(context)
	testApplicationContextEvent(t, event, ApplicationContextClosedEventId(), ApplicationContextEventId())
}
|
[
6
] |
package alarm
import (
"github.com/gin-gonic/gin"
"strconv"
mid "github.com/WeBankPartners/open-monitor/monitor-server/middleware"
m "github.com/WeBankPartners/open-monitor/monitor-server/models"
"github.com/WeBankPartners/open-monitor/monitor-server/services/db"
"github.com/WeBankPartners/open-monitor/monitor-server/services/prom"
"fmt"
"strings"
"github.com/WeBankPartners/open-monitor/monitor-server/services/other"
"net/http"
"io/ioutil"
"encoding/json"
)
// ListTpl returns the strategy templates bound to an endpoint or a group.
// Query params: type ("endpoint" or "grp") and id (positive integer).
func ListTpl(c *gin.Context) {
	searchType := c.Query("type")
	id, _ := strconv.Atoi(c.Query("id"))
	if searchType == "" || id <= 0 {
		mid.ReturnValidateFail(c, "Type or id can not be empty")
		return
	}
	if searchType != "endpoint" && searchType != "grp" {
		mid.ReturnValidateFail(c, "Type must be \"endpoint\" or \"grp\"")
		return
	}
	query := m.TplQuery{SearchType: searchType, SearchId: id}
	if err := db.GetStrategys(&query, true); err != nil {
		mid.ReturnError(c, "Query strategy failed", err)
		return
	}
	mid.ReturnData(c, query.Tpl)
}
// AddStrategy creates an alarm strategy from the JSON body. When TplId is
// absent, a template is first created for the given group or endpoint
// (exactly one of GrpId/EndpointId must be positive). After persisting the
// strategy the prometheus rule file is regenerated and reloaded.
func AddStrategy(c *gin.Context) {
	var param m.TplStrategyTable
	if err := c.ShouldBindJSON(&param); err==nil {
		// check param
		// Strip quote characters so the values cannot break the generated
		// rule file content.
		param.Expr = strings.Replace(param.Expr, "'", "", -1)
		param.Content = strings.Replace(param.Content, "'", "", -1)
		param.Content = strings.Replace(param.Content, "\"", "", -1)
		if !mid.IsIllegalCond(param.Cond) || !mid.IsIllegalLast(param.Last) {
			mid.ReturnValidateFail(c, "cond or last param validate fail")
			return
		}
		// check tpl
		if param.TplId <= 0 {
			if param.GrpId + param.EndpointId <= 0 {
				mid.ReturnValidateFail(c, "Both endpoint and group id are missing")
				return
			}
			if param.GrpId > 0 && param.EndpointId > 0 {
				mid.ReturnValidateFail(c, "Endpoint and group id can not be provided at the same time")
				return
			}
			// No template yet: create one for the group or endpoint.
			err,tplObj := db.AddTpl(param.GrpId, param.EndpointId, "")
			if err != nil {
				mid.ReturnError(c, "Add strategy failed", err)
				return
			}
			param.TplId = tplObj.Id
		}
		strategyObj := m.StrategyTable{TplId:param.TplId,Metric:param.Metric,Expr:param.Expr,Cond:param.Cond,Last:param.Last,Priority:param.Priority,Content:param.Content}
		err = db.UpdateStrategy(&m.UpdateStrategy{Strategy:[]*m.StrategyTable{&strategyObj}, Operation:"insert"})
		if err != nil {
			mid.ReturnError(c, "Insert strategy failed", err)
			return
		}
		// Regenerate and reload the prometheus rule file for the template.
		err = SaveConfigFile(param.TplId, false)
		if err != nil {
			mid.ReturnError(c, "Save alert rules file failed", err)
			return
		}
		mid.ReturnSuccess(c, "Success")
	}else{
		mid.ReturnValidateFail(c, fmt.Sprintf("Parameter validation failed %v", err))
	}
}
// EditStrategy updates an existing alarm strategy (StrategyId required),
// refreshes the owning template and regenerates its prometheus rule file.
func EditStrategy(c *gin.Context) {
	var param m.TplStrategyTable
	if err := c.ShouldBindJSON(&param); err==nil {
		if param.StrategyId <= 0 {
			mid.ReturnValidateFail(c, "Strategy id can not be empty")
			return
		}
		// check param
		// Strip quote characters so the values cannot break the generated
		// rule file content.
		param.Expr = strings.Replace(param.Expr, "'", "", -1)
		param.Content = strings.Replace(param.Content, "'", "", -1)
		param.Content = strings.Replace(param.Content, "\"", "", -1)
		if !mid.IsIllegalCond(param.Cond) || !mid.IsIllegalLast(param.Last) {
			mid.ReturnValidateFail(c, "cond or last param validate fail")
			return
		}
		strategyObj := m.StrategyTable{Id:param.StrategyId,Metric:param.Metric,Expr:param.Expr,Cond:param.Cond,Last:param.Last,Priority:param.Priority,Content:param.Content}
		err = db.UpdateStrategy(&m.UpdateStrategy{Strategy:[]*m.StrategyTable{&strategyObj}, Operation:"update"})
		if err != nil {
			mid.ReturnError(c, "Update strategy failed", err)
			return
		}
		// Re-read the strategy to discover its template, then refresh it.
		_,strategy := db.GetStrategyTable(param.StrategyId)
		db.UpdateTpl(strategy.TplId, "")
		err = SaveConfigFile(strategy.TplId, false)
		if err != nil {
			mid.ReturnError(c, "Save alert rules file failed", err)
			return
		}
		mid.ReturnSuccess(c, "Success")
	}else{
		mid.ReturnValidateFail(c, fmt.Sprintf("Parameter validation failed %v", err))
	}
}
// DeleteStrategy removes the strategy named by the "id" query parameter,
// refreshes its template and regenerates the prometheus rule file.
func DeleteStrategy(c *gin.Context) {
	id, _ := strconv.Atoi(c.Query("id"))
	if id <= 0 {
		mid.ReturnValidateFail(c, "Id can not be empty")
		return
	}
	_, strategy := db.GetStrategyTable(id)
	if strategy.Id <= 0 {
		mid.ReturnValidateFail(c, "The strategy id is not in use")
		return
	}
	if err := db.UpdateStrategy(&m.UpdateStrategy{Strategy: []*m.StrategyTable{{Id: id}}, Operation: "delete"}); err != nil {
		mid.ReturnError(c, "Delete strategy failed", err)
		return
	}
	db.UpdateTpl(strategy.TplId, "")
	if err := SaveConfigFile(strategy.TplId, false); err != nil {
		mid.ReturnError(c, "Save prometheus rule file failed", err)
		return
	}
	mid.ReturnSuccess(c, "Success")
}
// SearchObjOption searches hosts (type=endpoint) or groups (anything else)
// by the "search" query parameter and returns the matching options.
func SearchObjOption(c *gin.Context) {
	searchType := c.Query("type")
	searchMsg := c.Query("search")
	if searchType == "" || searchMsg == "" {
		mid.ReturnValidateFail(c, "Type or search content can not be empty")
		return
	}
	var (
		err  error
		data []*m.OptionModel
	)
	switch searchType {
	case "endpoint":
		err, data = db.SearchHost(searchMsg)
	default:
		err, data = db.SearchGrp(searchMsg)
	}
	if err != nil {
		mid.ReturnError(c, "Search failed", err)
		return
	}
	for _, option := range data {
		option.OptionTypeName = option.OptionType
	}
	mid.ReturnData(c, data)
}
// SaveConfigFile rewrites the prometheus rule file for tplId and for every
// parent template, then asks prometheus to reload its config. When
// fromCluster is false the change is also pushed asynchronously to the
// other cluster members.
func SaveConfigFile(tplId int, fromCluster bool) error {
	var err error
	idList := db.GetParentTpl(tplId)
	err = updateConfigFile(tplId)
	if err != nil {
		mid.LogError("update prometheus rule file error", err)
		return err
	}
	if len(idList) > 0 {
		for _,v := range idList {
			err = updateConfigFile(v)
			if err != nil {
				mid.LogError(fmt.Sprintf("update prometheus rule tpl id %d error", v), err)
			}
		}
	}
	// NOTE(review): err here holds only the LAST parent update's result;
	// earlier parent failures are logged but would be masked by a later
	// success — confirm that is intended.
	if err != nil {
		return err
	}
	err = prom.ReloadConfig()
	if err != nil {
		mid.LogError("reload prometheus config error", err)
		return err
	}
	if !fromCluster {
		// Fire-and-forget sync to peers; errors surface via their own logs.
		go other.SyncConfig(tplId, m.SyncConsulDto{})
	}
	return nil
}
// updateConfigFile regenerates the prometheus alert-rule file backing one
// template: it resolves the template to its group or endpoint, collects the
// effective strategies, substitutes the $address/$guid placeholders and
// writes the resulting rule set through prom.SetConfig.
func updateConfigFile(tplId int) error {
	err,tplObj := db.GetTpl(tplId,0 ,0)
	if err != nil {
		mid.LogError("get tpl error", err)
		return err
	}
	// A template belongs to either a group or a single endpoint.
	var query m.TplQuery
	var isGrp bool
	if tplObj.GrpId > 0 {
		isGrp = true
		query.SearchType = "grp"
		query.SearchId = tplObj.GrpId
	}else{
		isGrp = false
		query.SearchType = "endpoint"
		query.SearchId = tplObj.EndpointId
	}
	err = db.GetStrategys(&query, false)
	if err != nil {
		mid.LogError("get strategy error", err)
		return err
	}
	var fileName string
	var endpointExpr,guidExpr string
	if len(query.Tpl) > 0 {
		// The last template in the chain names the file; for groups,
		// merge the strategies of the whole chain with later templates
		// overriding earlier ones per metric.
		fileName = query.Tpl[len(query.Tpl)-1].ObjName
		if isGrp {
			tmpStrategy := []*m.StrategyTable{}
			tmpStrategyMap := make(map[string]*m.StrategyTable)
			for _,v := range query.Tpl {
				for _,vv := range v.Strategy {
					tmpStrategyMap[vv.Metric] = vv
				}
			}
			for _,v := range tmpStrategyMap {
				tmpStrategy = append(tmpStrategy, v)
			}
			query.Tpl[len(query.Tpl)-1].Strategy = tmpStrategy
		}
	}else{
		// No template chain: derive the file name (and, for endpoints,
		// the address/guid substitutions) directly from the object.
		if isGrp {
			_,grpObj := db.GetSingleGrp(tplObj.GrpId, "")
			fileName = grpObj.Name
		}else{
			endpointObj := m.EndpointTable{Id:tplObj.EndpointId}
			db.GetEndpoint(&endpointObj)
			fileName = endpointObj.Guid
			if endpointObj.AddressAgent != "" {
				endpointExpr = endpointObj.AddressAgent
			}else {
				endpointExpr = endpointObj.Address
			}
			guidExpr = endpointObj.Guid
		}
	}
	if isGrp {
		// Build "a|b|c" alternation expressions over all group members.
		_,endpointObjs := db.GetEndpointsByGrp(tplObj.GrpId)
		if len(endpointObjs) > 0 {
			for _, v := range endpointObjs {
				if v.AddressAgent != "" {
					endpointExpr += fmt.Sprintf("%s|", v.AddressAgent)
				}else {
					endpointExpr += fmt.Sprintf("%s|", v.Address)
				}
				guidExpr += fmt.Sprintf("%s|", v.Guid)
			}
			endpointExpr = endpointExpr[:len(endpointExpr)-1]
			guidExpr = guidExpr[:len(guidExpr)-1]
		}
	}
	if fileName == "" {
		return nil
	}
	err,isExist,cObj := prom.GetConfig(fileName, isGrp)
	if err != nil {
		mid.LogError("get prom get config error", err)
		return err
	}
	rfu := []*m.RFRule{}
	if !isExist {
		cObj.Name = fileName
	}
	if len(query.Tpl) > 0 {
		if !isGrp && endpointExpr == "" && query.Tpl[len(query.Tpl)-1].ObjType == "endpoint" {
			endpointObj := m.EndpointTable{Guid:query.Tpl[len(query.Tpl)-1].ObjName}
			db.GetEndpoint(&endpointObj)
			if endpointObj.AddressAgent != "" {
				endpointExpr = endpointObj.AddressAgent
			}else {
				endpointExpr = endpointObj.Address
			}
			guidExpr = endpointObj.Guid
		}
		for _,v := range query.Tpl[len(query.Tpl)-1].Strategy {
			tmpRfu := m.RFRule{}
			tmpRfu.Alert = v.Metric
			// Normalize cond so operator and value are space separated
			// (e.g. ">=85" -> ">= 85").
			if !strings.Contains(v.Cond, " ") && v.Cond != "" {
				if strings.Contains(v.Cond, "=") {
					v.Cond = v.Cond[:2] + " " + v.Cond[2:]
				}else{
					v.Cond = v.Cond[:1] + " " + v.Cond[1:]
				}
			}
			//if strings.Contains(v.Expr, " ") {
			//	v.Expr = strings.Replace(v.Expr, " ", "", -1)
			//}
			// Substitute placeholders: groups get a regex match (=~) over
			// the alternation built above, single endpoints an exact match.
			if strings.Contains(v.Expr, "$address") {
				if isGrp {
					v.Expr = strings.Replace(v.Expr, "=\"$address\"", "=~\""+endpointExpr+"\"", -1)
				}else{
					v.Expr = strings.Replace(v.Expr, "=\"$address\"", "=\""+endpointExpr+"\"", -1)
				}
			}
			if strings.Contains(v.Expr, "$guid") {
				if isGrp {
					v.Expr = strings.Replace(v.Expr, "=\"$guid\"", "=~\""+guidExpr+"\"", -1)
				}else{
					v.Expr = strings.Replace(v.Expr, "=\"$guid\"", "=\""+guidExpr+"\"", -1)
				}
			}
			tmpRfu.Expr = fmt.Sprintf("%s %s", v.Expr, v.Cond)
			tmpRfu.For = v.Last
			tmpRfu.Labels = make(map[string]string)
			tmpRfu.Labels["strategy_id"] = fmt.Sprintf("%d", v.Id)
			tmpRfu.Annotations = m.RFAnnotation{Summary:fmt.Sprintf("{{$labels.instance}}__%s__%s__{{$value}}", v.Priority, v.Metric), Description:v.Content}
			rfu = append(rfu, &tmpRfu)
		}
		if len(query.Tpl[len(query.Tpl)-1].Strategy) == 0 {
			rfu = []*m.RFRule{}
		}
	}
	cObj.Rules = rfu
	err = prom.SetConfig(fileName, isGrp, cObj, isExist)
	if err != nil {
		mid.LogError("prom set config error", err)
	}
	return err
}
// SearchUserRole returns up to 15 role/user options matching the search
// term. Role matches are preferred; user matches only top up the list when
// fewer than 15 roles were found. Lookup errors are logged, not returned.
func SearchUserRole(c *gin.Context) {
	search := c.Query("search")
	err, results := db.SearchUserRole(search, "role")
	if err != nil {
		mid.LogError("search role error", err)
	}
	if len(results) < 15 {
		userErr, users := db.SearchUserRole(search, "user")
		if userErr != nil {
			mid.LogError("search user error", userErr)
		}
		for _, u := range users {
			if len(results) >= 15 {
				break
			}
			results = append(results, u)
		}
	}
	mid.ReturnData(c, results)
}
func UpdateTplAction(c *gin.Context) {
var param m.UpdateActionDto
if err := c.ShouldBindJSON(¶m); err==nil {
var userIds,roleIds []int
var extraMail,extraPhone []string
for _,v := range param.Accept {
tmpFlag := false
if strings.HasPrefix(v.OptionType, "user_") {
tmpId,_ := strconv.Atoi(strings.Split(v.OptionType, "_")[1])
for _,vv := range userIds {
if vv == tmpId {
tmpFlag = true
break
}
}
if !tmpFlag {
userIds = append(userIds, tmpId)
}
}
if strings.HasPrefix(v.OptionType,"role_") {
tmpId,_ := strconv.Atoi(strings.Split(v.OptionType, "_")[1])
for _,vv := range roleIds {
if vv == tmpId {
tmpFlag = true
break
}
}
if !tmpFlag {
roleIds = append(roleIds, tmpId)
}
}
if v.OptionType == "mail" {
for _,vv := range extraMail {
if vv == v.OptionValue {
tmpFlag = true
break
}
}
if !tmpFlag {
extraMail = append(extraMail, v.OptionValue)
}
}
if v.OptionType == "phone" {
for _,vv := range extraPhone {
if vv == v.OptionValue {
tmpFlag = true
break
}
}
if !tmpFlag {
extraPhone = append(extraPhone, v.OptionValue)
}
}
}
err = db.UpdateTplAction(param.TplId, userIds, roleIds, extraMail, extraPhone)
if err != nil {
mid.ReturnError(c, "Update tpl action fail ", err)
}else{
mid.ReturnSuccess(c, "Success")
}
}else{
mid.ReturnValidateFail(c, fmt.Sprintf("Parameter validation failed %v", err))
}
}
// SyncConfigHandle regenerates the prometheus rule file for the template id
// given in the "id" form value. Peer monitor instances call it to sync
// config; SaveConfigFile is invoked with fromCluster=true so the change is
// not re-broadcast.
func SyncConfigHandle(w http.ResponseWriter,r *http.Request) {
	mid.LogInfo("start sync config")
	var response mid.RespJson
	w.Header().Set("Content-Type", "application/json")
	// BUG FIX: the response must be serialized when the handler RETURNS.
	// Deferring w.Write(fmt.Sprintf(...)) directly evaluates the Sprintf
	// arguments at defer time, so the client always received the
	// zero-valued response regardless of the outcome below.
	defer func() {
		w.Write([]byte(fmt.Sprintf("{\"code\":%d,\"msg\":\"%s\",\"data\":\"%v\"}", response.Code,response.Msg,response.Data)))
	}()
	tplId,_ := strconv.Atoi(r.FormValue("id"))
	if tplId <= 0 {
		response.Code = 401
		response.Msg = "Parameter id is empty"
		return
	}
	err := SaveConfigFile(tplId, true)
	if err != nil {
		response.Code = 500
		response.Msg = "Sync save config file fail"
		response.Data = err
		return
	}
	response.Code = 200
	response.Msg = "Success"
}
func SyncConsulHandle(w http.ResponseWriter,r *http.Request) {
mid.LogInfo("start sync consul")
var response mid.RespJson
w.Header().Set("Content-Type", "application/json")
defer w.Write([]byte(fmt.Sprintf("{\"code\":%d,\"msg\":\"%s\",\"data\":\"%v\"}", response.Code,response.Msg,response.Data)))
var param m.SyncConsulDto
b,_ := ioutil.ReadAll(r.Body)
err := json.Unmarshal(b, ¶m)
if err != nil {
response.Code = 401
response.Msg = "Param json format fail"
response.Data = err
return
}
if param.Guid == "" {
response.Code = 401
response.Msg = "Guid is empty"
return
}
if param.IsRegister {
err = prom.RegisteConsul(param.Guid,param.Ip,param.Port,param.Tags,param.Interval,true)
}else{
err = prom.DeregisteConsul(param.Guid,true)
}
if err != nil {
response.Code = 500
response.Msg = "Sync consul fail"
response.Data = err
return
}
response.Code = 200
response.Msg = "Success"
}
|
[
3
] |
package _58_周赛
// reversePrefix finds the first occurrence of ch in word and reverses the
// prefix word[0..i] (inclusive) ending at that occurrence. When ch does not
// appear, word is returned unchanged.
// Example: reversePrefix("abcdefd", 'd') == "dcbaefd".
func reversePrefix(word string, ch byte) string {
	buf := []byte(word)
	for idx, r := range word {
		if byte(r) != ch {
			continue
		}
		// Reverse buf[0..idx] in place with two converging cursors.
		for lo, hi := 0, idx; lo < hi; lo, hi = lo+1, hi-1 {
			buf[lo], buf[hi] = buf[hi], buf[lo]
		}
		break
	}
	return string(buf)
}
|
[
3
] |
package resque
import (
"strings"
"time"
"github.com/hoisie/redis"
"github.com/kiddom/go-resque"
"github.com/kiddom/go-resque/driver"
)
func init() {
resque.Register("redisv2", &drv{})
}
// drv is a go-resque enqueuer backed by a hoisie/redis client.
type drv struct {
	// client is the underlying redis connection, set via SetClient.
	client *redis.Client
	driver.Enqueuer
	// schedule tracks the delayed-job sorted-set keys that Poll scans.
	// NOTE(review): written by ListPushDelay and read by the Poll goroutine
	// without synchronization — confirm callers serialize access.
	schedule map[string]struct{}
	// nameSpace is the prefix applied to all redis keys.
	nameSpace string
}
// SetClient wires the driver to a redis connection and resets its
// delayed-queue bookkeeping; name becomes the key-namespace prefix.
func (d *drv) SetClient(name string, client interface{}) {
	d.nameSpace = name
	d.schedule = make(map[string]struct{})
	d.client = client.(*redis.Client)
}
// ListPush appends jobJSON to the namespaced resque queue list and reports
// one job enqueued.
func (d *drv) ListPush(queue string, jobJSON string) (int64, error) {
	pushErr := d.client.Rpush(d.nameSpace+"queue:"+queue, []byte(jobJSON))
	return 1, pushErr
}
// ListPushDelay schedules jobJSON on the queue's sorted set, scored by the
// target time t, and records the queue so Poll will scan it.
func (d *drv) ListPushDelay(t time.Time, queue string, jobJSON string) (bool, error) {
	if _, err := d.client.Zadd(queue, []byte(jobJSON), timeToSecondsWithNanoPrecision(t)); err != nil {
		return false, err
	}
	// Inserting an existing key with an empty struct is a no-op, so no
	// membership check is needed.
	d.schedule[queue] = struct{}{}
	return true, nil
}
func timeToSecondsWithNanoPrecision(t time.Time) float64 {
return float64(t.UnixNano()) / 1000000000.0 // nanoSecondPrecision
}
// Poll starts a background goroutine that promotes due delayed jobs from
// their per-queue sorted sets onto the regular resque list queues.
func (d *drv) Poll() {
	go func(d *drv) {
		for {
			for key := range d.schedule {
				now := timeToSecondsWithNanoPrecision(time.Now())
				// Fetch jobs scheduled at or before now. The original called
				// Zrangebyscore(key+"-inf", now, 1): it corrupted the key by
				// appending "-inf" and passed an inverted (now, 1) score
				// range, so no delayed job could ever be found.
				r, _ := d.client.Zrangebyscore(key, math.Inf(-1), now)
				var jobs []string
				for _, job := range r {
					jobs = append(jobs, string(job))
				}
				if len(jobs) == 0 {
					continue
				}
				// Zrem acts as a claim: only the poller that removes the
				// member gets to enqueue it, guarding against double delivery.
				if removed, _ := d.client.Zrem(key, []byte(jobs[0])); removed {
					queue := strings.TrimPrefix(key, d.nameSpace)
					d.client.Lpush(d.nameSpace+"queue:"+queue, []byte(jobs[0]))
				}
			}
			time.Sleep(100 * time.Millisecond)
		}
	}(d)
}
|
[
6
] |
package runtime
import (
"fmt"
"reflect"
"github.com/pensando/sw/api"
)
var (
	// errNotAPIObject is returned when a value does not carry an ObjectMeta.
	errNotAPIObject = fmt.Errorf("Object is not an API Object")
	// errNotListObject is returned when a value does not carry a ListMeta.
	errNotListObject = fmt.Errorf("Object is not a List Object")
)
// GetObjectMeta returns the ObjectMeta if its an API object, error otherwise
func GetObjectMeta(obj interface{}) (*api.ObjectMeta, error) {
if obj == nil {
return nil, errNotAPIObject
}
switch t := obj.(type) {
case ObjectMetaAccessor:
if reflect.ValueOf(t) == reflect.Zero(reflect.TypeOf(t)) {
return nil, errNotAPIObject
}
if meta := t.GetObjectMeta(); meta != nil {
return meta, nil
}
}
return nil, errNotAPIObject
}
// GetListMeta returns the ListMeta if its a list object, error otherwise
func GetListMeta(obj interface{}) (*api.ListMeta, error) {
switch t := obj.(type) {
case ListMetaAccessor:
if meta := t.GetListMeta(); meta != nil {
return meta, nil
}
}
return nil, errNotListObject
}
|
[
7
] |
package goker
// Action identifies a player decision during a hand.
type Action uint8
const (
	_ Action = iota // skip 0 so the zero value is not a valid Action
	PutBlind
	SitOut
	Raise //need value
	Call //Depends on Raise
	Check
	Fold
	AllIn
)
// actionName maps each Action to its display string.
var actionName = map[Action]string{
	PutBlind : "PutBlind",
	SitOut : "SitOut",
	Call : "Call",
	Check : "Check",
	Raise : "Raise",
	Fold : "Fold",
	AllIn : "AllIn",
}
// Event identifies a stage of the hand's state machine.
type Event uint8
const (
	_ Event = iota // skip 0 so the zero value is not a valid Event
	Ante
	BeforePocketDraw
	PocketDraw
	TableDraw0
	TableDraw1
	TableDraw2
	BeforeShowdown
	Showdown
	Same //when returning from wrong Action, back to same event for another input
)
// eventName maps each Event to its display string.
var eventName = map[Event]string{
	Ante : "Ante",
	BeforePocketDraw : "BeforePocketDraw",
	PocketDraw : "PocketDraw",
	TableDraw0 : "TableDraw0",
	TableDraw1 : "TableDraw1",
	TableDraw2 : "TableDraw2",
	BeforeShowdown : "BeforeShowdown",
	Showdown : "Showdown",
	Same : "Same",
}
// validAction lists, for each event state, the player actions that are
// accepted while the game is in that state (order preserved for display).
// NOTE(review): PocketDraw and Showdown have no entry here even though they
// are defined Events — confirm whether that is intentional.
var validAction = map[Event][]Action{
	Ante:             {PutBlind, SitOut},
	BeforePocketDraw: {Check, Raise, Call, Fold, AllIn},
	TableDraw0:       {Check, Raise, Call, Fold, AllIn},
	TableDraw1:       {Call, Raise, Check, Fold, AllIn},
	TableDraw2:       {Call, Raise, Check, Fold, AllIn},
	BeforeShowdown:   {Call, Raise, Check, Fold, AllIn},
	Same:             {Call, Raise, Check, Fold, AllIn},
}

// Money represents an amount of chips.
type Money int
|
[
3
] |
package gum
import (
"fmt"
"io/ioutil"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestPaperValidation replays the worked example from the GumTree paper:
// top-down subtree matching first, then bottom-up container and recovery
// matching, checking both mapping counts and specific node pairs.
// (Fixtures are created from examples in the paper.)
func TestPaperValidation(t *testing.T) {
	src, dst := readFixtures("testdata/paper/src.json", "testdata/paper/dst.json")
	sm := newSubtreeMatcher()
	mappings := sm.Match(src, dst)
	assert.Equal(t, 10, mappings.Size())
	s := getChild(src, 0, 2, 1)
	d := getChild(dst, 0, 2, 1)
	assert.True(t, mappings.Has(s, d), "%v = %v mapping not found", s, d)
	s = getChild(src, 0, 2, 3)
	d = getChild(dst, 0, 2, 3)
	assert.True(t, mappings.Has(s, d), "%v = %v mapping not found", s, d)
	s = getChild(src, 0, 2, 4, 0, 0)
	d = getChild(dst, 0, 2, 4, 0, 0)
	assert.True(t, mappings.Has(s, d), "%v = %v mapping not found", s, d)
	s = getChild(src, 0, 2, 4, 0, 1)
	d = getChild(dst, 0, 2, 4, 0, 2, 1)
	assert.True(t, mappings.Has(s, d), "%v = %v mapping not found", s, d)
	bum := newBottomUpMatcher(mappings)
	bum.simThreshold = 0.2
	mappings = bum.Match(src, dst)
	// 19 = 10 from top-down + 5 containers + 4 recovery mappings.
	// (The original comment said 15, which contradicts the assertion below.)
	assert.Equal(t, 19, mappings.Size())
	// containers
	// NOTE(review): s and d still hold the last top-down pair here while the
	// failure message prints src/dst — possibly a stale or duplicated
	// assertion; confirm intent.
	assert.True(t, mappings.Has(s, d), "%v = %v mapping not found", src, dst)
	s = getChild(src, 0)
	d = getChild(dst, 0)
	assert.True(t, mappings.Has(s, d), "%v = %v mapping not found", s, d)
	s = getChild(src, 0, 2)
	d = getChild(dst, 0, 2)
	assert.True(t, mappings.Has(s, d), "%v = %v mapping not found", s, d)
	s = getChild(src, 0, 2, 4)
	d = getChild(dst, 0, 2, 4)
	assert.True(t, mappings.Has(s, d), "%v = %v mapping not found", s, d)
	s = getChild(src, 0, 2, 4, 0)
	d = getChild(dst, 0, 2, 4, 0)
	assert.True(t, mappings.Has(s, d), "%v = %v mapping not found", s, d)
	// recovery
	s = getChild(src, 0, 0)
	d = getChild(dst, 0, 0)
	assert.True(t, mappings.Has(s, d), "%v = %v mapping not found", s, d)
	s = getChild(src, 0, 1)
	d = getChild(dst, 0, 1)
	assert.True(t, mappings.Has(s, d), "%v = %v mapping not found", s, d)
	s = getChild(src, 0, 2, 0)
	d = getChild(dst, 0, 2, 0)
	assert.True(t, mappings.Has(s, d), "%v = %v mapping not found", s, d)
	s = getChild(src, 0, 2, 2)
	d = getChild(dst, 0, 2, 2)
	assert.True(t, mappings.Has(s, d), "%v = %v mapping not found", s, d)
}
// TestMinHeightThreshold checks how the MinHeight knob changes the number
// of mappings produced on the gumtree fixtures. Currently skipped.
func TestMinHeightThreshold(t *testing.T) {
	// FIXME
	t.Skip("it doesn't work")
	src, dst := readFixtures("testdata/gumtree/src.json", "testdata/gumtree/dst.json")
	m := NewMatcher()
	m.MinHeight = 0
	m.MaxSize = 0
	mappings := m.Match(src, dst)
	assert.Len(t, mappings, 5)
	m = NewMatcher()
	m.MinHeight = 1
	m.MaxSize = 0
	mappings = m.Match(src, dst)
	assert.Len(t, mappings, 4)
}
// TestMinSizeThreshold checks the mapping count on the gumtree fixtures
// when only the MaxSize threshold is set.
func TestMinSizeThreshold(t *testing.T) {
	src, dst := readFixtures("testdata/gumtree/src.json", "testdata/gumtree/dst.json")
	matcher := NewMatcher()
	matcher.MinHeight = 0
	matcher.MaxSize = 5
	assert.Len(t, matcher.Match(src, dst), 6)
}
// readFixtures loads and parses the two JSON tree fixtures, panicking on
// any read or parse failure (test helper).
func readFixtures(fSrc, fDst string) (*Tree, *Tree) {
	mustRead := func(path string) string {
		data, err := ioutil.ReadFile(path)
		if err != nil {
			panic(err)
		}
		return string(data)
	}
	mustParse := func(doc string) *Tree {
		tree, err := treeFromJSON(doc)
		if err != nil {
			panic(err)
		}
		return tree
	}
	// Read both files first, then parse both, preserving the original
	// failure order.
	srcDoc := mustRead(fSrc)
	dstDoc := mustRead(fDst)
	return mustParse(srcDoc), mustParse(dstDoc)
}
// treePrint dumps t and all its descendants, one node per line, indenting
// each level with an extra dash (debug helper).
func treePrint(t *Tree, tab int) {
	fmt.Println(strings.Repeat("-", tab), t, t.GetID())
	for _, child := range t.Children {
		treePrint(child, tab+1)
	}
}
// getChild walks the tree from t following the given child indexes and
// returns the node it lands on.
func getChild(t *Tree, path ...int) *Tree {
	node := t
	for _, idx := range path {
		node = node.Children[idx]
	}
	return node
}
// TestNotModified verifies that neither Match nor Patch mutates its input
// trees, by deep-comparing them against freshly loaded copies after each
// operation.
func TestNotModified(t *testing.T) {
	orgSrc, orgDst := readFixtures("testdata/paper/src.json", "testdata/paper/dst.json")
	src, dst := readFixtures("testdata/paper/src.json", "testdata/paper/dst.json")
	mappings := Match(src, dst)
	deepCompare(t, orgSrc, src)
	deepCompare(t, orgDst, dst)
	Patch(src, dst, mappings)
	deepCompare(t, orgSrc, src)
	deepCompare(t, orgDst, dst)
}
// deepCompare asserts that trees a and b are structurally identical: same
// node count and, node by node (in getTrees order), equal size, height,
// id, type, value and child count.
func deepCompare(t *testing.T, a *Tree, b *Tree) {
	aTrees := getTrees(a)
	bTrees := getTrees(b)
	require.Equal(t, len(bTrees), len(aTrees))
	for i, bT := range bTrees {
		aT := aTrees[i]
		require.Equal(t, bT.size, aT.size)
		require.Equal(t, bT.height, aT.height)
		// The original compared the roots (a, b) on every iteration here;
		// compare the per-node pair instead so mismatches deeper in the
		// tree are actually detected.
		require.Equal(t, bT.id, aT.id)
		require.Equal(t, bT.Type, aT.Type)
		require.Equal(t, bT.Value, aT.Value)
		require.Equal(t, len(bT.Children), len(aT.Children))
	}
}
|
[
6
] |
package main
import (
"fmt"
"gormangendr/simple"
"log"
)
// ClientConnect dials target and hands the resulting connection to
// HandleClient on a new goroutine.
func ClientConnect(target string) {
	fmt.Println("0")
	conn, err := simple.Dial(target)
	// NOTE(review): package simple apparently reports errors as strings,
	// with "" meaning success — confirm that convention.
	if err != "" {
		log.Fatalf("did not connect: %v", err)
		return // unreachable: log.Fatalf exits the process
	}
	go HandleClient(conn)
}
// HandleClient performs the protocol handshake on conn and prints the
// peer's reported version and extra parameters.
func HandleClient(conn *simple.ClientConn) {
	nc := simple.NewNodeClient(conn)
	fmt.Println("2", nc)
	r, err := nc.Handshake()
	// NOTE(review): string-typed error, "" means success — see ClientConnect.
	if err != "" {
		log.Fatalf("could not: %v", err)
	}
	fmt.Println("|", r.Version, r.ExtraParams)
	fmt.Println("3")
	//nodeId := ReadPublicKey()
	//nc.ClientAuth(nodeId, sig)
	//&request.node_id, &request.signature
}
|
[
3
] |
package share_mem
import (
"sync"
"testing"
"time"
)
// TestShareMemory demonstrates an unsynchronized counter (goroutine-unsafe):
// 5000 goroutines increment it concurrently, so the logged total is
// typically below 5000. The data race is intentional (teaching example);
// run with -race to see it reported.
func TestShareMemory(t *testing.T) {
	counter := 0
	for i := 0; i < 5000; i++ {
		go func() {
			counter++
		}()
	}
	// Sleep is a crude stand-in for real synchronization; see the WaitGroup
	// variant below for the proper approach.
	time.Sleep(time.Millisecond * 500)
	t.Log("counter = ", counter)
}
// TestShareMemorySafe increments the shared counter under a mutex
// (goroutine-safe); once all goroutines have run the result is 5000.
func TestShareMemorySafe(t *testing.T) {
	var mut sync.Mutex
	counter := 0
	for i := 0; i < 5000; i++ {
		go func() {
			mut.Lock()
			counter++
			mut.Unlock()
		}()
	}
	// Crude wait for the goroutines; the WaitGroup variant below avoids it.
	time.Sleep(time.Millisecond * 500)
	t.Log("counter = ", counter)
}
// TestShareMemoryWaitGroup combines a mutex for the counter with a
// WaitGroup to wait for all 5000 goroutines, avoiding the sleep used by
// the other variants.
func TestShareMemoryWaitGroup(t *testing.T) {
	var mut sync.Mutex
	var wg sync.WaitGroup
	counter := 0
	for i := 0; i < 5000; i++ {
		wg.Add(1)
		go func() {
			mut.Lock()
			counter++
			mut.Unlock()
			wg.Done()
		}()
	}
	wg.Wait()
	t.Log("counter = ", counter)
}
|
[
3
] |
package imgcache
import (
"html"
"net/http"
"strings"
"sync/atomic"
)
//func Handler(w http.ResponseWriter, r *http.Request) {
// MainPage(w, r)
//}
// MainPage serves the image named by the url query parameter, via the
// cached fetcher urlGet. Non-GET requests get 503, non-image payloads 404.
func MainPage(w http.ResponseWriter, r *http.Request) {
	path := html.EscapeString(r.URL.Path)
	log("mainPage", path)
	if r.Method != "GET" {
		log("wrong params")
		w.WriteHeader(503)
		return
	}
	url := r.URL.Query().Get("url")
	// Bare health-check request: no url and root path.
	if path == "/" && url == "" {
		w.WriteHeader(200)
		return
	}
	bin, err := urlGet(url)
	if err != nil {
		log("urlGet err", err)
		atomic.AddUint64(&reqError, 1)
		w.WriteHeader(404)
		return
	}
	// Sniff the content type from (at most) the first 512 bytes.
	head := make([]byte, 512)
	copy(head, bin)
	cntType := strings.ToLower(http.DetectContentType(head))
	if !strings.HasPrefix(cntType, "image") {
		log("not image err", url)
		w.WriteHeader(404)
		atomic.AddUint64(&reqError, 1)
		return
	}
	w.Header().Set("Content-Type", cntType)
	atomic.AddUint64(&reqSuccess, 1)
	w.Write(bin)
}
// MainPageNoCache serves the image named by the url query parameter,
// loading it on the fly via imgLoad (no cache). Non-GET requests get 503,
// non-image payloads 404.
func MainPageNoCache(w http.ResponseWriter, r *http.Request) {
	path := html.EscapeString(r.URL.Path)
	log("mainPage", path)
	if r.Method != "GET" {
		log("wrong params")
		w.WriteHeader(503)
		return
	}
	url := r.URL.Query().Get("url")
	// Bare health-check request: no url and root path.
	if path == "/" && url == "" {
		w.WriteHeader(200)
		return
	}
	bin, err := imgLoad(url)
	if err != nil {
		log("imgLoad err", err)
		atomic.AddUint64(&reqError, 1)
		w.WriteHeader(404)
		return
	}
	// Sniff the content type from (at most) the first 512 bytes.
	head := make([]byte, 512)
	copy(head, bin)
	cntType := strings.ToLower(http.DetectContentType(head))
	if !strings.HasPrefix(cntType, "image") {
		log("not image err", url)
		w.WriteHeader(404)
		atomic.AddUint64(&reqError, 1)
		return
	}
	w.Header().Set("Content-Type", cntType)
	atomic.AddUint64(&reqSuccess, 1)
	w.Write(bin)
}
|
[
3
] |
package mdns
import (
"context"
"fmt"
"net"
"strings"
"sync"
"time"
"golang.org/x/net/dns/dnsmessage"
)
// ServiceEntry is returned after we query for a service
type ServiceEntry struct {
Name string
Host string
AddrV4 net.IP
AddrV6 net.IP
Port int
Info string
InfoFields []string
Addr net.IP // @Deprecated
hasTXT bool
}
// complete is used to check if we have all the info we need
func (s *ServiceEntry) complete() bool {
return (s.AddrV4 != nil || s.AddrV6 != nil || s.Addr != nil) && s.Port != 0 && s.hasTXT
}
func (s *ServiceEntry) String() string {
fields := make([]string, 0)
if s.Name != "" {
fields = append(fields, fmt.Sprintf("Name:%s", s.Name))
}
if s.Host != "" {
fields = append(fields, fmt.Sprintf("Host:%s", s.Host))
}
if s.AddrV4 != nil {
fields = append(fields, fmt.Sprintf("AddrV4:%v", s.AddrV4))
}
if s.AddrV6 != nil {
fields = append(fields, fmt.Sprintf("Host:%v", s.AddrV6))
}
if s.Port != 0 {
fields = append(fields, fmt.Sprintf("Port:%d", s.Port))
}
if s.Info != "" {
fields = append(fields, fmt.Sprintf("Info:%s", s.Info))
}
if s.InfoFields != nil && len(s.InfoFields) != 0 {
fields = append(fields, fmt.Sprintf("InfoFields:%v", s.InfoFields))
}
fields = append(fields, fmt.Sprintf("hasTXT:%v", s.hasTXT))
return strings.Join(fields, ",")
}
// QueryParam is used to customize how a Lookup is performed.
// Zero values for Domain and Timeout are replaced with defaults by Query.
type QueryParam struct {
	Service string // Service to lookup
	Domain string // Lookup domain, default "local"
	Timeout time.Duration // Lookup timeout, default 1 second
	Interface *net.Interface // Multicast interface to use
	Entries chan *ServiceEntry // Entries Channel
	WantUnicastResponse bool // Unicast response desired, as per 5.4 in RFC
}
// DefaultParams is used to return a default set of QueryParam's for the
// given service: "local" domain, one-second timeout, unbuffered entries
// channel.
func DefaultParams(service string) *QueryParam {
	params := &QueryParam{
		Service: service,
		Domain:  "local",
		Timeout: time.Second,
		Entries: make(chan *ServiceEntry),
	}
	params.WantUnicastResponse = false // TODO(reddaly): Change this default.
	return params
}
// Query looks up a given service, in a domain, waiting at most
// for a timeout before finishing the query. The results are streamed
// to a channel. Sends will not block, so clients should make sure to
// either read or buffer.
// NOTE(review): the created client is never closed here (the defer is
// commented out) — confirm whether the connection is owned by the caller.
func Query(params *QueryParam) (dnsmessage.ResourceHeader, net.Addr, error) {
	// Create a new client
	client, err := newClient(params.Interface)
	if err != nil {
		return dnsmessage.ResourceHeader{}, nil, err
	}
	//defer client.Close()
	// Set the multicast interface
	// if params.Interface != nil {
	// 	if err := client.setInterface(params.Interface); err != nil {
	// 		return err
	// 	}
	// }
	// Ensure defaults are set
	if params.Domain == "" {
		params.Domain = "local"
	}
	if params.Timeout == 0 {
		params.Timeout = time.Second
	}
	// Run the query
	return client.query(params)
}
// Client provides a query interface that can be used to
// search for service providers using mDNS
type client struct {
	conn *Conn
	// closed is guarded by closeLock and makes Close idempotent.
	closed bool
	closedCh chan struct{} // TODO(reddaly): This doesn't appear to be used.
	closeLock sync.Mutex
}
// NewClient creates a new mdns Client that can be used to query
// for records
func newClient(iface *net.Interface) (*client, error) {
	// TODO(reddaly): At least attempt to bind to the port required in the spec.
	// Create a IPv4 listener
	conn, err := NewConn(&Config{Zone: nil})
	if err != nil {
		return nil, fmt.Errorf("[ERR] mdns: Failed to create client conn: %v", err)
	}
	// NOTE(review): iface is currently unused — the multicast-interface
	// selection in Query is commented out; confirm whether it should be
	// forwarded to NewConn.
	c := &client{
		conn: conn,
		closedCh: make(chan struct{}),
	}
	return c, nil
}
// getIsClosed reports whether Close has already run, reading the flag
// under the close lock.
func (c *client) getIsClosed() bool {
	c.closeLock.Lock()
	closed := c.closed
	c.closeLock.Unlock()
	return closed
}
// Close is used to cleanup the client. It is idempotent: subsequent calls
// return nil without re-closing.
func (c *client) Close() error {
	c.closeLock.Lock()
	defer c.closeLock.Unlock()
	if c.closed {
		return nil
	}
	c.closed = true
	// Signal watchers first, then release the underlying connection.
	close(c.closedCh)
	if c.conn != nil {
		c.conn.Close()
	}
	return nil
}
// query is used to perform a lookup and stream results, delegating to the
// underlying connection.
func (c *client) query(params *QueryParam) (dnsmessage.ResourceHeader, net.Addr, error) {
	// The original discarded the cancel func (a context leak flagged by
	// go vet). Conn.Query is invoked synchronously here, so cancelling on
	// return releases the context's resources once the lookup completes —
	// confirm Conn.Query does not retain ctx after returning.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	return c.conn.Query(ctx, params)
}
|
[
3
] |
// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chi
import (
"fmt"
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
chopmodel "github.com/altinity/clickhouse-operator/pkg/model"
)
// getConfigMap gets ConfigMap either by namespaced name or by labels
// (the host selector derived from obj's metadata).
func (c *Controller) getConfigMap(obj *meta.ObjectMeta) (*core.ConfigMap, error) {
	// Check whether object with such name already exists in k8s
	res, err := c.configMapLister.ConfigMaps(obj.Namespace).Get(obj.Name)
	if res != nil {
		// Object found by name
		return res, nil
	}
	if apierrors.IsNotFound(err) {
		// Object with such name not found
		// Try to find by labels
		// NOTE: set/err here deliberately shadow the outer err; any failure
		// on the label path falls through and returns the original
		// NotFound error below.
		if set, err := chopmodel.GetSelectorHostFromObjectMeta(obj); err == nil {
			selector := labels.SelectorFromSet(set)
			objects, err := c.configMapLister.ConfigMaps(obj.Namespace).List(selector)
			if err != nil {
				return nil, err
			}
			if len(objects) == 1 {
				// Object found by labels
				return objects[0], nil
			}
			// NOTE(review): zero or multiple label matches fall through to
			// NotFound — getStatefulSet reports multiple matches as a
			// distinct error; confirm the asymmetry is intended.
		}
	}
	// Object not found
	return nil, err
}
// getService gets Service either by namespaced name or by labels
// (the host selector derived from obj's metadata).
func (c *Controller) getService(obj *meta.ObjectMeta) (*core.Service, error) {
	// Check whether object with such name already exists in k8s
	res, err := c.serviceLister.Services(obj.Namespace).Get(obj.Name)
	if res != nil {
		// Object found by name
		return res, nil
	}
	if apierrors.IsNotFound(err) {
		// Object with such name not found
		// Try to find by labels
		// NOTE: set/err here deliberately shadow the outer err; any failure
		// on the label path falls through and returns the original
		// NotFound error below.
		if set, err := chopmodel.GetSelectorHostFromObjectMeta(obj); err == nil {
			selector := labels.SelectorFromSet(set)
			objects, err := c.serviceLister.Services(obj.Namespace).List(selector)
			if err != nil {
				return nil, err
			}
			if len(objects) == 1 {
				// Object found by labels
				return objects[0], nil
			}
		}
	}
	// Object not found
	return nil, err
}
// getStatefulSet gets StatefulSet either by namespaced name or by labels
// (the host selector derived from obj's metadata). Unlike the ConfigMap
// and Service getters, selector failures and multiple matches are
// reported as errors rather than swallowed.
func (c *Controller) getStatefulSet(obj *meta.ObjectMeta) (*apps.StatefulSet, error) {
	// Check whether object with such name already exists in k8s
	res, err := c.statefulSetLister.StatefulSets(obj.Namespace).Get(obj.Name)
	if res != nil {
		// Object found by name
		return res, nil
	}
	if apierrors.IsNotFound(err) {
		// Object with such name not found
		// Try to find by labels
		if set, err := chopmodel.GetSelectorHostFromObjectMeta(obj); err != nil {
			return nil, err
		} else {
			selector := labels.SelectorFromSet(set)
			if objects, err := c.statefulSetLister.StatefulSets(obj.Namespace).List(selector); err != nil {
				return nil, err
			} else if len(objects) == 1 {
				// Object found by labels
				return objects[0], nil
			} else if len(objects) > 1 {
				// Object found by labels
				return nil, fmt.Errorf("ERROR too much objects returned by selector")
			} else {
				// Zero? Fall through and return IsNotFound() error
			}
		}
	}
	// Object not found
	return nil, err
}
// GetChiByObjectMeta resolves the ClickHouseInstallation that generated
// the object described by objectMeta, using the CHI name recorded in its
// metadata.
func (c *Controller) GetChiByObjectMeta(objectMeta *meta.ObjectMeta) (*chiv1.ClickHouseInstallation, error) {
	chiName, err := chopmodel.GetChiNameFromObjectMeta(objectMeta)
	if err != nil {
		// Fixed grammar of the original message ("does not generated by").
		return nil, fmt.Errorf("ObjectMeta %s is not generated by CHI: %v", objectMeta.Name, err)
	}
	return c.chiLister.ClickHouseInstallations(objectMeta.Namespace).Get(chiName)
}
|
[
5
] |
// +build noscalar
//
// This implementation of Value disables special casing of ints, floats and
// bools. It causes more memory allocations.
package runtime
// A Value is a runtime value.
// In this noscalar build every value (including ints, floats and bools) is
// boxed in the empty interface, at the cost of extra allocations.
type Value struct {
	iface interface{}
}
// AsValue returns a Value for the passed interface.
func AsValue(i interface{}) Value {
	return Value{iface: i}
}
// Interface turns the Value into an interface.
func (v Value) Interface() interface{} {
	return v.iface
}
// IntValue returns a Value holding the given arg.
func IntValue(n int64) Value {
	return Value{iface: n}
}
// FloatValue returns a Value holding the given arg.
func FloatValue(f float64) Value {
	return Value{iface: f}
}
// BoolValue returns a Value holding the given arg.
func BoolValue(b bool) Value {
	return Value{iface: b}
}
// StringValue returns a Value holding the given arg.
func StringValue(s string) Value {
	return Value{iface: s}
}
// TableValue returns a Value holding the given arg.
func TableValue(t *Table) Value {
	return Value{iface: t}
}
// FunctionValue returns a Value holding the given arg.
func FunctionValue(c Callable) Value {
	return Value{iface: c}
}
// ContValue returns a Value holding the given arg.
func ContValue(c Cont) Value {
	return Value{iface: c}
}
// ArrayValue returns a Value holding the given arg.
func ArrayValue(a []Value) Value {
	return Value{iface: a}
}
// CodeValue returns a Value holding the given arg.
func CodeValue(c *Code) Value {
	return Value{iface: c}
}
// ThreadValue returns a Value holding the given arg.
func ThreadValue(t *Thread) Value {
	return Value{iface: t}
}
// LightUserDataValue returns a Value holding the given arg.
func LightUserDataValue(d LightUserData) Value {
	return Value{iface: d}
}
// UserDataValue returns a Value holding the given arg.
func UserDataValue(u *UserData) Value {
	return Value{iface: u}
}
// NilValue is a value holding Nil (the zero Value).
var NilValue = Value{}
// Type returns the ValueType of v.
// NOTE(review): []Value (see ArrayValue) and LightUserData are not listed
// below and therefore report UnknownType — confirm that is intended.
func (v Value) Type() ValueType {
	if v.iface == nil {
		return NilType
	}
	switch v.iface.(type) {
	case int64:
		return IntType
	case float64:
		return FloatType
	case bool:
		return BoolType
	case string:
		return StringType
	case *Table:
		return TableType
	case *Code:
		return CodeType
	case Callable:
		return FunctionType
	case *Thread:
		return ThreadType
	case *UserData:
		return UserDataType
	default:
		return UnknownType
	}
}
// NumberType return the ValueType of v if it is a number (IntType or
// FloatType), otherwise UnknownType.
func (v Value) NumberType() ValueType {
	switch v.iface.(type) {
	case int64:
		return IntType
	case float64:
		return FloatType
	}
	return UnknownType
}
// AsInt returns v as a int64 (or panics).
func (v Value) AsInt() int64 {
	return v.iface.(int64)
}
// AsFloat returns v as a float64 (or panics).
func (v Value) AsFloat() float64 {
	return v.iface.(float64)
}
// AsBool returns v as a bool (or panics).
func (v Value) AsBool() bool {
	return v.iface.(bool)
}
// AsString returns v as a string (or panics).
func (v Value) AsString() string {
	return v.iface.(string)
}
// AsTable returns v as a *Table (or panics).
func (v Value) AsTable() *Table {
	return v.iface.(*Table)
}
// AsCont returns v as a Cont (or panics).
func (v Value) AsCont() Cont {
	return v.iface.(Cont)
}
// AsArray returns v as a []Value (or panics).
func (v Value) AsArray() []Value {
	return v.iface.([]Value)
}
// AsClosure returns v as a *Closure (or panics).
func (v Value) AsClosure() *Closure {
	return v.iface.(*Closure)
}
// AsCode returns v as a *Code (or panics).
func (v Value) AsCode() *Code {
	return v.iface.(*Code)
}
// AsUserData returns v as a *UserData (or panics).
func (v Value) AsUserData() *UserData {
	return v.iface.(*UserData)
}
// AsFunction returns v as a Callable (or panics).
func (v Value) AsFunction() Callable {
	return v.iface.(Callable)
}
// TryInt converts v to type int64 if possible (ok is false otherwise).
func (v Value) TryInt() (n int64, ok bool) {
	n, ok = v.iface.(int64)
	return
}
// TryFloat converts v to type float64 if possible (ok is false otherwise).
func (v Value) TryFloat() (n float64, ok bool) {
	n, ok = v.iface.(float64)
	return
}
// TryString converts v to type string if possible (ok is false otherwise).
func (v Value) TryString() (s string, ok bool) {
	s, ok = v.iface.(string)
	return
}
// TryCallable converts v to type Callable if possible (ok is false otherwise).
func (v Value) TryCallable() (c Callable, ok bool) {
	c, ok = v.iface.(Callable)
	return
}
// TryClosure converts v to type *Closure if possible (ok is false otherwise).
func (v Value) TryClosure() (c *Closure, ok bool) {
	c, ok = v.iface.(*Closure)
	return
}
// TryThread converts v to type *Thread if possible (ok is false otherwise).
func (v Value) TryThread() (t *Thread, ok bool) {
	t, ok = v.iface.(*Thread)
	return
}
// TryTable converts v to type *Table if possible (ok is false otherwise).
func (v Value) TryTable() (t *Table, ok bool) {
	t, ok = v.iface.(*Table)
	return
}
// TryUserData converts v to type *UserData if possible (ok is false otherwise).
func (v Value) TryUserData() (u *UserData, ok bool) {
	u, ok = v.iface.(*UserData)
	return
}
// TryBool converts v to type bool if possible (ok is false otherwise).
func (v Value) TryBool() (b bool, ok bool) {
	b, ok = v.iface.(bool)
	return
}
// TryCont converts v to type Cont if possible (ok is false otherwise).
func (v Value) TryCont() (c Cont, ok bool) {
	c, ok = v.iface.(Cont)
	return
}
// TryCode converts v to type *Code if possible (ok is false otherwise).
func (v Value) TryCode() (c *Code, ok bool) {
	c, ok = v.iface.(*Code)
	return
}
// IsNil returns true if v is nil.
func (v Value) IsNil() bool {
	return v.iface == nil
}
|
[
6
] |
package main
import (
"fmt"
"net/http"
"strings"
"time"
"github.com/PuerkitoBio/goquery"
"github.com/pkg/errors"
)
// PrincetonReview scrapes college statistics from princetonreview.com,
// throttled by a shared ticker.
type PrincetonReview struct {
	// ticker gates outgoing requests to one per rate-limit interval.
	ticker <-chan time.Time
}

// NewPrincetonReview returns a scraper that issues at most one HTTP
// request per rateLimit.
// NOTE(review): time.Tick's ticker is never stopped and cannot be garbage
// collected — acceptable for a process-lifetime scraper, but confirm.
func NewPrincetonReview(rateLimit time.Duration) *PrincetonReview {
	return &PrincetonReview{
		ticker: time.Tick(rateLimit),
	}
}
// FetchInfo downloads the Princeton Review page for c and fills in any
// admission statistics that are still unset. Panics raised by the
// getPrinceton* parse helpers are converted to an error via recover.
func (pr *PrincetonReview) FetchInfo(c *College) (err error) {
	url := fmt.Sprintf("https://www.princetonreview.com/college/x-%d", c.PrincetonReviewId)
	<-pr.ticker // rate limit
	res, fetchErr := http.Get(url)
	if fetchErr != nil {
		return fetchErr
	}
	// The original leaked the response body; close it so the underlying
	// connection can be reused.
	defer res.Body.Close()
	doc, htmlParseErr := goquery.NewDocumentFromReader(res.Body)
	if htmlParseErr != nil {
		return htmlParseErr
	}
	// Recover from any parse errors (err is a named result so the deferred
	// closure can overwrite it).
	defer func() {
		if panicked := recover(); panicked != nil {
			err = errors.Errorf("princeton parse error: %v", panicked)
		}
	}()
	// lock college for editing
	c.Lock()
	defer c.Unlock()
	DefaultInt(&c.NumApplicants, getPrincetonInt(doc, "Applicants"))
	DefaultFloat(&c.AcceptanceRate, getPrincetonFloat(doc, "Acceptance Rate"))
	DefaultFloat(&c.GPAAverage, getPrincetonFloat(doc, "Average HS GPA"))
	// ACT range
	ACTRangeLow, ACTRangeHigh := getPrincetonIntRange(doc, "ACT Composite")
	DefaultInt(&c.ACTRangeLow, ACTRangeLow)
	DefaultInt(&c.ACTRangeHigh, ACTRangeHigh)
	return nil
}
// getPrincetonItem finds the stat block whose first child's text matches
// label (case-insensitively) and returns the trimmed text of its value
// element, or nil when the label is absent.
func getPrincetonItem(root *goquery.Document, label string) *string {
	parent := root.Find("div.col-sm-4").FilterFunction(func(i int, el *goquery.Selection) bool {
		siblings := el.Children()
		if siblings.Length() <= 1 {
			return false
		}
		labelEl := siblings.Slice(0, 1) // first child
		// EqualFold replaces the original ToLower==ToLower comparison:
		// same result for these ASCII labels, without two allocations.
		return strings.EqualFold(labelEl.Text(), label)
	})
	if parent.Length() == 0 {
		return nil
	}
	value := strings.TrimSpace(parent.Find("div:last-child").Text())
	return &value
}
// getPrincetonInt reads the labelled stat and parses it as an int after
// stripping number formatting. Returns nil when the label is absent.
func getPrincetonInt(root *goquery.Document, label string) *int {
	raw := getPrincetonItem(root, label)
	if raw == nil {
		return nil
	}
	parsed := MustParseInt(TrimFormattedNumber(*raw))
	return &parsed
}
// getPrincetonIntRange reads a labelled "lo - hi" stat and parses both
// bounds. Returns (nil, nil) when the label is absent; panics (caught by
// FetchInfo's recover) when the value is not a two-part range.
func getPrincetonIntRange(root *goquery.Document, label string) (low *int, hi *int) {
	raw := getPrincetonItem(root, label)
	if raw == nil {
		return nil, nil
	}
	parts := strings.Split(*raw, " - ")
	if len(parts) != 2 {
		panic(errors.Errorf("couldn't split range correctly for '%s'", label))
	}
	lowVal, hiVal := MustParseInt(parts[0]), MustParseInt(parts[1])
	return &lowVal, &hiVal
}
// getPrincetonFloat reads the labelled stat and parses it as a float after
// stripping number formatting. Returns nil when the label is absent.
func getPrincetonFloat(root *goquery.Document, label string) *float64 {
	raw := getPrincetonItem(root, label)
	if raw == nil {
		return nil
	}
	parsed := MustParseFloat64(TrimFormattedNumber(*raw))
	return &parsed
}
|
[
6
] |
package storage
import (
"runtime"
"sync"
"testing"
"github.com/stretchr/testify/assert"
)
// TestConcurrentMapSingleClientStoreAndLoad exercises Store/Load from a
// single goroutine: present keys return (value, true), absent keys
// return false.
func TestConcurrentMapSingleClientStoreAndLoad(t *testing.T) {
	m := NewGenericConcurrentMap()
	m.Store("foo", "bar")
	m.Store("foo2", "2")
	val, ok := m.Load("foo")
	assert.Equal(t, ok, true)
	assert.Equal(t, val, "bar")
	val, ok = m.Load("foo2")
	// The original never checked the ok flag of this second Load.
	assert.Equal(t, ok, true)
	assert.Equal(t, val, "2")
	_, ok = m.Load("foo3")
	assert.Equal(t, ok, false)
}
// TestConcurrentSingleClientMapDelete exercises Delete from a single
// goroutine: deleting a present key reports true, an absent key false.
func TestConcurrentSingleClientMapDelete(t *testing.T) {
	m := NewGenericConcurrentMap()
	m.Store("foo", "bar")
	m.Store("foo2", "2")
	ok := m.Delete("foo")
	assert.Equal(t, ok, true)
	ok = m.Delete("foo2")
	assert.Equal(t, ok, true)
	ok = m.Delete("foo3")
	assert.Equal(t, ok, false)
}
// reader loads key from g and forwards the value on c (test helper; the
// *testing.T parameter is kept for interface symmetry with the tests).
func reader(t *testing.T, g *GenericConcurrentMap, c chan string, key string) {
	value, _ := g.Load(key)
	c <- value
}
// writer stores value under key in g, then signals completion by sending
// the written value on c (test helper).
func writer(g *GenericConcurrentMap, c chan string, key string, value string) {
	g.Store(key, value)
	c <- value
}
// TestConcurrentMapAccessMultipleClients exercises one writer and two
// readers: first fully serialized through channels, then with overlapping
// reads around a single write.
func TestConcurrentMapAccessMultipleClients(t *testing.T) {
	runtime.GOMAXPROCS(4)
	// Single writer, 2 readers
	// Ideas for this test are taken from https://golang.org/src/runtime/rwmutex_test.go
	m := NewGenericConcurrentMap()
	// Store initial value
	m.Store("foo", "omg")
	c := make(chan string, 1)
	done := make(chan string)
	// Enforce sequential access via channels
	go reader(t, m, c, "foo")
	assert.Equal(t, <-c, "omg")
	go reader(t, m, c, "foo")
	assert.Equal(t, <-c, "omg")
	go writer(m, done, "foo", "lol")
	<-done
	go reader(t, m, c, "foo")
	assert.Equal(t, <-c, "lol")
	go reader(t, m, c, "foo")
	assert.Equal(t, <-c, "lol")
	// Try concurrent reads without waiting, but waiting only on write
	m.Store("foo", "lol")
	go reader(t, m, c, "foo")
	go reader(t, m, c, "foo")
	go writer(m, done, "foo", "lol2")
	go reader(t, m, c, "foo")
	<-done
	go reader(t, m, c, "foo")
	// NOTE(review): the first three receives are expected to observe "lol"
	// and the last "lol2"; this relies on goroutine scheduling around the
	// unsynchronized third read and may be racy in theory — confirm.
	for i := 0; i < 3; i++ {
		val := <-c
		assert.Equal(t, val, "lol")
	}
	assert.Equal(t, <-c, "lol2")
}
// TestConcurrentMapWriteMultipleWriters runs two racing writers on the
// same key and checks that a read after each writer's completion signal
// observes the value that writer reported.
func TestConcurrentMapWriteMultipleWriters(t *testing.T) {
	m := NewGenericConcurrentMap()
	done := make(chan string)
	c := make(chan string, 1)
	// We need this variable to hold the first value that is written. Because goroutines
	// can run concurrently, we don't know which write will succeed. By storing the return
	// value from write, we know which value to compare against
	var curr string
	// Two concurrent writers. Any may win first because we are only waiting for one
	go writer(m, done, "foo", "lol")
	go writer(m, done, "foo", "lol2")
	curr = <-done
	go reader(t, m, c, "foo")
	assert.Equal(t, <-c, curr)
	// If we now assert a reader, we may get lol or lol2, because we are not waiting on done.
	// We have no way of knowing which one without the wait
	curr = <-done
	go reader(t, m, c, "foo")
	assert.Equal(t, <-c, curr)
}
// TestConcurrentMapWriteAndDelete orders a Store and a Delete both ways
// using a WaitGroup: delete-after-store must succeed, delete-before-store
// must report false.
// NOTE(review): the test returns without waiting for the second pair of
// goroutines, so their assertions may run after the test completes (or
// not at all) — confirm whether a final synchronization is needed.
func TestConcurrentMapWriteAndDelete(t *testing.T) {
	m := NewGenericConcurrentMap()
	var wg sync.WaitGroup
	wg.Add(1)
	// If we schedule one after each other, it may fail.
	// There is no guarantee that write will finish first
	// Here we use a waitgroup to wait for counter to go to zero
	// Run write first
	go func() {
		m.Store("foo", "2")
		wg.Done()
	}()
	go func() {
		wg.Wait()
		// Waitgroup counter is now zero
		ok := m.Delete("foo")
		assert.Equal(t, ok, true)
	}()
	wg.Add(1)
	// Now run delete first
	go func() {
		wg.Wait()
		m.Store("foo", "2")
	}()
	go func() {
		ok := m.Delete("foo")
		assert.Equal(t, ok, false)
		wg.Done()
	}()
}
|
[
6
] |
package main
import (
"fmt"
"math"
"os"
"strconv"
"strings"
)
// Instruction is a single move: a turn direction ("R" or "L") followed by
// a number of steps.
type Instruction struct {
	Direction string
	Steps     int
}

// InstructionParser turns a raw instruction string into Instructions.
type InstructionParser interface {
	Parse(string) []Instruction
}

// BunnyInstructionParser parses the Easter Bunny HQ instruction format:
// comma-separated tokens like "R2, L3".
type BunnyInstructionParser struct{}

// NewBunnyInstructionParser returns a ready-to-use parser.
func NewBunnyInstructionParser() *BunnyInstructionParser {
	return &BunnyInstructionParser{}
}

// Parse splits in on ", " and decodes each token's leading letter as the
// direction and the remainder as the step count.
func (bip *BunnyInstructionParser) Parse(in string) []Instruction {
	var result []Instruction
	for _, token := range strings.Split(in, ", ") {
		count, _ := strconv.ParseInt(token[1:], 10, 64)
		result = append(result, Instruction{Direction: token[:1], Steps: int(count)})
	}
	return result
}
// Compass is a cardinal heading that knows how to rotate itself.
type Compass interface {
	Turn(string) Compass
}

// The four cardinal headings.
type North struct{}
type East struct{}
type South struct{}
type West struct{}

// Turn rotates clockwise for "R" and counter-clockwise otherwise.
func (n *North) Turn(dir string) Compass {
	switch dir {
	case "R":
		return &East{}
	default:
		return &West{}
	}
}

// Turn rotates clockwise for "R" and counter-clockwise otherwise.
func (e *East) Turn(dir string) Compass {
	switch dir {
	case "R":
		return &South{}
	default:
		return &North{}
	}
}

// Turn rotates clockwise for "R" and counter-clockwise otherwise.
func (s *South) Turn(dir string) Compass {
	switch dir {
	case "R":
		return &West{}
	default:
		return &East{}
	}
}

// Turn rotates clockwise for "R" and counter-clockwise otherwise.
func (w *West) Turn(dir string) Compass {
	switch dir {
	case "R":
		return &North{}
	default:
		return &South{}
	}
}
// Point is a 2-D grid coordinate.
type Point struct {
	X int
	Y int
}
// Taxi tracks the walker's position, heading and every point visited.
type Taxi struct {
	Position Point
	Direction Compass
	// BeenTo records each position after every single step (see Drive).
	BeenTo []Point
}
// NewTaxi returns a taxi at the origin facing North.
func NewTaxi() Taxi {
	return Taxi{
		Position: Point{X: 0, Y: 0},
		Direction: &North{},
	}
}
// FollowInstructions parses the instruction string and executes each move:
// turn first, then drive one step at a time so every visited point is
// recorded.
func (t *Taxi) FollowInstructions(instructions string) {
	parser := NewBunnyInstructionParser()
	for _, step := range parser.Parse(instructions) {
		t.Direction = t.Direction.Turn(step.Direction)
		for n := 0; n < step.Steps; n++ {
			t.Drive()
		}
	}
}
// Drive moves one step in the current heading and appends the new
// position to the visited list.
func (t *Taxi) Drive() {
	switch t.Direction.(type) {
	case *North:
		t.Position.Y++
	case *East:
		t.Position.X++
	case *South:
		t.Position.Y--
	default: // *West
		t.Position.X--
	}
	t.BeenTo = append(t.BeenTo, t.Position)
}
// Distance returns the taxicab (Manhattan) distance from the origin to
// the taxi's current position.
func (t *Taxi) Distance() float64 {
	return distanceFromPoint(t.Position)
}

// distanceFromPoint returns the Manhattan distance of p from the origin.
func distanceFromPoint(p Point) float64 {
	return math.Abs(float64(p.X)) + math.Abs(float64(p.Y))
}
func (t *Taxi) DistanceFromFirstRepeatedPosition() float64 {
positions := make(map[Point]bool)
for _, location := range t.BeenTo {
if _, ok := positions[location]; ok {
return distanceFromPoint(location)
}
positions[location] = true
}
return float64(0)
}
// main reads the instruction string from the sole CLI argument, drives
// the taxi, and prints the final distance plus the distance to the first
// repeated position.
func main() {
	if len(os.Args) != 2 {
		fmt.Fprintf(os.Stderr, "You must pass instructions as the first and only argument, you passed %v\n", len(os.Args))
		os.Exit(1)
	}
	taxi := NewTaxi()
	taxi.FollowInstructions(os.Args[1])
	fmt.Fprintf(os.Stdout, "You have travelled a distance of %v\n", taxi.Distance())
	fmt.Fprintf(os.Stdout, "The Easter Bunny HQ is %v blocks away\n", taxi.DistanceFromFirstRepeatedPosition())
}
|
[
0
] |
package main
import (
"fmt"
"log"
"strconv"
"strings"
"github.com/miekg/dns"
)
// Level2Records maps fully-qualified second-level domain names to the
// A-record IP served for them.
var Level2Records = map[string]string{
	"cis-hub-dongguan-1.cmecloud.cn.": "192.168.0.4",
}

// Level3Records maps "*.<parent-domain>" wildcard keys to the A-record IP
// served for any third-level name under that parent.
var Level3Records = map[string]string{
	"*.ecis-suzhou-1.cmecloud.cn.": "192.168.0.5",
	"*.ecis-hangzhou-1.cmecloud.cn.": "192.168.0.6",
}
// parseQuery answers the A-record questions in m from the static record
// maps. Names with three dots (fully-qualified second-level names) are
// matched exactly against Level2Records; names with four dots
// (third-level names) are matched against Level3Records via their "*."
// wildcard parent. Unmatched questions are left unanswered.
func parseQuery(m *dns.Msg) {
	for _, q := range m.Question {
		if q.Qtype != dns.TypeA {
			continue
		}
		log.Printf("Query for %s\n", q.Name)
		// q.Name is fully qualified ("host.domain.tld."), so the dot
		// count distinguishes second- from third-level names.
		switch strings.Count(q.Name, ".") {
		case 3: // 二级域名,全域名匹配
			if ip, ok := Level2Records[q.Name]; ok {
				appendARecord(m, q.Name, ip)
			}
		case 4: // 三级域名
			arr := strings.SplitN(q.Name, ".", 2)
			log.Printf("三级域名分割 for %s\n", arr[1])
			// 从三级域名开始匹配
			if ip, ok := Level3Records["*."+arr[1]]; ok {
				appendARecord(m, q.Name, ip)
			}
		}
	}
}

// appendARecord logs and appends a "name A ip" answer to m; a record that
// fails to parse is silently skipped (dns.NewRR only fails on bad input).
func appendARecord(m *dns.Msg, name, ip string) {
	log.Printf("ip is %s\n", ip)
	rr, err := dns.NewRR(fmt.Sprintf("%s A %s", name, ip))
	if err == nil {
		m.Answer = append(m.Answer, rr)
	}
}
// handleDnsRequest builds a reply for r, answering only standard queries;
// any other opcode gets an empty reply.
func handleDnsRequest(w dns.ResponseWriter, r *dns.Msg) {
	m := new(dns.Msg)
	m.SetReply(r)
	m.Compress = false
	switch r.Opcode {
	case dns.OpcodeQuery:
		parseQuery(m)
	}
	// NOTE(review): the WriteMsg error is discarded; a failed send is
	// silently dropped.
	w.WriteMsg(m)
}
// main registers handlers for the served zones and runs a UDP DNS server
// on port 5354 until ListenAndServe returns.
func main() {
	// attach request handler func
	dns.HandleFunc("service.", handleDnsRequest)
	dns.HandleFunc("cis-hub-dongguan-1.cmecloud.cn.", handleDnsRequest)
	dns.HandleFunc("ecis-suzhou-1.cmecloud.cn.", handleDnsRequest)
	dns.HandleFunc("ecis-hangzhou-1.cmecloud.cn.", handleDnsRequest)
	// start server
	port := 5354
	server := &dns.Server{Addr: ":" + strconv.Itoa(port), Net: "udp"}
	log.Printf("Starting at %d\n", port)
	err := server.ListenAndServe()
	// NOTE(review): ListenAndServe blocks, so this defer only runs after
	// the server has already stopped or failed.
	defer server.Shutdown()
	if err != nil {
		log.Fatalf("Failed to start server: %s\n ", err.Error())
	}
}
|
[
7
] |
package controller
import (
// . "eaciit/wfdemo-git/library/models"
"eaciit/wfdemo-git/web/helper"
"net/http"
"github.com/eaciit/knot/knot.v1"
"github.com/eaciit/toolkit"
)
// PageController serves the application's HTML pages. It embeds App and
// carries the parameter map handed to every rendered template.
type PageController struct {
	App
	Params toolkit.M
}

var (
	// DefaultIncludes are the partial templates included on every page.
	DefaultIncludes = []string{"_head.html", "_menu.html", "_loader.html", "_script_template.html"}
)

// CreatePageController returns a PageController whose template
// parameters start with the given application name.
func CreatePageController(AppName string) *PageController {
	var controller = new(PageController)
	controller.Params = toolkit.M{"AppName": AppName}
	return controller
}
// GetParams refreshes the shared template parameter map for a request:
// a cache-busting token, the latest data date, and — when isAnalyst is
// true — the project and turbine lists for analytics filters. It also
// sets no-cache headers on the response.
//
// NOTE(review): w.Params is a single map mutated on every request, so
// concurrent requests share it — confirm the framework serializes
// handler access before relying on this.
func (w *PageController) GetParams(r *knot.WebContext, isAnalyst bool) toolkit.M {
	w.Params.Set("AntiCache", toolkit.RandomString(20))
	w.Params.Set("CurrentDateData", helper.GetLastDateData(r))
	// w.Params.Set("Menus", r.Session("menus", []string{}))
	if isAnalyst {
		// helper errors are ignored; the page renders with empty lists
		projectList, _ := helper.GetProjectList()
		turbineList, _ := helper.GetTurbineList()
		w.Params.Set("ProjectList", projectList)
		w.Params.Set("TurbineList", turbineList)
	}
	r.Writer.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
	r.Writer.Header().Set("Pragma", "no-cache")
	r.Writer.Header().Set("Expires", "0")
	// WriteLog(r.Session("sessionid", ""), "access", r.Request.URL.String())
	return w.Params
}
// Index renders the landing page.
func (w *PageController) Index(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-index.html"
	return w.GetParams(r, false)
}

// Login renders the login page (without the main layout) and clears any
// existing session id.
func (w *PageController) Login(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.ViewName = "page-login.html"
	if r.Session("sessionid", "") != "" {
		r.SetSession("sessionid", "")
	}
	return w.GetParams(r, false)
}

// DataBrowser renders the data browser page with the custom column list.
func (w *PageController) DataBrowser(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-databrowser.html"
	return w.GetParams(r, false).Set("ColumnList", GetCustomFieldList())
}

// DataBrowserNew renders the redesigned data browser page with the custom
// column list.
func (w *PageController) DataBrowserNew(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-databrowser-new.html"
	return w.GetParams(r, false).Set("ColumnList", GetCustomFieldList())
}
/*func (w *PageController) AnalyticWindDistribution(r *knot.WebContext) interface{} {
r.Config.OutputType = knot.OutputTemplate
r.Config.LayoutTemplate = LayoutFile
r.Config.ViewName = "page-wind-distribution.html"
return w.GetParams(r, true)
}*/
/*func (w *PageController) AnalyticWindAvailability(r *knot.WebContext) interface{} {
r.Config.OutputType = knot.OutputTemplate
r.Config.LayoutTemplate = LayoutFile
r.Config.ViewName = "page-wind-availability-analysis.html"
return w.GetParams(r, true)
}*/
/*func (w *PageController) AnalyticWindRose(r *knot.WebContext) interface{} {
r.Config.OutputType = knot.OutputTemplate
r.Config.LayoutTemplate = LayoutFile
r.Config.ViewName = "page-analytic-wind-rose.html"
return w.GetParams(r, true)
}*/
/*func (w *PageController) AnalyticWindRoseDetail(r *knot.WebContext) interface{} {
r.Config.OutputType = knot.OutputTemplate
r.Config.LayoutTemplate = LayoutFile
r.Config.ViewName = "page-analytic-wind-rose-detail.html"
return w.GetParams(r, true)
}*/
/*func (w *PageController) AnalyticWindRoseFlexi(r *knot.WebContext) interface{} {
r.Config.OutputType = knot.OutputTemplate
r.Config.LayoutTemplate = LayoutFile
r.Config.ViewName = "page-analytic-wind-rose-flexi.html"
return w.GetParams(r, true)
}*/
/*func (w *PageController) AnalyticWRFlexiDetail(r *knot.WebContext) interface{} {
r.Config.OutputType = knot.OutputTemplate
r.Config.LayoutTemplate = LayoutFile
r.Config.ViewName = "page-analytic-wr-flexi-detail.html"
return w.GetParams(r, true)
}*/
// The handlers below all follow the same pattern: pick the view template,
// use the shared layout, and return the common parameters. Handlers that
// pass true to GetParams additionally load the project/turbine lists used
// by the analytics filter widgets.

func (w *PageController) AnalyticPerformanceIndex(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-analytic-performance-index.html"
	return w.GetParams(r, true)
}
func (w *PageController) AnalyticPowerCurve(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-analytic-power-curve.html"
	return w.GetParams(r, true)
}
/*func (w *PageController) AnalyticDgrScada(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-analytic-dgr-scada.html"
	return w.GetParams(r, true)
}*/
func (w *PageController) AnalyticKeyMetrics(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-analytic-key-metrics.html"
	return w.GetParams(r, true)
}
func (w *PageController) AnalyticKpi(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-analytic-kpi.html"
	return w.GetParams(r, true)
}
/*func (w *PageController) AnalyticAvailability(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-availability-analysis.html"
	return w.GetParams(r, true)
}*/
func (w *PageController) AnalyticLoss(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-loss-analysis.html"
	return w.GetParams(r, true)
}
func (w *PageController) AnalyticDataConsistency(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-data-consistency.html"
	return w.GetParams(r, true)
}
func (w *PageController) AnalyticMeteorology(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-analytic-meteorology.html"
	return w.GetParams(r, true)
}
func (w *PageController) AnalyticComparison(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-analytic-comparison.html"
	return w.GetParams(r, true)
}
func (w *PageController) AnalyticDataHistogram(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-analytic-data-histogram.html"
	return w.GetParams(r, true)
}
func (w *PageController) DataEntryPowerCurve(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-dataentry-power-curve.html"
	return w.GetParams(r, false)
}
func (w *PageController) Access(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-access.html"
	return w.GetParams(r, false)
}
func (w *PageController) Group(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-group.html"
	return w.GetParams(r, false)
}
func (w *PageController) Session(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-session.html"
	return w.GetParams(r, false)
}
func (w *PageController) Log(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-log.html"
	return w.GetParams(r, false)
}
func (w *PageController) AdminTable(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-admintable.html"
	return w.GetParams(r, false)
}
func (w *PageController) User(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-user.html"
	return w.GetParams(r, false)
}
func (w *PageController) Monitoring(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-monitoring.html"
	return w.GetParams(r, false)
}

// Dashboard additionally pulls in the dashboard sub-templates on top of
// the default includes.
func (w *PageController) Dashboard(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-dashboard.html"
	r.Config.IncludeFiles = append(DefaultIncludes, []string{"page-dashboard-summary.html", "page-dashboard-production.html", "page-dashboard-availability.html"}...)
	return w.GetParams(r, false)
}

// Home redirects to the dashboard.
func (w *PageController) Home(r *knot.WebContext) interface{} {
	http.Redirect(r.Writer, r.Request, "dashboard", http.StatusTemporaryRedirect)
	return w.GetParams(r, false)
}
func (w *PageController) TurbineHealth(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-turbine-health.html"
	return w.GetParams(r, false)
}
func (w *PageController) DataSensorGovernance(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-data-sensor-governance.html"
	return w.GetParams(r, false)
}
func (w *PageController) TimeSeries(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-time-series.html"
	return w.GetParams(r, false)
}
func (w *PageController) DIYView(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-diy-view.html"
	return w.GetParams(r, false)
}
func (w *PageController) SCMManagement(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-scm.html"
	return w.GetParams(r, false)
}
func (w *PageController) IssueTracking(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-issue-tracking.html"
	return w.GetParams(r, false)
}
func (w *PageController) Reporting(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputTemplate
	r.Config.LayoutTemplate = LayoutFile
	r.Config.ViewName = "page-reporting.html"
	return w.GetParams(r, false)
}
|
[
2
] |
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Mauro Risonho de Paula Assumpcao - firebitsbr - [email protected]
* local file inclusion tester (beta)
*
*/
package main
import (
"crypto/tls"
"flag"
"fmt"
"github.com/jackdanger/collectlinks"
"net/http"
"net/url"
"os"
)
// usage prints the crawl invocation synopsis and registered flag defaults
// to stderr, then exits with status 2.
func usage() {
	fmt.Fprint(os.Stderr, "usage: crawl http://example.com/path/file.html\n")
	flag.PrintDefaults()
	os.Exit(2)
}
// main seeds the crawl queue with the start URL and runs the
// dedupe/fetch loop; the queues are never closed, so the program runs
// until killed.
func main() {
	fmt.Println("Mauro Risonho de Paula Assumpcao - firebisbr")
	fmt.Println("LFI Scanner (Local File Inclusion)")
	flag.Usage = usage
	flag.Parse()
	args := flag.Args()
	fmt.Println(args)
	if len(args) < 1 {
		// usage exits with status 2, so the two lines below are unreachable
		usage()
		fmt.Println("Please specify start page")
		os.Exit(1)
	}
	queue := make(chan string)
	filteredQueue := make(chan string)
	go func() { queue <- args[0] }()
	go filterQueue(queue, filteredQueue)
	// pull from the filtered queue, add to the unfiltered queue
	for uri := range filteredQueue {
		enqueue(uri, queue)
	}
}
// filterQueue forwards each distinct value from in to out exactly once,
// dropping duplicates. It runs until in is closed and never closes out.
func filterQueue(in chan string, out chan string) {
	seen := make(map[string]bool)
	for v := range in {
		if seen[v] {
			continue
		}
		seen[v] = true
		out <- v
	}
}
// enqueue fetches uri (TLS certificate verification deliberately
// disabled, since the scanner probes arbitrary hosts), extracts all links
// from the response body and feeds each resolved absolute URL back into
// queue for further crawling.
//
// NOTE(review): the log line appends an LFI payload, but the request
// fetches the bare uri — confirm which was intended.
func enqueue(uri string, queue chan string) {
	fmt.Println("fetching", uri+"../../../../etc/passwd")
	transport := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
	}
	client := http.Client{Transport: transport}
	resp, err := client.Get(uri)
	if err != nil {
		return
	}
	defer resp.Body.Close()
	links := collectlinks.All(resp.Body)
	for _, link := range links {
		absolute := fixUrl(link, uri)
		// BUG FIX: the original tested `uri != ""` (always true here), so
		// links that failed to parse were enqueued as empty strings; test
		// the resolved URL instead.
		if absolute != "" {
			go func() { queue <- absolute }()
		}
	}
}
// fixUrl resolves href against base and returns the absolute URL, or ""
// when either string fails to parse.
func fixUrl(href, base string) string {
	ref, err := url.Parse(href)
	if err != nil {
		return ""
	}
	baseURL, err := url.Parse(base)
	if err != nil {
		return ""
	}
	return baseURL.ResolveReference(ref).String()
}
|
[
6
] |
package main
import (
"log"
"errors"
"strconv"
"strings"
"io/ioutil"
)
// IntervalOfLines is an inclusive [Begin, End] range of line numbers
// covered by one bucket file.
type IntervalOfLines struct {
	Begin int
	End   int
}

// FileBuckets holds the line intervals of a directory of
// "<begin>-<end>.<ext>" bucket files.
type FileBuckets struct {
	linesIntervals []IntervalOfLines
}

// GetFilesIntervals returns every stored interval that overlaps the
// inclusive range [begin, end].
func (fb FileBuckets) GetFilesIntervals(begin int, end int) []IntervalOfLines {
	files := make([]IntervalOfLines, 0, 2)
	for _, iv := range fb.linesIntervals {
		if begin > iv.End || end < iv.Begin {
			continue // no overlap
		}
		files = append(files, iv)
	}
	return files
}

// parseFileName extracts the line interval from a file name of the form
// "<begin>-<end>.<ext>". All malformed names — including unparsable
// numbers — yield an error; the original called log.Fatal on Atoi
// failures, terminating the whole process from a parsing helper.
func parseFileName(fileName string) (IntervalOfLines, error) {
	base := strings.Split(fileName, ".")
	if len(base) < 2 {
		return IntervalOfLines{}, errors.New("incorrect filename format")
	}
	bounds := strings.Split(base[0], "-")
	if len(bounds) != 2 {
		return IntervalOfLines{}, errors.New("interval is not fully reflected in the filename")
	}
	begin, err := strconv.Atoi(bounds[0])
	if err != nil {
		return IntervalOfLines{}, err
	}
	end, err := strconv.Atoi(bounds[1])
	if err != nil {
		return IntervalOfLines{}, err
	}
	return IntervalOfLines{begin, end}, nil
}
// NewFileBuckets builds a FileBuckets by scanning dirPath and parsing the
// line interval out of every file name. An unreadable directory or a
// malformed file name aborts the process via log.Fatal.
func NewFileBuckets(dirPath string) FileBuckets {
	fb := FileBuckets{
		linesIntervals : make([]IntervalOfLines, 0, 100),
	}
	// NOTE(review): ioutil.ReadDir is deprecated since Go 1.16 in favor of
	// os.ReadDir
	files, err := ioutil.ReadDir(dirPath)
	if err != nil {
		log.Fatal(err)
	}
	for _, file := range files {
		interval, err := parseFileName(file.Name())
		if err != nil {
			log.Fatal(err)
		}
		fb.linesIntervals = append(fb.linesIntervals, interval)
	}
	return fb
}
|
[
6
] |
package main
import "fmt"
// https://leetcode.com/problems/find-closest-node-to-given-two-nodes/
// You are given a directed graph of n nodes numbered from 0 to n - 1, where each node has at most
// one outgoing edge.
// The graph is represented with a given 0-indexed array edges of size n, indicating that there is
// a directed edge from node i to node edges[i]. If there is no outgoing edge from i, then edges[i]
// == -1.
// You are also given two integers node1 and node2.
// Return the index of the node that can be reached from both node1 and node2, such that the maximum
// between the distance from node1 to that node, and from node2 to that node is minimized. If there
// are multiple answers, return the node with the smallest index, and if no possible answer exists,
// return -1.
// Note that edges may contain cycles.
// Example 1:
// Input: edges = [2,2,3,-1], node1 = 0, node2 = 1
// Output: 2
// Explanation: The distance from node 0 to node 2 is 1, and the distance from node 1 to node 2 is 1.
// The maximum of those two distances is 1. It can be proven that we cannot get a
// node with a smaller maximum distance than 1, so we return node 2.
// Example 2:
// Input: edges = [1,2,-1], node1 = 0, node2 = 2
// Output: 2
// Explanation: The distance from node 0 to node 2 is 2, and the distance from node 2 to itself is 0.
// The maximum of those two distances is 2. It can be proven that we cannot get a
// node with a smaller maximum distance than 2, so we return node 2.
// Constraints:
// n == edges.length
// 2 <= n <= 10⁵
// -1 <= edges[i] < n
// edges[i] != i
// 0 <= node1, node2 < n
// closestMeetingNode returns the node reachable from both node1 and node2
// that minimizes the larger of the two walk distances, preferring the
// smallest index on ties, or -1 when no common node exists.
//
// Each node has at most one outgoing edge, so the set reachable from any
// start is a simple path that may end in a cycle; one walk that stops on
// the first revisited node covers it. This version removes the leftover
// debug fmt.Println calls and factors the duplicated walk into a helper.
func closestMeetingNode(edges []int, node1 int, node2 int) int {
	dist1 := walkDistances(edges, node1)
	dist2 := walkDistances(edges, node2)
	ans := -1
	best := len(edges) + 2 // any real distance is at most len(edges)+1
	for i := range edges {
		if dist1[i] == 0 || dist2[i] == 0 {
			continue // not reachable from both starts
		}
		d := dist1[i]
		if dist2[i] > d {
			d = dist2[i]
		}
		if d < best { // strict <: keeps the smallest index on ties
			best = d
			ans = i
		}
	}
	return ans
}

// walkDistances follows the unique outgoing edges from start and returns
// dist, where dist[i] is 1 + hops from start and 0 means unreachable.
// The walk stops before revisiting a node, so cycles terminate.
func walkDistances(edges []int, start int) []int {
	dist := make([]int, len(edges))
	visited := make([]bool, len(edges))
	i := start
	visited[i] = true
	dist[i] = 1
	for edges[i] != -1 && !visited[edges[i]] {
		dist[edges[i]] = dist[i] + 1
		i = edges[i]
		visited[i] = true
	}
	return dist
}
// main runs closestMeetingNode over the example cases, printing the
// computed result next to the expected answer.
func main() {
	for _, v := range []struct {
		edges []int
		n1, n2, ans int
	}{
		{[]int{2, 2, 3, -1}, 0, 1, 2},
		{[]int{1, 2, -1}, 0, 2, 2},
		{[]int{5, 3, 1, 0, 2, 4, 5}, 3, 2, 3},
	} {
		fmt.Println(closestMeetingNode(v.edges, v.n1, v.n2), v.ans)
	}
}
|
[
6
] |
package p2p_layer
import (
"github.com/lampo100/botnet_p2p/models"
"log"
"github.com/lampo100/botnet_p2p/message_layer"
"sync"
)
// Package-level state shared by all P2P-layer functions.
var routingTable models.BucketList      // table of known peers; guarded by mutex
var BBLMessageChannel chan models.Message // delivers messages up to the layer above
var myNode models.Node                  // this peer's identity
var terminateChannel chan struct{}      // receives the shutdown request
var hasTerminated chan struct{}         // signaled once this layer has shut down
var nextLayerTerminated chan struct{}   // signaled by the message layer on its shutdown
var mutex = &sync.Mutex{}               // guards routingTable
// InitLayer wires up the P2P layer: it stores the shared channels and
// self identity, initializes the routing table, boots the message layer
// below, and starts a goroutine that completes the termination handshake
// once the lower layer has stopped.
func InitLayer(selfNode models.Node, messageChannel chan models.Message, terminate chan struct{}, thisTerminated chan struct{}) {
	BBLMessageChannel = messageChannel
	myNode = selfNode
	terminateChannel = terminate
	hasTerminated = thisTerminated
	nextLayerTerminated = make(chan struct{})
	routingTable.Init(myNode)
	message_layer.InitLayer(myNode, BBLMessageChannel, terminateChannel, nextLayerTerminated)
	log.Println("[P2] Initialized")
	go func() {
		// wait for a shutdown request, then for the message layer to stop,
		// before reporting our own termination
		<-terminateChannel
		<-nextLayerTerminated
		log.Println("[P2] Terminated")
		hasTerminated <- struct{}{}
	}()
}
// Ping forwards a ping to targetNode via the message layer.
func Ping(selfNode, targetNode models.Node) error {
	return message_layer.Ping(targetNode)
}

// PingResponse answers a ping from targetNode.
func PingResponse(selfNode, targetNode models.Node) error {
	return message_layer.PingResponse(targetNode)
}

// FindNode asks targetNode for peers close to guid.
func FindNode(selfNode, targetNode models.Node, guid models.UUID) error {
	return message_layer.FindNode(targetNode, guid)
}

// FoundNodes replies to a find-node request with up to 100 peers from
// the routing table nearest to guid.
func FoundNodes(selfNode, targetNode models.Node, guid models.UUID) error {
	mutex.Lock()
	nodes := routingTable.NearestNodes(guid, 100)
	mutex.Unlock()
	return message_layer.FoundNodes(targetNode, nodes)
}

// LeaveNetwork notifies every known peer that this node is leaving.
// NOTE(review): only the error from the last peer is returned; earlier
// failures are overwritten.
func LeaveNetwork() error {
	var err error
	nodes := routingTable.GetAllNodes()
	for _, node := range nodes {
		err = message_layer.LeaveNetwork(node)
	}
	return err
}

// Command sends a command string to target, optionally requesting a
// response.
func Command(sender, target models.Node, command string, shouldRespond bool) error {
	return message_layer.Command(target, command, shouldRespond)
}

// CommandResponse returns a command's output to targetNode.
func CommandResponse(selfNode, targetNode models.Node, command, response string) error {
	return message_layer.CommandResponse(targetNode, command, response)
}

// FileChunk sends one numbered chunk of the named file to target.
func FileChunk(target models.Node, uuid models.UUID, name string, size, number uint32, data []byte) error {
	return message_layer.FileChunk(target, uuid, name, size, number, data)
}

// RequestFile asks target for the file at path.
func RequestFile(target models.Node, path string) error {
	return message_layer.RequestFile(target, path)
}
// AddNodeToRoutingTable inserts node into the shared routing table under
// the package mutex and logs the resulting table.
func AddNodeToRoutingTable(node models.Node) {
	log.Printf("[P2] Adding node to RT: %v\n", node)
	mutex.Lock()
	routingTable.Insert(node)
	mutex.Unlock()
	log.Printf("[P2] RoutingTable:\n%v", routingTable.String())
}

// RemoveFromRoutingTable removes node from the shared routing table under
// the package mutex and logs the resulting table.
func RemoveFromRoutingTable(node models.Node) {
	// fixed log wording: the original said "Removing node to RT"
	log.Printf("[P2] Removing node from RT: %v\n", node)
	mutex.Lock()
	routingTable.Remove(node)
	mutex.Unlock()
	log.Printf("[P2] RoutingTable:\n%v", routingTable.String())
}
|
[
6
] |
// The MIT License (MIT)
//
// Copyright (c) 2016, Cagatay Dogan
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package dictionary
import (
"errors"
"math"
)
const hashPrime int = 101
const MaxPrimeArrayLength int = 0x7FEFFFFD
var primes = []int{
3, 7, 11, 17, 23, 29, 37, 47, 59, 71, 89, 107, 131, 163, 197, 239, 293, 353, 431, 521, 631, 761, 919,
1103, 1327, 1597, 1931, 2333, 2801, 3371, 4049, 4861, 5839, 7013, 8419, 10103, 12143, 14591,
17519, 21023, 25229, 30293, 36353, 43627, 52361, 62851, 75431, 90523, 108631, 130363, 156437,
187751, 225307, 270371, 324449, 389357, 467237, 560689, 672827, 807403, 968897, 1162687, 1395263,
1674319, 2009191, 2411033, 2893249, 3471899, 4166287, 4999559, 5999471, 7199369}
// IsPrime reports whether candidate is prime, using trial division by odd
// divisors up to sqrt(candidate).
//
// BUG FIX: the original bound (divisor <= candidate*candidate) let the
// divisor reach candidate itself, so every odd number >= 3 was reported
// composite (and the square could overflow). Checking divisor*divisor
// against candidate is the standard trial-division bound.
//
// Callers (GetPrime) only pass odd values larger than the primes table,
// so candidate < 2 is not expected here; like the original, odd values
// below 3 fall through to true.
func IsPrime(candidate int) bool {
	if (candidate & 1) != 0 {
		for divisor := 3; divisor*divisor <= candidate; divisor += 2 {
			if (candidate % divisor) == 0 {
				return false
			}
		}
		return true
	}
	return (candidate == 2)
}
// GetPrime returns the smallest prime in the precomputed table that is
// >= min. When min exceeds the table, it scans odd candidates upward and
// returns the first prime i with (i-1) not divisible by hashPrime
// (mirroring .NET's HashHelpers table-sizing). It fails only for
// negative min.
func GetPrime(min int) (int, error) {
	if min < 0 {
		return 0, errors.New("Capacity overflow")
	}
	primesLen := len(primes)
	for i := 0; i < primesLen; i++ {
		prime := primes[i]
		if prime >= min {
			return prime, nil
		}
	}
	// outside the table: scan odd candidates upward
	for i := (min | 1); i < math.MaxInt32; i += 2 {
		if IsPrime(i) && ((i-1)%hashPrime != 0) {
			return i, nil
		}
	}
	// unreachable in practice: only hit when no candidate below MaxInt32 fits
	return min, nil
}
// ExpandPrime returns a prime suitable for growing a hash table of
// oldSize entries: roughly double the size, capped at MaxPrimeArrayLength
// once doubling would pass it.
func ExpandPrime(oldSize int) int {
	newSize := 2 * oldSize
	// the uint comparison also catches a negative/overflowed newSize,
	// which converts to a huge unsigned value
	if uint(newSize) > uint(MaxPrimeArrayLength) && MaxPrimeArrayLength > oldSize {
		return MaxPrimeArrayLength
	}
	// GetPrime only errors for negative input, which is excluded above
	result, _ := GetPrime(newSize)
	return result
}
|
[
1
] |
package auth
import (
"bytes"
"context"
"embed"
"errors"
"fmt"
"html/template"
"io/fs"
"os"
"strings"
"sync"
"github.com/fatih/color"
"github.com/Masterminds/sprig"
"github.com/davecgh/go-spew/spew"
"github.com/volatiletech/authboss/v3"
)
const (
renderFileNotFound = `<p>Sorry, the auth page could not be compiled. Either the <b>base.html.tmpl</b> or <b>%s.html.tmpl</b> file is missing.</p>`
renderCompileError = `<p>Sorry, the auth page could not be compiled.<br /><br /><pre>%s</pre></p>`
renderContentType = "text/html"
)
var (
errPatternMatchesNoFiles = errors.New("pattern matches no files")
)
//go:embed templates
var authPagesEmbeddedFS embed.FS
// newAuthPagesRenderer returns the authboss renderer for auth pages.
// With no UIDirAuthPages configured it serves the embedded templates;
// otherwise it reads from the configured directory, optionally re-parsing
// templates on every render when caching is disabled for development.
func (s *Service) newAuthPagesRenderer() (authboss.Renderer, error) {
	if s.config.UIDirAuthPages == "" {
		// NOTE(review): this branch does not set providers, so embedded
		// pages render with no OAuth providers — confirm intended.
		return &authPagesRenderer{
			pageFS:    authPagesEmbeddedFS,
			templates: map[string]*template.Template{},
		}, nil
	}
	switch s.config.DeveloperDisableCachingAuthPages {
	case true:
		_, _ = fmt.Fprintf(os.Stdout, "> Rendering Auth Pages from %q. %s\n", s.config.UIDirAuthPages, color.RedString("Template caching is disabled."))
	default:
		_, _ = fmt.Fprintf(os.Stdout, "> Rendering Auth Pages from %q. Templates are cached.\n", s.config.UIDirAuthPages)
	}
	return &authPagesRenderer{
		pageFS:       os.DirFS(s.config.UIDirAuthPages),
		templates:    map[string]*template.Template{},
		providers:    s.providers,
		disableCache: s.config.DeveloperDisableCachingAuthPages,
	}, nil
}
type authPagesRenderer struct {
pageFS fs.FS
disableCache bool
providers []*provider
templates map[string]*template.Template
templateLock sync.RWMutex
}
// funcs returns the template FuncMap for auth pages: sprig's helpers plus
// htmlAttr, which joins "key=value" (or bare "key") strings into a single
// HTML attribute list with quoted values.
func (a *authPagesRenderer) funcs() template.FuncMap {
	funcs := sprig.FuncMap()
	funcs["htmlAttr"] = func(attrs ...string) template.HTMLAttr {
		var str string
		for _, attr := range attrs {
			// BUG FIX: SplitN keeps '=' characters inside the value intact;
			// the original strings.Split dropped everything after a second
			// '=' (e.g. `data-x=a=b` rendered as `data-x="a"`).
			parts := strings.SplitN(attr, "=", 2)
			if len(parts) == 1 {
				str = str + fmt.Sprintf("%s ", parts[0])
				continue
			}
			str = str + fmt.Sprintf("%s=%q ", parts[0], parts[1])
		}
		return template.HTMLAttr(str)
	}
	return funcs
}
// loadSinglePage parses one auth page template from pageFS together with
// the shared partials and the _base layout.
func (a *authPagesRenderer) loadSinglePage(name string) (*template.Template, error) {
	tmpl, err := template.New("page").
		Funcs(a.funcs()).
		ParseFS(a.pageFS, "partials/*.html.tmpl", "_base.html.tmpl", fmt.Sprintf("%s.html.tmpl", name))
	if err != nil {
		return nil, fmt.Errorf("template.New...ParseFS: %w", err)
	}
	if tmpl == nil {
		return nil, fmt.Errorf("nil template %q", name)
	}
	return tmpl, nil
}
// Load parses and caches the named page templates (authboss calls this at
// startup). When caching is disabled the parsed result is discarded —
// Render re-parses on demand — so Load then only validates that the
// templates compile.
func (a *authPagesRenderer) Load(names ...string) error {
	a.templateLock.Lock()
	defer a.templateLock.Unlock()
	for _, name := range names {
		tmpl, err := a.loadSinglePage(name)
		if err != nil {
			return fmt.Errorf("load auth page template: %w", err)
		}
		if !a.disableCache {
			a.templates[name] = tmpl
		}
	}
	return nil
}
// Render executes the named page template with data, returning the HTML
// bytes and content type. Template failures are returned as an error
// alongside an inline HTML error page in the body.
func (a *authPagesRenderer) Render(ctx context.Context, page string, data authboss.HTMLData) ([]byte, string, error) {
	a.templateLock.RLock()
	defer a.templateLock.RUnlock()
	var err error
	var buf bytes.Buffer
	var tmpl = a.templates[page]
	if a.disableCache {
		// dev mode: re-parse the template on every request
		tmpl, err = a.loadSinglePage(page)
		if err != nil {
			switch {
			// NOTE(review): matched by substring instead of
			// errors.Is(err, errPatternMatchesNoFiles); the sentinel
			// declared above is otherwise unused.
			case strings.Contains(err.Error(), "pattern matches no files"):
				buf.WriteString(fmt.Sprintf(renderFileNotFound, page))
				return buf.Bytes(), renderContentType, err
			default:
				buf.WriteString(fmt.Sprintf(renderCompileError, err.Error()))
				return buf.Bytes(), renderContentType, err
			}
		}
	}
	data = data.Merge(authboss.HTMLData{
		"providers": a.providers,
		"csrfToken": "",
	})
	// NOTE(review): leftover debug dump of all template data to stdout —
	// remove before production use.
	spew.Dump(data)
	if tmpl == nil {
		buf.WriteString(fmt.Sprintf(renderFileNotFound, page))
		return buf.Bytes(), renderContentType, err
	}
	if err := tmpl.Funcs(a.funcs()).ExecuteTemplate(&buf, "base", data); err != nil {
		buf.WriteString(fmt.Sprintf(renderCompileError, err.Error()))
		return buf.Bytes(), renderContentType, err
	}
	return buf.Bytes(), renderContentType, err
}
|
[
0
] |
package util
import (
"crypto/sha512"
"encoding/hex"
"io"
"os"
"os/exec"
"strings"
"github.com/alecthomas/kingpin"
"github.com/apex/log"
"github.com/develar/errors"
"github.com/json-iterator/go"
)
// ConfigureIsRemoveStageParam registers the --remove-stage flag on
// command. It defaults to false in debug mode (so the stage directory can
// be inspected) unless BUILDER_REMOVE_STAGE_EVEN_IF_DEBUG forces removal.
func ConfigureIsRemoveStageParam(command *kingpin.CmdClause) *bool {
	var isRemoveStageDefaultValue string
	if IsDebugEnabled() && !IsEnvTrue("BUILDER_REMOVE_STAGE_EVEN_IF_DEBUG") {
		isRemoveStageDefaultValue = "false"
	} else {
		isRemoveStageDefaultValue = "true"
	}
	return command.Flag("remove-stage", "Whether to remove stage after build.").Default(isRemoveStageDefaultValue).Bool()
}

// IsDebugEnabled reports whether the global logger is at debug level or
// more verbose.
func IsDebugEnabled() bool {
	return getLevel() <= log.DebugLevel
}

// getLevel returns the global logger's level, or log.InvalidLevel when
// the default logger is not a *log.Logger.
func getLevel() log.Level {
	if logger, ok := log.Log.(*log.Logger); ok {
		return logger.Level
	}
	return log.InvalidLevel
}
// WriteJsonToStdOut serializes v as JSON to stdout and then closes
// stdout, presumably so the consuming process sees EOF.
func WriteJsonToStdOut(v interface{}) error {
	serializedInputInfo, err := jsoniter.ConfigFastest.Marshal(v)
	if err != nil {
		return errors.WithStack(err)
	}
	_, err = os.Stdout.Write(serializedInputInfo)
	// the close error is deliberately discarded; the write error (if any)
	// is what gets returned
	_ = os.Stdout.Close()
	return errors.WithStack(err)
}
// useful for snap, where prime command took a lot of time and we need to read progress messages
// ExecuteWithInheritedStdOutAndStdErr runs command with both its stdout
// and stderr streamed to our stderr, keeping our own stdout free for
// structured output while progress stays visible.
func ExecuteWithInheritedStdOutAndStdErr(command *exec.Cmd, currentWorkingDirectory string) error {
	preCommandExecute(command, currentWorkingDirectory)
	// not an error - command error output printed to out stdout (like logging)
	command.Stdout = os.Stderr
	command.Stderr = os.Stderr
	err := command.Run()
	if err != nil {
		return errors.WithStack(err)
	}
	return nil
}

// Execute runs command and returns its stdout. Failures are wrapped into
// a single error carrying the command path, the argument list with
// passwords masked, and any captured output/stderr.
func Execute(command *exec.Cmd, currentWorkingDirectory string) ([]byte, error) {
	preCommandExecute(command, currentWorkingDirectory)
	output, err := command.Output()
	if err != nil {
		errorOut := ""
		if exitError, ok := err.(*exec.ExitError); ok {
			errorOut = string(exitError.Stderr)
		}
		return nil, errors.New("error: " + err.Error() +
			"\npath: " + command.Path +
			"\nargs: " + argListToSafeString(command.Args) +
			"\noutput: " + string(output) +
			"\nerror output:" + errorOut)
	} else if IsDebugEnabled() && len(output) != 0 && !(strings.HasSuffix(command.Path, "openssl") || strings.HasSuffix(command.Path, "openssl.exe")) {
		// openssl output is never echoed, presumably because it may
		// contain sensitive material
		log.Debug(string(output))
	}
	return output, nil
}
// argListToSafeString renders a command argument list as one
// space-separated string, masking any argument that starts with "pass:"
// (an OpenSSL-style password) with a short sha512 digest tag so logs
// never contain the secret.
func argListToSafeString(args []string) string {
	var result strings.Builder
	for index, value := range args {
		if strings.HasPrefix(value, "pass:") {
			// hash.Hash.Write never returns an error (hash package docs),
			// so the original's error branch was dead code; Sum512 computes
			// the digest directly.
			digest := sha512.Sum512([]byte(value))
			value = "sha512-first-8-chars-" + hex.EncodeToString(digest[:4])
		}
		if index > 0 {
			result.WriteRune(' ')
		}
		result.WriteString(value)
	}
	return result.String()
}
func StartPipedCommands(producer *exec.Cmd, consumer *exec.Cmd) error {
err := producer.Start()
if err != nil {
return errors.WithStack(err)
}
err = consumer.Start()
if err != nil {
return errors.WithStack(err)
}
return nil
}
// RunPipedCommands starts both commands and blocks until they finish:
// StartPipedCommands followed by WaitPipedCommand.
func RunPipedCommands(producer *exec.Cmd, consumer *exec.Cmd) error {
	if err := StartPipedCommands(producer, consumer); err != nil {
		return errors.WithStack(err)
	}
	return errors.WithStack(WaitPipedCommand(producer, consumer))
}
// WaitPipedCommand waits for the producer and then the consumer to exit,
// returning the first failure encountered.
func WaitPipedCommand(producer *exec.Cmd, consumer *exec.Cmd) error {
	for _, command := range []*exec.Cmd{producer, consumer} {
		if err := command.Wait(); err != nil {
			return errors.WithStack(err)
		}
	}
	return nil
}
// preCommandExecute applies the working directory (when given) and logs the
// command about to run, with password arguments scrubbed by argListToSafeString.
func preCommandExecute(command *exec.Cmd, currentWorkingDirectory string) {
	if currentWorkingDirectory != "" {
		command.Dir = currentWorkingDirectory
	}
	log.WithFields(log.Fields{
		"path": command.Path,
		"args": argListToSafeString(command.Args),
	}).Debug("execute command")
}
// LogErrorAndExit logs err with its stack trace ("%+v") and terminates the
// process with a non-zero exit code (log.Fatalf exits after logging).
func LogErrorAndExit(err error) {
	log.Fatalf("%+v\n", err)
}
// http://www.blevesearch.com/news/Deferred-Cleanup,-Checking-Errors,-and-Potential-Problems/
// Close closes c and logs any failure instead of returning it, so it is safe
// to use directly in defer statements. "Already closed" errors are ignored,
// both when reported directly as os.ErrClosed and when wrapped in an
// *os.PathError (as os.File.Close does).
func Close(c io.Closer) {
	err := c.Close()
	if err != nil && err != os.ErrClosed {
		if e, ok := err.(*os.PathError); ok && e.Err == os.ErrClosed {
			return
		}
		log.Errorf("%v", err)
	}
}
|
[
6
] |
package main
import (
"client"
"counter"
"flag"
"fmt"
"html/template"
"log"
"logg"
"math/rand"
"net/http"
"runtime"
"scenario"
"strconv"
"time"
)
// to reduce size of thread, speed up
const SizePerThread = 10000000
// Counter will be an atomic, to count the number of request handled
// which will be used to print PPS, etc.
// Hammer drives load against the target: it launches one request goroutine
// per throttle tick and aggregates statistics in counter.
type Hammer struct {
	counter *counter.Counter // send/response/error statistics
	client client.ClientInterface // executes individual profile calls
	monitor *time.Ticker // 1s ticker driving periodic stat printing
	// ideally error should be organized by type TODO
	throttle <-chan time.Time // paces request-goroutine launches
	// 0 for constant, 1 for flexible
	mode int
	modeAdjInv int // adjustment interval for flexible mode — set in Init; not read elsewhere in this view
}
// init
// Init configures the Hammer from package-level flags: resolves the load
// mode (anything other than "flexible" means constant), allocates the stats
// counter and builds the HTTP client.
func (c *Hammer) Init(clientType string) {
	if mode == "flexible" {
		c.mode = 1
	} else {
		// "constant" and any unrecognized value both select constant mode
		c.mode = 0
	}
	c.modeAdjInv = 5
	c.counter = new(counter.Counter)
	// NOTE(review): the error from client.New is discarded; a bad clientType
	// would surface later as a nil-client panic — confirm intended.
	c.client, _ = client.New(clientType, proxy)
}
// main goroutine to drive traffic
// hammer performs one request: records the send, picks the next call from the
// profile, executes it and records timing or error stats. One invocation runs
// per throttle tick (see launch).
func (c *Hammer) hammer(rg *rand.Rand) {
	// before send out, update send count
	c.counter.RecordSend()
	call, session, cur, err := profile.NextCall(rg)
	if err != nil {
		log.Println("next call error: ", err)
		return
	}
	response_time, err := c.client.Do(call, debug)
	if session != nil {
		// session type so we need to lock for next step
		defer session.LockNext(cur)
	}
	if err != nil {
		// NOTE(review): response_time == -1 appears to mark requests that were
		// never actually issued, so only issued-but-failed requests count as
		// errors — confirm against client.Do's contract.
		if response_time != -1 {
			// only document successful request
			c.counter.RecordError()
		}
		log.Println(err)
	} else {
		c.counter.RecordRes(response_time, slowThreshold)
	}
}
// monitorHammer prints a one-line rolling summary of the counter statistics
// plus any profile-specific report; invoked once per second from launch.
func (c *Hammer) monitorHammer() {
	log.Println(c.counter.GeneralStat(), profile.CustomizedReport())
}
// launch starts the load machinery: a pacing goroutine firing one hammer
// goroutine per throttle tick, a 1s monitor goroutine printing stats, and a
// periodic stats logger. In constant mode an optional warmup ramps the rate
// up in four steps; in flexible mode the rate self-adjusts every 5 seconds
// based on observed latency drift.
//
// Cleanups over the previous version: removed the no-op "* time.Nanosecond"
// multiplications (time.Nanosecond == 1), the dead _p temporary, and the
// redundant break statements (Go switch cases do not fall through).
func (c *Hammer) launch(rps int, warmup int) {
	_interval := time.Second / time.Duration(rps)
	c.throttle = time.Tick(_interval)
	// NOTE(review): c.throttle is rewritten by the adjuster goroutines below
	// while the pacing goroutine reads it — this is a data race; confirm and
	// consider a channel-of-channels or atomic swap.
	switch c.mode {
	case 0:
		// constant mode, enable warmup: start at 1/4 of the target rate and
		// step up every warmup/4 seconds (interval multipliers 4, 3, 2, 1)
		if warmup != 0 {
			t := 1
			i := warmup / 4
			w := time.Tick(time.Second * time.Duration(i))
			c.throttle = time.Tick(_interval * 4)
			go func() {
				for t < 4 {
					// 4, 3, 2, 1 times _interval for each warmup/4
					<-w
					t++
					c.throttle = time.Tick(_interval * time.Duration(6/t))
				}
			}()
		}
	case 1:
		// flexible mode (no warmup): every 5s compare the latency statistic
		// (GetAllStat()[5]) with the previous sample; when it drifts by more
		// than 10%, slow the tick rate down or speed it up by 10%.
		w := time.Tick(time.Second * time.Duration(5))
		var j1 int64 = 0
		go func() {
			for {
				<-w
				j2 := c.counter.GetAllStat()[5]
				if j1 == 0 {
					j1 = j2
				}
				switch {
				case j1 < j2:
					// getting slower — back off
					if j2-j1 > int64(float64(j1)*0.1) {
						_interval = time.Duration(int(float64(_interval.Nanoseconds()) * 1.1))
						c.throttle = time.Tick(_interval)
					}
				case j1 > j2:
					// getting faster — push harder
					if j1-j2 > int64(float64(j1)*0.1) {
						_interval = time.Duration(int(float64(_interval.Nanoseconds()) * 0.9))
						c.throttle = time.Tick(_interval)
					}
				}
				j1 = j2
			}
		}()
	}
	// pacing goroutine: one request per tick, each on a randomly chosen
	// per-CPU rand.Rand to avoid contention on a shared source
	go func() {
		for {
			i := rand.Intn(len(rands))
			<-c.throttle
			go c.hammer(rands[i])
		}
	}()
	c.monitor = time.NewTicker(time.Second)
	go func() {
		for {
			// rate limit for monitor routine
			<-c.monitor.C
			go c.monitorHammer()
		}
	}()
	// do log here
	log_intv := time.Tick(time.Duration(logIntv) * time.Second)
	go func() {
		for {
			<-log_intv
			logger.Log(c.counter.GetAllStat(), logIntv)
		}
	}()
}
// health is a trivial liveness endpoint: it always answers 200 with the
// literal body "health".
func (c *Hammer) health(rw http.ResponseWriter, req *http.Request) {
	const body = "health"
	rw.Header().Set("Content-Type", "application/json")
	rw.Header().Set("Content-Length", strconv.Itoa(len(body)))
	rw.WriteHeader(200)
	rw.Write([]byte(body))
}
// log renders the collected performance log through the log.tpl template.
//
// BUG FIX: the previous version discarded the template.ParseFiles error and
// then called Execute on a nil template, panicking whenever log.tpl was
// missing or malformed; failures now surface as HTTP 500.
func (c *Hammer) log(rw http.ResponseWriter, req *http.Request) {
	p := struct {
		Title string
		Data  string
	}{
		Title: fmt.Sprintf(
			"Performance Log [rps:%d]",
			rps),
		Data: logger.Read(),
	}
	t, err := template.ParseFiles("log.tpl")
	if err != nil {
		http.Error(rw, err.Error(), http.StatusInternalServerError)
		return
	}
	if err := t.Execute(rw, p); err != nil {
		// headers may already be written, so just record the render failure
		log.Println("render log template: ", err)
	}
}
// init the program from command line
var (
	rps int // target requests per second
	profileFile string // path to the scenario profile JSON (required)
	slowThreshold int64 // responses slower than this many ms count as slow
	debug bool
	proxy string // http proxy URL, or "nil" for direct connections
	mode string // "constant" or "flexible" load generation
	duration int // total run time in seconds; 0 = run until "exit" is typed
	warmup int // constant-mode warmup period in seconds; 0 = disabled
	logIntv int // seconds between stat log flushes
	logType string
	// profile
	profile *scenario.Profile // parsed scenario driving request generation
	logger logg.Logger
	// rands
	rands []*rand.Rand // one PRNG per CPU to avoid lock contention in hammer
)
// init registers all command-line flags; parsing happens in main.
//
// FIX: the -w usage text described the flag as "wrapup duration, infinite by
// default", but the flag fills the warmup period and its zero default
// disables warmup entirely (see launch).
func init() {
	flag.IntVar(&rps, "r", 500, "Request # per second")
	flag.StringVar(&profileFile, "p", "", "Profile json file path (required)")
	flag.Int64Var(&slowThreshold, "t", 200, "Threshold for slow response in ms")
	flag.BoolVar(&debug, "D", false, "Debug flag (true|false)")
	flag.StringVar(&proxy, "P", "nil", "Http proxy (e.g. http://127.0.0.1:8888)")
	flag.IntVar(&logIntv, "i", 6, "Log interval")
	flag.StringVar(&mode, "m", "constant", "Load generate mode (constant|flexible)")
	flag.StringVar(&logType, "l", "default", "Log type (file|db)")
	flag.IntVar(&duration, "d", 0, "Test duration, infinite by default")
	flag.IntVar(&warmup, "w", 0, "Warmup duration in seconds, disabled by default")
}
// main func
// main parses flags, builds the per-CPU PRNGs, loads the scenario profile and
// logger, starts the load in the background and then blocks until the
// configured duration elapses (or the user types "exit").
//
// BUG FIX: errors from scenario.New and logg.NewLogger were previously
// discarded; a missing/invalid profile then crashed with a nil-pointer panic
// at profile.Client. Both are now fatal with a real message.
func main() {
	flag.Parse()
	NCPU := runtime.NumCPU()
	runtime.GOMAXPROCS(NCPU)
	// to speed up: one rand.Rand per CPU so hammer goroutines don't contend
	rands = make([]*rand.Rand, NCPU)
	for i := range rands {
		rands[i] = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	log.Println("cpu # ->", NCPU)
	log.Println("profile ->", profileFile)
	log.Println("rps ->", rps)
	log.Println("slow req ->", slowThreshold, "ms")
	log.Println("proxy ->", proxy)
	log.Println("mode ->", mode)
	log.Println("duration ->", duration, "s")
	log.Println("wrapup ->", warmup, "s")
	var err error
	profile, err = scenario.New(profileFile)
	if err != nil {
		log.Fatalln("cannot load profile: ", err)
	}
	logger, err = logg.NewLogger(logType, fmt.Sprintf("%d_%d", rps, slowThreshold))
	if err != nil {
		log.Fatalln("cannot create logger: ", err)
	}
	rand.Seed(time.Now().UnixNano())
	hamm := new(Hammer)
	hamm.Init(profile.Client)
	go hamm.launch(rps, warmup)
	if duration != 0 {
		// fixed-duration run
		timer := time.NewTimer(time.Second * time.Duration(duration))
		<-timer.C
	} else {
		var input string
		for {
			// block exiting until the user types "exit"
			fmt.Scanln(&input)
			if input == "exit" {
				break
			}
		}
	}
	// http.HandleFunc("/log", hamm.log)
	// http.HandleFunc("/health", hamm.health)
	// http.ListenAndServe(":9090", nil)
}
|
[
6
] |
package encryption
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"reflect"
"testing"
"time"
"github.com/stretchr/testify/require"
clientv3 "go.etcd.io/etcd/client/v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
"k8s.io/client-go/kubernetes"
)
// protoEncodingPrefix is the magic prefix of unencrypted protobuf-serialized
// objects as stored in etcd by the kube-apiserver.
var protoEncodingPrefix = []byte{0x6b, 0x38, 0x73, 0x00}
var (
	// scheme/codec factory used to decode EncryptionConfiguration objects
	apiserverScheme = runtime.NewScheme()
	apiserverCodecs = serializer.NewCodecFactory(apiserverScheme)
)
const (
	jsonEncodingPrefix = "{" // unencrypted JSON objects start here
	protoEncryptedDataPrefix = "k8s:enc:" // common prefix of all encrypted values
	aesCBCTransformerPrefixV1 = "k8s:enc:aescbc:v1:" // AES-CBC provider prefix
	aesGCMTransformerPrefixV1 = "k8s:enc:aesgcm:v1:" // AES-GCM provider prefix
	secretboxTransformerPrefixV1 = "k8s:enc:secretbox:v1:" // Secretbox provider prefix
)
// init registers the apiserver config API types so encryption configurations
// stored in secrets can be decoded (AssertEncryptionConfig).
func init() {
	utilruntime.Must(apiserverconfigv1.AddToScheme(apiserverScheme))
}
// AssertEncryptionConfig checks if the encryption config holds only targetGRs, this ensures that only those resources were encrypted,
// we don't check the keys because e2e tests are run randomly and we would have to consider all encryption secrets to get the right order of the keys.
// We test the content of the encryption config in more detail in unit and integration tests
//
// Fixes over the previous version: the local secret variable was misspelled
// ("encryptionCofnigSecret"); the missing-key and wrong-type checks used
// t.Errorf and then kept going, dereferencing nil / decoding garbage — they
// are now fatal; an empty per-entry Resources list no longer panics on [0].
func AssertEncryptionConfig(t testing.TB, clientSet ClientSet, encryptionConfigSecretName string, namespace string, targetGRs []schema.GroupResource) {
	t.Helper()
	t.Logf("Checking if %q in %q has desired GRs %v", encryptionConfigSecretName, namespace, targetGRs)
	encryptionConfigSecret, err := clientSet.Kube.CoreV1().Secrets(namespace).Get(context.TODO(), encryptionConfigSecretName, metav1.GetOptions{})
	require.NoError(t, err)
	encodedEncryptionConfig, foundEncryptionConfig := encryptionConfigSecret.Data["encryption-config"]
	if !foundEncryptionConfig {
		// fatal: decoding absent data below would only add noise
		t.Fatalf("Haven't found encryption config at %q key in the encryption secret %q", "encryption-config", encryptionConfigSecretName)
	}
	decoder := apiserverCodecs.UniversalDecoder(apiserverconfigv1.SchemeGroupVersion)
	encryptionConfigObj, err := runtime.Decode(decoder, encodedEncryptionConfig)
	require.NoError(t, err)
	encryptionConfig, ok := encryptionConfigObj.(*apiserverconfigv1.EncryptionConfiguration)
	if !ok {
		// fatal: continuing would dereference a nil encryptionConfig
		t.Fatalf("Unable to decode encryption config, unexpected wrong type %T", encryptionConfigObj)
	}
	for _, rawActualResource := range encryptionConfig.Resources {
		if len(rawActualResource.Resources) != 1 {
			t.Errorf("Invalid encryption config for resource %s, expected exactly one resource, got %d", rawActualResource.Resources, len(rawActualResource.Resources))
			// indexing Resources[0] below would panic on an empty list
			continue
		}
		actualResource := schema.ParseGroupResource(rawActualResource.Resources[0])
		actualResourceFound := false
		for _, expectedResource := range targetGRs {
			if reflect.DeepEqual(expectedResource, actualResource) {
				actualResourceFound = true
				break
			}
		}
		if !actualResourceFound {
			t.Errorf("Encryption config has an invalid resource %v", actualResource)
		}
	}
}
// AssertLastMigratedKey verifies that the most recently migrated encryption
// key (found via namespace/labelSelector) covers exactly targetGRs. When no
// key exists yet there is nothing to verify and the check passes.
func AssertLastMigratedKey(t testing.TB, kubeClient kubernetes.Interface, targetGRs []schema.GroupResource, namespace, labelSelector string) {
	t.Helper()
	t.Logf("Checking if the last migrated key was used to encrypt %v", targetGRs)
	lastMigratedKeyMeta, err := GetLastKeyMeta(t, kubeClient, namespace, labelSelector)
	require.NoError(t, err)
	if lastMigratedKeyMeta.Name == "" {
		t.Log("Nothing to check no new key was created")
		return
	}
	if len(targetGRs) != len(lastMigratedKeyMeta.Migrated) {
		t.Errorf("Wrong number of migrated resources for %q key, expected %d, got %d", lastMigratedKeyMeta.Name, len(targetGRs), len(lastMigratedKeyMeta.Migrated))
	}
	for _, gr := range targetGRs {
		if !hasResource(gr, lastMigratedKeyMeta.Migrated) {
			t.Errorf("%q wasn't used to encrypt %v, only %v", lastMigratedKeyMeta.Name, gr, lastMigratedKeyMeta.Migrated)
		}
	}
}
// VerifyResources lists every etcd key under etcdKeyPrefix (10-minute budget)
// and verifies each stored value was produced by expectedMode (see
// verifyPrefixForRawData). It returns the number of keys inspected.
// allowEmpty permits an empty result set for optional resource types.
//
// FIX: the prefix parameter was misspelled ("etcdKeyPreifx") throughout.
func VerifyResources(t testing.TB, etcdClient EtcdClient, etcdKeyPrefix string, expectedMode string, allowEmpty bool) (int, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()
	resp, err := etcdClient.Get(ctx, etcdKeyPrefix, clientv3.WithPrefix())
	switch {
	case err != nil:
		return 0, fmt.Errorf("failed to list prefix %s: %v", etcdKeyPrefix, err)
	case (resp.Count == 0 || len(resp.Kvs) == 0) && !allowEmpty:
		return 0, fmt.Errorf("empty list response for prefix %s: %+v", etcdKeyPrefix, resp)
	case resp.More:
		// the response is paginated — we would silently skip keys
		return 0, fmt.Errorf("incomplete list response for prefix %s: %+v", etcdKeyPrefix, resp)
	}
	for _, keyValue := range resp.Kvs {
		if err := verifyPrefixForRawData(expectedMode, keyValue.Value); err != nil {
			return 0, fmt.Errorf("key %s failed check: %v\n%s", keyValue.Key, err, hex.Dump(keyValue.Value))
		}
	}
	return len(resp.Kvs), nil
}
// verifyPrefixForRawData checks that a raw etcd value matches expectedMode
// ("aescbc", "aesgcm", "secretbox", "identity", ...). Plain "identity" is
// interpreted as unencrypted protobuf ("identity-proto").
func verifyPrefixForRawData(expectedMode string, data []byte) error {
	if len(data) == 0 {
		return fmt.Errorf("empty data")
	}
	describe := func(encrypted bool) string {
		if encrypted {
			return "encrypted"
		}
		return "unencrypted"
	}
	wantEncrypted := expectedMode != "identity"
	if !wantEncrypted {
		// "identity" data lives in etcd as unencrypted protobuf
		expectedMode = "identity-proto"
	}
	actualMode, isEncrypted := encryptionModeFromEtcdValue(data)
	if wantEncrypted != isEncrypted {
		return fmt.Errorf("unexpected encrypted state, expected data to be %q but was %q with mode %q", describe(wantEncrypted), describe(isEncrypted), actualMode)
	}
	if actualMode != expectedMode {
		return fmt.Errorf("unexpected encryption mode %q, expected %q, was data encrypted/decrypted with a wrong key", actualMode, expectedMode)
	}
	return nil
}
// encryptionModeFromEtcdValue inspects a raw etcd value and reports which
// provider produced it plus whether it is encrypted at all (anything starting
// with the "k8s:enc:" prefix counts as encrypted).
func encryptionModeFromEtcdValue(data []byte) (string, bool) {
	isEncrypted := bytes.HasPrefix(data, []byte(protoEncryptedDataPrefix)) // all encrypted data has this prefix
	mode := "unknown" // fall-through should never happen in practice
	switch {
	case hasPrefixAndTrailingData(data, []byte(aesCBCTransformerPrefixV1)): // AES-CBC has this prefix
		mode = "aescbc"
	case hasPrefixAndTrailingData(data, []byte(aesGCMTransformerPrefixV1)): // AES-GCM has this prefix
		mode = "aesgcm"
	case hasPrefixAndTrailingData(data, []byte(secretboxTransformerPrefixV1)): // Secretbox has this prefix
		mode = "secretbox"
	case hasPrefixAndTrailingData(data, []byte(jsonEncodingPrefix)): // unencrypted json data has this prefix
		mode = "identity-json"
	case hasPrefixAndTrailingData(data, protoEncodingPrefix): // unencrypted protobuf data has this prefix
		mode = "identity-proto"
	}
	return mode, isEncrypted
}
// hasPrefixAndTrailingData reports whether data starts with prefix AND carries
// at least one byte beyond it — a bare prefix with no payload does not count.
func hasPrefixAndTrailingData(data, prefix []byte) bool {
	if !bytes.HasPrefix(data, prefix) {
		return false
	}
	return len(data) > len(prefix)
}
|
[
6
] |
package libvirt
import (
"fmt"
"math/rand"
"net/url"
"strings"
"time"
"github.com/libvirt/libvirt-go"
"github.com/pkg/errors"
"k8s.io/kops/upup/pkg/fi/cloudup/libvirt/libvirtxml"
)
// uuidStringLength is the textual length of a libvirt UUID
// ("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"); lookups of this length are first
// tried as UUIDs before falling back to name lookup.
const uuidStringLength = 36
// random is the package-level PRNG behind MAC generation; math/rand suffices
// because MACs need uniqueness, not cryptographic unpredictability.
var random *rand.Rand
// init seeds the MAC-generation PRNG once per process.
func init() {
	source := rand.NewSource(time.Now().UnixNano())
	random = rand.New(source)
}
// getCapabilitiesXML fetches the hypervisor capabilities document from the
// connection and parses it into a typed wrapper.
func getCapabilitiesXML(connect *libvirt.Connect) (libvirtxml.Capabilities, error) {
	xml, err := connect.GetCapabilities()
	if err != nil {
		return libvirtxml.Capabilities{}, errors.Wrapf(err, "failed to fetch libvirt capabilities")
	}
	return libvirtxml.NewCapabilitiesForXML(xml)
}
// lookupStoragePool resolves a storage pool, trying lookup as a UUID string
// first (when it has UUID length) and then as a pool name.
func lookupStoragePool(connect *libvirt.Connect, lookup string) (*libvirt.StoragePool, error) {
	if len(lookup) == uuidStringLength {
		// lookup errors are deliberately ignored: fall through to name lookup
		pool, _ := connect.LookupStoragePoolByUUIDString(lookup)
		if pool != nil {
			return pool, nil
		}
	}
	pool, _ := connect.LookupStoragePoolByName(lookup)
	if pool != nil {
		return pool, nil
	}
	return nil, errors.Errorf("could not find storage pool '%s'", lookup)
}
// lookupDomain resolves a domain, trying lookup as a UUID string first (when
// it has UUID length) and then as a domain name.
func lookupDomain(connect *libvirt.Connect, lookup string) (*libvirt.Domain, error) {
	if len(lookup) == uuidStringLength {
		// lookup errors are deliberately ignored: fall through to name lookup
		domain, _ := connect.LookupDomainByUUIDString(lookup)
		if domain != nil {
			return domain, nil
		}
	}
	domain, _ := connect.LookupDomainByName(lookup)
	if domain != nil {
		return domain, nil
	}
	return nil, errors.Errorf("could not find domain '%s'", lookup)
}
// getDomainXML fetches the XML description of domain (no extra flags) and
// parses it into a typed wrapper.
func getDomainXML(domain *libvirt.Domain) (libvirtxml.Domain, error) {
	xml, err := domain.GetXMLDesc(libvirt.DomainXMLFlags(0))
	if err != nil {
		return libvirtxml.Domain{}, errors.Wrapf(err, "failed to fetch domain XML description")
	}
	return libvirtxml.NewDomainForXML(xml)
}
// listAllDomains returns the XML description of every domain known to the
// connection — the flag set below selects all states, so no domain is skipped.
//
// BUG FIX: when fetching one domain's XML failed, the previous version
// returned immediately, leaking the libvirt handles of the failing and all
// remaining domains; every handle is now freed regardless of errors.
func listAllDomains(connect *libvirt.Connect) ([]libvirtxml.Domain, error) {
	flags := libvirt.CONNECT_LIST_DOMAINS_ACTIVE |
		libvirt.CONNECT_LIST_DOMAINS_INACTIVE |
		libvirt.CONNECT_LIST_DOMAINS_PERSISTENT |
		libvirt.CONNECT_LIST_DOMAINS_TRANSIENT |
		libvirt.CONNECT_LIST_DOMAINS_RUNNING |
		libvirt.CONNECT_LIST_DOMAINS_PAUSED |
		libvirt.CONNECT_LIST_DOMAINS_SHUTOFF |
		libvirt.CONNECT_LIST_DOMAINS_OTHER |
		libvirt.CONNECT_LIST_DOMAINS_MANAGEDSAVE |
		libvirt.CONNECT_LIST_DOMAINS_NO_MANAGEDSAVE |
		libvirt.CONNECT_LIST_DOMAINS_AUTOSTART |
		libvirt.CONNECT_LIST_DOMAINS_NO_AUTOSTART |
		libvirt.CONNECT_LIST_DOMAINS_HAS_SNAPSHOT |
		libvirt.CONNECT_LIST_DOMAINS_NO_SNAPSHOT
	domains, err := connect.ListAllDomains(flags)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to list domains")
	}
	result := make([]libvirtxml.Domain, 0, len(domains))
	var xmlErr error
	for i := range domains {
		if xmlErr == nil {
			var domainXML libvirtxml.Domain
			if domainXML, xmlErr = getDomainXML(&domains[i]); xmlErr == nil {
				result = append(result, domainXML)
			}
		}
		// always release the handle, even after an earlier failure
		domains[i].Free()
	}
	if xmlErr != nil {
		return nil, xmlErr
	}
	return result, nil
}
// cloneStorageVolume creates destName in pool as a copy-on-write clone of
// sourceName: the new volume gets the source as its backing store, and raw
// sources are converted to qcow2 (raw cannot express a backing store).
//
// BUG FIX: the unsupported-volume-type error was constructed but never
// returned, so non-file volumes were silently cloned anyway.
func cloneStorageVolume(pool *libvirt.StoragePool, sourceName string, destName string) error {
	volumeXML, err := lookupStorageVolume(pool, sourceName)
	if err != nil {
		return err
	}
	volumeType := volumeXML.Type()
	if volumeType != "file" {
		return errors.Errorf("cannot clone storage volume '%s' - unsupported volume type '%s'", sourceName, volumeType)
	}
	volumeXML.SetName(destName)
	volumeXML.SetKey("")
	targetXML := volumeXML.Target()
	targetXML.RemoveTimestamps()
	sourcePath := targetXML.Path()
	targetXML.SetPath("") // will be filled-in by libvirt
	{
		// set backing store as the source target; note the format is copied
		// BEFORE the raw->qcow2 conversion below, matching the source on disk
		backingStoreXML := volumeXML.BackingStore()
		backingStoreXML.SetPath(sourcePath)
		backingStoreXML.Format().SetType(targetXML.Format().Type())
		backingStoreXML.RemoveTimestamps()
	}
	// switch to a format that supports backing store
	switch targetXML.Format().Type() {
	case "raw":
		targetXML.Format().SetType("qcow2")
	}
	xmlString, err := volumeXML.MarshalToXML()
	if err != nil {
		return err
	}
	storageVol, err := pool.StorageVolCreateXML(xmlString, libvirt.StorageVolCreateFlags(0))
	if err != nil {
		return errors.Wrapf(err, "failed to clone storage volume '%s' to '%s'", sourceName, destName)
	}
	defer storageVol.Free()
	return nil
}
// lookupStorageVolume fetches and parses the XML description of volumeName
// from pool. The libvirt handle is freed before returning.
// NOTE(review): the original lookup error detail is replaced by a generic
// "not found" message — confirm that is intentional.
func lookupStorageVolume(pool *libvirt.StoragePool, volumeName string) (libvirtxml.StorageVolume, error) {
	volume, err := pool.LookupStorageVolByName(volumeName)
	if err != nil {
		return libvirtxml.StorageVolume{}, errors.Errorf("could not find storage volume '%s'", volumeName)
	}
	defer volume.Free()
	desc, err := volume.GetXMLDesc(0)
	if err != nil {
		return libvirtxml.StorageVolume{}, errors.Wrapf(err, "failed to fetch XML description for storage volume '%s'", volumeName)
	}
	return libvirtxml.NewStorageVolumeForXML(desc)
}
// randomMACAddressNoConflict generates a MAC address that no interface of any
// existing domain on this connection uses, giving up after 256 attempts.
func randomMACAddressNoConflict(connect *libvirt.Connect) (string, error) {
	uri, err := connect.GetURI()
	if err != nil {
		return "", errors.Wrapf(err, "failed to fetch libvirt connection uri")
	}
	allDomains, err := listAllDomains(connect)
	if err != nil {
		return "", err
	}
	const maxAttempts = 256
	for attempt := 0; attempt < maxAttempts; attempt++ {
		mac, err := randomMACAddress(uri)
		if err != nil {
			return "", err
		}
		if !hasConflictingMACAddress(allDomains, mac) {
			return mac, nil
		}
	}
	return "", errors.New("failed to generate non-conflicting MAC address")
}
// randomMACAddress generates a MAC address appropriate for the hypervisor
// behind uri: QEMU/KVM guests get the conventional 52:54:00 OUI, Xen guests
// 00:16:3E; anything else gets a random locally-administered unicast address.
//
// BUG FIX: for unknown schemes the first octet was fully random, so the
// result could be a multicast address (I/G bit set) or claim a vendor OUI —
// both invalid for a guest NIC. The local bit is now set and the multicast
// bit cleared in that case.
func randomMACAddress(uri string) (string, error) {
	parsed, err := url.Parse(uri)
	if err != nil {
		return "", errors.Wrapf(err, "failed to parse libvirt connection uri")
	}
	var mac []byte
	switch {
	case isQemuURL(parsed):
		mac = []byte{0x52, 0x54, 0x00}
	case isXenURL(parsed):
		mac = []byte{0x00, 0x16, 0x3E}
	}
	prefixed := len(mac) > 0
	for len(mac) < 6 {
		mac = append(mac, byte(random.Uint32()))
	}
	if !prefixed {
		// force locally-administered (bit 1 set) unicast (bit 0 clear)
		mac[0] = (mac[0] | 0x02) &^ 0x01
	}
	return fmt.Sprintf("%02X:%02X:%02X:%02X:%02X:%02X", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]), nil
}
func isQemuURL(url *url.URL) bool {
return strings.HasPrefix(url.Scheme, "qemu")
}
func isXenURL(url *url.URL) bool {
return strings.HasPrefix(url.Scheme, "xen") ||
strings.HasPrefix(url.Scheme, "libxl")
}
// hasConflictingMACAddress reports whether any interface of any of the given
// domains already uses mac (compared case-insensitively).
func hasConflictingMACAddress(domains []libvirtxml.Domain, mac string) bool {
	wanted := strings.ToLower(mac)
	for _, domain := range domains {
		for _, iface := range domain.Devices().Interfaces() {
			if strings.ToLower(iface.MACAddress()) == wanted {
				return true
			}
		}
	}
	return false
}
// inspiration drawn from https://github.com/virt-manager/virt-manager/blob/master/virtinst/cloner.py
// createDomain instantiates a transient domain named name from
// domainTemplateXML, backed by diskPath. Identity fields (id/uuid/name),
// graphics ports, interface MACs and guest-agent channel paths are reset so
// the clone cannot collide with the template's original domain.
//
// BUG FIX: the errors returned by setEmulator and setDiskPath were silently
// discarded; both are now propagated.
func createDomain(connect *libvirt.Connect, name string, domainTemplateXML string, diskPath string) error {
	domainXML, err := libvirtxml.NewDomainForXML(domainTemplateXML)
	if err != nil {
		return err
	}
	domainXML.SetID("")
	domainXML.SetUUID("")
	domainXML.SetName(name)
	// Set the graphics device port to auto, in order to avoid conflicts
	for _, graphic := range domainXML.Devices().Graphics() {
		graphic.SetPort(-1)
	}
	// generate random MAC address for network interfaces
	for _, iface := range domainXML.Devices().Interfaces() {
		mac, err := randomMACAddressNoConflict(connect)
		if err != nil {
			return err
		}
		iface.SetTargetDevice("")
		iface.SetMACAddress(mac)
	}
	// reset path for guest agent channel
	for _, channel := range domainXML.Devices().Channels() {
		if channel.Type() != "unix" {
			continue
		}
		// will be set by libvirt
		channel.SetSourcePath("")
	}
	if domainXML.Devices().Emulator() == "" {
		if err := setEmulator(connect, domainXML); err != nil {
			return err
		}
	}
	if err := setDiskPath(domainXML, diskPath); err != nil {
		return err
	}
	xml, err := domainXML.MarshalToXML()
	if err != nil {
		return err
	}
	domain, err := connect.DomainCreateXML(xml, libvirt.DomainCreateFlags(0))
	if err != nil {
		return errors.Wrapf(err, "failed to create domain '%s'", name)
	}
	defer domain.Free()
	return nil
}
// setEmulator picks an emulator binary for domain by scanning the host
// capabilities for a guest matching the host CPU architecture, preferring a
// hardware-assisted ("hvm") guest when one exists.
func setEmulator(connect *libvirt.Connect, domain libvirtxml.Domain) error {
	capabilities, err := getCapabilitiesXML(connect)
	if err != nil {
		return err
	}
	hostArch := capabilities.Host().CPU().Arch()
	emulator := ""
	for _, guest := range capabilities.Guests() {
		if guest.Arch().Name() != hostArch {
			continue
		}
		emulator = guest.Arch().Emulator()
		if guest.OSType() == "hvm" {
			// hardware-assisted vm available — stop looking
			break
		}
	}
	if emulator == "" {
		return errors.Errorf("found no guest matching host architecture '%s'", hostArch)
	}
	domain.Devices().SetEmulator(emulator)
	return nil
}
// setDiskPath points the domain's sole disk at diskPath. Only single-disk
// domain templates are supported.
//
// FIX: the error message claimed "multiple disks detected" even when the
// template had zero disks; it now reports the actual count.
func setDiskPath(domain libvirtxml.Domain, diskPath string) error {
	disks := domain.Devices().Disks()
	if len(disks) != 1 {
		return errors.Errorf("expected exactly one disk for domain '%s', found %d - single disk domain templates are supported atm.", domain.Name(), len(disks))
	}
	disks[0].Source().SetFile(diskPath)
	return nil
}
|
[
6,
7
] |
package ctx
import (
"net/http"
"encoding/json"
"util/logger"
)
// CustomResponse is the JSON envelope emitted by WriteJSON: a numeric
// application code, a human-readable message, and an arbitrary data payload.
type CustomResponse struct {
	ReturnCode int `json:"return_code"`
	ReturnMessage interface{} `json:"return_message"`
	Data interface{} `json:"data"`
}
// WriteJSON serializes a CustomResponse{code, message, data} envelope to w as
// JSON with appropriate headers.
//
// BUG FIX: on a marshal failure the previous version called WriteJSON
// recursively (risking re-entry) and then FELL THROUGH, writing the nil
// payload to w a second time. The failure is now logged once and reported as
// a plain-text 500.
// NOTE(review): code is only embedded in the JSON body; the HTTP status line
// stays 200 as before — confirm callers rely on that.
func WriteJSON(w http.ResponseWriter, code int, message interface{}, data interface{}) {
	response := CustomResponse{ReturnCode: code, ReturnMessage: message, Data: data}
	ret, err := json.Marshal(response)
	if err != nil {
		logger.Error.Println(err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Accept", "application/json")
	w.Write(ret)
}
|
[
6
] |
package weather
import (
"crypto/hmac"
"crypto/md5"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
)
// UoM describes imperial and metric systems..
type UoM int
const (
	Imperial UoM = iota // Fahrenheit ("f" in Yahoo queries)
	Metric              // Celsius ("c" in Yahoo queries)
)
const (
	// RssUrl is the Yahoo Weather forecast endpoint queried by update.
	RssUrl = "https://weather-ydn-yql.media.yahoo.com/forecastrss"
)
var (
	// MinUpdateTimeoutSeconds defines number of seconds before next actual update.
	MinUpdateTimeoutSeconds = int64(5 * 60)
)
// YahooWeatherProvider defines Yahoo Weather wrapper.
// The last* fields cache the previous query (location/unit/time/result) so
// repeated queries within MinUpdateTimeoutSeconds skip the network round trip.
type YahooWeatherProvider struct {
	appID string
	clientID string
	clientSecret string
	compositeKey string // url-escaped clientSecret + "&"; NOTE(review): not used by getAuth — confirm intent
	lastLocation string // location exactly as the caller passed it
	lastLocationNorm string // lowercased, ", " collapsed to "," (used in URL and signature)
	lastUnit UoM
	lastUnitStr string // "f" (imperial) or "c" (metric)
	lastQueryTime int64 // unix seconds of the last successful update
	lastData *Weather // last decoded response; nil after a decode failure
}
// NewProvider wires up credentials for the Yahoo Weather API and returns a
// provider defaulting to imperial units with an empty cached Weather value.
// NOTE(review): compositeKey (the percent-encoded signing key) is precomputed
// here but getAuth derives its own key from the raw secret — confirm which
// form Yahoo expects before removing either.
func NewProvider(appID string, clientID string, clientSecret string) *YahooWeatherProvider {
	provider := &YahooWeatherProvider{
		appID:        appID,
		clientID:     clientID,
		clientSecret: clientSecret,
		compositeKey: url.QueryEscape(clientSecret) + "&",
		lastUnit:     Imperial,
		lastUnitStr:  "f",
		lastData:     &Weather{},
	}
	return provider
}
// Query gets current weather at the specified location.
// If location is the same as in previous request and now() - last_request_time() < MinUpdateTimeoutSeconds,
// previous result used.
func (provider *YahooWeatherProvider) Query(location string, unit UoM) (*Weather, error) {
	sameQuery := location == provider.lastLocation && unit == provider.lastUnit
	stillFresh := time.Now().UTC().Unix()-provider.lastQueryTime <= MinUpdateTimeoutSeconds
	if sameQuery && stillFresh {
		// serve the cached result without touching the network
		return provider.lastData, nil
	}
	provider.lastLocation = location
	// normalize: lowercase and drop the space after commas ("City, CC" -> "city,cc")
	provider.lastLocationNorm = strings.ReplaceAll(strings.ToLower(location), ", ", ",")
	provider.lastUnit = unit
	if unit == Metric {
		provider.lastUnitStr = "c"
	} else {
		provider.lastUnitStr = "f"
	}
	err := provider.update()
	return provider.lastData, err
}
// Performs actual update.
// update fetches current conditions from the Yahoo endpoint using an OAuth1
// Authorization header, caching the decoded result and the query time on
// success. A decode failure invalidates the cache (lastData becomes nil).
//
// FIX: the HTTP client previously had no timeout, so a stalled connection
// could hang the caller forever; a 30s budget is now applied.
// NOTE(review): lastLocationNorm is interpolated unescaped — it is also
// signed unescaped in getAuth, so escaping only here would break the
// signature; confirm inputs are plain ASCII location names.
func (provider *YahooWeatherProvider) update() error {
	sign, err := provider.getAuth()
	if err != nil {
		return err
	}
	url_ := fmt.Sprintf("%s?location=%s&u=%s&format=json", RssUrl,
		provider.lastLocationNorm, provider.lastUnitStr)
	client := &http.Client{Timeout: 30 * time.Second}
	req, err := http.NewRequest(http.MethodGet, url_, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-Yahoo-App-Id", provider.appID)
	req.Header.Set("Authorization", sign)
	res, err := client.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return errors.New(fmt.Sprintf("wrong HTTP status: %d", res.StatusCode))
	}
	dec := json.NewDecoder(res.Body)
	weather := &Weather{}
	err = dec.Decode(weather)
	if err != nil {
		// drop the stale cache so callers cannot mistake it for fresh data
		provider.lastData = nil
		return err
	}
	provider.lastData = weather
	provider.lastQueryTime = time.Now().UTC().Unix()
	return nil
}
// Generating OAuth1 signature.
// getAuth builds the OAuth 1.0 "OAuth ..." Authorization header value:
// oauth_* protocol parameters and the request query parameters are merged,
// sorted, percent-encoded into the signature base string
// "GET&<encoded-url>&<encoded-params>", and signed with HMAC-SHA1.
// NOTE(review): the signing key is clientSecret+"&" using the RAW secret; the
// precomputed compositeKey field (the percent-encoded variant, as RFC 5849
// §3.4.2 prescribes) is never used — confirm which form Yahoo expects.
func (provider *YahooWeatherProvider) getAuth() (string, error) {
	nonce, err := getNonce()
	if err != nil {
		return "", err
	}
	oauth := map[string]string{
		"oauth_consumer_key": provider.clientID,
		"oauth_nonce": nonce,
		"oauth_signature_method": "HMAC-SHA1",
		"oauth_timestamp": getTimestamp(),
		"oauth_version": "1.0",
	}
	// query parameters must match the ones sent in update() exactly,
	// otherwise the signature will not verify server-side
	query := map[string]string{
		"location": provider.lastLocationNorm,
		"format": "json",
		"u": provider.lastUnitStr,
	}
	merged := make(map[string]string)
	for k, v := range oauth {
		merged[k] = v
	}
	for k, v := range query {
		merged[k] = v
	}
	keys := make([]string, 0)
	for k, _ := range merged {
		keys = append(keys, k)
	}
	// lexicographic parameter order is required by the OAuth1 spec
	sort.Strings(keys)
	sortedParams := make([]string, 0)
	for _, v := range keys {
		sortedParams = append(sortedParams, fmt.Sprintf("%s=%s", url.QueryEscape(v), url.QueryEscape(merged[v])))
	}
	baseString := fmt.Sprintf("GET&%s&%s", url.QueryEscape(RssUrl),
		url.QueryEscape(strings.Join(sortedParams, "&")))
	h := hmac.New(sha1.New, []byte(provider.clientSecret+"&"))
	_, err = h.Write([]byte(baseString))
	if err != nil {
		return "", err
	}
	oauth["oauth_signature"] = base64.StdEncoding.EncodeToString(h.Sum(nil))
	// header parameter order is unspecified (map iteration); servers must not care
	headerParams := make([]string, 0)
	for k, v := range oauth {
		headerParams = append(headerParams, fmt.Sprintf("%s=\"%s\"", k, v))
	}
	return fmt.Sprintf("OAuth %s", strings.Join(headerParams, ", ")), nil
}
// Generating 32-byte nonce.
func getNonce() (string, error) {
h := md5.New()
_, err := h.Write(h.Sum([]byte(time.Now().Format(time.RFC3339Nano))))
if err != nil {
return "", err
}
return hex.EncodeToString(h.Sum(nil)), nil
}
// Obtaining current timestamp in UTC.
func getTimestamp() string {
return strconv.FormatInt(time.Now().UTC().Unix(), 10)
}
|
[
6
] |
// Copyright 2020 Ant Group. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
package backend
import (
"os"
"time"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
// OSSBackend uploads image blobs to an Aliyun OSS bucket, prefixing every
// object key with objectPrefix.
type OSSBackend struct {
	objectPrefix string
	bucket *oss.Bucket
}
// newOSSBackend dials the given OSS endpoint with the supplied credentials,
// binds the named bucket and returns a backend that prepends objectPrefix to
// every object key.
func newOSSBackend(endpoint, bucket, objectPrefix, accessKeyID, accessKeySecret string) (*OSSBackend, error) {
	client, err := oss.New(endpoint, accessKeyID, accessKeySecret)
	if err != nil {
		return nil, errors.Wrap(err, "init oss backend")
	}
	ossBucket, err := client.Bucket(bucket)
	if err != nil {
		return nil, errors.Wrap(err, "init oss backend")
	}
	backend := &OSSBackend{
		objectPrefix: objectPrefix,
		bucket:       ossBucket,
	}
	return backend, nil
}
const (
	// number of concurrently uploaded parts in a multipart upload
	splitPartsCount = 4
	// Blob size bigger than 100MB, apply multiparts upload.
	multipartsUploadThreshold = 100 * 1024 * 1024
)
// Upload blob as image layer to oss backend. Depending on blob's size, upload it
// by multiparts method or the normal method
//
// Fixes over the previous version:
//   - parts were appended from multiple goroutines with no synchronization —
//     a data race that could also drop or reorder parts; each goroutine now
//     writes its own pre-allocated slot, which additionally keeps parts in
//     part-number order for CompleteMultipartUpload.
//   - the function returns nil explicitly on success instead of a stale err.
func (b *OSSBackend) Upload(blobID string, blobPath string) error {
	blobID = b.objectPrefix + blobID
	if exist, err := b.bucket.IsObjectExist(blobID); err != nil {
		return err
	} else if exist {
		// already uploaded — nothing to do
		return nil
	}
	stat, err := os.Stat(blobPath)
	if err != nil {
		return err
	}
	blobSize := stat.Size()
	start := time.Now()
	if blobSize >= multipartsUploadThreshold {
		// Blob size bigger than 100MB, apply multiparts upload.
		logrus.Debugf("Upload %s using multiparts method", blobID)
		chunks, err := oss.SplitFileByPartNum(blobPath, splitPartsCount)
		if err != nil {
			return err
		}
		imur, err := b.bucket.InitiateMultipartUpload(blobID)
		if err != nil {
			return err
		}
		parts := make([]oss.UploadPart, len(chunks))
		g := new(errgroup.Group)
		for i, chunk := range chunks {
			i, ck := i, chunk // capture per-iteration copies for the closure
			g.Go(func() error {
				p, err := b.bucket.UploadPartFromFile(imur, blobPath, ck.Offset, ck.Size, ck.Number)
				if err != nil {
					return err
				}
				// TODO: We don't verify data part MD5 from ETag right now.
				// But we can do it if we have to.
				parts[i] = p
				return nil
			})
		}
		if err := g.Wait(); err != nil {
			return errors.Wrap(err, "Uploading parts failed")
		}
		if _, err := b.bucket.CompleteMultipartUpload(imur, parts); err != nil {
			return err
		}
	} else {
		reader, err := os.Open(blobPath)
		if err != nil {
			return err
		}
		defer reader.Close()
		if err := b.bucket.PutObject(blobID, reader); err != nil {
			return err
		}
	}
	logrus.Debugf("Uploading blob %s costs %s", blobID, time.Since(start))
	return nil
}
// Check reports whether the blob already exists in the bucket
// (object key = objectPrefix + blobID).
func (b *OSSBackend) Check(blobID string) (bool, error) {
	blobID = b.objectPrefix + blobID
	return b.bucket.IsObjectExist(blobID)
}
|
[
6
] |
// Copyright 2019 the orbs-contract-sdk authors
// This file is part of the orbs-contract-sdk library in the Orbs project.
//
// This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.
// The above notice should be included in all copies or substantial portions of the software.
package safeuint64
import "math"
// Add returns x+y, panicking with "integer overflow on add" when the true
// sum does not fit in a uint64 (instead of silently wrapping).
func Add(x uint64, y uint64) uint64 {
	if math.MaxUint64-x < y {
		panic("integer overflow on add")
	}
	return x + y
}
// Sub returns x-y, panicking with "integer overflow on sub" when y exceeds x
// (unsigned subtraction would otherwise wrap around).
func Sub(x uint64, y uint64) uint64 {
	if y > x {
		panic("integer overflow on sub")
	}
	return x - y
}
// Mul returns x*y, panicking with "integer overflow on mul" when the product
// exceeds math.MaxUint64. Zero factors return 0 immediately — they can never
// overflow, and short-circuiting keeps the division guard safe.
func Mul(x uint64, y uint64) uint64 {
	switch {
	case x == 0 || y == 0:
		return 0
	case y > math.MaxUint64/x:
		panic("integer overflow on mul")
	default:
		return x * y
	}
}
// Div returns x/y, panicking with "division by zero" on a zero divisor
// instead of surfacing the runtime's own error.
func Div(x uint64, y uint64) uint64 {
	switch y {
	case 0:
		panic("division by zero")
	default:
		return x / y
	}
}
// Mod returns x%y, panicking with "division by zero" on a zero divisor
// instead of surfacing the runtime's own error.
func Mod(x uint64, y uint64) uint64 {
	switch y {
	case 0:
		panic("division by zero")
	default:
		return x % y
	}
}
|
[
6
] |
package lib
/*
Copyright (c) 2014 Eric Anderton <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
import (
"bytes"
"fmt"
toml "github.com/pelletier/go-toml"
log "grapnel/log"
"regexp"
"text/template"
)
// MatchMap maps a dependency field name to the regexp it must satisfy.
type MatchMap map[string]*regexp.Regexp
// ReplaceMap maps a dependency field name to the template producing its new value.
type ReplaceMap map[string]*template.Template
// StringMap is a plain string-to-string mapping used when building rules from config.
type StringMap map[string]string
// RewriteRule rewrites dependency fields: when EVERY expression in Matches
// matches, each template in Replacements renders a new field value.
type RewriteRule struct {
	Matches MatchMap
	Replacements ReplaceMap
}
// RewriteRuleArray is an ordered list of rules applied in sequence.
type RewriteRuleArray []*RewriteRule
// NewRewriteRule returns an empty rule with both maps initialized, ready for
// AddMatch / AddReplacement calls.
func NewRewriteRule() *RewriteRule {
	rule := &RewriteRule{}
	rule.Matches = MatchMap{}
	rule.Replacements = ReplaceMap{}
	return rule
}
// RewriteTemplate parses tmpl as a text/template with the package's custom
// replacement functions (replaceFuncs) attached.
func RewriteTemplate(tmpl string) (*template.Template, error) {
	return template.New("").Funcs(replaceFuncs).Parse(tmpl)
}
// TypeResolverRule builds a rule that assigns typeValue to the "type" field
// when that field is still empty AND matchField matches matchExpr.
// Invalid expressions or templates panic (MustCompile / template.Must).
func TypeResolverRule(matchField, matchExpr, typeValue string) *RewriteRule {
	r := NewRewriteRule()
	r.Matches = MatchMap{
		"type":     regexp.MustCompile(`^$`), // only fire while type is unset
		matchField: regexp.MustCompile(matchExpr),
	}
	r.Replacements = ReplaceMap{
		"type": template.Must(RewriteTemplate(typeValue)),
	}
	return r
}
// BuildRewriteRule compiles a rule from raw string maps. It panics (via
// regexp.MustCompile / template.Must) if any expression or template is
// invalid.
func BuildRewriteRule(matches StringMap, replacements StringMap) *RewriteRule {
	rule := NewRewriteRule()
	for field, expr := range matches {
		rule.Matches[field] = regexp.MustCompile(expr)
	}
	for field, tmpl := range replacements {
		rule.Replacements[field] = template.Must(RewriteTemplate(tmpl))
	}
	return rule
}
// AddMatch compiles expr and registers it as the match expression for field.
// On a compile error the rule is left unmodified.
func (self *RewriteRule) AddMatch(field, expr string) error {
	compiled, err := regexp.Compile(expr)
	if err != nil {
		return err
	}
	self.Matches[field] = compiled
	return nil
}
// AddReplacement parses expr and registers it as the replacement template
// for field. Note: unlike RewriteTemplate, no helper funcs are attached
// here, mirroring the original behavior.
func (self *RewriteRule) AddReplacement(field, expr string) error {
	parsed, err := template.New(field).Parse(expr)
	if err != nil {
		return err
	}
	self.Replacements[field] = parsed
	return nil
}
// Apply matches the rule against dep and, when every match expression
// succeeds, renders each replacement template against the dependency's
// flattened field values and writes the results back into dep.
// A non-matching rule is a silent no-op.
func (self *RewriteRule) Apply(dep *Dependency) error {
	// match *all* expressions against the dependency
	depValues := dep.Flatten()
	for field, match := range self.Matches {
		if !match.MatchString(depValues[field]) {
			return nil // no match
		}
	}
	// generate new value map, reusing one buffer across fields
	newValues := map[string]string{}
	writer := &bytes.Buffer{}
	for field, tmpl := range self.Replacements {
		writer.Reset()
		if err := tmpl.Execute(writer, depValues); err != nil {
			// TODO: need waaaay more context for this to be useful
			return fmt.Errorf("Error executing replacement rule: %v", err)
		}
		newValues[field] = writer.String()
	}
	// set up the new dependency
	if err := dep.SetValues(newValues); err != nil {
		return err
	}
	// BUG FIX: %t is the boolean verb; %v formats the dependency correctly.
	log.Debug("Dependency rewritten: %v", dep)
	return nil
}
// Apply runs every rule in order against dep, stopping at the first error.
func (self RewriteRuleArray) Apply(dep *Dependency) error {
	for i := range self {
		if err := self[i].Apply(dep); err != nil {
			return err
		}
	}
	return nil
}
// Loads rewrite rules in a TOML file, specified by the filename argument.
// Returns an array of RewriteRules, or error.
//
// Each [[rewrite]] entry must contain a 'match' and a 'replace' subtree;
// match values are regular expressions, replace values are templates.
func LoadRewriteRules(filename string) (RewriteRuleArray, error) {
	// load the config file
	tree, err := toml.LoadFile(filename)
	if err != nil {
		return nil, fmt.Errorf("%s: %s", filename, err)
	}
	// curry the filename and position into an error format function
	pos := toml.Position{}
	errorf := func(format string, values ...interface{}) (RewriteRuleArray, error) {
		curriedFormat := filename + " " + pos.String() + ": " + format
		return nil, fmt.Errorf(curriedFormat, values...)
	}
	rules := RewriteRuleArray{}
	if rewriteTree := tree.Get("rewrite"); rewriteTree != nil {
		for _, ruleTree := range rewriteTree.([]*toml.TomlTree) {
			rule := NewRewriteRule()
			matchTree, ok := ruleTree.Get("match").(*toml.TomlTree)
			if !ok {
				pos = ruleTree.GetPosition("")
				return errorf("Expected 'match' subtree for rewrite rule")
			}
			replaceTree, ok := ruleTree.Get("replace").(*toml.TomlTree)
			if !ok {
				pos = ruleTree.GetPosition("")
				return errorf("Expected 'replace' subtree for rewrite rule")
			}
			// compile each match expression in the 'match' subtree
			for _, key := range matchTree.Keys() {
				matchString, ok := matchTree.Get(key).(string)
				if !ok {
					pos = matchTree.GetPosition(key)
					return errorf("Match expression must be a string value")
				}
				matchRegex, err := regexp.Compile(matchString)
				if err != nil {
					pos = matchTree.GetPosition(key)
					return errorf("Error compiling match expression: %s", err)
				}
				rule.Matches[key] = matchRegex
			}
			// parse each replacement template in the 'replace' subtree
			for _, key := range replaceTree.Keys() {
				replaceString, ok := replaceTree.Get(key).(string)
				if !ok {
					pos = replaceTree.GetPosition(key)
					return errorf("Replace expression must be a string value")
				}
				replaceTempl, err := RewriteTemplate(replaceString)
				if err != nil {
					pos = replaceTree.GetPosition(key)
					return errorf("Error compiling replace expression: %s", err)
				}
				rule.Replacements[key] = replaceTempl
			}
			rules = append(rules, rule)
		}
	}
	return rules, nil
}
// replace_Replace is the template helper behind the "replace" function: it
// compiles expr and substitutes every match in value with repl (which may
// use $1-style group references). The regexp compile error, if any, is
// returned and the result string is empty.
func replace_Replace(value, expr, repl string) (string, error) {
	regex, err := regexp.Compile(expr)
	if err != nil {
		return "", err
	}
	result := regex.ReplaceAllString(value, repl)
	return result, nil
}

// replaceFuncs exposes the helper functions available inside rewrite
// templates (see RewriteTemplate).
var replaceFuncs = template.FuncMap{
	"replace": replace_Replace,
}
// BasicRewriteRules fills in whichever of the url/import fields is missing
// from the other: a missing url becomes "http://<import>", and a missing
// import is rebuilt from the url's host and path.
var BasicRewriteRules = RewriteRuleArray{
	// generic rewrite for missing url
	&RewriteRule{
		Matches: MatchMap{
			"import": regexp.MustCompile(`.+`),
			"url":    regexp.MustCompile(`^$`),
		},
		Replacements: ReplaceMap{
			"url": template.Must(RewriteTemplate(`http://{{.import}}`)),
		},
	},
	// generic rewrite for missing import
	&RewriteRule{
		Matches: MatchMap{
			"import": regexp.MustCompile(`^$`),
			"url":    regexp.MustCompile(`.+`),
		},
		Replacements: ReplaceMap{
			"import": template.Must(RewriteTemplate(`{{.host}}/{{.path}}`)),
		},
	},
}
|
[
6
] |
// +build softdevice,!s110v8
package bluetooth
/*
// Define SoftDevice functions as regular function declarations (not inline
// static functions).
#define SVCALL_AS_NORMAL_FUNCTION
#include "nrf_sdm.h"
#include "nrf_nvic.h"
#include "ble.h"
#include "ble_gap.h"
void assertHandler(void);
*/
import "C"
import (
"machine"
"unsafe"
)
// assertHandler is invoked by the SoftDevice when one of its internal
// assertions fails; it only logs, as there is no safe recovery at that point.
//
//export assertHandler
func assertHandler() {
	println("SoftDevice assert")
}
// clockConfigXtal selects the external low-frequency crystal as the
// SoftDevice clock source.
// NOTE(review): accuracy is hard-coded to 250 ppm — confirm this matches the
// crystal mounted on the supported boards.
var clockConfigXtal C.nrf_clock_lf_cfg_t = C.nrf_clock_lf_cfg_t{
	source:       C.NRF_CLOCK_LF_SRC_XTAL,
	rc_ctiv:      0,
	rc_temp_ctiv: 0,
	accuracy:     C.NRF_CLOCK_LF_ACCURACY_250_PPM,
}
// enable starts the SoftDevice — using the crystal LF clock when the board
// has one, otherwise a nil config (SoftDevice default) — and then enables
// the BLE stack.
func (a *Adapter) enable() error {
	// Enable the SoftDevice.
	var clockConfig *C.nrf_clock_lf_cfg_t
	if machine.HasLowFrequencyCrystal {
		clockConfig = &clockConfigXtal
	}
	errCode := C.sd_softdevice_enable(clockConfig, C.nrf_fault_handler_t(C.assertHandler))
	if errCode != 0 {
		return Error(errCode)
	}
	// Enable the BLE stack.
	// NOTE(review): appRAMBase is a hard-coded application RAM start address;
	// it must agree with the linker script for this SoftDevice version.
	appRAMBase := uint32(0x200039c0)
	errCode = C.sd_ble_enable(&appRAMBase)
	return makeError(errCode)
}
// handleEvent processes a single pending SoftDevice event in eventBuf,
// dispatching on the event id range to GAP, GATT-server, or GATT-client
// handling code.
func handleEvent() {
	id := eventBuf.header.evt_id
	switch {
	case id >= C.BLE_GAP_EVT_BASE && id <= C.BLE_GAP_EVT_LAST:
		// Generic Access Profile (connection / advertising) events.
		gapEvent := eventBuf.evt.unionfield_gap_evt()
		switch id {
		case C.BLE_GAP_EVT_CONNECTED:
			connectEvent := gapEvent.params.unionfield_connected()
			switch connectEvent.role {
			case C.BLE_GAP_ROLE_PERIPH:
				if debug {
					println("evt: connected in peripheral role")
				}
				currentConnection.Reg = gapEvent.conn_handle
				DefaultAdapter.connectHandler(nil, true)
			case C.BLE_GAP_ROLE_CENTRAL:
				if debug {
					println("evt: connected in central role")
				}
				connectionAttempt.connectionHandle = gapEvent.conn_handle
				connectionAttempt.state.Set(2) // connection was successful
				DefaultAdapter.connectHandler(nil, true)
			}
		case C.BLE_GAP_EVT_DISCONNECTED:
			if debug {
				println("evt: disconnected")
			}
			// Clean up state for this connection.
			for i, cb := range gattcNotificationCallbacks {
				if cb.connectionHandle == currentConnection.Reg {
					gattcNotificationCallbacks[i].valueHandle = 0 // 0 means invalid
				}
			}
			currentConnection.Reg = C.BLE_CONN_HANDLE_INVALID
			// Auto-restart advertisement if needed.
			if defaultAdvertisement.isAdvertising.Get() != 0 {
				// The advertisement was running but was automatically stopped
				// by the connection event.
				// Note that it cannot be restarted during connect like this,
				// because it would need to be reconfigured as a non-connectable
				// advertisement. That's left as a future addition, if
				// necessary.
				C.sd_ble_gap_adv_start(defaultAdvertisement.handle, C.BLE_CONN_CFG_TAG_DEFAULT)
			}
			DefaultAdapter.connectHandler(nil, false)
		case C.BLE_GAP_EVT_ADV_REPORT:
			advReport := gapEvent.params.unionfield_adv_report()
			if debug && &scanReportBuffer.data[0] != advReport.data.p_data {
				// Sanity check.
				panic("scanReportBuffer != advReport.p_data")
			}
			// Prepare the globalScanResult, which will be passed to the
			// callback.
			scanReportBuffer.len = byte(advReport.data.len)
			globalScanResult.RSSI = int16(advReport.rssi)
			globalScanResult.Address = Address{
				MACAddress{MAC: advReport.peer_addr.addr,
					isRandom: advReport.peer_addr.bitfield_addr_type() != 0},
			}
			globalScanResult.AdvertisementPayload = &scanReportBuffer
			// Signal to the main thread that there was a scan report.
			// Scanning will be resumed (from the main thread) once the scan
			// report has been processed.
			gotScanReport.Set(1)
		case C.BLE_GAP_EVT_CONN_PARAM_UPDATE_REQUEST:
			// Respond with the default PPCP connection parameters by passing
			// nil:
			// > If NULL is provided on a peripheral role, the parameters in the
			// > PPCP characteristic of the GAP service will be used instead. If
			// > NULL is provided on a central role and in response to a
			// > BLE_GAP_EVT_CONN_PARAM_UPDATE_REQUEST, the peripheral request
			// > will be rejected
			C.sd_ble_gap_conn_param_update(gapEvent.conn_handle, nil)
		case C.BLE_GAP_EVT_DATA_LENGTH_UPDATE_REQUEST:
			// We need to respond with sd_ble_gap_data_length_update. Setting
			// both parameters to nil will make sure we send the default values.
			C.sd_ble_gap_data_length_update(gapEvent.conn_handle, nil, nil)
		default:
			if debug {
				println("unknown GAP event:", id)
			}
		}
	case id >= C.BLE_GATTS_EVT_BASE && id <= C.BLE_GATTS_EVT_LAST:
		// GATT server events.
		gattsEvent := eventBuf.evt.unionfield_gatts_evt()
		switch id {
		case C.BLE_GATTS_EVT_WRITE:
			writeEvent := gattsEvent.params.unionfield_write()
			len := writeEvent.len - writeEvent.offset
			data := (*[255]byte)(unsafe.Pointer(&writeEvent.data[0]))[:len:len]
			handler := DefaultAdapter.getCharWriteHandler(writeEvent.handle)
			if handler != nil {
				handler.callback(Connection(gattsEvent.conn_handle), int(writeEvent.offset), data)
			}
		case C.BLE_GATTS_EVT_SYS_ATTR_MISSING:
			// This event is generated when reading the Generic Attribute
			// service. It appears to be necessary for bonded devices.
			// From the docs:
			// > If the pointer is NULL, the system attribute info is
			// > initialized, assuming that the application does not have any
			// > previously saved system attribute data for this device.
			// Maybe we should look at the error, but as there's not really a
			// way to handle it, ignore it.
			C.sd_ble_gatts_sys_attr_set(gattsEvent.conn_handle, nil, 0, 0)
		case C.BLE_GATTS_EVT_EXCHANGE_MTU_REQUEST:
			// This event is generated by some devices. While we could support
			// larger MTUs, this default MTU is supported everywhere.
			C.sd_ble_gatts_exchange_mtu_reply(gattsEvent.conn_handle, C.BLE_GATT_ATT_MTU_DEFAULT)
		default:
			if debug {
				println("unknown GATTS event:", id, id-C.BLE_GATTS_EVT_BASE)
			}
		}
	case id >= C.BLE_GATTC_EVT_BASE && id <= C.BLE_GATTC_EVT_LAST:
		// GATT client events.
		gattcEvent := eventBuf.evt.unionfield_gattc_evt()
		switch id {
		case C.BLE_GATTC_EVT_PRIM_SRVC_DISC_RSP:
			discoveryEvent := gattcEvent.params.unionfield_prim_srvc_disc_rsp()
			if debug {
				println("evt: discovered primary service", discoveryEvent.count)
			}
			discoveringService.state.Set(2) // signal there is a result
			if discoveryEvent.count >= 1 {
				// Theoretically there may be more, but as we're only using
				// sd_ble_gattc_primary_services_discover, there should only be
				// one discovered service. Use the first as a sensible fallback.
				discoveringService.startHandle.Set(discoveryEvent.services[0].handle_range.start_handle)
				discoveringService.endHandle.Set(discoveryEvent.services[0].handle_range.end_handle)
				discoveringService.uuid = discoveryEvent.services[0].uuid
			} else {
				// No service found.
				discoveringService.startHandle.Set(0)
			}
		case C.BLE_GATTC_EVT_CHAR_DISC_RSP:
			discoveryEvent := gattcEvent.params.unionfield_char_disc_rsp()
			if debug {
				println("evt: discovered characteristics", discoveryEvent.count)
			}
			if discoveryEvent.count >= 1 {
				// There may be more, but for ease of implementing we only
				// handle the first.
				discoveringCharacteristic.handle_value.Set(discoveryEvent.chars[0].handle_value)
				discoveringCharacteristic.char_props = discoveryEvent.chars[0].char_props
				discoveringCharacteristic.uuid = discoveryEvent.chars[0].uuid
			} else {
				// zero indicates we received no characteristic, set handle_value to last
				discoveringCharacteristic.handle_value.Set(0xffff)
			}
		case C.BLE_GATTC_EVT_DESC_DISC_RSP:
			discoveryEvent := gattcEvent.params.unionfield_desc_disc_rsp()
			if debug {
				println("evt: discovered descriptors", discoveryEvent.count)
			}
			if discoveryEvent.count >= 1 {
				// There may be more, but for ease of implementing we only
				// handle the first.
				uuid := discoveryEvent.descs[0].uuid
				if uuid._type == C.BLE_UUID_TYPE_BLE && uuid.uuid == 0x2902 {
					// Found a CCCD (Client Characteristic Configuration
					// Descriptor), which has a 16-bit UUID with value 0x2902).
					discoveringCharacteristic.handle_value.Set(discoveryEvent.descs[0].handle)
				} else {
					// Found something else?
					// TODO: handle this properly by continuing the scan. For
					// now, give up if we found something other than a CCCD.
					if debug {
						println("  found some other descriptor (unimplemented)")
					}
				}
			}
		case C.BLE_GATTC_EVT_READ_RSP:
			readEvent := gattcEvent.params.unionfield_read_rsp()
			if debug {
				println("evt: read response, data length", readEvent.len)
			}
			readingCharacteristic.handle_value.Set(readEvent.handle)
			readingCharacteristic.offset = readEvent.offset
			readingCharacteristic.length = readEvent.len
			// copy read event data into Go slice
			copy(readingCharacteristic.value, (*[255]byte)(unsafe.Pointer(&readEvent.data[0]))[:readEvent.len:readEvent.len])
		case C.BLE_GATTC_EVT_HVX:
			hvxEvent := gattcEvent.params.unionfield_hvx()
			switch hvxEvent._type {
			case C.BLE_GATT_HVX_NOTIFICATION:
				if debug {
					println("evt: notification", hvxEvent.handle)
				}
				// Find the callback and call it (if there is any).
				for _, callbackInfo := range gattcNotificationCallbacks {
					if callbackInfo.valueHandle == hvxEvent.handle && callbackInfo.connectionHandle == gattcEvent.conn_handle {
						// Create a Go slice from the data, to pass to the
						// callback.
						data := (*[255]byte)(unsafe.Pointer(&hvxEvent.data[0]))[:hvxEvent.len:hvxEvent.len]
						if callbackInfo.callback != nil {
							callbackInfo.callback(data)
						}
						break
					}
				}
			}
		default:
			if debug {
				println("unknown GATTC event:", id, id-C.BLE_GATTC_EVT_BASE)
			}
		}
	default:
		if debug {
			println("unknown event:", id)
		}
	}
}
|
[
4,
7
] |
package core
import (
"bytes"
"code.google.com/p/go.crypto/openpgp"
"code.google.com/p/gopass"
"fmt"
"os"
"os/exec"
"os/user"
"path"
"strings"
)
// PGP holds the filesystem locations of the GnuPG public and secret
// keyrings used for signing and verification.
type PGP struct {
	SecKeyRingPath string
	KeyRingPath    string
}

// PGPSignature groups the candidate keys for a signature with the signer's
// key id.
// NOTE(review): this type is not referenced in this file — confirm callers.
type PGPSignature struct {
	Keys  []openpgp.Key
	KeyId uint64
}
// GetDefaultKeyRingPath returns the current user's default GnuPG public
// keyring location (~/.gnupg/pubring.gpg).
func GetDefaultKeyRingPath() (string, error) {
	usr, err := user.Current()
	if err != nil {
		return "", err
	}
	home := usr.HomeDir
	return path.Join(home, ".gnupg", "pubring.gpg"), nil
}
// GetDefaultSecKeyRingPath returns the current user's default GnuPG secret
// keyring location (~/.gnupg/secring.gpg).
func GetDefaultSecKeyRingPath() (string, error) {
	usr, err := user.Current()
	if err != nil {
		return "", err
	}
	home := usr.HomeDir
	return path.Join(home, ".gnupg", "secring.gpg"), nil
}
// NewPGP builds a PGP configured with the current user's default public and
// secret keyring locations.
func NewPGP() (*PGP, error) {
	pubPath, err := GetDefaultKeyRingPath()
	if err != nil {
		return nil, err
	}
	secPath, err := GetDefaultSecKeyRingPath()
	if err != nil {
		return nil, err
	}
	pgp := &PGP{}
	pgp.KeyRingPath = pubPath
	pgp.SecKeyRingPath = secPath
	return pgp, nil
}
// ReadKeyRing opens the keyring file at keyRingPath and parses it into an
// OpenPGP entity list; the file is closed before returning.
func ReadKeyRing(keyRingPath string) (*openpgp.EntityList, error) {
	f, err := os.Open(keyRingPath)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	entities, err := openpgp.ReadKeyRing(f)
	if err != nil {
		return nil, err
	}
	return &entities, nil
}
// FetchKey interactively offers to import the PGP key with the given id from
// the public key servers via the local gpg binary. It returns an error when
// the user declines or the import fails.
func FetchKey(keyid string) error {
	var reply string
	fmt.Printf("PGP Key:%s was not found on your keyring, Do you want to import it? (y/n) ", keyid)
	fmt.Scanf("%s", &reply)
	if reply != "y" {
		return fmt.Errorf("Key %s not found and user skipped importing", keyid)
	}
	// Surface the underlying gpg failure instead of discarding it.
	if _, err := exec.Command("gpg", "--recv-keys", keyid).Output(); err != nil {
		return fmt.Errorf("Cannot import key %s from public servers: %v", keyid, err)
	}
	fmt.Printf("PGP Key: %s imported correctly into the keyring\n", keyid)
	return nil
}
// ConfirmKey shows the signing key's short id and identities and asks the
// user for confirmation on stdin. The config parameter is currently unused.
func ConfirmKey(entity *openpgp.Entity, config *Config) bool {
	fmt.Printf("Configuration file Signed-off by PGP Key: %s\n", entity.PrimaryKey.KeyIdShortString())
	for _, identity := range entity.Identities {
		fmt.Printf("* %s\n", identity.Name)
	}
	fmt.Printf("Proceed (y/n)? ")
	var answer string
	fmt.Scanf("%s", &answer)
	if answer == "y" {
		return true
	}
	return false
}
// HasKey scans entities for a signing-capable primary key whose short key id
// equals keyid, returning the matching entity or an error when none exists.
func HasKey(keyid string, entities *openpgp.EntityList) (*openpgp.Entity, error) {
	for _, candidate := range *entities {
		key := candidate.PrimaryKey
		if key.CanSign() && key.KeyIdShortString() == keyid {
			return candidate, nil
		}
	}
	return nil, fmt.Errorf("cannot find key id: %s", keyid)
}
// Sign produces an armored detached signature of readed using the secret key
// identified by keyid, prompting for the key's passphrase on the terminal.
func (p *PGP) Sign(readed string, keyid string) (string, error) {
	entities, err := ReadKeyRing(p.SecKeyRingPath)
	if err != nil {
		// BUG FIX: previously returned ("", nil), silently swallowing
		// secret-keyring read failures.
		return "", err
	}
	entity, err := HasKey(keyid, entities)
	if err != nil {
		return "", err
	}
	password, err := gopass.GetPass(fmt.Sprintf("Please insert password for key with id '%s': ",
		entity.PrimaryKey.KeyIdShortString()))
	if err != nil {
		return "", err
	}
	// Unlock the private key before signing.
	err = entity.PrivateKey.Decrypt([]byte(password))
	if err != nil {
		return "", err
	}
	buff := new(bytes.Buffer)
	if err := openpgp.ArmoredDetachSign(buff, entity, bytes.NewReader([]byte(readed)), nil); err != nil {
		return "", err
	}
	return buff.String(), nil
}
// Verify checks the armored detached signature signed against the content
// readed using the public keyring, returning the signing entity on success.
func (p *PGP) Verify(readed string, signed string) (*openpgp.Entity, error) {
	entities, err := ReadKeyRing(p.KeyRingPath)
	if err != nil {
		return nil, err
	}
	return openpgp.CheckArmoredDetachedSignature(
		entities,
		strings.NewReader(readed),
		strings.NewReader(signed),
	)
}
|
[
6
] |
package doze
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// TestController provides minimal REST handlers used by the router tests.
type TestController struct{}

// SimpleGet responds with a fixed JSON payload.
func (t TestController) SimpleGet(c *Context) ResponseSender {
	return NewOKJSONResponse(TestStruct{Message: "Simple Get"})
}

// SimplePost echoes back the JSON entity received in the request body.
func (t TestController) SimplePost(c *Context) ResponseSender {
	ts := TestStruct{}
	c.BindJSONEntity(&ts)
	return NewOKJSONResponse(ts)
}

// SimplePut echoes the received entity with " Updated" appended.
func (t TestController) SimplePut(c *Context) ResponseSender {
	ts := TestStruct{}
	c.BindJSONEntity(&ts)
	ts.Message += " Updated"
	return NewOKJSONResponse(ts)
}

// TestStruct is the JSON payload exchanged by the test handlers.
type TestStruct struct {
	Message string
}
// RestRoot is the URL prefix under which all test routes are mounted.
const RestRoot = "/rest/api"

var (
	mux    *http.ServeMux   // request multiplexer wired to the test server
	server *httptest.Server // in-process HTTP server started by setup()
	r      RestRouter       // router populated with the sample routes
)
// setup creates a fresh mux, test server, and router with the three sample
// routes registered. Call teardown afterwards to release the server.
func setup() {
	mux = http.NewServeMux()
	server = httptest.NewServer(mux)
	r = Router("test")
	getRoute := NewRoute().Named("simpleGet").For(RestRoot + "/simpleget").With(http.MethodGet, TestController{}.SimpleGet)
	postRoute := NewRoute().Named("simplePost").For(RestRoot + "/simplepost").With(http.MethodPost, TestController{}.SimplePost)
	putRoute := NewRoute().Named("simplePut").For(RestRoot + "/simpleput").With(http.MethodPut, TestController{}.SimplePut)
	r.Add(getRoute)
	r.Add(postRoute)
	r.Add(putRoute)
}
// teardown shuts down the test server started by setup.
func teardown() {
	server.Close()
}
// TestRestMethodNotAllowed verifies that a GET to a POST-only route yields
// 405 Method Not Allowed.
func TestRestMethodNotAllowed(t *testing.T) {
	setup()
	defer teardown()
	mux.Handle(RestRoot+"/", NewHandler(r))
	resp, err := http.Get(server.URL + RestRoot + "/simplepost")
	// Fail fast on transport errors instead of dereferencing a nil response.
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	assert.Equal(t, http.StatusMethodNotAllowed, resp.StatusCode, "they should be equal")
}
// TestRestMethodNotFound verifies that a request to an unregistered path
// yields 404 Not Found.
func TestRestMethodNotFound(t *testing.T) {
	setup()
	defer teardown()
	mux.Handle(RestRoot+"/", NewHandler(r))
	resp, err := http.Get(server.URL + RestRoot + "/notfound")
	// Fail fast on transport errors instead of dereferencing a nil response.
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	assert.Equal(t, http.StatusNotFound, resp.StatusCode, "they should be equal")
}
// TestRestSimpleGet verifies the GET route returns the expected JSON body.
func TestRestSimpleGet(t *testing.T) {
	setup()
	defer teardown()
	mux.Handle(RestRoot+"/", NewHandler(r))
	resp, err := http.Get(server.URL + RestRoot + "/simpleget")
	// Check transport and read errors; the response body must be closed.
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, `{"Message":"Simple Get"}`, string(body), "they should be equal")
}
// TestRestSimplePost verifies the POST route echoes the posted JSON entity.
func TestRestSimplePost(t *testing.T) {
	setup()
	defer teardown()
	mux.Handle(RestRoot+"/", NewHandler(r))
	resp, err := http.Post(server.URL+RestRoot+"/simplepost", "application/json", strings.NewReader(`{"Message":"Simple Post"}`))
	// Check transport and read errors; the response body must be closed.
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, `{"Message":"Simple Post"}`, string(body), "they should be equal")
}
// TestRestSimplePut verifies the PUT route echoes the entity with
// " Updated" appended.
func TestRestSimplePut(t *testing.T) {
	setup()
	defer teardown()
	mux.Handle(RestRoot+"/", NewHandler(r))
	req, err := http.NewRequest(http.MethodPut, server.URL+RestRoot+"/simpleput", strings.NewReader(`{"Message":"Simple Put"}`))
	if err != nil {
		t.Fatal(err)
	}
	resp, err := http.DefaultClient.Do(req)
	// Check transport and read errors; the response body must be closed.
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, `{"Message":"Simple Put Updated"}`, string(body), "they should be equal")
}
|
[
0
] |
package tarantool
import (
"bytes"
"fmt"
"io"
"gopkg.in/vmihailenco/msgpack.v2"
)
// packData caches pre-encoded msgpack fragments for the most common request
// parameters, plus name→number lookup tables (populated elsewhere).
type packData struct {
	defaultSpace        interface{} // space used when a request passes nil
	packedDefaultSpace  []byte      // pre-encoded (KeySpaceNo, defaultSpace), only when numeric
	packedDefaultIndex  []byte      // pre-encoded (KeyIndexNo, 0)
	packedIterEq        []byte      // pre-encoded (KeyIterator, IterEq)
	packedDefaultLimit  []byte      // pre-encoded (KeyLimit, DefaultLimit)
	packedDefaultOffset []byte      // pre-encoded (KeyOffset, 0)
	packedSingleKey     []byte      // pre-encoded single-key select header
	spaceMap            map[string]uint64              // space name → space number
	indexMap            map[uint64](map[string]uint64) // space number → (index name → index number)
	primaryKeyMap       map[uint64]([]int)             // space number → primary key field numbers
}
// encodeValues2 msgpack-encodes v1 followed by v2 and returns the raw bytes.
// Encoding errors are ignored: this is only used at startup to precompute
// fragments from known-good values.
func encodeValues2(v1, v2 interface{}) []byte {
	var buf bytes.Buffer
	enc := msgpack.NewEncoder(&buf)
	enc.Encode(v1)
	enc.Encode(v2)
	return buf.Bytes()
}
// packSelectSingleKey pre-encodes the header of a single-key select request:
// the KeyKey marker followed by a one-element array length.
func packSelectSingleKey() []byte {
	var buf bytes.Buffer
	enc := msgpack.NewEncoder(&buf)
	enc.EncodeUint32(KeyKey)
	enc.EncodeArrayLen(1)
	return buf.Bytes()
}
// newPackData builds a packData with every static request fragment
// pre-encoded and empty schema lookup tables.
func newPackData(defaultSpace interface{}) *packData {
	data := &packData{
		defaultSpace:        defaultSpace,
		packedDefaultIndex:  encodeValues2(KeyIndexNo, uint32(0)),
		packedIterEq:        encodeValues2(KeyIterator, IterEq),
		packedDefaultLimit:  encodeValues2(KeyLimit, DefaultLimit),
		packedDefaultOffset: encodeValues2(KeyOffset, 0),
		packedSingleKey:     packSelectSingleKey(),
		spaceMap:            make(map[string]uint64),
		indexMap:            make(map[uint64](map[string]uint64)),
		primaryKeyMap:       make(map[uint64]([]int)),
	}
	// Only a numeric default space can be pre-encoded; named spaces are
	// resolved later through spaceMap.
	if spaceNo, ok := defaultSpace.(uint64); ok {
		data.packedDefaultSpace = encodeValues2(KeySpaceNo, spaceNo)
	}
	return data
}
// spaceNo resolves space (or the default space when nil) to its numeric id.
// Integers of any width pass through; names are looked up in the cached
// spaceMap.
func (data *packData) spaceNo(space interface{}) (uint64, error) {
	if space == nil {
		space = data.defaultSpace
	}
	switch value := space.(type) {
	case int:
		return uint64(value), nil
	case uint:
		return uint64(value), nil
	case int64:
		return uint64(value), nil
	case uint64:
		return value, nil
	case int32:
		return uint64(value), nil
	case uint32:
		return uint64(value), nil
	case string:
		if spaceNo, exists := data.spaceMap[value]; exists {
			return spaceNo, nil
		}
		return 0, fmt.Errorf("Unknown space %#v", space)
	default:
		return 0, fmt.Errorf("Wrong space %#v", space)
	}
}
// encodeSpace resolves space to its number and encodes the
// (KeySpaceNo, number) pair onto encoder.
func (data *packData) encodeSpace(space interface{}, encoder *msgpack.Encoder) error {
	number, err := data.spaceNo(space)
	if err != nil {
		return err
	}
	encoder.EncodeUint32(KeySpaceNo)
	encoder.Encode(number)
	return nil
}
// writeSpace emits the space selector: the precomputed default fragment when
// space is nil and a numeric default exists, otherwise a freshly encoded
// (KeySpaceNo, number) pair via encoder.
func (data *packData) writeSpace(space interface{}, w io.Writer, encoder *msgpack.Encoder) error {
	if space == nil && data.packedDefaultSpace != nil {
		// Propagate write failures instead of silently dropping them.
		_, err := w.Write(data.packedDefaultSpace)
		return err
	}
	return data.encodeSpace(space, encoder)
}
// indexNo resolves index to its numeric id within space. A nil index means
// the primary index (0); integers of any width pass through; names are
// looked up in the cached per-space index map.
func (data *packData) indexNo(space interface{}, index interface{}) (uint64, error) {
	if index == nil {
		return 0, nil
	}
	switch value := index.(type) {
	default:
		// BUG FIX: the message previously printed the space, not the index.
		return 0, fmt.Errorf("Wrong index %#v", index)
	case int:
		return uint64(value), nil
	case uint:
		return uint64(value), nil
	case int64:
		return uint64(value), nil
	case uint64:
		return value, nil
	case int32:
		return uint64(value), nil
	case uint32:
		return uint64(value), nil
	case string:
		spaceNo, err := data.spaceNo(space)
		if err != nil {
			// BUG FIX: previously returned (0, nil), hiding the space
			// resolution failure from the caller.
			return 0, err
		}
		spaceData, exists := data.indexMap[spaceNo]
		if !exists {
			return 0, fmt.Errorf("No indexes defined for space %#v", space)
		}
		indexNo, exists := spaceData[value]
		if exists {
			return indexNo, nil
		}
		return 0, fmt.Errorf("Unknown index %#v", index)
	}
}
// writeIndex emits the index selector: the precomputed default fragment when
// index is nil, otherwise a freshly encoded (KeyIndexNo, number) pair.
func (data *packData) writeIndex(space interface{}, index interface{}, w io.Writer, encoder *msgpack.Encoder) error {
	if index == nil {
		// Propagate write failures instead of silently dropping them.
		_, err := w.Write(data.packedDefaultIndex)
		return err
	}
	indexNo, err := data.indexNo(space, index)
	if err != nil {
		return err
	}
	encoder.EncodeUint32(KeyIndexNo)
	encoder.Encode(indexNo)
	return nil
}
|
[
6
] |
package pack
import (
"errors"
"io"
"time"
"github.com/drausin/libri/libri/author/io/enc"
"github.com/drausin/libri/libri/author/io/page"
"github.com/drausin/libri/libri/author/io/print"
cerrors "github.com/drausin/libri/libri/common/errors"
"github.com/drausin/libri/libri/common/id"
"github.com/drausin/libri/libri/common/storage"
"github.com/drausin/libri/libri/librarian/api"
)
// EntryPacker creates entry documents from raw content.
type EntryPacker interface {
	// Pack prints pages from the content, encrypts their metadata, and binds them together
	// into an entry *api.Document. It also returns the plaintext entry metadata.
	Pack(content io.Reader, mediaType string, keys *enc.EEK, authorPub []byte) (
		*api.Document, *api.EntryMetadata, error)
}
// NewEntryPacker creates a new Packer instance.
func NewEntryPacker(
params *print.Parameters,
metadataEnc enc.EntryMetadataEncrypter,
docSL storage.DocumentSLD,
) EntryPacker {
pageS := page.NewStorerLoader(docSL)
return &entryPacker{
params: params,
metadataEnc: metadataEnc,
printer: print.NewPrinter(params, pageS),
pageS: pageS,
docL: docSL,
}
}
// entryPacker is the default EntryPacker implementation.
type entryPacker struct {
	params      *print.Parameters          // print/page sizing parameters
	metadataEnc enc.EntryMetadataEncrypter // encrypts entry metadata
	printer     print.Printer              // prints content into stored pages
	pageS       page.Storer                // stores printed pages
	docL        storage.DocumentLoader     // loads page docs when binding entries
}
// Pack prints content into encrypted pages, encrypts the resulting entry
// metadata, and binds everything into a single entry document. It returns
// the document together with the plaintext metadata.
func (p *entryPacker) Pack(content io.Reader, mediaType string, keys *enc.EEK, authorPub []byte) (
	*api.Document, *api.EntryMetadata, error) {
	pageKeys, metadata, err := p.printer.Print(content, mediaType, keys, authorPub)
	if err != nil {
		return nil, nil, err
	}
	// TODO (drausin) add additional metadata K/V here
	// - relative filepath
	// - file mode permissions
	encMetadata, err := p.metadataEnc.Encrypt(metadata, keys)
	if err != nil {
		return nil, nil, err
	}
	doc, err := newEntryDoc(authorPub, pageKeys, encMetadata, p.docL)
	return doc, metadata, err
}
// EntryUnpacker writes individual pages to the content io.Writer.
type EntryUnpacker interface {
	// Unpack extracts the individual pages from a document and stitches them together to write
	// to the content io.Writer. It also returns the decrypted entry metadata.
	Unpack(content io.Writer, entryDoc *api.Document, keys *enc.EEK) (*api.EntryMetadata, error)
}
// entryUnpacker is the default EntryUnpacker implementation.
type entryUnpacker struct {
	params      *print.Parameters     // print/page sizing parameters
	metadataDec enc.MetadataDecrypter // decrypts entry metadata
	scanner     print.Scanner         // reassembles stored pages into content
}
// NewEntryUnpacker creates a new EntryUnpacker with the given parameters, metadata decrypter, and
// storage.DocumentStorerLoader.
func NewEntryUnpacker(
params *print.Parameters,
metadataDec enc.MetadataDecrypter,
docSL storage.DocumentSLD,
) EntryUnpacker {
pageL := page.NewStorerLoader(docSL)
return &entryUnpacker{
params: params,
metadataDec: metadataDec,
scanner: print.NewScanner(params, pageL),
}
}
// Unpack decrypts the entry's metadata, resolves its page keys (single- or
// multi-page form), and streams the reassembled pages into content.
func (u *entryUnpacker) Unpack(content io.Writer, entryDoc *api.Document, keys *enc.EEK) (
	*api.EntryMetadata, error) {
	// Guard the assertion so a non-entry document yields an error instead
	// of a panic.
	docEntry, ok := entryDoc.Contents.(*api.Document_Entry)
	if !ok {
		return nil, api.ErrUnexpectedDocumentType
	}
	entry := docEntry.Entry
	encMetadata, err := enc.NewEncryptedMetadata(
		entry.MetadataCiphertext,
		entry.MetadataCiphertextMac,
	)
	if err != nil {
		return nil, err
	}
	metadata, err := u.metadataDec.Decrypt(encMetadata, keys)
	if err != nil {
		return nil, err
	}
	var pageKeys []id.ID
	if entry.Page != nil {
		// Single-page entry: the page document is embedded directly.
		_, docKey, err2 := api.GetPageDocument(entry.Page)
		if err2 != nil {
			return nil, err2
		}
		pageKeys = []id.ID{docKey}
	} else if entry.PageKeys != nil {
		// Multi-page entry: collect the referenced page keys.
		pageKeys, err = api.GetEntryPageKeys(entryDoc)
		cerrors.MaybePanic(err) // should never happen
	} else {
		return nil, api.ErrUnexpectedDocumentType
	}
	return metadata, u.scanner.Scan(content, pageKeys, keys, metadata)
}
// newEntryDoc wraps the author key, page references, and encrypted metadata
// into an entry document, inlining the page when there is exactly one.
func newEntryDoc(
	authorPub []byte,
	pageIDs []id.ID,
	encMeta *enc.EncryptedMetadata,
	docL storage.DocumentLoader,
) (*api.Document, error) {
	var (
		entry *api.Entry
		err   error
	)
	if len(pageIDs) == 1 {
		entry, err = newSinglePageEntry(authorPub, pageIDs[0], encMeta, docL)
	} else {
		entry, err = newMultiPageEntry(authorPub, pageIDs, encMeta)
	}
	if err != nil {
		return nil, err
	}
	return &api.Document{
		Contents: &api.Document_Entry{Entry: entry},
	}, nil
}
// newSinglePageEntry loads the single page document and embeds it directly
// in the entry, avoiding the page-key indirection used for multi-page
// entries.
func newSinglePageEntry(
	authorPub []byte,
	pageKey id.ID,
	encMeta *enc.EncryptedMetadata,
	docL storage.DocumentLoader,
) (*api.Entry, error) {
	pageDoc, err := docL.Load(pageKey)
	if err != nil {
		return nil, err
	}
	pageContent, ok := pageDoc.Contents.(*api.Document_Page)
	if !ok {
		return nil, errors.New("not a page")
	}
	entry := &api.Entry{
		AuthorPublicKey:       authorPub,
		Page:                  pageContent.Page,
		CreatedTime:           uint32(time.Now().Unix()),
		MetadataCiphertext:    encMeta.Ciphertext,
		MetadataCiphertextMac: encMeta.CiphertextMAC,
	}
	return entry, nil
}
// newMultiPageEntry references every page by its key bytes rather than
// embedding the page documents themselves.
func newMultiPageEntry(
	authorPub []byte, pageKeys []id.ID, encMeta *enc.EncryptedMetadata,
) (*api.Entry, error) {
	keyBytes := make([][]byte, 0, len(pageKeys))
	for _, pageKey := range pageKeys {
		keyBytes = append(keyBytes, pageKey.Bytes())
	}
	entry := &api.Entry{
		AuthorPublicKey:       authorPub,
		PageKeys:              keyBytes,
		CreatedTime:           uint32(time.Now().Unix()),
		MetadataCiphertext:    encMeta.Ciphertext,
		MetadataCiphertextMac: encMeta.CiphertextMAC,
	}
	return entry, nil
}
|
[
5
] |
package main
import "sort"
// main is an empty entry point; this file only hosts the solution function.
func main() {
}
// leetcode 2171: minimum number of magic beans to remove.
// minimumRemoval returns the minimum total number of beans to remove so that
// every remaining non-empty bag holds the same count. After sorting
// ascending, keeping value beans[i] in the n-i bags from position i onward
// (and emptying all smaller bags) costs sum - beans[i]*(n-i); the answer is
// the minimum of that cost over all i.
func minimumRemoval(beans []int) int64 {
	sort.Ints(beans)
	total := int64(0)
	for _, b := range beans {
		total += int64(b)
	}
	n := len(beans)
	best := total // removing everything is always feasible
	for i, b := range beans {
		cost := total - int64(b)*int64(n-i)
		if cost < best {
			best = cost
		}
	}
	return best
}
// min returns the smaller of two int64 values.
func min(a, b int64) int64 {
	if a <= b {
		return a
	}
	return b
}
|
[
0
] |
package crumb
import (
	"context"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha256"
	"crypto/subtle"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	_ "log"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/aaronland/go-string/random"
)
// init registers this implementation under the "encrypted" scheme so crumb
// constructors can resolve "encrypted://" URIs.
func init() {
	ctx := context.Background()
	RegisterCrumb(ctx, "encrypted", NewEncryptedCrumb)
}
// EncryptedCrumb implements the Crumb interface with SHA-256 hashed,
// AES-CFB encrypted crumbs that can optionally expire.
type EncryptedCrumb struct {
	Crumb
	extra     string // extra entropy mixed into every crumb base
	separator string // separates timestamp and hash in the plaintext crumb
	secret    string // AES key material; must be 16, 24 or 32 bytes long
	ttl       int64  // lifetime in seconds; <= 0 disables expiry
	key       string // fixed crumb key; empty means "use the request path"
}
// NewRandomEncryptedCrumbURI builds an "encrypted://" crumb URI with a
// random alphanumeric secret, an 8-character random extra value, a ":"
// separator, the given TTL (seconds) and crumb key.
func NewRandomEncryptedCrumbURI(ctx context.Context, ttl int, key string) (string, error) {
	opts := random.DefaultOptions()
	opts.AlphaNumeric = true
	secret, err := random.String(opts)
	if err != nil {
		return "", err
	}
	opts.Length = 8
	extra, err := random.String(opts)
	if err != nil {
		return "", err
	}
	params := url.Values{}
	params.Set("extra", extra)
	params.Set("separator", ":")
	params.Set("secret", secret)
	params.Set("ttl", strconv.Itoa(ttl))
	params.Set("key", key)
	return fmt.Sprintf("encrypted://?%s", params.Encode()), nil
}
// NewEncryptedCrumb constructs an EncryptedCrumb from an "encrypted://" URI.
// Required query parameters: extra, separator, secret, ttl; key is optional.
func NewEncryptedCrumb(ctx context.Context, uri string) (Crumb, error) {
	u, err := url.Parse(uri)
	if err != nil {
		return nil, err
	}
	q := u.Query()
	extra := q.Get("extra")
	separator := q.Get("separator")
	secret := q.Get("secret")
	str_ttl := q.Get("ttl")
	switch {
	case extra == "":
		return nil, errors.New("Empty extra= key")
	case separator == "":
		return nil, errors.New("Empty separator= key")
	case secret == "":
		return nil, errors.New("Empty secret= key")
	case str_ttl == "":
		return nil, errors.New("Empty ttl= key")
	}
	ttl, err := strconv.ParseInt(str_ttl, 10, 64)
	if err != nil {
		return nil, err
	}
	cr := &EncryptedCrumb{
		extra:     extra,
		separator: separator,
		secret:    secret,
		ttl:       ttl,
		key:       q.Get("key"), // empty string means "use the request path"
	}
	return cr, nil
}
// Generate produces an encrypted crumb for req: the hex SHA-256 of the
// crumb base joined with the current Unix timestamp by cr.separator, then
// AES-encrypted and hex-encoded.
func (cr *EncryptedCrumb) Generate(req *http.Request, extra ...string) (string, error) {
	crumb_base, err := cr.crumbBase(req, extra...)
	if err != nil {
		return "", err
	}
	crumb_hash, err := cr.hashCrumb(crumb_base)
	if err != nil {
		return "", err
	}
	str_ts := strconv.FormatInt(time.Now().Unix(), 10)
	plain := strings.Join([]string{str_ts, crumb_hash}, cr.separator)
	return cr.encryptCrumb(plain)
}
// Validate decrypts enc_var, splits it into timestamp and hash, recomputes
// the expected hash for req (plus extras), compares the two, and finally
// enforces the TTL when one is configured. It returns (true, nil) only for
// a fully valid, unexpired crumb.
func (cr *EncryptedCrumb) Validate(req *http.Request, enc_var string, extra ...string) (bool, error) {
	crumb_var, err := cr.decryptCrumb(enc_var)
	if err != nil {
		return false, err
	}
	// The plaintext layout is "<unix timestamp><separator><hex hash>".
	crumb_parts := strings.Split(crumb_var, cr.separator)
	if len(crumb_parts) != 2 {
		return false, errors.New("Invalid crumb")
	}
	crumb_ts := crumb_parts[0]
	crumb_test := crumb_parts[1]
	// Recompute the hash from the request and compare it against the hash
	// embedded in the crumb.
	crumb_base, err := cr.crumbBase(req, extra...)
	if err != nil {
		return false, err
	}
	crumb_hash, err := cr.hashCrumb(crumb_base)
	if err != nil {
		return false, err
	}
	ok, err := cr.compareHashes(crumb_hash, crumb_test)
	if err != nil {
		return false, err
	}
	if !ok {
		return false, errors.New("Crumb mismatch")
	}
	// A ttl of 0 (or negative) disables expiry checks.
	if cr.ttl > 0 {
		then, err := strconv.ParseInt(crumb_ts, 10, 64)
		if err != nil {
			return false, err
		}
		now := time.Now()
		ts := now.Unix()
		if ts-then > cr.ttl {
			return false, errors.New("Crumb has expired")
		}
	}
	return true, nil
}
// Key returns the crumb key: the configured fixed key when present,
// otherwise the request's URL path.
func (cr *EncryptedCrumb) Key(req *http.Request) string {
	if cr.key == "" {
		return req.URL.Path
	}
	return cr.key
}
// crumbBase derives the plaintext basis of a crumb from the crumb key, the
// request's user agent, the configured extra entropy, and any caller
// extras, joined with "-".
func (cr *EncryptedCrumb) crumbBase(req *http.Request, extra ...string) (string, error) {
	parts := []string{cr.Key(req), req.UserAgent(), cr.extra}
	parts = append(parts, extra...)
	return strings.Join(parts, "-"), nil
}
// compareHashes reports whether two hex-encoded hashes are equal, using a
// constant-time comparison so the match position cannot leak via timing.
func (cr *EncryptedCrumb) compareHashes(this_enc string, that_enc string) (bool, error) {
	if len(this_enc) != len(that_enc) {
		return false, nil
	}
	// SECURITY FIX: a plain == comparison short-circuits on the first
	// differing byte; subtle.ConstantTimeCompare does not.
	match := subtle.ConstantTimeCompare([]byte(this_enc), []byte(that_enc)) == 1
	return match, nil
}
// hashCrumb returns the hex-encoded SHA-256 digest of raw. Note this is a
// plain (unkeyed) hash; secrecy of the crumb comes from encryptCrumb.
// The error return is always nil; it is kept for symmetry with the other
// crumb helpers.
func (cr *EncryptedCrumb) hashCrumb(raw string) (string, error) {
	sum := sha256.Sum256([]byte(raw))
	return hex.EncodeToString(sum[:]), nil
}
// https://gist.github.com/manishtpatel/8222606
// https://github.com/blaskovicz/go-cryptkeeper/blob/master/encrypted_string.go
// encryptCrumb encrypts text with AES-CFB under cr.secret and a fresh random
// IV, returning hex(IV || ciphertext).
// NOTE(review): aes.NewCipher requires the secret to be exactly 16, 24, or
// 32 bytes — shorter/longer secrets make every call fail; confirm the secret
// is validated at construction time. CFB provides confidentiality only (no
// authentication); integrity is checked separately via the SHA-256 hash
// embedded in the crumb (see Validate).
func (cr *EncryptedCrumb) encryptCrumb(text string) (string, error) {
	plaintext := []byte(text)
	secret := []byte(cr.secret)
	block, err := aes.NewCipher(secret)
	if err != nil {
		return "", err
	}
	// Layout: the first aes.BlockSize bytes hold the IV; ciphertext follows.
	ciphertext := make([]byte, aes.BlockSize+len(plaintext))
	iv := ciphertext[:aes.BlockSize]
	// Fill the IV from the CSPRNG (crypto/rand).
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		return "", err
	}
	cipher.NewCFBEncrypter(block, iv).XORKeyStream(ciphertext[aes.BlockSize:], plaintext)
	return hex.EncodeToString(ciphertext), nil
}
// decryptCrumb reverses encryptCrumb: it hex-decodes enc_crumb, splits off
// the leading IV, and AES-CFB-decrypts the remainder with cr.secret.
func (cr *EncryptedCrumb) decryptCrumb(enc_crumb string) (string, error) {
	ciphertext, err := hex.DecodeString(enc_crumb)
	if err != nil {
		return "", err
	}
	secret := []byte(cr.secret)
	block, err := aes.NewCipher(secret)
	if err != nil {
		return "", err
	}
	// Must contain at least the IV; anything shorter is malformed input.
	if byteLen := len(ciphertext); byteLen < aes.BlockSize {
		return "", fmt.Errorf("invalid cipher size %d.", byteLen)
	}
	iv := ciphertext[:aes.BlockSize]
	ciphertext = ciphertext[aes.BlockSize:]
	// Decrypt in place over the remaining bytes.
	cipher.NewCFBDecrypter(block, iv).XORKeyStream(ciphertext, ciphertext)
	return string(ciphertext), nil
}
|
[
6
] |
package sort
import (
"log"
)
// BubbleSort sorts nums in place in ascending order using bubble sort.
// Each pass bubbles the largest remaining element to the end, so the
// unsorted prefix shrinks by one every iteration.
func BubbleSort(nums []int) {
	for limit := len(nums) - 1; limit > 0; limit-- {
		for j := 0; j < limit; j++ {
			if nums[j] > nums[j+1] {
				nums[j], nums[j+1] = nums[j+1], nums[j]
			}
		}
	}
}
// QuickSort sorts nums in place in ascending order by running quicksort
// over the half-open range [0, len(nums)).
func QuickSort(nums []int) {
	n := len(nums)
	quickSort(nums, 0, n)
}
// quickSort recursively sorts the half-open range nums[left:right].
func quickSort(nums []int, left int, right int) {
	// Ranges of length 0 or 1 are already sorted.
	if left >= right {
		return
	}
	p := getPivot(nums, left, right)
	quickSort(nums, left, p)
	quickSort(nums, p+1, right)
}
// getPivot partitions nums[left:right] around the leftmost element
// (Lomuto-style with the first element as pivot) and returns the pivot's
// final index. Afterwards every element left of the returned index is
// strictly smaller than the pivot value.
func getPivot(nums []int, left int, right int) int {
	pivot := left
	index := left + 1 // next slot for an element smaller than the pivot
	for i := index; i < right; i++ {
		if nums[i] < nums[pivot] {
			swap(nums, i, index)
			index++
		}
	}
	// NOTE(review): debug logging left in library code — runs on every
	// partition. Remove together with the "log" import once confirmed
	// it is no longer needed.
	log.Print(nums, index)
	// Move the pivot into its sorted position, just before the first
	// not-smaller element.
	swap(nums, pivot, index-1)
	return index - 1
}
// PickSort sorts nums in place in ascending order using selection sort:
// each pass picks the minimum of the unsorted suffix and swaps it to the
// front of that suffix.
func PickSort(nums []int) {
	for i := range nums {
		min := i
		for j := i + 1; j < len(nums); j++ {
			if nums[j] < nums[min] {
				min = j
			}
		}
		nums[i], nums[min] = nums[min], nums[i]
	}
}
// swap exchanges the elements at indices i and j of nums in place.
func swap(nums []int, i int, j int) {
	nums[i], nums[j] = nums[j], nums[i]
}
|
[
6
] |
// package admin provides an implementation of the API described in auth/schema/adminschema.
package admin
import (
"net/http"
"github.com/coreos/go-oidc/oidc"
"github.com/go-gorp/gorp"
"github.com/coreos/dex/client"
"github.com/coreos/dex/db"
"github.com/coreos/dex/schema/adminschema"
"github.com/coreos/dex/user"
"github.com/coreos/dex/user/manager"
)
// AdminAPI provides the logic necessary to implement the Admin API.
type AdminAPI struct {
	userManager        *manager.UserManager          // creates users (see CreateAdmin)
	userRepo           user.UserRepo                 // reads users and admin counts
	passwordInfoRepo   user.PasswordInfoRepo         // reads password info for GetAdmin
	clientIdentityRepo client.ClientIdentityRepo     // registers OIDC clients
	localConnectorID   string                        // connector new admins are created on; must be non-blank
}
// NewAdminAPI constructs an AdminAPI backed by the given DbMap and user
// manager. It panics when localConnectorID is blank, since every admin
// created through this API is attached to that connector.
// TODO(ericchiang): Swap the DbMap for a storage interface. See #278
func NewAdminAPI(dbMap *gorp.DbMap, userManager *manager.UserManager, localConnectorID string) *AdminAPI {
	if localConnectorID == "" {
		panic("must specify non-blank localConnectorID")
	}
	api := &AdminAPI{
		userManager:      userManager,
		localConnectorID: localConnectorID,
	}
	api.userRepo = db.NewUserRepo(dbMap)
	api.passwordInfoRepo = db.NewPasswordInfoRepo(dbMap)
	api.clientIdentityRepo = db.NewClientIdentityRepo(dbMap)
	return api
}
// Error is the error type returned by AdminAPI methods.
type Error struct {
	// Type is a short machine-readable error category; it doubles as the
	// error message (see Error()).
	Type string
	// The HTTP Code to return for this type of error.
	Code int
	// Desc is a human-readable description safe to expose to API consumers.
	Desc string
	// The underlying error - not to be consumed by external users.
	Internal error
}

// Error implements the error interface by returning the error category.
func (e Error) Error() string {
	return e.Type
}
// errorMaker returns a constructor that wraps an underlying error in an
// Error carrying the given type, description, and HTTP status code. Used to
// populate errorMap concisely.
func errorMaker(typ string, desc string, code int) func(internal error) Error {
	return func(internal error) Error {
		e := Error{
			Type: typ,
			Code: code,
			Desc: desc,
		}
		e.Internal = internal
		return e
	}
}
// errorMap translates well-known storage-layer errors into API Errors with
// appropriate HTTP status codes; errors not listed here fall through to a
// generic 500 in mapError.
var (
	errorMap = map[error]func(error) Error{
		user.ErrorNotFound:       errorMaker("resource_not_found", "Resource could not be found.", http.StatusNotFound),
		user.ErrorDuplicateEmail: errorMaker("bad_request", "Email already in use.", http.StatusBadRequest),
		user.ErrorInvalidEmail:   errorMaker("bad_request", "invalid email.", http.StatusBadRequest),
	}
)
// GetAdmin looks up the user and password info for id and assembles them
// into an adminschema.Admin. Storage errors are translated via mapError.
func (a *AdminAPI) GetAdmin(id string) (adminschema.Admin, error) {
	u, err := a.userRepo.Get(nil, id)
	if err != nil {
		return adminschema.Admin{}, mapError(err)
	}
	pw, err := a.passwordInfoRepo.Get(nil, id)
	if err != nil {
		return adminschema.Admin{}, mapError(err)
	}
	admin := adminschema.Admin{Id: id}
	admin.Email = u.Email
	admin.Password = string(pw.Password)
	return admin, nil
}
// CreateAdmin creates a new admin user on the local connector and returns
// the new user's ID. Storage errors are translated via mapError.
func (a *AdminAPI) CreateAdmin(admn adminschema.Admin) (string, error) {
	newUser := user.User{
		Email: admn.Email,
		Admin: true,
	}
	userID, err := a.userManager.CreateUser(newUser, user.Password(admn.Password), a.localConnectorID)
	if err != nil {
		return "", mapError(err)
	}
	return userID, nil
}
// GetState reports whether at least one admin user exists yet, which drives
// the initial-setup flow.
func (a *AdminAPI) GetState() (adminschema.State, error) {
	admins, err := a.userRepo.GetAdminCount(nil)
	if err != nil {
		// Translate storage errors through mapError for consistency with
		// the other AdminAPI methods (previously the raw error leaked out).
		return adminschema.State{}, mapError(err)
	}
	state := adminschema.State{}
	state.AdminUserCreated = admins > 0
	return state, nil
}
// ClientRegistrationRequest is the payload for CreateClient: the OIDC client
// metadata to register plus whether the client gets admin privileges.
type ClientRegistrationRequest struct {
	IsAdmin bool                `json:"isAdmin"`
	Client  oidc.ClientMetadata `json:"client"`
}
// CreateClient validates and registers a new OIDC client, returning its
// generated ID and secret along with the metadata it was registered with.
func (a *AdminAPI) CreateClient(req ClientRegistrationRequest) (oidc.ClientRegistrationResponse, error) {
	if err := req.Client.Valid(); err != nil {
		return oidc.ClientRegistrationResponse{}, mapError(err)
	}
	// metadata is guaranteed to have at least one redirect_uri by the
	// Valid() check above, so indexing RedirectURIs[0] is safe.
	id, err := oidc.GenClientID(req.Client.RedirectURIs[0].Host)
	if err != nil {
		return oidc.ClientRegistrationResponse{}, mapError(err)
	}
	c, err := a.clientIdentityRepo.New(id, req.Client, req.IsAdmin)
	if err != nil {
		return oidc.ClientRegistrationResponse{}, mapError(err)
	}
	return oidc.ClientRegistrationResponse{ClientID: c.ID, ClientSecret: c.Secret, ClientMetadata: req.Client}, nil
}
// mapError converts a storage-layer error into an API Error. Known errors
// use their entry in errorMap; anything else becomes an opaque 500
// server_error with the original error kept internally.
func mapError(e error) error {
	mapped, ok := errorMap[e]
	if !ok {
		return Error{
			Type:     "server_error",
			Code:     http.StatusInternalServerError,
			Desc:     "",
			Internal: e,
		}
	}
	return mapped(e)
}
|
[
6
] |
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package route
import (
"strings"
"github.com/vmware/vsphere-automation-sdk-go/runtime/protocol/client"
"github.com/vmware/vsphere-automation-sdk-go/services/nsxt/infra/realized_state"
"github.com/vmware/vsphere-automation-sdk-go/services/nsxt/infra/tier_1s"
"github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model"
"github.com/vmware/vsphere-automation-sdk-go/services/nsxt/search"
)
// NsxtBroker is an internal interface to access nsxt backend
type NsxtBroker interface {
	// QueryEntities runs a search query against NSX-T; "/" characters in
	// the query string are escaped before submission.
	QueryEntities(queryParam string) (model.SearchResponse, error)
	// CreateStaticRoute creates or updates (PATCH semantics) a static route
	// on the Tier-1 router identified by routerPath.
	CreateStaticRoute(routerPath string, staticRouteID string, staticRoute model.StaticRoutes) error
	// DeleteStaticRoute removes a static route from the Tier-1 router
	// identified by routerPath.
	DeleteStaticRoute(routerPath string, staticRouteID string) error
	// ListRealizedEntities lists the realized-state entities under the
	// given intent path.
	ListRealizedEntities(path string) (model.GenericPolicyRealizedResourceListResult, error)
}
// nsxtBroker includes NSXT API clients and implements NsxtBroker.
type nsxtBroker struct {
	// TODO: will add tier0 static routes client
	tier1StaticRoutesClient tier_1s.StaticRoutesClient             // Tier-1 static route CRUD
	realizedEntitiesClient  realized_state.RealizedEntitiesClient  // realized-state reads
	queryClient             search.QueryClient                     // search API
}
// NewNsxtBroker creates a new NsxtBroker to the NSXT API using the given
// connector. The error return is always nil; it is kept for interface
// flexibility.
func NewNsxtBroker(connector client.Connector) (NsxtBroker, error) {
	broker := &nsxtBroker{}
	broker.tier1StaticRoutesClient = tier_1s.NewStaticRoutesClient(connector)
	broker.realizedEntitiesClient = realized_state.NewRealizedEntitiesClient(connector)
	broker.queryClient = search.NewQueryClient(connector)
	return broker, nil
}
// QueryEntities runs a search query, escaping "/" so that paths embedded in
// the query are treated literally by the NSX-T search API.
func (b *nsxtBroker) QueryEntities(queryParam string) (model.SearchResponse, error) {
	escaped := strings.ReplaceAll(queryParam, "/", "\\/")
	return b.queryClient.List(escaped, nil, nil, nil, nil, nil)
}
// CreateStaticRoute creates or updates the static route on the Tier-1
// router named by routerPath (PATCH semantics).
func (b *nsxtBroker) CreateStaticRoute(routerPath string, staticRouteID string, staticRoute model.StaticRoutes) error {
	return b.tier1StaticRoutesClient.Patch(getRouterID(routerPath), staticRouteID, staticRoute)
}
// DeleteStaticRoute removes the static route from the Tier-1 router named
// by routerPath.
func (b *nsxtBroker) DeleteStaticRoute(routerPath string, staticRouteID string) error {
	return b.tier1StaticRoutesClient.Delete(getRouterID(routerPath), staticRouteID)
}
// ListRealizedEntities lists the realized-state entities under the given
// intent path.
func (b *nsxtBroker) ListRealizedEntities(path string) (model.GenericPolicyRealizedResourceListResult, error) {
	return b.realizedEntitiesClient.List(path, nil)
}
// getRouterID returns the final "/"-separated segment of routerPath, i.e.
// the router ID of an NSX-T policy path such as "/infra/tier-1s/<id>".
// A path without "/" is returned unchanged; a trailing "/" yields "".
func getRouterID(routerPath string) string {
	// LastIndex returns -1 when no "/" exists, so idx+1 is always a valid
	// start offset and the whole string is returned in that case.
	idx := strings.LastIndex(routerPath, "/")
	return routerPath[idx+1:]
}
|
[
6
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.