| file_name (large string, lengths 4-140) | prefix (large string, lengths 0-39k) | suffix (large string, lengths 0-36.1k) | middle (large string, lengths 0-29.4k) | fim_type (large string, 4 classes) |
|---|---|---|---|---|
routex.go | package routex
import (
"broker"
"encoding/json"
"fmt"
"github.com/googollee/go-pubsub"
"github.com/googollee/go-rest"
"logger"
"math/rand"
"model"
"net/http"
"net/url"
"notifier"
"os"
"routex/model"
"sync"
"time"
)
type RouteMap struct {
rest.Service `prefix:"/v3/routex" mime:"application/json"`
updateIdentity rest.SimpleNode `route:"/_inner/update_identity" method:"POST"`
updateExfee rest.SimpleNode `route:"/_inner/update_exfee" method:"POST"`
searchRoutex rest.SimpleNode `route:"/_inner/search/crosses" method:"POST"`
getRoutex rest.SimpleNode `route:"/_inner/users/:user_id/crosses/:cross_id" method:"GET"`
setUser rest.SimpleNode `route:"/users/crosses/:cross_id" method:"POST"`
updateBreadcrums rest.SimpleNode `route:"/breadcrumbs" method:"POST"`
updateBreadcrumsInner rest.SimpleNode `route:"/_inner/breadcrumbs/users/:user_id" method:"POST"`
getBreadcrums rest.SimpleNode `route:"/breadcrumbs/crosses/:cross_id" method:"GET"`
getUserBreadcrums rest.SimpleNode `route:"/breadcrumbs/crosses/:cross_id/users/:user_id" method:"GET"`
getUserBreadcrumsInner rest.SimpleNode `route:"/_inner/breadcrumbs/users/:user_id" method:"GET"`
searchGeomarks rest.SimpleNode `route:"/_inner/geomarks/crosses/:cross_id" method:"GET"`
getGeomarks rest.SimpleNode `route:"/geomarks/crosses/:cross_id" method:"GET"`
setGeomark rest.SimpleNode `route:"/geomarks/crosses/:cross_id/:mark_type/:kind.:mark_id" method:"PUT"`
deleteGeomark rest.SimpleNode `route:"/geomarks/crosses/:cross_id/:mark_type/:kind.:mark_id" method:"DELETE"`
stream rest.Streaming `route:"/crosses/:cross_id" method:"WATCH"`
options rest.SimpleNode `route:"/crosses/:cross_id" method:"OPTIONS"`
sendNotification rest.SimpleNode `route:"/notification/crosses/:cross_id" method:"POST"`
rand *rand.Rand
routexRepo rmodel.RoutexRepo
breadcrumbCache rmodel.BreadcrumbCache
breadcrumbsRepo rmodel.BreadcrumbsRepo
geomarksRepo rmodel.GeomarksRepo
conversion rmodel.GeoConversionRepo
platform *broker.Platform
config *model.Config
tutorialDatas map[int64][]rmodel.TutorialData
pubsub *pubsub.Pubsub
castLocker sync.RWMutex
quit chan int
}
func New(routexRepo rmodel.RoutexRepo, breadcrumbCache rmodel.BreadcrumbCache, breadcrumbsRepo rmodel.BreadcrumbsRepo, geomarksRepo rmodel.GeomarksRepo, conversion rmodel.GeoConversionRepo, platform *broker.Platform, config *model.Config) (*RouteMap, error) {
tutorialDatas := make(map[int64][]rmodel.TutorialData)
for _, userId := range config.TutorialBotUserIds {
file := config.Routex.TutorialDataFile[fmt.Sprintf("%d", userId)]
f, err := os.Open(file)
if err != nil {
return nil, fmt.Errorf("can't find tutorial file %s for tutorial bot %d", file, userId)
}
var datas []rmodel.TutorialData
decoder := json.NewDecoder(f)
err = decoder.Decode(&datas)
if err != nil {
return nil, fmt.Errorf("invalid tutorial data %s for tutorial bot %d: %s", file, userId, err)
}
tutorialDatas[userId] = datas
}
ret := &RouteMap{
rand: rand.New(rand.NewSource(time.Now().Unix())),
routexRepo: routexRepo,
breadcrumbCache: breadcrumbCache,
breadcrumbsRepo: breadcrumbsRepo,
geomarksRepo: geomarksRepo, conversion: conversion,
platform: platform,
tutorialDatas: tutorialDatas,
config: config,
pubsub: pubsub.New(20),
quit: make(chan int),
}
go ret.tutorialGenerator()
return ret, nil
}
func (m RouteMap) | (ctx rest.Context, identity model.Identity) {
id := rmodel.Identity{
Identity: identity,
Type: "identity",
Action: "update",
}
m.pubsub.Publish(m.identityName(identity), id)
}
func (m RouteMap) UpdateExfee(ctx rest.Context, invitations model.Invitation) {
var crossId int64
var action string
ctx.Bind("cross_id", &crossId)
ctx.Bind("action", &action)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
if action != "join" && action != "remove" {
ctx.Return(http.StatusBadRequest, "invalid action: %s", action)
return
}
id := rmodel.Invitation{
Identity: invitations.Identity,
Notifications: invitations.Notifications,
Type: "invitation",
Action: action,
}
m.pubsub.Publish(m.publicName(crossId), id)
}
type UserCrossSetup struct {
SaveBreadcrumbs bool `json:"save_breadcrumbs,omitempty"`
AfterInSeconds int `json:"after_in_seconds,omitempty"`
}
func (m RouteMap) SetUser(ctx rest.Context, setup UserCrossSetup) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var crossId int64
ctx.Bind("cross_id", &crossId)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
if setup.AfterInSeconds == 0 {
setup.AfterInSeconds = 60 * 60
}
m.switchWindow(crossId, token.Identity, setup.SaveBreadcrumbs, setup.AfterInSeconds)
}
func (m RouteMap) SearchRoutex(ctx rest.Context, crossIds []int64) {
ret, err := m.routexRepo.Search(crossIds)
if err != nil {
logger.ERROR("search for route failed: %s with %+v", err, crossIds)
ctx.Return(http.StatusInternalServerError, err)
return
}
ctx.Render(ret)
}
type RoutexInfo struct {
InWindow *bool `json:"in_window"`
Objects []rmodel.Geomark `json:"objects"`
}
func (m RouteMap) GetRoutex(ctx rest.Context) {
var userId, crossId int64
ctx.Bind("cross_id", &crossId)
ctx.Bind("user_id", &userId)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
endAt, err := m.breadcrumbsRepo.GetWindowEnd(userId, crossId)
if err != nil {
logger.ERROR("get user %d cross %d routex failed: %s", userId, crossId, err)
ctx.Return(http.StatusInternalServerError, err)
return
}
ret := RoutexInfo{}
if endAt != 0 {
ret.InWindow = new(bool)
*ret.InWindow = endAt >= time.Now().Unix()
}
query := make(url.Values)
query.Set("user_id", fmt.Sprintf("%d", userId))
cross, err := m.platform.FindCross(crossId, query)
if err == nil {
ret.Objects = m.getObjects(cross, true)
} else {
logger.ERROR("get user %d cross %d failed: %s", userId, crossId, err)
ctx.Return(http.StatusInternalServerError, err)
return
}
ctx.Render(ret)
}
func (m RouteMap) Stream(ctx rest.StreamContext) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var forceOpen bool
var coordinate string
ctx.Bind("force_window_open", &forceOpen)
ctx.Bind("coordinate", &coordinate)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
now := time.Now()
endAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil || endAt <= now.Unix() {
if !forceOpen {
ctx.Return(http.StatusForbidden, "not in window")
return
}
after := 15 * 60
if endAt == 0 {
after = 60 * 60
}
var openAfter int
ctx.BindReset()
ctx.Bind("force_window_open", &openAfter)
if ctx.BindError() == nil {
after = openAfter
}
endAt = now.Unix() + int64(after)
m.switchWindow(int64(token.Cross.ID), token.Identity, true, after)
}
c := make(chan interface{}, 10)
m.pubsub.Subscribe(m.publicName(int64(token.Cross.ID)), c)
if token.Cross.By.UserID == m.config.Routex.TutorialCreator {
m.pubsub.Subscribe(m.tutorialName(), c)
}
for _, inv := range token.Cross.Exfee.Invitations {
m.pubsub.Subscribe(m.identityName(inv.Identity), c)
}
logger.DEBUG("streaming connected by user %d, cross %d", token.UserId, token.Cross.ID)
defer func() {
logger.DEBUG("streaming disconnect by user %d, cross %d", token.UserId, token.Cross.ID)
m.pubsub.UnsubscribeAll(c)
close(c)
}()
willEnd := endAt - now.Unix()
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{willEnd},
})
if err != nil {
return
}
toMars := coordinate == "mars"
isTutorial := false
if token.Cross.By.UserID == m.config.Routex.TutorialCreator {
isTutorial = true
}
hasCreated := false
ctx.Return(http.StatusOK)
quit := make(chan int)
defer func() { close(quit) }()
for _, mark := range m.getObjects(token.Cross, toMars) {
if isTutorial && !hasCreated && !mark.IsBreadcrumbs() {
hasCreated = true
}
if err := ctx.Render(mark); err != nil {
return
}
}
ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout))
if err := ctx.Render(map[string]string{"type": "command", "action": "init_end"}); err != nil {
return
}
lastCheck := now.Unix()
for ctx.Ping() == nil {
select {
case d := <-c:
switch data := d.(type) {
case rmodel.Geomark:
if isTutorial && !hasCreated {
if data.Id == m.breadcrumbsId(token.UserId) {
locale, by := "", ""
for _, i := range token.Cross.Exfee.Invitations {
if i.Identity.UserID == token.UserId {
locale, by = i.Identity.Locale, i.Identity.Id()
break
}
}
tutorialMark, err := m.setTutorial(data.Positions[0].GPS[0], data.Positions[0].GPS[1], token.UserId, int64(token.Cross.ID), locale, by)
if err != nil {
logger.ERROR("create tutorial geomark for user %d in cross %d failed: %s", token.UserId, token.Cross.ID, err)
} else {
hasCreated = true
if toMars {
tutorialMark.ToMars(m.conversion)
}
err := ctx.Render(tutorialMark)
if err != nil {
return
}
}
}
}
if toMars {
data.ToMars(m.conversion)
}
d = data
case rmodel.Identity:
switch data.Action {
case "join":
if token.Cross.Exfee.Join(data.Identity) {
m.pubsub.Subscribe(m.identityName(data.Identity), c)
}
case "remove":
if token.Cross.Exfee.Remove(data.Identity) {
m.pubsub.Unsubscribe(m.identityName(data.Identity), c)
}
}
}
ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout))
err := ctx.Render(d)
if err != nil {
return
}
case <-time.After(broker.NetworkTimeout):
case <-time.After(time.Duration(endAt-time.Now().Unix()) * time.Second):
newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil || newEndAt == 0 || newEndAt <= time.Now().Unix() {
return
}
endAt = newEndAt
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{endAt - time.Now().Unix()},
})
if err != nil {
return
}
}
if time.Now().Unix()-lastCheck > 60 {
lastCheck = time.Now().Unix()
newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil {
logger.ERROR("can't set user %d cross %d: %s", token.UserId, token.Cross.ID, err)
continue
}
endAt = newEndAt
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{endAt - time.Now().Unix()},
})
if err != nil {
return
}
}
}
}
func (m RouteMap) Options(ctx rest.Context) {
ctx.Response().Header().Set("Access-Control-Allow-Origin", m.config.AccessDomain)
ctx.Response().Header().Set("Access-Control-Allow-Credentials", "true")
ctx.Response().Header().Set("Cache-Control", "no-cache")
ctx.Return(http.StatusNoContent)
}
func (m RouteMap) SendNotification(ctx rest.Context) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var id string
ctx.Bind("id", &id)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
to := model.FromIdentityId(id)
var toInvitation *model.Invitation
for _, inv := range token.Cross.Exfee.Invitations {
if inv.Identity.Equal(to) {
toInvitation = &inv
break
}
}
if toInvitation == nil {
ctx.Return(http.StatusForbidden, "%s is not attending cross %d", to.Id(), token.Cross.ID)
return
}
to = toInvitation.Identity
recipients, err := m.platform.GetRecipientsById(to.Id())
if err != nil {
ctx.Return(http.StatusInternalServerError, err)
return
}
m.update(int64(token.Cross.ID), token.Identity)
arg := notifier.RequestArg{
CrossId: token.Cross.ID,
From: token.Identity,
}
pushed := false
for _, recipient := range recipients {
switch recipient.Provider {
case "iOS":
fallthrough
case "Android":
arg.To = recipient
m.sendRequest(arg)
pushed = true
}
}
if to.Provider == "wechat" {
if ok, err := m.platform.CheckWechatFollowing(to.ExternalUsername); (err != nil || !ok) && !pushed {
ctx.Return(http.StatusNotAcceptable, "can't find available provider")
}
}
go func() {
arg.To = to.ToRecipient()
m.sendRequest(arg)
for _, id := range toInvitation.Notifications {
to := model.FromIdentityId(id)
arg.To.ExternalUsername, arg.To.Provider = to.ExternalUsername, to.Provider
m.sendRequest(arg)
}
}()
}
func (m *RouteMap) getObjects(cross model.Cross, toMars bool) []rmodel.Geomark {
isTutorial := false
if cross.By.UserID == m.config.Routex.TutorialCreator {
isTutorial = true
}
var ret []rmodel.Geomark
breadcrumbs, err := m.breadcrumbCache.LoadAllCross(int64(cross.ID))
now := time.Now()
if isTutorial {
for _, id := range m.config.TutorialBotUserIds {
l := m.getTutorialData(now, id, 1)
if len(l) > 0 {
breadcrumbs[id] = l[0]
}
}
}
users := make(map[int64]bool)
for _, inv := range cross.Exfee.Invitations {
users[inv.Identity.UserID] = true
}
if err == nil {
for userId, l := range breadcrumbs {
if !users[userId] {
if err := m.breadcrumbCache.RemoveCross(userId, int64(cross.ID)); err != nil {
logger.ERROR("remove user %d cross %d breadcrumb error: %s", userId, cross.ID, err)
}
continue
}
mark := m.breadcrumbsToGeomark(userId, 1, []rmodel.SimpleLocation{l})
if toMars {
mark.ToMars(m.conversion)
}
ret = append(ret, mark)
}
} else {
logger.ERROR("can't get current breadcrumb of cross %d: %s", cross.ID, err)
}
marks, err := m.getGeomarks_(cross, toMars)
if err == nil {
ret = append(ret, marks...)
} else {
logger.ERROR("can't get route of cross %d: %s", cross.ID, err)
}
return ret
}
func (m *RouteMap) sendRequest(arg notifier.RequestArg) {
body, err := json.Marshal(arg)
if err != nil {
logger.ERROR("can't marshal: %s with %+v", err, arg)
return
}
url := fmt.Sprintf("http://%s:%d/v3/notifier/routex/request", m.config.ExfeService.Addr, m.config.ExfeService.Port)
resp, err := broker.HttpResponse(broker.Http("POST", url, "application/json", body))
if err != nil {
logger.ERROR("post %s error: %s with %#v", url, err, string(body))
return
}
resp.Close()
}
func (m RouteMap) switchWindow(crossId int64, identity model.Identity, save bool, afterInSeconds int) {
m.update(crossId, identity)
if save {
if err := m.breadcrumbsRepo.EnableCross(identity.UserID, crossId, afterInSeconds); err != nil {
logger.ERROR("set user %d enable cross %d breadcrumbs repo failed: %s", identity.UserID, crossId, err)
}
if err := m.breadcrumbCache.EnableCross(identity.UserID, crossId, afterInSeconds); err != nil {
logger.ERROR("set user %d enable cross %d breadcrumb cache failed: %s", identity.UserID, crossId, err)
}
} else {
if err := m.breadcrumbsRepo.DisableCross(identity.UserID, crossId); err != nil {
logger.ERROR("set user %d disable cross %d breadcrumbs repo failed: %s", identity.UserID, crossId, err)
}
if err := m.breadcrumbCache.DisableCross(identity.UserID, crossId); err != nil {
logger.ERROR("set user %d disable cross %d breadcrumb cache failed: %s", identity.UserID, crossId, err)
}
}
}
func (m RouteMap) update(crossId int64, by model.Identity) {
if err := m.routexRepo.Update(crossId); err != nil {
logger.ERROR("update routex user %d cross %d error: %s", err)
}
cross := make(map[string]interface{})
cross["widgets"] = []map[string]string{
map[string]string{"type": "routex"},
}
m.platform.BotCrossUpdate("cross_id", fmt.Sprintf("%d", crossId), cross, by)
}
func (m *RouteMap) auth(ctx rest.Context) (rmodel.Token, bool) {
ctx.Response().Header().Set("Access-Control-Allow-Origin", m.config.AccessDomain)
ctx.Response().Header().Set("Access-Control-Allow-Credentials", "true")
ctx.Response().Header().Set("Cache-Control", "no-cache")
defer ctx.BindReset()
var token rmodel.Token
authData := ctx.Request().Header.Get("Exfe-Auth-Data")
// if authData == "" {
// authData = `{"token_type":"user_token","user_id":475,"signin_time":1374046388,"last_authenticate":1374046388}`
// }
if authData != "" {
if err := json.Unmarshal([]byte(authData), &token); err != nil {
return token, false
}
}
var crossIdFlag bool
ctx.Bind("cross_id", &crossIdFlag)
if ctx.BindError() != nil || !crossIdFlag {
if token.TokenType == "user_token" {
return token, true
}
return token, false
}
var crossId int64
ctx.Bind("cross_id", &crossId)
if err := ctx.BindError(); err != nil {
return token, false
}
query := make(url.Values)
switch token.TokenType {
case "user_token":
query.Set("user_id", fmt.Sprintf("%d", token.UserId))
case "cross_access_token":
if int64(token.CrossId) != crossId {
return token, false
}
default:
return token, false
}
var err error
if token.Cross, err = m.platform.FindCross(int64(crossId), query); err != nil {
return token, false
}
for _, inv := range token.Cross.Exfee.Invitations {
switch token.TokenType {
case "cross_access_token":
if inv.Identity.ID == token.IdentityId {
token.UserId = inv.Identity.UserID
token.Identity = inv.Identity
return token, true
}
case "user_token":
if inv.Identity.UserID == token.UserId {
token.Identity = inv.Identity
return token, true
}
}
}
return token, false
}
func (m RouteMap) publicName(crossId int64) string {
return fmt.Sprintf("routex:cross_%d", crossId)
}
func (m RouteMap) tutorialName() string {
return "routex:tutorial:data"
}
func (m RouteMap) identityName(identity model.Identity) string {
return fmt.Sprintf("routex:identity:%s", identity.Id())
}
func (m RouteMap) tutorialGenerator() {
for {
select {
case <-m.quit:
return
case <-time.After(time.Second * 10):
now := time.Now()
for userId := range m.tutorialDatas {
positions := m.getTutorialData(now, userId, 1)
if len(positions) == 0 {
continue
}
mark := m.breadcrumbsToGeomark(userId, 1, positions)
m.pubsub.Publish(m.tutorialName(), mark)
}
}
}
}
| UpdateIdentity | identifier_name |
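Each row above follows the five-column schema in the table header: the source file is split into a prefix and a suffix, the held-out middle is what a fill-in-the-middle (FIM) model is asked to predict, and fim_type records how the split was made. In the first row the middle is the identifier `UpdateIdentity` and the fim_type is `identifier_name`, so the `|` inside the Go source marks where the prefix ends and the suffix resumes. Below is a minimal sketch of how such a row could be reassembled into the original source; the `FIMRow` struct, its field names, and the helper are illustrative assumptions based only on the column header, not part of the dataset.

```go
// Minimal sketch (assumption, not dataset code): model one row of the
// file_name/prefix/suffix/middle/fim_type schema and reassemble the source.
package main

import "fmt"

// FIMRow mirrors the five columns shown in the table header.
type FIMRow struct {
	FileName string // e.g. "routex.go"
	Prefix   string // text before the held-out span
	Suffix   string // text after the held-out span
	Middle   string // the span a FIM model must predict
	FimType  string // e.g. "identifier_name", "random_line_split", "identifier_body"
}

// Reassemble restores the original source text from a row.
func Reassemble(r FIMRow) string {
	return r.Prefix + r.Middle + r.Suffix
}

func main() {
	// Values taken from the first row above.
	row := FIMRow{
		FileName: "routex.go",
		Prefix:   "func (m RouteMap) ",
		Middle:   "UpdateIdentity",
		Suffix:   "(ctx rest.Context, identity model.Identity) {",
		FimType:  "identifier_name",
	}
	fmt.Println(Reassemble(row))
	// Output: func (m RouteMap) UpdateIdentity(ctx rest.Context, identity model.Identity) {
}
```

The two routex.go rows that follow reuse the same file with different split points (fim_type `random_line_split` and `identifier_body`), so the same reassembly applies to them.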
routex.go | package routex
import (
"broker"
"encoding/json"
"fmt"
"github.com/googollee/go-pubsub"
"github.com/googollee/go-rest"
"logger"
"math/rand"
"model"
"net/http"
"net/url"
"notifier"
"os"
"routex/model"
"sync"
"time"
)
type RouteMap struct {
rest.Service `prefix:"/v3/routex" mime:"application/json"`
updateIdentity rest.SimpleNode `route:"/_inner/update_identity" method:"POST"`
updateExfee rest.SimpleNode `route:"/_inner/update_exfee" method:"POST"`
searchRoutex rest.SimpleNode `route:"/_inner/search/crosses" method:"POST"`
getRoutex rest.SimpleNode `route:"/_inner/users/:user_id/crosses/:cross_id" method:"GET"`
setUser rest.SimpleNode `route:"/users/crosses/:cross_id" method:"POST"`
updateBreadcrums rest.SimpleNode `route:"/breadcrumbs" method:"POST"`
updateBreadcrumsInner rest.SimpleNode `route:"/_inner/breadcrumbs/users/:user_id" method:"POST"`
getBreadcrums rest.SimpleNode `route:"/breadcrumbs/crosses/:cross_id" method:"GET"`
getUserBreadcrums rest.SimpleNode `route:"/breadcrumbs/crosses/:cross_id/users/:user_id" method:"GET"`
getUserBreadcrumsInner rest.SimpleNode `route:"/_inner/breadcrumbs/users/:user_id" method:"GET"`
searchGeomarks rest.SimpleNode `route:"/_inner/geomarks/crosses/:cross_id" method:"GET"`
getGeomarks rest.SimpleNode `route:"/geomarks/crosses/:cross_id" method:"GET"`
setGeomark rest.SimpleNode `route:"/geomarks/crosses/:cross_id/:mark_type/:kind.:mark_id" method:"PUT"`
deleteGeomark rest.SimpleNode `route:"/geomarks/crosses/:cross_id/:mark_type/:kind.:mark_id" method:"DELETE"`
stream rest.Streaming `route:"/crosses/:cross_id" method:"WATCH"`
options rest.SimpleNode `route:"/crosses/:cross_id" method:"OPTIONS"`
sendNotification rest.SimpleNode `route:"/notification/crosses/:cross_id" method:"POST"`
rand *rand.Rand
routexRepo rmodel.RoutexRepo
breadcrumbCache rmodel.BreadcrumbCache
breadcrumbsRepo rmodel.BreadcrumbsRepo
geomarksRepo rmodel.GeomarksRepo
conversion rmodel.GeoConversionRepo
platform *broker.Platform
config *model.Config
tutorialDatas map[int64][]rmodel.TutorialData
pubsub *pubsub.Pubsub
castLocker sync.RWMutex
quit chan int
}
func New(routexRepo rmodel.RoutexRepo, breadcrumbCache rmodel.BreadcrumbCache, breadcrumbsRepo rmodel.BreadcrumbsRepo, geomarksRepo rmodel.GeomarksRepo, conversion rmodel.GeoConversionRepo, platform *broker.Platform, config *model.Config) (*RouteMap, error) {
tutorialDatas := make(map[int64][]rmodel.TutorialData)
for _, userId := range config.TutorialBotUserIds {
file := config.Routex.TutorialDataFile[fmt.Sprintf("%d", userId)]
f, err := os.Open(file)
if err != nil {
return nil, fmt.Errorf("can't find tutorial file %s for tutorial bot %d", file, userId)
}
var datas []rmodel.TutorialData
decoder := json.NewDecoder(f)
err = decoder.Decode(&datas)
if err != nil {
return nil, fmt.Errorf("invalid tutorial data %s for tutorial bot %d: %s", file, userId, err)
}
tutorialDatas[userId] = datas
}
ret := &RouteMap{
rand: rand.New(rand.NewSource(time.Now().Unix())),
routexRepo: routexRepo,
breadcrumbCache: breadcrumbCache,
breadcrumbsRepo: breadcrumbsRepo,
geomarksRepo: geomarksRepo, conversion: conversion,
platform: platform,
tutorialDatas: tutorialDatas,
config: config,
pubsub: pubsub.New(20),
quit: make(chan int),
}
go ret.tutorialGenerator()
return ret, nil
}
func (m RouteMap) UpdateIdentity(ctx rest.Context, identity model.Identity) {
id := rmodel.Identity{
Identity: identity,
Type: "identity",
Action: "update",
}
m.pubsub.Publish(m.identityName(identity), id)
}
func (m RouteMap) UpdateExfee(ctx rest.Context, invitations model.Invitation) {
var crossId int64
var action string
ctx.Bind("cross_id", &crossId)
ctx.Bind("action", &action)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
if action != "join" && action != "remove" {
ctx.Return(http.StatusBadRequest, "invalid action: %s", action)
return
}
id := rmodel.Invitation{
Identity: invitations.Identity,
Notifications: invitations.Notifications,
Type: "invitation",
Action: action,
}
m.pubsub.Publish(m.publicName(crossId), id)
}
type UserCrossSetup struct {
SaveBreadcrumbs bool `json:"save_breadcrumbs,omitempty"`
AfterInSeconds int `json:"after_in_seconds,omitempty"`
}
func (m RouteMap) SetUser(ctx rest.Context, setup UserCrossSetup) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var crossId int64
ctx.Bind("cross_id", &crossId)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
if setup.AfterInSeconds == 0 {
setup.AfterInSeconds = 60 * 60
}
m.switchWindow(crossId, token.Identity, setup.SaveBreadcrumbs, setup.AfterInSeconds)
}
func (m RouteMap) SearchRoutex(ctx rest.Context, crossIds []int64) {
ret, err := m.routexRepo.Search(crossIds)
if err != nil {
logger.ERROR("search for route failed: %s with %+v", err, crossIds)
ctx.Return(http.StatusInternalServerError, err)
return
}
ctx.Render(ret)
}
type RoutexInfo struct {
InWindow *bool `json:"in_window"`
Objects []rmodel.Geomark `json:"objects"`
}
func (m RouteMap) GetRoutex(ctx rest.Context) {
var userId, crossId int64
ctx.Bind("cross_id", &crossId)
ctx.Bind("user_id", &userId) | if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
endAt, err := m.breadcrumbsRepo.GetWindowEnd(userId, crossId)
if err != nil {
logger.ERROR("get user %d cross %d routex failed: %s", userId, crossId, err)
ctx.Return(http.StatusInternalServerError, err)
return
}
ret := RoutexInfo{}
if endAt != 0 {
ret.InWindow = new(bool)
*ret.InWindow = endAt >= time.Now().Unix()
}
query := make(url.Values)
query.Set("user_id", fmt.Sprintf("%d", userId))
cross, err := m.platform.FindCross(crossId, query)
if err == nil {
ret.Objects = m.getObjects(cross, true)
} else {
logger.ERROR("get user %d cross %d failed: %s", userId, crossId, err)
ctx.Return(http.StatusInternalServerError, err)
return
}
ctx.Render(ret)
}
func (m RouteMap) Stream(ctx rest.StreamContext) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var forceOpen bool
var coordinate string
ctx.Bind("force_window_open", &forceOpen)
ctx.Bind("coordinate", &coordinate)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
now := time.Now()
endAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil || endAt <= now.Unix() {
if !forceOpen {
ctx.Return(http.StatusForbidden, "not in window")
return
}
after := 15 * 60
if endAt == 0 {
after = 60 * 60
}
var openAfter int
ctx.BindReset()
ctx.Bind("force_window_open", &openAfter)
if ctx.BindError() == nil {
after = openAfter
}
endAt = now.Unix() + int64(after)
m.switchWindow(int64(token.Cross.ID), token.Identity, true, after)
}
c := make(chan interface{}, 10)
m.pubsub.Subscribe(m.publicName(int64(token.Cross.ID)), c)
if token.Cross.By.UserID == m.config.Routex.TutorialCreator {
m.pubsub.Subscribe(m.tutorialName(), c)
}
for _, inv := range token.Cross.Exfee.Invitations {
m.pubsub.Subscribe(m.identityName(inv.Identity), c)
}
logger.DEBUG("streaming connected by user %d, cross %d", token.UserId, token.Cross.ID)
defer func() {
logger.DEBUG("streaming disconnect by user %d, cross %d", token.UserId, token.Cross.ID)
m.pubsub.UnsubscribeAll(c)
close(c)
}()
willEnd := endAt - now.Unix()
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{willEnd},
})
if err != nil {
return
}
toMars := coordinate == "mars"
isTutorial := false
if token.Cross.By.UserID == m.config.Routex.TutorialCreator {
isTutorial = true
}
hasCreated := false
ctx.Return(http.StatusOK)
quit := make(chan int)
defer func() { close(quit) }()
for _, mark := range m.getObjects(token.Cross, toMars) {
if isTutorial && !hasCreated && !mark.IsBreadcrumbs() {
hasCreated = true
}
if err := ctx.Render(mark); err != nil {
return
}
}
ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout))
if err := ctx.Render(map[string]string{"type": "command", "action": "init_end"}); err != nil {
return
}
lastCheck := now.Unix()
for ctx.Ping() == nil {
select {
case d := <-c:
switch data := d.(type) {
case rmodel.Geomark:
if isTutorial && !hasCreated {
if data.Id == m.breadcrumbsId(token.UserId) {
locale, by := "", ""
for _, i := range token.Cross.Exfee.Invitations {
if i.Identity.UserID == token.UserId {
locale, by = i.Identity.Locale, i.Identity.Id()
break
}
}
tutorialMark, err := m.setTutorial(data.Positions[0].GPS[0], data.Positions[0].GPS[1], token.UserId, int64(token.Cross.ID), locale, by)
if err != nil {
logger.ERROR("create tutorial geomark for user %d in cross %d failed: %s", token.UserId, token.Cross.ID, err)
} else {
hasCreated = true
if toMars {
tutorialMark.ToMars(m.conversion)
}
err := ctx.Render(tutorialMark)
if err != nil {
return
}
}
}
}
if toMars {
data.ToMars(m.conversion)
}
d = data
case rmodel.Identity:
switch data.Action {
case "join":
if token.Cross.Exfee.Join(data.Identity) {
m.pubsub.Subscribe(m.identityName(data.Identity), c)
}
case "remove":
if token.Cross.Exfee.Remove(data.Identity) {
m.pubsub.Unsubscribe(m.identityName(data.Identity), c)
}
}
}
ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout))
err := ctx.Render(d)
if err != nil {
return
}
case <-time.After(broker.NetworkTimeout):
case <-time.After(time.Duration(endAt-time.Now().Unix()) * time.Second):
newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil || newEndAt == 0 || newEndAt <= time.Now().Unix() {
return
}
endAt = newEndAt
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{endAt - time.Now().Unix()},
})
if err != nil {
return
}
}
if time.Now().Unix()-lastCheck > 60 {
lastCheck = time.Now().Unix()
newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil {
logger.ERROR("can't set user %d cross %d: %s", token.UserId, token.Cross.ID, err)
continue
}
endAt = newEndAt
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{endAt - time.Now().Unix()},
})
if err != nil {
return
}
}
}
}
func (m RouteMap) Options(ctx rest.Context) {
ctx.Response().Header().Set("Access-Control-Allow-Origin", m.config.AccessDomain)
ctx.Response().Header().Set("Access-Control-Allow-Credentials", "true")
ctx.Response().Header().Set("Cache-Control", "no-cache")
ctx.Return(http.StatusNoContent)
}
func (m RouteMap) SendNotification(ctx rest.Context) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var id string
ctx.Bind("id", &id)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
to := model.FromIdentityId(id)
var toInvitation *model.Invitation
for _, inv := range token.Cross.Exfee.Invitations {
if inv.Identity.Equal(to) {
toInvitation = &inv
break
}
}
if toInvitation == nil {
ctx.Return(http.StatusForbidden, "%s is not attending cross %d", to.Id(), token.Cross.ID)
return
}
to = toInvitation.Identity
recipients, err := m.platform.GetRecipientsById(to.Id())
if err != nil {
ctx.Return(http.StatusInternalServerError, err)
return
}
m.update(int64(token.Cross.ID), token.Identity)
arg := notifier.RequestArg{
CrossId: token.Cross.ID,
From: token.Identity,
}
pushed := false
for _, recipient := range recipients {
switch recipient.Provider {
case "iOS":
fallthrough
case "Android":
arg.To = recipient
m.sendRequest(arg)
pushed = true
}
}
if to.Provider == "wechat" {
if ok, err := m.platform.CheckWechatFollowing(to.ExternalUsername); (err != nil || !ok) && !pushed {
ctx.Return(http.StatusNotAcceptable, "can't find available provider")
}
}
go func() {
arg.To = to.ToRecipient()
m.sendRequest(arg)
for _, id := range toInvitation.Notifications {
to := model.FromIdentityId(id)
arg.To.ExternalUsername, arg.To.Provider = to.ExternalUsername, to.Provider
m.sendRequest(arg)
}
}()
}
func (m *RouteMap) getObjects(cross model.Cross, toMars bool) []rmodel.Geomark {
isTutorial := false
if cross.By.UserID == m.config.Routex.TutorialCreator {
isTutorial = true
}
var ret []rmodel.Geomark
breadcrumbs, err := m.breadcrumbCache.LoadAllCross(int64(cross.ID))
now := time.Now()
if isTutorial {
for _, id := range m.config.TutorialBotUserIds {
l := m.getTutorialData(now, id, 1)
if len(l) > 0 {
breadcrumbs[id] = l[0]
}
}
}
users := make(map[int64]bool)
for _, inv := range cross.Exfee.Invitations {
users[inv.Identity.UserID] = true
}
if err == nil {
for userId, l := range breadcrumbs {
if !users[userId] {
if err := m.breadcrumbCache.RemoveCross(userId, int64(cross.ID)); err != nil {
logger.ERROR("remove user %d cross %d breadcrumb error: %s", userId, cross.ID, err)
}
continue
}
mark := m.breadcrumbsToGeomark(userId, 1, []rmodel.SimpleLocation{l})
if toMars {
mark.ToMars(m.conversion)
}
ret = append(ret, mark)
}
} else {
logger.ERROR("can't get current breadcrumb of cross %d: %s", cross.ID, err)
}
marks, err := m.getGeomarks_(cross, toMars)
if err == nil {
ret = append(ret, marks...)
} else {
logger.ERROR("can't get route of cross %d: %s", cross.ID, err)
}
return ret
}
func (m *RouteMap) sendRequest(arg notifier.RequestArg) {
body, err := json.Marshal(arg)
if err != nil {
logger.ERROR("can't marshal: %s with %+v", err, arg)
return
}
url := fmt.Sprintf("http://%s:%d/v3/notifier/routex/request", m.config.ExfeService.Addr, m.config.ExfeService.Port)
resp, err := broker.HttpResponse(broker.Http("POST", url, "application/json", body))
if err != nil {
logger.ERROR("post %s error: %s with %#v", url, err, string(body))
return
}
resp.Close()
}
func (m RouteMap) switchWindow(crossId int64, identity model.Identity, save bool, afterInSeconds int) {
m.update(crossId, identity)
if save {
if err := m.breadcrumbsRepo.EnableCross(identity.UserID, crossId, afterInSeconds); err != nil {
logger.ERROR("set user %d enable cross %d breadcrumbs repo failed: %s", identity.UserID, crossId, err)
}
if err := m.breadcrumbCache.EnableCross(identity.UserID, crossId, afterInSeconds); err != nil {
logger.ERROR("set user %d enable cross %d breadcrumb cache failed: %s", identity.UserID, crossId, err)
}
} else {
if err := m.breadcrumbsRepo.DisableCross(identity.UserID, crossId); err != nil {
logger.ERROR("set user %d disable cross %d breadcrumbs repo failed: %s", identity.UserID, crossId, err)
}
if err := m.breadcrumbCache.DisableCross(identity.UserID, crossId); err != nil {
logger.ERROR("set user %d disable cross %d breadcrumb cache failed: %s", identity.UserID, crossId, err)
}
}
}
func (m RouteMap) update(crossId int64, by model.Identity) {
if err := m.routexRepo.Update(crossId); err != nil {
logger.ERROR("update routex user %d cross %d error: %s", err)
}
cross := make(map[string]interface{})
cross["widgets"] = []map[string]string{
map[string]string{"type": "routex"},
}
m.platform.BotCrossUpdate("cross_id", fmt.Sprintf("%d", crossId), cross, by)
}
func (m *RouteMap) auth(ctx rest.Context) (rmodel.Token, bool) {
ctx.Response().Header().Set("Access-Control-Allow-Origin", m.config.AccessDomain)
ctx.Response().Header().Set("Access-Control-Allow-Credentials", "true")
ctx.Response().Header().Set("Cache-Control", "no-cache")
defer ctx.BindReset()
var token rmodel.Token
authData := ctx.Request().Header.Get("Exfe-Auth-Data")
// if authData == "" {
// authData = `{"token_type":"user_token","user_id":475,"signin_time":1374046388,"last_authenticate":1374046388}`
// }
if authData != "" {
if err := json.Unmarshal([]byte(authData), &token); err != nil {
return token, false
}
}
var crossIdFlag bool
ctx.Bind("cross_id", &crossIdFlag)
if ctx.BindError() != nil || !crossIdFlag {
if token.TokenType == "user_token" {
return token, true
}
return token, false
}
var crossId int64
ctx.Bind("cross_id", &crossId)
if err := ctx.BindError(); err != nil {
return token, false
}
query := make(url.Values)
switch token.TokenType {
case "user_token":
query.Set("user_id", fmt.Sprintf("%d", token.UserId))
case "cross_access_token":
if int64(token.CrossId) != crossId {
return token, false
}
default:
return token, false
}
var err error
if token.Cross, err = m.platform.FindCross(int64(crossId), query); err != nil {
return token, false
}
for _, inv := range token.Cross.Exfee.Invitations {
switch token.TokenType {
case "cross_access_token":
if inv.Identity.ID == token.IdentityId {
token.UserId = inv.Identity.UserID
token.Identity = inv.Identity
return token, true
}
case "user_token":
if inv.Identity.UserID == token.UserId {
token.Identity = inv.Identity
return token, true
}
}
}
return token, false
}
func (m RouteMap) publicName(crossId int64) string {
return fmt.Sprintf("routex:cross_%d", crossId)
}
func (m RouteMap) tutorialName() string {
return "routex:tutorial:data"
}
func (m RouteMap) identityName(identity model.Identity) string {
return fmt.Sprintf("routex:identity:%s", identity.Id())
}
func (m RouteMap) tutorialGenerator() {
for {
select {
case <-m.quit:
return
case <-time.After(time.Second * 10):
now := time.Now()
for userId := range m.tutorialDatas {
positions := m.getTutorialData(now, userId, 1)
if len(positions) == 0 {
continue
}
mark := m.breadcrumbsToGeomark(userId, 1, positions)
m.pubsub.Publish(m.tutorialName(), mark)
}
}
}
} | random_line_split |
|
routex.go | package routex
import (
"broker"
"encoding/json"
"fmt"
"github.com/googollee/go-pubsub"
"github.com/googollee/go-rest"
"logger"
"math/rand"
"model"
"net/http"
"net/url"
"notifier"
"os"
"routex/model"
"sync"
"time"
)
type RouteMap struct {
rest.Service `prefix:"/v3/routex" mime:"application/json"`
updateIdentity rest.SimpleNode `route:"/_inner/update_identity" method:"POST"`
updateExfee rest.SimpleNode `route:"/_inner/update_exfee" method:"POST"`
searchRoutex rest.SimpleNode `route:"/_inner/search/crosses" method:"POST"`
getRoutex rest.SimpleNode `route:"/_inner/users/:user_id/crosses/:cross_id" method:"GET"`
setUser rest.SimpleNode `route:"/users/crosses/:cross_id" method:"POST"`
updateBreadcrums rest.SimpleNode `route:"/breadcrumbs" method:"POST"`
updateBreadcrumsInner rest.SimpleNode `route:"/_inner/breadcrumbs/users/:user_id" method:"POST"`
getBreadcrums rest.SimpleNode `route:"/breadcrumbs/crosses/:cross_id" method:"GET"`
getUserBreadcrums rest.SimpleNode `route:"/breadcrumbs/crosses/:cross_id/users/:user_id" method:"GET"`
getUserBreadcrumsInner rest.SimpleNode `route:"/_inner/breadcrumbs/users/:user_id" method:"GET"`
searchGeomarks rest.SimpleNode `route:"/_inner/geomarks/crosses/:cross_id" method:"GET"`
getGeomarks rest.SimpleNode `route:"/geomarks/crosses/:cross_id" method:"GET"`
setGeomark rest.SimpleNode `route:"/geomarks/crosses/:cross_id/:mark_type/:kind.:mark_id" method:"PUT"`
deleteGeomark rest.SimpleNode `route:"/geomarks/crosses/:cross_id/:mark_type/:kind.:mark_id" method:"DELETE"`
stream rest.Streaming `route:"/crosses/:cross_id" method:"WATCH"`
options rest.SimpleNode `route:"/crosses/:cross_id" method:"OPTIONS"`
sendNotification rest.SimpleNode `route:"/notification/crosses/:cross_id" method:"POST"`
rand *rand.Rand
routexRepo rmodel.RoutexRepo
breadcrumbCache rmodel.BreadcrumbCache
breadcrumbsRepo rmodel.BreadcrumbsRepo
geomarksRepo rmodel.GeomarksRepo
conversion rmodel.GeoConversionRepo
platform *broker.Platform
config *model.Config
tutorialDatas map[int64][]rmodel.TutorialData
pubsub *pubsub.Pubsub
castLocker sync.RWMutex
quit chan int
}
func New(routexRepo rmodel.RoutexRepo, breadcrumbCache rmodel.BreadcrumbCache, breadcrumbsRepo rmodel.BreadcrumbsRepo, geomarksRepo rmodel.GeomarksRepo, conversion rmodel.GeoConversionRepo, platform *broker.Platform, config *model.Config) (*RouteMap, error) {
tutorialDatas := make(map[int64][]rmodel.TutorialData)
for _, userId := range config.TutorialBotUserIds {
file := config.Routex.TutorialDataFile[fmt.Sprintf("%d", userId)]
f, err := os.Open(file)
if err != nil {
return nil, fmt.Errorf("can't find tutorial file %s for tutorial bot %d", file, userId)
}
var datas []rmodel.TutorialData
decoder := json.NewDecoder(f)
err = decoder.Decode(&datas)
if err != nil {
return nil, fmt.Errorf("invalid tutorial data %s for tutorial bot %d: %s", file, userId, err)
}
tutorialDatas[userId] = datas
}
ret := &RouteMap{
rand: rand.New(rand.NewSource(time.Now().Unix())),
routexRepo: routexRepo,
breadcrumbCache: breadcrumbCache,
breadcrumbsRepo: breadcrumbsRepo,
geomarksRepo: geomarksRepo, conversion: conversion,
platform: platform,
tutorialDatas: tutorialDatas,
config: config,
pubsub: pubsub.New(20),
quit: make(chan int),
}
go ret.tutorialGenerator()
return ret, nil
}
func (m RouteMap) UpdateIdentity(ctx rest.Context, identity model.Identity) |
func (m RouteMap) UpdateExfee(ctx rest.Context, invitations model.Invitation) {
var crossId int64
var action string
ctx.Bind("cross_id", &crossId)
ctx.Bind("action", &action)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
if action != "join" && action != "remove" {
ctx.Return(http.StatusBadRequest, "invalid action: %s", action)
return
}
id := rmodel.Invitation{
Identity: invitations.Identity,
Notifications: invitations.Notifications,
Type: "invitation",
Action: action,
}
m.pubsub.Publish(m.publicName(crossId), id)
}
type UserCrossSetup struct {
SaveBreadcrumbs bool `json:"save_breadcrumbs,omitempty"`
AfterInSeconds int `json:"after_in_seconds,omitempty"`
}
func (m RouteMap) SetUser(ctx rest.Context, setup UserCrossSetup) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var crossId int64
ctx.Bind("cross_id", &crossId)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
if setup.AfterInSeconds == 0 {
setup.AfterInSeconds = 60 * 60
}
m.switchWindow(crossId, token.Identity, setup.SaveBreadcrumbs, setup.AfterInSeconds)
}
func (m RouteMap) SearchRoutex(ctx rest.Context, crossIds []int64) {
ret, err := m.routexRepo.Search(crossIds)
if err != nil {
logger.ERROR("search for route failed: %s with %+v", err, crossIds)
ctx.Return(http.StatusInternalServerError, err)
return
}
ctx.Render(ret)
}
type RoutexInfo struct {
InWindow *bool `json:"in_window"`
Objects []rmodel.Geomark `json:"objects"`
}
func (m RouteMap) GetRoutex(ctx rest.Context) {
var userId, crossId int64
ctx.Bind("cross_id", &crossId)
ctx.Bind("user_id", &userId)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
endAt, err := m.breadcrumbsRepo.GetWindowEnd(userId, crossId)
if err != nil {
logger.ERROR("get user %d cross %d routex failed: %s", userId, crossId, err)
ctx.Return(http.StatusInternalServerError, err)
return
}
ret := RoutexInfo{}
if endAt != 0 {
ret.InWindow = new(bool)
*ret.InWindow = endAt >= time.Now().Unix()
}
query := make(url.Values)
query.Set("user_id", fmt.Sprintf("%d", userId))
cross, err := m.platform.FindCross(crossId, query)
if err == nil {
ret.Objects = m.getObjects(cross, true)
} else {
logger.ERROR("get user %d cross %d failed: %s", userId, crossId, err)
ctx.Return(http.StatusInternalServerError, err)
return
}
ctx.Render(ret)
}
func (m RouteMap) Stream(ctx rest.StreamContext) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var forceOpen bool
var coordinate string
ctx.Bind("force_window_open", &forceOpen)
ctx.Bind("coordinate", &coordinate)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
now := time.Now()
endAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil || endAt <= now.Unix() {
if !forceOpen {
ctx.Return(http.StatusForbidden, "not in window")
return
}
after := 15 * 60
if endAt == 0 {
after = 60 * 60
}
var openAfter int
ctx.BindReset()
ctx.Bind("force_window_open", &openAfter)
if ctx.BindError() == nil {
after = openAfter
}
endAt = now.Unix() + int64(after)
m.switchWindow(int64(token.Cross.ID), token.Identity, true, after)
}
c := make(chan interface{}, 10)
m.pubsub.Subscribe(m.publicName(int64(token.Cross.ID)), c)
if token.Cross.By.UserID == m.config.Routex.TutorialCreator {
m.pubsub.Subscribe(m.tutorialName(), c)
}
for _, inv := range token.Cross.Exfee.Invitations {
m.pubsub.Subscribe(m.identityName(inv.Identity), c)
}
logger.DEBUG("streaming connected by user %d, cross %d", token.UserId, token.Cross.ID)
defer func() {
logger.DEBUG("streaming disconnect by user %d, cross %d", token.UserId, token.Cross.ID)
m.pubsub.UnsubscribeAll(c)
close(c)
}()
willEnd := endAt - now.Unix()
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{willEnd},
})
if err != nil {
return
}
toMars := coordinate == "mars"
isTutorial := false
if token.Cross.By.UserID == m.config.Routex.TutorialCreator {
isTutorial = true
}
hasCreated := false
ctx.Return(http.StatusOK)
quit := make(chan int)
defer func() { close(quit) }()
for _, mark := range m.getObjects(token.Cross, toMars) {
if isTutorial && !hasCreated && !mark.IsBreadcrumbs() {
hasCreated = true
}
if err := ctx.Render(mark); err != nil {
return
}
}
ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout))
if err := ctx.Render(map[string]string{"type": "command", "action": "init_end"}); err != nil {
return
}
lastCheck := now.Unix()
for ctx.Ping() == nil {
select {
case d := <-c:
switch data := d.(type) {
case rmodel.Geomark:
if isTutorial && !hasCreated {
if data.Id == m.breadcrumbsId(token.UserId) {
locale, by := "", ""
for _, i := range token.Cross.Exfee.Invitations {
if i.Identity.UserID == token.UserId {
locale, by = i.Identity.Locale, i.Identity.Id()
break
}
}
tutorialMark, err := m.setTutorial(data.Positions[0].GPS[0], data.Positions[0].GPS[1], token.UserId, int64(token.Cross.ID), locale, by)
if err != nil {
logger.ERROR("create tutorial geomark for user %d in cross %d failed: %s", token.UserId, token.Cross.ID, err)
} else {
hasCreated = true
if toMars {
tutorialMark.ToMars(m.conversion)
}
err := ctx.Render(tutorialMark)
if err != nil {
return
}
}
}
}
if toMars {
data.ToMars(m.conversion)
}
d = data
case rmodel.Identity:
switch data.Action {
case "join":
if token.Cross.Exfee.Join(data.Identity) {
m.pubsub.Subscribe(m.identityName(data.Identity), c)
}
case "remove":
if token.Cross.Exfee.Remove(data.Identity) {
m.pubsub.Unsubscribe(m.identityName(data.Identity), c)
}
}
}
ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout))
err := ctx.Render(d)
if err != nil {
return
}
case <-time.After(broker.NetworkTimeout):
case <-time.After(time.Duration(endAt-time.Now().Unix()) * time.Second):
newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil || newEndAt == 0 || newEndAt <= time.Now().Unix() {
return
}
endAt = newEndAt
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{endAt - time.Now().Unix()},
})
if err != nil {
return
}
}
if time.Now().Unix()-lastCheck > 60 {
lastCheck = time.Now().Unix()
newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil {
logger.ERROR("can't set user %d cross %d: %s", token.UserId, token.Cross.ID, err)
continue
}
endAt = newEndAt
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{endAt - time.Now().Unix()},
})
if err != nil {
return
}
}
}
}
func (m RouteMap) Options(ctx rest.Context) {
ctx.Response().Header().Set("Access-Control-Allow-Origin", m.config.AccessDomain)
ctx.Response().Header().Set("Access-Control-Allow-Credentials", "true")
ctx.Response().Header().Set("Cache-Control", "no-cache")
ctx.Return(http.StatusNoContent)
}
func (m RouteMap) SendNotification(ctx rest.Context) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var id string
ctx.Bind("id", &id)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
to := model.FromIdentityId(id)
var toInvitation *model.Invitation
for _, inv := range token.Cross.Exfee.Invitations {
if inv.Identity.Equal(to) {
toInvitation = &inv
break
}
}
if toInvitation == nil {
ctx.Return(http.StatusForbidden, "%s is not attending cross %d", to.Id(), token.Cross.ID)
return
}
to = toInvitation.Identity
recipients, err := m.platform.GetRecipientsById(to.Id())
if err != nil {
ctx.Return(http.StatusInternalServerError, err)
return
}
m.update(int64(token.Cross.ID), token.Identity)
arg := notifier.RequestArg{
CrossId: token.Cross.ID,
From: token.Identity,
}
pushed := false
for _, recipient := range recipients {
switch recipient.Provider {
case "iOS":
fallthrough
case "Android":
arg.To = recipient
m.sendRequest(arg)
pushed = true
}
}
if to.Provider == "wechat" {
if ok, err := m.platform.CheckWechatFollowing(to.ExternalUsername); (err != nil || !ok) && !pushed {
ctx.Return(http.StatusNotAcceptable, "can't find available provider")
}
}
go func() {
arg.To = to.ToRecipient()
m.sendRequest(arg)
for _, id := range toInvitation.Notifications {
to := model.FromIdentityId(id)
arg.To.ExternalUsername, arg.To.Provider = to.ExternalUsername, to.Provider
m.sendRequest(arg)
}
}()
}
func (m *RouteMap) getObjects(cross model.Cross, toMars bool) []rmodel.Geomark {
isTutorial := false
if cross.By.UserID == m.config.Routex.TutorialCreator {
isTutorial = true
}
var ret []rmodel.Geomark
breadcrumbs, err := m.breadcrumbCache.LoadAllCross(int64(cross.ID))
now := time.Now()
if isTutorial {
for _, id := range m.config.TutorialBotUserIds {
l := m.getTutorialData(now, id, 1)
if len(l) > 0 {
breadcrumbs[id] = l[0]
}
}
}
users := make(map[int64]bool)
for _, inv := range cross.Exfee.Invitations {
users[inv.Identity.UserID] = true
}
if err == nil {
for userId, l := range breadcrumbs {
if !users[userId] {
if err := m.breadcrumbCache.RemoveCross(userId, int64(cross.ID)); err != nil {
logger.ERROR("remove user %d cross %d breadcrumb error: %s", userId, cross.ID, err)
}
continue
}
mark := m.breadcrumbsToGeomark(userId, 1, []rmodel.SimpleLocation{l})
if toMars {
mark.ToMars(m.conversion)
}
ret = append(ret, mark)
}
} else {
logger.ERROR("can't get current breadcrumb of cross %d: %s", cross.ID, err)
}
marks, err := m.getGeomarks_(cross, toMars)
if err == nil {
ret = append(ret, marks...)
} else {
logger.ERROR("can't get route of cross %d: %s", cross.ID, err)
}
return ret
}
func (m *RouteMap) sendRequest(arg notifier.RequestArg) {
body, err := json.Marshal(arg)
if err != nil {
logger.ERROR("can't marshal: %s with %+v", err, arg)
return
}
url := fmt.Sprintf("http://%s:%d/v3/notifier/routex/request", m.config.ExfeService.Addr, m.config.ExfeService.Port)
resp, err := broker.HttpResponse(broker.Http("POST", url, "application/json", body))
if err != nil {
logger.ERROR("post %s error: %s with %#v", url, err, string(body))
return
}
resp.Close()
}
func (m RouteMap) switchWindow(crossId int64, identity model.Identity, save bool, afterInSeconds int) {
m.update(crossId, identity)
if save {
if err := m.breadcrumbsRepo.EnableCross(identity.UserID, crossId, afterInSeconds); err != nil {
logger.ERROR("set user %d enable cross %d breadcrumbs repo failed: %s", identity.UserID, crossId, err)
}
if err := m.breadcrumbCache.EnableCross(identity.UserID, crossId, afterInSeconds); err != nil {
logger.ERROR("set user %d enable cross %d breadcrumb cache failed: %s", identity.UserID, crossId, err)
}
} else {
if err := m.breadcrumbsRepo.DisableCross(identity.UserID, crossId); err != nil {
logger.ERROR("set user %d disable cross %d breadcrumbs repo failed: %s", identity.UserID, crossId, err)
}
if err := m.breadcrumbCache.DisableCross(identity.UserID, crossId); err != nil {
logger.ERROR("set user %d disable cross %d breadcrumb cache failed: %s", identity.UserID, crossId, err)
}
}
}
func (m RouteMap) update(crossId int64, by model.Identity) {
if err := m.routexRepo.Update(crossId); err != nil {
logger.ERROR("update routex user %d cross %d error: %s", err)
}
cross := make(map[string]interface{})
cross["widgets"] = []map[string]string{
map[string]string{"type": "routex"},
}
m.platform.BotCrossUpdate("cross_id", fmt.Sprintf("%d", crossId), cross, by)
}
func (m *RouteMap) auth(ctx rest.Context) (rmodel.Token, bool) {
ctx.Response().Header().Set("Access-Control-Allow-Origin", m.config.AccessDomain)
ctx.Response().Header().Set("Access-Control-Allow-Credentials", "true")
ctx.Response().Header().Set("Cache-Control", "no-cache")
defer ctx.BindReset()
var token rmodel.Token
authData := ctx.Request().Header.Get("Exfe-Auth-Data")
// if authData == "" {
// authData = `{"token_type":"user_token","user_id":475,"signin_time":1374046388,"last_authenticate":1374046388}`
// }
if authData != "" {
if err := json.Unmarshal([]byte(authData), &token); err != nil {
return token, false
}
}
var crossIdFlag bool
ctx.Bind("cross_id", &crossIdFlag)
if ctx.BindError() != nil || !crossIdFlag {
if token.TokenType == "user_token" {
return token, true
}
return token, false
}
var crossId int64
ctx.Bind("cross_id", &crossId)
if err := ctx.BindError(); err != nil {
return token, false
}
query := make(url.Values)
switch token.TokenType {
case "user_token":
query.Set("user_id", fmt.Sprintf("%d", token.UserId))
case "cross_access_token":
if int64(token.CrossId) != crossId {
return token, false
}
default:
return token, false
}
var err error
if token.Cross, err = m.platform.FindCross(int64(crossId), query); err != nil {
return token, false
}
for _, inv := range token.Cross.Exfee.Invitations {
switch token.TokenType {
case "cross_access_token":
if inv.Identity.ID == token.IdentityId {
token.UserId = inv.Identity.UserID
token.Identity = inv.Identity
return token, true
}
case "user_token":
if inv.Identity.UserID == token.UserId {
token.Identity = inv.Identity
return token, true
}
}
}
return token, false
}
func (m RouteMap) publicName(crossId int64) string {
return fmt.Sprintf("routex:cross_%d", crossId)
}
func (m RouteMap) tutorialName() string {
return "routex:tutorial:data"
}
func (m RouteMap) identityName(identity model.Identity) string {
return fmt.Sprintf("routex:identity:%s", identity.Id())
}
func (m RouteMap) tutorialGenerator() {
for {
select {
case <-m.quit:
return
case <-time.After(time.Second * 10):
now := time.Now()
for userId := range m.tutorialDatas {
positions := m.getTutorialData(now, userId, 1)
if len(positions) == 0 {
continue
}
mark := m.breadcrumbsToGeomark(userId, 1, positions)
m.pubsub.Publish(m.tutorialName(), mark)
}
}
}
}
| {
id := rmodel.Identity{
Identity: identity,
Type: "identity",
Action: "update",
}
m.pubsub.Publish(m.identityName(identity), id)
} | identifier_body |
Checker_ruleunit_test.js | /******************************************************************************
Copyright:: 2020- IBM, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*****************************************************************************/
'use strict';
let ace = require('../../../src/index');
const mapRuleToG = {
"RPT_List_Misuse": "3",
"RPT_Marquee_Trigger": "5",
"RPT_Headers_FewWords": "7",
"WCAG20_Input_ExplicitLabelImage": "10",
"RPT_Img_UsemapValid": "11",
"WCAG20_Object_HasText": "20",
"WCAG20_Applet_HasAlt": "21",
"RPT_Media_AudioTrigger": "24",
"RPT_Blockquote_HasCite": "25",
"RPT_Meta_Refresh": "33",
"WCAG20_Frame_HasTitle": "39",
"WCAG20_Input_ExplicitLabel": "41",
"RPT_Media_AltBrief": "99",
"WCAG20_A_TargetAndText": "112",
"WCAG20_Area_HasAlt": "240",
"RPT_Media_ImgColorUsage": "245",
"WCAG20_Meta_RedirectZero": "254",
"RPT_Elem_Deprecated": "256",
"RPT_Blockquote_WrapsTextQuote": "263",
"RPT_Elem_EventMouseAndKey": "269",
"WCAG20_Doc_HasTitle": "273",
"RPT_Block_ShouldBeHeading": "322",
"WCAG20_Form_HasSubmit": "324",
"RPT_Elem_UniqueId": "377",
"RPT_Font_ColorInForm": "394",
"RPT_Label_UniqueFor": "398",
"RPT_Img_AltCommonMisuse": "453",
"RPT_Img_LongDescription2": "454",
"WCAG20_Img_HasAlt": "455",
"RPT_Style_BackgroundImage": "456",
"RPT_Pre_ASCIIArt": "458",
"RPT_Media_VideoReferenceTrigger": "511",
"RPT_Media_AudioVideoAltFilename": "460",
"RPT_Style_ColorSemantics1": "466",
"WCAG20_Select_HasOptGroup": "467",
"RPT_List_UseMarkup": "468",
"RPT_Script_OnclickHTML1": "470",
"WCAG20_Table_Structure": "471",
"WCAG20_Img_AltTriggerNonDecorative": "473",
"WCAG20_Blink_AlwaysTrigger": "478",
"RPT_Blink_CSSTrigger1": "479",
"RPT_Html_SkipNav": "481",
"RPT_Title_Valid": "484",
"RPT_Header_HasContent": "488",
"WCAG20_Html_HasLang": "490",
"WCAG20_Form_TargetAndText": "491",
"WCAG20_A_HasText": "495",
"WCAG20_Fieldset_HasLegend": "497",
"RPT_Media_VideoObjectTrigger": "501",
"RPT_Text_SensoryReference": "502",
"RPT_Embed_AutoStart": "503",
"RPT_Style_HinderFocus1": "506",
"WCAG20_Elem_Lang_Valid": "507",
"WCAG20_Img_LinkTextNotRedundant": "1000",
"RPT_Style_ExternalStyleSheet": "1073",
"RPT_Header_Trigger": "1002",
"RPT_Script_OnclickHTML2": "1007",
"WCAG20_Table_CapSummRedundant": "1011",
"WCAG20_Input_LabelBefore": "1017",
"WCAG20_Input_LabelAfter": "1018",
"WCAG20_Embed_HasNoEmbed": "1020",
"WCAG20_Table_Scope_Valid": "1025",
"WCAG20_Img_TitleEmptyWhenAltNull": "1027",
"WCAG20_Input_InFieldSet": "1028",
"WCAG20_Input_RadioChkInFieldSet": "1029",
"WCAG20_Select_NoChangeAction": "1035",
"WCAG20_Input_HasOnchange": "1050",
"RPT_Embed_HasAlt": "1051",
"Valerie_Noembed_HasContent": "1052",
"Valerie_Caption_HasContent": "1053",
"Valerie_Caption_InTable": "1054",
"Valerie_Label_HasContent": "1055",
"Valerie_Elem_DirValid": "1056",
"Valerie_Frame_SrcHtml": "1057",
"Valerie_Table_DataCellRelationships": "1059",
"RPT_Table_LayoutTrigger": "1060",
"RPT_Table_DataHeadingsAria": "1061",
"WCAG20_Label_RefValid": "1062",
"WCAG20_Elem_UniqueAccessKey": "1063",
"WCAG20_Script_FocusBlurs": "1064",
"HAAC_Img_UsemapAlt": "1067",
"WCAG20_Text_Emoticons": "1068",
"WCAG20_Style_BeforeAfter": "1069",
"WCAG20_Text_LetterSpacing": "1070",
"Rpt_Aria_ValidRole": "1074",
"Rpt_Aria_ValidPropertyValue": "1076",
"Rpt_Aria_ValidIdRef": "1077",
"Rpt_Aria_RequiredProperties": "1079",
"Rpt_Aria_EmptyPropertyValue": "1082",
"Rpt_Aria_ValidProperty": "1083",
"Rpt_Aria_InvalidTabindexForActivedescendant": "1084",
"Rpt_Aria_MissingFocusableChild": "1086",
"Rpt_Aria_MissingKeyboardHandler": "1087",
"WCAG20_Img_PresentationImgHasNonNullAlt": "1090",
"Rpt_Aria_MultipleSearchLandmarks": "1097",
"Rpt_Aria_MultipleApplicationLandmarks": "1099",
"Rpt_Aria_ApplicationLandmarkLabel": "1100",
"Rpt_Aria_MultipleDocumentRoles": "1101",
"WCAG20_Label_TargetInvisible": "1112",
"HAAC_Video_HasNoTrack": "1117",
"HAAC_Audio_Video_Trigger": "1119",
"HAAC_Input_HasRequired": "1124",
"HAAC_Aria_ImgAlt": "1128",
"HAAC_BackgroundImg_HasTextOrTitle": "1132",
"HAAC_Accesskey_NeedLabel": "1140",
"HAAC_Aria_Or_HTML5_Attr": "1141",
"HAAC_Canvas": "1143",
"HAAC_Figure_label": "1144",
"HAAC_Input_Placeholder": "1145",
"HAAC_Aria_Native_Host_Sematics": "1146",
"RPT_Form_ChangeEmpty": "1147",
"IBMA_Color_Contrast_WCAG2AA": "1148",
"IBMA_Color_Contrast_WCAG2AA_PV": "1149",
"WCAG20_Body_FirstASkips_Native_Host_Sematics": "1150",
"WCAG20_Body_FirstAContainsSkipText_Native_Host_Sematics": "1151",
"Rpt_Aria_RequiredChildren_Native_Host_Sematics": "1152",
"Rpt_Aria_RequiredParent_Native_Host_Sematics": "1153",
"Rpt_Aria_EventHandlerMissingRole_Native_Host_Sematics": "1154",
"Rpt_Aria_WidgetLabels_Implicit": "1156",
"Rpt_Aria_OrphanedContent_Native_Host_Sematics": "1157",
"Rpt_Aria_RegionLabel_Implicit": "1158",
"Rpt_Aria_MultipleMainsVisibleLabel_Implicit": "1159",
"Rpt_Aria_MultipleBannerLandmarks_Implicit": "1160",
"Rpt_Aria_MultipleComplementaryLandmarks_Implicit": "1161",
"Rpt_Aria_MultipleContentinfoLandmarks_Implicit": "1162",
"Rpt_Aria_MultipleFormLandmarks_Implicit": "1163",
"Rpt_Aria_MultipleNavigationLandmarks_Implicit": "1164",
"Rpt_Aria_ComplementaryLandmarkLabel_Implicit": "1165",
"Rpt_Aria_MultipleArticleRoles_Implicit": "1166",
"Rpt_Aria_ArticleRoleLabel_Implicit": "1167",
"Rpt_Aria_MultipleGroupRoles_Implicit": "1168",
"Rpt_Aria_GroupRoleLabel_Implicit": "1169",
"Rpt_Aria_MultipleContentinfoInSiblingSet_Implicit": "1170",
"Rpt_Aria_OneBannerInSiblingSet_Implicit": "1172",
"Rpt_Aria_ContentinfoWithNoMain_Implicit": "1173",
"Rpt_Aria_ComplementaryRequiredLabel_Implicit": "1174",
"Rpt_Aria_MultipleRegionsUniqueLabel_Implicit": "1176",
"IBMA_Focus_Tabbable": "1177",
"IBMA_Focus_MultiTab": "1178",
"WCAG20_Table_SummaryAria3": "1179",
"RPT_Style_Trigger2": "1180",
"Rpt_Aria_MultipleMainsRequireLabel_Implicit_2": "1182",
"HAAC_Media_DocumentTrigger2": "1183",
"HAAC_Aria_ErrorMessage": "1184",
"HAAC_List_Group_ListItem": "1185",
"HAAC_ActiveDescendantCheck": "1186",
"HAAC_Application_Role_Text": "1187",
"Rpt_Aria_MultipleToolbarUniqueLabel": "1188",
"HAAC_Combobox_ARIA_11_Guideline": "1193",
"HAAC_Combobox_Must_Have_Text_Input": "1194",
"HAAC_Combobox_DOM_Focus": "1195",
"HAAC_Combobox_Autocomplete": "1196",
"HAAC_Combobox_Autocomplete_Invalid": "1197",
"HAAC_Combobox_Expanded": "1198",
"HAAC_Combobox_Popup": "1199",
"WCAG21_Style_Viewport": "1200",
"WCAG21_Label_Accessible": "1202",
"WCAG21_Input_Autocomplete": "1203",
"WCAG20_Input_VisibleLabel": "1204"
}
let mapGToRule = {}
for (const key in mapRuleToG) {
mapGToRule[mapRuleToG[key]] = key;
}
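// Illustrative note (values taken from the map above): after this inversion,
// mapGToRule["455"] === "WCAG20_Img_HasAlt" and mapRuleToG["WCAG20_Img_HasAlt"] === "455".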
// Describe this suite of test cases; describe is a test suite and 'it' is a testcase.
describe("Rule Unit Tests", function() {
// Variable Declaration
let originalTimeout;
// All the html unit testcases will be stored in the window.__html__ by the preprocessor
let unitTestcaseHTML = window.__html__;
// Loop over all the unitTestcase html/htm files and perform a scan for them
for (let unitTestFile in unitTestcaseHTML) {
// Get the extension of the file we are about to scan
let fileExtension = unitTestFile.substr(unitTestFile.lastIndexOf('.') + 1);
// Make sure the unit testcase we are trying to scan is actually an html/htm/svg file; if it is not,
// just move on to the next one.
if (fileExtension !== 'html' && fileExtension !== 'htm' && fileExtension !== 'svg') {
continue;
}
// This IIFE is executed once for each of the unitTestFiles; we have to use this type of function
// to allow dynamic creation/execution of the unit test cases. It effectively forces a synchronous
// execution order, which is needed to make sure that all the tests run in the same order.
// For now we do not need to consider threaded execution, because all of these test cases together
// will take at most about half a second per test case (500ms * 780 test cases).
(function(unitTestFile) {
// Description of the test case that will be run.
describe("Load Test: " + unitTestFile, function() {
// Function to run before every testcase (it --> is a testcase)
// This before function allows adding async support to a testcase.
// The testcase will not run until the done function is called
beforeEach(function() {
// Extract the current jasmine DEFAULT_TIMEOUT_INTERVAL value to restore later on
originalTimeout = jasmine.DEFAULT_TIMEOUT_INTERVAL;
// Set the DEFAULT_TIMEOUT_INTERVAL to 3 minutes (180000 ms), to allow for the DAP scan to finish.
jasmine.DEFAULT_TIMEOUT_INTERVAL = 180000;
});
// The individual testcase for each of the unit test cases.
// Note the async function used here; it lets the test await async calls such as the checker scan.
it('a11y scan should match expected value', async function() {
// Extract the unitTestcase data file from the unitTestcase hash map.
// This will contain the full content of the testcase file. Includes the document
// object also.
let unitTestDataFileContent = unitTestcaseHTML[unitTestFile];
// Create an iframe element in the body of the current document
let iframe = document.createElement('iframe');
iframe.id = "unitTestcaseIframe";
// Append the iframe to the body
document.body.appendChild(iframe);
// Start to write the contents of the html file into the iframe document
// This will include the entire html page, including the doc type and all.
iframe.contentWindow.document.open();
iframe.contentWindow.document.write(unitTestDataFileContent);
iframe.contentWindow.document.close();
// Get the iframe window
let iframeWin = document.getElementById("unitTestcaseIframe").contentWindow;
// Get the iframe document that was just created
let iframeDoc = iframeWin.document;
let checker = new ace.Checker();
let report = await checker.check(iframeDoc, null);
expect(report.results).toBeDefined();
// Extract the ruleCoverage object from the unit testcases that is loaded on to the iframe.
let expectedInfo = iframeWin.UnitTest;
let legacyExpectedInfo = iframeWin.OpenAjax &&
iframeWin.OpenAjax.a11y &&
iframeWin.OpenAjax.a11y.ruleCoverage;
if (expectedInfo && expectedInfo.ruleIds) | else if (legacyExpectedInfo) {
let expectedInfo = {}
let actualInfo = {}
for (const item of legacyExpectedInfo) {
if (checker.engine.getRule(mapGToRule[item.ruleId])) {
expectedInfo[item.ruleId] = [];
actualInfo[item.ruleId] = [];
for (let xpath of item.failedXpaths) {
xpath = xpath.replace(/([^\]])\//g, "$1[1]/");
if (!xpath.endsWith("]")) xpath += "[1]";
expectedInfo[item.ruleId].push(xpath);
}
} else {
console.log("WARNING:",item.ruleId,"does not exist in current ruleset");
}
}
for (const issue of report.results) {
delete issue.node;
delete issue.ruleTime;
delete issue.bounds;
const ruleId = mapRuleToG[issue.ruleId];
if (ruleId in expectedInfo && issue.value[1] !== "PASS") {
actualInfo[ruleId].push(issue.path.dom);
}
}
for (const ruleId in expectedInfo) {
expectedInfo[ruleId].sort();
actualInfo[ruleId].sort();
}
expect(actualInfo).withContext("\nExpected:" + JSON.stringify(expectedInfo, null, 2) + "\nActual:" + JSON.stringify(actualInfo, null, 2)).toEqual(expectedInfo);
}
// let violationsData = data.report.fail;
// // In the case that the violationData is not defined then trigger an error right away.
// if (violationsData) {
// // Only try to verify results if there are baseline/expected results to actually verify
// if (expectedInfo) {
// // Declare the actualMap which will store all the actual xpath results
// let actualMap = {};
// // Loop over all the violation Data and extract the gID and the xpath for the gId and
// // add it to the actual Map.
// violationsData.forEach(function (actual) {
// // Create a new array in the case that one does not exists
// actualMap[actual.ruleId] = actualMap[actual.ruleId] || [];
// // Fix up the xPath: we need to strip the [1] indices so that it can actually match correctly.
// let fixComp = actual.component.replace(/\[1\]/g, "");
// // Add the fixed xPath to the actual map for the gId
// actualMap[actual.ruleId].push(fixComp);
// });
// // Loop over all the expected info objects and fix up the xPath so that it is ready for comparison
// expectedInfo.forEach(function (expected) {
// // Temp array to store all the fixed xpaths
// let temp = [];
// // Fix all the xPaths that are in the failedXpaths array
// expected.failedXpaths.forEach(function (xpath) {
// temp.push(xpath.replace(/\[1\]/g, ""));
// });
// // Reassign the temp fixed xpaths to failedXpaths
// expected.failedXpaths = temp;
// });
// // Loop over all the expected xPaths and make sure they are present in the actual results.
// // TODO: Add support for checking passed xPath here also.
// expectedInfo.forEach(function (expected) {
// // In the case the xPath exists in the actualMap then sort them
// if (actualMap[expected.ruleId]) {
// actualMap[expected.ruleId] = actualMap[expected.ruleId].sort();
// }
// // In the case the failedXpaths exists in the expected object then sort them
// if (expected.failedXpaths) {
// expected.failedXpaths = expected.failedXpaths.sort();
// }
// // In the case that the expected failed map is empty and we found violations triggered for this rule then mark this as failed.
// if (expected.failedXpaths.length == 0) {
// expect(typeof (actualMap[expected.ruleId])).toEqual('undefined', "\nShould trigger NO violations, but triggered for rule: " + expected.ruleId + " with " + actualMap[expected.ruleId]);
// }
// // In the case that the expected rule to be triggered is not triggered, throw an error as this test failed
// else if (!(expected.ruleId in actualMap)) {
// expect(false).toBe(true, "\nShould trigger violations, but triggered none: " + expected.ruleId + " " + expected.failedXpaths);
// }
// // Verify the results match using toEqual; this will compare the whole object
// else {
// expect(expected.failedXpaths).toEqual(actualMap[expected.ruleId]);
// }
// });
// } else {
// expect(false).toEqual(true, "\nThere is no baseline defined for: " + unitTestFile);
// }
// } else {
// expect(false).toEqual(true, "\nWas unable to scan: " + unitTestFile);
// }
// Mark the testcases as done.
// done();
// data.report.fail.forEach(function (violation) {
// // Extract all the information for each individual violation
// let severity = violation.severity;
// let severityCode = violation.severityCode;
// let message = violation.message;
// let component = violation.component;
// let componentNode = violation.componentNode;
// let ruleId = violation.ruleId;
// let lineNumber = violation.lineNumber;
// let levelCode = violation.levelCode;
// let level = violation.level;
// let help = violation.help;
// let msgArgs = violation.msgArgs;
// let filterHidden = violation.filterHidden;
//
// // Build the individual violations report to make it more readable to the user.
// let individualViolationBuilt = severity + ", " + message + ", " + component + ", " +
// ruleId + ", " + lineNumber + ", " + level + ", " + help;
//
// // Log the report for now.
// console.log(individualViolationBuilt);
// });
// });
});
// Function to run after every testcase (it --> is a testcase)
// This function will reset the DEFAULT_TIMEOUT_INTERVAL for a testcase in jasmine
afterEach(function() {
// Get iframe and then remove it from the page
let iframe = document.getElementById("unitTestcaseIframe");
iframe.parentElement.removeChild(iframe);
// Reset the Jasmine timeout
jasmine.DEFAULT_TIMEOUT_INTERVAL = originalTimeout;
});
});
}(unitTestFile));
}
}); | {
let filtReport = [];
for (const issue of report.results) {
delete issue.node;
delete issue.ruleTime;
delete issue.bounds;
if (expectedInfo.ruleIds.includes(issue.ruleId)) {
// These are too variable between runs - don't test these
delete issue.snippet;
filtReport.push(issue);
}
}
expect(filtReport).withContext(JSON.stringify(filtReport, null, 2)).toEqual(expectedInfo.results);
} | conditional_block |
Checker_ruleunit_test.js | /******************************************************************************
Copyright:: 2020- IBM, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*****************************************************************************/
'use strict';
let ace = require('../../../src/index');
const mapRuleToG = {
"RPT_List_Misuse": "3",
"RPT_Marquee_Trigger": "5",
"RPT_Headers_FewWords": "7",
"WCAG20_Input_ExplicitLabelImage": "10",
"RPT_Img_UsemapValid": "11",
"WCAG20_Object_HasText": "20",
"WCAG20_Applet_HasAlt": "21",
"RPT_Media_AudioTrigger": "24",
"RPT_Blockquote_HasCite": "25",
"RPT_Meta_Refresh": "33",
"WCAG20_Frame_HasTitle": "39",
"WCAG20_Input_ExplicitLabel": "41",
"RPT_Media_AltBrief": "99",
"WCAG20_A_TargetAndText": "112",
"WCAG20_Area_HasAlt": "240",
"RPT_Media_ImgColorUsage": "245",
"WCAG20_Meta_RedirectZero": "254",
"RPT_Elem_Deprecated": "256",
"RPT_Blockquote_WrapsTextQuote": "263",
"RPT_Elem_EventMouseAndKey": "269",
"WCAG20_Doc_HasTitle": "273",
"RPT_Block_ShouldBeHeading": "322",
"WCAG20_Form_HasSubmit": "324",
"RPT_Elem_UniqueId": "377",
"RPT_Font_ColorInForm": "394",
"RPT_Label_UniqueFor": "398",
"RPT_Img_AltCommonMisuse": "453",
"RPT_Img_LongDescription2": "454",
"WCAG20_Img_HasAlt": "455",
"RPT_Style_BackgroundImage": "456",
"RPT_Pre_ASCIIArt": "458",
"RPT_Media_VideoReferenceTrigger": "511",
"RPT_Media_AudioVideoAltFilename": "460",
"RPT_Style_ColorSemantics1": "466",
"WCAG20_Select_HasOptGroup": "467",
"RPT_List_UseMarkup": "468",
"RPT_Script_OnclickHTML1": "470",
"WCAG20_Table_Structure": "471",
"WCAG20_Img_AltTriggerNonDecorative": "473",
"WCAG20_Blink_AlwaysTrigger": "478",
"RPT_Blink_CSSTrigger1": "479",
"RPT_Html_SkipNav": "481",
"RPT_Title_Valid": "484",
"RPT_Header_HasContent": "488",
"WCAG20_Html_HasLang": "490",
"WCAG20_Form_TargetAndText": "491",
"WCAG20_A_HasText": "495",
"WCAG20_Fieldset_HasLegend": "497",
"RPT_Media_VideoObjectTrigger": "501",
"RPT_Text_SensoryReference": "502",
"RPT_Embed_AutoStart": "503",
"RPT_Style_HinderFocus1": "506",
"WCAG20_Elem_Lang_Valid": "507",
"WCAG20_Img_LinkTextNotRedundant": "1000",
"RPT_Style_ExternalStyleSheet": "1073",
"RPT_Header_Trigger": "1002",
"RPT_Script_OnclickHTML2": "1007",
"WCAG20_Table_CapSummRedundant": "1011",
"WCAG20_Input_LabelBefore": "1017",
"WCAG20_Input_LabelAfter": "1018",
"WCAG20_Embed_HasNoEmbed": "1020",
"WCAG20_Table_Scope_Valid": "1025",
"WCAG20_Img_TitleEmptyWhenAltNull": "1027",
"WCAG20_Input_InFieldSet": "1028",
"WCAG20_Input_RadioChkInFieldSet": "1029",
"WCAG20_Select_NoChangeAction": "1035",
"WCAG20_Input_HasOnchange": "1050",
"RPT_Embed_HasAlt": "1051",
"Valerie_Noembed_HasContent": "1052",
"Valerie_Caption_HasContent": "1053",
"Valerie_Caption_InTable": "1054",
"Valerie_Label_HasContent": "1055",
"Valerie_Elem_DirValid": "1056",
"Valerie_Frame_SrcHtml": "1057",
"Valerie_Table_DataCellRelationships": "1059",
"RPT_Table_LayoutTrigger": "1060",
"RPT_Table_DataHeadingsAria": "1061",
"WCAG20_Label_RefValid": "1062",
"WCAG20_Elem_UniqueAccessKey": "1063",
"WCAG20_Script_FocusBlurs": "1064",
"HAAC_Img_UsemapAlt": "1067",
"WCAG20_Text_Emoticons": "1068",
"WCAG20_Style_BeforeAfter": "1069",
"WCAG20_Text_LetterSpacing": "1070",
"Rpt_Aria_ValidRole": "1074",
"Rpt_Aria_ValidPropertyValue": "1076",
"Rpt_Aria_ValidIdRef": "1077",
"Rpt_Aria_RequiredProperties": "1079",
"Rpt_Aria_EmptyPropertyValue": "1082",
"Rpt_Aria_ValidProperty": "1083",
"Rpt_Aria_InvalidTabindexForActivedescendant": "1084",
"Rpt_Aria_MissingFocusableChild": "1086",
"Rpt_Aria_MissingKeyboardHandler": "1087",
"WCAG20_Img_PresentationImgHasNonNullAlt": "1090",
"Rpt_Aria_MultipleSearchLandmarks": "1097",
"Rpt_Aria_MultipleApplicationLandmarks": "1099",
"Rpt_Aria_ApplicationLandmarkLabel": "1100",
"Rpt_Aria_MultipleDocumentRoles": "1101",
"WCAG20_Label_TargetInvisible": "1112",
"HAAC_Video_HasNoTrack": "1117",
"HAAC_Audio_Video_Trigger": "1119",
"HAAC_Input_HasRequired": "1124",
"HAAC_Aria_ImgAlt": "1128",
"HAAC_BackgroundImg_HasTextOrTitle": "1132",
"HAAC_Accesskey_NeedLabel": "1140",
"HAAC_Aria_Or_HTML5_Attr": "1141",
"HAAC_Canvas": "1143",
"HAAC_Figure_label": "1144",
"HAAC_Input_Placeholder": "1145",
"HAAC_Aria_Native_Host_Sematics": "1146",
"RPT_Form_ChangeEmpty": "1147",
"IBMA_Color_Contrast_WCAG2AA": "1148",
"IBMA_Color_Contrast_WCAG2AA_PV": "1149",
"WCAG20_Body_FirstASkips_Native_Host_Sematics": "1150",
"WCAG20_Body_FirstAContainsSkipText_Native_Host_Sematics": "1151",
"Rpt_Aria_RequiredChildren_Native_Host_Sematics": "1152",
"Rpt_Aria_RequiredParent_Native_Host_Sematics": "1153",
"Rpt_Aria_EventHandlerMissingRole_Native_Host_Sematics": "1154",
"Rpt_Aria_WidgetLabels_Implicit": "1156",
"Rpt_Aria_OrphanedContent_Native_Host_Sematics": "1157",
"Rpt_Aria_RegionLabel_Implicit": "1158",
"Rpt_Aria_MultipleMainsVisibleLabel_Implicit": "1159",
"Rpt_Aria_MultipleBannerLandmarks_Implicit": "1160",
"Rpt_Aria_MultipleComplementaryLandmarks_Implicit": "1161",
"Rpt_Aria_MultipleContentinfoLandmarks_Implicit": "1162",
"Rpt_Aria_MultipleFormLandmarks_Implicit": "1163",
"Rpt_Aria_MultipleNavigationLandmarks_Implicit": "1164",
"Rpt_Aria_ComplementaryLandmarkLabel_Implicit": "1165",
"Rpt_Aria_MultipleArticleRoles_Implicit": "1166",
"Rpt_Aria_ArticleRoleLabel_Implicit": "1167",
"Rpt_Aria_MultipleGroupRoles_Implicit": "1168",
"Rpt_Aria_GroupRoleLabel_Implicit": "1169",
"Rpt_Aria_MultipleContentinfoInSiblingSet_Implicit": "1170",
"Rpt_Aria_OneBannerInSiblingSet_Implicit": "1172",
"Rpt_Aria_ContentinfoWithNoMain_Implicit": "1173",
"Rpt_Aria_ComplementaryRequiredLabel_Implicit": "1174",
"Rpt_Aria_MultipleRegionsUniqueLabel_Implicit": "1176",
"IBMA_Focus_Tabbable": "1177",
"IBMA_Focus_MultiTab": "1178",
"WCAG20_Table_SummaryAria3": "1179",
"RPT_Style_Trigger2": "1180",
"Rpt_Aria_MultipleMainsRequireLabel_Implicit_2": "1182",
"HAAC_Media_DocumentTrigger2": "1183",
"HAAC_Aria_ErrorMessage": "1184",
"HAAC_List_Group_ListItem": "1185",
"HAAC_ActiveDescendantCheck": "1186",
"HAAC_Application_Role_Text": "1187",
"Rpt_Aria_MultipleToolbarUniqueLabel": "1188",
"HAAC_Combobox_ARIA_11_Guideline": "1193",
"HAAC_Combobox_Must_Have_Text_Input": "1194",
"HAAC_Combobox_DOM_Focus": "1195",
"HAAC_Combobox_Autocomplete": "1196",
"HAAC_Combobox_Autocomplete_Invalid": "1197",
"HAAC_Combobox_Expanded": "1198",
"HAAC_Combobox_Popup": "1199",
"WCAG21_Style_Viewport": "1200",
"WCAG21_Label_Accessible": "1202",
"WCAG21_Input_Autocomplete": "1203",
"WCAG20_Input_VisibleLabel": "1204"
}
let mapGToRule = {}
for (const key in mapRuleToG) {
mapGToRule[mapRuleToG[key]] = key;
}
// Describe this suite of test cases; describe is a test suite and 'it' is a testcase.
describe("Rule Unit Tests", function() {
// Variable Declaration
let originalTimeout;
// All the html unit testcases will be stored in the window.__html__ by the preprocessor
let unitTestcaseHTML = window.__html__;
// Loop over all the unitTestcase html/htm files and perform a scan for them
for (let unitTestFile in unitTestcaseHTML) {
// Get the extension of the file we are about to scan
let fileExtension = unitTestFile.substr(unitTestFile.lastIndexOf('.') + 1);
// Make sure the unit testcase we are trying to scan is actually an html/htm/svg file; if it is not,
// just move on to the next one.
if (fileExtension !== 'html' && fileExtension !== 'htm' && fileExtension !== 'svg') {
continue;
}
// This IIFE is executed once for each of the unitTestFiles; we have to use this type of function
// to allow dynamic creation/execution of the unit test cases. It effectively forces a synchronous
// execution order, which is needed to make sure that all the tests run in the same order.
// For now we do not need to consider threaded execution, because all of these test cases together
// will take at most about half a second per test case (500ms * 780 test cases).
(function(unitTestFile) {
// Description of the test case that will be run.
describe("Load Test: " + unitTestFile, function() {
// Function to run before every testcase (it --> is a testcase)
// This before function allows adding async support to a testcase.
// The testcase will not run until the done function is called
beforeEach(function() {
// Extract the current jasmine DEFAULT_TIMEOUT_INTERVAL value to restore later on
originalTimeout = jasmine.DEFAULT_TIMEOUT_INTERVAL;
// Set the DEFAULT_TIMEOUT_INTERVAL to 3 minutes (180000 ms), to allow for the DAP scan to finish.
jasmine.DEFAULT_TIMEOUT_INTERVAL = 180000;
});
// The individual testcase for each of the unit test cases.
// Note the async function used here; it lets the test await async calls such as the checker scan.
it('a11y scan should match expected value', async function() {
// Extract the unitTestcase data file from the unitTestcase hash map.
// This will contain the full content of the testcase file. Includes the document
// object also.
let unitTestDataFileContent = unitTestcaseHTML[unitTestFile];
// Create an iframe element in the body of the current document
let iframe = document.createElement('iframe');
iframe.id = "unitTestcaseIframe";
// Append the iframe to the body
document.body.appendChild(iframe);
// Start to write the contents of the html file into the iframe document
// This will include the entire html page, including the doc type and all.
iframe.contentWindow.document.open();
iframe.contentWindow.document.write(unitTestDataFileContent);
iframe.contentWindow.document.close();
// Get the iframe window
let iframeWin = document.getElementById("unitTestcaseIframe").contentWindow;
// Get the iframe document that was just created
let iframeDoc = iframeWin.document;
let checker = new ace.Checker();
let report = await checker.check(iframeDoc, null);
expect(report.results).toBeDefined();
// Extract the ruleCoverage object from the unit testcases that is loaded on to the iframe.
let expectedInfo = iframeWin.UnitTest;
let legacyExpectedInfo = iframeWin.OpenAjax &&
iframeWin.OpenAjax.a11y &&
iframeWin.OpenAjax.a11y.ruleCoverage;
if (expectedInfo && expectedInfo.ruleIds) {
let filtReport = [];
for (const issue of report.results) {
delete issue.node;
delete issue.ruleTime;
delete issue.bounds;
if (expectedInfo.ruleIds.includes(issue.ruleId)) {
// These are too variable between runs - don't test these
delete issue.snippet;
filtReport.push(issue);
}
}
expect(filtReport).withContext(JSON.stringify(filtReport, null, 2)).toEqual(expectedInfo.results);
} else if (legacyExpectedInfo) {
let expectedInfo = {}
let actualInfo = {}
for (const item of legacyExpectedInfo) {
if (checker.engine.getRule(mapGToRule[item.ruleId])) {
expectedInfo[item.ruleId] = [];
actualInfo[item.ruleId] = [];
for (let xpath of item.failedXpaths) {
xpath = xpath.replace(/([^\]])\//g, "$1[1]/");
if (!xpath.endsWith("]")) xpath += "[1]";
expectedInfo[item.ruleId].push(xpath);
}
} else {
console.log("WARNING:",item.ruleId,"does not exist in current ruleset");
}
}
for (const issue of report.results) {
delete issue.node;
delete issue.ruleTime;
delete issue.bounds;
const ruleId = mapRuleToG[issue.ruleId];
if (ruleId in expectedInfo && issue.value[1] !== "PASS") {
actualInfo[ruleId].push(issue.path.dom);
}
}
for (const ruleId in expectedInfo) {
expectedInfo[ruleId].sort();
actualInfo[ruleId].sort();
}
expect(actualInfo).withContext("\nExpected:" + JSON.stringify(expectedInfo, null, 2) + "\nActual:" + JSON.stringify(actualInfo, null, 2)).toEqual(expectedInfo);
}
// let violationsData = data.report.fail;
// // In the case that the violationData is not defined then trigger an error right away.
// if (violationsData) {
// // Only try to verify results if there are baseline/expected results to actually verify
// if (expectedInfo) {
// // Declare the actualMap which will store all the actual xpath results
// let actualMap = {};
// // Loop over all the violation Data and extract the gID and the xpath for the gId and
// // add it to the actual Map.
// violationsData.forEach(function (actual) {
// // Create a new array in the case that one does not exists
// actualMap[actual.ruleId] = actualMap[actual.ruleId] || [];
// // Fix up the xPath: we need to strip the [1] indices so that it can actually match correctly.
// let fixComp = actual.component.replace(/\[1\]/g, "");
// // Add the fixed xPath to the actual map for the gId
// actualMap[actual.ruleId].push(fixComp);
// });
// // Loop over all the expected info objects and fix up the xPath so that it is ready for comparison
// expectedInfo.forEach(function (expected) {
// // Temp array to store all the fixed xpaths
// let temp = [];
// // Fix all the xPaths that are in the failedXpaths array
// expected.failedXpaths.forEach(function (xpath) {
// temp.push(xpath.replace(/\[1\]/g, ""));
// });
// // Reassign the temp fixed xpaths to failedXpaths
// expected.failedXpaths = temp;
// });
// // Loop over all the expected xPaths and make sure they are present in the actual results.
// // TODO: Add support for checking passed xPath here also.
// expectedInfo.forEach(function (expected) {
// // In the case the xPath exists in the actualMap then sort them
// if (actualMap[expected.ruleId]) {
// actualMap[expected.ruleId] = actualMap[expected.ruleId].sort();
// }
// // In the case the failedXpaths exists in the expected object then sort them
// if (expected.failedXpaths) {
// expected.failedXpaths = expected.failedXpaths.sort();
// }
// // In the case that the expected failed map is empty and we found violations triggered for this rule then mark this as failed.
// if (expected.failedXpaths.length == 0) { | // }
// // In the case that the expected rule to be triggered is not triggered, throw an error as this test failed
// else if (!(expected.ruleId in actualMap)) {
// expect(false).toBe(true, "\nShould trigger violations, but triggered none: " + expected.ruleId + " " + expected.failedXpaths);
// }
// // Verify the results match using toEqual; this will compare the whole object
// else {
// expect(expected.failedXpaths).toEqual(actualMap[expected.ruleId]);
// }
// });
// } else {
// expect(false).toEqual(true, "\nThere is no baseline defined for: " + unitTestFile);
// }
// } else {
// expect(false).toEqual(true, "\nWas unable to scan: " + unitTestFile);
// }
// Mark the testcases as done.
// done();
// data.report.fail.forEach(function (violation) {
// // Extract all the information for each individual violation
// let severity = violation.severity;
// let severityCode = violation.severityCode;
// let message = violation.message;
// let component = violation.component;
// let componentNode = violation.componentNode;
// let ruleId = violation.ruleId;
// let lineNumber = violation.lineNumber;
// let levelCode = violation.levelCode;
// let level = violation.level;
// let help = violation.help;
// let msgArgs = violation.msgArgs;
// let filterHidden = violation.filterHidden;
//
// // Build the individual violations report to make it more readable to the user.
// let individualViolationBuilt = severity + ", " + message + ", " + component + ", " +
// ruleId + ", " + lineNumber + ", " + level + ", " + help;
//
// // Log the report for now.
// console.log(individualViolationBuilt);
// });
// });
});
// Function to run after every testcase (it --> is a testcase)
// This function will reset the DEFAULT_TIMEOUT_INTERVAL for a testcase in jasmine
afterEach(function() {
// Get iframe and then remove it from the page
let iframe = document.getElementById("unitTestcaseIframe");
iframe.parentElement.removeChild(iframe);
// Reset the Jasmine timeout
jasmine.DEFAULT_TIMEOUT_INTERVAL = originalTimeout;
});
});
}(unitTestFile));
}
}); | // expect(typeof (actualMap[expected.ruleId])).toEqual('undefined', "\nShould trigger NO violations, but triggered for rule: " + expected.ruleId + " with " + actualMap[expected.ruleId]); | random_line_split |
shows_data.js | exports.showsData = [
{
"id": 1,
"name": "Theatre",
"info": "Theatre is a collaborative form of performing art that uses live performers, usually actors or actresses, to present the experience of a real or imagined event before a live audience in a specific place, often a stage. The performers may communicate this experience to the audience through combinations of gesture, speech, song, music, and dance. Elements of art, such as painted scenery and stagecraft such as lighting are used to enhance the physicality, presence and immediacy of the experience.",
"info2": "The art forms of ballet and opera are also theatre and use many conventions such as acting, costumes and staging.",
"description": "Theatre",
"photos": [
"https://aws-tiqets-cdn.imgix.net/images/content/8910979ce40044d5a0e71aab06ccd3dd.jpg?auto=format&fit=crop&ixlib=python-3.2.1&q=25&s=fbb67a7c4d2c475a89ce03c34f0a4213&w=375&h=250&dpr=2.625",
"https://www.potsdam.edu/sites/default/files/styles/optimized/public/2019-08/MT.jpg?itok=282s-0tZ",
"https://es.parisinfo.com/var/otcp/sites/images/node_43/node_51/node_77884/node_77888/th%C3%A9%C3%A2tre-du-palais-royal-salle-de-spectacle-%7C-630x405-%7C-%C2%A9-b.-richeb%C3%A9/15817471-1-fre-FR/Th%C3%A9%C3%A2tre-du-Palais-Royal-Salle-de-spectacle-%7C-630x405-%7C-%C2%A9-B.-Richeb%C3%A9.jpg"
]
},
{
"id": 2,
"name": "Concert",
"info": "A concert is a live music performance in front of an audience. The performance may be by a single musician, sometimes then called a recital, or by a musical ensemble, such as an orchestra, choir, or band. Concerts are held in a wide variety and size of settings, from private houses and small nightclubs, dedicated concert halls, amphitheatres and parks, to large multipurpose buildings, such as arenas and stadiums. Indoor concerts held in the largest venues are sometimes called arena concerts or amphitheatre concerts. Informal names for a concert include show and gig.",
"description": "Concert",
"photos": [
"https://ichef.bbci.co.uk/news/1024/cpsprodpb/16441/production/_109910219_massiveattack2.jpg",
"http://www.surinenglish.com/noticias/202009/23/media/cortadas/Imagen%[email protected]",
"https://www.diariobahiadecadiz.com/noticias/wp-content/uploads/2018/12/conciertoconcertmusicfestivalchi18-web-750x430.jpg"
]
},
{
"id": 3,
"name": "Cirque du Soleil",
"info": "A circus is a company of performers who put on diverse entertainment shows that may include clowns, acrobats, trained animals, trapeze acts, musicians, dancers, hoopers, tightrope walkers, jugglers, magicians, unicyclists, as well as other object manipulation and stunt-oriented artists. Contemporary circus has been credited with a revival of the circus tradition since the late 1970s, when a number of groups began to experiment with new circus formats and aesthetics, typically avoiding the use of animals to focus exclusively on human artistry. Circuses within the movement have tended to favor a theatrical approach, combining character-driven circus acts with original music in a broad variety of styles to convey complex themes or stories.",
"description": "Cirque du Soleil",
"photos": [
"https://agendainfantil.es/wp-content/uploads/2020/01/circus-roncalli.jpg",
"https://www.lovevalencia.com/wp-content/uploads/2017/12/circo-wonderland-valencia-inframundo.jpg",
"https://cadenaser00.epimg.net/ser/imagenes/2020/06/29/internacional/1593459707_054833_1593460181_noticia_normal.jpg"
]
}
]
exports.shows = [
{
id: 26,
name: "The Lion King",
price: 35.99,
hotelPoints: 500,
photo: "https://www.miaminews24.com/wp-content/uploads/2019/05/cheetah-tab.jpg",
description: "When an unthinkable tragedy, orchestrated by Simba’s wicked uncle, Scar, takes his father’s life, Simba flees the Pride Lands, leaving his loss and the life he knew behind. Eventually companioned by two hilarious and unlikely friends, Simba starts anew. But when weight of responsibility and a desperate plea from the now ravaged Pride Lands come to find the adult prince.",
quantity: 0,
category: "theatre"
},
{
id: 27,
name: "The Phantom of the Opera",
price: 30.50,
hotelPoints: 450,
photo: "https://cdn.londonandpartners.com/asset/the-phantom-of-the-opera-musical-at-her-majestys-theatre_phantom-of-the-opera-image-courtesy-of-cameron-mackintosh_240b6958e824776c4b4b222d72281b95.jpg",
description: "Based on the 1910 horror novel by Gaston Leroux, which has been adapted into countless films, The Phantom of the Opera follows a deformed composer who haunts the grand Paris Opera House. Sheltered from the outside world in an underground cavern, the lonely, romantic man tutors and composes operas for Christine, a gorgeous young soprano star-to-be. ",
quantity: 0,
category: "theatre"
},
{
id: 28,
name: "Aladdin",
price: 42.99,
hotelPoints: 550,
photo: "https://www.broadwaycollection.com/wp-content/uploads/2015/08/aladdin.jpg",
description: "In the middle-eastern town of Agrabah, Princess Jasmine is feeling hemmed in by her father’s desire to find her a royal groom. Meanwhile, the Sultan’s right-hand man, Jafar, is plotting to take over the throne. When Jasmine sneaks out of the palace incognito, she forms an instant connection with Aladdin, a charming street urchin and reformed thief.",
quantity: 0,
category: "theatre"
},
{
id: 29, | name: "Wicked",
price: 32.50,
hotelPoints: 470,
photo: "https://image.nuevayork.es/wp-content/uploads/2014/11/Wicked-on-Broadway-Tickets.png.webp",
description: "Wicked, the Broadway sensation, looks at what happened in the Land of Oz…but from a different angle. Long before Dorothy arrives, there is another girl, born with emerald-green skin—smart, fiery, misunderstood, and possessing an extraordinary talent. When she meets a bubbly blonde who is exceptionally popular, their initial rivalry turns into the unlikeliest of friendships…",
quantity: 0,
category: "theatre"
},
{
id: 30,
name: "Ain't Too Proud",
price: 25.99,
hotelPoints: 400,
photo: "https://www.love4musicals.com/wp-content/uploads/2018/11/1200x6752-1200x675-e1541668270340.jpg",
description: "Ain't Too Proud is the electrifying new musical that follows The Temptations' extraordinary journey from the streets of Detroit to the Rock & Roll Hall of Fame. With their signature dance moves and unmistakable harmonies, they rose to the top of the charts creating an amazing 42 Top Ten Hits with 14 reaching number one.",
quantity: 0,
category: "theatre"
},
{
id: 31,
name: "Queen - Wembley",
price: 89.99,
hotelPoints: 1000,
photo: "https://images-na.ssl-images-amazon.com/images/I/71g40mlbinL._SL1072_.jpg",
description: "Exactly 365 days after their world famous Live Aid gig, Queen returns to Wembley Stadium. Why this concert is better than the Live Aid one? Simple. Because it’s longer. A full-length rock show instead of a 20 minute TV gig. From the first chord of One Vision to the last euphoric singalong of We Are The Champions; Freddie, Bryan, Roger and John show London who the best live band in the world is.",
quantity: 0,
category: "concert"
},
{
id: 32,
name: "Led Zeppelin - Royal Albert",
price: 69.99,
hotelPoints: 800,
photo: "https://www.nacionrock.com/wp-content/uploads/117162885.jpg",
description: "One singer, one guitarist, one bass player and a drummer. Classic. Rock. In 1970 Robert Plant, Jimmy Page, John Paul Jones and John Bonham celebrated Jimmy’s 26th birthday at the Royal Albert Hall. “At the time the largest and most prestigious gig”, according to the birthday boy himself. Only two years after their first gig (as a band called “The New Yardbirds”) they gave rock history one of the most legendary two-and-a-half-hours of all time.",
quantity: 0,
category: "concert"
},
{
id: 33,
name: "David Bowie - Santa Monica",
price: 85.99,
hotelPoints: 950,
photo: "https://img.discogs.com/V7w6P2ut4y_EiD5Pf4InpMZ-_tk=/fit-in/300x300/filters:strip_icc():format(jpeg):mode_rgb():quality(40)/discogs-images/R-9909633-1488376223-6630.png.jpg",
description: "Davy Jones. No, not the singer of The Monkees, but the legendary artist also known as Ziggy Stardust, the Thin White Duke, Aladdin Sane, Jareth the Goblin King, or just Bowie. David Bowie. From his revolutionary breakthrough with Space Oddity to the last painful notes of Black Star, Bowie was a true artist. It’s not possible to choose the best concert of an artist with such a rich history of live performances. ",
quantity: 0,
category: "concert"
},
{
id: 34,
name: "Beyonce - Formation World Tour",
price: 75.50,
hotelPoints: 850,
photo: "https://lastfm.freetls.fastly.net/i/u/770x0/ad61dcc924c46e3ef570d96f3d5a183a.jpg",
description: "Is it possible to be a legend when you are still alive, don’t have a criminal record, no past of drug abuse, are happily married, never have an off-night… actually being too damn annoyingly perfect? Yes you can. We might miss a few legendary artists of the 60s, 70s and 80s on this list, but we can’t ignore the spectacular world tour of this millennial icon… the Mother Teresa of pop music: Beyonce!",
quantity: 0,
category: "concert"
},
{
id: 35,
name: "U2 - Slane Castle",
price: 89.99,
hotelPoints: 1000,
photo: "https://lh3.googleusercontent.com/-B1FrsALBBas/YEhoWNdRYHI/AAAAAAABOlc/NPtg1YvdgWwggqtd78uXorhb3sfwSyBEwCLcBGAsYHQ/w400-h400/1981-08-16-Slane-SlaneCastle-Front1.jpg",
description: "U2 are an Irish rock band from Dublin, formed in 1976. The group consists of Bono (lead vocals and rhythm guitar), the Edge (lead guitar, keyboards, and backing vocals), Adam Clayton (bass guitar), and Larry Mullen Jr. (drums and percussion). Initially rooted in post-punk, U2's musical style has evolved throughout their career, yet has maintained an anthemic quality built on Bono's expressive vocals and the Edge's chiming, effects-based guitar sounds.",
quantity: 0,
category: "concert"
},
{
id: 36,
name: "KÀ - The glory of battle",
price: 50.99,
hotelPoints: 550,
photo: "https://i.ytimg.com/vi/w8vmF6tbU1k/sddefault.jpg",
description: "Gravity is challenged, enemies engage in noble combat that transforms into acrobatics. Never has a battle been fought in such a way. The battlefield is a colossal scene in motion. Everything is reversed: the earth becomes heaven. These clashes from another dimension redefine what it means to fight. The path is determination and victory is the reward.",
quantity: 0,
category: "circus"
},
{
id: 37,
name: "Mystere - Laugh with life",
price: 65.99,
hotelPoints: 750,
photo: "https://media.tacdn.com/media/attractions-splice-spp-674x446/07/71/7d/88.jpg",
description: "Inside the imagination there is a fun place to be. This world is a joyous adventure filled with music and bright colors, athletics and bubbly friends. Nonsense, crazy stunts, and mind-blowing jokes abound. A merry house of laughter ... and you are at the front door. Knock, knock ... Who is it? MYSTERE. Discover the beautiful side of life, only on Treasure Island. Reserve today!",
quantity: 0,
category: "circus"
},
{
id: 38,
name: "KURIOS - World of curiosities",
price: 60.50,
hotelPoints: 720,
photo: "https://www.valenciateatros.com/wp-content/uploads/2020/05/KURIOS-banner-.jpg",
description: "Enter the cabinet of curiosities of an ambitious inventor who defies the laws of time, space and dimensions to reinvent everything around him. Suddenly, the visible becomes invisible, the perspectives are transformed and the world is literally give. KURIOS is a show “that cannot be missed for any reason”.",
quantity: 0,
category: "circus"
},
{
id: 39,
name: "JOYÀ - Experience for the senses",
price: 42.99,
hotelPoints: 430,
photo: "https://www.cirquedusoleil.com/-/media/cds/images/shows/joya/joya_facebook_share_1200x630.jpg?db=web&la=en&vs=1&hash=7AF442B3AA5CA5FFD2A1EAC65B4E8233F51EB998",
description: "JOYÀ, the first permanent Cirque du Soleil show in Mexico, combines culinary and performing arts in an intimate theater designed to engage audiences through the five senses. JOYÀ tells of the adventure of a teenager who is suddenly catapulted into the heart of a mysterious jungle that is inside the universe of her grandfather, an extravagant man who loves nature, who constantly searches for the meaning of life.",
quantity: 0,
category: "circus"
},
{
id: 40,
name: "CRYSTAL - Performance on ice",
price: 58.99,
hotelPoints: 650,
photo: "https://i0.wp.com/thehappening.com/wp-content/uploads/2019/07/crystal.jpg?fit=1024%2C694&ssl=1",
description: "Crystal is not just a frozen show, it is Cirque du Soleil's first experience on ice . Watch world-class ice skaters and acrobats claim a new frozen stage, swiftly and fluidly, as they defy the laws of gravity with never-before-seen stunts. A new kind of show where Cirque du Soleil meets ice to defy all expectations.",
quantity: 0,
category: "circus"
}
]; | random_line_split |
|
tool.js | //生产环境
window.myDomain = 'http://ai.aiknowsclauses.com/';
const baseURL = 'https://back.aiknowsclauses.com/';
const appId = 'wx2e03c5129c077bde';
const umengId = 1268531919;
// Test environment
// window.myDomain = 'https://ai.aijkang.com/';
// const baseURL = 'https://aikcback.aijkang.com/';
// const appId = 'wx923490c114577ba3';
// const umengId = 1268512783;
import Vue from 'vue'
import axios from 'axios'
import '@/assets/lib/sha1.js'
let util = {};
// Umeng analytics tracking snippet
{
let cnzz_s_tag = document.createElement('script');
cnzz_s_tag.type = 'text/javascript';
cnzz_s_tag.async = true;
cnzz_s_tag.charset = 'utf-8';
cnzz_s_tag.src = 'https://s22.cnzz.com/z_stat.php?id='+umengId+'&web_id='+umengId+'&async=1';
let root_s = document.getElementsByTagName('script')[0];
root_s.parentNode.insertBefore(cnzz_s_tag, root_s);
}
// Utility functions and Vue custom filters below
// Ma Yao 2017-06-14
util.isNumber = function(input) {
return (typeof input === "number") && Number.isFinite(input);
}
util.isString = function(input) {
return (typeof input === "string") && (input.__proto__ === String.prototype);
}
util.isArray = function(input) {
// return !!input && (typeof input === "object") && (input.__proto__ === Array.prototype);
// The commented-out check above fails on prototype detection (Vue patches arrays under data)
return !!input && (typeof input === "object") && (!!input.__proto__.push);
}
util.isObject = function(input) {
return !!input && (typeof input === "object") && (input.__proto__ === Object.prototype);
}
util.isFunction = function(input) {
return !!input && (typeof input === "function") && (input.__proto__ === Function.prototype);
}
// Check whether the given argument is an empty object
util.isEmptyObject = function(obj) {
if (!util.isObject(obj)) {
return null
}
for (var i in obj) {
return false;
}
return true;
}
// Get the value of a named query parameter from the url
util.getQueryString = function(name) {
var reg = new RegExp("(^|&)" + name + "=([^&]*)(&|$)");
var split = window.location.href.split('?');
var len = split.length;
if (len == 1) {
return null
}
var r = split[len - 1].match(reg);
if (r != null) {
return decodeURIComponent(r[2]).split('#')[0];
}
return null;
}
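// Usage sketch (illustrative URL): for ".../page?route=detail&id=12",
// util.getQueryString('id') returns "12" and util.getQueryString('missing') returns null.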
// Return a deep copy of the given object (not a pointer reference)
util.copy = function(obj) {
if (!util.isObject(obj) && !util.isArray(obj)) {
return null
}
return JSON.parse(JSON.stringify(obj));
}
// Compute how far a timestamp is from now: {status,year,month,day,hour,minute,second}
util.getOffsetTime = function(time) {
let output = {
status: null,
year: null,
month: null,
day: null,
hour: null,
minute: null,
second: null
}
let thisTime = +new Date();
let thatTime = +new Date(time);
let offsetTime = thatTime - thisTime;
if (offsetTime < 0) {
output.status = 'past';
} else {
output.status = 'future';
}
offsetTime = Math.abs(offsetTime);
output.second = ~~(offsetTime / 1000) % 60;
output.minute = ~~(offsetTime / 1000 / 60) % 60;
output.hour = ~~(offsetTime / 1000 / 60 / 60) % 24;
output.day = ~~(offsetTime / 1000 / 60 / 60 / 24) % 30;
output.month = ~~(offsetTime / 1000 / 60 / 60 / 24 / 30) % 12;
output.year = ~~(offsetTime / 1000 / 60 / 60 / 24 / 30 / 12);
return output;
}
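// Usage sketch (illustrative): util.getOffsetTime(Date.now() + 90 * 1000) yields
// { status: 'future', minute: 1, second: 30, ... } (give or take the call overhead).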
// Convert a remaining-time value into days/hours/minutes/seconds left
util.getLeftTime = function(time) {
let output = {
day: 0,
hour: 0,
minute: 0,
second: 0
}
if (!(time > 0)) {
return output;
}
output.second = ~~(time / 1000) % 60;
output.minute = ~~(time / 1000 / 60) % 60;
output.hour = ~~(time / 1000 / 60 / 60) % 24;
output.day = ~~(time / 1000 / 60 / 60 / 24);
return output;
}
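// Usage sketch (illustrative): util.getLeftTime(3 * 60 * 60 * 1000) returns
// { day: 0, hour: 3, minute: 0, second: 0 }.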
/**
 * Prevent the WeChat browser's pull-down bounce
 */
util.stopDrag = function() {
window.lastY = 0; // last recorded y coordinate
document.body.ontouchstart = function(event) {
window.lastY = event.originalEvent.changedTouches[0].clientY; // record the last Y coordinate when the screen is touched.
};
document.body.ontouchmove = function(event) {
var el = document.getElementsByClassName('scroll');
if (el.length == 0) {
return;
}
var y = event.originalEvent.changedTouches[0].clientY;
var st = el[0].scrollTop; // scroll offset
if (y >= window.lastY && st <= 10) { // if the scroll offset is at (or near) the top and the user is pulling down, block the touchmove event.
window.lastY = y;
event.preventDefault();
}
window.lastY = y;
};
}
/**
 * Prevent the WeChat browser's pull-down bounce
 */
util.overscroll = function() {
var el = document.querySelector('.scroll');
el.addEventListener('touchstart', function(evt) {
window.lastY = evt.targetTouches[0].clientY; // record the last Y coordinate when the screen is touched.
var top = el.scrollTop;
var totalScroll = el.scrollHeight;
var currentScroll = top + el.offsetHeight;
if (top === 0) {
el.scrollTop = 1;
} else if (currentScroll === totalScroll) {
el.scrollTop = top - 1;
}
});
el.addEventListener('touchmove', function(evt) {
var y = evt.targetTouches[0].clientY;
if (el.offsetHeight < el.scrollHeight) {
evt._isScroller = true;
}
if (el.scrollTop <= 10 && y > window.lastY) {
evt._isScroller = false;
}
});
// Prevent the WeChat browser from exposing the background when pulled down
document.body.ontouchstart = function(evt) {
evt.preventDefault();
if (!evt._isScroller) {
evt.preventDefault();
}
};
}
// 获取jssdk | on() {
// Get the current timestamp (in seconds)
var timestamp = Date.parse(new Date()) / 1000;
var jsapi_ticket = window.jsapi_ticket;
var nonceStr = createNonceStr();
function createNonceStr() {
var chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
var str = "";
for (var i = 0; i < 16; i++) {
var randomNum = parseInt(Math.random() * chars.length, 10);
str += chars.substr(randomNum, 1);
}
return str;
}
// Bail out early if the jsapi ticket is not available
if (!jsapi_ticket) {
return false;
}
// The parameters here must be ordered by key in ascending ASCII order
var string = "jsapi_ticket=" + jsapi_ticket + "&noncestr=" + nonceStr + "×tamp=" + timestamp + "&url=" + window.location.href.split('#')[0];
var data = new Uint8Array(encodeUTF8(string));
var result = sha1(data);
var signature = Array.prototype.map.call(result, function(e) {
return (e < 16 ? "0" : "") + e.toString(16);
}).join("");
// assemble the json to return
var json = {}
json.timestamp = timestamp;
json.nonceStr = nonceStr;
json.signature = signature;
return json;
}
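// Illustrative example of the string that is SHA-1 hashed above (ticket/nonce values are made up):
// "jsapi_ticket=SAMPLE_TICKET&noncestr=AbCdEfGh12345678&timestamp=1600000000&url=http://ai.aiknowsclauses.com/"
// The resulting hex digest is returned as json.signature and later passed to wx.config().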
// Disable WeChat sharing
util.shareDeny = function() {
// config that disables sharing
wx.config({
debug: false,
appId: 'wx0cad77c43b1d74ce',
timestamp: 123123213,
nonceStr: '123123123',
signature: '123123123',
jsApiList: [
'hideOptionMenu',
]
});
wx.ready(function() {
wx.checkJsApi({
jsApiList: [
'hideOptionMenu',
]
});
wx.hideOptionMenu();
});
window.weixinShare.status = 'deny';
}
// Share the overall evaluation
util.shareEvaluation = function(name) {
var json = util.getJssdkSign();
if (json == false) {
return;
}
// Get the insurance id
var insuranceId = util.getQueryString('id');
wx.showOptionMenu();
// configure WeChat sharing
wx.config({
debug: false,
appId: appId,
timestamp: json.timestamp,
nonceStr: json.nonceStr,
signature: json.signature,
jsApiList: [
'checkJsApi',
'onMenuShareAppMessage',
'onMenuShareTimeline',
]
});
wx.ready(function() {
wx.checkJsApi({
jsApiList: [
'onMenuShareAppMessage',
'onMenuShareTimeline',
]
});
// 2.1 Listen for the "share with friends" button click; custom share content and share-result callbacks
wx.onMenuShareAppMessage({
title: name + ',人工智能大揭秘',
desc: name + ',人工智能大揭秘',
link: window.location.origin + '/expert/?route=detail&id=' + insuranceId,
imgUrl: window.location.origin + '/expert/logo.jpg',
trigger: function(res) {
//alert('用户点击发送给朋友');
},
success: function(res) {
//alert('已分享');
},
cancel: function(res) {
//alert('已取消');
},
fail: function(res) {
//alert(JSON.stringify(res));
}
});
// 2.2 Listen for the "share to Moments" button click; custom share content and share-result callbacks
wx.onMenuShareTimeline({
title: name + ',人工智能大揭秘',
link: window.location.origin + '/expert/?route=detail&id=' + insuranceId,
imgUrl: window.location.origin + '/expert/logo.jpg',
trigger: function(res) {
//alert('用户点击分享到朋友圈');
},
success: function(res) {
//alert('已分享');
},
cancel: function(res) {
//alert('已取消');
},
fail: function(res) {
//alert(JSON.stringify(res));
}
});
});
window.weixinShare.status = 'evaluation';
}
// Share the report
util.shareReport = function(name, code) {
var json = util.getJssdkSign();
if (json == false) {
return;
}
wx.showOptionMenu();
// configure WeChat sharing
wx.config({
debug: false,
appId: appId,
timestamp: json.timestamp,
nonceStr: json.nonceStr,
signature: json.signature,
jsApiList: [
'checkJsApi',
'onMenuShareAppMessage',
'onMenuShareTimeline',
]
});
wx.ready(function() {
wx.checkJsApi({
jsApiList: [
'onMenuShareAppMessage',
'onMenuShareTimeline',
]
});
// 2.1 Listen for the "share with friends" button click; custom share content and share-result callbacks
wx.onMenuShareAppMessage({
title: name + ',这个报告很棒哦,分享给你~',
desc: name + ',这个报告很棒哦,分享给你~',
link: window.location.origin + '/expert/?route=report&invitationCode=' + encodeURIComponent(code),
imgUrl: window.location.origin + '/expert/logo.jpg',
trigger: function(res) {
//alert('用户点击发送给朋友');
},
success: function(res) {
//alert('已分享');
util.ajax('/report/generateLink', 'post', {
invitationCode: encodeURIComponent(code)
}).then(function(res) {
if (res.code == '100000') {
// window.ui.showToast('已分享','',true);
}
});
},
cancel: function(res) {
//alert('已取消');
},
fail: function(res) {
//alert(JSON.stringify(res));
}
});
// 2.2 Listen for the "share to Moments" button click; custom share content and share-result callbacks
wx.onMenuShareTimeline({
title: name + ',这个报告很棒哦,分享给你~',
link: window.location.origin + '/expert/?route=report&invitationCode=' + encodeURIComponent(code),
imgUrl: window.location.origin + '/expert/logo.jpg',
trigger: function(res) {
//alert('用户点击分享到朋友圈');
},
success: function(res) {
//alert('已分享');
util.ajax('/report/generateLink', 'post', {
invitationCode: encodeURIComponent(code)
}).then(function(res) {
if (res.code == '100000') {
// window.ui.showToast('已分享','',true);
}
})
},
cancel: function(res) {
//alert('已取消');
},
fail: function(res) {
//alert(JSON.stringify(res));
}
});
});
window.weixinShare.status = 'report';
}
window.weixinShare = {
status: 'init',
shareDeny: util.shareDeny,
shareEvaluation: util.shareEvaluation,
shareReport: util.shareReport
};
util.filter = {};
// Limit the array length; num: number of members in the output array
util.filter.limitBy = function(input, num) {
if (util.isArray(input) == false || util.isNumber(num) == false || num < 0) {
return [];
}
return _.take(input, num);
}
// Sort array members; param: the member property to sort by; flag: descending when less than 0, ascending otherwise
util.filter.orderBy = function(input, param, flag = 1) {
if (util.isArray(input) == false || util.isString(param) == false || !input[0].hasOwnProperty(param)) {
return input;
}
flag = flag < 0 ? 'desc' : 'asc';
return _.orderBy(input, param, flag);
}
// Keyword-containment filter for arrays: only entries containing keyword are kept; keyword: the search term
util.filter.filterBy = function(input, keyword) {
if (util.isArray(input) == false || util.isString(keyword) == false) {
return input;
}
var output = [];
_.each(input, function(item) {
if (util.isString(item) && item.toString().toLowerCase().indexOf(keyword.toLowerCase()) > -1) {
output.push(item);
}
});
return output;
}
// Check localStorage for a customerId; if missing, show the QR-code popup; if present, return the customerId
util.getCustomerId = function() {
let customerId = window.localStorage.getItem('customerId');
if (!customerId) {
window.ui.showQrcode();
return false;
} else {
return customerId;
}
}
// Below is the wrapper around axios
// baseURL and appId are defined at the top of this file
window.isSendingAjax = false;
util.axios = axios;
// axios wrapper
// Usage examples
// GET request: this.$util.ajax('/login','get').then(function(res){...}).catch(function(err){...})
// POST request: this.$util.ajax('/login','post',this.obj).then(function(res){...}).catch(function(err){...})
/**
 * [Generic axios wrapper]
 * @param {String} url [API path to request]
 * @param {String} method [request method, 'post' or 'get']
 * @param {Object} data [JSON payload object]
 * @param {String} misc [extra option: pass 'loading' to show a Loading overlay that blocks all interaction with the page until the request finishes; pass 'protect' to prevent duplicate submissions by checking window.isSendingAjax before allowing the request]
 * @return {Function} [invokes .post or .get on the axios instance and returns the promise; callers attach their own .then() callbacks]
 * GET example: this.$util.ajax('/login','get').then((res)=>{...}).catch((err)=>{...})
 * POST example: this.$util.ajax('/login','post',this.obj).then((res)=>{...}).catch((err)=>{...})
 */
util.ajax = function(url, method, data = {}, misc = null) {
let AISessionToken = window.sessionStorage.getItem('AISessionToken');
if (AISessionToken == null) {
AISessionToken = '';
// window.ui && window.ui.showQrcode();
// return; // block this ajax request when there is no token
}
let config = {
baseURL: baseURL,
timeout: 20000,
responseType: "json",
crossDomain: true,
headers: {
// 'Content-Type': 'application/x-www-form-urlencoded',
'Content-Type': 'application/x-www-form-urlencoded'
// 'AISessionToken': AISessionToken
}
}
config.url = url;
if (data) {
config.data = data;
}
let ajax = util.axios.create(config);
if (misc == 'loading') {
// Add a request interceptor to axios
ajax.interceptors.request.use(function(config) {
// Do something before the request is sent
window.ui && window.ui.showLoading();
return config;
}, function(error) {
// Handle the request error
window.ui && window.ui.showLoading();
return Promise.reject(error);
});
// Add a response interceptor to axios
ajax.interceptors.response.use(function(response) {
// Do something with the response data
window.ui && window.ui.hideLoading();
if (
response.data.code != '100000'
&& response.data.code != '110000'
&& response.data.code != '200005'
&& response.data.code != '200021'
&& window.location.href.indexOf('/score2')==-1
) {
window.ui && window.ui.showToast(response.data.message, '');
}
return response.data;
}, function(error) {
// Handle the response error
window.ui && window.ui.hideLoading();
return Promise.reject(error);
});
}
if (misc == 'filter') {
// Add a request interceptor to axios
ajax.interceptors.request.use(function(config) {
// Do something before the request is sent
window.ui && window.ui.showLoading('', true);
return config;
}, function(error) {
// Handle the request error
window.ui && window.ui.showLoading('', true);
return Promise.reject(error);
});
// Add a response interceptor to axios
ajax.interceptors.response.use(function(response) {
// Do something with the response data
window.ui && window.ui.hideLoading();
if (
response.data.code != '100000'
&& response.data.code != '110000'
&& response.data.code != '200005'
&& response.data.code != '200021'
&& window.location.href.indexOf('/score2')==-1
) {
window.ui && window.ui.showToast(response.data.message, '');
}
return response.data;
}, function(error) {
// Handle the response error
window.ui && window.ui.hideLoading();
return Promise.reject(error);
});
}
if (misc == 'protect') {
// Add a request interceptor to axios
ajax.interceptors.request.use(function(config) {
// Do something before the request is sent
if (window.isSendingAjax == false) {
window.isSendingAjax = true;
} else {
config.baseURL = 'http://127.0.0.1'
}
return config;
}, function(error) {
// Handle the request error
window.isSendingAjax = false;
return Promise.reject(error);
});
// Add a response interceptor to axios
ajax.interceptors.response.use(function(response) {
// Do something with the response data
window.isSendingAjax = false;
if (
response.data.code != '100000'
&& response.data.code != '110000'
&& response.data.code != '200005'
&& response.data.code != '200021'
&& window.location.href.indexOf('/score2')==-1
) {
window.ui && window.ui.showToast(response.data.message, '');
}
return response.data;
}, function(error) {
// Handle the response error
window.isSendingAjax = false;
return Promise.reject(error);
});
}
if (misc == null) {
// Add a request interceptor to axios
ajax.interceptors.request.use(function(config) {
// Do something before the request is sent
return config;
}, function(error) {
// Handle the request error
return Promise.reject(error);
});
// Add a response interceptor to axios
ajax.interceptors.response.use(function(response) {
// Do something with the response data
if (
response.data.code != '100000'
&& response.data.code != '110000'
&& response.data.code != '200005'
&& response.data.code != '200021'
&& window.location.href.indexOf('/score2')==-1
) {
window.ui && window.ui.showToast(response.data.message, '');
}
return response.data;
}, function(error) {
// Handle the response error
return Promise.reject(error);
});
}
if (window.isSendingAjax === true) {
return false;
}
if (method.toLowerCase() == 'get') { // GET requests are sent directly
return ajax.get(url);
} else if (method.toLowerCase() == 'post') { // POST requests compute the signature first, then send
data = util.appendSignature(data);
return ajax.post(url, data);
} else {
return false;
}
}
// Convert the json object to url format (keys sorted a-z), compute the signature and append it
util.appendSignature = function(obj) {
const seed = "420E496DCF9D9CEC4FD231AC3C258820";
if (util.isEmptyObject(obj)) {
return {
"signature": b64_hmac_sha1(seed, '') + '='
}
}
let string = util.a2z(obj).param;
let string2 = util.a2z(obj).encodedParam;
let signature = b64_hmac_sha1(seed, string) + '=';
string = string2 + '&signature=' + encodeURIComponent(signature);
return string;
// obj2.signature=signature;
// return obj2;
}
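// Usage sketch (illustrative): util.appendSignature({ b: '2', a: '1' }) returns a string of the form
// "a=1&b=2&signature=<urlencoded base64 HMAC-SHA1 of "a=1&b=2">"; keys are sorted a-z before signing.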
// Convert json into a url string with keys in ascending ASCII order
util.a2z = function(obj) {
let arr = [];
// Collect the keys of obj into an empty array
for (let i in obj) {
arr[arr.length] = i;
}
// Sort the array elements in ascending alphabetical order
arr.sort();
let arr2=arr.slice(0);
// Join each key/value pair with '=' and join the pairs with '&'
for (let i = 0; i < arr.length; i++) {
let key=arr[i];
arr[i] = key + '=' + obj[key];
}
for (let j = 0; j < arr2.length; j++) {
let key=arr2[j];
arr2[j] = key + '=' + encodeURIComponent(obj[key]);
}
let output={
param: arr.join('&'),
encodedParam: arr2.join('&')
}
return output;
}
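// Usage sketch (illustrative): util.a2z({ b: 'x y', a: '1' }) returns
// { param: "a=1&b=x y", encodedParam: "a=1&b=x%20y" }.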
Vue.prototype.$util = util;
// Execute the given method after a delay
// this.debounce(fn, delayMs, arg1, arg2...)
Vue.mixin({
data(){
return {
debounceTimer:null
}
},
methods:{
debounce(func,time,...args){
if(!this.$util.isFunction(func) || !(time>=0)){
return;
}
window.clearTimeout(this.debounceTimer);
this.debounceTimer = window.setTimeout(()=>{
func(...args);
},time)
}
}
})
// Vue custom filter: truncate strings longer than num characters and append an ellipsis at the end
Vue.filter('more', function(input, num = 5) {
if (!util.isString(input)) {
return input;
}
if (input.length > num) {
return input.slice(0, num) + '...';
} else {
return input;
}
})
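// Illustrative example: {{ 'abcdefgh' | more(5) }} renders as "abcde..."; shorter strings pass through unchanged.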
// Vue custom filter: truncate strings longer than num characters, eliding the middle with an ellipsis
Vue.filter('more2', function(input, num = 16) {
if (!util.isString(input) || input.length<=num || num<6) {
return input;
}
if (input.length > num) {
return input.slice(0, num-4) + '...' + input.slice(-4);
} else {
return input;
}
})
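// Illustrative example: {{ 'abcdefghijklmnopqrstuvwxyz' | more2(16) }} renders as "abcdefghijkl...wxyz"
// (keeps the first num-4 characters and the last 4, eliding the middle).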
// Vue custom filter: strip '2 full-width spaces' or '4 half-width spaces' from the text
Vue.filter('nospace', function(input) {
if (!util.isString(input)) {
return input;
}
let output=input.replace(/ /g,'').replace(/ /g,'');
return output;
})
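// Template usage sketch for the three filters above (field names are illustrative):
//
//   {{ product.title | more(8) }}      // tail ellipsis after 8 characters
//   {{ wallet.address | more2(16) }}   // middle ellipsis, keeps the last 4 characters
//   {{ clause.text | nospace }}        // strips runs of two full-width / four half-width spaces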
Vue.prototype.$store = {
}
// Pushes an empty anchor onto the history stack so the browser back button does not close the page
Vue.prototype.pushHistory = function() {
let state = {
title: 'title',
url: '#'
}
window.history.pushState(state, "title", "#");
util.shareDeny();
}
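// Usage sketch: call this in a page component's mounted() hook, then listen for
// popstate to intercept the back button instead of letting the WeChat browser close
// the page (the listener body is up to the page):
//
//   this.pushHistory();
//   window.addEventListener('popstate', () => { /* show an in-page dialog, etc. */ });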
window.ajaxRetryTimes=20;
// Runs func immediately if AISessionToken is available; otherwise retries every 200 ms, up to 20 times
Vue.prototype.doAndRetry = function(func) {
let that = this;
let token = window.sessionStorage.getItem('AISessionToken');
if (token) {
func();
window.ajaxRetryTimes = 0;
return;
}
if (window.ajaxRetryTimes > 0) {
window.ajaxRetryTimes--;
setTimeout(function() {
that.doAndRetry(func);
}, 200);
}
}
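// Usage sketch (the '/user/info' endpoint is illustrative): defer a request until
// AISessionToken shows up in sessionStorage, polling every 200 ms up to 20 times:
//
//   this.doAndRetry(() => {
//     this.$util.ajax('/user/info', 'get').then(res => { /* res.code / res.data */ });
//   });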
// tf.js
function printPage(grid) {
var tableStr = '<table cellpadding="0" cellspacing="0" width="100%" id="statisticByDay">';
var cm = grid.getColumnModel();
var colCount = cm.getColumnCount();
var temp_obj = new Array();
// Export only columns that are not hidden (isHidden() === true means hidden)
// temp_obj is a scratch array holding the indexes of all currently visible columns
for (var i = 0; i < colCount; i++) { // the first two columns are typically rownumber and selectmodel; hidden ones are skipped below
if (cm.isHidden(i) == true) {
} else {
temp_obj.push(i);
}
}
tableStr = tableStr + '<thead><tr>';
for ( var i = 0; i < temp_obj.length; i++) {
// Header text of each visible column
tableStr = tableStr + '<td>' + cm.getColumnHeader(temp_obj[i])
+ '</td>';
}
tableStr = tableStr + '</tr></thead>';
var store = grid.getStore();
var recordCount = store.getCount();
tableStr = tableStr + '<tbody>'
for ( var i = 0; i < recordCount; i++) {
var r = store.getAt(i);
tableStr = tableStr + '<tr>';
for ( var j = 0; j < temp_obj.length; j++) {
var dataIndex = cm.getDataIndex(temp_obj[j]);
var tdValue = r.get(dataIndex);
var rendererFunc = cm.getRenderer(temp_obj[j]);
if (rendererFunc != null) {
tdValue = rendererFunc(tdValue);
}
if (tdValue == null || tdValue == 0) {
tdValue = ' ';
}
if (j != 0)
tableStr = tableStr + '<td style="text-align:center;">'
+ tdValue + '</td>';
else
tableStr = tableStr + '<td>' + tdValue + '</td>';
}
tableStr = tableStr + '</tr>';
}
tableStr = tableStr + '</tbody></table>';
var head = '<link rel="stylesheet" type="text/css" href="../css/printReport.css" />';
var titleHTML = tableStr;// document.getElementById("printGridfff").innerHTML;
var newwin = window.open('about:blank', '', '');
newwin.document.write(head);
newwin.document.write(titleHTML);
newwin.document.location.reload();
newwin.print();
// newwin.close();
}
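// Usage sketch: pass any rendered GridPanel; only visible columns are exported and
// the popup window is printed immediately (the page it opens expects
// ../css/printReport.css to be reachable). The component id below is an example:
//
//   printPage(Ext.getCmp('statisticGrid'));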
tfMoney = function(v, sign) {
if (!sign)
sign = '';
v = (Math.round((v - 0) * 100)) / 100;
v = (v == Math.floor(v)) ? v + ".00" : ((v * 10 == Math.floor(v * 10)) ? v
+ "0" : v);
v = String(v);
var ps = v.split('.');
var whole = ps[0];
var sub = ps[1] ? '.' + ps[1] : '.00';
var r = /(\d+)(\d{3})/;
while (r.test(whole)) {
whole = whole.replace(r, '$1' + ',' + '$2');
}
v = whole + sub;
if (v.charAt(0) == '-') {
return '-' + sign + v.substr(1);
}
return sign + v;
}
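// Formatting examples for tfMoney (the second argument is an optional currency sign):
//
//   tfMoney(1234567.891)   // -> "1,234,567.89"
//   tfMoney(0.5, '$')      // -> "$0.50"
//   tfMoney(-1234.5, '$')  // -> "-$1,234.50"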
Ext.apply(Ext.form.VTypes, {
daterange : function(val, field) {
var date = field.parseDate(val);
if (!date) {
return;
}
if (field.startDateField
&& (!this.dateRangeMax || (date.getTime() != this.dateRangeMax
.getTime()))) {
var start = Ext.getCmp(field.startDateField);
start.setMaxValue(date);
start.validate();
this.dateRangeMax = date;
} else if (field.endDateField
&& (!this.dateRangeMin || (date.getTime() != this.dateRangeMin
.getTime()))) {
var end = Ext.getCmp(field.endDateField);
end.setMinValue(date);
end.validate();
this.dateRangeMin = date;
}
/*
* Always return true since we're only using this vtype to set the
* min/max allowed values (these are tested for after the vtype test)
*/
return true;
},
password : function(val, field) {
if (field.initialPassField) {
var pwd = Ext.getCmp(field.initialPassField);
return (val == pwd.getValue());
}
return true;
},
passwordText : 'Passwords do not match'
});
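// Usage sketch for the 'daterange' vtype: two linked DateFields inside a form's
// items array that constrain each other's min/max values (ids are illustrative):
//
//   { xtype: 'datefield', fieldLabel: 'Start', id: 'startdt', vtype: 'daterange', endDateField: 'enddt' },
//   { xtype: 'datefield', fieldLabel: 'End',   id: 'enddt',   vtype: 'daterange', startDateField: 'startdt' }
//
// The 'password' vtype works the same way: set initialPassField on the confirm
// field to the id of the first password field.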
Ext.grid.CheckColumn = function(config) {
Ext.apply(this, config);
if (!this.id) {
this.id = Ext.id();
}
this.renderer = this.renderer.createDelegate(this);
};
Ext.grid.CheckColumn.prototype = {
init : function(grid) {
this.grid = grid;
this.grid.on('render', function() {
var view = this.grid.getView();
view.mainBody.on('mousedown', this.onMouseDown, this);
}, this);
},
onMouseDown : function(e, t) {
if (t.className && t.className.indexOf('x-grid3-cc-' + this.id) != -1) {
e.stopEvent();
var index = this.grid.getView().findRowIndex(t);
var record = this.grid.store.getAt(index);
record.set(this.dataIndex, !record.data[this.dataIndex]);
}
},
renderer : function(v, p, record) {
p.css += ' x-grid3-check-col-td';
return '<div class="x-grid3-check-col' + (v ? '-on' : '')
+ ' x-grid3-cc-' + this.id + '"> </div>';
}
};
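/*
 * Usage sketch for Ext.grid.CheckColumn (hypothetical grid/store, following the
 * usual ExtJS 3 pattern): the column is registered both as a plugin (for the
 * mousedown handling) and as an entry in the column model.
 *
 * var enabledCol = new Ext.grid.CheckColumn({
 *     header : '启用',
 *     dataIndex : 'enabled',   // boolean field assumed on the store
 *     width : 55
 * });
 * var grid = new Ext.grid.GridPanel({
 *     store : store,           // placeholder store, not defined in this file
 *     plugins : enabledCol,
 *     cm : new Ext.grid.ColumnModel([
 *         { header : '名称', dataIndex : 'name' },
 *         enabledCol
 *     ])
 * });
 */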
Ext.namespace('Ext.tf.util');
/**
 * Gathers the master data from a FormPanel and the detail rows from a GridPanel
 * into one JSON object that can be submitted to the server in a single call
 * (see the usage sketch after the function below).
 *
 * @param {Ext.form.FormPanel}
 *            formPanel the FormPanel holding the master data
 * @param {Ext.grid.GridPanel/Ext.grid.EditorGridPanel}
 *            gridPanel the GridPanel holding the detail data
 * @param {Array}
 *            excludes fields of gridPanel.store that should not be collected
 * @param {Array}
 *            resultPropNames (optional) property names of the returned JSON object,
 *            defaults to ["formData", "gridData"]
 * @return {Object} by default {formData:masterData, gridData:detailData};
 *         masterData is a JSON object and detailData is an array of JSON objects
 *         whose property names match the fields defined on grid.store
 */
Ext.tf.util.gatherData = function(formPanel, gridPanel, excludes,
resultPropNames) {
var store = gridPanel.store;
var gridDataList = [];
var formData = formPanel.getForm().getValues(false);
store.each(function(rec) {
for ( var i = excludes.length - 1; i >= 0; --i) {
delete rec.data[excludes[i]];
		}
		gridDataList.push(rec.data);
});
if (resultPropNames) {
var result = {};
result[resultPropNames[0]] = formData;
result[resultPropNames[1]] = gridDataList;
return result;
} else
return {
formData : formData,
gridData : gridDataList
};
}
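/*
 * Usage sketch (hypothetical): OrderService and the orderForm / orderLineGrid
 * instances are illustrative placeholders, not defined in this file.
 *
 * var payload = Ext.tf.util.gatherData(orderForm, orderLineGrid,
 *     [ 'dirtyFlag' ],              // grid store fields to drop before submitting
 *     [ 'order', 'orderLines' ]);   // optional result property names
 * // payload => { order : { ...form fields... }, orderLines : [ { ...row... }, ... ] }
 * OrderService.save(payload, function() {
 *     Ext.MessageBox.alert('提示', '保存成功!');
 * });
 */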
Ext.tf.util.debug = function(msg) {
if (typeof (console) != "undefined") {
console.debug(msg);
}
}
/*
* Usage : var lable = Ext.tf.util.OptCache.getLabel("Nationality", "1");
*
*/
Ext.tf.util.OptCache = {};
Ext.tf.util.OptCache.data = {};
Ext.tf.util.OptCache.getOptions = function(optName) {
var util = Ext.tf.util;
if (!util.OptCache.data[optName]) {
OptionProvider.getOptions(optName, {
async : false,
callback : function(list) {
var opt = {};
for ( var i = list.length - 1; i >= 0; --i) {
opt[list[i].id] = list[i].name;
}
;
util.OptCache.data[optName] = opt;
}
});
} else {
util.debug("util.OptCache.getOptions: using cache");
}
return util.OptCache.data[optName];
};
Ext.tf.util.OptCache.getLabel = function(optName, key) {
var util = Ext.tf.util;
var options = util.OptCache.getOptions(optName);
if (options) {
return options[key];
} else {
return '';
}
};
/**
 * Maps the Enter key to the given handler, for use in a panel's "keys" config
 * (see the usage sketch after the function below).
 */
Ext.tf.util.enterKey = function(handler) {
return {
key : [ 10, 13 ],
fn : handler
}
};
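/*
 * Usage sketch (hypothetical; doQuery stands for the panel's own query handler):
 *
 * var queryPanel = new Ext.FormPanel({
 *     items : [ { xtype : 'textfield', fieldLabel : '名称', name : 'name' } ],
 *     keys : [ Ext.tf.util.enterKey(function() {
 *         doQuery();   // pressing Enter triggers the same handler as the query button
 *     }) ]
 * });
 */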
// //////////////////////
Ext.ns("Ext.tf");
Ext.tf.currentUser = null;
Ext.tf.SimpleFormPanel = Ext.extend(Ext.FormPanel, {
autoHeight : true,
frame : true,
defaultType : 'textfield',
initComponent : function() {
Ext.applyIf(this, {
// saveFn : function() {}
});
this.build();
Ext.tf.SimpleFormPanel.superclass.initComponent.call(this);
},
build : function() {
this.buttons = [ {
text : '确认',
handler : this.saveFn
}, {
text : '取消',
handler : this.close
} ]
// this.keys = [ Ext.tf.util.enterKey(this.saveFn) ];
}
})
Ext.tf.SimpleQueryFormPanel = Ext.extend(Ext.FormPanel, {
collapsible : true,
title : '查询',
labelWidth : 75,
frame : true,
bodyStyle : 'padding:5px 5px 0',
width : 500,
defaults : {
width : 230
},
defaultType : 'textfield',
initComponent : function() {
Ext.apply(this, this.queryConfigEx);
Ext.tf.SimpleQueryFormPanel.superclass.initComponent.call(this);
}
});
Ext.tf.SimpleGridPanel = Ext.extend(Ext.grid.GridPanel, {
loadMask : {
msg : '正在加载数据...'
},
viewConfig : {
forceFit : true
},
width : 500,
height : 300,
frame : true,
// toggle: in grid, whether double click fire edit
dblclickToggle : true,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : true,
initComponent : function() {
Ext.apply(this, this.gridConfigEx);
Ext.tf.SimpleGridPanel.superclass.initComponent.call(this);
this.dblclickToggle && this.on('rowdblclick', this.edit, this);
		// context menu on right click
this.contextmenuToggle
&& this.on('rowcontextmenu', this.contextmenu, this);
},
contextmenu : function(grid, rowIndex, e) {
e.preventDefault();
e.stopEvent();
var updateMenu = new Ext.menu.Item({
iconCls : 'edit',
id : 'updateMenu',
text : '修改',
handler : this.edit.createDelegate(this)
});
var deleteMenu = new Ext.menu.Item({
iconCls : 'delete',
id : 'deleteMenu',
text : '删除',
handler : this.del.createDelegate(this)
});
var selections = this.getSelections();
if (selections.length > 1) {
updateMenu.disable();
}
var menuList = [ updateMenu, deleteMenu ];
this.grid_menu = new Ext.menu.Menu({
id : 'mainMenu',
items : menuList
});
var coords = e.getXY();
grid.getSelectionModel().selectRow(rowIndex);
this.grid_menu.showAt([ coords[0], coords[1] ]);
}
});
/**
 * Panel class for a standard CRUD page (a full usage sketch follows the class
 * definition below). Config options:
 *   title : '模块目录管理', pageSize : 10,
 *   queryUrl : ModuleService.findModuleCategory.createDelegate(this),
 *   editUrl : ModuleService.editModuleCategory.createDelegate(this),
 *   deleteUrl : xxx,
 *   // extra Grid config, overrides the defaults
 *   gridConfigEx : {};
 *   // extra query panel config, overrides the defaults
 *   queryConfigEx : {};
 *
 *   // form fields used by the query panel
 *   queryConfig : [ { fieldLabel : '名称', name : 'name', allowBlank : true } ],
 *   // form fields used by the edit window
 *   editConfig : [ { fieldLabel : '模块目录名称', name : 'name' },
 *                  { fieldLabel : '排列顺序', name : 'ordinal' } ],
 *   // record/reader definition
 *   readerConfig : [ { name : 'id', mapping : 'id' }, { name : 'name', mapping : 'name' },
 *                    { name : 'ordinal', mapping : 'ordinal' } ],
 *   // grid column model
 *   gridCm : [ { "hidden" : true, "header" : "ID", "sortable" : true, "dataIndex" : "id" },
 *              { "header" : "模块目录名称", "sortable" : true, "dataIndex" : "name" },
 *              { "header" : "排列顺序", "sortable" : true, "dataIndex" : "ordinal" } ]
 */
Ext.tf.SimplePanel = Ext.extend(Ext.Panel, {
closable : true,
hasAdd : true,
queryUrl : Ext.emptyFn,
editUrl : Ext.emptyFn,
deleteUrl : Ext.emptyFn,
// toggle: in grid, whether double click fire edit
dblclickToggle : true,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : true,
initComponent : function() {
try {
Ext.applyIf(this, {
pageSize : 10
});
this.reader = new Ext.data.JsonReader({
				totalProperty : "totalSize", // total number of records
				root : "data", // data rows inside the paging object
				id : "id" //
}, Ext.data.Record.create(this.readerConfig));
this.store = new Ext.data.Store({
proxy : new Ext.ux.data.DWRProxy({
dwrFunction : this.queryUrl,
listeners : {
'beforeload' : function(dataProxy, params) {
// alert(dwr.util.toDescriptiveString(params, 2));
// alert(this.queryUrl +"eeeee");
var o = this.queryForm.getForm().getValues(false);
console.log(o);
if(o.isDetail != undefined){
if(o.isDetail == '1')
o.isDetail = true;
else if(o.isDetail == '0')
o.isDetail = false;
}
if (!params.limit)
params.limit = this.pageSize;
params[dataProxy.loadArgsKey] = [ o, params ];
}.createDelegate(this),
'load' : function ( obj, records, options ) {
console.log("load=======================================================")
console.log(obj);
console.log(records);
console.log(options);
console.log("load=======================================================")
}
}
}),
reader : this.reader
});
this.pagingBar = new App.PagingToolbar({
pageSize : this.pageSize,
store : this.store,
displayInfo : true,
displayMsg : '{0} - {1} of {2}',
emptyMsg : "没有记录"
});
this.grid = new Ext.tf.SimpleGridPanel({
gridConfigEx : this.gridConfigEx,
edit : this.edit.createDelegate(this),
del : this.del.createDelegate(this),
store : this.store,
cm : new Ext.grid.ColumnModel(this.gridCm),
dblclickToggle : this.dblclickToggle,
contextmenuToggle : this.contextmenuToggle,
bbar : this.pagingBar
});
this.editForm = new Ext.tf.SimpleFormPanel({
items : this.editConfig,
close : function() {
this.editWin.hide();
}.createDelegate(this),
saveFn : function() {
var formBean = this.editForm.getForm().getValues(false);
console.log(formBean)
if(formBean.isDetail){
if(formBean.isDetail == '一级目录'){
formBean.isDetail = false;
}else if(formBean.isDetail == '二级目录'){
formBean.isDetail = true;
}
}
//console.log(formBean);
this.editUrl(formBean, function() {
Ext.MessageBox.alert("提示", "保存成功!");
this.editWin.hide();
this.store.reload();
}.createDelegate(this));
}.createDelegate(this)
});
this.editWin = new Ext.Window({
title : '',
// closeAction : 'hide',
modal : true,
autoHeight : true,
close : function() {
this.hide();
},
// autoWidth : true,
width : 300,
items : [ this.editForm ]
});
this.addRecord = function() {
this.editForm.getForm().reset();
this.editWin.show();
};
this.query = function() {
this.grid.selModel.clearSelections();
this.store.reload();
};
this.queryForm = new Ext.tf.SimpleQueryFormPanel(
{
queryConfigEx : this.queryConfigEx,
items : this.queryConfig,
buttons : [ {
text : '查询',
formBind : true,
scope : this,
handler : this.query.createDelegate(this)
} ],
keys : [ Ext.tf.util.enterKey(this.query
.createDelegate(this)) ]
});
if (this.hasAdd) {
this.queryForm.addButton('新增', this.addRecord, this);
}
this.items = [ this.queryForm, this.grid ];
Ext.tf.SimplePanel.superclass.initComponent.call(this);
} catch (e) {
//console.log(e);
throw e;
}
},
// private
edit : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
} else if (selections.length != 1) {
Ext.MessageBox.alert("提示", "不能选择多行编辑!");
return;
}
this.editWin.show();
this.editForm.getForm().loadRecord(selections[0]);
},
del : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
}
var fn = function(e) {
if (e == "yes") {
var ids = new Array();
for ( var i = 0, len = selections.length; i < len; i++) {
try {
					// if a selected record is no longer displayed on the current page, reading it here can throw
selections[i].get("id");
ids[i] = selections[i].get("id");
} catch (e) {
// //console.log(e);
}
}
this.deleteUrl(ids.join(","), function() {
Ext.MessageBox.alert("提示", "删除完毕!");
this.store.reload();
}.createDelegate(this));
}
}
Ext.MessageBox.confirm("提示", "确认要删除所选择的记录么?", fn, this);
},
// public
load : function() {
return this.store.load({
params : {
start : 0,
limit : this.pageSize
}
});
}
});
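/*
 * Usage sketch for Ext.tf.SimplePanel, built from the config documented above
 * (ModuleService is the DWR service assumed by that documentation, not defined here):
 *
 * var categoryPanel = new Ext.tf.SimplePanel({
 *     title : '模块目录管理',
 *     pageSize : 10,
 *     queryUrl : ModuleService.findModuleCategory.createDelegate(this),
 *     editUrl : ModuleService.editModuleCategory.createDelegate(this),
 *     deleteUrl : ModuleService.deleteModuleCategory.createDelegate(this),
 *     queryConfig : [ { fieldLabel : '名称', name : 'name', allowBlank : true } ],
 *     editConfig : [ { fieldLabel : '模块目录名称', name : 'name' },
 *                    { fieldLabel : '排列顺序', name : 'ordinal' } ],
 *     readerConfig : [ { name : 'id', mapping : 'id' }, { name : 'name', mapping : 'name' },
 *                      { name : 'ordinal', mapping : 'ordinal' } ],
 *     gridCm : [ { "hidden" : true, "header" : "ID", "sortable" : true, "dataIndex" : "id" },
 *                { "header" : "模块目录名称", "sortable" : true, "dataIndex" : "name" },
 *                { "header" : "排列顺序", "sortable" : true, "dataIndex" : "ordinal" } ]
 * });
 * categoryPanel.load();
 */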
/**
 * Pop-up selection control: a trigger field that opens a query window and writes
 * the selected record's name into this field and its id into a companion field
 * (see the usage sketch after the xtype registration below).
 */
Ext.tf.PopSelect = Ext.extend(Ext.form.TriggerField, {
triggerClass : 'x-form-date-trigger',
readOnly : true,
initComponent : function() {
		Ext.tf.PopSelect.superclass.initComponent.call(this);
},
/**
* Find ref element, set value
*/
setRefName : function(v) {
var refName = this.refName || ''; // If not refName, then ??
var form = this.findParentBy(function(v) {
if (Ext.type(v.getForm) == 'function')
return true;
});
if (form != null) {
Ext.each(form.find("name", refName), function(field) {
field.setValue(v);
});
}
return this;
},
onDestroy : function() {
Ext.destroy(this.win, this.panel);
Ext.tf.PopSelect.superclass.onDestroy.call(this);
},
edit : function() {
var grid = this.panel.grid;
var store = this.panel.store;
var view = grid.getView();
var sm = grid.getSelectionModel();
for ( var i = 0; i < view.getRows().length; i++) {
if (sm.isSelected(i)) {
var record = store.getAt(i);
var id = record.get('id');
var name = record.get('name');
this.setValue(name);
this.setRefName(id);
}
;
}
;
this.win.hide();
},
// pop select window
onTriggerClick : function() {
if (this.win == null) {
this.panel = new Ext.tf.SimplePanel({
title : '',
pageSize : 10,
hasAdd : false,
dblclickToggle : false,
contextmenuToggle : false,
gridConfigEx : {
height : 200
},
queryUrl : this.queryUrl,
				// query condition form fields
				queryConfig : this.queryConfig,
				// reader definition used when the grid loads data
				readerConfig : this.readerConfig,
				// grid column model
				gridCm : this.gridCm
});
this.panel.grid.on('rowdblclick', this.edit, this);
this.win = new Ext.Window({
title : this.title,
modal : true,
width : 520,
autoHeight : true,
closeAction : 'hide',
items : [ this.panel ],
buttons : [ {
text : '关闭',
handler : function() {
this.win.hide();
}.createDelegate(this)
}, {
text : '清除',
handler : function() {
this.setValue('');
this.setRefName('');
this.win.hide();
}.createDelegate(this)
}, {
text : '确认',
handler : this.edit.createDelegate(this)
} ]
});
}
this.win.show(this);
}
});
Ext.reg("popselect", Ext.tf.PopSelect);
/**
* SimpleReportPanel
*/
Ext.tf.SimpleReportPanel = Ext.extend(Ext.Panel, {
closable : true,
layout : 'fit',
autoScroll : true,
queryUrl : Ext.emptyFn,
// toggle: in grid, whether double click fire edit
dblclickToggle : false,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : false,
initComponent : function() {
try {
this.reader = new Ext.data.JsonReader({
totalProperty : "totalSize",
root : "data",
id : "id"
}, Ext.data.Record.create(this.readerConfig));
this.store = new Ext.data.Store({
proxy : new Ext.ux.data.DWRProxy({
dwrFunction : this.queryUrl
}),
reader : this.reader
});
this.grid = new Ext.tf.SimpleGridPanel({
tbar : [ {
text : '刷新',
handler : function() {
this.load();
}.createDelegate(this)
}, {
text : '打印',
handler : function() {
printPage(this.grid);
}.createDelegate(this)
} ],
viewConfig : {
forceFit : ''
},
width : '', | cm : new Ext.grid.ColumnModel(this.gridCm),
dblclickToggle : this.dblclickToggle,
contextmenuToggle : this.contextmenuToggle
});
this.items = [ this.grid ];
			Ext.tf.SimpleReportPanel.superclass.initComponent.call(this);
} catch (e) {
//console.log(e);
throw e;
}
},
// public
load : function() {
return this.store.load();
}
});
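/*
 * Usage sketch (hypothetical; ReportService is an illustrative placeholder):
 *
 * var dailyReport = new Ext.tf.SimpleReportPanel({
 *     title : '日统计报表',
 *     queryUrl : ReportService.statisticByDay.createDelegate(this),
 *     readerConfig : [ { name : 'day', mapping : 'day' }, { name : 'total', mapping : 'total' } ],
 *     gridCm : [ { "header" : "日期", "sortable" : true, "dataIndex" : "day" },
 *                { "header" : "合计", "sortable" : true, "dataIndex" : "total" } ]
 * });
 * dailyReport.load();   // the built-in 刷新 / 打印 toolbar buttons reload and print the grid
 */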
Ext.tf.WorkQueryPanel = Ext.extend(Ext.Panel, {
closable : true,
hasAdd : true,
queryUrl : Ext.emptyFn,
editUrl : Ext.emptyFn,
width : 800,
// toggle: in grid, whether double click fire edit
dblclickToggle : true,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : true,
initComponent : function() {
try {
Ext.applyIf(this, {
pageSize : 10
});
this.reader = new Ext.data.JsonReader({
totalProperty : "totalSize", // 总记录数
root : "data", // 分页对象中的数据集
id : "id" //
}, Ext.data.Record.create(this.readerConfig));
this.store = new Ext.data.Store({
proxy : new Ext.ux.data.DWRProxy({
dwrFunction : this.queryUrl,
listeners : {
'beforeload' : function(dataProxy, params) {
var o = this.queryForm.getForm().getValues(false);
console.log(o);
if (!params.limit)
params.limit = this.pageSize;
params[dataProxy.loadArgsKey] = [ o, params ];
}.createDelegate(this)
}
}),
reader : this.reader
});
this.pagingBar = new App.PagingToolbar({
pageSize : this.pageSize,
store : this.store,
displayInfo : true,
displayMsg : '{0} - {1} of {2}',
emptyMsg : "没有记录"
});
this.grid = new Ext.tf.SimpleGridPanel({
autoWidth : true,
autoHeight : true,
edit : this.edit.createDelegate(this),
store : this.store,
cm : new Ext.grid.ColumnModel(this.gridCm),
dblclickToggle : this.dblclickToggle,
contextmenuToggle : this.contextmenuToggle,
bbar : this.pagingBar
});
this.editForm = new Ext.tf.SimpleFormPanel({
autoWidth : true,
autoHeight : true,
items : this.editConfig,
close : function() {
this.editWin.hide();
}.createDelegate(this),
saveFn : function() {
var formBean = this.editForm.getForm().getValues(false);
this.editUrl(formBean, function() {
Ext.MessageBox.alert("提示", "保存成功!");
this.editWin.hide();
this.store.reload();
}.createDelegate(this));
}.createDelegate(this)
});
this.editWin = new Ext.Window({
title : '',
closeAction : 'hide',
modal : true,
autoHeight : true,
// autoWidth : true,
width : 300,
items : [ this.editForm ]
});
this.addRecord = function() {
this.editForm.getForm().reset();
this.editWin.show();
};
this.query = function() {
this.grid.selModel.clearSelections();
this.store.reload();
};
this.queryForm = new Ext.tf.SimpleQueryFormPanel(
{
queryConfigEx : {},
items : this.queryConfig,
autoWidth : true,
buttons : [ {
text : '查询',
formBind : true,
scope : this,
handler : this.query.createDelegate(this)
} ],
keys : [ Ext.tf.util.enterKey(this.query
.createDelegate(this)) ]
});
this.items = [ this.queryForm, this.grid ];
			Ext.tf.WorkQueryPanel.superclass.initComponent.call(this);
} catch (e) {
//console.log(e);
throw e;
}
},
// private
edit : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
} else if (selections.length != 1) {
Ext.MessageBox.alert("提示", "不能选择多行查看!");
return;
}
this.editWin.show();
this.editForm.getForm().loadRecord(selections[0]);
},
// public
load : function() {
return this.store.load({
params : {
start : 0,
limit : this.pageSize
}
});
}
}); | gridConfigEx : this.gridConfigEx,
store : this.store, | random_line_split |
tf.js | function | (grid) {
var tableStr = '<table cellpadding="0" cellspacing="0" width="100%" id="statisticByDay">';
var cm = grid.getColumnModel();
var colCount = cm.getColumnCount();
var temp_obj = new Array();
// 只下载没有隐藏的列(isHidden()为true表示隐藏,其他都为显示)
// 临时数组,存放所有当前显示列的下标
for ( var i = 0; i < colCount; i++) {// 从第三列开始,因为我的第1、2列是分别是rownumber和selectmodel。
if (cm.isHidden(i) == true) {
} else {
temp_obj.push(i);
}
}
tableStr = tableStr + '<thead><tr>';
for ( var i = 0; i < temp_obj.length; i++) {
// 显示列的列标题
tableStr = tableStr + '<td>' + cm.getColumnHeader(temp_obj[i])
+ '</td>';
}
tableStr = tableStr + '</tr></thead>';
var store = grid.getStore();
var recordCount = store.getCount();
tableStr = tableStr + '<tbody>'
for ( var i = 0; i < recordCount; i++) {
var r = store.getAt(i);
tableStr = tableStr + '<tr>';
for ( var j = 0; j < temp_obj.length; j++) {
var dataIndex = cm.getDataIndex(temp_obj[j]);
var tdValue = r.get(dataIndex);
var rendererFunc = cm.getRenderer(temp_obj[j]);
if (rendererFunc != null) {
tdValue = rendererFunc(tdValue);
}
if (tdValue == null || tdValue == 0) {
tdValue = ' ';
}
if (j != 0)
tableStr = tableStr + '<td style="text-align:center;">'
+ tdValue + '</td>';
else
tableStr = tableStr + '<td>' + tdValue + '</td>';
}
tableStr = tableStr + '</tr>';
}
tableStr = tableStr + '</tbody></table>';
var head = '<link rel="stylesheet" type="text/css" href="../css/printReport.css" />';
var titleHTML = tableStr;// document.getElementById("printGridfff").innerHTML;
var newwin = window.open('about:blank', '', '');
newwin.document.write(head);
newwin.document.write(titleHTML);
newwin.document.location.reload();
newwin.print();
// newwin.close();
}
tfMoney = function(v, sign) {
if (!sign)
sign = '';
v = (Math.round((v - 0) * 100)) / 100;
v = (v == Math.floor(v)) ? v + ".00" : ((v * 10 == Math.floor(v * 10)) ? v
+ "0" : v);
v = String(v);
var ps = v.split('.');
var whole = ps[0];
var sub = ps[1] ? '.' + ps[1] : '.00';
var r = /(\d+)(\d{3})/;
while (r.test(whole)) {
whole = whole.replace(r, '$1' + ',' + '$2');
}
v = whole + sub;
if (v.charAt(0) == '-') {
return '-' + sign + v.substr(1);
}
return sign + v;
}
Ext.apply(Ext.form.VTypes, {
daterange : function(val, field) {
var date = field.parseDate(val);
if (!date) {
return;
}
if (field.startDateField
&& (!this.dateRangeMax || (date.getTime() != this.dateRangeMax
.getTime()))) {
var start = Ext.getCmp(field.startDateField);
start.setMaxValue(date);
start.validate();
this.dateRangeMax = date;
} else if (field.endDateField
&& (!this.dateRangeMin || (date.getTime() != this.dateRangeMin
.getTime()))) {
var end = Ext.getCmp(field.endDateField);
end.setMinValue(date);
end.validate();
this.dateRangeMin = date;
}
/*
* Always return true since we're only using this vtype to set the
* min/max allowed values (these are tested for after the vtype test)
*/
return true;
},
password : function(val, field) {
if (field.initialPassField) {
var pwd = Ext.getCmp(field.initialPassField);
return (val == pwd.getValue());
}
return true;
},
passwordText : 'Passwords do not match'
});
Ext.grid.CheckColumn = function(config) {
Ext.apply(this, config);
if (!this.id) {
this.id = Ext.id();
}
this.renderer = this.renderer.createDelegate(this);
};
Ext.grid.CheckColumn.prototype = {
init : function(grid) {
this.grid = grid;
this.grid.on('render', function() {
var view = this.grid.getView();
view.mainBody.on('mousedown', this.onMouseDown, this);
}, this);
},
onMouseDown : function(e, t) {
if (t.className && t.className.indexOf('x-grid3-cc-' + this.id) != -1) {
e.stopEvent();
var index = this.grid.getView().findRowIndex(t);
var record = this.grid.store.getAt(index);
record.set(this.dataIndex, !record.data[this.dataIndex]);
}
},
renderer : function(v, p, record) {
p.css += ' x-grid3-check-col-td';
return '<div class="x-grid3-check-col' + (v ? '-on' : '')
+ ' x-grid3-cc-' + this.id + '"> </div>';
}
};
Ext.namespace('Ext.tf.util');
/**
* Compiles a selector/xpath query into a reusable function. The returned
* function takes one parameter "root" (optional), which is the context node
* from where the query should start.
*
* @param {Ext.form.FormPanel}
* formPanel 包含主数据的FormPanel
* @param {Ext.grid.GridPanel/Ext.grid.EditorGridPanel}
* gridPanel 包含细节数据的GridPanel
* @param {Array}
* excludes gridPanel中不需要获取的列, 数组中加入需要摈弃的grid.store.fields中
* @param {Array}
* resultPropNames (可选) 定义返回的json对象的属性名,缺省为["formData", "gridData"]
* @return {Object} 缺省为{formData:masterData, gridData:detailData}
* masterData为json对象, detailData为[json对象],
* detailData数组中的json对象的属性名与grid.store的fields定义相同
*/
Ext.tf.util.gatherData = function(formPanel, gridPanel, excludes,
resultPropNames) {
var store = gridPanel.store;
var gridDataList = [];
var formData = formPanel.getForm().getValues(false);
store.each(function(rec) {
for ( var i = excludes.length - 1; i >= 0; --i) {
delete rec.data[excludes[i]];
}
;
gridDataList.push(rec.data)
});
if (resultPropNames) {
var result = {};
result[resultPropNames[0]] = formData;
result[resultPropNames[1]] = gridDataList;
return result;
} else
return {
formData : formData,
gridData : gridDataList
};
}
Ext.tf.util.debug = function(msg) {
if (typeof (console) != "undefined") {
console.debug(msg);
}
}
/*
* Usage : var lable = Ext.tf.util.OptCache.getLabel("Nationality", "1");
*
*/
Ext.tf.util.OptCache = {};
Ext.tf.util.OptCache.data = {};
Ext.tf.util.OptCache.getOptions = function(optName) {
var util = Ext.tf.util;
if (!util.OptCache.data[optName]) {
OptionProvider.getOptions(optName, {
async : false,
callback : function(list) {
var opt = {};
for ( var i = list.length - 1; i >= 0; --i) {
opt[list[i].id] = list[i].name;
}
;
util.OptCache.data[optName] = opt;
}
});
} else {
util.debug("util.OptCache.getOptions: using cache");
}
return util.OptCache.data[optName];
};
Ext.tf.util.OptCache.getLabel = function(optName, key) {
var util = Ext.tf.util;
var options = util.OptCache.getOptions(optName);
if (options) {
return options[key];
} else {
return '';
}
};
/**
* 回车对应函数 handler
*/
Ext.tf.util.enterKey = function(handler) {
return {
key : [ 10, 13 ],
fn : handler
}
};
// //////////////////////
Ext.ns("Ext.tf");
Ext.tf.currentUser = null;
Ext.tf.SimpleFormPanel = Ext.extend(Ext.FormPanel, {
autoHeight : true,
frame : true,
defaultType : 'textfield',
initComponent : function() {
Ext.applyIf(this, {
// saveFn : function() {}
});
this.build();
Ext.tf.SimpleFormPanel.superclass.initComponent.call(this);
},
build : function() {
this.buttons = [ {
text : '确认',
handler : this.saveFn
}, {
text : '取消',
handler : this.close
} ]
// this.keys = [ Ext.tf.util.enterKey(this.saveFn) ];
}
})
Ext.tf.SimpleQueryFormPanel = Ext.extend(Ext.FormPanel, {
collapsible : true,
title : '查询',
labelWidth : 75,
frame : true,
bodyStyle : 'padding:5px 5px 0',
width : 500,
defaults : {
width : 230
},
defaultType : 'textfield',
initComponent : function() {
Ext.apply(this, this.queryConfigEx);
Ext.tf.SimpleQueryFormPanel.superclass.initComponent.call(this);
}
});
Ext.tf.SimpleGridPanel = Ext.extend(Ext.grid.GridPanel, {
loadMask : {
msg : '正在加载数据...'
},
viewConfig : {
forceFit : true
},
width : 500,
height : 300,
frame : true,
// toggle: in grid, whether double click fire edit
dblclickToggle : true,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : true,
initComponent : function() {
Ext.apply(this, this.gridConfigEx);
Ext.tf.SimpleGridPanel.superclass.initComponent.call(this);
this.dblclickToggle && this.on('rowdblclick', this.edit, this);
// 右键菜单
this.contextmenuToggle
&& this.on('rowcontextmenu', this.contextmenu, this);
},
contextmenu : function(grid, rowIndex, e) {
e.preventDefault();
e.stopEvent();
var updateMenu = new Ext.menu.Item({
iconCls : 'edit',
id : 'updateMenu',
text : '修改',
handler : this.edit.createDelegate(this)
});
var deleteMenu = new Ext.menu.Item({
iconCls : 'delete',
id : 'deleteMenu',
text : '删除',
handler : this.del.createDelegate(this)
});
var selections = this.getSelections();
if (selections.length > 1) {
updateMenu.disable();
}
var menuList = [ updateMenu, deleteMenu ];
this.grid_menu = new Ext.menu.Menu({
id : 'mainMenu',
items : menuList
});
var coords = e.getXY();
grid.getSelectionModel().selectRow(rowIndex);
this.grid_menu.showAt([ coords[0], coords[1] ]);
}
});
/**
* 功能页面的panel类 Config 说明: title : '模块目录管理', pageSize : 10, queryUrl :
* ModuleService.findModuleCategory.createDelegate(this), editUrl :
* ModuleService.editModuleCategory.createDelegate(this), deleteUrl : xxx,
* // Grid 需要的配置信息, 会覆盖掉缺省的 gridConfigEx : {}; // query panel 需要的配置信息, 会覆盖掉缺省的
* queryConfigEx : {};
*
* //查询用到的form配置 queryConfig : [ { fieldLabel : '名称', name : 'name', allowBlank :
* true } ], //编辑用到的form配置 editConfig : [ { fieldLabel : '模块目录名称', name : 'name' }, {
* fieldLabel : '排列顺序', name : 'ordinal' } ], //reader的配置 readerConfig : [ {
* name : 'id', mapping : 'id' }, { name : 'name', mapping : 'name' }, { name :
* 'ordinal', mapping : 'ordinal' } ], //网格记录显示的配置 gridCm : [ { "hidden" : true,
* "header" : "ID", "sortable" : true, "dataIndex" : "id" }, { "header" :
* "模块目录名称", "sortable" : true, "dataIndex" : "name" }, { "header" : "排列顺序",
* "sortable" : true, "dataIndex" : "ordinal" } ]
*/
Ext.tf.SimplePanel = Ext.extend(Ext.Panel, {
closable : true,
hasAdd : true,
queryUrl : Ext.emptyFn,
editUrl : Ext.emptyFn,
deleteUrl : Ext.emptyFn,
// toggle: in grid, whether double click fire edit
dblclickToggle : true,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : true,
initComponent : function() {
try {
Ext.applyIf(this, {
pageSize : 10
});
this.reader = new Ext.data.JsonReader({
totalProperty : "totalSize", // 总记录数
root : "data", // 分页对象中的数据集
id : "id" //
}, Ext.data.Record.create(this.readerConfig));
this.store = new Ext.data.Store({
proxy : new Ext.ux.data.DWRProxy({
dwrFunction : this.queryUrl,
listeners : {
'beforeload' : function(dataProxy, params) {
// alert(dwr.util.toDescriptiveString(params, 2));
// alert(this.queryUrl +"eeeee");
var o = this.queryForm.getForm().getValues(false);
console.log(o);
if(o.isDetail != undefined){
if(o.isDetail == '1')
o.isDetail = true;
else if(o.isDetail == '0')
o.isDetail = false;
}
if (!params.limit)
params.limit = this.pageSize;
params[dataProxy.loadArgsKey] = [ o, params ];
}.createDelegate(this),
'load' : function ( obj, records, options ) {
console.log("load=======================================================")
console.log(obj);
console.log(records);
console.log(options);
console.log("load=======================================================")
}
}
}),
reader : this.reader
});
this.pagingBar = new App.PagingToolbar({
pageSize : this.pageSize,
store : this.store,
displayInfo : true,
displayMsg : '{0} - {1} of {2}',
emptyMsg : "没有记录"
});
this.grid = new Ext.tf.SimpleGridPanel({
gridConfigEx : this.gridConfigEx,
edit : this.edit.createDelegate(this),
del : this.del.createDelegate(this),
store : this.store,
cm : new Ext.grid.ColumnModel(this.gridCm),
dblclickToggle : this.dblclickToggle,
contextmenuToggle : this.contextmenuToggle,
bbar : this.pagingBar
});
this.editForm = new Ext.tf.SimpleFormPanel({
items : this.editConfig,
close : function() {
this.editWin.hide();
}.createDelegate(this),
saveFn : function() {
var formBean = this.editForm.getForm().getValues(false);
console.log(formBean)
if(formBean.isDetail){
if(formBean.isDetail == '一级目录'){
formBean.isDetail = false;
}else if(formBean.isDetail == '二级目录'){
formBean.isDetail = true;
}
}
//console.log(formBean);
this.editUrl(formBean, function() {
Ext.MessageBox.alert("提示", "保存成功!");
this.editWin.hide();
this.store.reload();
}.createDelegate(this));
}.createDelegate(this)
});
this.editWin = new Ext.Window({
title : '',
// closeAction : 'hide',
modal : true,
autoHeight : true,
close : function() {
this.hide();
},
// autoWidth : true,
width : 300,
items : [ this.editForm ]
});
this.addRecord = function() {
this.editForm.getForm().reset();
this.editWin.show();
};
this.query = function() {
this.grid.selModel.clearSelections();
this.store.reload();
};
this.queryForm = new Ext.tf.SimpleQueryFormPanel(
{
queryConfigEx : this.queryConfigEx,
items : this.queryConfig,
buttons : [ {
text : '查询',
formBind : true,
scope : this,
handler : this.query.createDelegate(this)
} ],
keys : [ Ext.tf.util.enterKey(this.query
.createDelegate(this)) ]
});
if (this.hasAdd) {
this.queryForm.addButton('新增', this.addRecord, this);
}
this.items = [ this.queryForm, this.grid ];
Ext.tf.SimplePanel.superclass.initComponent.call(this);
} catch (e) {
//console.log(e);
throw e;
}
},
// private
edit : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
} else if (selections.length != 1) {
Ext.MessageBox.alert("提示", "不能选择多行编辑!");
return;
}
this.editWin.show();
this.editForm.getForm().loadRecord(selections[0]);
},
del : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
}
var fn = function(e) {
if (e == "yes") {
var ids = new Array();
for ( var i = 0, len = selections.length; i < len; i++) {
try {
// 如果选中的record没有在这一页显示,remove就会出问题
selections[i].get("id");
ids[i] = selections[i].get("id");
} catch (e) {
// //console.log(e);
}
}
this.deleteUrl(ids.join(","), function() {
Ext.MessageBox.alert("提示", "删除完毕!");
this.store.reload();
}.createDelegate(this));
}
}
Ext.MessageBox.confirm("提示", "确认要删除所选择的记录么?", fn, this);
},
// public
load : function() {
return this.store.load({
params : {
start : 0,
limit : this.pageSize
}
});
}
});
/**
* 弹出窗口控件
*/
Ext.tf.PopSelect = Ext.extend(Ext.form.TriggerField, {
triggerClass : 'x-form-date-trigger',
readOnly : true,
initComponent : function() {
		Ext.tf.PopSelect.superclass.initComponent.call(this);
},
/**
* Find ref element, set value
*/
setRefName : function(v) {
var refName = this.refName || ''; // If not refName, then ??
var form = this.findParentBy(function(v) {
if (Ext.type(v.getForm) == 'function')
return true;
});
if (form != null) {
Ext.each(form.find("name", refName), function(field) {
field.setValue(v);
});
}
return this;
},
onDestroy : function() {
Ext.destroy(this.win, this.panel);
Ext.tf.PopSelect.superclass.onDestroy.call(this);
},
edit : function() {
var grid = this.panel.grid;
var store = this.panel.store;
var view = grid.getView();
var sm = grid.getSelectionModel();
for ( var i = 0; i < view.getRows().length; i++) {
if (sm.isSelected(i)) {
var record = store.getAt(i);
var id = record.get('id');
var name = record.get('name');
this.setValue(name);
this.setRefName(id);
}
;
}
;
this.win.hide();
},
// pop select window
onTriggerClick : function() {
if (this.win == null) {
this.panel = new Ext.tf.SimplePanel({
title : '',
pageSize : 10,
hasAdd : false,
dblclickToggle : false,
contextmenuToggle : false,
gridConfigEx : {
height : 200
},
queryUrl : this.queryUrl,
// 查询条件Form
queryConfig : this.queryConfig,
// Grid 读取数据时的reader
readerConfig : this.readerConfig,
// Grid的列
gridCm : this.gridCm
});
this.panel.grid.on('rowdblclick', this.edit, this);
this.win = new Ext.Window({
title : this.title,
modal : true,
width : 520,
autoHeight : true,
closeAction : 'hide',
items : [ this.panel ],
buttons : [ {
text : '关闭',
handler : function() {
this.win.hide();
}.createDelegate(this)
}, {
text : '清除',
handler : function() {
this.setValue('');
this.setRefName('');
this.win.hide();
}.createDelegate(this)
}, {
text : '确认',
handler : this.edit.createDelegate(this)
} ]
});
}
this.win.show(this);
}
});
Ext.reg("popselect", Ext.tf.PopSelect);
/**
* SimpleReportPanel
*/
Ext.tf.SimpleReportPanel = Ext.extend(Ext.Panel, {
closable : true,
layout : 'fit',
autoScroll : true,
queryUrl : Ext.emptyFn,
// toggle: in grid, whether double click fire edit
dblclickToggle : false,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : false,
initComponent : function() {
try {
this.reader = new Ext.data.JsonReader({
totalProperty : "totalSize",
root : "data",
id : "id"
}, Ext.data.Record.create(this.readerConfig));
this.store = new Ext.data.Store({
proxy : new Ext.ux.data.DWRProxy({
dwrFunction : this.queryUrl
}),
reader : this.reader
});
this.grid = new Ext.tf.SimpleGridPanel({
tbar : [ {
text : '刷新',
handler : function() {
this.load();
}.createDelegate(this)
}, {
text : '打印',
handler : function() {
printPage(this.grid);
}.createDelegate(this)
} ],
viewConfig : {
forceFit : ''
},
width : '',
gridConfigEx : this.gridConfigEx,
store : this.store,
cm : new Ext.grid.ColumnModel(this.gridCm),
dblclickToggle : this.dblclickToggle,
contextmenuToggle : this.contextmenuToggle
});
this.items = [ this.grid ];
			Ext.tf.SimpleReportPanel.superclass.initComponent.call(this);
} catch (e) {
//console.log(e);
throw e;
}
},
// public
load : function() {
return this.store.load();
}
});
Ext.tf.WorkQueryPanel = Ext.extend(Ext.Panel, {
closable : true,
hasAdd : true,
queryUrl : Ext.emptyFn,
editUrl : Ext.emptyFn,
width : 800,
// toggle: in grid, whether double click fire edit
dblclickToggle : true,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : true,
initComponent : function() {
try {
Ext.applyIf(this, {
pageSize : 10
});
this.reader = new Ext.data.JsonReader({
totalProperty : "totalSize", // 总记录数
root : "data", // 分页对象中的数据集
id : "id" //
}, Ext.data.Record.create(this.readerConfig));
this.store = new Ext.data.Store({
proxy : new Ext.ux.data.DWRProxy({
dwrFunction : this.queryUrl,
listeners : {
'beforeload' : function(dataProxy, params) {
var o = this.queryForm.getForm().getValues(false);
console.log(o);
if (!params.limit)
params.limit = this.pageSize;
params[dataProxy.loadArgsKey] = [ o, params ];
}.createDelegate(this)
}
}),
reader : this.reader
});
this.pagingBar = new App.PagingToolbar({
pageSize : this.pageSize,
store : this.store,
displayInfo : true,
displayMsg : '{0} - {1} of {2}',
emptyMsg : "没有记录"
});
this.grid = new Ext.tf.SimpleGridPanel({
autoWidth : true,
autoHeight : true,
edit : this.edit.createDelegate(this),
store : this.store,
cm : new Ext.grid.ColumnModel(this.gridCm),
dblclickToggle : this.dblclickToggle,
contextmenuToggle : this.contextmenuToggle,
bbar : this.pagingBar
});
this.editForm = new Ext.tf.SimpleFormPanel({
autoWidth : true,
autoHeight : true,
items : this.editConfig,
close : function() {
this.editWin.hide();
}.createDelegate(this),
saveFn : function() {
var formBean = this.editForm.getForm().getValues(false);
this.editUrl(formBean, function() {
Ext.MessageBox.alert("提示", "保存成功!");
this.editWin.hide();
this.store.reload();
}.createDelegate(this));
}.createDelegate(this)
});
this.editWin = new Ext.Window({
title : '',
closeAction : 'hide',
modal : true,
autoHeight : true,
// autoWidth : true,
width : 300,
items : [ this.editForm ]
});
this.addRecord = function() {
this.editForm.getForm().reset();
this.editWin.show();
};
this.query = function() {
this.grid.selModel.clearSelections();
this.store.reload();
};
this.queryForm = new Ext.tf.SimpleQueryFormPanel(
{
queryConfigEx : {},
items : this.queryConfig,
autoWidth : true,
buttons : [ {
text : '查询',
formBind : true,
scope : this,
handler : this.query.createDelegate(this)
} ],
keys : [ Ext.tf.util.enterKey(this.query
.createDelegate(this)) ]
});
this.items = [ this.queryForm, this.grid ];
			Ext.tf.WorkQueryPanel.superclass.initComponent.call(this);
} catch (e) {
//console.log(e);
throw e;
}
},
// private
edit : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
} else if (selections.length != 1) {
Ext.MessageBox.alert("提示", "不能选择多行查看!");
return;
}
this.editWin.show();
this.editForm.getForm().loadRecord(selections[0]);
},
// public
load : function() {
return this.store.load({
params : {
start : 0,
limit : this.pageSize
}
});
}
}); | printPage | identifier_name |
tf.js | function printPage(grid) {
var tableStr = '<table cellpadding="0" cellspacing="0" width="100%" id="statisticByDay">';
var cm = grid.getColumnModel();
var colCount = cm.getColumnCount();
var temp_obj = new Array();
// 只下载没有隐藏的列(isHidden()为true表示隐藏,其他都为显示)
// 临时数组,存放所有当前显示列的下标
for ( var i = 0; i < colCount; i++) {// 从第三列开始,因为我的第1、2列是分别是rownumber和selectmodel。
if (cm.isHidden(i) == true) {
} else {
temp_obj.push(i);
}
}
tableStr = tableStr + '<thead><tr>';
for ( var i = 0; i < temp_obj.length; i++) {
// 显示列的列标题
tableStr = tableStr + '<td>' + cm.getColumnHeader(temp_obj[i])
+ '</td>';
}
tableStr = tableStr + '</tr></thead>';
var store = grid.getStore();
var recordCount = store.getCount();
tableStr = tableStr + '<tbody>'
for ( var i = 0; i < recordCount; i++) {
var r = store.getAt(i);
tableStr = tableStr + '<tr>';
for ( var j = 0; j < temp_obj.length; j++) {
var dataIndex = cm.getDataIndex(temp_obj[j]);
var tdValue = r.get(dataIndex);
var rendererFunc = cm.getRenderer(temp_obj[j]);
if (rendererFunc != null) {
tdValue = rendererFunc(tdValue);
}
if (tdValue == null || tdValue == 0) {
tdValue = ' ';
}
if (j != 0)
tableStr = tableStr + '<td style="text-align:center;">'
+ tdValue + '</td>';
else
tableStr = tableStr + '<td>' + tdValue + '</td>';
}
tableStr = tableStr + '</tr>';
}
tableStr = tableStr + '</tbody></table>';
var head = '<link rel="stylesheet" type="text/css" href="../css/printReport.css" />';
var titleHTML = tableStr;// document.getElementById("printGridfff").innerHTML;
var newwin = window.open('about:blank', '', '');
newwin.document.write(head);
newwin.document.write(titleHTML);
newwin.document.location.reload();
newwin.print();
// newwin.close();
}
tfMoney = function(v, sign) {
if (!sign)
sign = '';
v = (Math.round((v - 0) * 100)) / 100;
v = (v == Math.floor(v)) ? v + ".00" : ((v * 10 == Math.floor(v * 10)) ? v
+ "0" : v);
v = String(v);
var ps = v.split('.');
var whole = ps[0];
var sub = ps[1] ? '.' + ps[1] : '.00';
var r = /(\d+)(\d{3})/;
while (r.test(whole)) {
whole = whole.replace(r, '$1' + ',' + '$2');
}
v = whole + sub;
if (v.charAt(0) == '-') {
return '-' + sign + v.substr(1);
}
return sign + v;
}
Ext.apply(Ext.form.VTypes, {
daterange : function(val, field) {
var date = field.parseDate(val);
if (!date) {
return;
}
if (field.startDateField
&& (!this.dateRangeMax || (date.getTime() != this.dateRangeMax
.getTime()))) {
var start = Ext.getCmp(field.startDateField);
start.setMaxValue(date);
start.validate();
this.dateRangeMax = date;
} else if (field.endDateField
&& (!this.dateRangeMin || (date.getTime() != this.dateRangeMin
.getTime()))) {
var end = Ext.getCmp(field.endDateField);
end.setMinValue(date);
end.validate();
this.dateRangeMin = date;
}
/*
* Always return true since we're only using this vtype to set the
* min/max allowed values (these are tested for after the vtype test)
*/
return true;
},
password : function(val, field) {
if (field.initialPassField) {
var pwd = Ext.getCmp(field.initialPassField);
return (val == pwd.getValue());
}
return true;
},
passwordText : 'Passwords do not match'
});
Ext.grid.CheckColumn = function(config) {
Ext.apply(this, config);
if (!this.id) {
this.id = Ext.id();
}
this.renderer = this.renderer.createDelegate(this);
};
Ext.grid.CheckColumn.prototype = {
init : function(grid) {
this.grid = grid;
this.grid.on('render', function() {
var view = this.grid.getView();
view.mainBody.on('mousedown', this.onMouseDown, this);
}, this);
},
onMouseDown : function(e, t) {
if (t.className && t.className.indexOf('x-grid3-cc-' + this.id) != -1) {
e.stopEvent();
var index = this.grid.getView().findRowIndex(t);
var record = this.grid.store.getAt(index);
record.set(this.dataIndex, !record.data[this.dataIndex]);
}
},
renderer : function(v, p, record) {
p.css += ' x-grid3-check-col-td';
return '<div class="x-grid3-check-col' + (v ? '-on' : '')
+ ' x-grid3-cc-' + this.id + '"> </div>';
}
};
Ext.namespace('Ext.tf.util');
/**
* Compiles a selector/xpath query into a reusable function. The returned
* function takes one parameter "root" (optional), which is the context node
* from where the query should start.
*
* @param {Ext.form.FormPanel}
* formPanel 包含主数据的FormPanel
* @param {Ext.grid.GridPanel/Ext.grid.EditorGridPanel}
* gridPanel 包含细节数据的GridPanel
* @param {Array}
* excludes gridPanel中不需要获取的列, 数组中加入需要摈弃的grid.store.fields中
* @param {Array}
* resultPropNames (可选) 定义返回的json对象的属性名,缺省为["formData", "gridData"]
* @return {Object} 缺省为{formData:masterData, gridData:detailData}
* masterData为json对象, detailData为[json对象],
* detailData数组中的json对象的属性名与grid.store的fields定义相同
*/
Ext.tf.util.gatherData = function(formPanel, gridPanel, excludes,
resultPropNames) {
var store = gridPanel.store;
var gridDataList = [];
var formData = formPanel.getForm().getValues(false);
store.each(function(rec) {
for ( var i = excludes.length - 1; i >= 0; --i) {
delete rec.data[excludes[i]];
}
;
gridDataList.push(rec.data)
});
if (resultPropNames) {
var result = {};
result[resultPropNames[0]] = formData;
result[resultPropNames[1]] = gridDataList;
return result;
} else
return {
formData : formData,
gridData : gridDataList
};
}
Ext.tf.util.debug = function(msg) {
if (typeof (console) != "undefined") {
console.debug(msg);
}
}
/*
* Usage : var lable = Ext.tf.util.OptCache.getLabel("Nationality", "1");
*
*/
Ext.tf.util.OptCache = {};
Ext.tf.util.OptCache.data = {};
Ext.tf.util.OptCache.getOptions = function(optName) {
var util = Ext.tf.util;
if (!util.OptCache.data[optName]) {
OptionProvider.getOptions(optName, {
async : false,
callback : function(list) {
var opt = {};
for ( var i = list.length - 1; i >= 0; --i) {
opt[list[i].id] = list[i].name;
}
;
util.OptCache.data[optName] = opt;
}
});
} else {
util.debug("util.OptCache.getOptions: using cache");
}
return util.OptCache.data[optName];
};
Ext.tf.util.OptCache.getLabel = function(optName, key) {
var util = Ext.tf.util;
var options = util.OptCache.getOptions(optName);
if (options) {
return options[key];
} else {
return '';
}
};
/**
* 回车对应函数 handler
*/
Ext.tf.util.enterKey = function(handler) {
return {
key : [ 10, 13 ],
fn : handler
}
};
// //////////////////////
Ext.ns("Ext.tf");
Ext.tf.currentUser = null;
Ext.tf.SimpleFormPanel = Ext.extend(Ext.FormPanel, {
autoHeight : true,
frame : true,
defaultType : 'textfield',
initComponent : function() {
Ext.applyIf(this, {
// saveFn : function() {}
});
this.build();
Ext.tf.SimpleFormPanel.superclass.initComponent.call(this);
},
build : function() {
this.buttons = [ {
text : '确认',
handler : this.saveFn
}, {
text : '取消',
handler : this.close
} ]
// this.keys = [ Ext.tf.util.enterKey(this.saveFn) ];
}
})
Ext.tf.SimpleQueryFormPanel = Ext.extend(Ext.FormPanel, {
collapsible : true,
title : '查询',
labelWidth : 75,
frame : true,
bodyStyle : 'padding:5px 5px 0',
width : 500,
defaults : {
width : 230
},
defaultType : 'textfield',
initComponent : function() {
Ext.apply(this, this.queryConfigEx);
Ext.tf.SimpleQueryFormPanel.superclass.initComponent.call(this);
}
});
Ext.tf.SimpleGridPanel = Ext.extend(Ext.grid.GridPanel, {
loadMask : {
msg : '正在加载数据...'
},
viewConfig : {
forceFit : true
},
width : 500,
height : 300,
frame : true,
// toggle: in grid, whether double click fire edit
dblclickToggle : true,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : true,
initComponent : function() {
Ext.apply(this, this.gridConfigEx);
Ext.tf.SimpleGridPanel.superclass.initComponent.call(this);
this.dblclickToggle && this.on('rowdblclick', this.edit, this);
// 右键菜单
this.contextmenuToggle
&& this.on('rowcontextmenu', this.contextmenu, this);
},
contextmenu : function(grid, rowIndex, e) {
e.preventDefault();
e.stopEvent();
var updateMenu = new Ext.menu.Item({
iconCls : 'edit',
id : 'updateMenu',
text : '修改',
handler : this.edit.createDelegate(this)
});
var deleteMenu = new Ext.menu.Item({
iconCls : 'delete',
id : 'deleteMenu',
text : '删除',
handler : this.del.createDelegate(this)
});
var selections = this.getSelections();
if (selections.length > 1) {
updateMenu.disable();
}
var menuList = [ updateMenu, deleteMenu ];
this.grid_menu = new Ext.menu.Menu({
id : 'mainMenu',
items : menuList
});
var coords = e.getXY();
grid.getSelectionModel().selectRow(rowIndex);
this.grid_menu.showAt([ coords[0], coords[1] ]);
}
});
/**
* 功能页面的panel类 Config 说明: title : '模块目录管理', pageSize : 10, queryUrl :
* ModuleService.findModuleCategory.createDelegate(this), editUrl :
* ModuleService.editModuleCategory.createDelegate(this), deleteUrl : xxx,
* // Grid 需要的配置信息, 会覆盖掉缺省的 gridConfigEx : {}; // query panel 需要的配置信息, 会覆盖掉缺省的
* queryConfigEx : {};
*
* //查询用到的form配置 queryConfig : [ { fieldLabel : '名称', name : 'name', allowBlank :
* true } ], //编辑用到的form配置 editConfig : [ { fieldLabel : '模块目录名称', name : 'name' }, {
* fieldLabel : '排列顺序', name : 'ordinal' } ], //reader的配置 readerConfig : [ {
* name : 'id', mapping : 'id' }, { name : 'name', mapping : 'name' }, { name :
* 'ordinal', mapping : 'ordinal' } ], //网格记录显示的配置 gridCm : [ { "hidden" : true,
* "header" : "ID", "sortable" : true, "dataIndex" : "id" }, { "header" :
* "模块目录名称", "sortable" : true, "dataIndex" : "name" }, { "header" : "排列顺序",
* "sortable" : true, "dataIndex" : "ordinal" } ]
*/
Ext.tf.SimplePanel = Ext.extend(Ext.Panel, {
closable : true,
hasAdd : true,
queryUrl : Ext.emptyFn,
editUrl : Ext.emptyFn,
deleteUrl : Ext.emptyFn,
// toggle: in grid, whether double click fire edit
dblclickToggle : true,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : true,
initComponent : function() {
try {
Ext.applyIf(this, {
pageSize : 10
});
this.reader = new Ext.data.JsonReader({
totalProperty : "totalSize", // 总记录数
root : "data", // 分页对象中的数据集
id : "id" //
}, Ext.data.Record.create(this.readerConfig));
this.store = new Ext.data.Store({
proxy : new Ext.ux.data.DWRProxy({
dwrFunction : this.queryUrl,
listeners : {
'beforeload' : function(dataProxy, params) {
// alert(dwr.util.toDescriptiveString(params, 2));
// alert(this.queryUrl +"eeeee");
var o = this.queryForm.getForm().getValues(false);
console.log(o);
if(o.isDetail != undefined){
if(o.isDetail == '1')
o.isDetail = true;
else if(o.isDetail == '0')
o.isDetail = false;
}
if (!params.limit)
params.limit = this.pageSize;
params[dataProxy.loadArgsKey] = [ o, params ];
}.createDelegate(this),
'load' : function ( obj, records, options ) {
console.log("load=======================================================")
console.log(obj);
console.log(records);
console.log(options);
console.log("load=======================================================")
}
}
}),
reader : this.reader
});
this.pagingBar = new App.PagingToolbar({
pageSize : this.pageSize,
store : this.store,
displayInfo : true,
displayMsg : '{0} - {1} of {2}',
emptyMsg : "没有记录"
});
this.grid = new Ext.tf.SimpleGridPanel({
gridConfigEx : this.gridConfigEx,
edit : this.edit.createDelegate(this),
del : this.del.createDelegate(this),
store : this.store,
cm : new Ext.grid.ColumnModel(this.gridCm),
dblclickToggle : this.dblclickToggle,
contextmenuToggle : this.contextmenuToggle,
bbar : this.pagingBar
});
this.editForm = new Ext.tf.SimpleFormPanel({
items : this.editConfig,
close : function() {
this.editWin.hide();
}.createDelegate(this),
saveFn : function() {
var formBean = this.editForm.getForm().getValues(false);
console.log(formBean)
if(formBean.isDetail){
if(formBean.isDetail == '一级目录'){
formBean.isDetail = false;
}else if(formBean.isDetail == '二级目录'){
formBean.isDetail = true;
}
}
//console.log(formBean);
this.editUrl(formBean, function() {
Ext.MessageBox.alert("提示", "保存成功!");
this.editWin.hide();
this.store.reload();
}.createDelegate(this));
}.createDelegate(this)
});
this.editWin = new Ext.Window({
title : '',
// closeAction : 'hide',
modal : true,
autoHeight : true,
close : function() {
this.hide();
},
// autoWidth : true,
width : 300,
items : [ this.editForm ]
});
this.addRecord = function() {
this.editForm.getForm().reset();
this.editWin.show();
};
this.query = function() {
this.grid.selModel.clearSelections();
this.store.reload();
};
this.queryForm = new Ext.tf.SimpleQueryFormPanel(
{
queryConfigEx : this.queryConfigEx,
items : this.queryConfig,
buttons : [ {
text : '查询',
formBind : true,
scope : this,
handler : this.query.createDelegate(this)
} ],
keys : [ Ext.tf.util.enterKey(this.query
.createDelegate(this)) ]
});
if (this.hasAdd) {
this.queryForm.addButton('新增', this.addRecord, this);
}
this.items = [ this.queryForm, this.grid ];
Ext.tf.SimplePanel.superclass.initComponent.call(this);
} catch (e) {
//console.log(e);
throw e;
}
},
// private
edit : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
} else if (selections.length != 1) {
Ext.MessageBox.alert("提示", "不能选择多行编辑!");
return;
}
this.editWin.show();
this.editForm.getForm().loadRecord(selections[0]);
},
del : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
}
var fn = function(e) {
if (e == "yes") {
var ids = new Array();
for ( var i = 0, len = selections.length; i < len; i++) {
try {
// 如果选中的record没有在这一页显示,remove就会出问题
selections[i].get("id");
ids[i] = selections[i].get("id");
} catch (e) {
// //console.log(e);
}
}
this.deleteUrl(ids.join(","), function() {
Ext.MessageBox.alert("提示", "删除完毕!");
this.store.reload();
}.createDelegate(this));
}
}
Ext.MessageBox.confirm("提示", "确认要删除所选择的记录么?", fn, this);
},
// public
load : function() {
return this.store.load({
params : {
start : 0,
limit : this.pageSize
}
});
}
});
/**
* 弹出窗口控件
*/
Ex | ch(form.find("name", refName), function(field) {
field.setValue(v);
});
}
return this;
},
onDestroy : function() {
Ext.destroy(this.win, this.panel);
Ext.tf.PopSelect.superclass.onDestroy.call(this);
},
edit : function() {
var grid = this.panel.grid;
var store = this.panel.store;
var view = grid.getView();
var sm = grid.getSelectionModel();
for ( var i = 0; i < view.getRows().length; i++) {
if (sm.isSelected(i)) {
var record = store.getAt(i);
var id = record.get('id');
var name = record.get('name');
this.setValue(name);
this.setRefName(id);
}
;
}
;
this.win.hide();
},
// pop select window
onTriggerClick : function() {
if (this.win == null) {
this.panel = new Ext.tf.SimplePanel({
title : '',
pageSize : 10,
hasAdd : false,
dblclickToggle : false,
contextmenuToggle : false,
gridConfigEx : {
height : 200
},
queryUrl : this.queryUrl,
// 查询条件Form
queryConfig : this.queryConfig,
// Grid 读取数据时的reader
readerConfig : this.readerConfig,
// Grid的列
gridCm : this.gridCm
});
this.panel.grid.on('rowdblclick', this.edit, this);
this.win = new Ext.Window({
title : this.title,
modal : true,
width : 520,
autoHeight : true,
closeAction : 'hide',
items : [ this.panel ],
buttons : [ {
text : '关闭',
handler : function() {
this.win.hide();
}.createDelegate(this)
}, {
text : '清除',
handler : function() {
this.setValue('');
this.setRefName('');
this.win.hide();
}.createDelegate(this)
}, {
text : '确认',
handler : this.edit.createDelegate(this)
} ]
});
}
this.win.show(this);
}
});
Ext.reg("popselect", Ext.tf.PopSelect);
/**
* SimpleReportPanel
*/
Ext.tf.SimpleReportPanel = Ext.extend(Ext.Panel, {
closable : true,
layout : 'fit',
autoScroll : true,
queryUrl : Ext.emptyFn,
// toggle: in grid, whether double click fire edit
dblclickToggle : false,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : false,
initComponent : function() {
try {
this.reader = new Ext.data.JsonReader({
totalProperty : "totalSize",
root : "data",
id : "id"
}, Ext.data.Record.create(this.readerConfig));
this.store = new Ext.data.Store({
proxy : new Ext.ux.data.DWRProxy({
dwrFunction : this.queryUrl
}),
reader : this.reader
});
this.grid = new Ext.tf.SimpleGridPanel({
tbar : [ {
text : '刷新',
handler : function() {
this.load();
}.createDelegate(this)
}, {
text : '打印',
handler : function() {
printPage(this.grid);
}.createDelegate(this)
} ],
viewConfig : {
forceFit : ''
},
width : '',
gridConfigEx : this.gridConfigEx,
store : this.store,
cm : new Ext.grid.ColumnModel(this.gridCm),
dblclickToggle : this.dblclickToggle,
contextmenuToggle : this.contextmenuToggle
});
this.items = [ this.grid ];
			Ext.tf.SimpleReportPanel.superclass.initComponent.call(this);
} catch (e) {
//console.log(e);
throw e;
}
},
// public
load : function() {
return this.store.load();
}
});
Ext.tf.WorkQueryPanel = Ext.extend(Ext.Panel, {
closable : true,
hasAdd : true,
queryUrl : Ext.emptyFn,
editUrl : Ext.emptyFn,
width : 800,
// toggle: in grid, whether double click fire edit
dblclickToggle : true,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : true,
initComponent : function() {
try {
Ext.applyIf(this, {
pageSize : 10
});
this.reader = new Ext.data.JsonReader({
totalProperty : "totalSize", // 总记录数
root : "data", // 分页对象中的数据集
id : "id" //
}, Ext.data.Record.create(this.readerConfig));
this.store = new Ext.data.Store({
proxy : new Ext.ux.data.DWRProxy({
dwrFunction : this.queryUrl,
listeners : {
'beforeload' : function(dataProxy, params) {
var o = this.queryForm.getForm().getValues(false);
console.log(o);
if (!params.limit)
params.limit = this.pageSize;
params[dataProxy.loadArgsKey] = [ o, params ];
}.createDelegate(this)
}
}),
reader : this.reader
});
this.pagingBar = new App.PagingToolbar({
pageSize : this.pageSize,
store : this.store,
displayInfo : true,
displayMsg : '{0} - {1} of {2}',
emptyMsg : "没有记录"
});
this.grid = new Ext.tf.SimpleGridPanel({
autoWidth : true,
autoHeight : true,
edit : this.edit.createDelegate(this),
store : this.store,
cm : new Ext.grid.ColumnModel(this.gridCm),
dblclickToggle : this.dblclickToggle,
contextmenuToggle : this.contextmenuToggle,
bbar : this.pagingBar
});
this.editForm = new Ext.tf.SimpleFormPanel({
autoWidth : true,
autoHeight : true,
items : this.editConfig,
close : function() {
this.editWin.hide();
}.createDelegate(this),
saveFn : function() {
var formBean = this.editForm.getForm().getValues(false);
this.editUrl(formBean, function() {
Ext.MessageBox.alert("提示", "保存成功!");
this.editWin.hide();
this.store.reload();
}.createDelegate(this));
}.createDelegate(this)
});
this.editWin = new Ext.Window({
title : '',
closeAction : 'hide',
modal : true,
autoHeight : true,
// autoWidth : true,
width : 300,
items : [ this.editForm ]
});
this.addRecord = function() {
this.editForm.getForm().reset();
this.editWin.show();
};
this.query = function() {
this.grid.selModel.clearSelections();
this.store.reload();
};
this.queryForm = new Ext.tf.SimpleQueryFormPanel(
{
queryConfigEx : {},
items : this.queryConfig,
autoWidth : true,
buttons : [ {
text : '查询',
formBind : true,
scope : this,
handler : this.query.createDelegate(this)
} ],
keys : [ Ext.tf.util.enterKey(this.query
.createDelegate(this)) ]
});
this.items = [ this.queryForm, this.grid ];
			Ext.tf.WorkQueryPanel.superclass.initComponent.call(this);
} catch (e) {
//console.log(e);
throw e;
}
},
// private
edit : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
} else if (selections.length != 1) {
Ext.MessageBox.alert("提示", "不能选择多行查看!");
return;
}
this.editWin.show();
this.editForm.getForm().loadRecord(selections[0]);
},
// public
load : function() {
return this.store.load({
params : {
start : 0,
limit : this.pageSize
}
});
}
}); | t.tf.PopSelect = Ext.extend(Ext.form.TriggerField, {
triggerClass : 'x-form-date-trigger',
readOnly : true,
initComponent : function() {
		Ext.tf.PopSelect.superclass.initComponent.call(this);
},
/**
* Find ref element, set value
*/
setRefName : function(v) {
var refName = this.refName || ''; // If not refName, then ??
var form = this.findParentBy(function(v) {
if (Ext.type(v.getForm) == 'function')
return true;
});
if (form != null) {
Ext.ea | conditional_block |
tf.js | function printPage(grid) | + ".00" : ((v * 10 == Math.floor(v * 10)) ? v
+ "0" : v);
v = String(v);
var ps = v.split('.');
var whole = ps[0];
var sub = ps[1] ? '.' + ps[1] : '.00';
var r = /(\d+)(\d{3})/;
while (r.test(whole)) {
whole = whole.replace(r, '$1' + ',' + '$2');
}
v = whole + sub;
if (v.charAt(0) == '-') {
return '-' + sign + v.substr(1);
}
return sign + v;
}
Ext.apply(Ext.form.VTypes, {
daterange : function(val, field) {
var date = field.parseDate(val);
if (!date) {
return;
}
if (field.startDateField
&& (!this.dateRangeMax || (date.getTime() != this.dateRangeMax
.getTime()))) {
var start = Ext.getCmp(field.startDateField);
start.setMaxValue(date);
start.validate();
this.dateRangeMax = date;
} else if (field.endDateField
&& (!this.dateRangeMin || (date.getTime() != this.dateRangeMin
.getTime()))) {
var end = Ext.getCmp(field.endDateField);
end.setMinValue(date);
end.validate();
this.dateRangeMin = date;
}
/*
* Always return true since we're only using this vtype to set the
* min/max allowed values (these are tested for after the vtype test)
*/
return true;
},
password : function(val, field) {
if (field.initialPassField) {
var pwd = Ext.getCmp(field.initialPassField);
return (val == pwd.getValue());
}
return true;
},
passwordText : 'Passwords do not match'
});
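// Usage sketch for the "daterange" vtype above (the field ids/names are
// assumptions, not taken from this file): each date field references the other
// by id so the allowed min/max values stay in sync while the user edits.
// var dateRangeFields = [ {
//     xtype : 'datefield',
//     id : 'startdt',
//     name : 'startdt',
//     fieldLabel : 'Start',
//     vtype : 'daterange',
//     endDateField : 'enddt' // id of the end date field
// }, {
//     xtype : 'datefield',
//     id : 'enddt',
//     name : 'enddt',
//     fieldLabel : 'End',
//     vtype : 'daterange',
//     startDateField : 'startdt' // id of the start date field
// } ];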
Ext.grid.CheckColumn = function(config) {
Ext.apply(this, config);
if (!this.id) {
this.id = Ext.id();
}
this.renderer = this.renderer.createDelegate(this);
};
Ext.grid.CheckColumn.prototype = {
init : function(grid) {
this.grid = grid;
this.grid.on('render', function() {
var view = this.grid.getView();
view.mainBody.on('mousedown', this.onMouseDown, this);
}, this);
},
onMouseDown : function(e, t) {
if (t.className && t.className.indexOf('x-grid3-cc-' + this.id) != -1) {
e.stopEvent();
var index = this.grid.getView().findRowIndex(t);
var record = this.grid.store.getAt(index);
record.set(this.dataIndex, !record.data[this.dataIndex]);
}
},
renderer : function(v, p, record) {
p.css += ' x-grid3-check-col-td';
return '<div class="x-grid3-check-col' + (v ? '-on' : '')
+ ' x-grid3-cc-' + this.id + '"> </div>';
}
};
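// Usage sketch for CheckColumn (column and field names are assumptions): the
// same instance has to appear both in the ColumnModel and in the grid's
// plugins, otherwise init() never attaches the mousedown listener.
// var checkColumn = new Ext.grid.CheckColumn({
//     header : 'Enabled',
//     dataIndex : 'enabled',
//     width : 55
// });
// var grid = new Ext.grid.GridPanel({
//     store : store, // an Ext.data.Store created elsewhere
//     cm : new Ext.grid.ColumnModel([ {
//         header : 'Name',
//         dataIndex : 'name'
//     }, checkColumn ]),
//     plugins : [ checkColumn ]
// });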
Ext.namespace('Ext.tf.util');
/**
 * Gathers master data from a FormPanel and detail data from a GridPanel into a
 * single JSON object.
 *
 * @param {Ext.form.FormPanel}
 *            formPanel the FormPanel holding the master data
 * @param {Ext.grid.GridPanel/Ext.grid.EditorGridPanel}
 *            gridPanel the GridPanel holding the detail data
 * @param {Array}
 *            excludes columns that should not be collected from gridPanel;
 *            list the grid.store.fields entries to discard
 * @param {Array}
 *            resultPropNames (optional) property names of the returned JSON
 *            object, defaults to ["formData", "gridData"]
 * @return {Object} defaults to {formData:masterData, gridData:detailData};
 *         masterData is a JSON object, detailData is an array of JSON objects
 *         whose property names match the fields defined on grid.store
*/
Ext.tf.util.gatherData = function(formPanel, gridPanel, excludes,
resultPropNames) {
var store = gridPanel.store;
var gridDataList = [];
var formData = formPanel.getForm().getValues(false);
store.each(function(rec) {
for ( var i = excludes.length - 1; i >= 0; --i) {
delete rec.data[excludes[i]];
}
;
gridDataList.push(rec.data)
});
if (resultPropNames) {
var result = {};
result[resultPropNames[0]] = formData;
result[resultPropNames[1]] = gridDataList;
return result;
} else
return {
formData : formData,
gridData : gridDataList
};
}
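// Usage sketch for gatherData (the panel variables and the OrderService DWR
// facade are assumptions): collect the master record from a form and the
// detail rows from a grid, dropping the "id" field from every row.
// var data = Ext.tf.util.gatherData(this.masterForm, this.detailGrid, [ 'id' ]);
// // data.formData -> JSON object built from the form fields
// // data.gridData -> array of JSON objects, one per grid record
// OrderService.save(data.formData, data.gridData, function() {
//     Ext.MessageBox.alert("提示", "保存成功!");
// });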
Ext.tf.util.debug = function(msg) {
if (typeof (console) != "undefined") {
console.debug(msg);
}
}
/*
 * Usage : var label = Ext.tf.util.OptCache.getLabel("Nationality", "1");
*
*/
Ext.tf.util.OptCache = {};
Ext.tf.util.OptCache.data = {};
Ext.tf.util.OptCache.getOptions = function(optName) {
var util = Ext.tf.util;
if (!util.OptCache.data[optName]) {
OptionProvider.getOptions(optName, {
async : false,
callback : function(list) {
var opt = {};
for ( var i = list.length - 1; i >= 0; --i) {
opt[list[i].id] = list[i].name;
}
;
util.OptCache.data[optName] = opt;
}
});
} else {
util.debug("util.OptCache.getOptions: using cache");
}
return util.OptCache.data[optName];
};
Ext.tf.util.OptCache.getLabel = function(optName, key) {
var util = Ext.tf.util;
var options = util.OptCache.getOptions(optName);
if (options) {
return options[key];
} else {
return '';
}
};
/**
 * Binds the Enter key to the given handler function
*/
Ext.tf.util.enterKey = function(handler) {
return {
key : [ 10, 13 ],
fn : handler
}
};
// //////////////////////
Ext.ns("Ext.tf");
Ext.tf.currentUser = null;
Ext.tf.SimpleFormPanel = Ext.extend(Ext.FormPanel, {
autoHeight : true,
frame : true,
defaultType : 'textfield',
initComponent : function() {
Ext.applyIf(this, {
// saveFn : function() {}
});
this.build();
Ext.tf.SimpleFormPanel.superclass.initComponent.call(this);
},
build : function() {
this.buttons = [ {
text : '确认',
handler : this.saveFn
}, {
text : '取消',
handler : this.close
} ]
// this.keys = [ Ext.tf.util.enterKey(this.saveFn) ];
}
})
Ext.tf.SimpleQueryFormPanel = Ext.extend(Ext.FormPanel, {
collapsible : true,
title : '查询',
labelWidth : 75,
frame : true,
bodyStyle : 'padding:5px 5px 0',
width : 500,
defaults : {
width : 230
},
defaultType : 'textfield',
initComponent : function() {
Ext.apply(this, this.queryConfigEx);
Ext.tf.SimpleQueryFormPanel.superclass.initComponent.call(this);
}
});
Ext.tf.SimpleGridPanel = Ext.extend(Ext.grid.GridPanel, {
loadMask : {
msg : '正在加载数据...'
},
viewConfig : {
forceFit : true
},
width : 500,
height : 300,
frame : true,
// toggle: in grid, whether double click fire edit
dblclickToggle : true,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : true,
initComponent : function() {
Ext.apply(this, this.gridConfigEx);
Ext.tf.SimpleGridPanel.superclass.initComponent.call(this);
this.dblclickToggle && this.on('rowdblclick', this.edit, this);
// context menu (right click)
this.contextmenuToggle
&& this.on('rowcontextmenu', this.contextmenu, this);
},
contextmenu : function(grid, rowIndex, e) {
e.preventDefault();
e.stopEvent();
var updateMenu = new Ext.menu.Item({
iconCls : 'edit',
id : 'updateMenu',
text : '修改',
handler : this.edit.createDelegate(this)
});
var deleteMenu = new Ext.menu.Item({
iconCls : 'delete',
id : 'deleteMenu',
text : '删除',
handler : this.del.createDelegate(this)
});
var selections = this.getSelections();
if (selections.length > 1) {
updateMenu.disable();
}
var menuList = [ updateMenu, deleteMenu ];
this.grid_menu = new Ext.menu.Menu({
id : 'mainMenu',
items : menuList
});
var coords = e.getXY();
grid.getSelectionModel().selectRow(rowIndex);
this.grid_menu.showAt([ coords[0], coords[1] ]);
}
});
/**
 * Panel class for feature pages. Config description: title : '模块目录管理', pageSize : 10, queryUrl :
 * ModuleService.findModuleCategory.createDelegate(this), editUrl :
 * ModuleService.editModuleCategory.createDelegate(this), deleteUrl : xxx,
 * // extra Grid config, overrides the defaults: gridConfigEx : {}; // extra query panel config, overrides the defaults:
 * queryConfigEx : {};
 *
 * // form config used for querying: queryConfig : [ { fieldLabel : '名称', name : 'name', allowBlank :
 * true } ], // form config used for editing: editConfig : [ { fieldLabel : '模块目录名称', name : 'name' }, {
 * fieldLabel : '排列顺序', name : 'ordinal' } ], // reader config: readerConfig : [ {
 * name : 'id', mapping : 'id' }, { name : 'name', mapping : 'name' }, { name :
 * 'ordinal', mapping : 'ordinal' } ], // grid column config for displaying records: gridCm : [ { "hidden" : true,
 * "header" : "ID", "sortable" : true, "dataIndex" : "id" }, { "header" :
 * "模块目录名称", "sortable" : true, "dataIndex" : "name" }, { "header" : "排列顺序",
 * "sortable" : true, "dataIndex" : "ordinal" } ]
*/
Ext.tf.SimplePanel = Ext.extend(Ext.Panel, {
closable : true,
hasAdd : true,
queryUrl : Ext.emptyFn,
editUrl : Ext.emptyFn,
deleteUrl : Ext.emptyFn,
// toggle: in grid, whether double click fire edit
dblclickToggle : true,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : true,
initComponent : function() {
try {
Ext.applyIf(this, {
pageSize : 10
});
this.reader = new Ext.data.JsonReader({
totalProperty : "totalSize", // 总记录数
root : "data", // 分页对象中的数据集
id : "id" //
}, Ext.data.Record.create(this.readerConfig));
this.store = new Ext.data.Store({
proxy : new Ext.ux.data.DWRProxy({
dwrFunction : this.queryUrl,
listeners : {
'beforeload' : function(dataProxy, params) {
// alert(dwr.util.toDescriptiveString(params, 2));
// alert(this.queryUrl +"eeeee");
var o = this.queryForm.getForm().getValues(false);
console.log(o);
if(o.isDetail != undefined){
if(o.isDetail == '1')
o.isDetail = true;
else if(o.isDetail == '0')
o.isDetail = false;
}
if (!params.limit)
params.limit = this.pageSize;
params[dataProxy.loadArgsKey] = [ o, params ];
}.createDelegate(this),
'load' : function ( obj, records, options ) {
console.log("load=======================================================")
console.log(obj);
console.log(records);
console.log(options);
console.log("load=======================================================")
}
}
}),
reader : this.reader
});
this.pagingBar = new App.PagingToolbar({
pageSize : this.pageSize,
store : this.store,
displayInfo : true,
displayMsg : '{0} - {1} of {2}',
emptyMsg : "没有记录"
});
this.grid = new Ext.tf.SimpleGridPanel({
gridConfigEx : this.gridConfigEx,
edit : this.edit.createDelegate(this),
del : this.del.createDelegate(this),
store : this.store,
cm : new Ext.grid.ColumnModel(this.gridCm),
dblclickToggle : this.dblclickToggle,
contextmenuToggle : this.contextmenuToggle,
bbar : this.pagingBar
});
this.editForm = new Ext.tf.SimpleFormPanel({
items : this.editConfig,
close : function() {
this.editWin.hide();
}.createDelegate(this),
saveFn : function() {
var formBean = this.editForm.getForm().getValues(false);
console.log(formBean)
if(formBean.isDetail){
if(formBean.isDetail == '一级目录'){
formBean.isDetail = false;
}else if(formBean.isDetail == '二级目录'){
formBean.isDetail = true;
}
}
//console.log(formBean);
this.editUrl(formBean, function() {
Ext.MessageBox.alert("提示", "保存成功!");
this.editWin.hide();
this.store.reload();
}.createDelegate(this));
}.createDelegate(this)
});
this.editWin = new Ext.Window({
title : '',
// closeAction : 'hide',
modal : true,
autoHeight : true,
close : function() {
this.hide();
},
// autoWidth : true,
width : 300,
items : [ this.editForm ]
});
this.addRecord = function() {
this.editForm.getForm().reset();
this.editWin.show();
};
this.query = function() {
this.grid.selModel.clearSelections();
this.store.reload();
};
this.queryForm = new Ext.tf.SimpleQueryFormPanel(
{
queryConfigEx : this.queryConfigEx,
items : this.queryConfig,
buttons : [ {
text : '查询',
formBind : true,
scope : this,
handler : this.query.createDelegate(this)
} ],
keys : [ Ext.tf.util.enterKey(this.query
.createDelegate(this)) ]
});
if (this.hasAdd) {
this.queryForm.addButton('新增', this.addRecord, this);
}
this.items = [ this.queryForm, this.grid ];
Ext.tf.SimplePanel.superclass.initComponent.call(this);
} catch (e) {
//console.log(e);
throw e;
}
},
// private
edit : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
} else if (selections.length != 1) {
Ext.MessageBox.alert("提示", "不能选择多行编辑!");
return;
}
this.editWin.show();
this.editForm.getForm().loadRecord(selections[0]);
},
del : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
}
var fn = function(e) {
if (e == "yes") {
var ids = new Array();
for ( var i = 0, len = selections.length; i < len; i++) {
try {
// if a selected record is not shown on this page, remove will fail
selections[i].get("id");
ids[i] = selections[i].get("id");
} catch (e) {
// //console.log(e);
}
}
this.deleteUrl(ids.join(","), function() {
Ext.MessageBox.alert("提示", "删除完毕!");
this.store.reload();
}.createDelegate(this));
}
}
Ext.MessageBox.confirm("提示", "确认要删除所选择的记录么?", fn, this);
},
// public
load : function() {
return this.store.load({
params : {
start : 0,
limit : this.pageSize
}
});
}
});
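// Usage sketch for Ext.tf.SimplePanel, following the values shown in the
// config comment above (ModuleService and its methods are assumptions):
// var panel = new Ext.tf.SimplePanel({
//     title : '模块目录管理',
//     pageSize : 10,
//     queryUrl : ModuleService.findModuleCategory.createDelegate(this),
//     editUrl : ModuleService.editModuleCategory.createDelegate(this),
//     deleteUrl : ModuleService.deleteModuleCategory.createDelegate(this),
//     queryConfig : [ { fieldLabel : '名称', name : 'name', allowBlank : true } ],
//     editConfig : [ { fieldLabel : '模块目录名称', name : 'name' },
//             { fieldLabel : '排列顺序', name : 'ordinal' } ],
//     readerConfig : [ { name : 'id', mapping : 'id' },
//             { name : 'name', mapping : 'name' },
//             { name : 'ordinal', mapping : 'ordinal' } ],
//     gridCm : [ { "hidden" : true, "header" : "ID", "sortable" : true, "dataIndex" : "id" },
//             { "header" : "模块目录名称", "sortable" : true, "dataIndex" : "name" },
//             { "header" : "排列顺序", "sortable" : true, "dataIndex" : "ordinal" } ]
// });
// panel.load(); // fetch the first page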
/**
 * Pop-up selection window control
*/
Ext.tf.PopSelect = Ext.extend(Ext.form.TriggerField, {
triggerClass : 'x-form-date-trigger',
readOnly : true,
initComponent : function() {
Ext.tf.PopSelect.superclass.initComponent(this);
},
/**
* Find ref element, set value
*/
setRefName : function(v) {
var refName = this.refName || ''; // If not refName, then ??
var form = this.findParentBy(function(v) {
if (Ext.type(v.getForm) == 'function')
return true;
});
if (form != null) {
Ext.each(form.find("name", refName), function(field) {
field.setValue(v);
});
}
return this;
},
onDestroy : function() {
Ext.destroy(this.win, this.panel);
Ext.tf.PopSelect.superclass.onDestroy.call(this);
},
edit : function() {
var grid = this.panel.grid;
var store = this.panel.store;
var view = grid.getView();
var sm = grid.getSelectionModel();
for ( var i = 0; i < view.getRows().length; i++) {
if (sm.isSelected(i)) {
var record = store.getAt(i);
var id = record.get('id');
var name = record.get('name');
this.setValue(name);
this.setRefName(id);
}
;
}
;
this.win.hide();
},
// pop select window
onTriggerClick : function() {
if (this.win == null) {
this.panel = new Ext.tf.SimplePanel({
title : '',
pageSize : 10,
hasAdd : false,
dblclickToggle : false,
contextmenuToggle : false,
gridConfigEx : {
height : 200
},
queryUrl : this.queryUrl,
// query criteria form
queryConfig : this.queryConfig,
// reader used when the Grid loads data
readerConfig : this.readerConfig,
// Grid columns
gridCm : this.gridCm
});
this.panel.grid.on('rowdblclick', this.edit, this);
this.win = new Ext.Window({
title : this.title,
modal : true,
width : 520,
autoHeight : true,
closeAction : 'hide',
items : [ this.panel ],
buttons : [ {
text : '关闭',
handler : function() {
this.win.hide();
}.createDelegate(this)
}, {
text : '清除',
handler : function() {
this.setValue('');
this.setRefName('');
this.win.hide();
}.createDelegate(this)
}, {
text : '确认',
handler : this.edit.createDelegate(this)
} ]
});
}
this.win.show(this);
}
});
Ext.reg("popselect", Ext.tf.PopSelect);
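// Usage sketch for the "popselect" xtype (DeptService and the field names are
// assumptions): the trigger opens a query window; confirming a row copies the
// row's name into this field and its id into the sibling field named refName.
// {
//     xtype : 'popselect',
//     fieldLabel : '部门',
//     name : 'deptName',
//     refName : 'deptId', // form field that receives the selected id
//     title : '选择部门',
//     queryUrl : DeptService.findDept.createDelegate(this),
//     queryConfig : [ { fieldLabel : '名称', name : 'name', allowBlank : true } ],
//     readerConfig : [ { name : 'id', mapping : 'id' }, { name : 'name', mapping : 'name' } ],
//     gridCm : [ { "hidden" : true, "header" : "ID", "sortable" : true, "dataIndex" : "id" },
//             { "header" : "名称", "sortable" : true, "dataIndex" : "name" } ]
// }, {
//     xtype : 'hidden',
//     name : 'deptId'
// }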
/**
* SimpleReportPanel
*/
Ext.tf.SimpleReportPanel = Ext.extend(Ext.Panel, {
closable : true,
layout : 'fit',
autoScroll : true,
queryUrl : Ext.emptyFn,
// toggle: in grid, whether double click fire edit
dblclickToggle : false,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : false,
initComponent : function() {
try {
this.reader = new Ext.data.JsonReader({
totalProperty : "totalSize",
root : "data",
id : "id"
}, Ext.data.Record.create(this.readerConfig));
this.store = new Ext.data.Store({
proxy : new Ext.ux.data.DWRProxy({
dwrFunction : this.queryUrl
}),
reader : this.reader
});
this.grid = new Ext.tf.SimpleGridPanel({
tbar : [ {
text : '刷新',
handler : function() {
this.load();
}.createDelegate(this)
}, {
text : '打印',
handler : function() {
printPage(this.grid);
}.createDelegate(this)
} ],
viewConfig : {
forceFit : ''
},
width : '',
gridConfigEx : this.gridConfigEx,
store : this.store,
cm : new Ext.grid.ColumnModel(this.gridCm),
dblclickToggle : this.dblclickToggle,
contextmenuToggle : this.contextmenuToggle
});
this.items = [ this.grid ];
Ext.tf.SimplePanel.superclass.initComponent.call(this);
} catch (e) {
//console.log(e);
throw e;
}
},
// public
load : function() {
return this.store.load();
}
});
Ext.tf.WorkQueryPanel = Ext.extend(Ext.Panel, {
closable : true,
hasAdd : true,
queryUrl : Ext.emptyFn,
editUrl : Ext.emptyFn,
width : 800,
// toggle: in grid, whether double click fire edit
dblclickToggle : true,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : true,
initComponent : function() {
try {
Ext.applyIf(this, {
pageSize : 10
});
this.reader = new Ext.data.JsonReader({
totalProperty : "totalSize", // 总记录数
root : "data", // 分页对象中的数据集
id : "id" //
}, Ext.data.Record.create(this.readerConfig));
this.store = new Ext.data.Store({
proxy : new Ext.ux.data.DWRProxy({
dwrFunction : this.queryUrl,
listeners : {
'beforeload' : function(dataProxy, params) {
var o = this.queryForm.getForm().getValues(false);
console.log(o);
if (!params.limit)
params.limit = this.pageSize;
params[dataProxy.loadArgsKey] = [ o, params ];
}.createDelegate(this)
}
}),
reader : this.reader
});
this.pagingBar = new App.PagingToolbar({
pageSize : this.pageSize,
store : this.store,
displayInfo : true,
displayMsg : '{0} - {1} of {2}',
emptyMsg : "没有记录"
});
this.grid = new Ext.tf.SimpleGridPanel({
autoWidth : true,
autoHeight : true,
edit : this.edit.createDelegate(this),
store : this.store,
cm : new Ext.grid.ColumnModel(this.gridCm),
dblclickToggle : this.dblclickToggle,
contextmenuToggle : this.contextmenuToggle,
bbar : this.pagingBar
});
this.editForm = new Ext.tf.SimpleFormPanel({
autoWidth : true,
autoHeight : true,
items : this.editConfig,
close : function() {
this.editWin.hide();
}.createDelegate(this),
saveFn : function() {
var formBean = this.editForm.getForm().getValues(false);
this.editUrl(formBean, function() {
Ext.MessageBox.alert("提示", "保存成功!");
this.editWin.hide();
this.store.reload();
}.createDelegate(this));
}.createDelegate(this)
});
this.editWin = new Ext.Window({
title : '',
closeAction : 'hide',
modal : true,
autoHeight : true,
// autoWidth : true,
width : 300,
items : [ this.editForm ]
});
this.addRecord = function() {
this.editForm.getForm().reset();
this.editWin.show();
};
this.query = function() {
this.grid.selModel.clearSelections();
this.store.reload();
};
this.queryForm = new Ext.tf.SimpleQueryFormPanel(
{
queryConfigEx : {},
items : this.queryConfig,
autoWidth : true,
buttons : [ {
text : '查询',
formBind : true,
scope : this,
handler : this.query.createDelegate(this)
} ],
keys : [ Ext.tf.util.enterKey(this.query
.createDelegate(this)) ]
});
this.items = [ this.queryForm, this.grid ];
Ext.tf.SimplePanel.superclass.initComponent.call(this);
} catch (e) {
//console.log(e);
throw e;
}
},
// private
edit : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
} else if (selections.length != 1) {
Ext.MessageBox.alert("提示", "不能选择多行查看!");
return;
}
this.editWin.show();
this.editForm.getForm().loadRecord(selections[0]);
},
// public
load : function() {
return this.store.load({
params : {
start : 0,
limit : this.pageSize
}
});
}
}); | {
var tableStr = '<table cellpadding="0" cellspacing="0" width="100%" id="statisticByDay">';
var cm = grid.getColumnModel();
var colCount = cm.getColumnCount();
var temp_obj = new Array();
// only export columns that are not hidden (isHidden() === true means hidden, everything else is shown)
// temporary array holding the indexes of all currently visible columns
for ( var i = 0; i < colCount; i++) {// starting from the third column, because columns 1 and 2 are the rownumber and selectmodel
if (cm.isHidden(i) == true) {
} else {
temp_obj.push(i);
}
}
tableStr = tableStr + '<thead><tr>';
for ( var i = 0; i < temp_obj.length; i++) {
// column header of each visible column
tableStr = tableStr + '<td>' + cm.getColumnHeader(temp_obj[i])
+ '</td>';
}
tableStr = tableStr + '</tr></thead>';
var store = grid.getStore();
var recordCount = store.getCount();
tableStr = tableStr + '<tbody>'
for ( var i = 0; i < recordCount; i++) {
var r = store.getAt(i);
tableStr = tableStr + '<tr>';
for ( var j = 0; j < temp_obj.length; j++) {
var dataIndex = cm.getDataIndex(temp_obj[j]);
var tdValue = r.get(dataIndex);
var rendererFunc = cm.getRenderer(temp_obj[j]);
if (rendererFunc != null) {
tdValue = rendererFunc(tdValue);
}
if (tdValue == null || tdValue == 0) {
tdValue = ' ';
}
if (j != 0)
tableStr = tableStr + '<td style="text-align:center;">'
+ tdValue + '</td>';
else
tableStr = tableStr + '<td>' + tdValue + '</td>';
}
tableStr = tableStr + '</tr>';
}
tableStr = tableStr + '</tbody></table>';
var head = '<link rel="stylesheet" type="text/css" href="../css/printReport.css" />';
var titleHTML = tableStr;// document.getElementById("printGridfff").innerHTML;
var newwin = window.open('about:blank', '', '');
newwin.document.write(head);
newwin.document.write(titleHTML);
newwin.document.location.reload();
newwin.print();
// newwin.close();
}
tfMoney = function(v, sign) {
if (!sign)
sign = '';
v = (Math.round((v - 0) * 100)) / 100;
v = (v == Math.floor(v)) ? v | identifier_body |
index.go | package stored
import (
"bytes"
"errors"
"fmt"
"reflect"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"github.com/mmcloughlin/geohash"
)
// Index represents an index that stored maintains for an object
type Index struct {
Name string
Unique bool
Geo int // geohash precision used to encode the location; 0 means this is not a geo index
search bool // true for text-search indexes, where each word is indexed separately
dir directory.DirectorySubspace
valueDir directory.DirectorySubspace
object *Object
optional bool
fields []*Field
handle func(interface{}) KeyTuple
checkHandler func(obj interface{}) bool
}
// IndexOption is an option struct which allows setting different index options
type IndexOption struct {
// CheckHandler determines whether the index should be written for a specific object
CheckHandler func(obj interface{}) bool
}
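// Usage sketch (assumption: you already hold an *Index value for one of your
// objects, and User is an application type, not something defined here):
//
//	idx.Options(IndexOption{
//		CheckHandler: func(obj interface{}) bool {
//			u, ok := obj.(*User)
//			return ok && !u.Deleted // returning false skips writing this index for the object
//		},
//	})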
func (i *Index) isEmpty(input *Struct) bool {
for _, field := range i.fields {
if !field.isEmpty(input.Get(field)) {
return false
}
}
return true
}
// getKey will return index tuple
func (i *Index) getKey(input *Struct) (key tuple.Tuple) {
if i.handle != nil {
keyTuple := i.handle(input.value.Interface())
// Would not index object if key is empty
if keyTuple == nil || len(keyTuple) == 0 {
return nil
}
tmpTuple := tuple.Tuple{}
for _, element := range keyTuple {
tmpTuple = append(tmpTuple, element)
}
// an embedded tuple causes problems with partial fetching
key = tmpTuple
} else {
key = tuple.Tuple{}
if i.Geo != 0 {
latInterface := input.Get(i.fields[0])
lngInterface := input.Get(i.fields[1])
lat, long := latInterface.(float64), lngInterface.(float64)
if lat == 0.0 && long == 0.0 {
return nil
}
hash := geohash.Encode(lat, long)
if i.Geo < 12 {
hash = hash[0:i.Geo] // Cutting hash to needed precision
}
key = append(key, hash)
} else {
//key = tuple.Tuple{indexValue}
for _, field := range i.fields {
indexValue := input.Get(field)
key = append(key, field.tupleElement(indexValue))
}
}
}
return
}
func (i *Index) needValueStore() bool {
if i.handle != nil {
return true
}
return false
}
// getOldKey is just a wrapper around getKey, except when the index has a handle, in which case the key can change dynamically
func (i *Index) getOldKey(tr fdb.Transaction, primaryTuple tuple.Tuple, oldObject *Struct) (tuple.Tuple, error) {
if i.needValueStore() { // the index is custom
bites, err := tr.Get(i.valueDir.Pack(primaryTuple)).Get()
if err != nil {
return nil, err
}
oldKey, err := tuple.Unpack(bites)
if err != nil {
return nil, err
}
return oldKey, nil
}
return i.getKey(oldObject), nil
}
// writeSearch will set new index keys and delete old ones for text search index
func (i *Index) writeSearch(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
newWords := searchGetInputWords(i, input)
toAddWords := map[string]bool{}
skip := false
if i.checkHandler != nil {
if !i.checkHandler(input.value.Interface()) {
//fmt.Println("skipping index")
skip = true
}
// old value is better to delete any way
}
if !skip {
for _, word := range newWords {
toAddWords[word] = true
}
fmt.Println("index words >>", newWords)
}
toDeleteWords := map[string]bool{}
if oldObject != nil {
oldWords := searchGetInputWords(i, oldObject)
for _, word := range oldWords {
_, ok := toAddWords[word]
if ok {
delete(toAddWords, word)
} else {
toDeleteWords[word] = true
}
}
}
for word := range toAddWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
fmt.Println("write search key", fullKey, "packed", i.dir.Pack(fullKey))
tr.Set(i.dir.Pack(fullKey), []byte{})
}
for word := range toDeleteWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
tr.Clear(i.dir.Pack(fullKey))
}
return nil
}
// Write writes index related keys
func (i *Index) Write(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
if i.search {
return i.writeSearch(tr, primaryTuple, input, oldObject)
}
key := i.getKey(input)
if oldObject != nil {
toDelete, err := i.getOldKey(tr, primaryTuple, oldObject)
if err != nil { // if error fetching old index - throw it right here
return err
}
if toDelete != nil {
if reflect.DeepEqual(toDelete, key) {
return nil
}
i.Delete(tr, primaryTuple, toDelete)
}
}
if i.optional && i.isEmpty(input) { // no need to delete any index then
return nil
}
// nil means should not index this object
if key == nil {
return nil
}
if i.Unique {
previousPromise := tr.Get(i.dir.Pack(key))
tr.Set(i.dir.Pack(key), primaryTuple.Pack()) // will be cancelled in case of error
previousBytes, err := previousPromise.Get()
if err != nil {
return err
}
if len(previousBytes) != 0 {
if !bytes.Equal(primaryTuple.Pack(), previousBytes) {
return ErrAlreadyExist
}
}
} else {
fullKey := append(key, primaryTuple...)
tr.Set(i.dir.Pack(fullKey), []byte{})
}
if i.needValueStore() {
tr.Set(i.valueDir.Pack(primaryTuple), key.Pack())
}
return nil
}
// Delete removes selected index
func (i *Index) Delete(tr fdb.Transaction, primaryTuple tuple.Tuple, key tuple.Tuple) {
if key == nil {
fmt.Println("index key is NIL strange behavior")
// no need to clean, this field wasn't indexed
return
}
sub := i.dir.Sub(key...)
if i.Unique {
fmt.Println("+++ delete the index", sub)
tr.Clear(sub)
} else {
// Add primary here
sub = sub.Sub(primaryTuple...)
tr.Clear(sub) // removing old keys
}
}
func (i *Index) getIterator(tr fdb.ReadTransaction, q *Query) (subspace.Subspace, *fdb.RangeIterator) {
if i.Unique {
i.object.panic("index is unique (lists not supported)")
}
//if len(q.primary) != 0 {
sub := i.dir.Sub(q.primary...)
start, end := sub.FDBRangeKeys()
if q.from != nil {
//start = sub.Sub(q.from...)
if q.reverse {
end = sub.Pack(q.from)
} else {
start = sub.Pack(q.from)
}
if q.to != nil {
if q.reverse {
start = sub.Pack(q.to)
} else {
end = sub.Pack(q.to)
}
}
}
r := fdb.KeyRange{Begin: start, End: end}
rangeResult := tr.GetRange(r, fdb.RangeOptions{Mode: fdb.StreamingModeWantAll, Limit: q.limit, Reverse: q.reverse})
iterator := rangeResult.Iterator()
return sub, iterator
}
// getList will fetch and request all the objects using the index
func (i *Index) getList(tr fdb.ReadTransaction, q *Query) ([]*needObject, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*needObject{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
values = append(values, i.object.need(tr, i.object.sub(key)))
}
return values, nil
}
// getPrimariesList will fetch just a list of primaries
func (i *Index) getPrimariesList(tr fdb.ReadTransaction, q *Query) (*Slice, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*Value{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
value := Value{object: i.object}
value.fromKeyTuple(key)
values = append(values, &value)
}
return &Slice{values: values}, nil
}
func (i *Index) getPrimary(tr fdb.ReadTransaction, indexKey tuple.Tuple) (subspace.Subspace, error) {
sub := i.dir.Sub(indexKey...)
if i.Unique {
bytes, err := tr.Get(sub).Get()
if err != nil {
return nil, err
}
if len(bytes) == 0 {
return nil, ErrNotFound
}
primaryTuple, err := tuple.Unpack(bytes)
if err != nil {
return nil, err
}
return i.object.primary.Sub(primaryTuple...), nil
}
sel := fdb.FirstGreaterThan(sub)
primaryKey, err := tr.GetKey(sel).Get()
if err != nil {
return nil, err
}
primaryTuple, err := sub.Unpack(primaryKey)
//primary, err := UnpackKeyIndex(indexKey, primaryKey)
if err != nil || len(primaryTuple) < 1 {
return nil, ErrNotFound
}
return i.object.primary.Sub(primaryTuple...), nil
}
// ReindexUnsafe will update index info (NOT a consistency-safe function)
// this function will use data provided by the object so it should be used with care
func (i *Index) ReindexUnsafe(data interface{}) *PromiseErr {
input := structAny(data)
p := i.object.promiseErr()
p.do(func() Chain {
primaryTuple := input.getPrimary(i.object)
err := i.Write(p.tr, primaryTuple, input, nil)
if err != nil {
return p.fail(err)
}
return p.done(nil)
})
return p
}
func (i *Index) doClearAll(tr fdb.Transaction) |
// ClearAll will remove all data for specific index
func (i *Index) ClearAll() error {
_, err := i.object.db.Transact(func(tr fdb.Transaction) (ret interface{}, e error) {
i.doClearAll(tr)
return
})
return err
}
// Reindex will reindex index data
func (i *Index) Reindex() {
i.ClearAll()
object := i.object
query := object.ListAll().Limit(100)
errorCount := 0
for query.Next() {
query.Slice().Each(func(item interface{}) {
input := structAny(item)
primaryTuple := input.getPrimary(object)
_, err := object.db.Transact(func(tr fdb.Transaction) (ret interface{}, e error) {
/*sub := object.sub(primaryTuple)
needed := object.need(tr, sub)
value, err := needed.fetch()
var oldObject *Struct
if err != ErrNotFound {
if err != nil {
return
}
err = value.Err()
if err != nil {
return
}
oldObject = structAny(value.Interface())
}*/
//err = i.Write(tr, primaryTuple, input, oldObject)
err := i.Write(tr, primaryTuple, input, nil) // write everything
return nil, err
})
if err != nil {
fmt.Println("reindex fail of object «"+object.name+"»:", err)
errorCount++
}
})
}
if errorCount > 0 {
fmt.Printf("Reindex finished with %d errors\n", errorCount)
} else {
fmt.Println("Reindex successfully finished")
}
}
// SetOption allow to set option
func (i *Index) SetOption(option IndexOption) {
if option.CheckHandler != nil {
i.checkHandler = option.CheckHandler
}
}
// Options allow to set list of options
func (i *Index) Options(options ...IndexOption) {
for _, option := range options {
i.SetOption(option)
}
}
| {
start, end := i.dir.FDBRangeKeys()
tr.ClearRange(fdb.KeyRange{Begin: start, End: end})
} | identifier_body |
index.go | package stored
import (
"bytes"
"errors"
"fmt"
"reflect"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"github.com/mmcloughlin/geohash"
)
// Index represents an index that stored maintains for an object
type Index struct {
Name string
Unique bool
Geo int // geohash precision used to encode the location; 0 means this is not a geo index
search bool // true for text-search indexes, where each word is indexed separately
dir directory.DirectorySubspace
valueDir directory.DirectorySubspace
object *Object
optional bool
fields []*Field
handle func(interface{}) KeyTuple
checkHandler func(obj interface{}) bool
}
// IndexOption is an option struct which allows setting different index options
type IndexOption struct {
// CheckHandler determines whether the index should be written for a specific object
CheckHandler func(obj interface{}) bool
}
func (i *Index) isEmpty(input *Struct) bool {
for _, field := range i.fields {
if !field.isEmpty(input.Get(field)) {
return false
}
}
return true
}
// getKey will return index tuple
func (i *Index) getKey(input *Struct) (key tuple.Tuple) {
if i.handle != nil {
keyTuple := i.handle(input.value.Interface())
// Would not index object if key is empty
if keyTuple == nil || len(keyTuple) == 0 {
return nil
}
tmpTuple := tuple.Tuple{}
for _, element := range keyTuple {
tmpTuple = append(tmpTuple, element)
}
// an embedded tuple causes problems with partial fetching
key = tmpTuple
} else {
key = tuple.Tuple{}
if i.Geo != 0 {
latInterface := input.Get(i.fields[0])
lngInterface := input.Get(i.fields[1])
lat, long := latInterface.(float64), lngInterface.(float64)
if lat == 0.0 && long == 0.0 {
return nil
} | hash = hash[0:i.Geo] // Cutting hash to needed precision
}
key = append(key, hash)
} else {
//key = tuple.Tuple{indexValue}
for _, field := range i.fields {
indexValue := input.Get(field)
key = append(key, field.tupleElement(indexValue))
}
}
}
return
}
func (i *Index) needValueStore() bool {
if i.handle != nil {
return true
}
return false
}
// getOldKey is just a wrapper around getKey, except when the index has a handle, in which case the key can change dynamically
func (i *Index) getOldKey(tr fdb.Transaction, primaryTuple tuple.Tuple, oldObject *Struct) (tuple.Tuple, error) {
if i.needValueStore() { // the index is custom
bites, err := tr.Get(i.valueDir.Pack(primaryTuple)).Get()
if err != nil {
return nil, err
}
oldKey, err := tuple.Unpack(bites)
if err != nil {
return nil, err
}
return oldKey, nil
}
return i.getKey(oldObject), nil
}
// writeSearch will set new index keys and delete old ones for text search index
func (i *Index) writeSearch(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
newWords := searchGetInputWords(i, input)
toAddWords := map[string]bool{}
skip := false
if i.checkHandler != nil {
if !i.checkHandler(input.value.Interface()) {
//fmt.Println("skipping index")
skip = true
}
// old value is better to delete any way
}
if !skip {
for _, word := range newWords {
toAddWords[word] = true
}
fmt.Println("index words >>", newWords)
}
toDeleteWords := map[string]bool{}
if oldObject != nil {
oldWords := searchGetInputWords(i, oldObject)
for _, word := range oldWords {
_, ok := toAddWords[word]
if ok {
delete(toAddWords, word)
} else {
toDeleteWords[word] = true
}
}
}
for word := range toAddWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
fmt.Println("write search key", fullKey, "packed", i.dir.Pack(fullKey))
tr.Set(i.dir.Pack(fullKey), []byte{})
}
for word := range toDeleteWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
tr.Clear(i.dir.Pack(fullKey))
}
return nil
}
// Write writes index related keys
func (i *Index) Write(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
if i.search {
return i.writeSearch(tr, primaryTuple, input, oldObject)
}
key := i.getKey(input)
if oldObject != nil {
toDelete, err := i.getOldKey(tr, primaryTuple, oldObject)
if err != nil { // if error fetching old index - throw it right here
return err
}
if toDelete != nil {
if reflect.DeepEqual(toDelete, key) {
return nil
}
i.Delete(tr, primaryTuple, toDelete)
}
}
if i.optional && i.isEmpty(input) { // no need to delete any index then
return nil
}
// nil means should not index this object
if key == nil {
return nil
}
if i.Unique {
previousPromise := tr.Get(i.dir.Pack(key))
tr.Set(i.dir.Pack(key), primaryTuple.Pack()) // will be cancelled in case of error
previousBytes, err := previousPromise.Get()
if err != nil {
return err
}
if len(previousBytes) != 0 {
if !bytes.Equal(primaryTuple.Pack(), previousBytes) {
return ErrAlreadyExist
}
}
} else {
fullKey := append(key, primaryTuple...)
tr.Set(i.dir.Pack(fullKey), []byte{})
}
if i.needValueStore() {
tr.Set(i.valueDir.Pack(primaryTuple), key.Pack())
}
return nil
}
// Delete removes selected index
func (i *Index) Delete(tr fdb.Transaction, primaryTuple tuple.Tuple, key tuple.Tuple) {
if key == nil {
fmt.Println("index key is NIL strange behavior")
// no need to clean, this field wasn't indexed
return
}
sub := i.dir.Sub(key...)
if i.Unique {
fmt.Println("+++ delete the index", sub)
tr.Clear(sub)
} else {
// Add primary here
sub = sub.Sub(primaryTuple...)
tr.Clear(sub) // removing old keys
}
}
func (i *Index) getIterator(tr fdb.ReadTransaction, q *Query) (subspace.Subspace, *fdb.RangeIterator) {
if i.Unique {
i.object.panic("index is unique (lists not supported)")
}
//if len(q.primary) != 0 {
sub := i.dir.Sub(q.primary...)
start, end := sub.FDBRangeKeys()
if q.from != nil {
//start = sub.Sub(q.from...)
if q.reverse {
end = sub.Pack(q.from)
} else {
start = sub.Pack(q.from)
}
if q.to != nil {
if q.reverse {
start = sub.Pack(q.to)
} else {
end = sub.Pack(q.to)
}
}
}
r := fdb.KeyRange{Begin: start, End: end}
rangeResult := tr.GetRange(r, fdb.RangeOptions{Mode: fdb.StreamingModeWantAll, Limit: q.limit, Reverse: q.reverse})
iterator := rangeResult.Iterator()
return sub, iterator
}
// getList will fetch and request all the objects using the index
func (i *Index) getList(tr fdb.ReadTransaction, q *Query) ([]*needObject, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*needObject{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
values = append(values, i.object.need(tr, i.object.sub(key)))
}
return values, nil
}
// getPrimariesList will fetch just a list of primaries
func (i *Index) getPrimariesList(tr fdb.ReadTransaction, q *Query) (*Slice, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*Value{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
value := Value{object: i.object}
value.fromKeyTuple(key)
values = append(values, &value)
}
return &Slice{values: values}, nil
}
func (i *Index) getPrimary(tr fdb.ReadTransaction, indexKey tuple.Tuple) (subspace.Subspace, error) {
sub := i.dir.Sub(indexKey...)
if i.Unique {
bytes, err := tr.Get(sub).Get()
if err != nil {
return nil, err
}
if len(bytes) == 0 {
return nil, ErrNotFound
}
primaryTuple, err := tuple.Unpack(bytes)
if err != nil {
return nil, err
}
return i.object.primary.Sub(primaryTuple...), nil
}
sel := fdb.FirstGreaterThan(sub)
primaryKey, err := tr.GetKey(sel).Get()
if err != nil {
return nil, err
}
primaryTuple, err := sub.Unpack(primaryKey)
//primary, err := UnpackKeyIndex(indexKey, primaryKey)
if err != nil || len(primaryTuple) < 1 {
return nil, ErrNotFound
}
return i.object.primary.Sub(primaryTuple...), nil
}
// ReindexUnsafe will update index info (NOT a consistency-safe function)
// this function will use data provided by the object so it should be used with care
func (i *Index) ReindexUnsafe(data interface{}) *PromiseErr {
input := structAny(data)
p := i.object.promiseErr()
p.do(func() Chain {
primaryTuple := input.getPrimary(i.object)
err := i.Write(p.tr, primaryTuple, input, nil)
if err != nil {
return p.fail(err)
}
return p.done(nil)
})
return p
}
func (i *Index) doClearAll(tr fdb.Transaction) {
start, end := i.dir.FDBRangeKeys()
tr.ClearRange(fdb.KeyRange{Begin: start, End: end})
}
// ClearAll will remove all data for specific index
func (i *Index) ClearAll() error {
_, err := i.object.db.Transact(func(tr fdb.Transaction) (ret interface{}, e error) {
i.doClearAll(tr)
return
})
return err
}
// Reindex will reindex index data
func (i *Index) Reindex() {
i.ClearAll()
object := i.object
query := object.ListAll().Limit(100)
errorCount := 0
for query.Next() {
query.Slice().Each(func(item interface{}) {
input := structAny(item)
primaryTuple := input.getPrimary(object)
_, err := object.db.Transact(func(tr fdb.Transaction) (ret interface{}, e error) {
/*sub := object.sub(primaryTuple)
needed := object.need(tr, sub)
value, err := needed.fetch()
var oldObject *Struct
if err != ErrNotFound {
if err != nil {
return
}
err = value.Err()
if err != nil {
return
}
oldObject = structAny(value.Interface())
}*/
//err = i.Write(tr, primaryTuple, input, oldObject)
err := i.Write(tr, primaryTuple, input, nil) // write everything
return nil, err
})
if err != nil {
fmt.Println("reindex fail of object «"+object.name+"»:", err)
errorCount++
}
})
}
if errorCount > 0 {
fmt.Printf("Reindex finished with %d errors\n", errorCount)
} else {
fmt.Println("Reindex successfully finished")
}
}
// SetOption allow to set option
func (i *Index) SetOption(option IndexOption) {
if option.CheckHandler != nil {
i.checkHandler = option.CheckHandler
}
}
// Options allow to set list of options
func (i *Index) Options(options ...IndexOption) {
for _, option := range options {
i.SetOption(option)
}
} | hash := geohash.Encode(lat, long)
if i.Geo < 12 { | random_line_split |
index.go | package stored
import (
"bytes"
"errors"
"fmt"
"reflect"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"github.com/mmcloughlin/geohash"
)
// Index represents an index that stored maintains for an object
type Index struct {
Name string
Unique bool
Geo int // geohash precision used to encode the location; 0 means this is not a geo index
search bool // true for text-search indexes, where each word is indexed separately
dir directory.DirectorySubspace
valueDir directory.DirectorySubspace
object *Object
optional bool
fields []*Field
handle func(interface{}) KeyTuple
checkHandler func(obj interface{}) bool
}
// IndexOption is an option struct which allows setting different index options
type IndexOption struct {
// CheckHandler determines whether the index should be written for a specific object
CheckHandler func(obj interface{}) bool
}
func (i *Index) isEmpty(input *Struct) bool {
for _, field := range i.fields {
if !field.isEmpty(input.Get(field)) {
return false
}
}
return true
}
// getKey will return index tuple
func (i *Index) getKey(input *Struct) (key tuple.Tuple) {
if i.handle != nil {
keyTuple := i.handle(input.value.Interface())
// Would not index object if key is empty
if keyTuple == nil || len(keyTuple) == 0 {
return nil
}
tmpTuple := tuple.Tuple{}
for _, element := range keyTuple {
tmpTuple = append(tmpTuple, element)
}
// an embedded tuple causes problems with partial fetching
key = tmpTuple
} else {
key = tuple.Tuple{}
if i.Geo != 0 {
latInterface := input.Get(i.fields[0])
lngInterface := input.Get(i.fields[1])
lat, long := latInterface.(float64), lngInterface.(float64)
if lat == 0.0 && long == 0.0 {
return nil
}
hash := geohash.Encode(lat, long)
if i.Geo < 12 {
hash = hash[0:i.Geo] // Cutting hash to needed precision
}
key = append(key, hash)
} else {
//key = tuple.Tuple{indexValue}
for _, field := range i.fields {
indexValue := input.Get(field)
key = append(key, field.tupleElement(indexValue))
}
}
}
return
}
func (i *Index) | () bool {
if i.handle != nil {
return true
}
return false
}
// getOldKey is just a wrapper around getKey, except when the index has a handle, in which case the key can change dynamically
func (i *Index) getOldKey(tr fdb.Transaction, primaryTuple tuple.Tuple, oldObject *Struct) (tuple.Tuple, error) {
if i.needValueStore() { // the index is custom
bites, err := tr.Get(i.valueDir.Pack(primaryTuple)).Get()
if err != nil {
return nil, err
}
oldKey, err := tuple.Unpack(bites)
if err != nil {
return nil, err
}
return oldKey, nil
}
return i.getKey(oldObject), nil
}
// writeSearch will set new index keys and delete old ones for text search index
func (i *Index) writeSearch(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
newWords := searchGetInputWords(i, input)
toAddWords := map[string]bool{}
skip := false
if i.checkHandler != nil {
if !i.checkHandler(input.value.Interface()) {
//fmt.Println("skipping index")
skip = true
}
// old value is better to delete any way
}
if !skip {
for _, word := range newWords {
toAddWords[word] = true
}
fmt.Println("index words >>", newWords)
}
toDeleteWords := map[string]bool{}
if oldObject != nil {
oldWords := searchGetInputWords(i, oldObject)
for _, word := range oldWords {
_, ok := toAddWords[word]
if ok {
delete(toAddWords, word)
} else {
toDeleteWords[word] = true
}
}
}
for word := range toAddWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
fmt.Println("write search key", fullKey, "packed", i.dir.Pack(fullKey))
tr.Set(i.dir.Pack(fullKey), []byte{})
}
for word := range toDeleteWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
tr.Clear(i.dir.Pack(fullKey))
}
return nil
}
// Write writes index related keys
func (i *Index) Write(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
if i.search {
return i.writeSearch(tr, primaryTuple, input, oldObject)
}
key := i.getKey(input)
if oldObject != nil {
toDelete, err := i.getOldKey(tr, primaryTuple, oldObject)
if err != nil { // if error fetching old index - throw it right here
return err
}
if toDelete != nil {
if reflect.DeepEqual(toDelete, key) {
return nil
}
i.Delete(tr, primaryTuple, toDelete)
}
}
if i.optional && i.isEmpty(input) { // no need to delete any index then
return nil
}
// nil means should not index this object
if key == nil {
return nil
}
if i.Unique {
previousPromise := tr.Get(i.dir.Pack(key))
tr.Set(i.dir.Pack(key), primaryTuple.Pack()) // will be cancelled in case of error
previousBytes, err := previousPromise.Get()
if err != nil {
return err
}
if len(previousBytes) != 0 {
if !bytes.Equal(primaryTuple.Pack(), previousBytes) {
return ErrAlreadyExist
}
}
} else {
fullKey := append(key, primaryTuple...)
tr.Set(i.dir.Pack(fullKey), []byte{})
}
if i.needValueStore() {
tr.Set(i.valueDir.Pack(primaryTuple), key.Pack())
}
return nil
}
// Delete removes selected index
func (i *Index) Delete(tr fdb.Transaction, primaryTuple tuple.Tuple, key tuple.Tuple) {
if key == nil {
fmt.Println("index key is NIL strange behavior")
// no need to clean, this field wasn't indexed
return
}
sub := i.dir.Sub(key...)
if i.Unique {
fmt.Println("+++ delete the index", sub)
tr.Clear(sub)
} else {
// Add primary here
sub = sub.Sub(primaryTuple...)
tr.Clear(sub) // removing old keys
}
}
func (i *Index) getIterator(tr fdb.ReadTransaction, q *Query) (subspace.Subspace, *fdb.RangeIterator) {
if i.Unique {
i.object.panic("index is unique (lists not supported)")
}
//if len(q.primary) != 0 {
sub := i.dir.Sub(q.primary...)
start, end := sub.FDBRangeKeys()
if q.from != nil {
//start = sub.Sub(q.from...)
if q.reverse {
end = sub.Pack(q.from)
} else {
start = sub.Pack(q.from)
}
if q.to != nil {
if q.reverse {
start = sub.Pack(q.to)
} else {
end = sub.Pack(q.to)
}
}
}
r := fdb.KeyRange{Begin: start, End: end}
rangeResult := tr.GetRange(r, fdb.RangeOptions{Mode: fdb.StreamingModeWantAll, Limit: q.limit, Reverse: q.reverse})
iterator := rangeResult.Iterator()
return sub, iterator
}
// getList will fetch and request all the objects using the index
func (i *Index) getList(tr fdb.ReadTransaction, q *Query) ([]*needObject, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*needObject{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
values = append(values, i.object.need(tr, i.object.sub(key)))
}
return values, nil
}
// getPrimariesList will fetch just a list of primaries
func (i *Index) getPrimariesList(tr fdb.ReadTransaction, q *Query) (*Slice, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*Value{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
value := Value{object: i.object}
value.fromKeyTuple(key)
values = append(values, &value)
}
return &Slice{values: values}, nil
}
func (i *Index) getPrimary(tr fdb.ReadTransaction, indexKey tuple.Tuple) (subspace.Subspace, error) {
sub := i.dir.Sub(indexKey...)
if i.Unique {
bytes, err := tr.Get(sub).Get()
if err != nil {
return nil, err
}
if len(bytes) == 0 {
return nil, ErrNotFound
}
primaryTuple, err := tuple.Unpack(bytes)
if err != nil {
return nil, err
}
return i.object.primary.Sub(primaryTuple...), nil
}
sel := fdb.FirstGreaterThan(sub)
primaryKey, err := tr.GetKey(sel).Get()
if err != nil {
return nil, err
}
primaryTuple, err := sub.Unpack(primaryKey)
//primary, err := UnpackKeyIndex(indexKey, primaryKey)
if err != nil || len(primaryTuple) < 1 {
return nil, ErrNotFound
}
return i.object.primary.Sub(primaryTuple...), nil
}
// ReindexUnsafe will update index info (NOT a consistency-safe function)
// this function will use data provided by the object so it should be used with care
func (i *Index) ReindexUnsafe(data interface{}) *PromiseErr {
input := structAny(data)
p := i.object.promiseErr()
p.do(func() Chain {
primaryTuple := input.getPrimary(i.object)
err := i.Write(p.tr, primaryTuple, input, nil)
if err != nil {
return p.fail(err)
}
return p.done(nil)
})
return p
}
func (i *Index) doClearAll(tr fdb.Transaction) {
start, end := i.dir.FDBRangeKeys()
tr.ClearRange(fdb.KeyRange{Begin: start, End: end})
}
// ClearAll will remove all data for specific index
func (i *Index) ClearAll() error {
_, err := i.object.db.Transact(func(tr fdb.Transaction) (ret interface{}, e error) {
i.doClearAll(tr)
return
})
return err
}
// Reindex will reindex index data
func (i *Index) Reindex() {
i.ClearAll()
object := i.object
query := object.ListAll().Limit(100)
errorCount := 0
for query.Next() {
query.Slice().Each(func(item interface{}) {
input := structAny(item)
primaryTuple := input.getPrimary(object)
_, err := object.db.Transact(func(tr fdb.Transaction) (ret interface{}, e error) {
/*sub := object.sub(primaryTuple)
needed := object.need(tr, sub)
value, err := needed.fetch()
var oldObject *Struct
if err != ErrNotFound {
if err != nil {
return
}
err = value.Err()
if err != nil {
return
}
oldObject = structAny(value.Interface())
}*/
//err = i.Write(tr, primaryTuple, input, oldObject)
err := i.Write(tr, primaryTuple, input, nil) // write everything
return nil, err
})
if err != nil {
fmt.Println("reindex fail of object «"+object.name+"»:", err)
errorCount++
}
})
}
if errorCount > 0 {
fmt.Printf("Reindex finished with %d errors\n", errorCount)
} else {
fmt.Println("Reindex successfully finished")
}
}
// SetOption allow to set option
func (i *Index) SetOption(option IndexOption) {
if option.CheckHandler != nil {
i.checkHandler = option.CheckHandler
}
}
// Options allow to set list of options
func (i *Index) Options(options ...IndexOption) {
for _, option := range options {
i.SetOption(option)
}
}
| needValueStore | identifier_name |
index.go | package stored
import (
"bytes"
"errors"
"fmt"
"reflect"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"github.com/mmcloughlin/geohash"
)
// Index represents an index that stored maintains for an object
type Index struct {
Name string
Unique bool
Geo int // geohash precision used to encode the location; 0 means this is not a geo index
search bool // true for text-search indexes, where each word is indexed separately
dir directory.DirectorySubspace
valueDir directory.DirectorySubspace
object *Object
optional bool
fields []*Field
handle func(interface{}) KeyTuple
checkHandler func(obj interface{}) bool
}
// IndexOption is an option struct which allows setting different index options
type IndexOption struct {
// CheckHandler determines whether the index should be written for a specific object
CheckHandler func(obj interface{}) bool
}
func (i *Index) isEmpty(input *Struct) bool {
for _, field := range i.fields {
if !field.isEmpty(input.Get(field)) {
return false
}
}
return true
}
// getKey will return index tuple
func (i *Index) getKey(input *Struct) (key tuple.Tuple) {
if i.handle != nil {
keyTuple := i.handle(input.value.Interface())
// Would not index object if key is empty
if keyTuple == nil || len(keyTuple) == 0 {
return nil
}
tmpTuple := tuple.Tuple{}
for _, element := range keyTuple {
tmpTuple = append(tmpTuple, element)
}
// an embedded tuple causes problems with partial fetching
key = tmpTuple
} else {
key = tuple.Tuple{}
if i.Geo != 0 {
latInterface := input.Get(i.fields[0])
lngInterface := input.Get(i.fields[1])
lat, long := latInterface.(float64), lngInterface.(float64)
if lat == 0.0 && long == 0.0 {
return nil
}
hash := geohash.Encode(lat, long)
if i.Geo < 12 {
hash = hash[0:i.Geo] // Cutting hash to needed precision
}
key = append(key, hash)
} else {
//key = tuple.Tuple{indexValue}
for _, field := range i.fields {
indexValue := input.Get(field)
key = append(key, field.tupleElement(indexValue))
}
}
}
return
}
func (i *Index) needValueStore() bool {
if i.handle != nil {
return true
}
return false
}
// getOldKey is just a wrapper around getKey, except when the index has a handle, in which case the key can change dynamically
func (i *Index) getOldKey(tr fdb.Transaction, primaryTuple tuple.Tuple, oldObject *Struct) (tuple.Tuple, error) {
if i.needValueStore() { // the index is custom
bites, err := tr.Get(i.valueDir.Pack(primaryTuple)).Get()
if err != nil {
return nil, err
}
oldKey, err := tuple.Unpack(bites)
if err != nil {
return nil, err
}
return oldKey, nil
}
return i.getKey(oldObject), nil
}
// writeSearch will set new index keys and delete old ones for text search index
func (i *Index) writeSearch(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
newWords := searchGetInputWords(i, input)
toAddWords := map[string]bool{}
skip := false
if i.checkHandler != nil {
if !i.checkHandler(input.value.Interface()) {
//fmt.Println("skipping index")
skip = true
}
// old value is better to delete any way
}
if !skip {
for _, word := range newWords {
toAddWords[word] = true
}
fmt.Println("index words >>", newWords)
}
toDeleteWords := map[string]bool{}
if oldObject != nil {
oldWords := searchGetInputWords(i, oldObject)
for _, word := range oldWords {
_, ok := toAddWords[word]
if ok {
delete(toAddWords, word)
} else {
toDeleteWords[word] = true
}
}
}
for word := range toAddWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
fmt.Println("write search key", fullKey, "packed", i.dir.Pack(fullKey))
tr.Set(i.dir.Pack(fullKey), []byte{})
}
for word := range toDeleteWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
tr.Clear(i.dir.Pack(fullKey))
}
return nil
}
// Write writes index related keys
func (i *Index) Write(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
if i.search {
return i.writeSearch(tr, primaryTuple, input, oldObject)
}
key := i.getKey(input)
if oldObject != nil {
toDelete, err := i.getOldKey(tr, primaryTuple, oldObject)
if err != nil { // if error fetching old index - throw it right here
return err
}
if toDelete != nil {
if reflect.DeepEqual(toDelete, key) {
return nil
}
i.Delete(tr, primaryTuple, toDelete)
}
}
if i.optional && i.isEmpty(input) |
// nil means should not index this object
if key == nil {
return nil
}
if i.Unique {
previousPromise := tr.Get(i.dir.Pack(key))
tr.Set(i.dir.Pack(key), primaryTuple.Pack()) // will be cancelled in case of error
previousBytes, err := previousPromise.Get()
if err != nil {
return err
}
if len(previousBytes) != 0 {
if !bytes.Equal(primaryTuple.Pack(), previousBytes) {
return ErrAlreadyExist
}
}
} else {
fullKey := append(key, primaryTuple...)
tr.Set(i.dir.Pack(fullKey), []byte{})
}
if i.needValueStore() {
tr.Set(i.valueDir.Pack(primaryTuple), key.Pack())
}
return nil
}
// Delete removes selected index
func (i *Index) Delete(tr fdb.Transaction, primaryTuple tuple.Tuple, key tuple.Tuple) {
if key == nil {
fmt.Println("index key is NIL strange behavior")
// no need to clean, this field wasn't indexed
return
}
sub := i.dir.Sub(key...)
if i.Unique {
fmt.Println("+++ delete the index", sub)
tr.Clear(sub)
} else {
// Add primary here
sub = sub.Sub(primaryTuple...)
tr.Clear(sub) // removing old keys
}
}
func (i *Index) getIterator(tr fdb.ReadTransaction, q *Query) (subspace.Subspace, *fdb.RangeIterator) {
if i.Unique {
i.object.panic("index is unique (lists not supported)")
}
//if len(q.primary) != 0 {
sub := i.dir.Sub(q.primary...)
start, end := sub.FDBRangeKeys()
if q.from != nil {
//start = sub.Sub(q.from...)
if q.reverse {
end = sub.Pack(q.from)
} else {
start = sub.Pack(q.from)
}
if q.to != nil {
if q.reverse {
start = sub.Pack(q.to)
} else {
end = sub.Pack(q.to)
}
}
}
r := fdb.KeyRange{Begin: start, End: end}
rangeResult := tr.GetRange(r, fdb.RangeOptions{Mode: fdb.StreamingModeWantAll, Limit: q.limit, Reverse: q.reverse})
iterator := rangeResult.Iterator()
return sub, iterator
}
// getList will fetch and request all the objects using the index
func (i *Index) getList(tr fdb.ReadTransaction, q *Query) ([]*needObject, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*needObject{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
values = append(values, i.object.need(tr, i.object.sub(key)))
}
return values, nil
}
// getPrimariesList will fetch just a list of primaries
func (i *Index) getPrimariesList(tr fdb.ReadTransaction, q *Query) (*Slice, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*Value{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
value := Value{object: i.object}
value.fromKeyTuple(key)
values = append(values, &value)
}
return &Slice{values: values}, nil
}
func (i *Index) getPrimary(tr fdb.ReadTransaction, indexKey tuple.Tuple) (subspace.Subspace, error) {
sub := i.dir.Sub(indexKey...)
if i.Unique {
bytes, err := tr.Get(sub).Get()
if err != nil {
return nil, err
}
if len(bytes) == 0 {
return nil, ErrNotFound
}
primaryTuple, err := tuple.Unpack(bytes)
if err != nil {
return nil, err
}
return i.object.primary.Sub(primaryTuple...), nil
}
sel := fdb.FirstGreaterThan(sub)
primaryKey, err := tr.GetKey(sel).Get()
if err != nil {
return nil, err
}
primaryTuple, err := sub.Unpack(primaryKey)
//primary, err := UnpackKeyIndex(indexKey, primaryKey)
if err != nil || len(primaryTuple) < 1 {
return nil, ErrNotFound
}
return i.object.primary.Sub(primaryTuple...), nil
}
// ReindexUnsafe will update index info (NOT a consistency-safe function)
// this function uses data provided by the object, so it should be used with care
func (i *Index) ReindexUnsafe(data interface{}) *PromiseErr {
input := structAny(data)
p := i.object.promiseErr()
p.do(func() Chain {
primaryTuple := input.getPrimary(i.object)
err := i.Write(p.tr, primaryTuple, input, nil)
if err != nil {
return p.fail(err)
}
return p.done(nil)
})
return p
}
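// doClearAll clears every key stored under the index directory within the given transaction.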
func (i *Index) doClearAll(tr fdb.Transaction) {
start, end := i.dir.FDBRangeKeys()
tr.ClearRange(fdb.KeyRange{Begin: start, End: end})
}
// ClearAll will remove all data for a specific index
func (i *Index) ClearAll() error {
_, err := i.object.db.Transact(func(tr fdb.Transaction) (ret interface{}, e error) {
i.doClearAll(tr)
return
})
return err
}
// Reindex will clear the index and rebuild its data from all stored objects
func (i *Index) Reindex() {
i.ClearAll()
object := i.object
query := object.ListAll().Limit(100)
errorCount := 0
for query.Next() {
query.Slice().Each(func(item interface{}) {
input := structAny(item)
primaryTuple := input.getPrimary(object)
_, err := object.db.Transact(func(tr fdb.Transaction) (ret interface{}, e error) {
/*sub := object.sub(primaryTuple)
needed := object.need(tr, sub)
value, err := needed.fetch()
var oldObject *Struct
if err != ErrNotFound {
if err != nil {
return
}
err = value.Err()
if err != nil {
return
}
oldObject = structAny(value.Interface())
}*/
//err = i.Write(tr, primaryTuple, input, oldObject)
err := i.Write(tr, primaryTuple, input, nil) // write everything
return nil, err
})
if err != nil {
fmt.Println("reindex fail of object «"+object.name+"»:", err)
errorCount++
}
})
}
if errorCount > 0 {
fmt.Printf("Reindex finished with %d errors\n", errorCount)
} else {
fmt.Println("Reindex successfully finished")
}
}
// SetOption allows setting a single index option
func (i *Index) SetOption(option IndexOption) {
if option.CheckHandler != nil {
i.checkHandler = option.CheckHandler
}
}
// Options allows setting a list of index options
func (i *Index) Options(options ...IndexOption) {
for _, option := range options {
i.SetOption(option)
}
}
| { // no need to delete any index then
return nil
} | conditional_block |
main.go | package main
import (
"flag"
"io"
"io/ioutil"
"os"
"path"
"runtime"
"strconv"
"context"
"expvar"
"fmt"
"net"
"net/http"
"os/signal"
"strings"
"sync"
"syscall"
"time"
"encoding/json"
"github.com/gorilla/mux"
"github.com/quentin-m/etcd-cloud-operator/pkg/etcd"
"github.com/signalfx/gateway/config"
"github.com/signalfx/gateway/dp/dpbuffered"
"github.com/signalfx/gateway/internal-metrics"
"github.com/signalfx/gateway/logkey"
"github.com/signalfx/gateway/protocol"
"github.com/signalfx/gateway/protocol/demultiplexer"
"github.com/signalfx/gateway/protocol/signalfx"
_ "github.com/signalfx/go-metrics"
"github.com/signalfx/golib/datapoint"
"github.com/signalfx/golib/datapoint/dpsink"
"github.com/signalfx/golib/errors"
"github.com/signalfx/golib/eventcounter"
"github.com/signalfx/golib/httpdebug"
"github.com/signalfx/golib/log"
"github.com/signalfx/golib/reportsha"
"github.com/signalfx/golib/sfxclient"
"github.com/signalfx/golib/timekeeper"
"github.com/signalfx/golib/trace"
"github.com/signalfx/golib/web"
_ "github.com/signalfx/ondiskencoding"
_ "github.com/spaolacci/murmur3"
"gopkg.in/natefinch/lumberjack.v2"
_ "net/http/pprof"
)
var (
// Version is set by a build flag to the built version
Version = "0.9.10+"
// BuildDate is set by a build flag to the date of the build
BuildDate = ""
)
func writePidFile(pidFileName string) error {
pid := os.Getpid()
return ioutil.WriteFile(pidFileName, []byte(strconv.FormatInt(int64(pid), 10)), os.FileMode(0644))
}
// getCommaSeparatedStringEnvVar returns the given env var key's value split by comma or the default values
func getCommaSeparatedStringEnvVar(envVar string, def []string) []string {
if val := os.Getenv(envVar); val != "" {
def = def[:0]
for _, addr := range strings.Split(strings.Replace(val, " ", "", -1), ",") {
def = append(def, addr)
}
}
return def
}
// getStringEnvVar returns the given env var key's value or the default value
func getStringEnvVar(envVar string, def string) string {
if val := os.Getenv(envVar); val != "" {
return val
}
return def
}
// getDurationEnvVar returns the given env var key's value or the default value
func getDurationEnvVar(envVar string, def time.Duration) time.Duration {
if strVal := os.Getenv(envVar); strVal != "" {
if dur, err := time.ParseDuration(strVal); err == nil {
return dur
}
}
return def
}
func isStringInSlice(target string, strs []string) bool {
for _, addr := range strs {
if addr == target {
return true
}
}
return false
}
type gatewayFlags struct {
configFileName string
}
type etcdManager struct {
etcd.ServerConfig
logger log.Logger
removeTimeout time.Duration
operation string
targetCluster []string
server *etcd.Server
client *etcd.Client
}
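// setup copies cluster settings from the loaded gateway config into the etcd server
// configuration, letting SFX_* environment variables override individual config values.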
func (mgr *etcdManager) setup(conf *config.GatewayConfig) {
mgr.LPAddress = getStringEnvVar("SFX_LISTEN_ON_PEER_ADDRESS", *conf.ListenOnPeerAddress)
mgr.APAddress = getStringEnvVar("SFX_ADVERTISE_PEER_ADDRESS", *conf.AdvertisePeerAddress)
mgr.LCAddress = getStringEnvVar("SFX_LISTEN_ON_CLIENT_ADDRESS", *conf.ListenOnClientAddress)
mgr.ACAddress = getStringEnvVar("SFX_ADVERTISE_CLIENT_ADDRESS", *conf.AdvertiseClientAddress)
mgr.MAddress = getStringEnvVar("SFX_ETCD_METRICS_ADDRESS", *conf.ETCDMetricsAddress)
mgr.UnhealthyMemberTTL = getDurationEnvVar("SFX_UNHEALTHY_MEMBER_TTL", *conf.UnhealthyMemberTTL)
mgr.removeTimeout = getDurationEnvVar("SFX_REMOVE_MEMBER_TIMEOUT", *conf.RemoveMemberTimeout)
mgr.DataDir = getStringEnvVar("SFX_CLUSTER_DATA_DIR", *conf.ClusterDataDir)
mgr.Name = getStringEnvVar("SFX_SERVER_NAME", *conf.ServerName)
mgr.ServerConfig.Name = mgr.Name
// if already set, then a command line flag was provided and takes precedence
if mgr.operation == "" {
mgr.operation = getStringEnvVar("SFX_CLUSTER_OPERATION", *conf.ClusterOperation)
}
mgr.targetCluster = getCommaSeparatedStringEnvVar("SFX_TARGET_CLUSTER_ADDRESSES", conf.TargetClusterAddresses)
}
func (mgr *etcdManager) start() (err error) {
// use a default server name if one is not provided
if mgr.ServerConfig.Name == "" {
mgr.ServerConfig.Name = fmt.Sprintf("%s", mgr.ServerConfig.ACAddress)
}
mgr.server = etcd.NewServer(mgr.ServerConfig)
switch strings.ToLower(mgr.operation) {
case "": // this is a valid option and means we shouldn't run etcd
return
case "seed":
mgr.logger.Log(fmt.Sprintf("starting etcd server %s to seed cluster", mgr.ServerConfig.Name))
if err = mgr.server.Seed(nil); err == nil {
if !isStringInSlice(mgr.AdvertisedClientAddress(), mgr.targetCluster) {
mgr.targetCluster = append(mgr.targetCluster, mgr.AdvertisedClientAddress())
}
mgr.client, err = etcd.NewClient(mgr.targetCluster, etcd.SecurityConfig{}, true)
}
case "join":
mgr.logger.Log(fmt.Sprintf("joining cluster with etcd server name: %s", mgr.ServerConfig.Name))
if mgr.client, err = etcd.NewClient(mgr.targetCluster, etcd.SecurityConfig{}, true); err == nil {
mgr.logger.Log(fmt.Sprintf("joining etcd cluster @ %s", mgr.client.Endpoints()))
if err = mgr.server.Join(mgr.client); err == nil {
mgr.logger.Log(fmt.Sprintf("successfully joined cluster at %s", mgr.targetCluster))
}
}
default:
err = fmt.Errorf("unsupported cluster-op specified \"%s\"", mgr.operation)
}
return err
}
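// getMemberID looks up this instance's member ID in the etcd member list by matching
// the configured server name.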
func (mgr *etcdManager) getMemberID(ctx context.Context) (uint64, error) {
var memberID uint64
// use the client to retrieve this instance's member id
members, err := mgr.client.MemberList(ctx)
if members != nil {
for _, m := range members.Members {
if m.Name == mgr.Name {
memberID = m.ID
}
}
}
return memberID, err
}
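// removeMember removes this gateway from the etcd cluster, bounded by the configured
// remove timeout; it is a no-op when the embedded etcd server is not running.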
func (mgr *etcdManager) removeMember() error {
var err error
var memberID uint64
ctx, cancel := context.WithTimeout(context.Background(), mgr.removeTimeout)
defer cancel()
// only remove yourself from the cluster if the server is running
if mgr.server.IsRunning() {
if memberID, err = mgr.getMemberID(ctx); err == nil {
removed := make(chan error, 1)
go func() {
defer close(removed)
removed <- mgr.client.RemoveMember(mgr.Name, memberID)
}()
select {
case err = <-removed:
cancel()
case <-ctx.Done():
}
if ctx.Err() != nil {
err = ctx.Err()
}
}
}
return err
}
func (mgr *etcdManager) shutdown(graceful bool) (err error) {
if mgr.server.IsRunning() {
// stop the etcd server
mgr.server.Stop(graceful, false) // graceful shutdown true, snapshot false
}
if mgr.client != nil {
// close the client if applicable
err = mgr.client.Close()
}
return err
}
type gateway struct {
flags gatewayFlags
listeners []protocol.Listener
forwarders []protocol.Forwarder
logger log.Logger
setupDoneSignal chan struct{}
tk timekeeper.TimeKeeper
debugServer *httpdebug.Server
debugServerListener net.Listener
internalMetricsServer *internal.Collector
internalMetricsListener net.Listener
stdout io.Writer
gomaxprocs func(int) int
debugContext web.HeaderCtxFlag
debugSink dpsink.ItemFlagger
ctxDims log.CtxDimensions
signalChan chan os.Signal
config *config.GatewayConfig
etcdMgr *etcdManager
versionMetric reportsha.SHA1Reporter
}
var mainInstance = gateway{
tk: timekeeper.RealTime{},
logger: log.DefaultLogger.CreateChild(),
stdout: os.Stdout,
gomaxprocs: runtime.GOMAXPROCS,
debugContext: web.HeaderCtxFlag{
HeaderName: "X-Debug-Id",
},
debugSink: dpsink.ItemFlagger{
EventMetaName: "dbg_events",
MetricDimensionName: "sf_metric",
},
signalChan: make(chan os.Signal, 1),
etcdMgr: &etcdManager{ServerConfig: etcd.ServerConfig{}, logger: log.DefaultLogger.CreateChild()},
}
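// init registers the command-line flags for the config file path and the optional
// cluster operation override.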
func init() {
flag.StringVar(&mainInstance.flags.configFileName, "configfile", "sf/gateway.conf", "Name of the db gateway configuration file")
flag.StringVar(&mainInstance.etcdMgr.operation, "cluster-op", "", "operation to perform if running in cluster mode [\"seed\", \"join\", \"\"] this overrides the ClusterOperation set in the config file")
}
func (p *gateway) getLogOutput(loadedConfig *config.GatewayConfig) io.Writer {
logDir := *loadedConfig.LogDir
if logDir == "-" {
p.logger.Log("Sending logging to stdout")
return p.stdout
}
logMaxSize := *loadedConfig.LogMaxSize
logMaxBackups := *loadedConfig.LogMaxBackups
lumberjackLogger := &lumberjack.Logger{
Filename: path.Join(logDir, "gateway.log"),
MaxSize: logMaxSize, // megabytes
MaxBackups: logMaxBackups,
}
p.logger.Log(logkey.Filename, lumberjackLogger.Filename, logkey.Dir, os.TempDir(), "Logging redirect setup")
return lumberjackLogger
}
func (p *gateway) getLogger(loadedConfig *config.GatewayConfig) log.Logger {
out := p.getLogOutput(loadedConfig)
useJSON := *loadedConfig.LogFormat == "json"
if useJSON {
return log.NewJSONLogger(out, log.DefaultErrorHandler)
}
return log.NewLogfmtLogger(out, log.DefaultErrorHandler)
}
func forwarderName(f *config.ForwardTo) string {
if f.Name != nil {
return *f.Name
}
return f.Type
}
var errDupeForwarder = errors.New("cannot duplicate forwarder names or types without names")
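// setupForwarders builds a buffered, counted forwarder for every ForwardTo entry in the
// config, registers its stats with the scheduler, and rejects duplicate forwarder names.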
func setupForwarders(ctx context.Context, tk timekeeper.TimeKeeper, loader *config.Loader, loadedConfig *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler, Checker *dpsink.ItemFlagger, cdim *log.CtxDimensions, manager *etcdManager) ([]protocol.Forwarder, error) {
allForwarders := make([]protocol.Forwarder, 0, len(loadedConfig.ForwardTo))
nameMap := make(map[string]bool) | forwardConfig.Server = manager.server
forwardConfig.Client = manager.client
forwardConfig.ClusterName = loadedConfig.ClusterName
forwardConfig.AdditionalDimensions = datapoint.AddMaps(loadedConfig.AdditionalDimensions, forwardConfig.AdditionalDimensions)
forwarder, err := loader.Forwarder(forwardConfig)
if err != nil {
return nil, err
}
name := forwarderName(forwardConfig)
if nameMap[name] {
logger.Log(fmt.Sprintf("Cannot add two forwarders with name '%s' or two unnamed forwarders of same type", name))
return nil, errDupeForwarder
}
nameMap[name] = true
logCtx = logCtx.With(logkey.Name, name)
// Buffering -> counting -> (forwarder)
limitedLogger := &log.RateLimitedLogger{
EventCounter: eventcounter.New(tk.Now(), time.Second),
Limit: 16,
Logger: logCtx,
Now: tk.Now,
}
dcount := &dpsink.Counter{
Logger: limitedLogger,
}
count := signalfx.UnifyNextSinkWrap(dcount)
endingSink := signalfx.FromChain(forwarder, signalfx.NextWrap(count))
bconf := &dpbuffered.Config{
Checker: Checker,
BufferSize: forwardConfig.BufferSize,
MaxTotalDatapoints: forwardConfig.BufferSize,
MaxTotalEvents: forwardConfig.BufferSize,
MaxTotalSpans: forwardConfig.BufferSize,
MaxDrainSize: forwardConfig.MaxDrainSize,
NumDrainingThreads: forwardConfig.DrainingThreads,
Name: forwardConfig.Name,
Cdim: cdim,
}
bf := dpbuffered.NewBufferedForwarder(ctx, bconf, endingSink, forwarder.Close, forwarder.StartupFinished, limitedLogger)
allForwarders = append(allForwarders, bf)
groupName := fmt.Sprintf("%s_f_%d", name, idx)
scheduler.AddGroupedCallback(groupName, forwarder)
scheduler.AddGroupedCallback(groupName, bf)
scheduler.AddGroupedCallback(groupName, dcount)
scheduler.GroupedDefaultDimensions(groupName, datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"name": name,
"direction": "forwarder",
"source": "gateway",
"host": *loadedConfig.ServerName,
"type": forwardConfig.Type,
"cluster": *loadedConfig.ClusterName,
}))
}
return allForwarders, nil
}
var errDupeListener = errors.New("cannot duplicate listener names or types without names")
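// setupListeners creates a listener for every ListenFrom entry, wiring each one through
// a counting sink into the shared multiplexer and registering per-listener metrics.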
func setupListeners(tk timekeeper.TimeKeeper, hostname string, loadedConfig *config.GatewayConfig, loader *config.Loader, listenFrom []*config.ListenFrom, multiplexer signalfx.Sink, logger log.Logger, scheduler *sfxclient.Scheduler) ([]protocol.Listener, error) {
listeners := make([]protocol.Listener, 0, len(listenFrom))
nameMap := make(map[string]bool)
for idx, listenConfig := range listenFrom {
logCtx := log.NewContext(logger).With(logkey.Protocol, listenConfig.Type, logkey.Direction, "listener")
name := func() string {
if listenConfig.Name != nil {
return *listenConfig.Name
}
return listenConfig.Type
}()
if nameMap[name] {
logger.Log(fmt.Sprintf("Cannot add two listeners with name '%s' or two unnamed listners of same type", name))
return nil, errDupeListener
}
nameMap[name] = true
count := &dpsink.Counter{
Logger: &log.RateLimitedLogger{
EventCounter: eventcounter.New(tk.Now(), time.Second),
Limit: 16,
Logger: logCtx,
Now: tk.Now,
},
}
endingSink := signalfx.FromChain(multiplexer, signalfx.NextWrap(signalfx.UnifyNextSinkWrap(count)))
listener, err := loader.Listener(endingSink, listenConfig)
if err != nil {
logCtx.Log(log.Err, err, "unable to load config")
return nil, err
}
listeners = append(listeners, listener)
groupName := fmt.Sprintf("%s_l_%d", name, idx)
scheduler.AddGroupedCallback(groupName, listener)
scheduler.AddGroupedCallback(groupName, count)
scheduler.GroupedDefaultDimensions(groupName, datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"name": name,
"direction": "listener",
"source": "gateway",
"host": hostname,
"type": listenConfig.Type,
"cluster": *loadedConfig.ClusterName,
}))
}
return listeners, nil
}
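// splitSinks fans the forwarders out into the datapoint, event, and trace sink slices
// expected by the demultiplexer.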
func splitSinks(forwarders []protocol.Forwarder) ([]dpsink.DSink, []dpsink.ESink, []trace.Sink) {
dsinks := make([]dpsink.DSink, 0, len(forwarders))
esinks := make([]dpsink.ESink, 0, len(forwarders))
tsinks := make([]trace.Sink, 0, len(forwarders))
for _, f := range forwarders {
dsinks = append(dsinks, f)
esinks = append(esinks, f)
tsinks = append(tsinks, f)
}
return dsinks, esinks, tsinks
}
func (p *gateway) setupInternalMetricsServer(conf *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler) error {
if conf.InternalMetricsListenerAddress == nil {
return nil
}
listener, err := net.Listen("tcp", *conf.InternalMetricsListenerAddress)
if err != nil {
return errors.Annotate(err, "cannot setup internal metrics server")
}
p.internalMetricsListener = listener
collector := internal.NewCollector(logger, scheduler)
handler := mux.NewRouter()
handler.Path("/internal-metrics").HandlerFunc(collector.MetricsHandler)
p.internalMetricsServer = collector
go func() {
err := http.Serve(listener, handler)
logger.Log(log.Err, err, "Finished serving internal metrics server")
}()
return nil
}
func (p *gateway) setupDebugServer(conf *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler) error {
if conf.LocalDebugServer == nil {
return nil
}
listener, err := net.Listen("tcp", *conf.LocalDebugServer)
if err != nil {
return errors.Annotate(err, "cannot setup debug server")
}
p.debugServerListener = listener
p.debugServer = httpdebug.New(&httpdebug.Config{
Logger: log.NewContext(logger).With(logkey.Protocol, "debugserver"),
ExplorableObj: p,
})
p.debugServer.Mux.Handle("/debug/dims", &p.debugSink)
p.debugServer.Exp2.Exported["config"] = conf.Var()
p.debugServer.Exp2.Exported["datapoints"] = scheduler.Var()
p.debugServer.Exp2.Exported["goruntime"] = expvar.Func(func() interface{} {
return runtime.Version()
})
p.debugServer.Exp2.Exported["debugdims"] = p.debugSink.Var()
p.debugServer.Exp2.Exported["gateway_version"] = expvar.Func(func() interface{} {
return Version
})
p.debugServer.Exp2.Exported["build_date"] = expvar.Func(func() interface{} {
return BuildDate
})
p.debugServer.Exp2.Exported["source"] = expvar.Func(func() interface{} {
return fmt.Sprintf("https://github.com/signalfx/gateway/tree/%s", Version)
})
go func() {
err := p.debugServer.Serve(listener)
logger.Log(log.Err, err, "Finished serving debug server")
}()
return nil
}
func setupGoMaxProcs(numProcs *int, gomaxprocs func(int) int) {
if numProcs != nil {
gomaxprocs(*numProcs)
} else {
numProcs := runtime.NumCPU()
gomaxprocs(numProcs)
}
}
func (p *gateway) gracefulShutdown() (err error) {
p.logger.Log("Starting graceful shutdown")
totalWaitTime := p.tk.After(*p.config.MaxGracefulWaitTimeDuration)
errs := make([]error, len(p.listeners)+len(p.forwarders)+1)
// close health checks on all first
for _, l := range p.listeners {
l.CloseHealthCheck()
}
// defer close of listeners and forwarders till we exit
defer func() {
p.logger.Log("close listeners")
for _, l := range p.listeners {
errs = append(errs, l.Close())
}
log.IfErr(p.logger, errors.NewMultiErr(errs))
p.logger.Log("Graceful shutdown done")
}()
p.logger.Log("Waiting for connections to drain")
startingTimeGood := p.tk.Now()
for {
select {
case <-totalWaitTime:
totalPipeline := p.Pipeline()
if totalPipeline > 0 {
p.logger.Log(logkey.TotalPipeline, totalPipeline, "Connections never drained. This could be bad ...")
}
return
case <-p.tk.After(*p.config.GracefulCheckIntervalDuration):
now := p.tk.Now()
totalPipeline := p.Pipeline()
p.logger.Log(logkey.TotalPipeline, totalPipeline, "Waking up for graceful shutdown")
if totalPipeline > 0 {
p.logger.Log(logkey.TotalPipeline, totalPipeline, "Items are still draining")
startingTimeGood = now
continue
}
if now.Sub(startingTimeGood) >= *p.config.SilentGracefulTimeDuration {
p.logger.Log(logkey.TotalPipeline, totalPipeline, "I've been silent. Graceful shutdown done")
return
}
}
}
}
func (p *gateway) Pipeline() int64 {
var totalForwarded int64
for _, f := range p.forwarders {
totalForwarded += f.Pipeline()
}
return totalForwarded
}
func (p *gateway) Close() error {
errs := make([]error, 0, len(p.forwarders)+1)
for _, f := range p.forwarders {
errs = append(errs, f.Close())
}
if p.etcdMgr != nil && p.etcdMgr.server != nil {
errs = append(errs, p.etcdMgr.removeMember())
errs = append(errs, p.etcdMgr.shutdown(true)) // shutdown the etcd server and close the client
}
if p.debugServer != nil {
errs = append(errs, p.debugServerListener.Close())
}
if p.internalMetricsServer != nil {
errs = append(errs, p.internalMetricsListener.Close())
}
return errors.NewMultiErr(errs)
}
func (p *gateway) main(ctx context.Context) error {
// Disable the default logger to make sure nobody else uses it
err := p.run(ctx)
return errors.NewMultiErr([]error{err, p.Close()})
}
func (p *gateway) setup(loadedConfig *config.GatewayConfig) {
if loadedConfig.DebugFlag != nil && *loadedConfig.DebugFlag != "" {
p.debugContext.SetFlagStr(*loadedConfig.DebugFlag)
}
p.config = loadedConfig
p.logger = log.NewContext(p.getLogger(loadedConfig)).With(logkey.Time, log.DefaultTimestamp, logkey.Caller, log.DefaultCaller)
p.debugSink.Logger = p.logger
log.DefaultLogger.Set(p.logger)
pidFilename := *loadedConfig.PidFilename
if err := writePidFile(pidFilename); err != nil {
p.logger.Log(log.Err, err, logkey.Filename, pidFilename, "cannot store pid in pid file")
}
defer func() {
log.IfErr(p.logger, os.Remove(pidFilename))
}()
defer func() {
log.DefaultLogger.Set(log.Discard)
}()
}
func (p *gateway) createCommonHTTPChain(loadedConfig *config.GatewayConfig) web.NextConstructor {
h := web.HeadersInRequest{
Headers: map[string]string{
"X-Gateway-Name": *loadedConfig.ServerName,
},
}
cf := &web.CtxWithFlag{
CtxFlagger: &p.ctxDims,
HeaderName: "X-Response-Id",
}
return web.NextConstructor(func(ctx context.Context, rw http.ResponseWriter, r *http.Request, next web.ContextHandler) {
cf.ServeHTTPC(ctx, rw, r, h.CreateMiddleware(next))
})
}
func (p *gateway) setupScheduler(loadedConfig *config.GatewayConfig) *sfxclient.Scheduler {
scheduler := sfxclient.NewScheduler()
scheduler.AddCallback(sfxclient.GoMetricsSource)
scheduler.DefaultDimensions(datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"source": "gateway",
"host": *loadedConfig.ServerName,
"cluster": *loadedConfig.ClusterName,
}))
return scheduler
}
func (p *gateway) scheduleStatCollection(ctx context.Context, scheduler *sfxclient.Scheduler, loadedConfig *config.GatewayConfig, multiplexer signalfx.Sink) (context.Context, context.CancelFunc) {
// We still want to schedule stat collection so people can debug the server if they want
scheduler.Sink = dpsink.Discard
scheduler.ReportingDelayNs = (time.Second * 30).Nanoseconds()
finishedContext, cancelFunc := context.WithCancel(ctx)
if loadedConfig.StatsDelayDuration != nil && *loadedConfig.StatsDelayDuration != 0 {
scheduler.Sink = multiplexer
scheduler.ReportingDelayNs = loadedConfig.StatsDelayDuration.Nanoseconds()
} else {
p.logger.Log("skipping stat keeping")
}
return finishedContext, cancelFunc
}
func (p *gateway) setupForwardersAndListeners(ctx context.Context, loader *config.Loader, loadedConfig *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler) (signalfx.Sink, error) {
var err error
p.forwarders, err = setupForwarders(ctx, p.tk, loader, loadedConfig, logger, scheduler, &p.debugSink, &p.ctxDims, p.etcdMgr)
if err != nil {
p.logger.Log(log.Err, err, "Unable to setup forwarders")
return nil, errors.Annotate(err, "unable to setup forwarders")
}
dpSinks, eSinks, tSinks := splitSinks(p.forwarders)
dmux := &demultiplexer.Demultiplexer{
DatapointSinks: dpSinks,
EventSinks: eSinks,
TraceSinks: tSinks,
Logger: log.NewOnePerSecond(logger),
LateDuration: loadedConfig.LateThresholdDuration,
FutureDuration: loadedConfig.FutureThresholdDuration,
}
scheduler.AddCallback(dmux)
p.versionMetric.RepoURL = "https://github.com/signalfx/gateway"
p.versionMetric.FileName = "/buildInfo.json"
scheduler.AddCallback(&p.versionMetric)
multiplexer := signalfx.FromChain(dmux, signalfx.NextWrap(signalfx.UnifyNextSinkWrap(&p.debugSink)))
p.listeners, err = setupListeners(p.tk, *loadedConfig.ServerName, loadedConfig, loader, loadedConfig.ListenFrom, multiplexer, logger, scheduler)
if err != nil {
p.logger.Log(log.Err, err, "Unable to setup listeners")
return nil, errors.Annotate(err, "cannot setup listeners from configuration")
}
var errs []error
for _, f := range p.forwarders {
err = f.StartupFinished()
errs = append(errs, err)
log.IfErr(logger, err)
}
return multiplexer, FirstNonNil(errs...)
}
func (p *gateway) run(ctx context.Context) error {
p.debugSink.CtxFlagCheck = &p.debugContext
p.logger.Log(logkey.ConfigFile, p.flags.configFileName, "Looking for config file")
p.logger.Log(logkey.Env, strings.Join(os.Environ(), "-"), "Looking for config file")
loadedConfig, err := config.Load(p.flags.configFileName, p.logger)
if err != nil {
p.logger.Log(log.Err, err, "Unable to load config")
return err
}
p.setup(loadedConfig)
p.versionMetric.Logger = p.logger
logger := p.logger
scheduler := p.setupScheduler(loadedConfig)
if err := p.setupDebugServer(loadedConfig, logger, scheduler); err != nil {
p.logger.Log(log.Err, "debug server failed", err)
return err
}
if err := p.setupInternalMetricsServer(loadedConfig, logger, scheduler); err != nil {
p.logger.Log(log.Err, "internal metrics server failed", err)
return err
}
p.etcdMgr.setup(loadedConfig)
if err := p.etcdMgr.start(); err != nil {
p.logger.Log(log.Err, "unable to start etcd server", err)
return err
}
var bb []byte
if bb, err = json.Marshal(loadedConfig); err == nil {
logger.Log(logkey.Config, string(bb), logkey.Env, strings.Join(os.Environ(), "-"), "config loaded")
}
setupGoMaxProcs(loadedConfig.NumProcs, p.gomaxprocs)
chain := p.createCommonHTTPChain(loadedConfig)
loader := config.NewLoader(ctx, logger, Version, &p.debugContext, &p.debugSink, &p.ctxDims, chain)
multiplexer, err := p.setupForwardersAndListeners(ctx, loader, loadedConfig, logger, scheduler)
if err == nil {
finishedContext, cancelFunc := p.scheduleStatCollection(ctx, scheduler, loadedConfig, multiplexer)
// Schedule datapoint collection to a Discard sink so we can get the stats in Expvar()
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
err := scheduler.Schedule(finishedContext)
logger.Log(log.Err, err, logkey.Struct, "scheduler", "Schedule finished")
wg.Done()
}()
if p.setupDoneSignal != nil {
close(p.setupDoneSignal)
}
logger.Log("Setup done. Blocking!")
select {
case <-ctx.Done():
case <-p.signalChan:
err = p.gracefulShutdown()
}
cancelFunc()
wg.Wait()
}
return err
}
var flagParse = flag.Parse
func main() {
flagParse()
signal.Notify(mainInstance.signalChan, syscall.SIGTERM)
log.IfErr(log.DefaultLogger, mainInstance.main(context.Background()))
}
// FirstNonNil returns what it says it does
func FirstNonNil(errs ...error) error {
for _, err := range errs {
if err != nil {
return err
}
}
return nil
} | for idx, forwardConfig := range loadedConfig.ForwardTo {
logCtx := log.NewContext(logger).With(logkey.Protocol, forwardConfig.Type, logkey.Direction, "forwarder") | random_line_split |
main.go | package main
import (
"flag"
"io"
"io/ioutil"
"os"
"path"
"runtime"
"strconv"
"context"
"expvar"
"fmt"
"net"
"net/http"
"os/signal"
"strings"
"sync"
"syscall"
"time"
"encoding/json"
"github.com/gorilla/mux"
"github.com/quentin-m/etcd-cloud-operator/pkg/etcd"
"github.com/signalfx/gateway/config"
"github.com/signalfx/gateway/dp/dpbuffered"
"github.com/signalfx/gateway/internal-metrics"
"github.com/signalfx/gateway/logkey"
"github.com/signalfx/gateway/protocol"
"github.com/signalfx/gateway/protocol/demultiplexer"
"github.com/signalfx/gateway/protocol/signalfx"
_ "github.com/signalfx/go-metrics"
"github.com/signalfx/golib/datapoint"
"github.com/signalfx/golib/datapoint/dpsink"
"github.com/signalfx/golib/errors"
"github.com/signalfx/golib/eventcounter"
"github.com/signalfx/golib/httpdebug"
"github.com/signalfx/golib/log"
"github.com/signalfx/golib/reportsha"
"github.com/signalfx/golib/sfxclient"
"github.com/signalfx/golib/timekeeper"
"github.com/signalfx/golib/trace"
"github.com/signalfx/golib/web"
_ "github.com/signalfx/ondiskencoding"
_ "github.com/spaolacci/murmur3"
"gopkg.in/natefinch/lumberjack.v2"
_ "net/http/pprof"
)
var (
// Version is set by a build flag to the built version
Version = "0.9.10+"
// BuildDate is set by a build flag to the date of the build
BuildDate = ""
)
func writePidFile(pidFileName string) error {
pid := os.Getpid()
return ioutil.WriteFile(pidFileName, []byte(strconv.FormatInt(int64(pid), 10)), os.FileMode(0644))
}
// getCommaSeparatedStringEnvVar returns the given env var key's value split by comma or the default values
func getCommaSeparatedStringEnvVar(envVar string, def []string) []string {
if val := os.Getenv(envVar); val != "" {
def = def[:0]
for _, addr := range strings.Split(strings.Replace(val, " ", "", -1), ",") {
def = append(def, addr)
}
}
return def
}
// getStringEnvVar returns the given env var key's value or the default value
func getStringEnvVar(envVar string, def string) string {
if val := os.Getenv(envVar); val != "" {
return val
}
return def
}
// getDurationEnvVar returns the given env var key's value or the default value
func getDurationEnvVar(envVar string, def time.Duration) time.Duration {
if strVal := os.Getenv(envVar); strVal != "" {
if dur, err := time.ParseDuration(strVal); err == nil {
return dur
}
}
return def
}
func isStringInSlice(target string, strs []string) bool {
for _, addr := range strs {
if addr == target {
return true
}
}
return false
}
type gatewayFlags struct {
configFileName string
}
type etcdManager struct {
etcd.ServerConfig
logger log.Logger
removeTimeout time.Duration
operation string
targetCluster []string
server *etcd.Server
client *etcd.Client
}
func (mgr *etcdManager) setup(conf *config.GatewayConfig) {
mgr.LPAddress = getStringEnvVar("SFX_LISTEN_ON_PEER_ADDRESS", *conf.ListenOnPeerAddress)
mgr.APAddress = getStringEnvVar("SFX_ADVERTISE_PEER_ADDRESS", *conf.AdvertisePeerAddress)
mgr.LCAddress = getStringEnvVar("SFX_LISTEN_ON_CLIENT_ADDRESS", *conf.ListenOnClientAddress)
mgr.ACAddress = getStringEnvVar("SFX_ADVERTISE_CLIENT_ADDRESS", *conf.AdvertiseClientAddress)
mgr.MAddress = getStringEnvVar("SFX_ETCD_METRICS_ADDRESS", *conf.ETCDMetricsAddress)
mgr.UnhealthyMemberTTL = getDurationEnvVar("SFX_UNHEALTHY_MEMBER_TTL", *conf.UnhealthyMemberTTL)
mgr.removeTimeout = getDurationEnvVar("SFX_REMOVE_MEMBER_TIMEOUT", *conf.RemoveMemberTimeout)
mgr.DataDir = getStringEnvVar("SFX_CLUSTER_DATA_DIR", *conf.ClusterDataDir)
mgr.Name = getStringEnvVar("SFX_SERVER_NAME", *conf.ServerName)
mgr.ServerConfig.Name = mgr.Name
// if already set, then a command line flag was provided and takes precedence
if mgr.operation == "" {
mgr.operation = getStringEnvVar("SFX_CLUSTER_OPERATION", *conf.ClusterOperation)
}
mgr.targetCluster = getCommaSeparatedStringEnvVar("SFX_TARGET_CLUSTER_ADDRESSES", conf.TargetClusterAddresses)
}
func (mgr *etcdManager) start() (err error) {
// use a default server name if one is not provided
if mgr.ServerConfig.Name == "" {
mgr.ServerConfig.Name = fmt.Sprintf("%s", mgr.ServerConfig.ACAddress)
}
mgr.server = etcd.NewServer(mgr.ServerConfig)
switch strings.ToLower(mgr.operation) {
case "": // this is a valid option and means we shouldn't run etcd
return
case "seed":
mgr.logger.Log(fmt.Sprintf("starting etcd server %s to seed cluster", mgr.ServerConfig.Name))
if err = mgr.server.Seed(nil); err == nil {
if !isStringInSlice(mgr.AdvertisedClientAddress(), mgr.targetCluster) {
mgr.targetCluster = append(mgr.targetCluster, mgr.AdvertisedClientAddress())
}
mgr.client, err = etcd.NewClient(mgr.targetCluster, etcd.SecurityConfig{}, true)
}
case "join":
mgr.logger.Log(fmt.Sprintf("joining cluster with etcd server name: %s", mgr.ServerConfig.Name))
if mgr.client, err = etcd.NewClient(mgr.targetCluster, etcd.SecurityConfig{}, true); err == nil {
mgr.logger.Log(fmt.Sprintf("joining etcd cluster @ %s", mgr.client.Endpoints()))
if err = mgr.server.Join(mgr.client); err == nil {
mgr.logger.Log(fmt.Sprintf("successfully joined cluster at %s", mgr.targetCluster))
}
}
default:
err = fmt.Errorf("unsupported cluster-op specified \"%s\"", mgr.operation)
}
return err
}
func (mgr *etcdManager) getMemberID(ctx context.Context) (uint64, error) {
var memberID uint64
// use the client to retrieve this instance's member id
members, err := mgr.client.MemberList(ctx)
if members != nil {
for _, m := range members.Members {
if m.Name == mgr.Name {
memberID = m.ID
}
}
}
return memberID, err
}
func (mgr *etcdManager) removeMember() error {
var err error
var memberID uint64
ctx, cancel := context.WithTimeout(context.Background(), mgr.removeTimeout)
defer cancel()
// only remove yourself from the cluster if the server is running
if mgr.server.IsRunning() {
if memberID, err = mgr.getMemberID(ctx); err == nil {
removed := make(chan error, 1)
go func() {
defer close(removed)
removed <- mgr.client.RemoveMember(mgr.Name, memberID)
}()
select {
case err = <-removed:
cancel()
case <-ctx.Done():
}
if ctx.Err() != nil {
err = ctx.Err()
}
}
}
return err
}
func (mgr *etcdManager) shutdown(graceful bool) (err error) {
if mgr.server.IsRunning() {
// stop the etcd server
mgr.server.Stop(graceful, false) // graceful shutdown true, snapshot false
}
if mgr.client != nil {
// close the client if applicable
err = mgr.client.Close()
}
return err
}
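// gateway holds the state for one running instance: the loaded config, listeners,
// forwarders, the optional debug, internal-metrics, and embedded etcd servers, plus
// logging and shutdown plumbing.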
type gateway struct {
flags gatewayFlags
listeners []protocol.Listener
forwarders []protocol.Forwarder
logger log.Logger
setupDoneSignal chan struct{}
tk timekeeper.TimeKeeper
debugServer *httpdebug.Server
debugServerListener net.Listener
internalMetricsServer *internal.Collector
internalMetricsListener net.Listener
stdout io.Writer
gomaxprocs func(int) int
debugContext web.HeaderCtxFlag
debugSink dpsink.ItemFlagger
ctxDims log.CtxDimensions
signalChan chan os.Signal
config *config.GatewayConfig
etcdMgr *etcdManager
versionMetric reportsha.SHA1Reporter
}
var mainInstance = gateway{
tk: timekeeper.RealTime{},
logger: log.DefaultLogger.CreateChild(),
stdout: os.Stdout,
gomaxprocs: runtime.GOMAXPROCS,
debugContext: web.HeaderCtxFlag{
HeaderName: "X-Debug-Id",
},
debugSink: dpsink.ItemFlagger{
EventMetaName: "dbg_events",
MetricDimensionName: "sf_metric",
},
signalChan: make(chan os.Signal, 1),
etcdMgr: &etcdManager{ServerConfig: etcd.ServerConfig{}, logger: log.DefaultLogger.CreateChild()},
}
func init() {
flag.StringVar(&mainInstance.flags.configFileName, "configfile", "sf/gateway.conf", "Name of the db gateway configuration file")
flag.StringVar(&mainInstance.etcdMgr.operation, "cluster-op", "", "operation to perform if running in cluster mode [\"seed\", \"join\", \"\"] this overrides the ClusterOperation set in the config file")
}
func (p *gateway) getLogOutput(loadedConfig *config.GatewayConfig) io.Writer {
logDir := *loadedConfig.LogDir
if logDir == "-" {
p.logger.Log("Sending logging to stdout")
return p.stdout
}
logMaxSize := *loadedConfig.LogMaxSize
logMaxBackups := *loadedConfig.LogMaxBackups
lumberjackLogger := &lumberjack.Logger{
Filename: path.Join(logDir, "gateway.log"),
MaxSize: logMaxSize, // megabytes
MaxBackups: logMaxBackups,
}
p.logger.Log(logkey.Filename, lumberjackLogger.Filename, logkey.Dir, os.TempDir(), "Logging redirect setup")
return lumberjackLogger
}
func (p *gateway) getLogger(loadedConfig *config.GatewayConfig) log.Logger {
out := p.getLogOutput(loadedConfig)
useJSON := *loadedConfig.LogFormat == "json"
if useJSON {
return log.NewJSONLogger(out, log.DefaultErrorHandler)
}
return log.NewLogfmtLogger(out, log.DefaultErrorHandler)
}
func forwarderName(f *config.ForwardTo) string {
if f.Name != nil {
return *f.Name
}
return f.Type
}
var errDupeForwarder = errors.New("cannot duplicate forwarder names or types without names")
func setupForwarders(ctx context.Context, tk timekeeper.TimeKeeper, loader *config.Loader, loadedConfig *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler, Checker *dpsink.ItemFlagger, cdim *log.CtxDimensions, manager *etcdManager) ([]protocol.Forwarder, error) {
allForwarders := make([]protocol.Forwarder, 0, len(loadedConfig.ForwardTo))
nameMap := make(map[string]bool)
for idx, forwardConfig := range loadedConfig.ForwardTo {
logCtx := log.NewContext(logger).With(logkey.Protocol, forwardConfig.Type, logkey.Direction, "forwarder")
forwardConfig.Server = manager.server
forwardConfig.Client = manager.client
forwardConfig.ClusterName = loadedConfig.ClusterName
forwardConfig.AdditionalDimensions = datapoint.AddMaps(loadedConfig.AdditionalDimensions, forwardConfig.AdditionalDimensions)
forwarder, err := loader.Forwarder(forwardConfig)
if err != nil {
return nil, err
}
name := forwarderName(forwardConfig)
if nameMap[name] {
logger.Log(fmt.Sprintf("Cannot add two forwarders with name '%s' or two unnamed forwarders of same type", name))
return nil, errDupeForwarder
}
nameMap[name] = true
logCtx = logCtx.With(logkey.Name, name)
// Buffering -> counting -> (forwarder)
limitedLogger := &log.RateLimitedLogger{
EventCounter: eventcounter.New(tk.Now(), time.Second),
Limit: 16,
Logger: logCtx,
Now: tk.Now,
}
dcount := &dpsink.Counter{
Logger: limitedLogger,
}
count := signalfx.UnifyNextSinkWrap(dcount)
endingSink := signalfx.FromChain(forwarder, signalfx.NextWrap(count))
bconf := &dpbuffered.Config{
Checker: Checker,
BufferSize: forwardConfig.BufferSize,
MaxTotalDatapoints: forwardConfig.BufferSize,
MaxTotalEvents: forwardConfig.BufferSize,
MaxTotalSpans: forwardConfig.BufferSize,
MaxDrainSize: forwardConfig.MaxDrainSize,
NumDrainingThreads: forwardConfig.DrainingThreads,
Name: forwardConfig.Name,
Cdim: cdim,
}
bf := dpbuffered.NewBufferedForwarder(ctx, bconf, endingSink, forwarder.Close, forwarder.StartupFinished, limitedLogger)
allForwarders = append(allForwarders, bf)
groupName := fmt.Sprintf("%s_f_%d", name, idx)
scheduler.AddGroupedCallback(groupName, forwarder)
scheduler.AddGroupedCallback(groupName, bf)
scheduler.AddGroupedCallback(groupName, dcount)
scheduler.GroupedDefaultDimensions(groupName, datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"name": name,
"direction": "forwarder",
"source": "gateway",
"host": *loadedConfig.ServerName,
"type": forwardConfig.Type,
"cluster": *loadedConfig.ClusterName,
}))
}
return allForwarders, nil
}
var errDupeListener = errors.New("cannot duplicate listener names or types without names")
func setupListeners(tk timekeeper.TimeKeeper, hostname string, loadedConfig *config.GatewayConfig, loader *config.Loader, listenFrom []*config.ListenFrom, multiplexer signalfx.Sink, logger log.Logger, scheduler *sfxclient.Scheduler) ([]protocol.Listener, error) {
listeners := make([]protocol.Listener, 0, len(listenFrom))
nameMap := make(map[string]bool)
for idx, listenConfig := range listenFrom {
logCtx := log.NewContext(logger).With(logkey.Protocol, listenConfig.Type, logkey.Direction, "listener")
name := func() string {
if listenConfig.Name != nil {
return *listenConfig.Name
}
return listenConfig.Type
}()
if nameMap[name] {
logger.Log(fmt.Sprintf("Cannot add two listeners with name '%s' or two unnamed listners of same type", name))
return nil, errDupeListener
}
nameMap[name] = true
count := &dpsink.Counter{
Logger: &log.RateLimitedLogger{
EventCounter: eventcounter.New(tk.Now(), time.Second),
Limit: 16,
Logger: logCtx,
Now: tk.Now,
},
}
endingSink := signalfx.FromChain(multiplexer, signalfx.NextWrap(signalfx.UnifyNextSinkWrap(count)))
listener, err := loader.Listener(endingSink, listenConfig)
if err != nil {
logCtx.Log(log.Err, err, "unable to load config")
return nil, err
}
listeners = append(listeners, listener)
groupName := fmt.Sprintf("%s_l_%d", name, idx)
scheduler.AddGroupedCallback(groupName, listener)
scheduler.AddGroupedCallback(groupName, count)
scheduler.GroupedDefaultDimensions(groupName, datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"name": name,
"direction": "listener",
"source": "gateway",
"host": hostname,
"type": listenConfig.Type,
"cluster": *loadedConfig.ClusterName,
}))
}
return listeners, nil
}
func splitSinks(forwarders []protocol.Forwarder) ([]dpsink.DSink, []dpsink.ESink, []trace.Sink) {
dsinks := make([]dpsink.DSink, 0, len(forwarders))
esinks := make([]dpsink.ESink, 0, len(forwarders))
tsinks := make([]trace.Sink, 0, len(forwarders))
for _, f := range forwarders {
dsinks = append(dsinks, f)
esinks = append(esinks, f)
tsinks = append(tsinks, f)
}
return dsinks, esinks, tsinks
}
func (p *gateway) setupInternalMetricsServer(conf *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler) error {
if conf.InternalMetricsListenerAddress == nil {
return nil
}
listener, err := net.Listen("tcp", *conf.InternalMetricsListenerAddress)
if err != nil {
return errors.Annotate(err, "cannot setup internal metrics server")
}
p.internalMetricsListener = listener
collector := internal.NewCollector(logger, scheduler)
handler := mux.NewRouter()
handler.Path("/internal-metrics").HandlerFunc(collector.MetricsHandler)
p.internalMetricsServer = collector
go func() {
err := http.Serve(listener, handler)
logger.Log(log.Err, err, "Finished serving internal metrics server")
}()
return nil
}
func (p *gateway) setupDebugServer(conf *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler) error {
if conf.LocalDebugServer == nil {
return nil
}
listener, err := net.Listen("tcp", *conf.LocalDebugServer)
if err != nil {
return errors.Annotate(err, "cannot setup debug server")
}
p.debugServerListener = listener
p.debugServer = httpdebug.New(&httpdebug.Config{
Logger: log.NewContext(logger).With(logkey.Protocol, "debugserver"),
ExplorableObj: p,
})
p.debugServer.Mux.Handle("/debug/dims", &p.debugSink)
p.debugServer.Exp2.Exported["config"] = conf.Var()
p.debugServer.Exp2.Exported["datapoints"] = scheduler.Var()
p.debugServer.Exp2.Exported["goruntime"] = expvar.Func(func() interface{} {
return runtime.Version()
})
p.debugServer.Exp2.Exported["debugdims"] = p.debugSink.Var()
p.debugServer.Exp2.Exported["gateway_version"] = expvar.Func(func() interface{} {
return Version
})
p.debugServer.Exp2.Exported["build_date"] = expvar.Func(func() interface{} {
return BuildDate
})
p.debugServer.Exp2.Exported["source"] = expvar.Func(func() interface{} {
return fmt.Sprintf("https://github.com/signalfx/gateway/tree/%s", Version)
})
go func() {
err := p.debugServer.Serve(listener)
logger.Log(log.Err, err, "Finished serving debug server")
}()
return nil
}
func setupGoMaxProcs(numProcs *int, gomaxprocs func(int) int) {
if numProcs != nil {
gomaxprocs(*numProcs)
} else {
numProcs := runtime.NumCPU()
gomaxprocs(numProcs)
}
}
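// gracefulShutdown closes listener health checks, then polls the total pipeline until it
// stays empty for the configured silent period or the maximum wait time elapses, and
// finally closes all listeners.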
func (p *gateway) gracefulShutdown() (err error) {
p.logger.Log("Starting graceful shutdown")
totalWaitTime := p.tk.After(*p.config.MaxGracefulWaitTimeDuration)
errs := make([]error, len(p.listeners)+len(p.forwarders)+1)
// close health checks on all first
for _, l := range p.listeners {
l.CloseHealthCheck()
}
// defer close of listeners and forwarders till we exit
defer func() {
p.logger.Log("close listeners")
for _, l := range p.listeners {
errs = append(errs, l.Close())
}
log.IfErr(p.logger, errors.NewMultiErr(errs))
p.logger.Log("Graceful shutdown done")
}()
p.logger.Log("Waiting for connections to drain")
startingTimeGood := p.tk.Now()
for {
select {
case <-totalWaitTime:
totalPipeline := p.Pipeline()
if totalPipeline > 0 {
p.logger.Log(logkey.TotalPipeline, totalPipeline, "Connections never drained. This could be bad ...")
}
return
case <-p.tk.After(*p.config.GracefulCheckIntervalDuration):
now := p.tk.Now()
totalPipeline := p.Pipeline()
p.logger.Log(logkey.TotalPipeline, totalPipeline, "Waking up for graceful shutdown")
if totalPipeline > 0 {
p.logger.Log(logkey.TotalPipeline, totalPipeline, "Items are still draining")
startingTimeGood = now
continue
}
if now.Sub(startingTimeGood) >= *p.config.SilentGracefulTimeDuration {
p.logger.Log(logkey.TotalPipeline, totalPipeline, "I've been silent. Graceful shutdown done")
return
}
}
}
}
func (p *gateway) Pipeline() int64 {
var totalForwarded int64
for _, f := range p.forwarders {
totalForwarded += f.Pipeline()
}
return totalForwarded
}
func (p *gateway) Close() error {
errs := make([]error, 0, len(p.forwarders)+1)
for _, f := range p.forwarders {
errs = append(errs, f.Close())
}
if p.etcdMgr != nil && p.etcdMgr.server != nil {
errs = append(errs, p.etcdMgr.removeMember())
errs = append(errs, p.etcdMgr.shutdown(true)) // shutdown the etcd server and close the client
}
if p.debugServer != nil {
errs = append(errs, p.debugServerListener.Close())
}
if p.internalMetricsServer != nil {
errs = append(errs, p.internalMetricsListener.Close())
}
return errors.NewMultiErr(errs)
}
func (p *gateway) main(ctx context.Context) error {
// Disable the default logger to make sure nobody else uses it
err := p.run(ctx)
return errors.NewMultiErr([]error{err, p.Close()})
}
func (p *gateway) setup(loadedConfig *config.GatewayConfig) {
if loadedConfig.DebugFlag != nil && *loadedConfig.DebugFlag != "" {
p.debugContext.SetFlagStr(*loadedConfig.DebugFlag)
}
p.config = loadedConfig
p.logger = log.NewContext(p.getLogger(loadedConfig)).With(logkey.Time, log.DefaultTimestamp, logkey.Caller, log.DefaultCaller)
p.debugSink.Logger = p.logger
log.DefaultLogger.Set(p.logger)
pidFilename := *loadedConfig.PidFilename
if err := writePidFile(pidFilename); err != nil {
p.logger.Log(log.Err, err, logkey.Filename, pidFilename, "cannot store pid in pid file")
}
defer func() {
log.IfErr(p.logger, os.Remove(pidFilename))
}()
defer func() {
log.DefaultLogger.Set(log.Discard)
}()
}
func (p *gateway) createCommonHTTPChain(loadedConfig *config.GatewayConfig) web.NextConstructor {
h := web.HeadersInRequest{
Headers: map[string]string{
"X-Gateway-Name": *loadedConfig.ServerName,
},
}
cf := &web.CtxWithFlag{
CtxFlagger: &p.ctxDims,
HeaderName: "X-Response-Id",
}
return web.NextConstructor(func(ctx context.Context, rw http.ResponseWriter, r *http.Request, next web.ContextHandler) {
cf.ServeHTTPC(ctx, rw, r, h.CreateMiddleware(next))
})
}
func (p *gateway) setupScheduler(loadedConfig *config.GatewayConfig) *sfxclient.Scheduler {
scheduler := sfxclient.NewScheduler()
scheduler.AddCallback(sfxclient.GoMetricsSource)
scheduler.DefaultDimensions(datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"source": "gateway",
"host": *loadedConfig.ServerName,
"cluster": *loadedConfig.ClusterName,
}))
return scheduler
}
func (p *gateway) scheduleStatCollection(ctx context.Context, scheduler *sfxclient.Scheduler, loadedConfig *config.GatewayConfig, multiplexer signalfx.Sink) (context.Context, context.CancelFunc) {
// We still want to schedule stat collection so people can debug the server if they want
scheduler.Sink = dpsink.Discard
scheduler.ReportingDelayNs = (time.Second * 30).Nanoseconds()
finishedContext, cancelFunc := context.WithCancel(ctx)
if loadedConfig.StatsDelayDuration != nil && *loadedConfig.StatsDelayDuration != 0 {
scheduler.Sink = multiplexer
scheduler.ReportingDelayNs = loadedConfig.StatsDelayDuration.Nanoseconds()
} else {
p.logger.Log("skipping stat keeping")
}
return finishedContext, cancelFunc
}
func (p *gateway) setupForwardersAndListeners(ctx context.Context, loader *config.Loader, loadedConfig *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler) (signalfx.Sink, error) {
var err error
p.forwarders, err = setupForwarders(ctx, p.tk, loader, loadedConfig, logger, scheduler, &p.debugSink, &p.ctxDims, p.etcdMgr)
if err != nil {
p.logger.Log(log.Err, err, "Unable to setup forwarders")
return nil, errors.Annotate(err, "unable to setup forwarders")
}
dpSinks, eSinks, tSinks := splitSinks(p.forwarders)
dmux := &demultiplexer.Demultiplexer{
DatapointSinks: dpSinks,
EventSinks: eSinks,
TraceSinks: tSinks,
Logger: log.NewOnePerSecond(logger),
LateDuration: loadedConfig.LateThresholdDuration,
FutureDuration: loadedConfig.FutureThresholdDuration,
}
scheduler.AddCallback(dmux)
p.versionMetric.RepoURL = "https://github.com/signalfx/gateway"
p.versionMetric.FileName = "/buildInfo.json"
scheduler.AddCallback(&p.versionMetric)
multiplexer := signalfx.FromChain(dmux, signalfx.NextWrap(signalfx.UnifyNextSinkWrap(&p.debugSink)))
p.listeners, err = setupListeners(p.tk, *loadedConfig.ServerName, loadedConfig, loader, loadedConfig.ListenFrom, multiplexer, logger, scheduler)
if err != nil {
p.logger.Log(log.Err, err, "Unable to setup listeners")
return nil, errors.Annotate(err, "cannot setup listeners from configuration")
}
var errs []error
for _, f := range p.forwarders {
err = f.StartupFinished()
errs = append(errs, err)
log.IfErr(logger, err)
}
return multiplexer, FirstNonNil(errs...)
}
func (p *gateway) run(ctx context.Context) error {
p.debugSink.CtxFlagCheck = &p.debugContext
p.logger.Log(logkey.ConfigFile, p.flags.configFileName, "Looking for config file")
p.logger.Log(logkey.Env, strings.Join(os.Environ(), "-"), "Looking for config file")
loadedConfig, err := config.Load(p.flags.configFileName, p.logger)
if err != nil {
p.logger.Log(log.Err, err, "Unable to load config")
return err
}
p.setup(loadedConfig)
p.versionMetric.Logger = p.logger
logger := p.logger
scheduler := p.setupScheduler(loadedConfig)
if err := p.setupDebugServer(loadedConfig, logger, scheduler); err != nil {
p.logger.Log(log.Err, "debug server failed", err)
return err
}
if err := p.setupInternalMetricsServer(loadedConfig, logger, scheduler); err != nil {
p.logger.Log(log.Err, "internal metrics server failed", err)
return err
}
p.etcdMgr.setup(loadedConfig)
if err := p.etcdMgr.start(); err != nil {
p.logger.Log(log.Err, "unable to start etcd server", err)
return err
}
var bb []byte
if bb, err = json.Marshal(loadedConfig); err == nil {
logger.Log(logkey.Config, string(bb), logkey.Env, strings.Join(os.Environ(), "-"), "config loaded")
}
setupGoMaxProcs(loadedConfig.NumProcs, p.gomaxprocs)
chain := p.createCommonHTTPChain(loadedConfig)
loader := config.NewLoader(ctx, logger, Version, &p.debugContext, &p.debugSink, &p.ctxDims, chain)
multiplexer, err := p.setupForwardersAndListeners(ctx, loader, loadedConfig, logger, scheduler)
if err == nil {
finishedContext, cancelFunc := p.scheduleStatCollection(ctx, scheduler, loadedConfig, multiplexer)
// Schedule datapoint collection to a Discard sink so we can get the stats in Expvar()
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
err := scheduler.Schedule(finishedContext)
logger.Log(log.Err, err, logkey.Struct, "scheduler", "Schedule finished")
wg.Done()
}()
if p.setupDoneSignal != nil {
close(p.setupDoneSignal)
}
logger.Log("Setup done. Blocking!")
select {
case <-ctx.Done():
case <-p.signalChan:
err = p.gracefulShutdown()
}
cancelFunc()
wg.Wait()
}
return err
}
var flagParse = flag.Parse
func main() {
flagParse()
signal.Notify(mainInstance.signalChan, syscall.SIGTERM)
log.IfErr(log.DefaultLogger, mainInstance.main(context.Background()))
}
// FirstNonNil returns what it says it does
func FirstNonNil(errs ...error) error | {
for _, err := range errs {
if err != nil {
return err
}
}
return nil
} | identifier_body |
|
main.go | package main
import (
"flag"
"io"
"io/ioutil"
"os"
"path"
"runtime"
"strconv"
"context"
"expvar"
"fmt"
"net"
"net/http"
"os/signal"
"strings"
"sync"
"syscall"
"time"
"encoding/json"
"github.com/gorilla/mux"
"github.com/quentin-m/etcd-cloud-operator/pkg/etcd"
"github.com/signalfx/gateway/config"
"github.com/signalfx/gateway/dp/dpbuffered"
"github.com/signalfx/gateway/internal-metrics"
"github.com/signalfx/gateway/logkey"
"github.com/signalfx/gateway/protocol"
"github.com/signalfx/gateway/protocol/demultiplexer"
"github.com/signalfx/gateway/protocol/signalfx"
_ "github.com/signalfx/go-metrics"
"github.com/signalfx/golib/datapoint"
"github.com/signalfx/golib/datapoint/dpsink"
"github.com/signalfx/golib/errors"
"github.com/signalfx/golib/eventcounter"
"github.com/signalfx/golib/httpdebug"
"github.com/signalfx/golib/log"
"github.com/signalfx/golib/reportsha"
"github.com/signalfx/golib/sfxclient"
"github.com/signalfx/golib/timekeeper"
"github.com/signalfx/golib/trace"
"github.com/signalfx/golib/web"
_ "github.com/signalfx/ondiskencoding"
_ "github.com/spaolacci/murmur3"
"gopkg.in/natefinch/lumberjack.v2"
_ "net/http/pprof"
)
var (
// Version is set by a build flag to the built version
Version = "0.9.10+"
// BuildDate is set by a build flag to the date of the build
BuildDate = ""
)
func writePidFile(pidFileName string) error {
pid := os.Getpid()
return ioutil.WriteFile(pidFileName, []byte(strconv.FormatInt(int64(pid), 10)), os.FileMode(0644))
}
// getCommaSeparatedStringEnvVar returns the given env var key's value split by comma or the default values
func getCommaSeparatedStringEnvVar(envVar string, def []string) []string {
if val := os.Getenv(envVar); val != "" {
def = def[:0]
for _, addr := range strings.Split(strings.Replace(val, " ", "", -1), ",") {
def = append(def, addr)
}
}
return def
}
// getStringEnvVar returns the given env var key's value or the default value
func getStringEnvVar(envVar string, def string) string {
if val := os.Getenv(envVar); val != "" {
return val
}
return def
}
// getDurationEnvVar returns the given env var key's value or the default value
func getDurationEnvVar(envVar string, def time.Duration) time.Duration {
if strVal := os.Getenv(envVar); strVal != "" {
if dur, err := time.ParseDuration(strVal); err == nil {
return dur
}
}
return def
}
func isStringInSlice(target string, strs []string) bool {
for _, addr := range strs {
if addr == target {
return true
}
}
return false
}
type gatewayFlags struct {
configFileName string
}
type etcdManager struct {
etcd.ServerConfig
logger log.Logger
removeTimeout time.Duration
operation string
targetCluster []string
server *etcd.Server
client *etcd.Client
}
func (mgr *etcdManager) setup(conf *config.GatewayConfig) {
mgr.LPAddress = getStringEnvVar("SFX_LISTEN_ON_PEER_ADDRESS", *conf.ListenOnPeerAddress)
mgr.APAddress = getStringEnvVar("SFX_ADVERTISE_PEER_ADDRESS", *conf.AdvertisePeerAddress)
mgr.LCAddress = getStringEnvVar("SFX_LISTEN_ON_CLIENT_ADDRESS", *conf.ListenOnClientAddress)
mgr.ACAddress = getStringEnvVar("SFX_ADVERTISE_CLIENT_ADDRESS", *conf.AdvertiseClientAddress)
mgr.MAddress = getStringEnvVar("SFX_ETCD_METRICS_ADDRESS", *conf.ETCDMetricsAddress)
mgr.UnhealthyMemberTTL = getDurationEnvVar("SFX_UNHEALTHY_MEMBER_TTL", *conf.UnhealthyMemberTTL)
mgr.removeTimeout = getDurationEnvVar("SFX_REMOVE_MEMBER_TIMEOUT", *conf.RemoveMemberTimeout)
mgr.DataDir = getStringEnvVar("SFX_CLUSTER_DATA_DIR", *conf.ClusterDataDir)
mgr.Name = getStringEnvVar("SFX_SERVER_NAME", *conf.ServerName)
mgr.ServerConfig.Name = mgr.Name
// if already set, then a command line flag was provided and takes precedence
if mgr.operation == "" {
mgr.operation = getStringEnvVar("SFX_CLUSTER_OPERATION", *conf.ClusterOperation)
}
mgr.targetCluster = getCommaSeparatedStringEnvVar("SFX_TARGET_CLUSTER_ADDRESSES", conf.TargetClusterAddresses)
}
func (mgr *etcdManager) start() (err error) {
// use a default server name if one is not provided
if mgr.ServerConfig.Name == "" {
mgr.ServerConfig.Name = fmt.Sprintf("%s", mgr.ServerConfig.ACAddress)
}
mgr.server = etcd.NewServer(mgr.ServerConfig)
switch strings.ToLower(mgr.operation) {
case "": // this is a valid option and means we shouldn't run etcd
return
case "seed":
mgr.logger.Log(fmt.Sprintf("starting etcd server %s to seed cluster", mgr.ServerConfig.Name))
if err = mgr.server.Seed(nil); err == nil |
case "join":
mgr.logger.Log(fmt.Sprintf("joining cluster with etcd server name: %s", mgr.ServerConfig.Name))
if mgr.client, err = etcd.NewClient(mgr.targetCluster, etcd.SecurityConfig{}, true); err == nil {
mgr.logger.Log(fmt.Sprintf("joining etcd cluster @ %s", mgr.client.Endpoints()))
if err = mgr.server.Join(mgr.client); err == nil {
mgr.logger.Log(fmt.Sprintf("successfully joined cluster at %s", mgr.targetCluster))
}
}
default:
err = fmt.Errorf("unsupported cluster-op specified \"%s\"", mgr.operation)
}
return err
}
func (mgr *etcdManager) getMemberID(ctx context.Context) (uint64, error) {
var memberID uint64
// use the client to retrieve this instance's member id
members, err := mgr.client.MemberList(ctx)
if members != nil {
for _, m := range members.Members {
if m.Name == mgr.Name {
memberID = m.ID
}
}
}
return memberID, err
}
func (mgr *etcdManager) removeMember() error {
var err error
var memberID uint64
ctx, cancel := context.WithTimeout(context.Background(), mgr.removeTimeout)
defer cancel()
// only remove yourself from the cluster if the server is running
if mgr.server.IsRunning() {
if memberID, err = mgr.getMemberID(ctx); err == nil {
removed := make(chan error, 1)
go func() {
defer close(removed)
removed <- mgr.client.RemoveMember(mgr.Name, memberID)
}()
select {
case err = <-removed:
cancel()
case <-ctx.Done():
}
if ctx.Err() != nil {
err = ctx.Err()
}
}
}
return err
}
func (mgr *etcdManager) shutdown(graceful bool) (err error) {
if mgr.server.IsRunning() {
// stop the etcd server
mgr.server.Stop(graceful, false) // graceful shutdown true, snapshot false
}
if mgr.client != nil {
// close the client if applicable
err = mgr.client.Close()
}
return err
}
type gateway struct {
flags gatewayFlags
listeners []protocol.Listener
forwarders []protocol.Forwarder
logger log.Logger
setupDoneSignal chan struct{}
tk timekeeper.TimeKeeper
debugServer *httpdebug.Server
debugServerListener net.Listener
internalMetricsServer *internal.Collector
internalMetricsListener net.Listener
stdout io.Writer
gomaxprocs func(int) int
debugContext web.HeaderCtxFlag
debugSink dpsink.ItemFlagger
ctxDims log.CtxDimensions
signalChan chan os.Signal
config *config.GatewayConfig
etcdMgr *etcdManager
versionMetric reportsha.SHA1Reporter
}
var mainInstance = gateway{
tk: timekeeper.RealTime{},
logger: log.DefaultLogger.CreateChild(),
stdout: os.Stdout,
gomaxprocs: runtime.GOMAXPROCS,
debugContext: web.HeaderCtxFlag{
HeaderName: "X-Debug-Id",
},
debugSink: dpsink.ItemFlagger{
EventMetaName: "dbg_events",
MetricDimensionName: "sf_metric",
},
signalChan: make(chan os.Signal, 1),
etcdMgr: &etcdManager{ServerConfig: etcd.ServerConfig{}, logger: log.DefaultLogger.CreateChild()},
}
func init() {
flag.StringVar(&mainInstance.flags.configFileName, "configfile", "sf/gateway.conf", "Name of the db gateway configuration file")
flag.StringVar(&mainInstance.etcdMgr.operation, "cluster-op", "", "operation to perform if running in cluster mode [\"seed\", \"join\", \"\"] this overrides the ClusterOperation set in the config file")
}
func (p *gateway) getLogOutput(loadedConfig *config.GatewayConfig) io.Writer {
logDir := *loadedConfig.LogDir
if logDir == "-" {
p.logger.Log("Sending logging to stdout")
return p.stdout
}
logMaxSize := *loadedConfig.LogMaxSize
logMaxBackups := *loadedConfig.LogMaxBackups
lumberjackLogger := &lumberjack.Logger{
Filename: path.Join(logDir, "gateway.log"),
MaxSize: logMaxSize, // megabytes
MaxBackups: logMaxBackups,
}
p.logger.Log(logkey.Filename, lumberjackLogger.Filename, logkey.Dir, os.TempDir(), "Logging redirect setup")
return lumberjackLogger
}
func (p *gateway) getLogger(loadedConfig *config.GatewayConfig) log.Logger {
out := p.getLogOutput(loadedConfig)
useJSON := *loadedConfig.LogFormat == "json"
if useJSON {
return log.NewJSONLogger(out, log.DefaultErrorHandler)
}
return log.NewLogfmtLogger(out, log.DefaultErrorHandler)
}
func forwarderName(f *config.ForwardTo) string {
if f.Name != nil {
return *f.Name
}
return f.Type
}
var errDupeForwarder = errors.New("cannot duplicate forwarder names or types without names")
func setupForwarders(ctx context.Context, tk timekeeper.TimeKeeper, loader *config.Loader, loadedConfig *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler, Checker *dpsink.ItemFlagger, cdim *log.CtxDimensions, manager *etcdManager) ([]protocol.Forwarder, error) {
allForwarders := make([]protocol.Forwarder, 0, len(loadedConfig.ForwardTo))
nameMap := make(map[string]bool)
for idx, forwardConfig := range loadedConfig.ForwardTo {
logCtx := log.NewContext(logger).With(logkey.Protocol, forwardConfig.Type, logkey.Direction, "forwarder")
forwardConfig.Server = manager.server
forwardConfig.Client = manager.client
forwardConfig.ClusterName = loadedConfig.ClusterName
forwardConfig.AdditionalDimensions = datapoint.AddMaps(loadedConfig.AdditionalDimensions, forwardConfig.AdditionalDimensions)
forwarder, err := loader.Forwarder(forwardConfig)
if err != nil {
return nil, err
}
name := forwarderName(forwardConfig)
if nameMap[name] {
logger.Log(fmt.Sprintf("Cannot add two forwarders with name '%s' or two unnamed forwarders of same type", name))
return nil, errDupeForwarder
}
nameMap[name] = true
logCtx = logCtx.With(logkey.Name, name)
// Buffering -> counting -> (forwarder)
limitedLogger := &log.RateLimitedLogger{
EventCounter: eventcounter.New(tk.Now(), time.Second),
Limit: 16,
Logger: logCtx,
Now: tk.Now,
}
dcount := &dpsink.Counter{
Logger: limitedLogger,
}
count := signalfx.UnifyNextSinkWrap(dcount)
endingSink := signalfx.FromChain(forwarder, signalfx.NextWrap(count))
bconf := &dpbuffered.Config{
Checker: Checker,
BufferSize: forwardConfig.BufferSize,
MaxTotalDatapoints: forwardConfig.BufferSize,
MaxTotalEvents: forwardConfig.BufferSize,
MaxTotalSpans: forwardConfig.BufferSize,
MaxDrainSize: forwardConfig.MaxDrainSize,
NumDrainingThreads: forwardConfig.DrainingThreads,
Name: forwardConfig.Name,
Cdim: cdim,
}
bf := dpbuffered.NewBufferedForwarder(ctx, bconf, endingSink, forwarder.Close, forwarder.StartupFinished, limitedLogger)
allForwarders = append(allForwarders, bf)
groupName := fmt.Sprintf("%s_f_%d", name, idx)
scheduler.AddGroupedCallback(groupName, forwarder)
scheduler.AddGroupedCallback(groupName, bf)
scheduler.AddGroupedCallback(groupName, dcount)
scheduler.GroupedDefaultDimensions(groupName, datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"name": name,
"direction": "forwarder",
"source": "gateway",
"host": *loadedConfig.ServerName,
"type": forwardConfig.Type,
"cluster": *loadedConfig.ClusterName,
}))
}
return allForwarders, nil
}
var errDupeListener = errors.New("cannot duplicate listener names or types without names")
func setupListeners(tk timekeeper.TimeKeeper, hostname string, loadedConfig *config.GatewayConfig, loader *config.Loader, listenFrom []*config.ListenFrom, multiplexer signalfx.Sink, logger log.Logger, scheduler *sfxclient.Scheduler) ([]protocol.Listener, error) {
listeners := make([]protocol.Listener, 0, len(listenFrom))
nameMap := make(map[string]bool)
for idx, listenConfig := range listenFrom {
logCtx := log.NewContext(logger).With(logkey.Protocol, listenConfig.Type, logkey.Direction, "listener")
name := func() string {
if listenConfig.Name != nil {
return *listenConfig.Name
}
return listenConfig.Type
}()
if nameMap[name] {
			logger.Log(fmt.Sprintf("Cannot add two listeners with name '%s' or two unnamed listeners of the same type", name))
return nil, errDupeListener
}
nameMap[name] = true
count := &dpsink.Counter{
Logger: &log.RateLimitedLogger{
EventCounter: eventcounter.New(tk.Now(), time.Second),
Limit: 16,
Logger: logCtx,
Now: tk.Now,
},
}
endingSink := signalfx.FromChain(multiplexer, signalfx.NextWrap(signalfx.UnifyNextSinkWrap(count)))
listener, err := loader.Listener(endingSink, listenConfig)
if err != nil {
logCtx.Log(log.Err, err, "unable to load config")
return nil, err
}
listeners = append(listeners, listener)
groupName := fmt.Sprintf("%s_l_%d", name, idx)
scheduler.AddGroupedCallback(groupName, listener)
scheduler.AddGroupedCallback(groupName, count)
scheduler.GroupedDefaultDimensions(groupName, datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"name": name,
"direction": "listener",
"source": "gateway",
"host": hostname,
"type": listenConfig.Type,
"cluster": *loadedConfig.ClusterName,
}))
}
return listeners, nil
}
func splitSinks(forwarders []protocol.Forwarder) ([]dpsink.DSink, []dpsink.ESink, []trace.Sink) {
dsinks := make([]dpsink.DSink, 0, len(forwarders))
esinks := make([]dpsink.ESink, 0, len(forwarders))
tsinks := make([]trace.Sink, 0, len(forwarders))
for _, f := range forwarders {
dsinks = append(dsinks, f)
esinks = append(esinks, f)
tsinks = append(tsinks, f)
}
return dsinks, esinks, tsinks
}
func (p *gateway) setupInternalMetricsServer(conf *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler) error {
if conf.InternalMetricsListenerAddress == nil {
return nil
}
listener, err := net.Listen("tcp", *conf.InternalMetricsListenerAddress)
if err != nil {
return errors.Annotate(err, "cannot setup internal metrics server")
}
p.internalMetricsListener = listener
collector := internal.NewCollector(logger, scheduler)
handler := mux.NewRouter()
handler.Path("/internal-metrics").HandlerFunc(collector.MetricsHandler)
p.internalMetricsServer = collector
go func() {
err := http.Serve(listener, handler)
logger.Log(log.Err, err, "Finished serving internal metrics server")
}()
return nil
}
func (p *gateway) setupDebugServer(conf *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler) error {
if conf.LocalDebugServer == nil {
return nil
}
listener, err := net.Listen("tcp", *conf.LocalDebugServer)
if err != nil {
return errors.Annotate(err, "cannot setup debug server")
}
p.debugServerListener = listener
p.debugServer = httpdebug.New(&httpdebug.Config{
Logger: log.NewContext(logger).With(logkey.Protocol, "debugserver"),
ExplorableObj: p,
})
p.debugServer.Mux.Handle("/debug/dims", &p.debugSink)
p.debugServer.Exp2.Exported["config"] = conf.Var()
p.debugServer.Exp2.Exported["datapoints"] = scheduler.Var()
p.debugServer.Exp2.Exported["goruntime"] = expvar.Func(func() interface{} {
return runtime.Version()
})
p.debugServer.Exp2.Exported["debugdims"] = p.debugSink.Var()
p.debugServer.Exp2.Exported["gateway_version"] = expvar.Func(func() interface{} {
return Version
})
p.debugServer.Exp2.Exported["build_date"] = expvar.Func(func() interface{} {
return BuildDate
})
p.debugServer.Exp2.Exported["source"] = expvar.Func(func() interface{} {
return fmt.Sprintf("https://github.com/signalfx/gateway/tree/%s", Version)
})
go func() {
err := p.debugServer.Serve(listener)
logger.Log(log.Err, err, "Finished serving debug server")
}()
return nil
}
func setupGoMaxProcs(numProcs *int, gomaxprocs func(int) int) {
if numProcs != nil {
gomaxprocs(*numProcs)
} else {
numProcs := runtime.NumCPU()
gomaxprocs(numProcs)
}
}
func (p *gateway) gracefulShutdown() (err error) {
p.logger.Log("Starting graceful shutdown")
totalWaitTime := p.tk.After(*p.config.MaxGracefulWaitTimeDuration)
errs := make([]error, len(p.listeners)+len(p.forwarders)+1)
// close health checks on all first
for _, l := range p.listeners {
l.CloseHealthCheck()
}
// defer close of listeners and forwarders till we exit
defer func() {
p.logger.Log("close listeners")
for _, l := range p.listeners {
errs = append(errs, l.Close())
}
log.IfErr(p.logger, errors.NewMultiErr(errs))
p.logger.Log("Graceful shutdown done")
}()
p.logger.Log("Waiting for connections to drain")
startingTimeGood := p.tk.Now()
for {
select {
case <-totalWaitTime:
totalPipeline := p.Pipeline()
if totalPipeline > 0 {
p.logger.Log(logkey.TotalPipeline, totalPipeline, "Connections never drained. This could be bad ...")
}
return
case <-p.tk.After(*p.config.GracefulCheckIntervalDuration):
now := p.tk.Now()
totalPipeline := p.Pipeline()
p.logger.Log(logkey.TotalPipeline, totalPipeline, "Waking up for graceful shutdown")
if totalPipeline > 0 {
p.logger.Log(logkey.TotalPipeline, totalPipeline, "Items are still draining")
startingTimeGood = now
continue
}
if now.Sub(startingTimeGood) >= *p.config.SilentGracefulTimeDuration {
p.logger.Log(logkey.TotalPipeline, totalPipeline, "I've been silent. Graceful shutdown done")
return
}
}
}
}
func (p *gateway) Pipeline() int64 {
var totalForwarded int64
for _, f := range p.forwarders {
totalForwarded += f.Pipeline()
}
return totalForwarded
}
func (p *gateway) Close() error {
errs := make([]error, 0, len(p.forwarders)+1)
for _, f := range p.forwarders {
errs = append(errs, f.Close())
}
if p.etcdMgr != nil && p.etcdMgr.server != nil {
errs = append(errs, p.etcdMgr.removeMember())
errs = append(errs, p.etcdMgr.shutdown(true)) // shutdown the etcd server and close the client
}
if p.debugServer != nil {
errs = append(errs, p.debugServerListener.Close())
}
if p.internalMetricsServer != nil {
errs = append(errs, p.internalMetricsListener.Close())
}
return errors.NewMultiErr(errs)
}
func (p *gateway) main(ctx context.Context) error {
// Disable the default logger to make sure nobody else uses it
err := p.run(ctx)
return errors.NewMultiErr([]error{err, p.Close()})
}
func (p *gateway) setup(loadedConfig *config.GatewayConfig) {
if loadedConfig.DebugFlag != nil && *loadedConfig.DebugFlag != "" {
p.debugContext.SetFlagStr(*loadedConfig.DebugFlag)
}
p.config = loadedConfig
p.logger = log.NewContext(p.getLogger(loadedConfig)).With(logkey.Time, log.DefaultTimestamp, logkey.Caller, log.DefaultCaller)
p.debugSink.Logger = p.logger
log.DefaultLogger.Set(p.logger)
pidFilename := *loadedConfig.PidFilename
if err := writePidFile(pidFilename); err != nil {
p.logger.Log(log.Err, err, logkey.Filename, pidFilename, "cannot store pid in pid file")
}
defer func() {
log.IfErr(p.logger, os.Remove(pidFilename))
}()
defer func() {
log.DefaultLogger.Set(log.Discard)
}()
}
func (p *gateway) createCommonHTTPChain(loadedConfig *config.GatewayConfig) web.NextConstructor {
h := web.HeadersInRequest{
Headers: map[string]string{
"X-Gateway-Name": *loadedConfig.ServerName,
},
}
cf := &web.CtxWithFlag{
CtxFlagger: &p.ctxDims,
HeaderName: "X-Response-Id",
}
return web.NextConstructor(func(ctx context.Context, rw http.ResponseWriter, r *http.Request, next web.ContextHandler) {
cf.ServeHTTPC(ctx, rw, r, h.CreateMiddleware(next))
})
}
func (p *gateway) setupScheduler(loadedConfig *config.GatewayConfig) *sfxclient.Scheduler {
scheduler := sfxclient.NewScheduler()
scheduler.AddCallback(sfxclient.GoMetricsSource)
scheduler.DefaultDimensions(datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"source": "gateway",
"host": *loadedConfig.ServerName,
"cluster": *loadedConfig.ClusterName,
}))
return scheduler
}
func (p *gateway) scheduleStatCollection(ctx context.Context, scheduler *sfxclient.Scheduler, loadedConfig *config.GatewayConfig, multiplexer signalfx.Sink) (context.Context, context.CancelFunc) {
// We still want to schedule stat collection so people can debug the server if they want
scheduler.Sink = dpsink.Discard
scheduler.ReportingDelayNs = (time.Second * 30).Nanoseconds()
finishedContext, cancelFunc := context.WithCancel(ctx)
if loadedConfig.StatsDelayDuration != nil && *loadedConfig.StatsDelayDuration != 0 {
scheduler.Sink = multiplexer
scheduler.ReportingDelayNs = loadedConfig.StatsDelayDuration.Nanoseconds()
} else {
p.logger.Log("skipping stat keeping")
}
return finishedContext, cancelFunc
}
func (p *gateway) setupForwardersAndListeners(ctx context.Context, loader *config.Loader, loadedConfig *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler) (signalfx.Sink, error) {
var err error
p.forwarders, err = setupForwarders(ctx, p.tk, loader, loadedConfig, logger, scheduler, &p.debugSink, &p.ctxDims, p.etcdMgr)
if err != nil {
p.logger.Log(log.Err, err, "Unable to setup forwarders")
return nil, errors.Annotate(err, "unable to setup forwarders")
}
dpSinks, eSinks, tSinks := splitSinks(p.forwarders)
dmux := &demultiplexer.Demultiplexer{
DatapointSinks: dpSinks,
EventSinks: eSinks,
TraceSinks: tSinks,
Logger: log.NewOnePerSecond(logger),
LateDuration: loadedConfig.LateThresholdDuration,
FutureDuration: loadedConfig.FutureThresholdDuration,
}
scheduler.AddCallback(dmux)
p.versionMetric.RepoURL = "https://github.com/signalfx/gateway"
p.versionMetric.FileName = "/buildInfo.json"
scheduler.AddCallback(&p.versionMetric)
multiplexer := signalfx.FromChain(dmux, signalfx.NextWrap(signalfx.UnifyNextSinkWrap(&p.debugSink)))
p.listeners, err = setupListeners(p.tk, *loadedConfig.ServerName, loadedConfig, loader, loadedConfig.ListenFrom, multiplexer, logger, scheduler)
if err != nil {
p.logger.Log(log.Err, err, "Unable to setup listeners")
return nil, errors.Annotate(err, "cannot setup listeners from configuration")
}
var errs []error
for _, f := range p.forwarders {
err = f.StartupFinished()
errs = append(errs, err)
log.IfErr(logger, err)
}
return multiplexer, FirstNonNil(errs...)
}
func (p *gateway) run(ctx context.Context) error {
p.debugSink.CtxFlagCheck = &p.debugContext
p.logger.Log(logkey.ConfigFile, p.flags.configFileName, "Looking for config file")
p.logger.Log(logkey.Env, strings.Join(os.Environ(), "-"), "Looking for config file")
loadedConfig, err := config.Load(p.flags.configFileName, p.logger)
if err != nil {
p.logger.Log(log.Err, err, "Unable to load config")
return err
}
p.setup(loadedConfig)
p.versionMetric.Logger = p.logger
logger := p.logger
scheduler := p.setupScheduler(loadedConfig)
if err := p.setupDebugServer(loadedConfig, logger, scheduler); err != nil {
p.logger.Log(log.Err, "debug server failed", err)
return err
}
if err := p.setupInternalMetricsServer(loadedConfig, logger, scheduler); err != nil {
p.logger.Log(log.Err, "internal metrics server failed", err)
return err
}
p.etcdMgr.setup(loadedConfig)
if err := p.etcdMgr.start(); err != nil {
p.logger.Log(log.Err, "unable to start etcd server", err)
return err
}
var bb []byte
if bb, err = json.Marshal(loadedConfig); err == nil {
logger.Log(logkey.Config, string(bb), logkey.Env, strings.Join(os.Environ(), "-"), "config loaded")
}
setupGoMaxProcs(loadedConfig.NumProcs, p.gomaxprocs)
chain := p.createCommonHTTPChain(loadedConfig)
loader := config.NewLoader(ctx, logger, Version, &p.debugContext, &p.debugSink, &p.ctxDims, chain)
multiplexer, err := p.setupForwardersAndListeners(ctx, loader, loadedConfig, logger, scheduler)
if err == nil {
finishedContext, cancelFunc := p.scheduleStatCollection(ctx, scheduler, loadedConfig, multiplexer)
// Schedule datapoint collection to a Discard sink so we can get the stats in Expvar()
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
err := scheduler.Schedule(finishedContext)
logger.Log(log.Err, err, logkey.Struct, "scheduler", "Schedule finished")
wg.Done()
}()
if p.setupDoneSignal != nil {
close(p.setupDoneSignal)
}
logger.Log("Setup done. Blocking!")
select {
case <-ctx.Done():
case <-p.signalChan:
err = p.gracefulShutdown()
}
cancelFunc()
wg.Wait()
}
return err
}
var flagParse = flag.Parse
func main() {
flagParse()
signal.Notify(mainInstance.signalChan, syscall.SIGTERM)
log.IfErr(log.DefaultLogger, mainInstance.main(context.Background()))
}
// FirstNonNil returns what it says it does
func FirstNonNil(errs ...error) error {
for _, err := range errs {
if err != nil {
return err
}
}
return nil
}
| {
if !isStringInSlice(mgr.AdvertisedClientAddress(), mgr.targetCluster) {
mgr.targetCluster = append(mgr.targetCluster, mgr.AdvertisedClientAddress())
}
mgr.client, err = etcd.NewClient(mgr.targetCluster, etcd.SecurityConfig{}, true)
} | conditional_block |
main.go | package main
import (
"flag"
"io"
"io/ioutil"
"os"
"path"
"runtime"
"strconv"
"context"
"expvar"
"fmt"
"net"
"net/http"
"os/signal"
"strings"
"sync"
"syscall"
"time"
"encoding/json"
"github.com/gorilla/mux"
"github.com/quentin-m/etcd-cloud-operator/pkg/etcd"
"github.com/signalfx/gateway/config"
"github.com/signalfx/gateway/dp/dpbuffered"
"github.com/signalfx/gateway/internal-metrics"
"github.com/signalfx/gateway/logkey"
"github.com/signalfx/gateway/protocol"
"github.com/signalfx/gateway/protocol/demultiplexer"
"github.com/signalfx/gateway/protocol/signalfx"
_ "github.com/signalfx/go-metrics"
"github.com/signalfx/golib/datapoint"
"github.com/signalfx/golib/datapoint/dpsink"
"github.com/signalfx/golib/errors"
"github.com/signalfx/golib/eventcounter"
"github.com/signalfx/golib/httpdebug"
"github.com/signalfx/golib/log"
"github.com/signalfx/golib/reportsha"
"github.com/signalfx/golib/sfxclient"
"github.com/signalfx/golib/timekeeper"
"github.com/signalfx/golib/trace"
"github.com/signalfx/golib/web"
_ "github.com/signalfx/ondiskencoding"
_ "github.com/spaolacci/murmur3"
"gopkg.in/natefinch/lumberjack.v2"
_ "net/http/pprof"
)
var (
// Version is set by a build flag to the built version
Version = "0.9.10+"
// BuildDate is set by a build flag to the date of the build
BuildDate = ""
)
func writePidFile(pidFileName string) error {
pid := os.Getpid()
return ioutil.WriteFile(pidFileName, []byte(strconv.FormatInt(int64(pid), 10)), os.FileMode(0644))
}
// getCommaSeparatedStringEnvVar returns the given env var key's value split by comma or the default values
func getCommaSeparatedStringEnvVar(envVar string, def []string) []string {
if val := os.Getenv(envVar); val != "" {
def = def[:0]
for _, addr := range strings.Split(strings.Replace(val, " ", "", -1), ",") {
def = append(def, addr)
}
}
return def
}
// getStringEnvVar returns the given env var key's value or the default value
func getStringEnvVar(envVar string, def string) string {
if val := os.Getenv(envVar); val != "" {
return val
}
return def
}
// getDurationEnvVar returns the given env var key's value or the default value
func getDurationEnvVar(envVar string, def time.Duration) time.Duration {
if strVal := os.Getenv(envVar); strVal != "" {
if dur, err := time.ParseDuration(strVal); err == nil {
return dur
}
}
return def
}
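// exampleEnvOverrides is an illustrative sketch added for clarity and is not part
// of the original gateway code. It demonstrates the precedence implemented by the
// helpers above: a non-empty environment variable overrides the config default,
// while an unparsable duration silently falls back to it. The values below are
// made up for the example.
func exampleEnvOverrides() {
	os.Setenv("SFX_CLUSTER_OPERATION", "join")
	os.Setenv("SFX_REMOVE_MEMBER_TIMEOUT", "not-a-duration")
	op := getStringEnvVar("SFX_CLUSTER_OPERATION", "")                     // "join" (env var wins)
	timeout := getDurationEnvVar("SFX_REMOVE_MEMBER_TIMEOUT", time.Minute) // unparsable, falls back to 1m
	addrs := getCommaSeparatedStringEnvVar("SFX_TARGET_CLUSTER_ADDRESSES", []string{"127.0.0.1:2379"})
	fmt.Println(op, timeout, addrs)
}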
func isStringInSlice(target string, strs []string) bool {
for _, addr := range strs {
if addr == target {
return true
}
}
return false
}
type gatewayFlags struct {
configFileName string
}
type etcdManager struct {
etcd.ServerConfig
logger log.Logger
removeTimeout time.Duration
operation string
targetCluster []string
server *etcd.Server
client *etcd.Client
}
func (mgr *etcdManager) setup(conf *config.GatewayConfig) {
mgr.LPAddress = getStringEnvVar("SFX_LISTEN_ON_PEER_ADDRESS", *conf.ListenOnPeerAddress)
mgr.APAddress = getStringEnvVar("SFX_ADVERTISE_PEER_ADDRESS", *conf.AdvertisePeerAddress)
mgr.LCAddress = getStringEnvVar("SFX_LISTEN_ON_CLIENT_ADDRESS", *conf.ListenOnClientAddress)
mgr.ACAddress = getStringEnvVar("SFX_ADVERTISE_CLIENT_ADDRESS", *conf.AdvertiseClientAddress)
mgr.MAddress = getStringEnvVar("SFX_ETCD_METRICS_ADDRESS", *conf.ETCDMetricsAddress)
mgr.UnhealthyMemberTTL = getDurationEnvVar("SFX_UNHEALTHY_MEMBER_TTL", *conf.UnhealthyMemberTTL)
mgr.removeTimeout = getDurationEnvVar("SFX_REMOVE_MEMBER_TIMEOUT", *conf.RemoveMemberTimeout)
mgr.DataDir = getStringEnvVar("SFX_CLUSTER_DATA_DIR", *conf.ClusterDataDir)
mgr.Name = getStringEnvVar("SFX_SERVER_NAME", *conf.ServerName)
mgr.ServerConfig.Name = mgr.Name
// if already set, then a command line flag was provided and takes precedence
if mgr.operation == "" {
mgr.operation = getStringEnvVar("SFX_CLUSTER_OPERATION", *conf.ClusterOperation)
}
mgr.targetCluster = getCommaSeparatedStringEnvVar("SFX_TARGET_CLUSTER_ADDRESSES", conf.TargetClusterAddresses)
}
func (mgr *etcdManager) start() (err error) {
// use a default server name if one is not provided
if mgr.ServerConfig.Name == "" {
		mgr.ServerConfig.Name = mgr.ServerConfig.ACAddress
}
mgr.server = etcd.NewServer(mgr.ServerConfig)
switch strings.ToLower(mgr.operation) {
case "": // this is a valid option and means we shouldn't run etcd
return
case "seed":
mgr.logger.Log(fmt.Sprintf("starting etcd server %s to seed cluster", mgr.ServerConfig.Name))
if err = mgr.server.Seed(nil); err == nil {
if !isStringInSlice(mgr.AdvertisedClientAddress(), mgr.targetCluster) {
mgr.targetCluster = append(mgr.targetCluster, mgr.AdvertisedClientAddress())
}
mgr.client, err = etcd.NewClient(mgr.targetCluster, etcd.SecurityConfig{}, true)
}
case "join":
mgr.logger.Log(fmt.Sprintf("joining cluster with etcd server name: %s", mgr.ServerConfig.Name))
if mgr.client, err = etcd.NewClient(mgr.targetCluster, etcd.SecurityConfig{}, true); err == nil {
mgr.logger.Log(fmt.Sprintf("joining etcd cluster @ %s", mgr.client.Endpoints()))
if err = mgr.server.Join(mgr.client); err == nil {
mgr.logger.Log(fmt.Sprintf("successfully joined cluster at %s", mgr.targetCluster))
}
}
default:
err = fmt.Errorf("unsupported cluster-op specified \"%s\"", mgr.operation)
}
return err
}
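// exampleSeedAndJoin is an illustrative sketch, not part of the original gateway:
// it wires up two etcdManager values by hand the way setup() plus the -cluster-op
// flag normally would, one seeding a new cluster and one joining it through the
// seeder's advertised client address. The names are made up, and a real deployment
// would also fill in the listen/advertise addresses and data directory via setup().
func exampleSeedAndJoin() error {
	seeder := &etcdManager{ServerConfig: etcd.ServerConfig{}, logger: log.DefaultLogger.CreateChild()}
	seeder.Name = "gateway-seed"
	seeder.operation = "seed"
	if err := seeder.start(); err != nil {
		return err
	}
	joiner := &etcdManager{ServerConfig: etcd.ServerConfig{}, logger: log.DefaultLogger.CreateChild()}
	joiner.Name = "gateway-join"
	joiner.operation = "join"
	joiner.targetCluster = []string{seeder.AdvertisedClientAddress()}
	return joiner.start()
}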
func (mgr *etcdManager) getMemberID(ctx context.Context) (uint64, error) {
var memberID uint64
// use the client to retrieve this instance's member id
members, err := mgr.client.MemberList(ctx)
if members != nil {
for _, m := range members.Members {
if m.Name == mgr.Name {
memberID = m.ID
}
}
}
return memberID, err
}
func (mgr *etcdManager) removeMember() error {
var err error
var memberID uint64
ctx, cancel := context.WithTimeout(context.Background(), mgr.removeTimeout)
defer cancel()
// only remove yourself from the cluster if the server is running
if mgr.server.IsRunning() {
if memberID, err = mgr.getMemberID(ctx); err == nil {
removed := make(chan error, 1)
go func() {
defer close(removed)
removed <- mgr.client.RemoveMember(mgr.Name, memberID)
}()
select {
case err = <-removed:
cancel()
case <-ctx.Done():
}
if ctx.Err() != nil {
err = ctx.Err()
}
}
}
return err
}
func (mgr *etcdManager) shutdown(graceful bool) (err error) {
if mgr.server.IsRunning() {
// stop the etcd server
mgr.server.Stop(graceful, false) // graceful shutdown true, snapshot false
}
if mgr.client != nil {
// close the client if applicable
err = mgr.client.Close()
}
return err
}
type gateway struct {
flags gatewayFlags
listeners []protocol.Listener
forwarders []protocol.Forwarder
logger log.Logger
setupDoneSignal chan struct{}
tk timekeeper.TimeKeeper
debugServer *httpdebug.Server
debugServerListener net.Listener
internalMetricsServer *internal.Collector
internalMetricsListener net.Listener
stdout io.Writer
gomaxprocs func(int) int
debugContext web.HeaderCtxFlag
debugSink dpsink.ItemFlagger
ctxDims log.CtxDimensions
signalChan chan os.Signal
config *config.GatewayConfig
etcdMgr *etcdManager
versionMetric reportsha.SHA1Reporter
}
var mainInstance = gateway{
tk: timekeeper.RealTime{},
logger: log.DefaultLogger.CreateChild(),
stdout: os.Stdout,
gomaxprocs: runtime.GOMAXPROCS,
debugContext: web.HeaderCtxFlag{
HeaderName: "X-Debug-Id",
},
debugSink: dpsink.ItemFlagger{
EventMetaName: "dbg_events",
MetricDimensionName: "sf_metric",
},
signalChan: make(chan os.Signal, 1),
etcdMgr: &etcdManager{ServerConfig: etcd.ServerConfig{}, logger: log.DefaultLogger.CreateChild()},
}
func init() {
flag.StringVar(&mainInstance.flags.configFileName, "configfile", "sf/gateway.conf", "Name of the db gateway configuration file")
flag.StringVar(&mainInstance.etcdMgr.operation, "cluster-op", "", "operation to perform if running in cluster mode [\"seed\", \"join\", \"\"] this overrides the ClusterOperation set in the config file")
}
func (p *gateway) getLogOutput(loadedConfig *config.GatewayConfig) io.Writer {
logDir := *loadedConfig.LogDir
if logDir == "-" {
p.logger.Log("Sending logging to stdout")
return p.stdout
}
logMaxSize := *loadedConfig.LogMaxSize
logMaxBackups := *loadedConfig.LogMaxBackups
lumberjackLogger := &lumberjack.Logger{
Filename: path.Join(logDir, "gateway.log"),
MaxSize: logMaxSize, // megabytes
MaxBackups: logMaxBackups,
}
p.logger.Log(logkey.Filename, lumberjackLogger.Filename, logkey.Dir, os.TempDir(), "Logging redirect setup")
return lumberjackLogger
}
func (p *gateway) getLogger(loadedConfig *config.GatewayConfig) log.Logger {
out := p.getLogOutput(loadedConfig)
useJSON := *loadedConfig.LogFormat == "json"
if useJSON {
return log.NewJSONLogger(out, log.DefaultErrorHandler)
}
return log.NewLogfmtLogger(out, log.DefaultErrorHandler)
}
func forwarderName(f *config.ForwardTo) string {
if f.Name != nil {
return *f.Name
}
return f.Type
}
var errDupeForwarder = errors.New("cannot duplicate forwarder names or types without names")
func setupForwarders(ctx context.Context, tk timekeeper.TimeKeeper, loader *config.Loader, loadedConfig *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler, Checker *dpsink.ItemFlagger, cdim *log.CtxDimensions, manager *etcdManager) ([]protocol.Forwarder, error) {
allForwarders := make([]protocol.Forwarder, 0, len(loadedConfig.ForwardTo))
nameMap := make(map[string]bool)
for idx, forwardConfig := range loadedConfig.ForwardTo {
logCtx := log.NewContext(logger).With(logkey.Protocol, forwardConfig.Type, logkey.Direction, "forwarder")
forwardConfig.Server = manager.server
forwardConfig.Client = manager.client
forwardConfig.ClusterName = loadedConfig.ClusterName
forwardConfig.AdditionalDimensions = datapoint.AddMaps(loadedConfig.AdditionalDimensions, forwardConfig.AdditionalDimensions)
forwarder, err := loader.Forwarder(forwardConfig)
if err != nil {
return nil, err
}
name := forwarderName(forwardConfig)
if nameMap[name] {
logger.Log(fmt.Sprintf("Cannot add two forwarders with name '%s' or two unnamed forwarders of same type", name))
return nil, errDupeForwarder
}
nameMap[name] = true
logCtx = logCtx.With(logkey.Name, name)
// Buffering -> counting -> (forwarder)
limitedLogger := &log.RateLimitedLogger{
EventCounter: eventcounter.New(tk.Now(), time.Second),
Limit: 16,
Logger: logCtx,
Now: tk.Now,
}
dcount := &dpsink.Counter{
Logger: limitedLogger,
}
count := signalfx.UnifyNextSinkWrap(dcount)
endingSink := signalfx.FromChain(forwarder, signalfx.NextWrap(count))
bconf := &dpbuffered.Config{
Checker: Checker,
BufferSize: forwardConfig.BufferSize,
MaxTotalDatapoints: forwardConfig.BufferSize,
MaxTotalEvents: forwardConfig.BufferSize,
MaxTotalSpans: forwardConfig.BufferSize,
MaxDrainSize: forwardConfig.MaxDrainSize,
NumDrainingThreads: forwardConfig.DrainingThreads,
Name: forwardConfig.Name,
Cdim: cdim,
}
bf := dpbuffered.NewBufferedForwarder(ctx, bconf, endingSink, forwarder.Close, forwarder.StartupFinished, limitedLogger)
allForwarders = append(allForwarders, bf)
groupName := fmt.Sprintf("%s_f_%d", name, idx)
scheduler.AddGroupedCallback(groupName, forwarder)
scheduler.AddGroupedCallback(groupName, bf)
scheduler.AddGroupedCallback(groupName, dcount)
scheduler.GroupedDefaultDimensions(groupName, datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"name": name,
"direction": "forwarder",
"source": "gateway",
"host": *loadedConfig.ServerName,
"type": forwardConfig.Type,
"cluster": *loadedConfig.ClusterName,
}))
}
return allForwarders, nil
}
var errDupeListener = errors.New("cannot duplicate listener names or types without names")
func setupListeners(tk timekeeper.TimeKeeper, hostname string, loadedConfig *config.GatewayConfig, loader *config.Loader, listenFrom []*config.ListenFrom, multiplexer signalfx.Sink, logger log.Logger, scheduler *sfxclient.Scheduler) ([]protocol.Listener, error) {
listeners := make([]protocol.Listener, 0, len(listenFrom))
nameMap := make(map[string]bool)
for idx, listenConfig := range listenFrom {
logCtx := log.NewContext(logger).With(logkey.Protocol, listenConfig.Type, logkey.Direction, "listener")
name := func() string {
if listenConfig.Name != nil {
return *listenConfig.Name
}
return listenConfig.Type
}()
if nameMap[name] {
			logger.Log(fmt.Sprintf("Cannot add two listeners with name '%s' or two unnamed listeners of the same type", name))
return nil, errDupeListener
}
nameMap[name] = true
count := &dpsink.Counter{
Logger: &log.RateLimitedLogger{
EventCounter: eventcounter.New(tk.Now(), time.Second),
Limit: 16,
Logger: logCtx,
Now: tk.Now,
},
}
endingSink := signalfx.FromChain(multiplexer, signalfx.NextWrap(signalfx.UnifyNextSinkWrap(count)))
listener, err := loader.Listener(endingSink, listenConfig)
if err != nil {
logCtx.Log(log.Err, err, "unable to load config")
return nil, err
}
listeners = append(listeners, listener)
groupName := fmt.Sprintf("%s_l_%d", name, idx)
scheduler.AddGroupedCallback(groupName, listener)
scheduler.AddGroupedCallback(groupName, count)
scheduler.GroupedDefaultDimensions(groupName, datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"name": name,
"direction": "listener",
"source": "gateway",
"host": hostname,
"type": listenConfig.Type,
"cluster": *loadedConfig.ClusterName,
}))
}
return listeners, nil
}
func splitSinks(forwarders []protocol.Forwarder) ([]dpsink.DSink, []dpsink.ESink, []trace.Sink) {
dsinks := make([]dpsink.DSink, 0, len(forwarders))
esinks := make([]dpsink.ESink, 0, len(forwarders))
tsinks := make([]trace.Sink, 0, len(forwarders))
for _, f := range forwarders {
dsinks = append(dsinks, f)
esinks = append(esinks, f)
tsinks = append(tsinks, f)
}
return dsinks, esinks, tsinks
}
func (p *gateway) setupInternalMetricsServer(conf *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler) error {
if conf.InternalMetricsListenerAddress == nil {
return nil
}
listener, err := net.Listen("tcp", *conf.InternalMetricsListenerAddress)
if err != nil {
return errors.Annotate(err, "cannot setup internal metrics server")
}
p.internalMetricsListener = listener
collector := internal.NewCollector(logger, scheduler)
handler := mux.NewRouter()
handler.Path("/internal-metrics").HandlerFunc(collector.MetricsHandler)
p.internalMetricsServer = collector
go func() {
err := http.Serve(listener, handler)
logger.Log(log.Err, err, "Finished serving internal metrics server")
}()
return nil
}
func (p *gateway) setupDebugServer(conf *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler) error {
if conf.LocalDebugServer == nil {
return nil
}
listener, err := net.Listen("tcp", *conf.LocalDebugServer)
if err != nil {
return errors.Annotate(err, "cannot setup debug server")
}
p.debugServerListener = listener
p.debugServer = httpdebug.New(&httpdebug.Config{
Logger: log.NewContext(logger).With(logkey.Protocol, "debugserver"),
ExplorableObj: p,
})
p.debugServer.Mux.Handle("/debug/dims", &p.debugSink)
p.debugServer.Exp2.Exported["config"] = conf.Var()
p.debugServer.Exp2.Exported["datapoints"] = scheduler.Var()
p.debugServer.Exp2.Exported["goruntime"] = expvar.Func(func() interface{} {
return runtime.Version()
})
p.debugServer.Exp2.Exported["debugdims"] = p.debugSink.Var()
p.debugServer.Exp2.Exported["gateway_version"] = expvar.Func(func() interface{} {
return Version
})
p.debugServer.Exp2.Exported["build_date"] = expvar.Func(func() interface{} {
return BuildDate
})
p.debugServer.Exp2.Exported["source"] = expvar.Func(func() interface{} {
return fmt.Sprintf("https://github.com/signalfx/gateway/tree/%s", Version)
})
go func() {
err := p.debugServer.Serve(listener)
logger.Log(log.Err, err, "Finished serving debug server")
}()
return nil
}
func | (numProcs *int, gomaxprocs func(int) int) {
if numProcs != nil {
gomaxprocs(*numProcs)
} else {
numProcs := runtime.NumCPU()
gomaxprocs(numProcs)
}
}
func (p *gateway) gracefulShutdown() (err error) {
p.logger.Log("Starting graceful shutdown")
totalWaitTime := p.tk.After(*p.config.MaxGracefulWaitTimeDuration)
errs := make([]error, len(p.listeners)+len(p.forwarders)+1)
// close health checks on all first
for _, l := range p.listeners {
l.CloseHealthCheck()
}
// defer close of listeners and forwarders till we exit
defer func() {
p.logger.Log("close listeners")
for _, l := range p.listeners {
errs = append(errs, l.Close())
}
log.IfErr(p.logger, errors.NewMultiErr(errs))
p.logger.Log("Graceful shutdown done")
}()
p.logger.Log("Waiting for connections to drain")
startingTimeGood := p.tk.Now()
for {
select {
case <-totalWaitTime:
totalPipeline := p.Pipeline()
if totalPipeline > 0 {
p.logger.Log(logkey.TotalPipeline, totalPipeline, "Connections never drained. This could be bad ...")
}
return
case <-p.tk.After(*p.config.GracefulCheckIntervalDuration):
now := p.tk.Now()
totalPipeline := p.Pipeline()
p.logger.Log(logkey.TotalPipeline, totalPipeline, "Waking up for graceful shutdown")
if totalPipeline > 0 {
p.logger.Log(logkey.TotalPipeline, totalPipeline, "Items are still draining")
startingTimeGood = now
continue
}
if now.Sub(startingTimeGood) >= *p.config.SilentGracefulTimeDuration {
p.logger.Log(logkey.TotalPipeline, totalPipeline, "I've been silent. Graceful shutdown done")
return
}
}
}
}
func (p *gateway) Pipeline() int64 {
var totalForwarded int64
for _, f := range p.forwarders {
totalForwarded += f.Pipeline()
}
return totalForwarded
}
func (p *gateway) Close() error {
errs := make([]error, 0, len(p.forwarders)+1)
for _, f := range p.forwarders {
errs = append(errs, f.Close())
}
if p.etcdMgr != nil && p.etcdMgr.server != nil {
errs = append(errs, p.etcdMgr.removeMember())
errs = append(errs, p.etcdMgr.shutdown(true)) // shutdown the etcd server and close the client
}
if p.debugServer != nil {
errs = append(errs, p.debugServerListener.Close())
}
if p.internalMetricsServer != nil {
errs = append(errs, p.internalMetricsListener.Close())
}
return errors.NewMultiErr(errs)
}
func (p *gateway) main(ctx context.Context) error {
// Disable the default logger to make sure nobody else uses it
err := p.run(ctx)
return errors.NewMultiErr([]error{err, p.Close()})
}
func (p *gateway) setup(loadedConfig *config.GatewayConfig) {
if loadedConfig.DebugFlag != nil && *loadedConfig.DebugFlag != "" {
p.debugContext.SetFlagStr(*loadedConfig.DebugFlag)
}
p.config = loadedConfig
p.logger = log.NewContext(p.getLogger(loadedConfig)).With(logkey.Time, log.DefaultTimestamp, logkey.Caller, log.DefaultCaller)
p.debugSink.Logger = p.logger
log.DefaultLogger.Set(p.logger)
pidFilename := *loadedConfig.PidFilename
if err := writePidFile(pidFilename); err != nil {
p.logger.Log(log.Err, err, logkey.Filename, pidFilename, "cannot store pid in pid file")
}
defer func() {
log.IfErr(p.logger, os.Remove(pidFilename))
}()
defer func() {
log.DefaultLogger.Set(log.Discard)
}()
}
func (p *gateway) createCommonHTTPChain(loadedConfig *config.GatewayConfig) web.NextConstructor {
h := web.HeadersInRequest{
Headers: map[string]string{
"X-Gateway-Name": *loadedConfig.ServerName,
},
}
cf := &web.CtxWithFlag{
CtxFlagger: &p.ctxDims,
HeaderName: "X-Response-Id",
}
return web.NextConstructor(func(ctx context.Context, rw http.ResponseWriter, r *http.Request, next web.ContextHandler) {
cf.ServeHTTPC(ctx, rw, r, h.CreateMiddleware(next))
})
}
func (p *gateway) setupScheduler(loadedConfig *config.GatewayConfig) *sfxclient.Scheduler {
scheduler := sfxclient.NewScheduler()
scheduler.AddCallback(sfxclient.GoMetricsSource)
scheduler.DefaultDimensions(datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"source": "gateway",
"host": *loadedConfig.ServerName,
"cluster": *loadedConfig.ClusterName,
}))
return scheduler
}
func (p *gateway) scheduleStatCollection(ctx context.Context, scheduler *sfxclient.Scheduler, loadedConfig *config.GatewayConfig, multiplexer signalfx.Sink) (context.Context, context.CancelFunc) {
// We still want to schedule stat collection so people can debug the server if they want
scheduler.Sink = dpsink.Discard
scheduler.ReportingDelayNs = (time.Second * 30).Nanoseconds()
finishedContext, cancelFunc := context.WithCancel(ctx)
if loadedConfig.StatsDelayDuration != nil && *loadedConfig.StatsDelayDuration != 0 {
scheduler.Sink = multiplexer
scheduler.ReportingDelayNs = loadedConfig.StatsDelayDuration.Nanoseconds()
} else {
p.logger.Log("skipping stat keeping")
}
return finishedContext, cancelFunc
}
func (p *gateway) setupForwardersAndListeners(ctx context.Context, loader *config.Loader, loadedConfig *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler) (signalfx.Sink, error) {
var err error
p.forwarders, err = setupForwarders(ctx, p.tk, loader, loadedConfig, logger, scheduler, &p.debugSink, &p.ctxDims, p.etcdMgr)
if err != nil {
p.logger.Log(log.Err, err, "Unable to setup forwarders")
return nil, errors.Annotate(err, "unable to setup forwarders")
}
dpSinks, eSinks, tSinks := splitSinks(p.forwarders)
dmux := &demultiplexer.Demultiplexer{
DatapointSinks: dpSinks,
EventSinks: eSinks,
TraceSinks: tSinks,
Logger: log.NewOnePerSecond(logger),
LateDuration: loadedConfig.LateThresholdDuration,
FutureDuration: loadedConfig.FutureThresholdDuration,
}
scheduler.AddCallback(dmux)
p.versionMetric.RepoURL = "https://github.com/signalfx/gateway"
p.versionMetric.FileName = "/buildInfo.json"
scheduler.AddCallback(&p.versionMetric)
multiplexer := signalfx.FromChain(dmux, signalfx.NextWrap(signalfx.UnifyNextSinkWrap(&p.debugSink)))
p.listeners, err = setupListeners(p.tk, *loadedConfig.ServerName, loadedConfig, loader, loadedConfig.ListenFrom, multiplexer, logger, scheduler)
if err != nil {
p.logger.Log(log.Err, err, "Unable to setup listeners")
return nil, errors.Annotate(err, "cannot setup listeners from configuration")
}
var errs []error
for _, f := range p.forwarders {
err = f.StartupFinished()
errs = append(errs, err)
log.IfErr(logger, err)
}
return multiplexer, FirstNonNil(errs...)
}
func (p *gateway) run(ctx context.Context) error {
p.debugSink.CtxFlagCheck = &p.debugContext
p.logger.Log(logkey.ConfigFile, p.flags.configFileName, "Looking for config file")
p.logger.Log(logkey.Env, strings.Join(os.Environ(), "-"), "Looking for config file")
loadedConfig, err := config.Load(p.flags.configFileName, p.logger)
if err != nil {
p.logger.Log(log.Err, err, "Unable to load config")
return err
}
p.setup(loadedConfig)
p.versionMetric.Logger = p.logger
logger := p.logger
scheduler := p.setupScheduler(loadedConfig)
if err := p.setupDebugServer(loadedConfig, logger, scheduler); err != nil {
p.logger.Log(log.Err, "debug server failed", err)
return err
}
if err := p.setupInternalMetricsServer(loadedConfig, logger, scheduler); err != nil {
p.logger.Log(log.Err, "internal metrics server failed", err)
return err
}
p.etcdMgr.setup(loadedConfig)
if err := p.etcdMgr.start(); err != nil {
p.logger.Log(log.Err, "unable to start etcd server", err)
return err
}
var bb []byte
if bb, err = json.Marshal(loadedConfig); err == nil {
logger.Log(logkey.Config, string(bb), logkey.Env, strings.Join(os.Environ(), "-"), "config loaded")
}
setupGoMaxProcs(loadedConfig.NumProcs, p.gomaxprocs)
chain := p.createCommonHTTPChain(loadedConfig)
loader := config.NewLoader(ctx, logger, Version, &p.debugContext, &p.debugSink, &p.ctxDims, chain)
multiplexer, err := p.setupForwardersAndListeners(ctx, loader, loadedConfig, logger, scheduler)
if err == nil {
finishedContext, cancelFunc := p.scheduleStatCollection(ctx, scheduler, loadedConfig, multiplexer)
// Schedule datapoint collection to a Discard sink so we can get the stats in Expvar()
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
err := scheduler.Schedule(finishedContext)
logger.Log(log.Err, err, logkey.Struct, "scheduler", "Schedule finished")
wg.Done()
}()
if p.setupDoneSignal != nil {
close(p.setupDoneSignal)
}
logger.Log("Setup done. Blocking!")
select {
case <-ctx.Done():
case <-p.signalChan:
err = p.gracefulShutdown()
}
cancelFunc()
wg.Wait()
}
return err
}
var flagParse = flag.Parse
func main() {
flagParse()
signal.Notify(mainInstance.signalChan, syscall.SIGTERM)
log.IfErr(log.DefaultLogger, mainInstance.main(context.Background()))
}
// FirstNonNil returns what it says it does
func FirstNonNil(errs ...error) error {
for _, err := range errs {
if err != nil {
return err
}
}
return nil
}
| setupGoMaxProcs | identifier_name |
ntd_utils.py |
# metaDatasetGenerator imports
from core.config import cfg, cfgData, createFilenameID, createPathRepeat, createPathSetID
from datasets.imdb import imdb
# 'other' imports
import pickle
import numpy as np
import numpy.random as npr
import os.path as osp
import matplotlib
matplotlib.use("Agg")
from core.config import cfg, cfg_from_file, cfg_from_list, get_output_dir, loadDatasetIndexDict,iconicImagesFileFormat
from datasets.factory import get_repo_imdb
from datasets.ds_utils import load_mixture_set,print_each_size,computeTotalAnnosFromAnnoCount,cropImageToAnnoRegion,roidbSampleHOG,roidbSampleImage,roidbSampleImageHOG
import os.path as osp
import datasets.imdb
import argparse
import pprint
import numpy as np
import matplotlib.pyplot as plt
import sys,os,cv2,pickle,uuid
# pytorch imports
from datasets.pytorch_roidb_loader import RoidbDataset
from numpy import transpose as npt
from ntd.hog_svm import plot_confusion_matrix, extract_pyroidb_features,appendHOGtoRoidb,split_data, scale_data,train_SVM,findMaxRegions, make_confusion_matrix,appendHOGtoRoidbDict,split_tr_te_data
from utils.misc import *
# Imports used by train_model below; they were not imported explicitly in the
# original file (train_model also expects module-level `dataloaders`,
# `dataset_sizes` and `use_gpu` to be defined before it is called).
import time
import copy
import torch
from torch.autograd import Variable
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
scheduler.step()
model.train(True) # Set model to training mode
else:
model.train(False) # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for data in dataloaders[phase]:
# get the inputs
inputs, labels = data
# wrap them in Variable
if use_gpu:
inputs = Variable(inputs.cuda())
labels = Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
# zero the parameter gradients
optimizer.zero_grad()
# forward
outputs = model(inputs)
_, preds = torch.max(outputs.data, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.data[0] * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
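def example_train_model_usage():
    """Illustrative sketch only; not part of the original file. Shows how train_model
    above is meant to be called, assuming the module-level `dataloaders`,
    `dataset_sizes` and `use_gpu` globals it reads are already set up and that
    torchvision is installed. The hyperparameters are made-up examples."""
    import torch.nn as nn
    import torch.optim as optim
    from torch.optim import lr_scheduler
    from torchvision import models
    model = models.resnet18(pretrained=True)
    # replace the classifier head with one output per dataset in the mixture
    model.fc = nn.Linear(model.fc.in_features, len(cfg.clsToSet))
    if use_gpu:
        model = model.cuda()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
    return train_model(model, criterion, optimizer, scheduler, num_epochs=25)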
def roidbToFeatures(roidb,pyloader=roidbSampleHOG,calcHog=False,roidbSizes=None):
pyroidb = RoidbDataset(roidb,[0,1,2,3,4,5,6,7],
loader=pyloader,
transform=None)
if roidbSizes is not None:
pyroidb.roidbSizes = np.arange(len(roidb)) + 1
l_feat,l_idx,y = extract_pyroidb_features(pyroidb, 'hog', cfg.clsToSet, calc_feat = calcHog, \
spatial_size=(32, 32),hist_bins=32, \
orient=9, pix_per_cell=8, cell_per_block=2, \
hog_channel=0)
return l_feat,l_idx,y
def mangleTestingData(l_feat_te,l_idx_te,y_te,X_test,y_test,X_idx):
"""
Goal: to replace the indicies with setIDs associated with the datasets in the
"test" section of the mixed dataset from the "train" to the "test" features
testIndex: the index from the
yIndicies: a python dictionary; {"setID": list of indicies associated with the set; the indicies are the location of a sample from the set in the original testing set X_test}
-> an element in the list gives index of the next "setID" in the current testing data
->
l_feat_te: a list of hog features.
-> axis=0 is datasets
-> axis=1 is hog features for a specific dataset
-> lengths across axis=1 varies
y_te: a list of setIDs from the "testing" section of the mixed dataset
l_idx_te: locations of the sample in the original roidb
-> axis=0 is datasets
-> axis=1 is the sample location
idx: what use the "idx" from across the y_te?
**error case**: if the # of training examples loaded in y_test > available # of testing
-> shouldn't happend since the test/train split comes originally from a training set (at least) x2 the testing size
"""
print(len(y_te))
print(len(l_idx_te))
print(len(l_feat_te))
for i in range(8):
print(len(l_idx_te[i]))
print(len(l_feat_te[i]))
# replace the X_test for each match of y_test
yIndicies = {}
dsIndicies = [ 0 for _ in range(len(l_idx_te)) ]
for setID in y_te:
if setID not in yIndicies.keys():
            yIndicies[setID] = list(np.where(y_test == setID)[0]) # find where the setIDs are
print("{}: {}".format(setID,len(yIndicies[setID])))
if len(yIndicies[setID]) == 0: continue
dsIdx = dsIndicies[setID] # index for l_feat_te
testIndex = yIndicies[setID][0] # index for x_test
X_test[testIndex] = l_feat_te[setID][dsIdx] # replace sample content
X_idx[testIndex] = {"idx":int(l_idx_te[setID][dsIdx]),"split":"test"} # replace the lookup
        dsIndicies[setID] += 1 # increment
        yIndicies[setID].remove(testIndex) # "increment" the index by removing the element
print(dsIndicies)
def roidbToSVMData(roidbTr,roidbTe,train_size,test_size,loaderSettings):
ds_feat_tr,l_idx_tr,y_tr = roidbToFeatures(roidbTr,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings['roidbSizes'])
"""
X_train, X_test, y_train, y_test, X_idx = split_data(train_size, test_size, \
l_feat_tr,l_idx_tr, y_tr,\
loaderSettings['dsHasTest'])
"""
ds_feat_te,l_idx_te,y_te = roidbToFeatures(roidbTe,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings["roidbSizes"])
X_train, X_test, y_train, y_test, testing_idx = split_tr_te_data(ds_feat_tr,l_idx_tr,y_tr,
ds_feat_te,l_idx_te,y_te,
train_size, test_size,
loaderSettings['dsHasTest'])
print("-=-=- training dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_tr):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_train==idx)))
print("-=-=- testing dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_te):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_test==idx)))
# this is a work-around for the loading of a "testing" mixed dataset... overwrites the original split from the training data
#mangleTestingData(l_feat_te,l_idx_te,y_te,X_test,y_test,testing_idx)
X_train, X_test = scale_data(X_train, X_test)
print(X_train.shape)
print(y_train.shape)
if X_train.shape[0] != y_train.shape[0]:
raise ValueError("number of examples for x and y are different")
return X_train, X_test, y_train, y_test, testing_idx
def prepareMixedDataset(setID,repeat,size,addHOG=True):
mixedData = load_mixture_set(setID,repeat,size)
roidbTrDict,annoCountTr,roidbTrDict1k = mixedData["train"][0],mixedData["train"][1],mixedData["train"][2]
roidbTeDict,annoCountTe,roidbTeDict1k = mixedData["test"][0],mixedData["test"][1],mixedData['test'][2]
printRoidbDictImageNamesToTextFile(roidbTrDict,"train_{}".format(setID))
printRoidbDictImageNamesToTextFile(roidbTeDict,"test_{}".format(setID))
# does the dataset have a "testing" split?
dsHasTest = [ (i is not None) and (j is not None) for i,j in zip(annoCountTr[size],
annoCountTe[size]) ]
# cropped hog image input
if addHOG:
appendHOGtoRoidbDict(roidbTrDict,size)
appendHOGtoRoidbDict(roidbTeDict,size)
appendHOGtoRoidbDict(roidbTrDict1k,1000)
appendHOGtoRoidbDict(roidbTeDict1k,1000)
print("annoCountTr: {}".format(annoCountTr[size]))
print("annoCountTe: {}".format(annoCountTe[size]))
# print_report(roidbTr,annoCountTr,roidbTe,annoCountTe,setID,repeat,size)
annoSizes = {}
annoSizes['train'] = annoCountTr
annoSizes['test'] = annoCountTe
print("-="*50)
return roidbTrDict,roidbTeDict,roidbTrDict1k,roidbTeDict1k,dsHasTest,annoSizes
def loadSvmModel(modelParams,dataType,setID,repeat,size,X_train,y_train):
modelFn = modelParams['modelFn']
if modelFn is not None:
model = pickle.load(open(modelFn,"rb"))
else:
model = train_SVM(X_train,y_train)
fn = iconicImagesFileFormat().format("model{}_svm_{}_{}_{}.pkl".format(dataType,setID,repeat,size))
pickle.dump(model,open(fn,"wb"))
print(" saved model to {}".format(fn))
print("\n\n-=- model loaded -=-\n\n")
return model
def loadDlModel(modelParams,dataType,setID,repeat,size,X_train,y_train):
pass
def genConfCropped(modelParams,roidbTr,roidbTe,ntdGameInfo):
loaderSettings = {}
loaderSettings['pyloader'] = roidbSampleHOG
loaderSettings['calcHog'] = False
loaderSettings['roidbSizes'] = None
loaderSettings['dsHasTest'] = ntdGameInfo['dsHasTest'] # todo: kind of gross here
return genConf(modelParams,"Cropped",roidbTr,roidbTe,loaderSettings,ntdGameInfo)
def genConfRaw(modelParams,roidbTr,roidbTe,ntdGameInfo):
loaderSettings = {}
loaderSettings['pyloader'] = roidbSampleImageHOG
loaderSettings['calcHog'] = False
loaderSettings['roidbSizes'] = np.arange(len(roidbTr)) + 1
loaderSettings['dsHasTest'] = ntdGameInfo['dsHasTest'] # todo: kind of gross here
return genConf(modelParams,"Raw",roidbTr,roidbTe,loaderSettings,ntdGameInfo)
def genConfSVM(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo):
|
def genConfDl(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo):
X_train, X_test, y_train, y_test, X_idx = roidbToDlData(roidbTr,roidbTe,
ntdGameInfo['trainSize'],
ntdGameInfo['testSize'],
loaderSettings)
model = loadDlModel(modelParams,dataType,ntdGameInfo['setID'],ntdGameInfo['repeat'],
ntdGameInfo['size'],X_train,y_train)
print("accuracy on test data {}".format(model.score(X_test,y_test)))
return make_confusion_matrix(model, X_test, y_test, cfg.clsToSet),model
def genConf(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo):
modelType = modelParams['modelType']
if modelType == "svm":
return genConfSVM(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo)
elif modelType == "dl":
return genConfDl(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo)
else:
print("Uknown model type of {}".format(modelType))
return None
def saveNtdSummaryStats(cmRaw_l,cmCropped_l,cmDiff_l):
import scipy.stats as ss
cmRaw_l = np.array(cmRaw_l)
cmCropped_l = np.array(cmCropped_l)
cmDiff_l = np.array(cmDiff_l)
cmRaw_mean = np.mean(cmRaw_l,axis=0)
cmCropped_mean = np.mean(cmCropped_l,axis=0)
cmDiff_mean = np.mean(cmDiff_l,axis=0)
cmRaw_std = np.std(cmRaw_l,axis=0)
cmCropped_std = np.std(cmCropped_l,axis=0)
cmDiff_std = np.std(cmDiff_l,axis=0)
paired_tTest_num = cmRaw_mean - cmCropped_mean
paired_tTest_denom = np.sqrt( (cmRaw_std**2 + cmCropped_std**2) / len(cmRaw_l) )
# we know it's two tailed, but computing as one is more efficient
t_values = np.abs(paired_tTest_num) / paired_tTest_denom
print(t_values)
p_values = ss.t.sf(t_values,len(cmRaw_l)-1)
def saveMat(fn,mat):
fid = open(iconicImagesFileFormat().format(fn),"wb")
pickle.dump(mat,fid)
fid.close()
saveId_l = ["rawMean","rawStd","croppedMean","croppedStd","diffMean","diffStd","pValues"]
plotTitle_l = ["Raw Images","Raw Std", "Cropped Images", "Cropped Std","Raw - Cropped","Raw - Cropped (Std)", "P-Values"]
confMatStat = [cmRaw_mean,cmRaw_std,cmCropped_mean,cmCropped_std,cmDiff_mean,cmDiff_std,p_values]
for saveId,plotTitle,matStat in zip(saveId_l,plotTitle_l,confMatStat):
appendStr = "{}_{}".format(saveId,cfg.uuid)
pklFn = "ntd_stats_{}.pkl".format(appendStr)
saveMat(pklFn,matStat)
pathToPlot = osp.join(cfg.PATH_TO_NTD_OUTPUT, 'ntd_stats_{}.png'.format(appendStr))
plot_confusion_matrix(np.copy(matStat), cfg.clsToSet,
pathToPlot, title=plotTitle,
cmap = plt.cm.bwr_r,vmin=-100,vmax=100)
print(p_values)
| X_train, X_test, y_train, y_test, X_idx = roidbToSVMData(roidbTr,roidbTe,
ntdGameInfo['trainSize'],
ntdGameInfo['testSize'],
loaderSettings)
model = loadSvmModel(modelParams,dataType,ntdGameInfo['setID'],ntdGameInfo['repeat'],
ntdGameInfo['size'],X_train,y_train)
print(X_test.shape)
print(y_test.shape)
print("accuracy on test data {}".format(model.score(X_test,y_test)))
print(make_confusion_matrix(model, X_train, y_train, cfg.clsToSet))
print("-"*50)
return make_confusion_matrix(model, X_test, y_test, cfg.clsToSet),model | identifier_body |
ntd_utils.py |
# metaDatasetGenerator imports
from core.config import cfg, cfgData, createFilenameID, createPathRepeat, createPathSetID
from datasets.imdb import imdb
# 'other' imports
import pickle
import numpy as np
import numpy.random as npr
import os.path as osp
import matplotlib
matplotlib.use("Agg")
from core.config import cfg, cfg_from_file, cfg_from_list, get_output_dir, loadDatasetIndexDict,iconicImagesFileFormat
from datasets.factory import get_repo_imdb
from datasets.ds_utils import load_mixture_set,print_each_size,computeTotalAnnosFromAnnoCount,cropImageToAnnoRegion,roidbSampleHOG,roidbSampleImage,roidbSampleImageHOG
import os.path as osp
import datasets.imdb
import argparse
import pprint
import numpy as np
import matplotlib.pyplot as plt
import sys,os,cv2,pickle,uuid
# pytorch imports
from datasets.pytorch_roidb_loader import RoidbDataset
from numpy import transpose as npt
from ntd.hog_svm import plot_confusion_matrix, extract_pyroidb_features,appendHOGtoRoidb,split_data, scale_data,train_SVM,findMaxRegions, make_confusion_matrix,appendHOGtoRoidbDict,split_tr_te_data
from utils.misc import *
# Imports used by train_model below; they were not imported explicitly in the
# original file (train_model also expects module-level `dataloaders`,
# `dataset_sizes` and `use_gpu` to be defined before it is called).
import time
import copy
import torch
from torch.autograd import Variable
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
scheduler.step()
model.train(True) # Set model to training mode
else:
model.train(False) # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for data in dataloaders[phase]:
# get the inputs
inputs, labels = data
# wrap them in Variable
if use_gpu:
inputs = Variable(inputs.cuda())
labels = Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
# zero the parameter gradients
optimizer.zero_grad()
# forward
outputs = model(inputs)
_, preds = torch.max(outputs.data, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.data[0] * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
def roidbToFeatures(roidb,pyloader=roidbSampleHOG,calcHog=False,roidbSizes=None):
pyroidb = RoidbDataset(roidb,[0,1,2,3,4,5,6,7],
loader=pyloader,
transform=None)
if roidbSizes is not None:
pyroidb.roidbSizes = np.arange(len(roidb)) + 1
l_feat,l_idx,y = extract_pyroidb_features(pyroidb, 'hog', cfg.clsToSet, calc_feat = calcHog, \
spatial_size=(32, 32),hist_bins=32, \
orient=9, pix_per_cell=8, cell_per_block=2, \
hog_channel=0)
return l_feat,l_idx,y
def mangleTestingData(l_feat_te,l_idx_te,y_te,X_test,y_test,X_idx):
"""
    Goal: replace the samples in the current test split (which originally came from the "train"
    section of the mixed dataset) with features drawn from the "test" section of the mixed dataset.
    testIndex: the index of the next sample to overwrite in the original testing set X_test
    yIndicies: a python dictionary; {"setID": list of indices associated with the set; the indices are the locations of samples from the set in the original testing set X_test}
    -> an element in the list gives the index of the next "setID" sample in the current testing data
    l_feat_te: a list of hog features.
    -> axis=0 is datasets
    -> axis=1 is hog features for a specific dataset
    -> lengths across axis=1 vary
    y_te: a list of setIDs from the "testing" section of the mixed dataset
    l_idx_te: locations of the samples in the original roidb
    -> axis=0 is datasets
    -> axis=1 is the sample location
    idx: how should the "idx" values from across y_te be used?
    **error case**: if the # of training examples loaded in y_test > the available # of testing examples
    -> shouldn't happen, since the test/train split comes originally from a training set at least 2x the testing size
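    Illustrative sketch (hypothetical shapes): if y_te == [2, 2, 5], the first two positions in
    X_test whose label in y_test equals 2 are overwritten with l_feat_te[2][0] and l_feat_te[2][1],
    X_idx at those positions is pointed at the corresponding roidb entries with split "test",
    and dsIndicies tracks how far into each dataset's feature list the replacement has consumed.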
"""
print(len(y_te))
print(len(l_idx_te))
print(len(l_feat_te))
for i in range(8):
print(len(l_idx_te[i]))
print(len(l_feat_te[i]))
# replace the X_test for each match of y_test
yIndicies = {}
dsIndicies = [ 0 for _ in range(len(l_idx_te)) ]
for setID in y_te:
if setID not in yIndicies.keys():
yIndicies[setID] = list(np.where(y_test == setID)[0]) # find where the setID's are
print("{}: {}".format(setID,len(yIndicies[setID])))
if len(yIndicies[setID]) == 0: continue
dsIdx = dsIndicies[setID] # index for l_feat_te
testIndex = yIndicies[setID][0] # index for x_test
X_test[testIndex] = l_feat_te[setID][dsIdx] # replace sample content
X_idx[testIndex] = {"idx":int(l_idx_te[setID][dsIdx]),"split":"test"} # replace the lookup
        dsIndicies[setID] += 1 # increment
        yIndicies[setID].remove(testIndex) # "increment" index by removing element
print(dsIndicies)
def roidbToSVMData(roidbTr,roidbTe,train_size,test_size,loaderSettings):
ds_feat_tr,l_idx_tr,y_tr = roidbToFeatures(roidbTr,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings['roidbSizes'])
"""
X_train, X_test, y_train, y_test, X_idx = split_data(train_size, test_size, \
l_feat_tr,l_idx_tr, y_tr,\
loaderSettings['dsHasTest'])
"""
ds_feat_te,l_idx_te,y_te = roidbToFeatures(roidbTe,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings["roidbSizes"])
X_train, X_test, y_train, y_test, testing_idx = split_tr_te_data(ds_feat_tr,l_idx_tr,y_tr,
ds_feat_te,l_idx_te,y_te,
train_size, test_size,
loaderSettings['dsHasTest'])
print("-=-=- training dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_tr):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_train==idx)))
print("-=-=- testing dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_te):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_test==idx)))
# this is a work-around for the loading of a "testing" mixed dataset... overwrites the original split from the training data
#mangleTestingData(l_feat_te,l_idx_te,y_te,X_test,y_test,testing_idx)
X_train, X_test = scale_data(X_train, X_test)
print(X_train.shape)
print(y_train.shape)
if X_train.shape[0] != y_train.shape[0]:
raise ValueError("number of examples for x and y are different")
return X_train, X_test, y_train, y_test, testing_idx
def prepareMixedDataset(setID,repeat,size,addHOG=True):
mixedData = load_mixture_set(setID,repeat,size)
roidbTrDict,annoCountTr,roidbTrDict1k = mixedData["train"][0],mixedData["train"][1],mixedData["train"][2]
roidbTeDict,annoCountTe,roidbTeDict1k = mixedData["test"][0],mixedData["test"][1],mixedData['test'][2]
printRoidbDictImageNamesToTextFile(roidbTrDict,"train_{}".format(setID))
printRoidbDictImageNamesToTextFile(roidbTeDict,"test_{}".format(setID))
# does the dataset have a "testing" split?
dsHasTest = [ (i is not None) and (j is not None) for i,j in zip(annoCountTr[size],
annoCountTe[size]) ]
# cropped hog image input
if addHOG:
appendHOGtoRoidbDict(roidbTrDict,size)
appendHOGtoRoidbDict(roidbTeDict,size)
appendHOGtoRoidbDict(roidbTrDict1k,1000)
appendHOGtoRoidbDict(roidbTeDict1k,1000)
print("annoCountTr: {}".format(annoCountTr[size]))
print("annoCountTe: {}".format(annoCountTe[size]))
# print_report(roidbTr,annoCountTr,roidbTe,annoCountTe,setID,repeat,size)
annoSizes = {}
annoSizes['train'] = annoCountTr
annoSizes['test'] = annoCountTe
print("-="*50)
return roidbTrDict,roidbTeDict,roidbTrDict1k,roidbTeDict1k,dsHasTest,annoSizes
def loadSvmModel(modelParams,dataType,setID,repeat,size,X_train,y_train):
modelFn = modelParams['modelFn']
if modelFn is not None:
model = pickle.load(open(modelFn,"rb"))
else:
model = train_SVM(X_train,y_train)
fn = iconicImagesFileFormat().format("model{}_svm_{}_{}_{}.pkl".format(dataType,setID,repeat,size))
pickle.dump(model,open(fn,"wb"))
print(" saved model to {}".format(fn))
print("\n\n-=- model loaded -=-\n\n")
return model
def loadDlModel(modelParams,dataType,setID,repeat,size,X_train,y_train):
pass
def genConfCropped(modelParams,roidbTr,roidbTe,ntdGameInfo):
loaderSettings = {}
loaderSettings['pyloader'] = roidbSampleHOG
loaderSettings['calcHog'] = False
loaderSettings['roidbSizes'] = None
loaderSettings['dsHasTest'] = ntdGameInfo['dsHasTest'] # todo: kind of gross here
return genConf(modelParams,"Cropped",roidbTr,roidbTe,loaderSettings,ntdGameInfo)
def genConfRaw(modelParams,roidbTr,roidbTe,ntdGameInfo):
loaderSettings = {}
loaderSettings['pyloader'] = roidbSampleImageHOG
loaderSettings['calcHog'] = False
loaderSettings['roidbSizes'] = np.arange(len(roidbTr)) + 1
loaderSettings['dsHasTest'] = ntdGameInfo['dsHasTest'] # todo: kind of gross here
return genConf(modelParams,"Raw",roidbTr,roidbTe,loaderSettings,ntdGameInfo)
def genConfSVM(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo):
X_train, X_test, y_train, y_test, X_idx = roidbToSVMData(roidbTr,roidbTe,
ntdGameInfo['trainSize'],
ntdGameInfo['testSize'],
loaderSettings)
model = loadSvmModel(modelParams,dataType,ntdGameInfo['setID'],ntdGameInfo['repeat'],
ntdGameInfo['size'],X_train,y_train)
print(X_test.shape)
print(y_test.shape)
print("accuracy on test data {}".format(model.score(X_test,y_test)))
print(make_confusion_matrix(model, X_train, y_train, cfg.clsToSet))
print("-"*50)
return make_confusion_matrix(model, X_test, y_test, cfg.clsToSet),model
def | (modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo):
X_train, X_test, y_train, y_test, X_idx = roidbToDlData(roidbTr,roidbTe,
ntdGameInfo['trainSize'],
ntdGameInfo['testSize'],
loaderSettings)
model = loadDlModel(modelParams,dataType,ntdGameInfo['setID'],ntdGameInfo['repeat'],
ntdGameInfo['size'],X_train,y_train)
print("accuracy on test data {}".format(model.score(X_test,y_test)))
return make_confusion_matrix(model, X_test, y_test, cfg.clsToSet),model
def genConf(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo):
modelType = modelParams['modelType']
if modelType == "svm":
return genConfSVM(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo)
elif modelType == "dl":
return genConfDl(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo)
else:
        print("Unknown model type of {}".format(modelType))
return None
def saveNtdSummaryStats(cmRaw_l,cmCropped_l,cmDiff_l):
import scipy.stats as ss
cmRaw_l = np.array(cmRaw_l)
cmCropped_l = np.array(cmCropped_l)
cmDiff_l = np.array(cmDiff_l)
cmRaw_mean = np.mean(cmRaw_l,axis=0)
cmCropped_mean = np.mean(cmCropped_l,axis=0)
cmDiff_mean = np.mean(cmDiff_l,axis=0)
cmRaw_std = np.std(cmRaw_l,axis=0)
cmCropped_std = np.std(cmCropped_l,axis=0)
cmDiff_std = np.std(cmDiff_l,axis=0)
paired_tTest_num = cmRaw_mean - cmCropped_mean
paired_tTest_denom = np.sqrt( (cmRaw_std**2 + cmCropped_std**2) / len(cmRaw_l) )
# we know it's two tailed, but computing as one is more efficient
t_values = np.abs(paired_tTest_num) / paired_tTest_denom
print(t_values)
p_values = ss.t.sf(t_values,len(cmRaw_l)-1)
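    # note: ss.t.sf returns the upper-tail probability; because t_values is an absolute value,
    # the two-tailed p-value is twice this quantity.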
def saveMat(fn,mat):
fid = open(iconicImagesFileFormat().format(fn),"wb")
pickle.dump(mat,fid)
fid.close()
saveId_l = ["rawMean","rawStd","croppedMean","croppedStd","diffMean","diffStd","pValues"]
plotTitle_l = ["Raw Images","Raw Std", "Cropped Images", "Cropped Std","Raw - Cropped","Raw - Cropped (Std)", "P-Values"]
confMatStat = [cmRaw_mean,cmRaw_std,cmCropped_mean,cmCropped_std,cmDiff_mean,cmDiff_std,p_values]
for saveId,plotTitle,matStat in zip(saveId_l,plotTitle_l,confMatStat):
appendStr = "{}_{}".format(saveId,cfg.uuid)
pklFn = "ntd_stats_{}.pkl".format(appendStr)
saveMat(pklFn,matStat)
pathToPlot = osp.join(cfg.PATH_TO_NTD_OUTPUT, 'ntd_stats_{}.png'.format(appendStr))
plot_confusion_matrix(np.copy(matStat), cfg.clsToSet,
pathToPlot, title=plotTitle,
cmap = plt.cm.bwr_r,vmin=-100,vmax=100)
print(p_values)
| genConfDl | identifier_name |
ntd_utils.py |
# metaDatasetGenerator imports
from core.config import cfg, cfgData, createFilenameID, createPathRepeat, createPathSetID
from datasets.imdb import imdb
# 'other' imports
import pickle
import numpy as np
import numpy.random as npr
import os.path as osp
import matplotlib
matplotlib.use("Agg")
from core.config import cfg, cfg_from_file, cfg_from_list, get_output_dir, loadDatasetIndexDict,iconicImagesFileFormat
from datasets.factory import get_repo_imdb
from datasets.ds_utils import load_mixture_set,print_each_size,computeTotalAnnosFromAnnoCount,cropImageToAnnoRegion,roidbSampleHOG,roidbSampleImage,roidbSampleImageHOG
import os.path as osp
import datasets.imdb
import argparse
import pprint
import numpy as np
import matplotlib.pyplot as plt
import sys,os,cv2,pickle,uuid
# pytorch imports
# explicit imports needed by train_model below; dataloaders, dataset_sizes and use_gpu are
# assumed to be module-level globals supplied elsewhere
import time
import copy
import torch
from torch.autograd import Variable
from datasets.pytorch_roidb_loader import RoidbDataset
from numpy import transpose as npt
from ntd.hog_svm import plot_confusion_matrix, extract_pyroidb_features,appendHOGtoRoidb,split_data, scale_data,train_SVM,findMaxRegions, make_confusion_matrix,appendHOGtoRoidbDict,split_tr_te_data
from utils.misc import *
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
|
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
def roidbToFeatures(roidb,pyloader=roidbSampleHOG,calcHog=False,roidbSizes=None):
pyroidb = RoidbDataset(roidb,[0,1,2,3,4,5,6,7],
loader=pyloader,
transform=None)
if roidbSizes is not None:
pyroidb.roidbSizes = np.arange(len(roidb)) + 1
l_feat,l_idx,y = extract_pyroidb_features(pyroidb, 'hog', cfg.clsToSet, calc_feat = calcHog, \
spatial_size=(32, 32),hist_bins=32, \
orient=9, pix_per_cell=8, cell_per_block=2, \
hog_channel=0)
return l_feat,l_idx,y
def mangleTestingData(l_feat_te,l_idx_te,y_te,X_test,y_test,X_idx):
"""
    Goal: replace the samples in the current test split (which originally came from the "train"
    section of the mixed dataset) with features drawn from the "test" section of the mixed dataset.
    testIndex: the index of the next sample to overwrite in the original testing set X_test
    yIndicies: a python dictionary; {"setID": list of indices associated with the set; the indices are the locations of samples from the set in the original testing set X_test}
    -> an element in the list gives the index of the next "setID" sample in the current testing data
    l_feat_te: a list of hog features.
    -> axis=0 is datasets
    -> axis=1 is hog features for a specific dataset
    -> lengths across axis=1 vary
    y_te: a list of setIDs from the "testing" section of the mixed dataset
    l_idx_te: locations of the samples in the original roidb
    -> axis=0 is datasets
    -> axis=1 is the sample location
    idx: how should the "idx" values from across y_te be used?
    **error case**: if the # of training examples loaded in y_test > the available # of testing examples
    -> shouldn't happen, since the test/train split comes originally from a training set at least 2x the testing size
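    Illustrative sketch (hypothetical shapes): if y_te == [2, 2, 5], the first two positions in
    X_test whose label in y_test equals 2 are overwritten with l_feat_te[2][0] and l_feat_te[2][1],
    X_idx at those positions is pointed at the corresponding roidb entries with split "test",
    and dsIndicies tracks how far into each dataset's feature list the replacement has consumed.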
"""
print(len(y_te))
print(len(l_idx_te))
print(len(l_feat_te))
for i in range(8):
print(len(l_idx_te[i]))
print(len(l_feat_te[i]))
# replace the X_test for each match of y_test
yIndicies = {}
dsIndicies = [ 0 for _ in range(len(l_idx_te)) ]
for setID in y_te:
if setID not in yIndicies.keys():
yIndicies[setID] = list(np.where(y_test == setID)[0]) # find where the setID's are
print("{}: {}".format(setID,len(yIndicies[setID])))
if len(yIndicies[setID]) == 0: continue
dsIdx = dsIndicies[setID] # index for l_feat_te
testIndex = yIndicies[setID][0] # index for x_test
X_test[testIndex] = l_feat_te[setID][dsIdx] # replace sample content
X_idx[testIndex] = {"idx":int(l_idx_te[setID][dsIdx]),"split":"test"} # replace the lookup
        dsIndicies[setID] += 1 # increment
        yIndicies[setID].remove(testIndex) # "increment" index by removing element
print(dsIndicies)
def roidbToSVMData(roidbTr,roidbTe,train_size,test_size,loaderSettings):
ds_feat_tr,l_idx_tr,y_tr = roidbToFeatures(roidbTr,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings['roidbSizes'])
"""
X_train, X_test, y_train, y_test, X_idx = split_data(train_size, test_size, \
l_feat_tr,l_idx_tr, y_tr,\
loaderSettings['dsHasTest'])
"""
ds_feat_te,l_idx_te,y_te = roidbToFeatures(roidbTe,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings["roidbSizes"])
X_train, X_test, y_train, y_test, testing_idx = split_tr_te_data(ds_feat_tr,l_idx_tr,y_tr,
ds_feat_te,l_idx_te,y_te,
train_size, test_size,
loaderSettings['dsHasTest'])
print("-=-=- training dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_tr):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_train==idx)))
print("-=-=- testing dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_te):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_test==idx)))
# this is a work-around for the loading of a "testing" mixed dataset... overwrites the original split from the training data
#mangleTestingData(l_feat_te,l_idx_te,y_te,X_test,y_test,testing_idx)
X_train, X_test = scale_data(X_train, X_test)
print(X_train.shape)
print(y_train.shape)
if X_train.shape[0] != y_train.shape[0]:
raise ValueError("number of examples for x and y are different")
return X_train, X_test, y_train, y_test, testing_idx
def prepareMixedDataset(setID,repeat,size,addHOG=True):
mixedData = load_mixture_set(setID,repeat,size)
roidbTrDict,annoCountTr,roidbTrDict1k = mixedData["train"][0],mixedData["train"][1],mixedData["train"][2]
roidbTeDict,annoCountTe,roidbTeDict1k = mixedData["test"][0],mixedData["test"][1],mixedData['test'][2]
printRoidbDictImageNamesToTextFile(roidbTrDict,"train_{}".format(setID))
printRoidbDictImageNamesToTextFile(roidbTeDict,"test_{}".format(setID))
# does the dataset have a "testing" split?
dsHasTest = [ (i is not None) and (j is not None) for i,j in zip(annoCountTr[size],
annoCountTe[size]) ]
# cropped hog image input
if addHOG:
appendHOGtoRoidbDict(roidbTrDict,size)
appendHOGtoRoidbDict(roidbTeDict,size)
appendHOGtoRoidbDict(roidbTrDict1k,1000)
appendHOGtoRoidbDict(roidbTeDict1k,1000)
print("annoCountTr: {}".format(annoCountTr[size]))
print("annoCountTe: {}".format(annoCountTe[size]))
# print_report(roidbTr,annoCountTr,roidbTe,annoCountTe,setID,repeat,size)
annoSizes = {}
annoSizes['train'] = annoCountTr
annoSizes['test'] = annoCountTe
print("-="*50)
return roidbTrDict,roidbTeDict,roidbTrDict1k,roidbTeDict1k,dsHasTest,annoSizes
def loadSvmModel(modelParams,dataType,setID,repeat,size,X_train,y_train):
modelFn = modelParams['modelFn']
if modelFn is not None:
model = pickle.load(open(modelFn,"rb"))
else:
model = train_SVM(X_train,y_train)
fn = iconicImagesFileFormat().format("model{}_svm_{}_{}_{}.pkl".format(dataType,setID,repeat,size))
pickle.dump(model,open(fn,"wb"))
print(" saved model to {}".format(fn))
print("\n\n-=- model loaded -=-\n\n")
return model
def loadDlModel(modelParams,dataType,setID,repeat,size,X_train,y_train):
pass
def genConfCropped(modelParams,roidbTr,roidbTe,ntdGameInfo):
loaderSettings = {}
loaderSettings['pyloader'] = roidbSampleHOG
loaderSettings['calcHog'] = False
loaderSettings['roidbSizes'] = None
loaderSettings['dsHasTest'] = ntdGameInfo['dsHasTest'] # todo: kind of gross here
return genConf(modelParams,"Cropped",roidbTr,roidbTe,loaderSettings,ntdGameInfo)
def genConfRaw(modelParams,roidbTr,roidbTe,ntdGameInfo):
loaderSettings = {}
loaderSettings['pyloader'] = roidbSampleImageHOG
loaderSettings['calcHog'] = False
loaderSettings['roidbSizes'] = np.arange(len(roidbTr)) + 1
loaderSettings['dsHasTest'] = ntdGameInfo['dsHasTest'] # todo: kind of gross here
return genConf(modelParams,"Raw",roidbTr,roidbTe,loaderSettings,ntdGameInfo)
def genConfSVM(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo):
X_train, X_test, y_train, y_test, X_idx = roidbToSVMData(roidbTr,roidbTe,
ntdGameInfo['trainSize'],
ntdGameInfo['testSize'],
loaderSettings)
model = loadSvmModel(modelParams,dataType,ntdGameInfo['setID'],ntdGameInfo['repeat'],
ntdGameInfo['size'],X_train,y_train)
print(X_test.shape)
print(y_test.shape)
print("accuracy on test data {}".format(model.score(X_test,y_test)))
print(make_confusion_matrix(model, X_train, y_train, cfg.clsToSet))
print("-"*50)
return make_confusion_matrix(model, X_test, y_test, cfg.clsToSet),model
def genConfDl(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo):
X_train, X_test, y_train, y_test, X_idx = roidbToDlData(roidbTr,roidbTe,
ntdGameInfo['trainSize'],
ntdGameInfo['testSize'],
loaderSettings)
model = loadDlModel(modelParams,dataType,ntdGameInfo['setID'],ntdGameInfo['repeat'],
ntdGameInfo['size'],X_train,y_train)
print("accuracy on test data {}".format(model.score(X_test,y_test)))
return make_confusion_matrix(model, X_test, y_test, cfg.clsToSet),model
def genConf(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo):
modelType = modelParams['modelType']
if modelType == "svm":
return genConfSVM(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo)
elif modelType == "dl":
return genConfDl(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo)
else:
        print("Unknown model type of {}".format(modelType))
return None
def saveNtdSummaryStats(cmRaw_l,cmCropped_l,cmDiff_l):
import scipy.stats as ss
cmRaw_l = np.array(cmRaw_l)
cmCropped_l = np.array(cmCropped_l)
cmDiff_l = np.array(cmDiff_l)
cmRaw_mean = np.mean(cmRaw_l,axis=0)
cmCropped_mean = np.mean(cmCropped_l,axis=0)
cmDiff_mean = np.mean(cmDiff_l,axis=0)
cmRaw_std = np.std(cmRaw_l,axis=0)
cmCropped_std = np.std(cmCropped_l,axis=0)
cmDiff_std = np.std(cmDiff_l,axis=0)
paired_tTest_num = cmRaw_mean - cmCropped_mean
paired_tTest_denom = np.sqrt( (cmRaw_std**2 + cmCropped_std**2) / len(cmRaw_l) )
# we know it's two tailed, but computing as one is more efficient
t_values = np.abs(paired_tTest_num) / paired_tTest_denom
print(t_values)
p_values = ss.t.sf(t_values,len(cmRaw_l)-1)
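    # note: ss.t.sf returns the upper-tail probability; because t_values is an absolute value,
    # the two-tailed p-value is twice this quantity.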
def saveMat(fn,mat):
fid = open(iconicImagesFileFormat().format(fn),"wb")
pickle.dump(mat,fid)
fid.close()
saveId_l = ["rawMean","rawStd","croppedMean","croppedStd","diffMean","diffStd","pValues"]
plotTitle_l = ["Raw Images","Raw Std", "Cropped Images", "Cropped Std","Raw - Cropped","Raw - Cropped (Std)", "P-Values"]
confMatStat = [cmRaw_mean,cmRaw_std,cmCropped_mean,cmCropped_std,cmDiff_mean,cmDiff_std,p_values]
for saveId,plotTitle,matStat in zip(saveId_l,plotTitle_l,confMatStat):
appendStr = "{}_{}".format(saveId,cfg.uuid)
pklFn = "ntd_stats_{}.pkl".format(appendStr)
saveMat(pklFn,matStat)
pathToPlot = osp.join(cfg.PATH_TO_NTD_OUTPUT, 'ntd_stats_{}.png'.format(appendStr))
plot_confusion_matrix(np.copy(matStat), cfg.clsToSet,
pathToPlot, title=plotTitle,
cmap = plt.cm.bwr_r,vmin=-100,vmax=100)
print(p_values)
| if phase == 'train':
scheduler.step()
model.train(True) # Set model to training mode
else:
model.train(False) # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for data in dataloaders[phase]:
# get the inputs
inputs, labels = data
# wrap them in Variable
if use_gpu:
inputs = Variable(inputs.cuda())
labels = Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
# zero the parameter gradients
optimizer.zero_grad()
# forward
outputs = model(inputs)
_, preds = torch.max(outputs.data, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.data[0] * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict()) | conditional_block |
ntd_utils.py | # metaDatasetGenerator imports
from core.config import cfg, cfgData, createFilenameID, createPathRepeat, createPathSetID
from datasets.imdb import imdb
# 'other' imports
import pickle
import numpy as np
import numpy.random as npr
import os.path as osp
import matplotlib
matplotlib.use("Agg")
from core.config import cfg, cfg_from_file, cfg_from_list, get_output_dir, loadDatasetIndexDict,iconicImagesFileFormat
from datasets.factory import get_repo_imdb
from datasets.ds_utils import load_mixture_set,print_each_size,computeTotalAnnosFromAnnoCount,cropImageToAnnoRegion,roidbSampleHOG,roidbSampleImage,roidbSampleImageHOG
import os.path as osp
import datasets.imdb
import argparse
import pprint
import numpy as np
import matplotlib.pyplot as plt
import sys,os,cv2,pickle,uuid
# pytorch imports
# explicit imports needed by train_model below; dataloaders, dataset_sizes and use_gpu are
# assumed to be module-level globals supplied elsewhere
import time
import copy
import torch
from torch.autograd import Variable
from datasets.pytorch_roidb_loader import RoidbDataset
from numpy import transpose as npt
from ntd.hog_svm import plot_confusion_matrix, extract_pyroidb_features,appendHOGtoRoidb,split_data, scale_data,train_SVM,findMaxRegions, make_confusion_matrix,appendHOGtoRoidbDict,split_tr_te_data
from utils.misc import *
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
scheduler.step()
model.train(True) # Set model to training mode
else:
model.train(False) # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for data in dataloaders[phase]:
# get the inputs
inputs, labels = data
# wrap them in Variable
if use_gpu:
inputs = Variable(inputs.cuda())
labels = Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
# zero the parameter gradients
optimizer.zero_grad()
# forward
outputs = model(inputs)
_, preds = torch.max(outputs.data, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.data[0] * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
def roidbToFeatures(roidb,pyloader=roidbSampleHOG,calcHog=False,roidbSizes=None):
pyroidb = RoidbDataset(roidb,[0,1,2,3,4,5,6,7],
loader=pyloader,
transform=None)
if roidbSizes is not None:
pyroidb.roidbSizes = np.arange(len(roidb)) + 1
l_feat,l_idx,y = extract_pyroidb_features(pyroidb, 'hog', cfg.clsToSet, calc_feat = calcHog, \
spatial_size=(32, 32),hist_bins=32, \
orient=9, pix_per_cell=8, cell_per_block=2, \
hog_channel=0)
return l_feat,l_idx,y
def mangleTestingData(l_feat_te,l_idx_te,y_te,X_test,y_test,X_idx):
"""
    Goal: replace the samples in the current test split (which originally came from the "train"
    section of the mixed dataset) with features drawn from the "test" section of the mixed dataset.
    testIndex: the index of the next sample to overwrite in the original testing set X_test
    yIndicies: a python dictionary; {"setID": list of indices associated with the set; the indices are the locations of samples from the set in the original testing set X_test}
    -> an element in the list gives the index of the next "setID" sample in the current testing data
    l_feat_te: a list of hog features.
    -> axis=0 is datasets
    -> axis=1 is hog features for a specific dataset
    -> lengths across axis=1 vary
    y_te: a list of setIDs from the "testing" section of the mixed dataset
    l_idx_te: locations of the samples in the original roidb
    -> axis=0 is datasets
    -> axis=1 is the sample location
    idx: how should the "idx" values from across y_te be used?
    **error case**: if the # of training examples loaded in y_test > the available # of testing examples
    -> shouldn't happen, since the test/train split comes originally from a training set at least 2x the testing size
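    Illustrative sketch (hypothetical shapes): if y_te == [2, 2, 5], the first two positions in
    X_test whose label in y_test equals 2 are overwritten with l_feat_te[2][0] and l_feat_te[2][1],
    X_idx at those positions is pointed at the corresponding roidb entries with split "test",
    and dsIndicies tracks how far into each dataset's feature list the replacement has consumed.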
"""
print(len(y_te))
print(len(l_idx_te))
print(len(l_feat_te))
for i in range(8):
print(len(l_idx_te[i]))
print(len(l_feat_te[i]))
# replace the X_test for each match of y_test
yIndicies = {}
dsIndicies = [ 0 for _ in range(len(l_idx_te)) ]
for setID in y_te:
if setID not in yIndicies.keys(): | print("{}: {}".format(setID,len(yIndicies[setID])))
if len(yIndicies[setID]) == 0: continue
dsIdx = dsIndicies[setID] # index for l_feat_te
testIndex = yIndicies[setID][0] # index for x_test
X_test[testIndex] = l_feat_te[setID][dsIdx] # replace sample content
X_idx[testIndex] = {"idx":int(l_idx_te[setID][dsIdx]),"split":"test"} # replace the lookup
        dsIndicies[setID] += 1 # increment
        yIndicies[setID].remove(testIndex) # "increment" index by removing element
print(dsIndicies)
def roidbToSVMData(roidbTr,roidbTe,train_size,test_size,loaderSettings):
ds_feat_tr,l_idx_tr,y_tr = roidbToFeatures(roidbTr,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings['roidbSizes'])
"""
X_train, X_test, y_train, y_test, X_idx = split_data(train_size, test_size, \
l_feat_tr,l_idx_tr, y_tr,\
loaderSettings['dsHasTest'])
"""
ds_feat_te,l_idx_te,y_te = roidbToFeatures(roidbTe,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings["roidbSizes"])
X_train, X_test, y_train, y_test, testing_idx = split_tr_te_data(ds_feat_tr,l_idx_tr,y_tr,
ds_feat_te,l_idx_te,y_te,
train_size, test_size,
loaderSettings['dsHasTest'])
print("-=-=- training dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_tr):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_train==idx)))
print("-=-=- testing dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_te):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_test==idx)))
# this is a work-around for the loading of a "testing" mixed dataset... overwrites the original split from the training data
#mangleTestingData(l_feat_te,l_idx_te,y_te,X_test,y_test,testing_idx)
X_train, X_test = scale_data(X_train, X_test)
print(X_train.shape)
print(y_train.shape)
if X_train.shape[0] != y_train.shape[0]:
raise ValueError("number of examples for x and y are different")
return X_train, X_test, y_train, y_test, testing_idx
def prepareMixedDataset(setID,repeat,size,addHOG=True):
mixedData = load_mixture_set(setID,repeat,size)
roidbTrDict,annoCountTr,roidbTrDict1k = mixedData["train"][0],mixedData["train"][1],mixedData["train"][2]
roidbTeDict,annoCountTe,roidbTeDict1k = mixedData["test"][0],mixedData["test"][1],mixedData['test'][2]
printRoidbDictImageNamesToTextFile(roidbTrDict,"train_{}".format(setID))
printRoidbDictImageNamesToTextFile(roidbTeDict,"test_{}".format(setID))
# does the dataset have a "testing" split?
dsHasTest = [ (i is not None) and (j is not None) for i,j in zip(annoCountTr[size],
annoCountTe[size]) ]
# cropped hog image input
if addHOG:
appendHOGtoRoidbDict(roidbTrDict,size)
appendHOGtoRoidbDict(roidbTeDict,size)
appendHOGtoRoidbDict(roidbTrDict1k,1000)
appendHOGtoRoidbDict(roidbTeDict1k,1000)
print("annoCountTr: {}".format(annoCountTr[size]))
print("annoCountTe: {}".format(annoCountTe[size]))
# print_report(roidbTr,annoCountTr,roidbTe,annoCountTe,setID,repeat,size)
annoSizes = {}
annoSizes['train'] = annoCountTr
annoSizes['test'] = annoCountTe
print("-="*50)
return roidbTrDict,roidbTeDict,roidbTrDict1k,roidbTeDict1k,dsHasTest,annoSizes
def loadSvmModel(modelParams,dataType,setID,repeat,size,X_train,y_train):
modelFn = modelParams['modelFn']
if modelFn is not None:
model = pickle.load(open(modelFn,"rb"))
else:
model = train_SVM(X_train,y_train)
fn = iconicImagesFileFormat().format("model{}_svm_{}_{}_{}.pkl".format(dataType,setID,repeat,size))
pickle.dump(model,open(fn,"wb"))
print(" saved model to {}".format(fn))
print("\n\n-=- model loaded -=-\n\n")
return model
def loadDlModel(modelParams,dataType,setID,repeat,size,X_train,y_train):
pass
def genConfCropped(modelParams,roidbTr,roidbTe,ntdGameInfo):
loaderSettings = {}
loaderSettings['pyloader'] = roidbSampleHOG
loaderSettings['calcHog'] = False
loaderSettings['roidbSizes'] = None
loaderSettings['dsHasTest'] = ntdGameInfo['dsHasTest'] # todo: kind of gross here
return genConf(modelParams,"Cropped",roidbTr,roidbTe,loaderSettings,ntdGameInfo)
def genConfRaw(modelParams,roidbTr,roidbTe,ntdGameInfo):
loaderSettings = {}
loaderSettings['pyloader'] = roidbSampleImageHOG
loaderSettings['calcHog'] = False
loaderSettings['roidbSizes'] = np.arange(len(roidbTr)) + 1
loaderSettings['dsHasTest'] = ntdGameInfo['dsHasTest'] # todo: kind of gross here
return genConf(modelParams,"Raw",roidbTr,roidbTe,loaderSettings,ntdGameInfo)
def genConfSVM(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo):
X_train, X_test, y_train, y_test, X_idx = roidbToSVMData(roidbTr,roidbTe,
ntdGameInfo['trainSize'],
ntdGameInfo['testSize'],
loaderSettings)
model = loadSvmModel(modelParams,dataType,ntdGameInfo['setID'],ntdGameInfo['repeat'],
ntdGameInfo['size'],X_train,y_train)
print(X_test.shape)
print(y_test.shape)
print("accuracy on test data {}".format(model.score(X_test,y_test)))
print(make_confusion_matrix(model, X_train, y_train, cfg.clsToSet))
print("-"*50)
return make_confusion_matrix(model, X_test, y_test, cfg.clsToSet),model
def genConfDl(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo):
X_train, X_test, y_train, y_test, X_idx = roidbToDlData(roidbTr,roidbTe,
ntdGameInfo['trainSize'],
ntdGameInfo['testSize'],
loaderSettings)
model = loadDlModel(modelParams,dataType,ntdGameInfo['setID'],ntdGameInfo['repeat'],
ntdGameInfo['size'],X_train,y_train)
print("accuracy on test data {}".format(model.score(X_test,y_test)))
return make_confusion_matrix(model, X_test, y_test, cfg.clsToSet),model
def genConf(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo):
modelType = modelParams['modelType']
if modelType == "svm":
return genConfSVM(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo)
elif modelType == "dl":
return genConfDl(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo)
else:
        print("Unknown model type of {}".format(modelType))
return None
def saveNtdSummaryStats(cmRaw_l,cmCropped_l,cmDiff_l):
import scipy.stats as ss
cmRaw_l = np.array(cmRaw_l)
cmCropped_l = np.array(cmCropped_l)
cmDiff_l = np.array(cmDiff_l)
cmRaw_mean = np.mean(cmRaw_l,axis=0)
cmCropped_mean = np.mean(cmCropped_l,axis=0)
cmDiff_mean = np.mean(cmDiff_l,axis=0)
cmRaw_std = np.std(cmRaw_l,axis=0)
cmCropped_std = np.std(cmCropped_l,axis=0)
cmDiff_std = np.std(cmDiff_l,axis=0)
paired_tTest_num = cmRaw_mean - cmCropped_mean
paired_tTest_denom = np.sqrt( (cmRaw_std**2 + cmCropped_std**2) / len(cmRaw_l) )
# we know it's two tailed, but computing as one is more efficient
t_values = np.abs(paired_tTest_num) / paired_tTest_denom
print(t_values)
p_values = ss.t.sf(t_values,len(cmRaw_l)-1)
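    # note: ss.t.sf returns the upper-tail probability; because t_values is an absolute value,
    # the two-tailed p-value is twice this quantity.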
def saveMat(fn,mat):
fid = open(iconicImagesFileFormat().format(fn),"wb")
pickle.dump(mat,fid)
fid.close()
saveId_l = ["rawMean","rawStd","croppedMean","croppedStd","diffMean","diffStd","pValues"]
plotTitle_l = ["Raw Images","Raw Std", "Cropped Images", "Cropped Std","Raw - Cropped","Raw - Cropped (Std)", "P-Values"]
confMatStat = [cmRaw_mean,cmRaw_std,cmCropped_mean,cmCropped_std,cmDiff_mean,cmDiff_std,p_values]
for saveId,plotTitle,matStat in zip(saveId_l,plotTitle_l,confMatStat):
appendStr = "{}_{}".format(saveId,cfg.uuid)
pklFn = "ntd_stats_{}.pkl".format(appendStr)
saveMat(pklFn,matStat)
pathToPlot = osp.join(cfg.PATH_TO_NTD_OUTPUT, 'ntd_stats_{}.png'.format(appendStr))
plot_confusion_matrix(np.copy(matStat), cfg.clsToSet,
pathToPlot, title=plotTitle,
cmap = plt.cm.bwr_r,vmin=-100,vmax=100)
print(p_values) | yIndicies[setID] = list(np.where(y_test == setID)[0]) # find where the setID's are | random_line_split |
spacetime.rs | // Copyright 2020 WHTCORPS INC
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#![allow(dead_code)]
//! Most bundles can mutate the EinsteinDB spacetime by transacting assertions:
//!
//! - they can add (and, eventually, retract and alter) recognized causetIds using the `:edb/causetid`
//! attribute;
//!
//! - they can add (and, eventually, retract and alter) schemaReplicant attributes using various `:edb/*`
//! attributes;
//!
//! - eventually, they will be able to add (and possibly retract) solitonId partitions using a EinsteinDB
//! equivalent (perhaps :edb/partition or :edb.partition/start) to Causetic's `:edb.install/partition`
//! attribute.
//!
//! This module recognizes, validates, applies, and reports on these mutations.
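//!
//! For example (keyword spellings are illustrative; the canonical ones come from the bootstrap
//! schemaReplicant), a transaction installing a new attribute might assert:
//!
//! - `[:edb/add "a" :edb/causetid :person/name]`
//! - `[:edb/add "a" :edb/valueType :edb.type/string]`
//! - `[:edb/add "a" :edb/cardinality :edb.cardinality/one]`
//!
//! and this module turns those assertions into an `Attribute` entry in the `AttributeMap`,
//! reporting the newly installed solitonId in a `SpacetimeReport`.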
use failure::ResultExt;
use std::collections::{BTreeMap, BTreeSet};
use std::collections::btree_map::Entry;
use add_retract_alter_set::{
AddRetractAlterSet,
};
use edbn::symbols;
use causetids;
use causetq_pull_promises::errors::{
DbErrorKind,
Result,
};
use allegrosql_promises::{
attribute,
SolitonId,
MinkowskiType,
MinkowskiValueType,
};
use causetq_allegrosql::{
SchemaReplicant,
AttributeMap,
};
use schemaReplicant::{
AttributeBuilder,
AttributeValidation,
};
use types::{
EAV,
};
/// An alteration to an attribute.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum AttributeAlteration {
/// From http://blog.Causetic.com/2014/01/schemaReplicant-alteration.html:
/// - rename attributes
/// - rename your own programmatic causetIdities (uses of :edb/causetid)
/// - add or remove indexes
Index,
/// - add or remove uniqueness constraints
Unique,
/// - change attribute cardinality
Cardinality,
/// - change whether history is retained for an attribute
NoHistory,
/// - change whether an attribute is treated as a component
IsComponent,
}
/// An alteration to an causetid.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum CausetIdAlteration {
CausetId(symbols::Keyword),
}
/// Summarizes changes to spacetime such as a `SchemaReplicant` and (in the future) a `PartitionMap`.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub struct SpacetimeReport {
// SolitonIds that were not present in the original `AttributeMap` that was mutated.
pub attributes_installed: BTreeSet<SolitonId>,
// SolitonIds that were present in the original `AttributeMap` that was mutated, together with a
// representation of the mutations that were applied.
pub attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>>,
// CausetIds that were installed into the `AttributeMap`.
pub causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration>,
}
impl SpacetimeReport {
pub fn attributes_did_change(&self) -> bool {
!(self.attributes_installed.is_empty() &&
self.attributes_altered.is_empty())
}
}
/// Update an `AttributeMap` in place given two sets of causetid and attribute retractions, which
/// together contain enough information to reason about a "schemaReplicant retraction".
///
/// SchemaReplicant may only be retracted if all of its necessary attributes are being retracted:
/// - :edb/causetid, :edb/valueType, :edb/cardinality.
///
/// Note that this is currently incomplete/flawed:
/// - we're allowing optional attributes to not be retracted and dangle afterwards
///
/// Returns a set of attribute retractions which do not involve schemaReplicant-defining attributes.
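/// For example (solitonId 65 is illustrative), retracting
/// `[65 :edb/causetid :person/name]`, `[65 :edb/valueType :edb.type/string]` and
/// `[65 :edb/cardinality :edb.cardinality/one]` in the same transaction removes solitonId 65
/// from the `AttributeMap`, whereas retracting only the value type and cardinality without the
/// corresponding `:edb/causetid` retraction is rejected as a bad schemaReplicant assertion.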
fn update_attribute_map_from_schemaReplicant_retractions(attribute_map: &mut AttributeMap, retractions: Vec<EAV>, causetId_retractions: &BTreeMap<SolitonId, symbols::Keyword>) -> Result<Vec<EAV>> {
// Process retractions of schemaReplicant attributes first. It's allowed to retract a schemaReplicant attribute
// if all of the schemaReplicant-defining schemaReplicant attributes are being retracted.
// A defining set of attributes is :edb/causetid, :edb/valueType, :edb/cardinality.
let mut filtered_retractions = vec![];
let mut suspect_retractions = vec![];
// Filter out sets of schemaReplicant altering retractions.
let mut eas = BTreeMap::new();
for (e, a, v) in retractions.into_iter() {
if causetids::is_a_schemaReplicant_attribute(a) {
eas.entry(e).or_insert(vec![]).push(a);
suspect_retractions.push((e, a, v));
} else {
filtered_retractions.push((e, a, v));
}
}
// TODO (see https://github.com/whtcorpsinc/edb/issues/796).
// Retraction of causetIds is allowed, but if an causetid names a schemaReplicant attribute, then we should enforce
// retraction of all of the associated schemaReplicant attributes.
// Unfortunately, our current in-memory schemaReplicant representation (namely, how we define an Attribute) is not currently
// rich enough: it lacks distinction between presence and absence, and instead assumes default values.
// Currently, in order to do this enforcement correctly, we'd need to inspect 'causets'.
// Here is an incorrect way to enforce this. It's incorrect because it prevents us from retracting non-"schemaReplicant naming" causetIds.
// for retracted_e in causetId_retractions.keys() {
// if !eas.contains_key(retracted_e) {
// bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting :edb/causetid of a schemaReplicant without retracting its defining attributes is not permitted.")));
// }
// }
for (e, a, v) in suspect_retractions.into_iter() {
let attributes = eas.get(&e).unwrap();
// Found a set of retractions which negate a schemaReplicant.
if attributes.contains(&causetids::DB_CARDINALITY) && attributes.contains(&causetids::DB_VALUE_TYPE) {
// Ensure that corresponding :edb/causetid is also being retracted at the same time.
if causetId_retractions.contains_key(&e) {
// Remove attributes corresponding to retracted attribute.
attribute_map.remove(&e);
} else {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting defining attributes of a schemaReplicant without retracting its :edb/causetid is not permitted.")));
}
} else {
filtered_retractions.push((e, a, v));
}
}
Ok(filtered_retractions)
}
/// Update a `AttributeMap` in place from the given `[e a typed_value]` triples.
///
/// This is suitable for producing an `AttributeMap` from the `schemaReplicant` materialized view, which does not
/// contain install and alter markers.
///
/// Returns a report summarizing the mutations that were applied.
pub fn update_attribute_map_from_causetid_triples(attribute_map: &mut AttributeMap, assertions: Vec<EAV>, retractions: Vec<EAV>) -> Result<SpacetimeReport> {
fn | (attribute_id: SolitonId, existing: &AttributeMap) -> AttributeBuilder {
existing.get(&attribute_id)
.map(AttributeBuilder::to_modify_attribute)
.unwrap_or_else(AttributeBuilder::default)
}
// Group mutations by impacted solitonId.
let mut builders: BTreeMap<SolitonId, AttributeBuilder> = BTreeMap::new();
// For retractions, we start with an attribute builder that's pre-populated with the existing
// attribute values. That allows us to check existing values and unset them.
for (solitonId, attr, ref value) in retractions {
let builder = builders.entry(solitonId).or_insert_with(|| attribute_builder_to_modify(solitonId, attribute_map));
match attr {
// You can only retract :edb/unique, :edb/isComponent; all others must be altered instead
// of retracted, or are not allowed to change.
causetids::DB_IS_COMPONENT => {
match value {
&MinkowskiType::Boolean(v) if builder.component == Some(v) => {
builder.component(false);
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/isComponent with the wrong value {:?}.", v)));
},
}
},
causetids::DB_UNIQUE => {
match *value {
MinkowskiType::Ref(u) => {
match u {
causetids::DB_UNIQUE_VALUE if builder.unique == Some(Some(attribute::Unique::Value)) => {
builder.non_unique();
},
causetids::DB_UNIQUE_CausetIDITY if builder.unique == Some(Some(attribute::Unique::CausetIdity)) => {
builder.non_unique();
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/unique with the wrong value {}.", v)));
},
}
},
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [:edb/retract _ :edb/unique :edb.unique/_] but got [:edb/retract {} :edb/unique {:?}]", solitonId, value)))
}
},
causetids::DB_VALUE_TYPE |
causetids::DB_CARDINALITY |
causetids::DB_INDEX |
causetids::DB_FULLTEXT |
causetids::DB_NO_HISTORY => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting attribute {} for instanton {} not permitted.", attr, solitonId)));
},
_ => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId)))
}
}
}
for (solitonId, attr, ref value) in assertions.into_iter() {
// For assertions, we can start with an empty attribute builder.
let builder = builders.entry(solitonId).or_insert_with(Default::default);
// TODO: improve error messages throughout.
match attr {
causetids::DB_VALUE_TYPE => {
match *value {
MinkowskiType::Ref(causetids::DB_TYPE_BOOLEAN) => { builder.value_type(MinkowskiValueType::Boolean); },
MinkowskiType::Ref(causetids::DB_TYPE_DOUBLE) => { builder.value_type(MinkowskiValueType::Double); },
MinkowskiType::Ref(causetids::DB_TYPE_INSTANT) => { builder.value_type(MinkowskiValueType::Instant); },
MinkowskiType::Ref(causetids::DB_TYPE_KEYWORD) => { builder.value_type(MinkowskiValueType::Keyword); },
MinkowskiType::Ref(causetids::DB_TYPE_LONG) => { builder.value_type(MinkowskiValueType::Long); },
MinkowskiType::Ref(causetids::DB_TYPE_REF) => { builder.value_type(MinkowskiValueType::Ref); },
MinkowskiType::Ref(causetids::DB_TYPE_STRING) => { builder.value_type(MinkowskiValueType::String); },
MinkowskiType::Ref(causetids::DB_TYPE_UUID) => { builder.value_type(MinkowskiValueType::Uuid); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/valueType :edb.type/*] but got [... :edb/valueType {:?}] for solitonId {} and attribute {}", value, solitonId, attr)))
}
},
causetids::DB_CARDINALITY => {
match *value {
MinkowskiType::Ref(causetids::DB_CARDINALITY_MANY) => { builder.multival(true); },
MinkowskiType::Ref(causetids::DB_CARDINALITY_ONE) => { builder.multival(false); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/cardinality :edb.cardinality/many|:edb.cardinality/one] but got [... :edb/cardinality {:?}]", value)))
}
},
causetids::DB_UNIQUE => {
match *value {
MinkowskiType::Ref(causetids::DB_UNIQUE_VALUE) => { builder.unique(attribute::Unique::Value); },
MinkowskiType::Ref(causetids::DB_UNIQUE_CausetIDITY) => { builder.unique(attribute::Unique::CausetIdity); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/unique :edb.unique/value|:edb.unique/causetIdity] but got [... :edb/unique {:?}]", value)))
}
},
causetids::DB_INDEX => {
match *value {
MinkowskiType::Boolean(x) => { builder.index(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/index true|false] but got [... :edb/index {:?}]", value)))
}
},
causetids::DB_FULLTEXT => {
match *value {
MinkowskiType::Boolean(x) => { builder.fulltext(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/fulltext true|false] but got [... :edb/fulltext {:?}]", value)))
}
},
causetids::DB_IS_COMPONENT => {
match *value {
MinkowskiType::Boolean(x) => { builder.component(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/isComponent true|false] but got [... :edb/isComponent {:?}]", value)))
}
},
causetids::DB_NO_HISTORY => {
match *value {
MinkowskiType::Boolean(x) => { builder.no_history(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/noHistory true|false] but got [... :edb/noHistory {:?}]", value)))
}
},
_ => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId)))
}
}
};
let mut attributes_installed: BTreeSet<SolitonId> = BTreeSet::default();
let mut attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>> = BTreeMap::default();
for (solitonId, builder) in builders.into_iter() {
match attribute_map.entry(solitonId) {
Entry::Vacant(entry) => {
// Validate once…
builder.validate_install_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for new attribute with solitonId {} is not valid", solitonId)))?;
// … and twice, now we have the Attribute.
let a = builder.build();
a.validate(|| solitonId.to_string())?;
entry.insert(a);
attributes_installed.insert(solitonId);
},
Entry::Occupied(mut entry) => {
builder.validate_alter_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for existing attribute with solitonId {} is not valid", solitonId)))?;
let mutations = builder.mutate(entry.get_mut());
attributes_altered.insert(solitonId, mutations);
},
}
}
Ok(SpacetimeReport {
attributes_installed: attributes_installed,
attributes_altered: attributes_altered,
causetIds_altered: BTreeMap::default(),
})
}
/// Update a `SchemaReplicant` in place from the given `[e a typed_value added]` quadruples.
///
/// This layer enforces that causetid assertions of the form [solitonId :edb/causetid ...] (as distinct from
/// attribute assertions) are present and correct.
///
/// This is suitable for mutating a `SchemaReplicant` from an applied transaction.
///
/// Returns a report summarizing the mutations that were applied.
pub fn update_schemaReplicant_from_causetid_quadruples<U>(schemaReplicant: &mut SchemaReplicant, assertions: U) -> Result<SpacetimeReport>
where U: IntoIterator<Item=(SolitonId, SolitonId, MinkowskiType, bool)> {
// Group attribute assertions into asserted, retracted, and updated. We assume all our
// attribute assertions are :edb/cardinality :edb.cardinality/one (so they'll only be added or
// retracted at most once), which means all attribute alterations are simple changes from an old
// value to a new value.
let mut attribute_set: AddRetractAlterSet<(SolitonId, SolitonId), MinkowskiType> = AddRetractAlterSet::default();
let mut causetId_set: AddRetractAlterSet<SolitonId, symbols::Keyword> = AddRetractAlterSet::default();
for (e, a, typed_value, added) in assertions.into_iter() {
// Here we handle :edb/causetid assertions.
if a == causetids::DB_CausetID {
if let MinkowskiType::Keyword(ref keyword) = typed_value {
causetId_set.witness(e, keyword.as_ref().clone(), added);
continue
} else {
// Something is terribly wrong: the schemaReplicant ensures we have a keyword.
unreachable!();
}
}
attribute_set.witness((e, a), typed_value, added);
}
// Collect triples.
let retracted_triples = attribute_set.retracted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value));
let asserted_triples = attribute_set.asserted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value));
let altered_triples = attribute_set.altered.into_iter().map(|((e, a), (_old_value, new_value))| (e, a, new_value));
// First we process retractions which remove schemaReplicant.
// This operation consumes our current list of attribute retractions, producing a filtered one.
let non_schemaReplicant_retractions = update_attribute_map_from_schemaReplicant_retractions(&mut schemaReplicant.attribute_map,
retracted_triples.collect(),
&causetId_set.retracted)?;
// Now we process all other retractions.
let report = update_attribute_map_from_causetid_triples(&mut schemaReplicant.attribute_map,
asserted_triples.chain(altered_triples).collect(),
non_schemaReplicant_retractions)?;
let mut causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration> = BTreeMap::new();
// Asserted, altered, or retracted :edb/causetIds update the relevant causetids.
for (solitonId, causetid) in causetId_set.asserted {
schemaReplicant.causetid_map.insert(solitonId, causetid.clone());
schemaReplicant.causetId_map.insert(causetid.clone(), solitonId);
causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(causetid.clone()));
}
for (solitonId, (old_causetId, new_causetId)) in causetId_set.altered {
schemaReplicant.causetid_map.insert(solitonId, new_causetId.clone()); // Overwrite existing.
schemaReplicant.causetId_map.remove(&old_causetId); // Remove old.
schemaReplicant.causetId_map.insert(new_causetId.clone(), solitonId); // Insert new.
causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(new_causetId.clone()));
}
for (solitonId, causetid) in &causetId_set.retracted {
schemaReplicant.causetid_map.remove(solitonId);
schemaReplicant.causetId_map.remove(causetid);
causetIds_altered.insert(*solitonId, CausetIdAlteration::CausetId(causetid.clone()));
}
// Component attributes need to change if either:
// - a component attribute changed
// - a schemaReplicant attribute that was a component was retracted
// These two checks are a rather heavy-handed way of keeping schemaReplicant's
// component_attributes up-to-date: most of the time we'll rebuild it
// even though it's not necessary (e.g. a schemaReplicant attribute that's _not_
// a component was removed, or a non-component related attribute changed).
if report.attributes_did_change() || causetId_set.retracted.len() > 0 {
schemaReplicant.update_component_attributes();
}
Ok(SpacetimeReport {
causetIds_altered: causetIds_altered,
.. report
})
}
| attribute_builder_to_modify | identifier_name |
spacetime.rs | // Copyright 2020 WHTCORPS INC
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#![allow(dead_code)]
//! Most bundles can mutate the EinsteinDB spacetime by transacting assertions:
//!
//! - they can add (and, eventually, retract and alter) recognized causetIds using the `:edb/causetid`
//! attribute;
//!
//! - they can add (and, eventually, retract and alter) schemaReplicant attributes using various `:edb/*`
//! attributes;
//!
//! - eventually, they will be able to add (and possibly retract) solitonId partitions using a EinsteinDB
//! equivalent (perhaps :edb/partition or :edb.partition/start) to Causetic's `:edb.install/partition`
//! attribute.
//!
//! This module recognizes, validates, applies, and reports on these mutations.
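//!
//! For example (keyword spellings are illustrative; the canonical ones come from the bootstrap
//! schemaReplicant), a transaction installing a new attribute might assert:
//!
//! - `[:edb/add "a" :edb/causetid :person/name]`
//! - `[:edb/add "a" :edb/valueType :edb.type/string]`
//! - `[:edb/add "a" :edb/cardinality :edb.cardinality/one]`
//!
//! and this module turns those assertions into an `Attribute` entry in the `AttributeMap`,
//! reporting the newly installed solitonId in a `SpacetimeReport`.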
use failure::ResultExt;
use std::collections::{BTreeMap, BTreeSet};
use std::collections::btree_map::Entry;
use add_retract_alter_set::{
AddRetractAlterSet,
};
use edbn::symbols;
use causetids;
use causetq_pull_promises::errors::{
DbErrorKind,
Result,
};
use allegrosql_promises::{
attribute,
SolitonId,
MinkowskiType,
MinkowskiValueType,
};
use causetq_allegrosql::{
SchemaReplicant,
AttributeMap,
};
use schemaReplicant::{
AttributeBuilder,
AttributeValidation,
};
use types::{
EAV,
};
/// An alteration to an attribute.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum AttributeAlteration {
/// From http://blog.Causetic.com/2014/01/schemaReplicant-alteration.html:
/// - rename attributes
/// - rename your own programmatic causetIdities (uses of :edb/causetid)
/// - add or remove indexes
Index,
/// - add or remove uniqueness constraints
Unique,
/// - change attribute cardinality
Cardinality,
/// - change whether history is retained for an attribute
NoHistory,
/// - change whether an attribute is treated as a component
IsComponent,
}
/// An alteration to an causetid.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum CausetIdAlteration {
CausetId(symbols::Keyword),
}
/// Summarizes changes to spacetime such as a `SchemaReplicant` and (in the future) a `PartitionMap`.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub struct SpacetimeReport {
// SolitonIds that were not present in the original `AttributeMap` that was mutated.
pub attributes_installed: BTreeSet<SolitonId>,
// SolitonIds that were present in the original `AttributeMap` that was mutated, together with a
// representation of the mutations that were applied.
pub attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>>,
// CausetIds that were installed into the `AttributeMap`.
pub causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration>,
}
impl SpacetimeReport {
pub fn attributes_did_change(&self) -> bool {
!(self.attributes_installed.is_empty() &&
self.attributes_altered.is_empty())
}
}
/// Update an `AttributeMap` in place given two sets of causetid and attribute retractions, which
/// together contain enough information to reason about a "schemaReplicant retraction".
///
/// SchemaReplicant may only be retracted if all of its necessary attributes are being retracted:
/// - :edb/causetid, :edb/valueType, :edb/cardinality.
///
/// Note that this is currently incomplete/flawed:
/// - we're allowing optional attributes to not be retracted and dangle afterwards
///
/// Returns a set of attribute retractions which do not involve schemaReplicant-defining attributes.
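/// For example (solitonId 65 is illustrative), retracting
/// `[65 :edb/causetid :person/name]`, `[65 :edb/valueType :edb.type/string]` and
/// `[65 :edb/cardinality :edb.cardinality/one]` in the same transaction removes solitonId 65
/// from the `AttributeMap`, whereas retracting only the value type and cardinality without the
/// corresponding `:edb/causetid` retraction is rejected as a bad schemaReplicant assertion.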
fn update_attribute_map_from_schemaReplicant_retractions(attribute_map: &mut AttributeMap, retractions: Vec<EAV>, causetId_retractions: &BTreeMap<SolitonId, symbols::Keyword>) -> Result<Vec<EAV>> {
// Process retractions of schemaReplicant attributes first. It's allowed to retract a schemaReplicant attribute
// if all of the schemaReplicant-defining schemaReplicant attributes are being retracted.
// A defining set of attributes is :edb/causetid, :edb/valueType, :edb/cardinality.
let mut filtered_retractions = vec![];
let mut suspect_retractions = vec![];
// Filter out sets of schemaReplicant altering retractions.
let mut eas = BTreeMap::new();
for (e, a, v) in retractions.into_iter() {
if causetids::is_a_schemaReplicant_attribute(a) {
eas.entry(e).or_insert(vec![]).push(a);
suspect_retractions.push((e, a, v));
} else {
filtered_retractions.push((e, a, v));
}
}
// TODO (see https://github.com/whtcorpsinc/edb/issues/796).
// Retraction of causetIds is allowed, but if an causetid names a schemaReplicant attribute, then we should enforce
// retraction of all of the associated schemaReplicant attributes.
// Unfortunately, our current in-memory schemaReplicant representation (namely, how we define an Attribute) is not currently
// rich enough: it lacks distinction between presence and absence, and instead assumes default values.
// Currently, in order to do this enforcement correctly, we'd need to inspect 'causets'.
// Here is an incorrect way to enforce this. It's incorrect because it prevents us from retracting non-"schemaReplicant naming" causetIds.
// for retracted_e in causetId_retractions.keys() {
// if !eas.contains_key(retracted_e) {
// bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting :edb/causetid of a schemaReplicant without retracting its defining attributes is not permitted.")));
// }
// }
for (e, a, v) in suspect_retractions.into_iter() {
let attributes = eas.get(&e).unwrap();
// Found a set of retractions which negate a schemaReplicant.
if attributes.contains(&causetids::DB_CARDINALITY) && attributes.contains(&causetids::DB_VALUE_TYPE) {
// Ensure that corresponding :edb/causetid is also being retracted at the same time.
if causetId_retractions.contains_key(&e) {
// Remove attributes corresponding to retracted attribute.
attribute_map.remove(&e);
} else {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting defining attributes of a schemaReplicant without retracting its :edb/causetid is not permitted.")));
}
} else {
filtered_retractions.push((e, a, v));
}
}
Ok(filtered_retractions)
}
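// Illustrative sketch of the rule above (hypothetical solitonId 65; assumes EAV is
// (SolitonId, SolitonId, MinkowskiType) and that `symbols::Keyword::namespaced` is available):
// retracting :edb/valueType and :edb/cardinality together with the matching :edb/causetid
// removes the attribute from the map and passes nothing through.
//
// let retractions = vec![
//     (65, causetids::DB_VALUE_TYPE, MinkowskiType::Ref(causetids::DB_TYPE_STRING)),
//     (65, causetids::DB_CARDINALITY, MinkowskiType::Ref(causetids::DB_CARDINALITY_ONE)),
// ];
// let mut causetId_retractions = BTreeMap::new();
// causetId_retractions.insert(65, symbols::Keyword::namespaced("hypothetical", "attr"));
// let leftover = update_attribute_map_from_schemaReplicant_retractions(
//     &mut attribute_map, retractions, &causetId_retractions)?;
// assert!(leftover.is_empty());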
/// Update an `AttributeMap` in place from the given `[e a typed_value]` triples.
/// | fn attribute_builder_to_modify(attribute_id: SolitonId, existing: &AttributeMap) -> AttributeBuilder {
existing.get(&attribute_id)
.map(AttributeBuilder::to_modify_attribute)
.unwrap_or_else(AttributeBuilder::default)
}
// Group mutations by impacted solitonId.
let mut builders: BTreeMap<SolitonId, AttributeBuilder> = BTreeMap::new();
// For retractions, we start with an attribute builder that's pre-populated with the existing
// attribute values. That allows us to check existing values and unset them.
for (solitonId, attr, ref value) in retractions {
let builder = builders.entry(solitonId).or_insert_with(|| attribute_builder_to_modify(solitonId, attribute_map));
match attr {
// You can only retract :edb/unique, :edb/isComponent; all others must be altered instead
// of retracted, or are not allowed to change.
causetids::DB_IS_COMPONENT => {
match value {
&MinkowskiType::Boolean(v) if builder.component == Some(v) => {
builder.component(false);
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/isComponent with the wrong value {:?}.", v)));
},
}
},
causetids::DB_UNIQUE => {
match *value {
MinkowskiType::Ref(u) => {
match u {
causetids::DB_UNIQUE_VALUE if builder.unique == Some(Some(attribute::Unique::Value)) => {
builder.non_unique();
},
causetids::DB_UNIQUE_CausetIDITY if builder.unique == Some(Some(attribute::Unique::CausetIdity)) => {
builder.non_unique();
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/unique with the wrong value {}.", v)));
},
}
},
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [:edb/retract _ :edb/unique :edb.unique/_] but got [:edb/retract {} :edb/unique {:?}]", solitonId, value)))
}
},
causetids::DB_VALUE_TYPE |
causetids::DB_CARDINALITY |
causetids::DB_INDEX |
causetids::DB_FULLTEXT |
causetids::DB_NO_HISTORY => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting attribute {} for instanton {} not permitted.", attr, solitonId)));
},
_ => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId)))
}
}
}
for (solitonId, attr, ref value) in assertions.into_iter() {
// For assertions, we can start with an empty attribute builder.
let builder = builders.entry(solitonId).or_insert_with(Default::default);
// TODO: improve error messages throughout.
match attr {
causetids::DB_VALUE_TYPE => {
match *value {
MinkowskiType::Ref(causetids::DB_TYPE_BOOLEAN) => { builder.value_type(MinkowskiValueType::Boolean); },
MinkowskiType::Ref(causetids::DB_TYPE_DOUBLE) => { builder.value_type(MinkowskiValueType::Double); },
MinkowskiType::Ref(causetids::DB_TYPE_INSTANT) => { builder.value_type(MinkowskiValueType::Instant); },
MinkowskiType::Ref(causetids::DB_TYPE_KEYWORD) => { builder.value_type(MinkowskiValueType::Keyword); },
MinkowskiType::Ref(causetids::DB_TYPE_LONG) => { builder.value_type(MinkowskiValueType::Long); },
MinkowskiType::Ref(causetids::DB_TYPE_REF) => { builder.value_type(MinkowskiValueType::Ref); },
MinkowskiType::Ref(causetids::DB_TYPE_STRING) => { builder.value_type(MinkowskiValueType::String); },
MinkowskiType::Ref(causetids::DB_TYPE_UUID) => { builder.value_type(MinkowskiValueType::Uuid); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/valueType :edb.type/*] but got [... :edb/valueType {:?}] for solitonId {} and attribute {}", value, solitonId, attr)))
}
},
causetids::DB_CARDINALITY => {
match *value {
MinkowskiType::Ref(causetids::DB_CARDINALITY_MANY) => { builder.multival(true); },
MinkowskiType::Ref(causetids::DB_CARDINALITY_ONE) => { builder.multival(false); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/cardinality :edb.cardinality/many|:edb.cardinality/one] but got [... :edb/cardinality {:?}]", value)))
}
},
causetids::DB_UNIQUE => {
match *value {
MinkowskiType::Ref(causetids::DB_UNIQUE_VALUE) => { builder.unique(attribute::Unique::Value); },
MinkowskiType::Ref(causetids::DB_UNIQUE_CausetIDITY) => { builder.unique(attribute::Unique::CausetIdity); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/unique :edb.unique/value|:edb.unique/causetIdity] but got [... :edb/unique {:?}]", value)))
}
},
causetids::DB_INDEX => {
match *value {
MinkowskiType::Boolean(x) => { builder.index(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/index true|false] but got [... :edb/index {:?}]", value)))
}
},
causetids::DB_FULLTEXT => {
match *value {
MinkowskiType::Boolean(x) => { builder.fulltext(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/fulltext true|false] but got [... :edb/fulltext {:?}]", value)))
}
},
causetids::DB_IS_COMPONENT => {
match *value {
MinkowskiType::Boolean(x) => { builder.component(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/isComponent true|false] but got [... :edb/isComponent {:?}]", value)))
}
},
causetids::DB_NO_HISTORY => {
match *value {
MinkowskiType::Boolean(x) => { builder.no_history(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/noHistory true|false] but got [... :edb/noHistory {:?}]", value)))
}
},
_ => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId)))
}
}
};
let mut attributes_installed: BTreeSet<SolitonId> = BTreeSet::default();
let mut attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>> = BTreeMap::default();
for (solitonId, builder) in builders.into_iter() {
match attribute_map.entry(solitonId) {
Entry::Vacant(entry) => {
// Validate once…
builder.validate_install_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for new attribute with solitonId {} is not valid", solitonId)))?;
// … and twice, now we have the Attribute.
let a = builder.build();
a.validate(|| solitonId.to_string())?;
entry.insert(a);
attributes_installed.insert(solitonId);
},
Entry::Occupied(mut entry) => {
builder.validate_alter_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for existing attribute with solitonId {} is not valid", solitonId)))?;
let mutations = builder.mutate(entry.get_mut());
attributes_altered.insert(solitonId, mutations);
},
}
}
Ok(SpacetimeReport {
attributes_installed: attributes_installed,
attributes_altered: attributes_altered,
causetIds_altered: BTreeMap::default(),
})
}
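// Illustrative sketch (hypothetical solitonId 100): installing a brand-new attribute is just
// the defining assertions routed through the builders above, and it shows up in
// `attributes_installed` on the returned report.
//
// let assertions = vec![
//     (100, causetids::DB_VALUE_TYPE, MinkowskiType::Ref(causetids::DB_TYPE_STRING)),
//     (100, causetids::DB_CARDINALITY, MinkowskiType::Ref(causetids::DB_CARDINALITY_ONE)),
//     (100, causetids::DB_INDEX, MinkowskiType::Boolean(true)),
// ];
// let report = update_attribute_map_from_causetid_triples(&mut attribute_map, assertions, vec![])?;
// assert!(report.attributes_installed.contains(&100));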
/// Update a `SchemaReplicant` in place from the given `[e a typed_value added]` quadruples.
///
/// This layer enforces that causetid assertions of the form [solitonId :edb/causetid ...] (as distinct from
/// attribute assertions) are present and correct.
///
/// This is suitable for mutating a `SchemaReplicant` from an applied transaction.
///
/// Returns a report summarizing the mutations that were applied.
pub fn update_schemaReplicant_from_causetid_quadruples<U>(schemaReplicant: &mut SchemaReplicant, assertions: U) -> Result<SpacetimeReport>
where U: IntoIterator<Item=(SolitonId, SolitonId, MinkowskiType, bool)> {
// Group attribute assertions into asserted, retracted, and updated. We assume all our
// attribute assertions are :edb/cardinality :edb.cardinality/one (so they'll only be added or
// retracted at most once), which means all attribute alterations are simple changes from an old
// value to a new value.
let mut attribute_set: AddRetractAlterSet<(SolitonId, SolitonId), MinkowskiType> = AddRetractAlterSet::default();
let mut causetId_set: AddRetractAlterSet<SolitonId, symbols::Keyword> = AddRetractAlterSet::default();
for (e, a, typed_value, added) in assertions.into_iter() {
// Here we handle :edb/causetid assertions.
if a == causetids::DB_CausetID {
if let MinkowskiType::Keyword(ref keyword) = typed_value {
causetId_set.witness(e, keyword.as_ref().clone(), added);
continue
} else {
// Something is terribly wrong: the schemaReplicant ensures we have a keyword.
unreachable!();
}
}
attribute_set.witness((e, a), typed_value, added);
}
// Collect triples.
let retracted_triples = attribute_set.retracted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value));
let asserted_triples = attribute_set.asserted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value));
let altered_triples = attribute_set.altered.into_iter().map(|((e, a), (_old_value, new_value))| (e, a, new_value));
// First we process retractions which remove schemaReplicant.
// This operation consumes our current list of attribute retractions, producing a filtered one.
let non_schemaReplicant_retractions = update_attribute_map_from_schemaReplicant_retractions(&mut schemaReplicant.attribute_map,
retracted_triples.collect(),
&causetId_set.retracted)?;
// Now we process all other retractions.
let report = update_attribute_map_from_causetid_triples(&mut schemaReplicant.attribute_map,
asserted_triples.chain(altered_triples).collect(),
non_schemaReplicant_retractions)?;
let mut causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration> = BTreeMap::new();
// Asserted, altered, or retracted :edb/causetIds update the relevant causetids.
for (solitonId, causetid) in causetId_set.asserted {
schemaReplicant.causetid_map.insert(solitonId, causetid.clone());
schemaReplicant.causetId_map.insert(causetid.clone(), solitonId);
causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(causetid.clone()));
}
for (solitonId, (old_causetId, new_causetId)) in causetId_set.altered {
schemaReplicant.causetid_map.insert(solitonId, new_causetId.clone()); // Overwrite existing.
schemaReplicant.causetId_map.remove(&old_causetId); // Remove old.
schemaReplicant.causetId_map.insert(new_causetId.clone(), solitonId); // Insert new.
causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(new_causetId.clone()));
}
for (solitonId, causetid) in &causetId_set.retracted {
schemaReplicant.causetid_map.remove(solitonId);
schemaReplicant.causetId_map.remove(causetid);
causetIds_altered.insert(*solitonId, CausetIdAlteration::CausetId(causetid.clone()));
}
// Component attributes need to change if either:
// - a component attribute changed
// - a schemaReplicant attribute that was a component was retracted
// These two checks are a rather heavy-handed way of keeping schemaReplicant's
// component_attributes up-to-date: most of the time we'll rebuild it
// even though it's not necessary (e.g. a schemaReplicant attribute that's _not_
// a component was removed, or a non-component related attribute changed).
if report.attributes_did_change() || causetId_set.retracted.len() > 0 {
schemaReplicant.update_component_attributes();
}
Ok(SpacetimeReport {
causetIds_altered: causetIds_altered,
.. report
})
} | /// This is suitable for producing an `AttributeMap` from the `schemaReplicant` materialized view, which does not
/// contain install and alter markers.
///
/// Returns a report summarizing the mutations that were applied.
pub fn update_attribute_map_from_causetid_triples(attribute_map: &mut AttributeMap, assertions: Vec<EAV>, retractions: Vec<EAV>) -> Result<SpacetimeReport> { | random_line_split |
spacetime.rs | // Copyright 2020 WHTCORPS INC
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#![allow(dead_code)]
//! Most bundles can mutate the EinsteinDB spacetime by transacting assertions:
//!
//! - they can add (and, eventually, retract and alter) recognized causetIds using the `:edb/causetid`
//! attribute;
//!
//! - they can add (and, eventually, retract and alter) schemaReplicant attributes using various `:edb/*`
//! attributes;
//!
//! - eventually, they will be able to add (and possibly retract) solitonId partitions using an EinsteinDB
//! equivalent (perhaps :edb/partition or :edb.partition/start) to Causetic's `:edb.install/partition`
//! attribute.
//!
//! This module recognizes, validates, applies, and reports on these mutations.
use failure::ResultExt;
use std::collections::{BTreeMap, BTreeSet};
use std::collections::btree_map::Entry;
use add_retract_alter_set::{
AddRetractAlterSet,
};
use edbn::symbols;
use causetids;
use causetq_pull_promises::errors::{
DbErrorKind,
Result,
};
use allegrosql_promises::{
attribute,
SolitonId,
MinkowskiType,
MinkowskiValueType,
};
use causetq_allegrosql::{
SchemaReplicant,
AttributeMap,
};
use schemaReplicant::{
AttributeBuilder,
AttributeValidation,
};
use types::{
EAV,
};
/// An alteration to an attribute.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum AttributeAlteration {
/// From http://blog.Causetic.com/2014/01/schemaReplicant-alteration.html:
/// - rename attributes
/// - rename your own programmatic causetIdities (uses of :edb/causetid)
/// - add or remove indexes
Index,
/// - add or remove uniqueness constraints
Unique,
/// - change attribute cardinality
Cardinality,
/// - change whether history is retained for an attribute
NoHistory,
/// - change whether an attribute is treated as a component
IsComponent,
}
/// An alteration to a causetid.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum CausetIdAlteration {
CausetId(symbols::Keyword),
}
/// Summarizes changes to spacetime such as a `SchemaReplicant` and (in the future) a `PartitionMap`.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub struct SpacetimeReport {
// SolitonIds that were not present in the original `AttributeMap` that was mutated.
pub attributes_installed: BTreeSet<SolitonId>,
// SolitonIds that were present in the original `AttributeMap` that was mutated, together with a
// representation of the mutations that were applied.
pub attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>>,
// CausetIds that were installed into the `AttributeMap`.
pub causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration>,
}
impl SpacetimeReport {
pub fn attributes_did_change(&self) -> bool {
!(self.attributes_installed.is_empty() &&
self.attributes_altered.is_empty())
}
}
/// Update an `AttributeMap` in place given two sets of causetid and attribute retractions, which
/// together contain enough information to reason about a "schemaReplicant retraction".
///
/// SchemaReplicant may only be retracted if all of its necessary attributes are being retracted:
/// - :edb/causetid, :edb/valueType, :edb/cardinality.
///
/// Note that this is currently incomplete/flawed:
/// - we're allowing optional attributes to not be retracted and dangle afterwards
///
/// Returns a set of attribute retractions which do not involve schemaReplicant-defining attributes.
fn update_attribute_map_from_schemaReplicant_retractions(attribute_map: &mut AttributeMap, retractions: Vec<EAV>, causetId_retractions: &BTreeMap<SolitonId, symbols::Keyword>) -> Result<Vec<EAV>> {
// Process retractions of schemaReplicant attributes first. It's allowed to retract a schemaReplicant attribute
// if all of the schemaReplicant-defining schemaReplicant attributes are being retracted.
// A defining set of attributes is :edb/causetid, :edb/valueType, :edb/cardinality.
let mut filtered_retractions = vec![];
let mut suspect_retractions = vec![];
// Filter out sets of schemaReplicant altering retractions.
let mut eas = BTreeMap::new();
for (e, a, v) in retractions.into_iter() {
if causetids::is_a_schemaReplicant_attribute(a) {
eas.entry(e).or_insert(vec![]).push(a);
suspect_retractions.push((e, a, v));
} else {
filtered_retractions.push((e, a, v));
}
}
// TODO (see https://github.com/whtcorpsinc/edb/issues/796).
// Retraction of causetIds is allowed, but if a causetid names a schemaReplicant attribute, then we should enforce
// retraction of all of the associated schemaReplicant attributes.
// Unfortunately, our current in-memory schemaReplicant representation (namely, how we define an Attribute) is not currently
// rich enough: it lacks distinction between presence and absence, and instead assumes default values.
// Currently, in order to do this enforcement correctly, we'd need to inspect 'causets'.
// Here is an incorrect way to enforce this. It's incorrect because it prevents us from retracting non-"schemaReplicant naming" causetIds.
// for retracted_e in causetId_retractions.keys() {
// if !eas.contains_key(retracted_e) {
// bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting :edb/causetid of a schemaReplicant without retracting its defining attributes is not permitted.")));
// }
// }
for (e, a, v) in suspect_retractions.into_iter() {
let attributes = eas.get(&e).unwrap();
// Found a set of retractions which negate a schemaReplicant.
if attributes.contains(&causetids::DB_CARDINALITY) && attributes.contains(&causetids::DB_VALUE_TYPE) {
// Ensure that corresponding :edb/causetid is also being retracted at the same time.
if causetId_retractions.contains_key(&e) {
// Remove attributes corresponding to retracted attribute.
attribute_map.remove(&e);
} else {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting defining attributes of a schemaReplicant without retracting its :edb/causetid is not permitted.")));
}
} else {
filtered_retractions.push((e, a, v));
}
}
Ok(filtered_retractions)
}
/// Update an `AttributeMap` in place from the given `[e a typed_value]` triples.
///
/// This is suitable for producing an `AttributeMap` from the `schemaReplicant` materialized view, which does not
/// contain install and alter markers.
///
/// Returns a report summarizing the mutations that were applied.
pub fn update_attribute_map_from_causetid_triples(attribute_map: &mut AttributeMap, assertions: Vec<EAV>, retractions: Vec<EAV>) -> Result<SpacetimeReport> | / Update a `SchemaReplicant` in place from the given `[e a typed_value added]` quadruples.
///
/// This layer enforces that causetid assertions of the form [solitonId :edb/causetid ...] (as distinct from
/// attribute assertions) are present and correct.
///
/// This is suitable for mutating a `SchemaReplicant` from an applied transaction.
///
/// Returns a report summarizing the mutations that were applied.
pub fn update_schemaReplicant_from_causetid_quadruples<U>(schemaReplicant: &mut SchemaReplicant, assertions: U) -> Result<SpacetimeReport>
where U: IntoIterator<Item=(SolitonId, SolitonId, MinkowskiType, bool)> {
// Group attribute assertions into asserted, retracted, and updated. We assume all our
// attribute assertions are :edb/cardinality :edb.cardinality/one (so they'll only be added or
// retracted at most once), which means all attribute alterations are simple changes from an old
// value to a new value.
let mut attribute_set: AddRetractAlterSet<(SolitonId, SolitonId), MinkowskiType> = AddRetractAlterSet::default();
let mut causetId_set: AddRetractAlterSet<SolitonId, symbols::Keyword> = AddRetractAlterSet::default();
for (e, a, typed_value, added) in assertions.into_iter() {
// Here we handle :edb/causetid assertions.
if a == causetids::DB_CausetID {
if let MinkowskiType::Keyword(ref keyword) = typed_value {
causetId_set.witness(e, keyword.as_ref().clone(), added);
continue
} else {
// Something is terribly wrong: the schemaReplicant ensures we have a keyword.
unreachable!();
}
}
attribute_set.witness((e, a), typed_value, added);
}
// Collect triples.
let retracted_triples = attribute_set.retracted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value));
let asserted_triples = attribute_set.asserted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value));
let altered_triples = attribute_set.altered.into_iter().map(|((e, a), (_old_value, new_value))| (e, a, new_value));
// First we process retractions which remove schemaReplicant.
// This operation consumes our current list of attribute retractions, producing a filtered one.
let non_schemaReplicant_retractions = update_attribute_map_from_schemaReplicant_retractions(&mut schemaReplicant.attribute_map,
retracted_triples.collect(),
&causetId_set.retracted)?;
// Now we process all other retractions.
let report = update_attribute_map_from_causetid_triples(&mut schemaReplicant.attribute_map,
asserted_triples.chain(altered_triples).collect(),
non_schemaReplicant_retractions)?;
let mut causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration> = BTreeMap::new();
// Asserted, altered, or retracted :edb/causetIds update the relevant causetids.
for (solitonId, causetid) in causetId_set.asserted {
schemaReplicant.causetid_map.insert(solitonId, causetid.clone());
schemaReplicant.causetId_map.insert(causetid.clone(), solitonId);
causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(causetid.clone()));
}
for (solitonId, (old_causetId, new_causetId)) in causetId_set.altered {
schemaReplicant.causetid_map.insert(solitonId, new_causetId.clone()); // Overwrite existing.
schemaReplicant.causetId_map.remove(&old_causetId); // Remove old.
schemaReplicant.causetId_map.insert(new_causetId.clone(), solitonId); // Insert new.
causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(new_causetId.clone()));
}
for (solitonId, causetid) in &causetId_set.retracted {
schemaReplicant.causetid_map.remove(solitonId);
schemaReplicant.causetId_map.remove(causetid);
causetIds_altered.insert(*solitonId, CausetIdAlteration::CausetId(causetid.clone()));
}
// Component attributes need to change if either:
// - a component attribute changed
// - a schemaReplicant attribute that was a component was retracted
// These two checks are a rather heavy-handed way of keeping schemaReplicant's
// component_attributes up-to-date: most of the time we'll rebuild it
// even though it's not necessary (e.g. a schemaReplicant attribute that's _not_
// a component was removed, or a non-component related attribute changed).
if report.attributes_did_change() || causetId_set.retracted.len() > 0 {
schemaReplicant.update_component_attributes();
}
Ok(SpacetimeReport {
causetIds_altered: causetIds_altered,
.. report
})
}
| {
fn attribute_builder_to_modify(attribute_id: SolitonId, existing: &AttributeMap) -> AttributeBuilder {
existing.get(&attribute_id)
.map(AttributeBuilder::to_modify_attribute)
.unwrap_or_else(AttributeBuilder::default)
}
// Group mutations by impacted solitonId.
let mut builders: BTreeMap<SolitonId, AttributeBuilder> = BTreeMap::new();
// For retractions, we start with an attribute builder that's pre-populated with the existing
// attribute values. That allows us to check existing values and unset them.
for (solitonId, attr, ref value) in retractions {
let builder = builders.entry(solitonId).or_insert_with(|| attribute_builder_to_modify(solitonId, attribute_map));
match attr {
// You can only retract :edb/unique, :edb/isComponent; all others must be altered instead
// of retracted, or are not allowed to change.
causetids::DB_IS_COMPONENT => {
match value {
&MinkowskiType::Boolean(v) if builder.component == Some(v) => {
builder.component(false);
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/isComponent with the wrong value {:?}.", v)));
},
}
},
causetids::DB_UNIQUE => {
match *value {
MinkowskiType::Ref(u) => {
match u {
causetids::DB_UNIQUE_VALUE if builder.unique == Some(Some(attribute::Unique::Value)) => {
builder.non_unique();
},
causetids::DB_UNIQUE_CausetIDITY if builder.unique == Some(Some(attribute::Unique::CausetIdity)) => {
builder.non_unique();
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/unique with the wrong value {}.", v)));
},
}
},
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [:edb/retract _ :edb/unique :edb.unique/_] but got [:edb/retract {} :edb/unique {:?}]", solitonId, value)))
}
},
causetids::DB_VALUE_TYPE |
causetids::DB_CARDINALITY |
causetids::DB_INDEX |
causetids::DB_FULLTEXT |
causetids::DB_NO_HISTORY => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting attribute {} for instanton {} not permitted.", attr, solitonId)));
},
_ => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId)))
}
}
}
for (solitonId, attr, ref value) in assertions.into_iter() {
// For assertions, we can start with an empty attribute builder.
let builder = builders.entry(solitonId).or_insert_with(Default::default);
// TODO: improve error messages throughout.
match attr {
causetids::DB_VALUE_TYPE => {
match *value {
MinkowskiType::Ref(causetids::DB_TYPE_BOOLEAN) => { builder.value_type(MinkowskiValueType::Boolean); },
MinkowskiType::Ref(causetids::DB_TYPE_DOUBLE) => { builder.value_type(MinkowskiValueType::Double); },
MinkowskiType::Ref(causetids::DB_TYPE_INSTANT) => { builder.value_type(MinkowskiValueType::Instant); },
MinkowskiType::Ref(causetids::DB_TYPE_KEYWORD) => { builder.value_type(MinkowskiValueType::Keyword); },
MinkowskiType::Ref(causetids::DB_TYPE_LONG) => { builder.value_type(MinkowskiValueType::Long); },
MinkowskiType::Ref(causetids::DB_TYPE_REF) => { builder.value_type(MinkowskiValueType::Ref); },
MinkowskiType::Ref(causetids::DB_TYPE_STRING) => { builder.value_type(MinkowskiValueType::String); },
MinkowskiType::Ref(causetids::DB_TYPE_UUID) => { builder.value_type(MinkowskiValueType::Uuid); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/valueType :edb.type/*] but got [... :edb/valueType {:?}] for solitonId {} and attribute {}", value, solitonId, attr)))
}
},
causetids::DB_CARDINALITY => {
match *value {
MinkowskiType::Ref(causetids::DB_CARDINALITY_MANY) => { builder.multival(true); },
MinkowskiType::Ref(causetids::DB_CARDINALITY_ONE) => { builder.multival(false); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/cardinality :edb.cardinality/many|:edb.cardinality/one] but got [... :edb/cardinality {:?}]", value)))
}
},
causetids::DB_UNIQUE => {
match *value {
MinkowskiType::Ref(causetids::DB_UNIQUE_VALUE) => { builder.unique(attribute::Unique::Value); },
MinkowskiType::Ref(causetids::DB_UNIQUE_CausetIDITY) => { builder.unique(attribute::Unique::CausetIdity); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/unique :edb.unique/value|:edb.unique/causetIdity] but got [... :edb/unique {:?}]", value)))
}
},
causetids::DB_INDEX => {
match *value {
MinkowskiType::Boolean(x) => { builder.index(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/index true|false] but got [... :edb/index {:?}]", value)))
}
},
causetids::DB_FULLTEXT => {
match *value {
MinkowskiType::Boolean(x) => { builder.fulltext(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/fulltext true|false] but got [... :edb/fulltext {:?}]", value)))
}
},
causetids::DB_IS_COMPONENT => {
match *value {
MinkowskiType::Boolean(x) => { builder.component(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/isComponent true|false] but got [... :edb/isComponent {:?}]", value)))
}
},
causetids::DB_NO_HISTORY => {
match *value {
MinkowskiType::Boolean(x) => { builder.no_history(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/noHistory true|false] but got [... :edb/noHistory {:?}]", value)))
}
},
_ => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId)))
}
}
};
let mut attributes_installed: BTreeSet<SolitonId> = BTreeSet::default();
let mut attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>> = BTreeMap::default();
for (solitonId, builder) in builders.into_iter() {
match attribute_map.entry(solitonId) {
Entry::Vacant(entry) => {
// Validate once…
builder.validate_install_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for new attribute with solitonId {} is not valid", solitonId)))?;
// … and twice, now we have the Attribute.
let a = builder.build();
a.validate(|| solitonId.to_string())?;
entry.insert(a);
attributes_installed.insert(solitonId);
},
Entry::Occupied(mut entry) => {
builder.validate_alter_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for existing attribute with solitonId {} is not valid", solitonId)))?;
let mutations = builder.mutate(entry.get_mut());
attributes_altered.insert(solitonId, mutations);
},
}
}
Ok(SpacetimeReport {
attributes_installed: attributes_installed,
attributes_altered: attributes_altered,
causetIds_altered: BTreeMap::default(),
})
}
// | identifier_body |
spacetime.rs | // Copyright 2020 WHTCORPS INC
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#![allow(dead_code)]
//! Most bundles can mutate the EinsteinDB spacetime by transacting assertions:
//!
//! - they can add (and, eventually, retract and alter) recognized causetIds using the `:edb/causetid`
//! attribute;
//!
//! - they can add (and, eventually, retract and alter) schemaReplicant attributes using various `:edb/*`
//! attributes;
//!
//! - eventually, they will be able to add (and possibly retract) solitonId partitions using an EinsteinDB
//! equivalent (perhaps :edb/partition or :edb.partition/start) to Causetic's `:edb.install/partition`
//! attribute.
//!
//! This module recognizes, validates, applies, and reports on these mutations.
use failure::ResultExt;
use std::collections::{BTreeMap, BTreeSet};
use std::collections::btree_map::Entry;
use add_retract_alter_set::{
AddRetractAlterSet,
};
use edbn::symbols;
use causetids;
use causetq_pull_promises::errors::{
DbErrorKind,
Result,
};
use allegrosql_promises::{
attribute,
SolitonId,
MinkowskiType,
MinkowskiValueType,
};
use causetq_allegrosql::{
SchemaReplicant,
AttributeMap,
};
use schemaReplicant::{
AttributeBuilder,
AttributeValidation,
};
use types::{
EAV,
};
/// An alteration to an attribute.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum AttributeAlteration {
/// From http://blog.Causetic.com/2014/01/schemaReplicant-alteration.html:
/// - rename attributes
/// - rename your own programmatic causetIdities (uses of :edb/causetid)
/// - add or remove indexes
Index,
/// - add or remove uniqueness constraints
Unique,
/// - change attribute cardinality
Cardinality,
/// - change whether history is retained for an attribute
NoHistory,
/// - change whether an attribute is treated as a component
IsComponent,
}
/// An alteration to a causetid.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum CausetIdAlteration {
CausetId(symbols::Keyword),
}
/// Summarizes changes to spacetime such as a `SchemaReplicant` and (in the future) a `PartitionMap`.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub struct SpacetimeReport {
// SolitonIds that were not present in the original `AttributeMap` that was mutated.
pub attributes_installed: BTreeSet<SolitonId>,
// SolitonIds that were present in the original `AttributeMap` that was mutated, together with a
// representation of the mutations that were applied.
pub attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>>,
// CausetIds that were installed into the `AttributeMap`.
pub causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration>,
}
impl SpacetimeReport {
pub fn attributes_did_change(&self) -> bool {
!(self.attributes_installed.is_empty() &&
self.attributes_altered.is_empty())
}
}
/// Update an `AttributeMap` in place given two sets of causetid and attribute retractions, which
/// together contain enough information to reason about a "schemaReplicant retraction".
///
/// SchemaReplicant may only be retracted if all of its necessary attributes are being retracted:
/// - :edb/causetid, :edb/valueType, :edb/cardinality.
///
/// Note that this is currently incomplete/flawed:
/// - we're allowing optional attributes to not be retracted and dangle afterwards
///
/// Returns a set of attribute retractions which do not involve schemaReplicant-defining attributes.
fn update_attribute_map_from_schemaReplicant_retractions(attribute_map: &mut AttributeMap, retractions: Vec<EAV>, causetId_retractions: &BTreeMap<SolitonId, symbols::Keyword>) -> Result<Vec<EAV>> {
// Process retractions of schemaReplicant attributes first. It's allowed to retract a schemaReplicant attribute
// if all of the schemaReplicant-defining schemaReplicant attributes are being retracted.
// A defining set of attributes is :edb/causetid, :edb/valueType, :edb/cardinality.
let mut filtered_retractions = vec![];
let mut suspect_retractions = vec![];
// Filter out sets of schemaReplicant altering retractions.
let mut eas = BTreeMap::new();
for (e, a, v) in retractions.into_iter() {
if causetids::is_a_schemaReplicant_attribute(a) {
eas.entry(e).or_insert(vec![]).push(a);
suspect_retractions.push((e, a, v));
} else {
filtered_retractions.push((e, a, v));
}
}
// TODO (see https://github.com/whtcorpsinc/edb/issues/796).
// Retraction of causetIds is allowed, but if a causetid names a schemaReplicant attribute, then we should enforce
// retraction of all of the associated schemaReplicant attributes.
// Unfortunately, our current in-memory schemaReplicant representation (namely, how we define an Attribute) is not currently
// rich enough: it lacks distinction between presence and absence, and instead assumes default values.
// Currently, in order to do this enforcement correctly, we'd need to inspect 'causets'.
// Here is an incorrect way to enforce this. It's incorrect because it prevents us from retracting non-"schemaReplicant naming" causetIds.
// for retracted_e in causetId_retractions.keys() {
// if !eas.contains_key(retracted_e) {
// bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting :edb/causetid of a schemaReplicant without retracting its defining attributes is not permitted.")));
// }
// }
for (e, a, v) in suspect_retractions.into_iter() {
let attributes = eas.get(&e).unwrap();
// Found a set of retractions which negate a schemaReplicant.
if attributes.contains(&causetids::DB_CARDINALITY) && attributes.contains(&causetids::DB_VALUE_TYPE) {
// Ensure that corresponding :edb/causetid is also being retracted at the same time.
if causetId_retractions.contains_key(&e) {
// Remove attributes corresponding to retracted attribute.
attribute_map.remove(&e);
} else {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting defining attributes of a schemaReplicant without retracting its :edb/causetid is not permitted.")));
}
} else {
filtered_retractions.push((e, a, v));
}
}
Ok(filtered_retractions)
}
/// Update an `AttributeMap` in place from the given `[e a typed_value]` triples.
///
/// This is suitable for producing an `AttributeMap` from the `schemaReplicant` materialized view, which does not
/// contain install and alter markers.
///
/// Returns a report summarizing the mutations that were applied.
pub fn update_attribute_map_from_causetid_triples(attribute_map: &mut AttributeMap, assertions: Vec<EAV>, retractions: Vec<EAV>) -> Result<SpacetimeReport> {
fn attribute_builder_to_modify(attribute_id: SolitonId, existing: &AttributeMap) -> AttributeBuilder {
existing.get(&attribute_id)
.map(AttributeBuilder::to_modify_attribute)
.unwrap_or_else(AttributeBuilder::default)
}
// Group mutations by impacted solitonId.
let mut builders: BTreeMap<SolitonId, AttributeBuilder> = BTreeMap::new();
// For retractions, we start with an attribute builder that's pre-populated with the existing
// attribute values. That allows us to check existing values and unset them.
for (solitonId, attr, ref value) in retractions {
let builder = builders.entry(solitonId).or_insert_with(|| attribute_builder_to_modify(solitonId, attribute_map));
match attr {
// You can only retract :edb/unique, :edb/isComponent; all others must be altered instead
// of retracted, or are not allowed to change.
causetids::DB_IS_COMPONENT => {
match value {
&MinkowskiType::Boolean(v) if builder.component == Some(v) => {
builder.component(false);
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/isComponent with the wrong value {:?}.", v)));
},
}
},
causetids::DB_UNIQUE => {
match *value {
MinkowskiType::Ref(u) => {
match u {
causetids::DB_UNIQUE_VALUE if builder.unique == Some(Some(attribute::Unique::Value)) => {
builder.non_unique();
},
causetids::DB_UNIQUE_CausetIDITY if builder.unique == Some(Some(attribute::Unique::CausetIdity)) => {
builder.non_unique();
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/unique with the wrong value {}.", v)));
},
}
},
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [:edb/retract _ :edb/unique :edb.unique/_] but got [:edb/retract {} :edb/unique {:?}]", solitonId, value)))
}
},
causetids::DB_VALUE_TYPE |
causetids::DB_CARDINALITY |
causetids::DB_INDEX |
causetids::DB_FULLTEXT |
causetids::DB_NO_HISTORY => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting attribute {} for instanton {} not permitted.", attr, solitonId)));
},
_ => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId)))
}
}
}
for (solitonId, attr, ref value) in assertions.into_iter() {
// For assertions, we can start with an empty attribute builder.
let builder = builders.entry(solitonId).or_insert_with(Default::default);
// TODO: improve error messages throughout.
match attr {
causetids::DB_VALUE_TYPE => {
match *value {
MinkowskiType::Ref(causetids::DB_TYPE_BOOLEAN) => { builder.value_type(MinkowskiValueType::Boolean); },
MinkowskiType::Ref(causetids::DB_TYPE_DOUBLE) => { builder.value_type(MinkowskiValueType::Double); },
MinkowskiType::Ref(causetids::DB_TYPE_INSTANT) => { builder.value_type(MinkowskiValueType::Instant); },
MinkowskiType::Ref(causetids::DB_TYPE_KEYWORD) => { builder.value_type(MinkowskiValueType::Keyword); },
MinkowskiType::Ref(causetids::DB_TYPE_LONG) => { builder.value_type(MinkowskiValueType::Long); },
MinkowskiType::Ref(causetids::DB_TYPE_REF) => { builder.value_type(MinkowskiValueType::Ref); },
MinkowskiType::Ref(causetids::DB_TYPE_STRING) => { builder.value_type(MinkowskiValueType::String); },
MinkowskiType::Ref(causetids::DB_TYPE_UUID) => { builder.value_type(MinkowskiValueType::Uuid); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/valueType :edb.type/*] but got [... :edb/valueType {:?}] for solitonId {} and attribute {}", value, solitonId, attr)))
}
},
causetids::DB_CARDINALITY => {
match *value {
MinkowskiType::Ref(causetids::DB_CARDINALITY_MANY) => { builder.multival(true); },
MinkowskiType::Ref(causetids::DB_CARDINALITY_ONE) => { builder.multival(false); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/cardinality :edb.cardinality/many|:edb.cardinality/one] but got [... :edb/cardinality {:?}]", value)))
}
},
causetids::DB_UNIQUE => {
match *value {
MinkowskiType::Ref(causetids::DB_UNIQUE_VALUE) => { builder.unique(attribute::Unique::Value); },
MinkowskiType::Ref(causetids::DB_UNIQUE_CausetIDITY) => { builder.unique(attribute::Unique::CausetIdity); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/unique :edb.unique/value|:edb.unique/causetIdity] but got [... :edb/unique {:?}]", value)))
}
},
causetids::DB_INDEX => {
match *value {
MinkowskiType::Boolean(x) => { builder.index(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/index true|false] but got [... :edb/index {:?}]", value)))
}
},
causetids::DB_FULLTEXT => {
match *value {
MinkowskiType::Boolean(x) => { builder.fulltext(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/fulltext true|false] but got [... :edb/fulltext {:?}]", value)))
}
},
causetids::DB_IS_COMPONENT => {
match *value {
MinkowskiType::Boolean(x) => { builder.component(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/isComponent true|false] but got [... :edb/isComponent {:?}]", value)))
}
},
causetids::DB_NO_HISTORY => {
match *value {
MinkowskiType::Boolean(x) => { builder.no_history(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/noHistory true|false] but got [... :edb/noHistory {:?}]", value)))
}
},
_ => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId)))
}
}
};
let mut attributes_installed: BTreeSet<SolitonId> = BTreeSet::default();
let mut attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>> = BTreeMap::default();
for (solitonId, builder) in builders.into_iter() {
match attribute_map.entry(solitonId) {
Entry::Vacant(entry) => {
// Validate once…
builder.validate_install_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for new attribute with solitonId {} is not valid", solitonId)))?;
// … and twice, now we have the Attribute.
let a = builder.build();
a.validate(|| solitonId.to_string())?;
entry.insert(a);
attributes_installed.insert(solitonId);
},
Entry::Occupied(mut entry) => {
| }
}
Ok(SpacetimeReport {
attributes_installed: attributes_installed,
attributes_altered: attributes_altered,
causetIds_altered: BTreeMap::default(),
})
}
/// Update a `SchemaReplicant` in place from the given `[e a typed_value added]` quadruples.
///
/// This layer enforces that causetid assertions of the form [solitonId :edb/causetid ...] (as distinct from
/// attribute assertions) are present and correct.
///
/// This is suitable for mutating a `SchemaReplicant` from an applied transaction.
///
/// Returns a report summarizing the mutations that were applied.
pub fn update_schemaReplicant_from_causetid_quadruples<U>(schemaReplicant: &mut SchemaReplicant, assertions: U) -> Result<SpacetimeReport>
where U: IntoIterator<Item=(SolitonId, SolitonId, MinkowskiType, bool)> {
// Group attribute assertions into asserted, retracted, and updated. We assume all our
// attribute assertions are :edb/cardinality :edb.cardinality/one (so they'll only be added or
// retracted at most once), which means all attribute alterations are simple changes from an old
// value to a new value.
let mut attribute_set: AddRetractAlterSet<(SolitonId, SolitonId), MinkowskiType> = AddRetractAlterSet::default();
let mut causetId_set: AddRetractAlterSet<SolitonId, symbols::Keyword> = AddRetractAlterSet::default();
for (e, a, typed_value, added) in assertions.into_iter() {
// Here we handle :edb/causetid assertions.
if a == causetids::DB_CausetID {
if let MinkowskiType::Keyword(ref keyword) = typed_value {
causetId_set.witness(e, keyword.as_ref().clone(), added);
continue
} else {
// Something is terribly wrong: the schemaReplicant ensures we have a keyword.
unreachable!();
}
}
attribute_set.witness((e, a), typed_value, added);
}
// Collect triples.
let retracted_triples = attribute_set.retracted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value));
let asserted_triples = attribute_set.asserted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value));
let altered_triples = attribute_set.altered.into_iter().map(|((e, a), (_old_value, new_value))| (e, a, new_value));
// First we process retractions which remove schemaReplicant.
// This operation consumes our current list of attribute retractions, producing a filtered one.
let non_schemaReplicant_retractions = update_attribute_map_from_schemaReplicant_retractions(&mut schemaReplicant.attribute_map,
retracted_triples.collect(),
&causetId_set.retracted)?;
// Now we process all other retractions.
let report = update_attribute_map_from_causetid_triples(&mut schemaReplicant.attribute_map,
asserted_triples.chain(altered_triples).collect(),
non_schemaReplicant_retractions)?;
let mut causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration> = BTreeMap::new();
// Asserted, altered, or retracted :edb/causetIds update the relevant causetids.
for (solitonId, causetid) in causetId_set.asserted {
schemaReplicant.causetid_map.insert(solitonId, causetid.clone());
schemaReplicant.causetId_map.insert(causetid.clone(), solitonId);
causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(causetid.clone()));
}
for (solitonId, (old_causetId, new_causetId)) in causetId_set.altered {
schemaReplicant.causetid_map.insert(solitonId, new_causetId.clone()); // Overwrite existing.
schemaReplicant.causetId_map.remove(&old_causetId); // Remove old.
schemaReplicant.causetId_map.insert(new_causetId.clone(), solitonId); // Insert new.
causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(new_causetId.clone()));
}
for (solitonId, causetid) in &causetId_set.retracted {
schemaReplicant.causetid_map.remove(solitonId);
schemaReplicant.causetId_map.remove(causetid);
causetIds_altered.insert(*solitonId, CausetIdAlteration::CausetId(causetid.clone()));
}
// Component attributes need to change if either:
// - a component attribute changed
// - a schemaReplicant attribute that was a component was retracted
// These two checks are a rather heavy-handed way of keeping schemaReplicant's
// component_attributes up-to-date: most of the time we'll rebuild it
// even though it's not necessary (e.g. a schemaReplicant attribute that's _not_
// a component was removed, or a non-component related attribute changed).
if report.attributes_did_change() || causetId_set.retracted.len() > 0 {
schemaReplicant.update_component_attributes();
}
Ok(SpacetimeReport {
causetIds_altered: causetIds_altered,
.. report
})
}
| builder.validate_alter_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for existing attribute with solitonId {} is not valid", solitonId)))?;
let mutations = builder.mutate(entry.get_mut());
attributes_altered.insert(solitonId, mutations);
},
| conditional_block |
make.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Automatically generated file for compiling doconce documents.
"""
import sys, glob, os, shutil, subprocess, codecs, re
logfile = 'tmp_output.log' # store all output of all operating system commands
f = open(logfile, 'w'); f.close() # touch logfile so it can be appended
unix_command_recorder = []
def os_system(cmd):
"""Run system command cmd using the simple os.system command."""
print cmd
failure = os.system(cmd)
if failure:
print """Command
%s
failed""" % cmd
sys.exit(1)
unix_command_recorder.append(cmd) # record command for bash script
def system(cmd):
"""Run system command cmd using subprocess module."""
print cmd
try:
output = subprocess.check_output(cmd, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print """Command
%s
failed""" % cmd
print 'Return code:', e.returncode
print e.output
print("Hello")
sys.exit(1)
print output
f = open(logfile, 'a'); f.write(output); f.close()
unix_command_recorder.append(cmd) # record command for bash script
return output
def spellcheck():
for filename in glob.glob('*.do.txt'):
if not filename.startswith('tmp'):
cmd = 'doconce spellcheck -d .dict4spell.txt %(filename)s' % vars()
system(cmd)
def latex(name,
latex_program='pdflatex', # or 'latex'
options='--latex_code_style=vrb',
version='paper', # or 'screen', '2up', 'A4', 'A4-2up'
postfix='', # or 'auto'
ptex2tex=None, # only for ptex2tex step
):
"""
Make latex/pdflatex (according to latex_program) PDF file from
the doconce file name (without any .do.txt extension).
version can take the following values:
* paper: normal page size, --device=paper
* 2up: normal page size, --device=paper, 2 pages per sheet
* A4: A4 page size, --device=paper
* A4-2up: A4 page size, --device=paper, 2 pages per sheet
* screen: normal pages size, --device=screen
If a separate ptex2tex step is wanted, fill in all necessary
commands in the ptex2tex string.
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
if version in ('paper', 'A4', '2up', 'A4-2up'):
if not '--device=paper' in options:
options += ' --device=paper'
elif version == 'screen' and '--device=paper' in options:
options = options.replace('--device=paper', '')
if version in ('A4', 'A4-2up'):
if not '--latex_papersize=a4' in options:
options += ' --latex_papersize=a4'
if postfix == 'auto':
if version == 'paper':
postfix = '4print'
elif version == 'screen':
postfix = '4screen'
else:
postfix = version
# Compile source
cmd = 'doconce format %(latex_program)s %(name)s %(options)s ' % vars()
system(cmd)
cmd = r"doconce replace '%% insert custom LaTeX commands...' '\usepackage[russian]{babel} \usepackage{titlesec} \titleformat{\subsubsection}[runin] {\normalfont\normalsize\bfseries}{\thesubsubsection.}{1em}{} \let\paragraph=\subsubsection' %(name)s.tex" % vars()
system(cmd)
cmd = r"doconce replace '\usepackage{lmodern}' '%%\usepackage{lmodern}' %(name)s.tex" % vars()
system(cmd)
# Transform .p.tex to .tex?
if ptex2tex is not None:
cmd = ptex2tex
system(cmd)
# Load latex file into string for examination
dofile = open(name + '.tex', 'r')
text = dofile.read()
dofile.close()
latex_options = ''
if latex_program == 'pdflatex':
latex_options = '-file-line-error -interaction nonstopmode -halt-on-error'
# Run latex
shell_escape = ' -shell-escape' if 'begin{minted}' in text else ''
cmd_latex = '%(latex_program)s%(shell_escape)s %(latex_options)s %(name)s' % vars()
system(cmd_latex)
if 'idx{' in text:
cmd = 'makeindex %(name)s' % vars()
system(cmd)
if 'BIBFILE:' in text:
cmd = 'bibtex %(name)s' % vars()
system(cmd)
system(cmd_latex)
system(cmd_latex)
if latex_program == 'latex':
cmd = 'dvipdf %(name)s' % vars()
system(cmd)
# Could instead of dvipdf run the old-fashioned dvips and ps2pdf
if version in ('2up', 'A4-2up'):
# Use pdfnup to make two pages per sheet
cmd = 'pdfnup --frame true --outfile %(name)s.pdf %(name)s.pdf' % vars()
system(cmd)
if postfix:
shutil.copy(name + '.pdf', name + '-' + postfix + '.pdf')
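# Illustrative usage (the file name is hypothetical): produce both a print and a screen PDF of
# "thebook.do.txt" with the options used elsewhere in this script.
#
# latex('thebook', latex_program='pdflatex',
#       options='--encoding=utf-8 --latex_code_style=vrb',
#       version='paper', postfix='auto')   # -> thebook-4print.pdf
# latex('thebook', latex_program='pdflatex',
#       options='--encoding=utf-8 --latex_code_style=vrb',
#       version='screen', postfix='auto')  # -> thebook-4screen.pdf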
def html(name, options='', split=False):
"""
Make HTML file from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = u"doconce replace 'Figure' 'Рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'figure' 'рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'width=responsive' 'class=\"img-responsive\" style=\"max-width:600px; width:100%%;\"' %(name)s.html".encode('utf-8') % vars()
system(cmd)
if split:
cmd = 'doconce split_html %(name)s' % vars()
system(cmd)
for filename in glob.glob("._%(name)s*.html" % vars()):
if '000' not in filename:
cmd = u"doconce replace '← Prev' '← Предыдущая глава' %s".encode('utf-8') % filename
system(cmd)
cmd = u"doconce replace 'Next →' ' Следующая глава →' %s".encode('utf-8') % filename
system(cmd)
for filename in [name, '._%s000' % name]:
print(filename)
cmd = u"doconce replace 'Read' 'Перейти к первой главе' %s.html".encode('utf-8') % filename
system(cmd)
cmd = u"doconce subst '.*Next.*' '' %s.html".encode('utf-8') % filename
system(cmd)
def reveal_slides(name, options='', postfix='reveal', theme='darkgray'):
"""Make reveal.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_p | options += ' --keep_pygments_html_bg'
options += ' --html_output=%(name)s-%(postfix)s' % vars()
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = 'doconce slides_html %(name)s-%(postfix)s reveal --html_slide_theme=%(theme)s' % vars()
system(cmd)
def deck_slides(name, options='', postfix='deck', theme='sandstone.default'):
"""Make deck.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['deck'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
options += ' --html_output=%(name)s-%(postfix)s' % vars()
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = 'doconce slides_html %(name)s-%(postfix)s deck --html_slide_theme=%(theme)s' % vars()
system(cmd)
def beamer_slides(name, options='', postfix='beamer', theme='red_shadow',
ptex2tex_envir='minted'):
"""Make latex beamer slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
# Compile source
shell_escape = '-shell-escape' if ptex2tex_envir == 'minted' else ''
cmd = 'doconce format pdflatex %(name)s %(options)s ' % vars()
system(cmd)
# Run latex
cmd = 'doconce ptex2tex %(name)s envir=%(ptex2tex_envir)s' % vars()
system(cmd)
cmd = 'doconce slides_beamer %(name)s --beamer_slide_theme=%(theme)s' % vars()
system(cmd)
cmd = 'pdflatex %(shell_escape)s %(name)s' % vars()
system(cmd)
system(cmd)
system('cp %(name)s.pdf %(name)s-%(postfix)s.pdf' % vars())
system(cmd)
def sphinx(name, options='', dirname='sphinx-rootdir',
theme='pyramid', automake_sphinx_options='',
split=False):
"""
Make Sphinx HTML subdirectory from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
if name.endswith('.do'):
name = name.replace('.do','')
# Compile source
cmd = 'doconce format sphinx %(name)s %(options)s ' % vars()
system(cmd)
if split:
cmd = 'doconce split_rst %(name)s' % vars()
system(cmd)
# Create sphinx directory
cmd = 'doconce sphinx_dir theme=%(theme)s %(name)s' % vars()
system(cmd)
# Compile sphinx
cmd = 'python automake_sphinx.py %(automake_sphinx_options)s' % vars()
system(cmd)
def doconce2format(name, format, options=''):
"""Make given format from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format %(format)s %(name)s %(options)s ' % vars()
system(cmd)
def plain(name, options=''):
doconce2format(name, 'plain', options)
def pandoc(name, options=''):
doconce2format(name, 'pandoc', options)
def ipynb(name, options=''):
doconce2format(name, 'ipynb', options)
def cwiki(name, options=''):
doconce2format(name, 'cwiki', options)
def mwiki(name, options=''):
doconce2format(name, 'mwiki', options)
def gwiki(name, options=''):
doconce2format(name, 'gwiki', options)
def cleanup_html(fn):
show = False
out = []
with codecs.open(fn, "r", encoding='utf-8') as f:
for line in f:
if "<!-- ------------------- end of main content --------------- -->" in line:
show = False
if show:
out.append(line)
if "<!-- ------------------- main content ---------------------- -->" in line:
show = True
assert out, "No output in %s" % fn
with codecs.open(fn, 'w', encoding='utf-8') as f:
f.write("".join(out))
def mksnippets():
for fn in glob.glob("._*.html"):
with codecs.open(fn, 'r', encoding='utf-8') as thebook:
snippet_name = None
snippet_content = []
snippets = {}
for line in thebook:
if 'navigation buttons at the bottom of the page' in line \
or 'end of snippets' in line:
break
if 'snippet: ' in line:
m = re.search(ur'snippet:\s*(\w+)', line)
if m:
snippet_name = m.group(1)
snippets[snippet_name] = snippet_content
else:
if snippet_name:
if re.match('<h\d', line):
snippet_content = []
snippet_name = None
else:
snippet_content.append(line)
for snippet_name, snippet_content in snippets.items():
with codecs.open("snippets/%s.html" % snippet_name,
'w', encoding='utf-8') as snippet:
snippet.write("".join(snippet_content))
def main():
"""
Produce various formats from the doconce source.
"""
dofile = "thebook"
#spellcheck()
common_options = '--encoding=utf-8 --examples_as_exercises '
# --- HTML ---
common_html_options = ' '
# HTML Bootstrap
bootstrap_options = ' --html_style=bootswatch_readable --html_code_style=inherit --html_pre_style=inherit --toc_depth=2 --pygments_html_style=default --html_template=template_bootstrap_wtoc.html --html_figure_caption=bottom --html_figure_hrule=top+bottom'
html(
dofile,
options=common_options + common_html_options + bootstrap_options,
split=True)
# One long HTML file
#html(dofile, options=common_options + common_html_options + ' --html_style=bloodish --html_output=%s-1' % dofile, split=False)
# Solarized HTML
#html(dofile, options=common_options + common_html_options + ' --html_style=solarized3 --html_output=%s-solarized' % dofile, split=True)
mksnippets()
sys.exit(1)
# --- latex ---
common_latex_options = ' --latex_code_style=vrb'
for version in 'paper', 'screen': # , 'A4', '2up', 'A4-2up':
latex(
dofile,
latex_program='pdflatex',
options=common_options + common_latex_options,
version=version,
postfix='auto')
# --- Sphinx ---
# sphinx_themes = ['pyramid',]
# for theme in sphinx_themes:
# dirname = 'sphinx-rootdir' if len(sphinx_themes) == 1 else 'sphinx-rootdir-%s' % theme
# sphinx(
# dofile,
# options=common_options + '',
# dirname=dirname,
# theme=theme,
# automake_sphinx_options='',
# split=False)
# Dump all Unix commands run above as a Bash script
bash = open('tmp_make.sh', 'w')
print 'see tmp_make.sh for an equivalent auto-generated unix script'
bash.write('''#!/bin/bash
set -x # display all commands in output
# Safe execution of a Unix command: exit if failure
function system {
"$@"
if [ $? -ne 0 ]; then
echo "make.sh: unsuccessful command $@"
echo "abort!"
exit 1
fi
}
''')
for cmd in unix_command_recorder:
if cmd.startswith('doconce format') or cmd.startswith('rm '):
bash.write('\n') # delimiter line in script
bash.write('system ' + cmd + '\n')
bash.close()
print 'see tmp_output.log for the output of all the commands'
if __name__ == '__main__':
main()
| ygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['reveal'][theme][0]
if '--keep_pygments_html_bg' not in options:
| conditional_block |
make.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Automatically generated file for compiling doconce documents.
"""
import sys, glob, os, shutil, subprocess, codecs, re
logfile = 'tmp_output.log' # store all output of all operating system commands
f = open(logfile, 'w'); f.close() # touch logfile so it can be appended
unix_command_recorder = []
def os_system(cmd):
"""Run system command cmd using the simple os.system command."""
print cmd
failure = os.system(cmd)
if failure:
print """Command
%s
failed""" % cmd
sys.exit(1)
unix_command_recorder.append(cmd) # record command for bash script
def system(cmd):
"""Run system command cmd using subprocess module."""
print cmd
try:
output = subprocess.check_output(cmd, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print """Command
%s
failed""" % cmd
print 'Return code:', e.returncode
print e.output
sys.exit(1)
print output
f = open(logfile, 'a'); f.write(output); f.close()
unix_command_recorder.append(cmd) # record command for bash script
return output
def spellcheck():
for filename in glob.glob('*.do.txt'):
if not filename.startswith('tmp'):
cmd = 'doconce spellcheck -d .dict4spell.txt %(filename)s' % vars()
system(cmd)
def latex(name,
latex_program='pdflatex', # or 'latex'
options='--latex_code_style=vrb',
version='paper', # or 'screen', '2up', 'A4', 'A4-2up'
postfix='', # or 'auto'
ptex2tex=None, # only for ptex2tex step
):
"""
Make latex/pdflatex (according to latex_program) PDF file from
the doconce file name (without any .do.txt extension).
version can take the following values:
* paper: normal page size, --device=paper
* 2up: normal page size, --device=paper, 2 pages per sheet
* A4: A4 page size, --device=paper
* A4-2up: A4 page size, --device=paper, 2 pages per sheet
* screen: normal pages size, --device=screen
If a separate ptex2tex step is wanted, fill in all necessary
commands in the ptex2tex string.
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
if version in ('paper', 'A4', '2up', 'A4-2up'):
if not '--device=paper' in options:
options += ' --device=paper'
elif version == 'screen' and '--device=paper' in options:
options = options.replace('--device=paper', '')
if version in ('A4', 'A4-2up'):
if not '--latex_papersize=a4' in options:
options += ' --latex_papersize=a4'
if postfix == 'auto':
if version == 'paper':
postfix = '4print'
elif version == 'screen':
postfix = '4screen'
else:
postfix = version
# Compile source
cmd = 'doconce format %(latex_program)s %(name)s %(options)s ' % vars()
system(cmd)
cmd = r"doconce replace '%% insert custom LaTeX commands...' '\usepackage[russian]{babel} \usepackage{titlesec} \titleformat{\subsubsection}[runin] {\normalfont\normalsize\bfseries}{\thesubsubsection.}{1em}{} \let\paragraph=\subsubsection' %(name)s.tex" % vars()
system(cmd)
cmd = r"doconce replace '\usepackage{lmodern}' '%%\usepackage{lmodern}' %(name)s.tex" % vars()
system(cmd)
# Transform .p.tex to .tex?
if ptex2tex is not None:
cmd = ptex2tex
system(cmd)
# Load latex file into string for examination
dofile = open(name + '.tex', 'r')
text = dofile.read()
dofile.close()
latex_options = ''
if latex_program == 'pdflatex':
latex_options = '-file-line-error -interaction nonstopmode -halt-on-error'
# Run latex
shell_escape = ' -shell-escape' if 'begin{minted}' in text else ''
cmd_latex = '%(latex_program)s%(shell_escape)s %(latex_options)s %(name)s' % vars()
system(cmd_latex)
if 'idx{' in text:
cmd = 'makeindex %(name)s' % vars()
system(cmd)
if 'BIBFILE:' in text:
cmd = 'bibtex %(name)s' % vars()
system(cmd)
system(cmd_latex)
system(cmd_latex)
if latex_program == 'latex':
cmd = 'dvipdf %(name)s' % vars()
system(cmd)
# Could instead of dvipdf run the old-fashioned dvips and ps2pdf
if version in ('2up', 'A4-2up'):
# Use pdfnup to make two pages per sheet
cmd = 'pdfnup --frame true --outfile %(name)s.pdf %(name)s.pdf' % vars()
system(cmd)
if postfix:
shutil.copy(name + '.pdf', name + '-' + postfix + '.pdf')
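# Illustrative calls of latex() (hypothetical file name; the version/postfix
# combinations follow the docstring above and the loop in main() below):
#
#   latex('thebook', latex_program='pdflatex', version='screen', postfix='auto')
#   # -> thebook.pdf plus a copy named thebook-4screen.pdf
#   latex('thebook', version='A4-2up')
#   # -> A4 paper size, two pages per sheet via pdfnup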
def html(name, options='', split=False):
"""
Make HTML file from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = u"doconce replace 'Figure' 'Рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'figure' 'рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'width=responsive' 'class=\"img-responsive\" style=\"max-width:600px; width:100%%;\"' %(name)s.html".encode('utf-8') % vars()
system(cmd)
if split:
cmd = 'doconce split_html %(name)s' % vars()
system(cmd)
for filename in glob.glob("._%(name)s*.html" % vars()):
if '000' not in filename:
cmd = u"doconce replace '← Prev' '← Предыдущая глава' %s".encode('utf-8') % filename
system(cmd)
cmd = u"doconce replace 'Next →' ' Следующая глава →' %s".encode('utf-8') % filename
system(cmd)
for filename in [name, '._%s000' % name]:
print(filename)
cmd = u"doconce replace 'Read' 'Перейти к первой главе' %s.html".encode('utf-8') % filename
system(cmd)
cmd = u"doconce subst '.*Next.*' '' %s.html".encode('utf-8') % filename
system(cmd)
def reveal_slides(name, options='', postfix='reveal', theme='darkgray'):
"""Make reveal.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['reveal'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
    options += ' --html_output=%(name)s-%(postfix)s' % vars()
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
    cmd = 'doconce slides_html %(name)s-%(postfix)s reveal --html_slide_theme=%(theme)s' % vars()
system(cmd)
def deck_slides(name, options='', postfix='deck', theme='sandstone.default'):
"""Make deck.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['deck'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
    options += ' --html_output=%(name)s-%(postfix)s' % vars()
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
    cmd = 'doconce slides_html %(name)s-%(postfix)s deck --html_slide_theme=%(theme)s' % vars()
system(cmd)
def beamer_slides(name, options='', postfix='beamer', theme='red_shadow',
ptex2tex_envir='minted'):
"""Make latex beamer slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
# Compile source
shell_escape = '-shell-escape' if ptex2tex_envir == 'minted' else ''
cmd = 'doconce format pdflatex %(name)s %(options)s ' % vars()
system(cmd)
# Run latex
cmd = 'doconce ptex2tex %(name)s envir=%(ptex2tex_envir)s' % vars()
system(cmd)
cmd = 'doconce slides_beamer %(name)s --beamer_slide_theme=%(theme)s' % vars()
system(cmd)
    cmd = 'pdflatex %(shell_escape)s %(name)s' % vars()
system(cmd)
system(cmd)
    system('cp %(name)s.pdf %(name)s-%(postfix)s.pdf' % vars())
    cmd = 'doconce slides_html %(name)s-%(postfix)s deck --html_slide_theme=%(theme)s' % vars()
system(cmd)
def sphinx(name, options='', dirname='sphinx-rootdir',
theme='pyramid', automake_sphinx_options='',
split=False):
"""
Make Sphinx HTML subdirectory from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
if name.endswith('.do'):
name = name.replace('.do','')
# Compile source
cmd = 'doconce format sphinx %(name)s %(options)s ' % vars()
system(cmd)
if split:
        cmd = 'doconce split_rst %(name)s' % vars()
        system(cmd)
# Create sphinx directory
cmd = 'doconce sphinx_dir theme=%(theme)s %(name)s' % vars()
system(cmd)
# Compile sphinx
cmd = 'python automake_sphinx.py %(automake_sphinx_options)s' % vars()
system(cmd)
def doconce2format(name, format, options=''):
"""Make given format from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format %(format)s %(name)s %(options)s ' % vars()
system(cmd)
def plain(name, options=''):
doconce2format(name, 'plain', options)
def pandoc(name, options=''):
doconce2format(name, 'pandoc', options)
def ipynb(name, options=''):
doconce2format(name, 'ipynb', options)
def cwiki(name, options=''):
doconce2format(name, 'cwiki', options)
def mwiki(name, options=''):
doconce2format(name, 'mwiki', options)
def gwiki(name, options=''):
doconce2format(name, 'gwiki', options)
def cleanup_html(fn):
show = False
out = []
with codecs.open(fn, "r", encoding='utf-8') as f:
for line in f:
if "<!-- ------------------- end of main content --------------- -->" in line:
show = False
if show:
out.append(line)
if "<!-- ------------------- main content ---------------------- -->" in line:
show = True
assert out, "No output in %s" % fn
with codecs.open(fn, 'w', encoding='utf-8') as f:
f.write("".join(out))
def mksnippets():
for fn in glob.glob("._*.html"):
with codecs.open(fn, 'r', encoding='utf-8') as thebook:
snippet_name = None
snippet_content = []
snippets = {}
for line in thebook:
if 'navigation buttons at the bottom of the page' in line \
or 'end of snippets' in line:
break
if 'snippet: ' in line:
m = re.search(ur'snippet:\s*(\w+)', line)
if m:
                        snippet_name = m.group(1)
snippets[snippet_name] = snippet_content
else:
if snippet_name:
if re.match('<h\d', line):
snippet_content = []
snippet_name = None
else:
snippet_content.append(line)
for snippet_name, snippet_content in snippets.items():
with codecs.open("snippets/%s.html" % snippet_name,
'w', encoding='utf-8') as snippet:
snippet.write("".join(snippet_content))
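# Note: mksnippets() above picks up any line containing 'snippet: <name>'
# (for example an HTML comment like <!-- snippet: intro --> in the generated
# pages) and writes the following content, up to the next <h1>-<h6> tag,
# to snippets/<name>.html. The snippets/ directory is assumed to exist, e.g.:
#
#   if not os.path.isdir('snippets'):
#       os.makedirs('snippets')
#   mksnippets()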
def main():
"""
Produce various formats from the d | ce source.
"""
dofile = "thebook"
#spellcheck()
common_options = '--encoding=utf-8 --examples_as_exercises '
# --- HTML ---
common_html_options = ' '
# HTML Bootstrap
bootstrap_options = ' --html_style=bootswatch_readable --html_code_style=inherit --html_pre_style=inherit --toc_depth=2 --pygments_html_style=default --html_template=template_bootstrap_wtoc.html --html_figure_caption=bottom --html_figure_hrule=top+bottom'
html(
dofile,
options=common_options + common_html_options + bootstrap_options,
split=True)
# One long HTML file
#html(dofile, options=common_options + common_html_options + ' --html_style=bloodish --html_output=%s-1' % dofile, split=False)
# Solarized HTML
#html(dofile, options=common_options + common_html_options + ' --html_style=solarized3 --html_output=%s-solarized' % dofile, split=True)
mksnippets()
sys.exit(1)
# --- latex ---
common_latex_options = ' --latex_code_style=vrb'
for version in 'paper', 'screen': # , 'A4', '2up', 'A4-2up':
latex(
dofile,
latex_program='pdflatex',
options=common_options + common_latex_options,
version=version,
postfix='auto')
# --- Sphinx ---
# sphinx_themes = ['pyramid',]
# for theme in sphinx_themes:
# dirname = 'sphinx-rootdir' if len(sphinx_themes) == 1 else 'sphinx-rootdir-%s' % theme
# sphinx(
# dofile,
# options=common_options + '',
# dirname=dirname,
# theme=theme,
# automake_sphinx_options='',
# split=False)
# Dump all Unix commands run above as a Bash script
bash = open('tmp_make.sh', 'w')
print 'see tmp_make.sh for an equivalent auto-generated unix script'
bash.write('''#!/bin/bash
set -x # display all commands in output
# Safe execution of a Unix command: exit if failure
function system {
"$@"
if [ $? -ne 0 ]; then
echo "make.sh: unsuccessful command $@"
echo "abort!"
exit 1
fi
}
''')
for cmd in unix_command_recorder:
if cmd.startswith('doconce format') or cmd.startswith('rm '):
bash.write('\n') # delimiter line in script
bash.write('system ' + cmd + '\n')
bash.close()
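    # The generated tmp_make.sh simply wraps every recorded command in the bash
    # `system` function written above, e.g. (illustrative lines only):
    #
    #   system doconce format html thebook --encoding=utf-8 ...
    #   system doconce split_html thebook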
print 'see tmp_output.log for the output of all the commands'
if __name__ == '__main__':
main()
| ocon | identifier_name |
make.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Automatically generated file for compiling doconce documents.
"""
import sys, glob, os, shutil, subprocess, codecs, re
logfile = 'tmp_output.log' # store all output of all operating system commands
f = open(logfile, 'w'); f.close() # touch logfile so it can be appended
unix_command_recorder = []
def os_system(cmd):
"""Run system command cmd using the simple os.system command."""
print cmd
failure = os.system(cmd)
if failure:
print """Command
%s
failed""" % cmd
sys.exit(1)
unix_command_recorder.append(cmd) # record command for bash script
def system(cmd):
|
def spellcheck():
for filename in glob.glob('*.do.txt'):
if not filename.startswith('tmp'):
cmd = 'doconce spellcheck -d .dict4spell.txt %(filename)s' % vars()
system(cmd)
def latex(name,
latex_program='pdflatex', # or 'latex'
options='--latex_code_style=vrb',
version='paper', # or 'screen', '2up', 'A4', 'A4-2up'
postfix='', # or 'auto'
ptex2tex=None, # only for ptex2tex step
):
"""
Make latex/pdflatex (according to latex_program) PDF file from
the doconce file name (without any .do.txt extension).
version can take the following values:
* paper: normal page size, --device=paper
* 2up: normal page size, --device=paper, 2 pages per sheet
* A4: A4 page size, --device=paper
* A4-2up: A4 page size, --device=paper, 2 pages per sheet
* screen: normal pages size, --device=screen
If a separate ptex2tex step is wanted, fill in all necessary
commands in the ptex2tex string.
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
if version in ('paper', 'A4', '2up', 'A4-2up'):
if not '--device=paper' in options:
options += ' --device=paper'
elif version == 'screen' and '--device=paper' in options:
options = options.replace('--device=paper', '')
if version in ('A4', 'A4-2up'):
if not '--latex_papersize=a4' in options:
options += ' --latex_papersize=a4'
if postfix == 'auto':
if version == 'paper':
postfix = '4print'
elif version == 'screen':
postfix = '4screen'
else:
postfix = version
# Compile source
cmd = 'doconce format %(latex_program)s %(name)s %(options)s ' % vars()
system(cmd)
cmd = r"doconce replace '%% insert custom LaTeX commands...' '\usepackage[russian]{babel} \usepackage{titlesec} \titleformat{\subsubsection}[runin] {\normalfont\normalsize\bfseries}{\thesubsubsection.}{1em}{} \let\paragraph=\subsubsection' %(name)s.tex" % vars()
system(cmd)
cmd = r"doconce replace '\usepackage{lmodern}' '%%\usepackage{lmodern}' %(name)s.tex" % vars()
system(cmd)
# Transform .p.tex to .tex?
if ptex2tex is not None:
cmd = ptex2tex
system(cmd)
# Load latex file into string for examination
dofile = open(name + '.tex', 'r')
text = dofile.read()
dofile.close()
latex_options = ''
if latex_program == 'pdflatex':
latex_options = '-file-line-error -interaction nonstopmode -halt-on-error'
# Run latex
shell_escape = ' -shell-escape' if 'begin{minted}' in text else ''
cmd_latex = '%(latex_program)s%(shell_escape)s %(latex_options)s %(name)s' % vars()
system(cmd_latex)
if 'idx{' in text:
cmd = 'makeindex %(name)s' % vars()
system(cmd)
if 'BIBFILE:' in text:
cmd = 'bibtex %(name)s' % vars()
system(cmd)
system(cmd_latex)
system(cmd_latex)
if latex_program == 'latex':
cmd = 'dvipdf %(name)s' % vars()
system(cmd)
# Could instead of dvipdf run the old-fashioned dvips and ps2pdf
if version in ('2up', 'A4-2up'):
# Use pdfnup to make two pages per sheet
cmd = 'pdfnup --frame true --outfile %(name)s.pdf %(name)s.pdf' % vars()
system(cmd)
if postfix:
shutil.copy(name + '.pdf', name + '-' + postfix + '.pdf')
def html(name, options='', split=False):
"""
Make HTML file from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = u"doconce replace 'Figure' 'Рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'figure' 'рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'width=responsive' 'class=\"img-responsive\" style=\"max-width:600px; width:100%%;\"' %(name)s.html".encode('utf-8') % vars()
system(cmd)
if split:
cmd = 'doconce split_html %(name)s' % vars()
system(cmd)
for filename in glob.glob("._%(name)s*.html" % vars()):
if '000' not in filename:
cmd = u"doconce replace '← Prev' '← Предыдущая глава' %s".encode('utf-8') % filename
system(cmd)
cmd = u"doconce replace 'Next →' ' Следующая глава →' %s".encode('utf-8') % filename
system(cmd)
for filename in [name, '._%s000' % name]:
print(filename)
cmd = u"doconce replace 'Read' 'Перейти к первой главе' %s.html".encode('utf-8') % filename
system(cmd)
cmd = u"doconce subst '.*Next.*' '' %s.html".encode('utf-8') % filename
system(cmd)
def reveal_slides(name, options='', postfix='reveal', theme='darkgray'):
"""Make reveal.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['reveal'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
    options += ' --html_output=%(name)s-%(postfix)s' % vars()
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
    cmd = 'doconce slides_html %(name)s-%(postfix)s reveal --html_slide_theme=%(theme)s' % vars()
system(cmd)
def deck_slides(name, options='', postfix='deck', theme='sandstone.default'):
"""Make deck.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['deck'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
    options += ' --html_output=%(name)s-%(postfix)s' % vars()
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
    cmd = 'doconce slides_html %(name)s-%(postfix)s deck --html_slide_theme=%(theme)s' % vars()
system(cmd)
def beamer_slides(name, options='', postfix='beamer', theme='red_shadow',
ptex2tex_envir='minted'):
"""Make latex beamer slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
# Compile source
shell_escape = '-shell-escape' if ptex2tex_envir == 'minted' else ''
cmd = 'doconce format pdflatex %(name)s %(options)s ' % vars()
system(cmd)
# Run latex
cmd = 'doconce ptex2tex %(name)s envir=%(ptex2tex_envir)s' % vars()
system(cmd)
cmd = 'doconce slides_beamer %(name)s --beamer_slide_theme=%(theme)s' % vars()
system(cmd)
    cmd = 'pdflatex %(shell_escape)s %(name)s' % vars()
system(cmd)
system(cmd)
    system('cp %(name)s.pdf %(name)s-%(postfix)s.pdf' % vars())
    cmd = 'doconce slides_html %(name)s-%(postfix)s deck --html_slide_theme=%(theme)s' % vars()
system(cmd)
def sphinx(name, options='', dirname='sphinx-rootdir',
theme='pyramid', automake_sphinx_options='',
split=False):
"""
Make Sphinx HTML subdirectory from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
if name.endswith('.do'):
name = name.replace('.do','')
# Compile source
cmd = 'doconce format sphinx %(name)s %(options)s ' % vars()
system(cmd)
if split:
        cmd = 'doconce split_rst %(name)s' % vars()
        system(cmd)
# Create sphinx directory
cmd = 'doconce sphinx_dir theme=%(theme)s %(name)s' % vars()
system(cmd)
# Compile sphinx
cmd = 'python automake_sphinx.py %(automake_sphinx_options)s' % vars()
system(cmd)
def doconce2format(name, format, options=''):
"""Make given format from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format %(format)s %(name)s %(options)s ' % vars()
system(cmd)
def plain(name, options=''):
doconce2format(name, 'plain', options)
def pandoc(name, options=''):
doconce2format(name, 'pandoc', options)
def ipynb(name, options=''):
doconce2format(name, 'ipynb', options)
def cwiki(name, options=''):
doconce2format(name, 'cwiki', options)
def mwiki(name, options=''):
doconce2format(name, 'mwiki', options)
def gwiki(name, options=''):
doconce2format(name, 'gwiki', options)
def cleanup_html(fn):
show = False
out = []
with codecs.open(fn, "r", encoding='utf-8') as f:
for line in f:
if "<!-- ------------------- end of main content --------------- -->" in line:
show = False
if show:
out.append(line)
if "<!-- ------------------- main content ---------------------- -->" in line:
show = True
assert out, "No output in %s" % fn
with codecs.open(fn, 'w', encoding='utf-8') as f:
f.write("".join(out))
def mksnippets():
for fn in glob.glob("._*.html"):
with codecs.open(fn, 'r', encoding='utf-8') as thebook:
snippet_name = None
snippet_content = []
snippets = {}
for line in thebook:
if 'navigation buttons at the bottom of the page' in line \
or 'end of snippets' in line:
break
if 'snippet: ' in line:
m = re.search(ur'snippet:\s*(\w+)', line)
if m:
                        snippet_name = m.group(1)
snippets[snippet_name] = snippet_content
else:
if snippet_name:
if re.match('<h\d', line):
snippet_content = []
snippet_name = None
else:
snippet_content.append(line)
for snippet_name, snippet_content in snippets.items():
with codecs.open("snippets/%s.html" % snippet_name,
'w', encoding='utf-8') as snippet:
snippet.write("".join(snippet_content))
def main():
"""
Produce various formats from the doconce source.
"""
dofile = "thebook"
#spellcheck()
common_options = '--encoding=utf-8 --examples_as_exercises '
# --- HTML ---
common_html_options = ' '
# HTML Bootstrap
bootstrap_options = ' --html_style=bootswatch_readable --html_code_style=inherit --html_pre_style=inherit --toc_depth=2 --pygments_html_style=default --html_template=template_bootstrap_wtoc.html --html_figure_caption=bottom --html_figure_hrule=top+bottom'
html(
dofile,
options=common_options + common_html_options + bootstrap_options,
split=True)
# One long HTML file
#html(dofile, options=common_options + common_html_options + ' --html_style=bloodish --html_output=%s-1' % dofile, split=False)
# Solarized HTML
#html(dofile, options=common_options + common_html_options + ' --html_style=solarized3 --html_output=%s-solarized' % dofile, split=True)
mksnippets()
sys.exit(1)
# --- latex ---
common_latex_options = ' --latex_code_style=vrb'
for version in 'paper', 'screen': # , 'A4', '2up', 'A4-2up':
latex(
dofile,
latex_program='pdflatex',
options=common_options + common_latex_options,
version=version,
postfix='auto')
# --- Sphinx ---
# sphinx_themes = ['pyramid',]
# for theme in sphinx_themes:
# dirname = 'sphinx-rootdir' if len(sphinx_themes) == 1 else 'sphinx-rootdir-%s' % theme
# sphinx(
# dofile,
# options=common_options + '',
# dirname=dirname,
# theme=theme,
# automake_sphinx_options='',
# split=False)
# Dump all Unix commands run above as a Bash script
bash = open('tmp_make.sh', 'w')
print 'see tmp_make.sh for an equivalent auto-generated unix script'
bash.write('''#!/bin/bash
set -x # display all commands in output
# Safe execution of a Unix command: exit if failure
function system {
"$@"
if [ $? -ne 0 ]; then
echo "make.sh: unsuccessful command $@"
echo "abort!"
exit 1
fi
}
''')
for cmd in unix_command_recorder:
if cmd.startswith('doconce format') or cmd.startswith('rm '):
bash.write('\n') # delimiter line in script
bash.write('system ' + cmd + '\n')
bash.close()
print 'see tmp_output.log for the output of all the commands'
if __name__ == '__main__':
main()
| """Run system command cmd using subprocess module."""
print cmd
try:
output = subprocess.check_output(cmd, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print """Command
%s
failed""" % cmd
print 'Return code:', e.returncode
print e.output
sys.exit(1)
print output
f = open(logfile, 'a'); f.write(output); f.close()
unix_command_recorder.append(cmd) # record command for bash script
return output | identifier_body |
make.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Automatically generated file for compiling doconce documents.
"""
import sys, glob, os, shutil, subprocess, codecs, re
logfile = 'tmp_output.log' # store all output of all operating system commands
f = open(logfile, 'w'); f.close() # touch logfile so it can be appended
unix_command_recorder = []
def os_system(cmd):
"""Run system command cmd using the simple os.system command."""
print cmd
failure = os.system(cmd)
if failure:
print """Command
%s
failed""" % cmd
sys.exit(1)
unix_command_recorder.append(cmd) # record command for bash script
def system(cmd):
"""Run system command cmd using subprocess module."""
print cmd
try:
output = subprocess.check_output(cmd, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print """Command
%s
failed""" % cmd
print 'Return code:', e.returncode
print e.output
sys.exit(1)
print output
f = open(logfile, 'a'); f.write(output); f.close()
unix_command_recorder.append(cmd) # record command for bash script
return output
def spellcheck():
for filename in glob.glob('*.do.txt'):
if not filename.startswith('tmp'):
cmd = 'doconce spellcheck -d .dict4spell.txt %(filename)s' % vars()
system(cmd)
def latex(name,
latex_program='pdflatex', # or 'latex'
options='--latex_code_style=vrb',
version='paper', # or 'screen', '2up', 'A4', 'A4-2up'
postfix='', # or 'auto'
ptex2tex=None, # only for ptex2tex step
):
"""
Make latex/pdflatex (according to latex_program) PDF file from
the doconce file name (without any .do.txt extension).
version can take the following values:
* paper: normal page size, --device=paper
* 2up: normal page size, --device=paper, 2 pages per sheet
* A4: A4 page size, --device=paper
* A4-2up: A4 page size, --device=paper, 2 pages per sheet
* screen: normal pages size, --device=screen
If a separate ptex2tex step is wanted, fill in all necessary
commands in the ptex2tex string.
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
if version in ('paper', 'A4', '2up', 'A4-2up'):
if not '--device=paper' in options:
options += ' --device=paper'
elif version == 'screen' and '--device=paper' in options:
options = options.replace('--device=paper', '')
if version in ('A4', 'A4-2up'):
if not '--latex_papersize=a4' in options:
options += ' --latex_papersize=a4'
if postfix == 'auto':
if version == 'paper':
postfix = '4print'
elif version == 'screen':
postfix = '4screen'
else:
postfix = version
# Compile source
cmd = 'doconce format %(latex_program)s %(name)s %(options)s ' % vars()
system(cmd)
cmd = r"doconce replace '%% insert custom LaTeX commands...' '\usepackage[russian]{babel} \usepackage{titlesec} \titleformat{\subsubsection}[runin] {\normalfont\normalsize\bfseries}{\thesubsubsection.}{1em}{} \let\paragraph=\subsubsection' %(name)s.tex" % vars()
system(cmd)
cmd = r"doconce replace '\usepackage{lmodern}' '%%\usepackage{lmodern}' %(name)s.tex" % vars()
system(cmd)
# Transform .p.tex to .tex?
if ptex2tex is not None:
cmd = ptex2tex
system(cmd)
# Load latex file into string for examination
dofile = open(name + '.tex', 'r')
text = dofile.read()
dofile.close()
latex_options = ''
if latex_program == 'pdflatex':
latex_options = '-file-line-error -interaction nonstopmode -halt-on-error'
# Run latex
shell_escape = ' -shell-escape' if 'begin{minted}' in text else ''
cmd_latex = '%(latex_program)s%(shell_escape)s %(latex_options)s %(name)s' % vars()
system(cmd_latex)
if 'idx{' in text:
cmd = 'makeindex %(name)s' % vars()
system(cmd)
if 'BIBFILE:' in text:
cmd = 'bibtex %(name)s' % vars()
system(cmd)
system(cmd_latex)
system(cmd_latex)
if latex_program == 'latex':
cmd = 'dvipdf %(name)s' % vars()
system(cmd)
# Could instead of dvipdf run the old-fashioned dvips and ps2pdf
if version in ('2up', 'A4-2up'):
# Use pdfnup to make two pages per sheet
cmd = 'pdfnup --frame true --outfile %(name)s.pdf %(name)s.pdf' % vars()
system(cmd)
if postfix:
shutil.copy(name + '.pdf', name + '-' + postfix + '.pdf')
def html(name, options='', split=False):
"""
Make HTML file from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = u"doconce replace 'Figure' 'Рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'figure' 'рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'width=responsive' 'class=\"img-responsive\" style=\"max-width:600px; width:100%%;\"' %(name)s.html".encode('utf-8') % vars()
system(cmd)
if split:
cmd = 'doconce split_html %(name)s' % vars()
system(cmd)
for filename in glob.glob("._%(name)s*.html" % vars()):
if '000' not in filename: | system(cmd)
for filename in [name, '._%s000' % name]:
print(filename)
cmd = u"doconce replace 'Read' 'Перейти к первой главе' %s.html".encode('utf-8') % filename
system(cmd)
cmd = u"doconce subst '.*Next.*' '' %s.html".encode('utf-8') % filename
system(cmd)
def reveal_slides(name, options='', postfix='reveal', theme='darkgray'):
"""Make reveal.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['reveal'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
    options += ' --html_output=%(name)s-%(postfix)s' % vars()
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
    cmd = 'doconce slides_html %(name)s-%(postfix)s reveal --html_slide_theme=%(theme)s' % vars()
system(cmd)
def deck_slides(name, options='', postfix='deck', theme='sandstone.default'):
"""Make deck.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['deck'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
    options += ' --html_output=%(name)s-%(postfix)s' % vars()
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
    cmd = 'doconce slides_html %(name)s-%(postfix)s deck --html_slide_theme=%(theme)s' % vars()
system(cmd)
def beamer_slides(name, options='', postfix='beamer', theme='red_shadow',
ptex2tex_envir='minted'):
"""Make latex beamer slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
# Compile source
shell_escape = '-shell-escape' if ptex2tex_envir == 'minted' else ''
cmd = 'doconce format pdflatex %(name)s %(options)s ' % vars()
system(cmd)
# Run latex
cmd = 'doconce ptex2tex %(name)s envir=%(ptex2tex_envir)s' % vars()
system(cmd)
cmd = 'doconce slides_beamer %(name)s --beamer_slide_theme=%(theme)s' % vars()
system(cmd)
    cmd = 'pdflatex %(shell_escape)s %(name)s' % vars()
system(cmd)
system(cmd)
    system('cp %(name)s.pdf %(name)s-%(postfix)s.pdf' % vars())
    cmd = 'doconce slides_html %(name)s-%(postfix)s deck --html_slide_theme=%(theme)s' % vars()
system(cmd)
def sphinx(name, options='', dirname='sphinx-rootdir',
theme='pyramid', automake_sphinx_options='',
split=False):
"""
Make Sphinx HTML subdirectory from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
if name.endswith('.do'):
name = name.replace('.do','')
# Compile source
cmd = 'doconce format sphinx %(name)s %(options)s ' % vars()
system(cmd)
if split:
        cmd = 'doconce split_rst %(name)s' % vars()
        system(cmd)
# Create sphinx directory
cmd = 'doconce sphinx_dir theme=%(theme)s %(name)s' % vars()
system(cmd)
# Compile sphinx
cmd = 'python automake_sphinx.py %(automake_sphinx_options)s' % vars()
system(cmd)
def doconce2format(name, format, options=''):
"""Make given format from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format %(format)s %(name)s %(options)s ' % vars()
system(cmd)
def plain(name, options=''):
doconce2format(name, 'plain', options)
def pandoc(name, options=''):
doconce2format(name, 'pandoc', options)
def ipynb(name, options=''):
doconce2format(name, 'ipynb', options)
def cwiki(name, options=''):
doconce2format(name, 'cwiki', options)
def mwiki(name, options=''):
doconce2format(name, 'mwiki', options)
def gwiki(name, options=''):
doconce2format(name, 'gwiki', options)
def cleanup_html(fn):
show = False
out = []
with codecs.open(fn, "r", encoding='utf-8') as f:
for line in f:
if "<!-- ------------------- end of main content --------------- -->" in line:
show = False
if show:
out.append(line)
if "<!-- ------------------- main content ---------------------- -->" in line:
show = True
assert out, "No output in %s" % fn
with codecs.open(fn, 'w', encoding='utf-8') as f:
f.write("".join(out))
def mksnippets():
for fn in glob.glob("._*.html"):
with codecs.open(fn, 'r', encoding='utf-8') as thebook:
snippet_name = None
snippet_content = []
snippets = {}
for line in thebook:
if 'navigation buttons at the bottom of the page' in line \
or 'end of snippets' in line:
break
if 'snippet: ' in line:
m = re.search(ur'snippet:\s*(\w+)', line)
if m:
                        snippet_name = m.group(1)
snippets[snippet_name] = snippet_content
else:
if snippet_name:
if re.match('<h\d', line):
snippet_content = []
snippet_name = None
else:
snippet_content.append(line)
for snippet_name, snippet_content in snippets.items():
with codecs.open("snippets/%s.html" % snippet_name,
'w', encoding='utf-8') as snippet:
snippet.write("".join(snippet_content))
def main():
"""
Produce various formats from the doconce source.
"""
dofile = "thebook"
#spellcheck()
common_options = '--encoding=utf-8 --examples_as_exercises '
# --- HTML ---
common_html_options = ' '
# HTML Bootstrap
bootstrap_options = ' --html_style=bootswatch_readable --html_code_style=inherit --html_pre_style=inherit --toc_depth=2 --pygments_html_style=default --html_template=template_bootstrap_wtoc.html --html_figure_caption=bottom --html_figure_hrule=top+bottom'
html(
dofile,
options=common_options + common_html_options + bootstrap_options,
split=True)
# One long HTML file
#html(dofile, options=common_options + common_html_options + ' --html_style=bloodish --html_output=%s-1' % dofile, split=False)
# Solarized HTML
#html(dofile, options=common_options + common_html_options + ' --html_style=solarized3 --html_output=%s-solarized' % dofile, split=True)
mksnippets()
sys.exit(1)
# --- latex ---
common_latex_options = ' --latex_code_style=vrb'
for version in 'paper', 'screen': # , 'A4', '2up', 'A4-2up':
latex(
dofile,
latex_program='pdflatex',
options=common_options + common_latex_options,
version=version,
postfix='auto')
# --- Sphinx ---
# sphinx_themes = ['pyramid',]
# for theme in sphinx_themes:
# dirname = 'sphinx-rootdir' if len(sphinx_themes) == 1 else 'sphinx-rootdir-%s' % theme
# sphinx(
# dofile,
# options=common_options + '',
# dirname=dirname,
# theme=theme,
# automake_sphinx_options='',
# split=False)
# Dump all Unix commands run above as a Bash script
bash = open('tmp_make.sh', 'w')
print 'see tmp_make.sh for an equivalent auto-generated unix script'
bash.write('''#!/bin/bash
set -x # display all commands in output
# Safe execution of a Unix command: exit if failure
function system {
"$@"
if [ $? -ne 0 ]; then
echo "make.sh: unsuccessful command $@"
echo "abort!"
exit 1
fi
}
''')
for cmd in unix_command_recorder:
if cmd.startswith('doconce format') or cmd.startswith('rm '):
bash.write('\n') # delimiter line in script
bash.write('system ' + cmd + '\n')
bash.close()
print 'see tmp_output.log for the output of all the commands'
if __name__ == '__main__':
main() | cmd = u"doconce replace '← Prev' '← Предыдущая глава' %s".encode('utf-8') % filename
system(cmd)
cmd = u"doconce replace 'Next →' ' Следующая глава →' %s".encode('utf-8') % filename | random_line_split |
init.rs | // Copyright 2019-2023 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use super::{get_app, Target};
use crate::helpers::{config::get as get_tauri_config, template::JsonMap};
use crate::Result;
use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError};
use tauri_mobile::{
android::{
config::Config as AndroidConfig, env::Env as AndroidEnv, target::Target as AndroidTarget,
},
config::app::App,
dot_cargo,
target::TargetTrait as _,
util::{
self,
cli::{Report, TextWrapper},
},
};
use std::{
env::{current_dir, var, var_os},
path::PathBuf,
};
pub fn command(
target: Target,
ci: bool,
reinstall_deps: bool,
skip_targets_install: bool,
) -> Result<()> {
let wrapper = TextWrapper::with_splitter(textwrap::termwidth(), textwrap::NoHyphenation);
exec(
target,
&wrapper,
ci || var_os("CI").is_some(),
reinstall_deps,
skip_targets_install,
)
.map_err(|e| anyhow::anyhow!("{:#}", e))?;
Ok(())
}
pub fn configure_cargo(
app: &App,
android: Option<(&mut AndroidEnv, &AndroidConfig)>,
) -> Result<()> {
if let Some((env, config)) = android {
for target in AndroidTarget::all().values() {
let config = target.generate_cargo_config(config, env)?;
let target_var_name = target.triple.replace('-', "_").to_uppercase();
if let Some(linker) = config.linker {
env.base.insert_env_var(
format!("CARGO_TARGET_{target_var_name}_LINKER"),
linker.into(),
);
}
env.base.insert_env_var(
format!("CARGO_TARGET_{target_var_name}_RUSTFLAGS"),
config.rustflags.join(" ").into(),
);
}
}
let mut dot_cargo = dot_cargo::DotCargo::load(app)?;
// Mysteriously, builds that don't specify `--target` seem to fight over
// the build cache with builds that use `--target`! This means that
// alternating between i.e. `cargo run` and `cargo apple run` would
// result in clean builds being made each time you switched... which is
// pretty nightmarish. Specifying `build.target` in `.cargo/config`
// fortunately has the same effect as specifying `--target`, so now we can
// `cargo run` with peace of mind!
//
// This behavior could be explained here:
// https://doc.rust-lang.org/cargo/reference/config.html#buildrustflags
dot_cargo.set_default_target(util::host_target_triple()?);
dot_cargo.write(app).map_err(Into::into)
}
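// Illustrative sketch of what `configure_cargo` produces (paths and triples are
// made up, not taken from this file): per-target linker/rustflags are exported
// as environment variables for the spawned cargo processes, e.g.
//
//   CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER=<ndk>/.../aarch64-linux-android24-clang
//   CARGO_TARGET_AARCH64_LINUX_ANDROID_RUSTFLAGS="-Clink-arg=..."
//
// while `.cargo/config` only gains the host default target, roughly:
//
//   [build]
//   target = "x86_64-unknown-linux-gnu"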
pub fn exec(
target: Target,
wrapper: &TextWrapper,
#[allow(unused_variables)] non_interactive: bool,
#[allow(unused_variables)] reinstall_deps: bool,
skip_targets_install: bool,
) -> Result<App> {
let current_dir = current_dir()?;
let tauri_config = get_tauri_config(None)?;
let tauri_config_guard = tauri_config.lock().unwrap();
let tauri_config_ = tauri_config_guard.as_ref().unwrap();
let app = get_app(tauri_config_);
let (handlebars, mut map) = handlebars(&app);
let mut args = std::env::args_os();
let mut binary = args
.next()
.map(|bin| {
let path = PathBuf::from(&bin);
if path.exists() {
let absolute_path = util::prefix_path(¤t_dir, path);
return absolute_path.into();
}
bin
})
.unwrap_or_else(|| std::ffi::OsString::from("cargo"));
let mut build_args = Vec::new();
for arg in args {
let path = PathBuf::from(&arg);
if path.exists() {
let absolute_path = util::prefix_path(¤t_dir, path);
build_args.push(absolute_path.to_string_lossy().into_owned());
continue;
}
build_args.push(arg.to_string_lossy().into_owned());
if arg == "android" || arg == "ios" {
break;
}
}
build_args.push(target.ide_build_script_name().into());
let binary_path = PathBuf::from(&binary);
let bin_stem = binary_path.file_stem().unwrap().to_string_lossy();
let r = regex::Regex::new("(nodejs|node)\\-?([1-9]*)*$").unwrap();
if r.is_match(&bin_stem) {
if let Some(npm_execpath) = var_os("npm_execpath").map(PathBuf::from) {
let manager_stem = npm_execpath.file_stem().unwrap().to_os_string();
let is_npm = manager_stem == "npm-cli";
let is_npx = manager_stem == "npx-cli";
binary = if is_npm {
"npm".into()
} else if is_npx {
"npx".into()
} else {
manager_stem
};
if !(build_args.is_empty() || is_npx) {
// remove script path, we'll use `npm_lifecycle_event` instead
build_args.remove(0);
}
if is_npm {
build_args.insert(0, "--".into());
}
if !is_npx {
build_args.insert(0, var("npm_lifecycle_event").unwrap());
}
if is_npm {
build_args.insert(0, "run".into());
}
}
}
map.insert("tauri-binary", binary.to_string_lossy());
map.insert("tauri-binary-args", &build_args);
map.insert("tauri-binary-args-str", build_args.join(" "));
let app = match target {
// Generate Android Studio project
Target::Android => match AndroidEnv::new() {
Ok(_env) => {
let app = get_app(tauri_config_);
let (config, metadata) =
super::android::get_config(&app, tauri_config_, &Default::default());
map.insert("android", &config);
super::android::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
skip_targets_install,
)?;
app
}
Err(err) => {
if err.sdk_or_ndk_issue() {
Report::action_request(
" to initialize Android environment; Android support won't be usable until you fix the issue below and re-run `tauri android init`!",
err,
)
.print(wrapper);
app
} else {
return Err(err.into());
}
}
},
#[cfg(target_os = "macos")]
// Generate Xcode project
Target::Ios => {
let (config, metadata) = super::ios::get_config(&app, tauri_config_, &Default::default());
map.insert("apple", &config);
super::ios::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
non_interactive,
reinstall_deps,
skip_targets_install,
)?;
app
}
};
Report::victory(
"Project generated successfully!",
"Make cool apps! 🌻 🐕 🎉",
)
.print(wrapper);
Ok(app)
}
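// Rough example of the binary/args rewriting in `exec` (values are illustrative):
// for `npm run tauri android init`, npm sets npm_execpath to .../npm-cli.js and
// npm_lifecycle_event to "tauri", so the template variables end up as
//
//   tauri-binary      = "npm"
//   tauri-binary-args = ["run", "tauri", "--", "android", "<ide build script name>"]
//
// i.e. the generated IDE integration re-invokes the package-manager script
// instead of calling the node entry point that was on argv[0] directly.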
fn handlebars(app: &App) -> (Handlebars<'static>, JsonMap) {
let mut h = Handlebars::new();
h.register_escape_fn(handlebars::no_escape);
h.register_helper("html-escape", Box::new(html_escape));
h.register_helper("join", Box::new(join));
h.register_helper("quote-and-join", Box::new(quote_and_join));
h.register_helper(
"quote-and-join-colon-prefix",
Box::new(quote_and_join_colon_prefix),
);
h.register_helper("snake-case", Box::new(snake_case));
h.register_helper("reverse-domain", Box::new(reverse_domain));
h.register_helper(
"reverse-domain-snake-case",
Box::new(reverse_domain_snake_case),
);
// don't mix these up or very bad things will happen to all of us
h.register_helper("prefix-path", Box::new(prefix_path));
h.register_helper("unprefix-path", Box::new(unprefix_path));
let mut map = JsonMap::default();
map.insert("app", app);
(h, map)
}
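// For context (the template snippets below are illustrative, not quoted from the
// real templates): the helpers registered above are referenced from the generated
// project templates roughly like
//
//   applicationId "{{reverse-domain app.domain}}"
//   {{tauri-binary}} {{quote-and-join tauri-binary-args}}
//
// e.g. `reverse-domain` turns "example.com" into "com.example", and
// `quote-and-join` renders an array as `"a", "b", "c"`.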
fn get_str<'a>(helper: &'a Helper) -> &'a str {
helper
.param(0)
.and_then(|v| v.value().as_str())
.unwrap_or("")
}
fn get_str_array(helper: &Helper, formatter: impl Fn(&str) -> String) -> Option<Vec<String>> {
helper.param(0).and_then(|v| {
v.value().as_array().and_then(|arr| {
arr
.iter()
.map(|val| {
val.as_str().map(
#[allow(clippy::redundant_closure)]
|s| formatter(s),
)
})
.collect()
})
})
}
fn html_escape(
helper: &Helper,
_: &Handlebars,
_ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(&handlebars::html_escape(get_str(helper)))
.map_err(Into::into)
}
fn join(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| s.to_string())
.ok_or_else(|| RenderError::new("`join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| format!("{s:?}"))
.ok_or_else(|| RenderError::new("`quote-and-join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join_colon_prefix(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| format!("{:?}", format!(":{s}")))
.ok_or_else(|| {
RenderError::new("`quote-and-join-colon-prefix` helper wasn't given an array")
})?
.join(", "),
)
.map_err(Into::into)
}
fn snake_case(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
use heck::ToSnekCase as _;
out
.write(&get_str(helper).to_snek_case())
.map_err(Into::into)
}
fn reverse_domain(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(&util::reverse_domain(get_str(helper)))
.map_err(Into::into)
}
fn reverse_domain_snake_case(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
use heck::ToSnekCase as _;
out
.write(&util::reverse_domain(get_str(helper)).to_snek_case())
.map_err(Into::into)
}
fn app_root(ctx: &Context) -> Result<&str, RenderError> {
let app_root = ctx
.data()
.get("app")
.ok_or_else(|| RenderError::new("`app` missing from template data."))?
.get("root-dir")
.ok_or_else(|| RenderError::new("`app.root-dir` missing from template data."))?;
app_root
.as_str()
.ok_or_else(|| RenderError::new("`app.root-dir` contained invalid UTF-8."))
}
fn prefix_path(
helper: &Helper,
_: &Handlebars,
ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
util::prefix_path(app_root(ctx)?, get_str(helper))
.to_str()
.ok_or_else(|| {
RenderError::new(
"Either the `app.root-dir` or the specified path contained invalid UTF-8.",
)
})?,
)
.map_err(Into::into)
}
fn unprefix_ | r: &Helper,
_: &Handlebars,
ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
util::unprefix_path(app_root(ctx)?, get_str(helper))
.map_err(|_| {
RenderError::new("Attempted to unprefix a path that wasn't in the app root dir.")
})?
.to_str()
.ok_or_else(|| {
RenderError::new(
"Either the `app.root-dir` or the specified path contained invalid UTF-8.",
)
})?,
)
.map_err(Into::into)
}
| path(
helpe | identifier_name |
init.rs | // Copyright 2019-2023 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use super::{get_app, Target};
use crate::helpers::{config::get as get_tauri_config, template::JsonMap};
use crate::Result;
use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError};
use tauri_mobile::{
android::{
config::Config as AndroidConfig, env::Env as AndroidEnv, target::Target as AndroidTarget,
},
config::app::App,
dot_cargo,
target::TargetTrait as _,
util::{
self,
cli::{Report, TextWrapper},
},
};
use std::{
env::{current_dir, var, var_os},
path::PathBuf,
};
pub fn command(
target: Target,
ci: bool,
reinstall_deps: bool,
skip_targets_install: bool,
) -> Result<()> {
let wrapper = TextWrapper::with_splitter(textwrap::termwidth(), textwrap::NoHyphenation);
exec(
target,
&wrapper,
ci || var_os("CI").is_some(),
reinstall_deps,
skip_targets_install,
)
.map_err(|e| anyhow::anyhow!("{:#}", e))?;
Ok(())
}
pub fn configure_cargo(
app: &App,
android: Option<(&mut AndroidEnv, &AndroidConfig)>,
) -> Result<()> {
if let Some((env, config)) = android {
for target in AndroidTarget::all().values() {
let config = target.generate_cargo_config(config, env)?;
let target_var_name = target.triple.replace('-', "_").to_uppercase();
if let Some(linker) = config.linker {
env.base.insert_env_var(
format!("CARGO_TARGET_{target_var_name}_LINKER"),
linker.into(),
);
}
env.base.insert_env_var(
format!("CARGO_TARGET_{target_var_name}_RUSTFLAGS"),
config.rustflags.join(" ").into(),
);
}
}
let mut dot_cargo = dot_cargo::DotCargo::load(app)?;
// Mysteriously, builds that don't specify `--target` seem to fight over
// the build cache with builds that use `--target`! This means that
// alternating between i.e. `cargo run` and `cargo apple run` would
// result in clean builds being made each time you switched... which is
// pretty nightmarish. Specifying `build.target` in `.cargo/config`
// fortunately has the same effect as specifying `--target`, so now we can
// `cargo run` with peace of mind!
//
// This behavior could be explained here:
// https://doc.rust-lang.org/cargo/reference/config.html#buildrustflags
dot_cargo.set_default_target(util::host_target_triple()?);
dot_cargo.write(app).map_err(Into::into)
} | #[allow(unused_variables)] non_interactive: bool,
#[allow(unused_variables)] reinstall_deps: bool,
skip_targets_install: bool,
) -> Result<App> {
let current_dir = current_dir()?;
let tauri_config = get_tauri_config(None)?;
let tauri_config_guard = tauri_config.lock().unwrap();
let tauri_config_ = tauri_config_guard.as_ref().unwrap();
let app = get_app(tauri_config_);
let (handlebars, mut map) = handlebars(&app);
let mut args = std::env::args_os();
let mut binary = args
.next()
.map(|bin| {
let path = PathBuf::from(&bin);
if path.exists() {
let absolute_path = util::prefix_path(¤t_dir, path);
return absolute_path.into();
}
bin
})
.unwrap_or_else(|| std::ffi::OsString::from("cargo"));
let mut build_args = Vec::new();
for arg in args {
let path = PathBuf::from(&arg);
if path.exists() {
let absolute_path = util::prefix_path(¤t_dir, path);
build_args.push(absolute_path.to_string_lossy().into_owned());
continue;
}
build_args.push(arg.to_string_lossy().into_owned());
if arg == "android" || arg == "ios" {
break;
}
}
build_args.push(target.ide_build_script_name().into());
let binary_path = PathBuf::from(&binary);
let bin_stem = binary_path.file_stem().unwrap().to_string_lossy();
let r = regex::Regex::new("(nodejs|node)\\-?([1-9]*)*$").unwrap();
if r.is_match(&bin_stem) {
if let Some(npm_execpath) = var_os("npm_execpath").map(PathBuf::from) {
let manager_stem = npm_execpath.file_stem().unwrap().to_os_string();
let is_npm = manager_stem == "npm-cli";
let is_npx = manager_stem == "npx-cli";
binary = if is_npm {
"npm".into()
} else if is_npx {
"npx".into()
} else {
manager_stem
};
if !(build_args.is_empty() || is_npx) {
// remove script path, we'll use `npm_lifecycle_event` instead
build_args.remove(0);
}
if is_npm {
build_args.insert(0, "--".into());
}
if !is_npx {
build_args.insert(0, var("npm_lifecycle_event").unwrap());
}
if is_npm {
build_args.insert(0, "run".into());
}
}
}
map.insert("tauri-binary", binary.to_string_lossy());
map.insert("tauri-binary-args", &build_args);
map.insert("tauri-binary-args-str", build_args.join(" "));
let app = match target {
// Generate Android Studio project
Target::Android => match AndroidEnv::new() {
Ok(_env) => {
let app = get_app(tauri_config_);
let (config, metadata) =
super::android::get_config(&app, tauri_config_, &Default::default());
map.insert("android", &config);
super::android::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
skip_targets_install,
)?;
app
}
Err(err) => {
if err.sdk_or_ndk_issue() {
Report::action_request(
" to initialize Android environment; Android support won't be usable until you fix the issue below and re-run `tauri android init`!",
err,
)
.print(wrapper);
app
} else {
return Err(err.into());
}
}
},
#[cfg(target_os = "macos")]
// Generate Xcode project
Target::Ios => {
let (config, metadata) = super::ios::get_config(&app, tauri_config_, &Default::default());
map.insert("apple", &config);
super::ios::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
non_interactive,
reinstall_deps,
skip_targets_install,
)?;
app
}
};
Report::victory(
"Project generated successfully!",
"Make cool apps! 🌻 🐕 🎉",
)
.print(wrapper);
Ok(app)
}
fn handlebars(app: &App) -> (Handlebars<'static>, JsonMap) {
let mut h = Handlebars::new();
h.register_escape_fn(handlebars::no_escape);
h.register_helper("html-escape", Box::new(html_escape));
h.register_helper("join", Box::new(join));
h.register_helper("quote-and-join", Box::new(quote_and_join));
h.register_helper(
"quote-and-join-colon-prefix",
Box::new(quote_and_join_colon_prefix),
);
h.register_helper("snake-case", Box::new(snake_case));
h.register_helper("reverse-domain", Box::new(reverse_domain));
h.register_helper(
"reverse-domain-snake-case",
Box::new(reverse_domain_snake_case),
);
// don't mix these up or very bad things will happen to all of us
h.register_helper("prefix-path", Box::new(prefix_path));
h.register_helper("unprefix-path", Box::new(unprefix_path));
let mut map = JsonMap::default();
map.insert("app", app);
(h, map)
}
fn get_str<'a>(helper: &'a Helper) -> &'a str {
helper
.param(0)
.and_then(|v| v.value().as_str())
.unwrap_or("")
}
fn get_str_array(helper: &Helper, formatter: impl Fn(&str) -> String) -> Option<Vec<String>> {
helper.param(0).and_then(|v| {
v.value().as_array().and_then(|arr| {
arr
.iter()
.map(|val| {
val.as_str().map(
#[allow(clippy::redundant_closure)]
|s| formatter(s),
)
})
.collect()
})
})
}
fn html_escape(
helper: &Helper,
_: &Handlebars,
_ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(&handlebars::html_escape(get_str(helper)))
.map_err(Into::into)
}
fn join(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| s.to_string())
.ok_or_else(|| RenderError::new("`join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| format!("{s:?}"))
.ok_or_else(|| RenderError::new("`quote-and-join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join_colon_prefix(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| format!("{:?}", format!(":{s}")))
.ok_or_else(|| {
RenderError::new("`quote-and-join-colon-prefix` helper wasn't given an array")
})?
.join(", "),
)
.map_err(Into::into)
}
fn snake_case(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
use heck::ToSnekCase as _;
out
.write(&get_str(helper).to_snek_case())
.map_err(Into::into)
}
fn reverse_domain(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(&util::reverse_domain(get_str(helper)))
.map_err(Into::into)
}
fn reverse_domain_snake_case(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
use heck::ToSnekCase as _;
out
.write(&util::reverse_domain(get_str(helper)).to_snek_case())
.map_err(Into::into)
}
fn app_root(ctx: &Context) -> Result<&str, RenderError> {
let app_root = ctx
.data()
.get("app")
.ok_or_else(|| RenderError::new("`app` missing from template data."))?
.get("root-dir")
.ok_or_else(|| RenderError::new("`app.root-dir` missing from template data."))?;
app_root
.as_str()
.ok_or_else(|| RenderError::new("`app.root-dir` contained invalid UTF-8."))
}
fn prefix_path(
helper: &Helper,
_: &Handlebars,
ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
util::prefix_path(app_root(ctx)?, get_str(helper))
.to_str()
.ok_or_else(|| {
RenderError::new(
"Either the `app.root-dir` or the specified path contained invalid UTF-8.",
)
})?,
)
.map_err(Into::into)
}
fn unprefix_path(
helper: &Helper,
_: &Handlebars,
ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
util::unprefix_path(app_root(ctx)?, get_str(helper))
.map_err(|_| {
RenderError::new("Attempted to unprefix a path that wasn't in the app root dir.")
})?
.to_str()
.ok_or_else(|| {
RenderError::new(
"Either the `app.root-dir` or the specified path contained invalid UTF-8.",
)
})?,
)
.map_err(Into::into)
} |
pub fn exec(
target: Target,
wrapper: &TextWrapper, | random_line_split |
init.rs | // Copyright 2019-2023 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use super::{get_app, Target};
use crate::helpers::{config::get as get_tauri_config, template::JsonMap};
use crate::Result;
use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError};
use tauri_mobile::{
android::{
config::Config as AndroidConfig, env::Env as AndroidEnv, target::Target as AndroidTarget,
},
config::app::App,
dot_cargo,
target::TargetTrait as _,
util::{
self,
cli::{Report, TextWrapper},
},
};
use std::{
env::{current_dir, var, var_os},
path::PathBuf,
};
pub fn command(
target: Target,
ci: bool,
reinstall_deps: bool,
skip_targets_install: bool,
) -> Result<()> {
let wrapper = TextWrapper::with_splitter(textwrap::termwidth(), textwrap::NoHyphenation);
exec(
target,
&wrapper,
ci || var_os("CI").is_some(),
reinstall_deps,
skip_targets_install,
)
.map_err(|e| anyhow::anyhow!("{:#}", e))?;
Ok(())
}
pub fn configure_cargo(
app: &App,
android: Option<(&mut AndroidEnv, &AndroidConfig)>,
) -> Result<()> {
if let Some((env, config)) = android {
for target in AndroidTarget::all().values() {
let config = target.generate_cargo_config(config, env)?;
let target_var_name = target.triple.replace('-', "_").to_uppercase();
if let Some(linker) = config.linker {
env.base.insert_env_var(
format!("CARGO_TARGET_{target_var_name}_LINKER"),
linker.into(),
);
}
env.base.insert_env_var(
format!("CARGO_TARGET_{target_var_name}_RUSTFLAGS"),
config.rustflags.join(" ").into(),
);
}
}
let mut dot_cargo = dot_cargo::DotCargo::load(app)?;
// Mysteriously, builds that don't specify `--target` seem to fight over
// the build cache with builds that use `--target`! This means that
// alternating between i.e. `cargo run` and `cargo apple run` would
// result in clean builds being made each time you switched... which is
// pretty nightmarish. Specifying `build.target` in `.cargo/config`
// fortunately has the same effect as specifying `--target`, so now we can
// `cargo run` with peace of mind!
//
// This behavior could be explained here:
// https://doc.rust-lang.org/cargo/reference/config.html#buildrustflags
dot_cargo.set_default_target(util::host_target_triple()?);
dot_cargo.write(app).map_err(Into::into)
}
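// Added illustrative note (not part of the original source): the net effect of
// `configure_cargo` is a `.cargo/config` in the app root that pins the default
// build target to the host triple, roughly:
//
//     [build]
//     target = "x86_64-unknown-linux-gnu"   # actual value is whatever host_target_triple() returns
//
// The per-target Android linker/rustflags are exported as CARGO_TARGET_*_LINKER /
// CARGO_TARGET_*_RUSTFLAGS environment variables rather than written to the file.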
pub fn exec(
target: Target,
wrapper: &TextWrapper,
#[allow(unused_variables)] non_interactive: bool,
#[allow(unused_variables)] reinstall_deps: bool,
skip_targets_install: bool,
) -> Result<App> {
let current_dir = current_dir()?;
let tauri_config = get_tauri_config(None)?;
let tauri_config_guard = tauri_config.lock().unwrap();
let tauri_config_ = tauri_config_guard.as_ref().unwrap();
let app = get_app(tauri_config_);
let (handlebars, mut map) = handlebars(&app);
let mut args = std::env::args_os();
let mut binary = args
.next()
.map(|bin| {
let path = PathBuf::from(&bin);
if path.exists() {
let absolute_path = util::prefix_path(¤t_dir, path);
return absolute_path.into();
}
bin
})
.unwrap_or_else(|| std::ffi::OsString::from("cargo"));
let mut build_args = Vec::new();
for arg in args {
let path = PathBuf::from(&arg);
if path.exists() {
let absolute_path = util::prefix_path(¤t_dir, path);
build_args.push(absolute_path.to_string_lossy().into_owned());
continue;
}
build_args.push(arg.to_string_lossy().into_owned());
if arg == "android" || arg == "ios" {
break;
}
}
build_args.push(target.ide_build_script_name().into());
let binary_path = PathBuf::from(&binary);
let bin_stem = binary_path.file_stem().unwrap().to_string_lossy();
let r = regex::Regex::new("(nodejs|node)\\-?([1-9]*)*$").unwrap();
if r.is_match(&bin_stem) |
map.insert("tauri-binary", binary.to_string_lossy());
map.insert("tauri-binary-args", &build_args);
map.insert("tauri-binary-args-str", build_args.join(" "));
let app = match target {
// Generate Android Studio project
Target::Android => match AndroidEnv::new() {
Ok(_env) => {
let app = get_app(tauri_config_);
let (config, metadata) =
super::android::get_config(&app, tauri_config_, &Default::default());
map.insert("android", &config);
super::android::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
skip_targets_install,
)?;
app
}
Err(err) => {
if err.sdk_or_ndk_issue() {
Report::action_request(
" to initialize Android environment; Android support won't be usable until you fix the issue below and re-run `tauri android init`!",
err,
)
.print(wrapper);
app
} else {
return Err(err.into());
}
}
},
#[cfg(target_os = "macos")]
// Generate Xcode project
Target::Ios => {
let (config, metadata) = super::ios::get_config(&app, tauri_config_, &Default::default());
map.insert("apple", &config);
super::ios::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
non_interactive,
reinstall_deps,
skip_targets_install,
)?;
app
}
};
Report::victory(
"Project generated successfully!",
"Make cool apps! 🌻 🐕 🎉",
)
.print(wrapper);
Ok(app)
}
fn handlebars(app: &App) -> (Handlebars<'static>, JsonMap) {
let mut h = Handlebars::new();
h.register_escape_fn(handlebars::no_escape);
h.register_helper("html-escape", Box::new(html_escape));
h.register_helper("join", Box::new(join));
h.register_helper("quote-and-join", Box::new(quote_and_join));
h.register_helper(
"quote-and-join-colon-prefix",
Box::new(quote_and_join_colon_prefix),
);
h.register_helper("snake-case", Box::new(snake_case));
h.register_helper("reverse-domain", Box::new(reverse_domain));
h.register_helper(
"reverse-domain-snake-case",
Box::new(reverse_domain_snake_case),
);
// don't mix these up or very bad things will happen to all of us
h.register_helper("prefix-path", Box::new(prefix_path));
h.register_helper("unprefix-path", Box::new(unprefix_path));
let mut map = JsonMap::default();
map.insert("app", app);
(h, map)
}
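// Added illustrative sketch (not from the original source): with the helpers
// registered above, a template expression such as `{{quote-and-join some-list}}`
// renders `["run", "tauri"]` as `"run", "tauri"`, and `{{snake-case "MyApp"}}`
// renders as `my_app`. The placeholder names here are invented for illustration;
// the real template files ship with tauri-mobile.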
fn get_str<'a>(helper: &'a Helper) -> &'a str {
helper
.param(0)
.and_then(|v| v.value().as_str())
.unwrap_or("")
}
fn get_str_array(helper: &Helper, formatter: impl Fn(&str) -> String) -> Option<Vec<String>> {
helper.param(0).and_then(|v| {
v.value().as_array().and_then(|arr| {
arr
.iter()
.map(|val| {
val.as_str().map(
#[allow(clippy::redundant_closure)]
|s| formatter(s),
)
})
.collect()
})
})
}
fn html_escape(
helper: &Helper,
_: &Handlebars,
_ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(&handlebars::html_escape(get_str(helper)))
.map_err(Into::into)
}
fn join(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| s.to_string())
.ok_or_else(|| RenderError::new("`join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| format!("{s:?}"))
.ok_or_else(|| RenderError::new("`quote-and-join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join_colon_prefix(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| format!("{:?}", format!(":{s}")))
.ok_or_else(|| {
RenderError::new("`quote-and-join-colon-prefix` helper wasn't given an array")
})?
.join(", "),
)
.map_err(Into::into)
}
fn snake_case(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
use heck::ToSnekCase as _;
out
.write(&get_str(helper).to_snek_case())
.map_err(Into::into)
}
fn reverse_domain(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(&util::reverse_domain(get_str(helper)))
.map_err(Into::into)
}
fn reverse_domain_snake_case(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
use heck::ToSnekCase as _;
out
.write(&util::reverse_domain(get_str(helper)).to_snek_case())
.map_err(Into::into)
}
fn app_root(ctx: &Context) -> Result<&str, RenderError> {
let app_root = ctx
.data()
.get("app")
.ok_or_else(|| RenderError::new("`app` missing from template data."))?
.get("root-dir")
.ok_or_else(|| RenderError::new("`app.root-dir` missing from template data."))?;
app_root
.as_str()
.ok_or_else(|| RenderError::new("`app.root-dir` contained invalid UTF-8."))
}
fn prefix_path(
helper: &Helper,
_: &Handlebars,
ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
util::prefix_path(app_root(ctx)?, get_str(helper))
.to_str()
.ok_or_else(|| {
RenderError::new(
"Either the `app.root-dir` or the specified path contained invalid UTF-8.",
)
})?,
)
.map_err(Into::into)
}
fn unprefix_path(
helper: &Helper,
_: &Handlebars,
ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
util::unprefix_path(app_root(ctx)?, get_str(helper))
.map_err(|_| {
RenderError::new("Attempted to unprefix a path that wasn't in the app root dir.")
})?
.to_str()
.ok_or_else(|| {
RenderError::new(
"Either the `app.root-dir` or the specified path contained invalid UTF-8.",
)
})?,
)
.map_err(Into::into)
}
| {
if let Some(npm_execpath) = var_os("npm_execpath").map(PathBuf::from) {
let manager_stem = npm_execpath.file_stem().unwrap().to_os_string();
let is_npm = manager_stem == "npm-cli";
let is_npx = manager_stem == "npx-cli";
binary = if is_npm {
"npm".into()
} else if is_npx {
"npx".into()
} else {
manager_stem
};
if !(build_args.is_empty() || is_npx) {
// remove script path, we'll use `npm_lifecycle_event` instead
build_args.remove(0);
}
if is_npm {
build_args.insert(0, "--".into());
}
if !is_npx {
build_args.insert(0, var("npm_lifecycle_event").unwrap());
}
if is_npm {
build_args.insert(0, "run".into());
}
}
} | conditional_block |
init.rs | // Copyright 2019-2023 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use super::{get_app, Target};
use crate::helpers::{config::get as get_tauri_config, template::JsonMap};
use crate::Result;
use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError};
use tauri_mobile::{
android::{
config::Config as AndroidConfig, env::Env as AndroidEnv, target::Target as AndroidTarget,
},
config::app::App,
dot_cargo,
target::TargetTrait as _,
util::{
self,
cli::{Report, TextWrapper},
},
};
use std::{
env::{current_dir, var, var_os},
path::PathBuf,
};
pub fn command(
target: Target,
ci: bool,
reinstall_deps: bool,
skip_targets_install: bool,
) -> Result<()> {
let wrapper = TextWrapper::with_splitter(textwrap::termwidth(), textwrap::NoHyphenation);
exec(
target,
&wrapper,
ci || var_os("CI").is_some(),
reinstall_deps,
skip_targets_install,
)
.map_err(|e| anyhow::anyhow!("{:#}", e))?;
Ok(())
}
pub fn configure_cargo(
app: &App,
android: Option<(&mut AndroidEnv, &AndroidConfig)>,
) -> Result<()> {
if let Some((env, config)) = android {
for target in AndroidTarget::all().values() {
let config = target.generate_cargo_config(config, env)?;
let target_var_name = target.triple.replace('-', "_").to_uppercase();
if let Some(linker) = config.linker {
env.base.insert_env_var(
format!("CARGO_TARGET_{target_var_name}_LINKER"),
linker.into(),
);
}
env.base.insert_env_var(
format!("CARGO_TARGET_{target_var_name}_RUSTFLAGS"),
config.rustflags.join(" ").into(),
);
}
}
let mut dot_cargo = dot_cargo::DotCargo::load(app)?;
// Mysteriously, builds that don't specify `--target` seem to fight over
// the build cache with builds that use `--target`! This means that
// alternating between i.e. `cargo run` and `cargo apple run` would
// result in clean builds being made each time you switched... which is
// pretty nightmarish. Specifying `build.target` in `.cargo/config`
// fortunately has the same effect as specifying `--target`, so now we can
// `cargo run` with peace of mind!
//
// This behavior could be explained here:
// https://doc.rust-lang.org/cargo/reference/config.html#buildrustflags
dot_cargo.set_default_target(util::host_target_triple()?);
dot_cargo.write(app).map_err(Into::into)
}
pub fn exec(
target: Target,
wrapper: &TextWrapper,
#[allow(unused_variables)] non_interactive: bool,
#[allow(unused_variables)] reinstall_deps: bool,
skip_targets_install: bool,
) -> Result<App> {
let current_dir = current_dir()?;
let tauri_config = get_tauri_config(None)?;
let tauri_config_guard = tauri_config.lock().unwrap();
let tauri_config_ = tauri_config_guard.as_ref().unwrap();
let app = get_app(tauri_config_);
let (handlebars, mut map) = handlebars(&app);
let mut args = std::env::args_os();
let mut binary = args
.next()
.map(|bin| {
let path = PathBuf::from(&bin);
if path.exists() {
let absolute_path = util::prefix_path(¤t_dir, path);
return absolute_path.into();
}
bin
})
.unwrap_or_else(|| std::ffi::OsString::from("cargo"));
let mut build_args = Vec::new();
for arg in args {
let path = PathBuf::from(&arg);
if path.exists() {
let absolute_path = util::prefix_path(¤t_dir, path);
build_args.push(absolute_path.to_string_lossy().into_owned());
continue;
}
build_args.push(arg.to_string_lossy().into_owned());
if arg == "android" || arg == "ios" {
break;
}
}
build_args.push(target.ide_build_script_name().into());
let binary_path = PathBuf::from(&binary);
let bin_stem = binary_path.file_stem().unwrap().to_string_lossy();
let r = regex::Regex::new("(nodejs|node)\\-?([1-9]*)*$").unwrap();
if r.is_match(&bin_stem) {
if let Some(npm_execpath) = var_os("npm_execpath").map(PathBuf::from) {
let manager_stem = npm_execpath.file_stem().unwrap().to_os_string();
let is_npm = manager_stem == "npm-cli";
let is_npx = manager_stem == "npx-cli";
binary = if is_npm {
"npm".into()
} else if is_npx {
"npx".into()
} else {
manager_stem
};
if !(build_args.is_empty() || is_npx) {
// remove script path, we'll use `npm_lifecycle_event` instead
build_args.remove(0);
}
if is_npm {
build_args.insert(0, "--".into());
}
if !is_npx {
build_args.insert(0, var("npm_lifecycle_event").unwrap());
}
if is_npm {
build_args.insert(0, "run".into());
}
}
}
map.insert("tauri-binary", binary.to_string_lossy());
map.insert("tauri-binary-args", &build_args);
map.insert("tauri-binary-args-str", build_args.join(" "));
let app = match target {
// Generate Android Studio project
Target::Android => match AndroidEnv::new() {
Ok(_env) => {
let app = get_app(tauri_config_);
let (config, metadata) =
super::android::get_config(&app, tauri_config_, &Default::default());
map.insert("android", &config);
super::android::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
skip_targets_install,
)?;
app
}
Err(err) => {
if err.sdk_or_ndk_issue() {
Report::action_request(
" to initialize Android environment; Android support won't be usable until you fix the issue below and re-run `tauri android init`!",
err,
)
.print(wrapper);
app
} else {
return Err(err.into());
}
}
},
#[cfg(target_os = "macos")]
// Generate Xcode project
Target::Ios => {
let (config, metadata) = super::ios::get_config(&app, tauri_config_, &Default::default());
map.insert("apple", &config);
super::ios::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
non_interactive,
reinstall_deps,
skip_targets_install,
)?;
app
}
};
Report::victory(
"Project generated successfully!",
"Make cool apps! 🌻 🐕 🎉",
)
.print(wrapper);
Ok(app)
}
fn handlebars(app: &App) -> (Handlebars<'static>, JsonMap) {
let mut h = Handlebars::new();
h.register_escape_fn(handlebars::no_escape);
h.register_helper("html-escape", Box::new(html_escape));
h.register_helper("join", Box::new(join));
h.register_helper("quote-and-join", Box::new(quote_and_join));
h.register_helper(
"quote-and-join-colon-prefix",
Box::new(quote_and_join_colon_prefix),
);
h.register_helper("snake-case", Box::new(snake_case));
h.register_helper("reverse-domain", Box::new(reverse_domain));
h.register_helper(
"reverse-domain-snake-case",
Box::new(reverse_domain_snake_case),
);
// don't mix these up or very bad things will happen to all of us
h.register_helper("prefix-path", Box::new(prefix_path));
h.register_helper("unprefix-path", Box::new(unprefix_path));
let mut map = JsonMap::default();
map.insert("app", app);
(h, map)
}
fn get_str<'a>(helper: &'a Helper) -> &'a str {
helper
.param(0)
.and_then(|v| v.value().as_str())
.unwrap_or("")
}
fn get_str_array(helper: &Helper, formatter: impl Fn(&str) -> String) -> Option<Vec<String>> {
helper.param(0).and_then(|v| {
v.value().as_array().and_then(|arr| {
arr
.iter()
.map(|val| {
val.as_str().map(
#[allow(clippy::redundant_closure)]
|s| formatter(s),
)
})
.collect()
})
})
}
fn html_escape(
helper: &Helper,
_: &Handlebars,
_ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
| (
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| s.to_string())
.ok_or_else(|| RenderError::new("`join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| format!("{s:?}"))
.ok_or_else(|| RenderError::new("`quote-and-join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join_colon_prefix(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| format!("{:?}", format!(":{s}")))
.ok_or_else(|| {
RenderError::new("`quote-and-join-colon-prefix` helper wasn't given an array")
})?
.join(", "),
)
.map_err(Into::into)
}
fn snake_case(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
use heck::ToSnekCase as _;
out
.write(&get_str(helper).to_snek_case())
.map_err(Into::into)
}
fn reverse_domain(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(&util::reverse_domain(get_str(helper)))
.map_err(Into::into)
}
fn reverse_domain_snake_case(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
use heck::ToSnekCase as _;
out
.write(&util::reverse_domain(get_str(helper)).to_snek_case())
.map_err(Into::into)
}
fn app_root(ctx: &Context) -> Result<&str, RenderError> {
let app_root = ctx
.data()
.get("app")
.ok_or_else(|| RenderError::new("`app` missing from template data."))?
.get("root-dir")
.ok_or_else(|| RenderError::new("`app.root-dir` missing from template data."))?;
app_root
.as_str()
.ok_or_else(|| RenderError::new("`app.root-dir` contained invalid UTF-8."))
}
fn prefix_path(
helper: &Helper,
_: &Handlebars,
ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
util::prefix_path(app_root(ctx)?, get_str(helper))
.to_str()
.ok_or_else(|| {
RenderError::new(
"Either the `app.root-dir` or the specified path contained invalid UTF-8.",
)
})?,
)
.map_err(Into::into)
}
fn unprefix_path(
helper: &Helper,
_: &Handlebars,
ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
util::unprefix_path(app_root(ctx)?, get_str(helper))
.map_err(|_| {
RenderError::new("Attempted to unprefix a path that wasn't in the app root dir.")
})?
.to_str()
.ok_or_else(|| {
RenderError::new(
"Either the `app.root-dir` or the specified path contained invalid UTF-8.",
)
})?,
)
.map_err(Into::into)
}
| .write(&handlebars::html_escape(get_str(helper)))
.map_err(Into::into)
}
fn join | identifier_body |
consensus.rs | // Copyright 2020 The Grin Developers
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! All the rules required for a cryptocurrency to reach consensus across
//! the whole network are complex and hard to completely isolate. Some can be
//! simple parameters (like block reward), others complex algorithms (like
//! Merkle sum trees or reorg rules). However, as long as they're simple
//! enough, consensus-relevant constants and short functions should be kept
//! here.
// Proof of existence:
// txid: d043f5cc3e9e135e0bafb010521813668d5bc86eef27c0e30232287fd7f5a85f
// document hash: 9b6372224719c5531e0ee1fcc36e7c9e29def9edd22e61aa60c014927191e58a
use crate::core::block::HeaderVersion;
use crate::core::hash::{Hash, ZERO_HASH};
use crate::global;
use crate::pow::Difficulty;
use std::cmp::{max, min};
/// A grin is divisible to 10^9, following the SI prefixes
pub const GRIN_BASE: u64 = 1_000_000_000;
/// Milligrin, a thousand of a grin
pub const MILLI_GRIN: u64 = GRIN_BASE / 1_000;
/// Microgrin, a thousand of a milligrin
pub const MICRO_GRIN: u64 = MILLI_GRIN / 1_000;
/// Nanogrin, smallest unit, takes a billion to make a grin
pub const NANO_GRIN: u64 = 1;
/// Block interval, in seconds, the network will tune its next_target for. Note
/// that we may reduce this value in the future as we get more data on mining
/// with Cuckoo Cycle, networks improve and block propagation is optimized
/// (adjusting the reward accordingly).
pub const BLOCK_TIME_SEC: u64 = 60;
/// Start at BTC block 717,000 (snapshot) which should occur around
/// Jan 3, 2022. This block will reward 6.25 BTC.
/// We allocate the remainder of the 6.25 block reward to our
/// "long tail", which will last 1000 years and start with 3.25.
/// So the initial reward is 0.3125 BCMWs for 1,224,600 more blocks.
/// This is due to the 1 minute block time instead of Bitcoin's 10 minutes.
/// This is approximately Bitcoin's halving schedule, until
/// the 8th halving, after which the long tail will distribute the
/// remainder of the BCMWs over 1000 years. At block 717,000 there will be
/// 19,246,875 BTC.
/// Note that pre-launch we may recalibrate these numbers
/// a little. The goal will be to get exactly 21m BCMWs, have
/// a 1000 year long tail, and do a snapshot on January 3, 2022.
/// Snapshot includes 18,918,750,000,000,000 NanoBCMWs
/// Genesis reward is 0.
pub const REWARD0: u64 = 0;
/// First reward 1,224,600 blocks
pub const REWARD1: u64 = 312_500_000; // 382,687,500,000,000 NanoBCMWs
/// Second reward for 2,100,000 blocks
pub const REWARD2: u64 = 156_250_000; // 328,125,000,000,000 NanoBCMWs
/// Third reward for 2,100,000 blocks
pub const REWARD3: u64 = 78_125_000; // 164,062,500,000,000 NanoBCMWs
/// Fourth reward for 2,100,000 blocks
pub const REWARD4: u64 = 39_062_500; // 82,031,250,000,000 NanoBCMWs
/// Fifth reward for 2,100,000 blocks
pub const REWARD5: u64 = 19_531_250; // 41,015,625,000,000 NanoBCMWs
/// Sixth reward for 2,100,000 blocks
pub const REWARD6: u64 = 9_765_625; // 20,507,812,500,000 NanoBCMWs
/// Seventh reward for 2,100,000 blocks
pub const REWARD7: u64 = 4_882_812; // 10,253,905,200,000 NanoBCMWs
/// Eighth reward for 525,600,000 blocks
pub const REWARD8: u64 = 2_000_000; // 1,051,200,000,000,000 NanoBCMWs
/// Actual block reward for a given total fee amount
pub fn reward(fee: u64, height: u64) -> u64 {
calc_block_reward(height).saturating_add(fee)
}
fn get_epoch_start(num: u64) -> u64 {
if num == 1 {
1
} else if num == 2 {
1_224_600
} else if num == 3 {
3_324_600
} else if num == 4 {
5_424_600
} else if num == 5 {
7_524_600
} else if num == 6 {
9_624_600
} else if num == 7 {
11_724_600
} else if num == 8 {
13_824_600
} else if num == 9 {
539_424_600
} else {
// shouldn't get here.
0
}
}
/// Calculate block reward based on height
pub fn calc_block_reward(height: u64) -> u64 {
if height == 0 {
// reward for genesis block
REWARD0
} else if height <= get_epoch_start(2) {
REWARD1
} else if height <= get_epoch_start(3) {
REWARD2
} else if height <= get_epoch_start(4) {
REWARD3
} else if height <= get_epoch_start(5) {
REWARD4
} else if height <= get_epoch_start(6) {
REWARD5
} else if height <= get_epoch_start(7) {
REWARD6
} else if height <= get_epoch_start(8) {
REWARD7
} else if height <= get_epoch_start(9) {
REWARD8
} else {
0 // no reward after this.
}
}
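// Added sketch (not part of the original source): a small sanity check of the
// epoch schedule above. Heights come straight from `get_epoch_start`, so this
// only illustrates the height-to-reward mapping rather than asserting new facts.
#[cfg(test)]
mod reward_schedule_sketch {
	use super::*;

	#[test]
	fn epoch_boundaries() {
		assert_eq!(calc_block_reward(0), REWARD0);
		assert_eq!(calc_block_reward(1), REWARD1);
		assert_eq!(calc_block_reward(get_epoch_start(2)), REWARD1);
		assert_eq!(calc_block_reward(get_epoch_start(2) + 1), REWARD2);
		assert_eq!(calc_block_reward(get_epoch_start(9)), REWARD8);
		assert_eq!(calc_block_reward(get_epoch_start(9) + 1), 0);
	}
}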
fn get_overage_offset_start_epoch(num: u64) -> u64 {
if num == 1 {
REWARD0
} else if num == 2 {
get_epoch_start(2) * REWARD1 + REWARD0
} else if num == 3 {
get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0
} else if num == 4 {
get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 5 {
get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 6 {
get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 7 {
get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 8 {
get_epoch_start(8) * REWARD7
+ get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 9 {
get_epoch_start(9) * REWARD8
+ get_epoch_start(8) * REWARD7
+ get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else {
// should not get here
1
}
}
/// Calculate block overage based on height and claimed BTCUtxos
pub fn calc_block_overage(height: u64) -> u64 {
if height == 0 {
0
} else if height <= get_epoch_start(2) {
(REWARD1 * height) + get_overage_offset_start_epoch(1)
} else if height <= get_epoch_start(3) {
(REWARD2 * (height - get_epoch_start(2))) + get_overage_offset_start_epoch(2)
} else if height <= get_epoch_start(4) {
(REWARD3 * (height - get_epoch_start(3))) + get_overage_offset_start_epoch(3)
} else if height <= get_epoch_start(5) {
(REWARD4 * (height - get_epoch_start(4))) + get_overage_offset_start_epoch(4)
} else if height <= get_epoch_start(6) {
(REWARD5 * (height - get_epoch_start(5))) + get_overage_offset_start_epoch(5)
} else if height <= get_epoch_start(7) {
(REWARD6 * (height - get_epoch_start(6))) + get_overage_offset_start_epoch(6)
} else if height <= get_epoch_start(8) {
(REWARD7 * (height - get_epoch_start(7))) + get_overage_offset_start_epoch(7)
} else if height <= get_epoch_start(9) {
(REWARD8 * (height - get_epoch_start(8))) + get_overage_offset_start_epoch(8)
} else {
// we exit here. Up to future generations to decide
// how to handle.
std::process::exit(0);
}
}
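// Added worked example: calc_block_overage(1) = REWARD1 + REWARD0 = 312_500_000,
// and at the first epoch boundary calc_block_overage(1_224_600) =
// 1_224_600 * REWARD1 = 382,687,500,000,000 NanoBCMWs, matching the REWARD1 comment.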
/// an hour in seconds
pub const HOUR_SEC: u64 = 60 * 60;
/// Nominal height for standard time intervals, hour is 60 blocks
pub const HOUR_HEIGHT: u64 = HOUR_SEC / BLOCK_TIME_SEC;
/// A day is 1440 blocks
pub const DAY_HEIGHT: u64 = 24 * HOUR_HEIGHT;
/// A week is 10_080 blocks
pub const WEEK_HEIGHT: u64 = 7 * DAY_HEIGHT;
/// A year is 524_160 blocks
pub const YEAR_HEIGHT: u64 = 52 * WEEK_HEIGHT;
/// Number of blocks before a coinbase matures and can be spent
pub const COINBASE_MATURITY: u64 = DAY_HEIGHT;
/// We use all C29d from the start
pub fn secondary_pow_ratio(_height: u64) -> u64 {
100
}
/// Cuckoo-cycle proof size (cycle length)
pub const PROOFSIZE: usize = 42;
/// Default Cuckatoo Cycle edge_bits, used for mining and validating.
pub const DEFAULT_MIN_EDGE_BITS: u8 = 31;
/// Cuckaroo* proof-of-work edge_bits, meant to be ASIC resistant.
pub const SECOND_POW_EDGE_BITS: u8 = 29;
/// Original reference edge_bits to compute difficulty factors for higher
/// Cuckoo graph sizes, changing this would hard fork
pub const BASE_EDGE_BITS: u8 = 24;
/// Default number of blocks in the past when cross-block cut-through will start
/// happening. Needs to be long enough to not overlap with a long reorg.
/// Rationale
/// behind the value is that the longest bitcoin fork was about 30 blocks, so 5h. We
/// add an order of magnitude to be safe and round to 7x24h of blocks to make it
/// easier to reason about.
pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32;
/// Default number of blocks in the past to determine the height where we request
/// a txhashset (and full blocks from). Needs to be long enough to not overlap with
/// a long reorg.
/// Rationale behind the value is that the longest bitcoin fork was about 30 blocks, so 5h.
/// We add an order of magnitude to be safe and round to 2x24h of blocks to make it
/// easier to reason about.
pub const STATE_SYNC_THRESHOLD: u32 = 2 * DAY_HEIGHT as u32;
/// Weight of an input when counted against the max block weight capacity
pub const INPUT_WEIGHT: u64 = 1;
/// Weight of an output when counted against the max block weight capacity
pub const OUTPUT_WEIGHT: u64 = 21;
/// Weight of a kernel when counted against the max block weight capacity
pub const KERNEL_WEIGHT: u64 = 3;
/// Total maximum block weight. At current sizes, this means a maximum
/// theoretical size of:
/// * `(674 + 33 + 1) * (40_000 / 21) = 1_348_571` for a block with only outputs
/// * `(1 + 8 + 8 + 33 + 64) * (40_000 / 3) = 1_520_000` for a block with only kernels
/// * `(1 + 33) * 40_000 = 1_360_000` for a block with only inputs
///
/// Regardless of the relative numbers of inputs/outputs/kernels in a block the maximum
/// block size is around 1.5MB
/// For a block full of "average" txs (2 inputs, 2 outputs, 1 kernel) we have -
/// `(1 * 2) + (21 * 2) + (3 * 1) = 47` (weight per tx)
/// `40_000 / 47 = 851` (txs per block)
///
pub const MAX_BLOCK_WEIGHT: u64 = 40_000;
/// Fork every 6 months.
pub const HARD_FORK_INTERVAL: u64 = YEAR_HEIGHT / 2;
/// Testnet first hard fork height, set to happen around 2019-06-20
pub const TESTNET_FIRST_HARD_FORK: u64 = 185_040;
/// Testnet second hard fork height, set to happen around 2019-12-19
pub const TESTNET_SECOND_HARD_FORK: u64 = 298_080;
/// Testnet third hard fork height, set to happen around 2020-06-20
pub const TESTNET_THIRD_HARD_FORK: u64 = 552_960;
/// Testnet fourth hard fork height, set to happen around 2020-12-8
pub const TESTNET_FOURTH_HARD_FORK: u64 = 642_240;
/// Fork every 3 blocks
pub const TESTING_HARD_FORK_INTERVAL: u64 = 3;
/// Compute possible block version at a given height,
/// currently no hard forks.
pub fn header_version(_height: u64) -> HeaderVersion {
HeaderVersion(1)
}
/// Check whether the block version is valid at a given height; with no hard
/// forks currently scheduled this simply requires the version from `header_version`.
pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool {
version == header_version(height)
}
/// Number of blocks used to calculate difficulty adjustment by Damped Moving Average
pub const DMA_WINDOW: u64 = HOUR_HEIGHT;
/// Difficulty adjustment half life (actually, 60s * number of 0s-blocks to raise diff by factor e) is 4 hours
pub const WTEMA_HALF_LIFE: u64 = 4 * HOUR_SEC;
/// Average time span of the DMA difficulty adjustment window
pub const BLOCK_TIME_WINDOW: u64 = DMA_WINDOW * BLOCK_TIME_SEC;
/// Clamp factor to use for DMA difficulty adjustment
/// Limit value to within this factor of goal
pub const CLAMP_FACTOR: u64 = 2;
/// Dampening factor to use for DMA difficulty adjustment
pub const DMA_DAMP_FACTOR: u64 = 3;
/// Dampening factor to use for AR scale calculation.
pub const AR_SCALE_DAMP_FACTOR: u64 = 13;
/// Compute weight of a graph as number of siphash bits defining the graph
/// The height dependence allows a 30-week linear transition from C31+ to C32+ starting after 1 year
pub fn graph_weight(height: u64, edge_bits: u8) -> u64 {
let mut xpr_edge_bits = edge_bits as u64;
let expiry_height = YEAR_HEIGHT;
if edge_bits == 31 && height >= expiry_height {
xpr_edge_bits = xpr_edge_bits.saturating_sub(1 + (height - expiry_height) / WEEK_HEIGHT);
}
// For C31 xpr_edge_bits reaches 0 at height YEAR_HEIGHT + 30 * WEEK_HEIGHT
// 30 weeks after Jan 15, 2020 would be Aug 12, 2020
(2u64 << (edge_bits - global::base_edge_bits()) as u64) * xpr_edge_bits
}
/// minimum solution difficulty after HardFork4 when PoW becomes primary only Cuckatoo32+
pub const C32_GRAPH_WEIGHT: u64 = (2u64 << (32 - BASE_EDGE_BITS) as u64) * 32; // 16384
/// Minimum difficulty, enforced in Damped Moving Average diff retargetting
/// avoids getting stuck when trying to increase difficulty subject to dampening
pub const MIN_DMA_DIFFICULTY: u64 = DMA_DAMP_FACTOR;
/// Minimum scaling factor for AR pow, enforced in diff retargetting
/// avoids getting stuck when trying to increase ar_scale subject to dampening
pub const MIN_AR_SCALE: u64 = AR_SCALE_DAMP_FACTOR;
/// unit difficulty, equal to graph_weight(SECOND_POW_EDGE_BITS)
pub const UNIT_DIFFICULTY: u64 =
((2 as u64) << (SECOND_POW_EDGE_BITS - BASE_EDGE_BITS)) * (SECOND_POW_EDGE_BITS as u64);
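// Added note: with SECOND_POW_EDGE_BITS = 29 and BASE_EDGE_BITS = 24 this works out
// to (2 << 5) * 29 = 64 * 29 = 1_856, i.e. the same value graph_weight(_, 29) yields
// for the mainnet base edge bits.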
/// The initial difficulty at launch. This should be over-estimated
/// and difficulty should come down at launch rather than up
/// Currently grossly over-estimated at 10% of current
/// ethereum GPUs (assuming 1GPU can solve a block at diff 1 in one block interval)
pub const INITIAL_DIFFICULTY: u64 = 1_000_000 * UNIT_DIFFICULTY;
/// Minimal header information required for the Difficulty calculation to
/// take place
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct HeaderInfo {
/// Block hash, ZERO_HASH when this is a synthetic entry.
pub block_hash: Hash,
/// Timestamp of the header, 1 when not used (returned info)
pub timestamp: u64,
/// Network difficulty or next difficulty to use
pub difficulty: Difficulty,
/// Network secondary PoW factor or factor to use
pub secondary_scaling: u32,
/// Whether the header is a secondary proof of work
pub is_secondary: bool,
}
impl HeaderInfo {
/// Default constructor
pub fn new(
block_hash: Hash,
timestamp: u64,
difficulty: Difficulty,
secondary_scaling: u32,
is_secondary: bool,
) -> HeaderInfo {
HeaderInfo {
block_hash,
timestamp,
difficulty,
secondary_scaling,
is_secondary,
}
}
/// Constructor from a timestamp and difficulty, setting a default secondary
/// PoW factor
pub fn from_ts_diff(timestamp: u64, difficulty: Difficulty) -> HeaderInfo {
HeaderInfo {
block_hash: ZERO_HASH,
timestamp,
difficulty,
secondary_scaling: global::initial_graph_weight(),
is_secondary: true,
}
}
/// Constructor from a difficulty and secondary factor, setting a default
/// timestamp
pub fn from_diff_scaling(difficulty: Difficulty, secondary_scaling: u32) -> HeaderInfo {
HeaderInfo {
block_hash: ZERO_HASH,
timestamp: 1,
difficulty,
secondary_scaling,
is_secondary: true,
}
}
}
/// Move value linearly toward a goal
pub fn damp(actual: u64, goal: u64, damp_factor: u64) -> u64 {
(actual + (damp_factor - 1) * goal) / damp_factor
}
/// limit value to be within some factor from a goal
pub fn clamp(actual: u64, goal: u64, clamp_factor: u64) -> u64 {
max(goal / clamp_factor, min(actual, goal * clamp_factor))
}
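// Added sketch (not part of the original source): concrete numbers for the damping
// and clamping used by the DMA retarget below, using a goal of 60 for readability
// (the real goal is BLOCK_TIME_WINDOW = DMA_WINDOW * BLOCK_TIME_SEC).
#[cfg(test)]
mod damp_clamp_sketch {
	use super::*;

	#[test]
	fn worked_examples() {
		// damp moves a third of the way toward the goal when the factor is 3
		assert_eq!(damp(90, 60, DMA_DAMP_FACTOR), 70);
		// clamp keeps the value within [goal / 2, goal * 2] when the factor is 2
		assert_eq!(clamp(10, 60, CLAMP_FACTOR), 30);
		assert_eq!(clamp(500, 60, CLAMP_FACTOR), 120);
	}
}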
/// Computes the proof-of-work difficulty that the next block should comply with.
/// Takes an iterator over past block headers information, from latest
/// (highest height) to oldest (lowest height).
/// Uses either the old dma DAA or, starting from HF4, the new wtema DAA
pub fn next_difficulty<T>(height: u64, cursor: T) -> HeaderInfo
where
T: IntoIterator<Item = HeaderInfo>,
{
if header_version(height) < HeaderVersion(5) {
next_dma_difficulty(height, cursor)
} else {
next_wtema_difficulty(height, cursor)
}
}
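// Added note: `header_version` above currently always returns HeaderVersion(1), so
// only the DMA branch is taken here; the wtema path is kept for a future hard fork
// that bumps the header version to 5 or later.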
/// Difficulty calculation based on a Damped Moving Average
/// of difficulty over a window of DMA_WINDOW blocks.
/// The corresponding timespan is calculated
/// by using the difference between the timestamps at the beginning
/// and the end of the window, with a damping toward the target block time.
pub fn next_dma_difficulty<T>(height: u64, cursor: T) -> HeaderInfo
where
T: IntoIterator<Item = HeaderInfo>,
{
// Create a vector of difficulty data running from earliest
// to latest, padded with simulated pre-genesis data to allow earlier
// adjustment if there isn't enough window data; its length will be
// DMA_WINDOW + 1 (for the initial block time bound)
let diff_data = global::difficulty_data_to_vector(cursor);
// First, get the ratio of secondary PoW vs primary, skipping initial header
let sec_pow_scaling = secondary_pow_scaling(height, &diff_data[1..]);
// Get the timestamp delta across the window
let ts_delta: u64 = diff_data[DMA_WINDOW as usize].timestamp - diff_data[0].timestamp;
// Get the difficulty sum of the last DMA_WINDOW elements
let diff_sum: u64 = diff_data
.iter()
.skip(1)
.map(|dd| dd.difficulty.to_num())
.sum();
// adjust time delta toward goal subject to dampening and clamping
let adj_ts = clamp(
damp(ts_delta, BLOCK_TIME_WINDOW, DMA_DAMP_FACTOR),
BLOCK_TIME_WINDOW,
CLAMP_FACTOR,
);
// minimum difficulty avoids getting stuck due to dampening | }
/// Difficulty calculation based on a Weighted Target Exponential Moving Average
/// of difficulty, using the ratio of the last block time over the target block time.
pub fn next_wtema_difficulty<T>(_height: u64, cursor: T) -> HeaderInfo
where
T: IntoIterator<Item = HeaderInfo>,
{
let mut last_headers = cursor.into_iter();
// last two headers
let last_header = last_headers.next().unwrap();
let prev_header = last_headers.next().unwrap();
let last_block_time: u64 = last_header.timestamp - prev_header.timestamp;
let last_diff = last_header.difficulty.to_num();
// wtema difficulty update
let next_diff =
last_diff * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - BLOCK_TIME_SEC + last_block_time);
// mainnet minimum difficulty at graph_weight(32) ensures difficulty increase on 59s block
// since 16384 * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - 1) > 16384
let difficulty = max(Difficulty::min_wtema(), Difficulty::from_num(next_diff));
HeaderInfo::from_diff_scaling(difficulty, 0) // no more secondary PoW
}
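// Added worked example: with WTEMA_HALF_LIFE = 14_400 and BLOCK_TIME_SEC = 60, a
// last block that took 30s gives next_diff = last_diff * 14_400 / 14_370 (about
// +0.2%), while a 120s block gives last_diff * 14_400 / 14_460 (about -0.4%);
// a block exactly on target leaves the difficulty unchanged.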
/// Count, in units of 1/100 (a percent), the number of "secondary" (AR) blocks in the provided window of blocks.
pub fn ar_count(_height: u64, diff_data: &[HeaderInfo]) -> u64 {
100 * diff_data.iter().filter(|n| n.is_secondary).count() as u64
}
/// The secondary proof-of-work factor is calculated along the same lines as in next_dma_difficulty,
/// as an adjustment on the deviation against the ideal value.
/// Factor by which the secondary proof of work difficulty will be adjusted
pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 {
// Get the scaling factor sum of the last DMA_WINDOW elements
let scale_sum: u64 = diff_data.iter().map(|dd| dd.secondary_scaling as u64).sum();
// compute ideal 2nd_pow_fraction in pct and across window
let target_pct = secondary_pow_ratio(height);
let target_count = DMA_WINDOW * target_pct;
// Get the secondary count across the window, adjusting count toward goal
// subject to dampening and clamping.
let adj_count = clamp(
damp(
ar_count(height, diff_data),
target_count,
AR_SCALE_DAMP_FACTOR,
),
target_count,
CLAMP_FACTOR,
);
let scale = scale_sum * target_pct / max(1, adj_count);
// minimum AR scale avoids getting stuck due to dampening
max(MIN_AR_SCALE, scale) as u32
}
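// Added worked example: with a 60-block window made up entirely of secondary
// headers, each carrying secondary_scaling = 1, we get scale_sum = 60,
// target_count = 6_000 and ar_count = 6_000, so the raw scale is
// 60 * 100 / 6_000 = 1 and the MIN_AR_SCALE floor lifts the result to 13.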
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_graph_weight() {
global::set_local_chain_type(global::ChainTypes::Mainnet);
// initial weights
assert_eq!(graph_weight(1, 31), 256 * 31);
assert_eq!(graph_weight(1, 32), 512 * 32);
assert_eq!(graph_weight(1, 33), 1024 * 33);
// one year in, 31 starts going down, the rest stays the same
assert_eq!(graph_weight(YEAR_HEIGHT, 31), 256 * 30);
assert_eq!(graph_weight(YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(YEAR_HEIGHT, 33), 1024 * 33);
// 31 loses one factor per week
assert_eq!(graph_weight(YEAR_HEIGHT + WEEK_HEIGHT, 31), 256 * 29);
assert_eq!(graph_weight(YEAR_HEIGHT + 2 * WEEK_HEIGHT, 31), 256 * 28);
assert_eq!(graph_weight(YEAR_HEIGHT + 32 * WEEK_HEIGHT, 31), 0);
// 2 years in, 31 still at 0, 32 starts decreasing
assert_eq!(graph_weight(2 * YEAR_HEIGHT, 31), 0);
assert_eq!(graph_weight(2 * YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(2 * YEAR_HEIGHT, 33), 1024 * 33);
// 32 phaseout on hold
assert_eq!(
graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 32),
C32_GRAPH_WEIGHT
);
assert_eq!(graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 31), 0);
assert_eq!(
graph_weight(2 * YEAR_HEIGHT + 30 * WEEK_HEIGHT, 32),
C32_GRAPH_WEIGHT
);
assert_eq!(
graph_weight(2 * YEAR_HEIGHT + 31 * WEEK_HEIGHT, 32),
C32_GRAPH_WEIGHT
);
// 3 years in, nothing changes
assert_eq!(graph_weight(3 * YEAR_HEIGHT, 31), 0);
assert_eq!(graph_weight(3 * YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(3 * YEAR_HEIGHT, 33), 1024 * 33);
// 4 years in, still on hold
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 31), 0);
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33);
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33);
}
} | let difficulty = max(MIN_DMA_DIFFICULTY, diff_sum * BLOCK_TIME_SEC / adj_ts);
HeaderInfo::from_diff_scaling(Difficulty::from_num(difficulty), sec_pow_scaling) | random_line_split |
consensus.rs | // Copyright 2020 The Grin Developers
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! All the rules required for a cryptocurrency to reach consensus across
//! the whole network are complex and hard to completely isolate. Some can be
//! simple parameters (like block reward), others complex algorithms (like
//! Merkle sum trees or reorg rules). However, as long as they're simple
//! enough, consensus-relevant constants and short functions should be kept
//! here.
// Proof of existence:
// txid: d043f5cc3e9e135e0bafb010521813668d5bc86eef27c0e30232287fd7f5a85f
// document hash: 9b6372224719c5531e0ee1fcc36e7c9e29def9edd22e61aa60c014927191e58a
use crate::core::block::HeaderVersion;
use crate::core::hash::{Hash, ZERO_HASH};
use crate::global;
use crate::pow::Difficulty;
use std::cmp::{max, min};
/// A grin is divisible to 10^9, following the SI prefixes
pub const GRIN_BASE: u64 = 1_000_000_000;
/// Milligrin, a thousand of a grin
pub const MILLI_GRIN: u64 = GRIN_BASE / 1_000;
/// Microgrin, a thousand of a milligrin
pub const MICRO_GRIN: u64 = MILLI_GRIN / 1_000;
/// Nanogrin, smallest unit, takes a billion to make a grin
pub const NANO_GRIN: u64 = 1;
/// Block interval, in seconds, the network will tune its next_target for. Note
/// that we may reduce this value in the future as we get more data on mining
/// with Cuckoo Cycle, networks improve and block propagation is optimized
/// (adjusting the reward accordingly).
pub const BLOCK_TIME_SEC: u64 = 60;
/// Start at BTC block 717,000 (snapshot) which should occur around
/// Jan 3, 2022. This block will reward 6.25 BTC.
/// We allocate the remainder of the 6.25 block reward to our
/// "long tail", which will last 1000 years and start with 3.25.
/// So the initial reward is 0.3125 BCMWs for 1,224,600 more blocks.
/// This is due to the 1 minute block time instead of Bitcoin's 10 minutes.
/// This is approximately Bitcoin's halving schedule, until
/// the 8th halving, after which the long tail will distribute the
/// remainder of the BCMWs over 1000 years. At block 717,000 there will be
/// 19,246,875 BTC.
/// Note that pre-launch we may recalibrate these numbers
/// a little. The goal will be to get exactly 21m BCMWs, have
/// a 1000 year long tail, and do a snapshot on January 3, 2022.
/// Snapshot includes 18,918,750,000,000,000 NanoBCMWs
/// Genesis reward is 0.
pub const REWARD0: u64 = 0;
/// First reward 1,224,600 blocks
pub const REWARD1: u64 = 312_500_000; // 382,687,500,000,000 NanoBCMWs
/// Second reward for 2,100,000 blocks
pub const REWARD2: u64 = 156_250_000; // 328,125,000,000,000 NanoBCMWs
/// Third reward for 2,100,000 blocks
pub const REWARD3: u64 = 78_125_000; // 164,062,500,000,000 NanoBCMWs
/// Fourth reward for 2,100,000 blocks
pub const REWARD4: u64 = 39_062_500; // 82,031,250,000,000 NanoBCMWs
/// Fifth reward for 2,100,000 blocks
pub const REWARD5: u64 = 19_531_250; // 41,015,625,000,000 NanoBCMWs
/// Sixth reward for 2,100,000 blocks
pub const REWARD6: u64 = 9_765_625; // 20,507,812,500,000 NanoBCMWs
/// Seventh reward for 2,100,000 blocks
pub const REWARD7: u64 = 4_882_812; // 10,253,905,200,000 NanoBCMWs
/// Eighth reward for 525,600,000 blocks
pub const REWARD8: u64 = 2_000_000; // 1,051,200,000,000,000 NanoBCMWs
/// Actual block reward for a given total fee amount
pub fn reward(fee: u64, height: u64) -> u64 {
calc_block_reward(height).saturating_add(fee)
}
fn get_epoch_start(num: u64) -> u64 {
if num == 1 {
1
} else if num == 2 {
1_224_600
} else if num == 3 {
3_324_600
} else if num == 4 {
5_424_600
} else if num == 5 {
7_524_600
} else if num == 6 {
9_624_600
} else if num == 7 {
11_724_600
} else if num == 8 {
13_824_600
} else if num == 9 {
539_424_600
} else {
// shouldn't get here.
0
}
}
/// Calculate block reward based on height
pub fn calc_block_reward(height: u64) -> u64 {
if height == 0 {
// reward for genesis block
REWARD0
} else if height <= get_epoch_start(2) {
REWARD1
} else if height <= get_epoch_start(3) {
REWARD2
} else if height <= get_epoch_start(4) {
REWARD3
} else if height <= get_epoch_start(5) {
REWARD4
} else if height <= get_epoch_start(6) {
REWARD5
} else if height <= get_epoch_start(7) {
REWARD6
} else if height <= get_epoch_start(8) {
REWARD7
} else if height <= get_epoch_start(9) {
REWARD8
} else {
0 // no reward after this.
}
}
fn get_overage_offset_start_epoch(num: u64) -> u64 {
if num == 1 {
REWARD0
} else if num == 2 {
get_epoch_start(2) * REWARD1 + REWARD0
} else if num == 3 {
get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0
} else if num == 4 {
get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 5 {
get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 6 {
get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 7 {
get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 8 {
get_epoch_start(8) * REWARD7
+ get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 9 {
get_epoch_start(9) * REWARD8
+ get_epoch_start(8) * REWARD7
+ get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else {
// should not get here
1
}
}
/// Calculate block overage based on height and claimed BTCUtxos
pub fn calc_block_overage(height: u64) -> u64 {
if height == 0 {
0
} else if height <= get_epoch_start(2) {
(REWARD1 * height) + get_overage_offset_start_epoch(1)
} else if height <= get_epoch_start(3) {
(REWARD2 * (height - get_epoch_start(2))) + get_overage_offset_start_epoch(2)
} else if height <= get_epoch_start(4) {
(REWARD3 * (height - get_epoch_start(3))) + get_overage_offset_start_epoch(3)
} else if height <= get_epoch_start(5) {
(REWARD4 * (height - get_epoch_start(4))) + get_overage_offset_start_epoch(4)
} else if height <= get_epoch_start(6) {
(REWARD5 * (height - get_epoch_start(5))) + get_overage_offset_start_epoch(5)
} else if height <= get_epoch_start(7) {
(REWARD6 * (height - get_epoch_start(6))) + get_overage_offset_start_epoch(6)
} else if height <= get_epoch_start(8) {
(REWARD7 * (height - get_epoch_start(7))) + get_overage_offset_start_epoch(7)
} else if height <= get_epoch_start(9) {
(REWARD8 * (height - get_epoch_start(8))) + get_overage_offset_start_epoch(8)
} else {
// we exit here. Up to future generations to decide
// how to handle.
std::process::exit(0);
}
}
/// an hour in seconds
pub const HOUR_SEC: u64 = 60 * 60;
/// Nominal height for standard time intervals, hour is 60 blocks
pub const HOUR_HEIGHT: u64 = HOUR_SEC / BLOCK_TIME_SEC;
/// A day is 1440 blocks
pub const DAY_HEIGHT: u64 = 24 * HOUR_HEIGHT;
/// A week is 10_080 blocks
pub const WEEK_HEIGHT: u64 = 7 * DAY_HEIGHT;
/// A year is 524_160 blocks
pub const YEAR_HEIGHT: u64 = 52 * WEEK_HEIGHT;
/// Number of blocks before a coinbase matures and can be spent
pub const COINBASE_MATURITY: u64 = DAY_HEIGHT;
/// We use all C29d from the start
pub fn secondary_pow_ratio(_height: u64) -> u64 {
100
}
/// Cuckoo-cycle proof size (cycle length)
pub const PROOFSIZE: usize = 42;
/// Default Cuckatoo Cycle edge_bits, used for mining and validating.
pub const DEFAULT_MIN_EDGE_BITS: u8 = 31;
/// Cuckaroo* proof-of-work edge_bits, meant to be ASIC resistant.
pub const SECOND_POW_EDGE_BITS: u8 = 29;
/// Original reference edge_bits to compute difficulty factors for higher
/// Cuckoo graph sizes, changing this would hard fork
pub const BASE_EDGE_BITS: u8 = 24;
/// Default number of blocks in the past when cross-block cut-through will start
/// happening. Needs to be long enough to not overlap with a long reorg.
/// Rationale
/// behind the value is that the longest bitcoin fork was about 30 blocks, so 5h. We
/// add an order of magnitude to be safe and round to 7x24h of blocks to make it
/// easier to reason about.
pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32;
/// Default number of blocks in the past to determine the height where we request
/// a txhashset (and full blocks from). Needs to be long enough to not overlap with
/// a long reorg.
/// Rationale behind the value is that the longest bitcoin fork was about 30 blocks, so 5h.
/// We add an order of magnitude to be safe and round to 2x24h of blocks to make it
/// easier to reason about.
pub const STATE_SYNC_THRESHOLD: u32 = 2 * DAY_HEIGHT as u32;
/// Weight of an input when counted against the max block weight capacity
pub const INPUT_WEIGHT: u64 = 1;
/// Weight of an output when counted against the max block weight capacity
pub const OUTPUT_WEIGHT: u64 = 21;
/// Weight of a kernel when counted against the max block weight capacity
pub const KERNEL_WEIGHT: u64 = 3;
/// Total maximum block weight. At current sizes, this means a maximum
/// theoretical size of:
/// * `(674 + 33 + 1) * (40_000 / 21) = 1_348_571` for a block with only outputs
/// * `(1 + 8 + 8 + 33 + 64) * (40_000 / 3) = 1_520_000` for a block with only kernels
/// * `(1 + 33) * 40_000 = 1_360_000` for a block with only inputs
///
/// Regardless of the relative numbers of inputs/outputs/kernels in a block the maximum
/// block size is around 1.5MB
/// For a block full of "average" txs (2 inputs, 2 outputs, 1 kernel) we have -
/// `(1 * 2) + (21 * 2) + (3 * 1) = 47` (weight per tx)
/// `40_000 / 47 = 851` (txs per block)
///
pub const MAX_BLOCK_WEIGHT: u64 = 40_000;
/// Fork every 6 months.
pub const HARD_FORK_INTERVAL: u64 = YEAR_HEIGHT / 2;
/// Testnet first hard fork height, set to happen around 2019-06-20
pub const TESTNET_FIRST_HARD_FORK: u64 = 185_040;
/// Testnet second hard fork height, set to happen around 2019-12-19
pub const TESTNET_SECOND_HARD_FORK: u64 = 298_080;
/// Testnet third hard fork height, set to happen around 2020-06-20
pub const TESTNET_THIRD_HARD_FORK: u64 = 552_960;
/// Testnet fourth hard fork height, set to happen around 2020-12-8
pub const TESTNET_FOURTH_HARD_FORK: u64 = 642_240;
/// Fork every 3 blocks
pub const TESTING_HARD_FORK_INTERVAL: u64 = 3;
/// Compute possible block version at a given height,
/// currently no hard forks.
pub fn header_version(_height: u64) -> HeaderVersion {
HeaderVersion(1)
}
/// Check whether the block version is valid at a given height; with no hard
/// forks currently scheduled this simply requires the version from `header_version`.
pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool {
version == header_version(height)
}
/// Number of blocks used to calculate difficulty adjustment by Damped Moving Average
pub const DMA_WINDOW: u64 = HOUR_HEIGHT;
/// Difficulty adjustment half life (actually, 60s * number of 0s-blocks to raise diff by factor e) is 4 hours
pub const WTEMA_HALF_LIFE: u64 = 4 * HOUR_SEC;
/// Average time span of the DMA difficulty adjustment window
pub const BLOCK_TIME_WINDOW: u64 = DMA_WINDOW * BLOCK_TIME_SEC;
/// Clamp factor to use for DMA difficulty adjustment
/// Limit value to within this factor of goal
pub const CLAMP_FACTOR: u64 = 2;
/// Dampening factor to use for DMA difficulty adjustment
pub const DMA_DAMP_FACTOR: u64 = 3;
/// Dampening factor to use for AR scale calculation.
pub const AR_SCALE_DAMP_FACTOR: u64 = 13;
/// Compute weight of a graph as number of siphash bits defining the graph
/// The height dependence allows a 30-week linear transition from C31+ to C32+ starting after 1 year
pub fn graph_weight(height: u64, edge_bits: u8) -> u64 {
let mut xpr_edge_bits = edge_bits as u64;
let expiry_height = YEAR_HEIGHT;
if edge_bits == 31 && height >= expiry_height {
xpr_edge_bits = xpr_edge_bits.saturating_sub(1 + (height - expiry_height) / WEEK_HEIGHT);
}
// For C31 xpr_edge_bits reaches 0 at height YEAR_HEIGHT + 30 * WEEK_HEIGHT
// 30 weeks after Jan 15, 2020 would be Aug 12, 2020
(2u64 << (edge_bits - global::base_edge_bits()) as u64) * xpr_edge_bits
}
/// minimum solution difficulty after HardFork4 when PoW becomes primary only Cuckatoo32+
pub const C32_GRAPH_WEIGHT: u64 = (2u64 << (32 - BASE_EDGE_BITS) as u64) * 32; // 16384
/// Minimum difficulty, enforced in Damped Moving Average diff retargetting
/// avoids getting stuck when trying to increase difficulty subject to dampening
pub const MIN_DMA_DIFFICULTY: u64 = DMA_DAMP_FACTOR;
/// Minimum scaling factor for AR pow, enforced in diff retargetting
/// avoids getting stuck when trying to increase ar_scale subject to dampening
pub const MIN_AR_SCALE: u64 = AR_SCALE_DAMP_FACTOR;
/// unit difficulty, equal to graph_weight(SECOND_POW_EDGE_BITS)
pub const UNIT_DIFFICULTY: u64 =
((2 as u64) << (SECOND_POW_EDGE_BITS - BASE_EDGE_BITS)) * (SECOND_POW_EDGE_BITS as u64);
/// The initial difficulty at launch. This should be over-estimated
/// and difficulty should come down at launch rather than up
/// Currently grossly over-estimated at 10% of current
/// Ethereum GPUs (assuming 1 GPU can solve a block at diff 1 in one block interval)
pub const INITIAL_DIFFICULTY: u64 = 1_000_000 * UNIT_DIFFICULTY;
/// Minimal header information required for the Difficulty calculation to
/// take place
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct HeaderInfo {
/// Block hash, ZERO_HASH when this is a synthetic entry.
pub block_hash: Hash,
/// Timestamp of the header, 1 when not used (returned info)
pub timestamp: u64,
/// Network difficulty or next difficulty to use
pub difficulty: Difficulty,
/// Network secondary PoW factor or factor to use
pub secondary_scaling: u32,
/// Whether the header is a secondary proof of work
pub is_secondary: bool,
}
impl HeaderInfo {
/// Default constructor
pub fn new(
block_hash: Hash,
timestamp: u64,
difficulty: Difficulty,
secondary_scaling: u32,
is_secondary: bool,
) -> HeaderInfo {
HeaderInfo {
block_hash,
timestamp,
difficulty,
secondary_scaling,
is_secondary,
}
}
/// Constructor from a timestamp and difficulty, setting a default secondary
/// PoW factor
pub fn from_ts_diff(timestamp: u64, difficulty: Difficulty) -> HeaderInfo {
HeaderInfo {
block_hash: ZERO_HASH,
timestamp,
difficulty,
secondary_scaling: global::initial_graph_weight(),
is_secondary: true,
}
}
/// Constructor from a difficulty and secondary factor, setting a default
/// timestamp
pub fn from_diff_scaling(difficulty: Difficulty, secondary_scaling: u32) -> HeaderInfo {
HeaderInfo {
block_hash: ZERO_HASH,
timestamp: 1,
difficulty,
secondary_scaling,
is_secondary: true,
}
}
}
/// Move value linearly toward a goal
pub fn | (actual: u64, goal: u64, damp_factor: u64) -> u64 {
(actual + (damp_factor - 1) * goal) / damp_factor
}
/// limit value to be within some factor from a goal
pub fn clamp(actual: u64, goal: u64, clamp_factor: u64) -> u64 {
max(goal / clamp_factor, min(actual, goal * clamp_factor))
}
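// Illustrative sketch (not part of the upstream file): how `damp` and `clamp`
// shape the measured window timespan before it feeds the DMA retarget. A window
// that took twice the target time is pulled a third of the way back toward the
// goal, and extreme values are clamped to within CLAMP_FACTOR of the goal.
#[cfg(test)]
mod damp_clamp_example {
	use super::*;

	#[test]
	fn damp_then_clamp_sketch() {
		let measured = 2 * BLOCK_TIME_WINDOW; // blocks arrived twice as slowly as targeted
		// damp: (actual + (factor - 1) * goal) / factor
		let damped = damp(measured, BLOCK_TIME_WINDOW, DMA_DAMP_FACTOR);
		assert_eq!(damped, 4 * BLOCK_TIME_WINDOW / 3);
		// within a factor of 2 of the goal, so clamp leaves it alone
		assert_eq!(clamp(damped, BLOCK_TIME_WINDOW, CLAMP_FACTOR), damped);
		// a wildly slow window is capped at goal * CLAMP_FACTOR
		assert_eq!(
			clamp(10 * BLOCK_TIME_WINDOW, BLOCK_TIME_WINDOW, CLAMP_FACTOR),
			2 * BLOCK_TIME_WINDOW
		);
	}
}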
/// Computes the proof-of-work difficulty that the next block should comply with.
/// Takes an iterator over past block headers information, from latest
/// (highest height) to oldest (lowest height).
/// Uses either the old dma DAA or, starting from HF4, the new wtema DAA
pub fn next_difficulty<T>(height: u64, cursor: T) -> HeaderInfo
where
T: IntoIterator<Item = HeaderInfo>,
{
if header_version(height) < HeaderVersion(5) {
next_dma_difficulty(height, cursor)
} else {
next_wtema_difficulty(height, cursor)
}
}
/// Difficulty calculation based on a Damped Moving Average
/// of difficulty over a window of DMA_WINDOW blocks.
/// The corresponding timespan is calculated
/// by using the difference between the timestamps at the beginning
/// and the end of the window, with a damping toward the target block time.
pub fn next_dma_difficulty<T>(height: u64, cursor: T) -> HeaderInfo
where
T: IntoIterator<Item = HeaderInfo>,
{
// Create vector of difficulty data running from earliest
// to latest, and pad with simulated pre-genesis data to allow earlier
// adjustment if there isn't enough window data; length will be
// DMA_WINDOW + 1 (for initial block time bound)
let diff_data = global::difficulty_data_to_vector(cursor);
// First, get the ratio of secondary PoW vs primary, skipping initial header
let sec_pow_scaling = secondary_pow_scaling(height, &diff_data[1..]);
// Get the timestamp delta across the window
let ts_delta: u64 = diff_data[DMA_WINDOW as usize].timestamp - diff_data[0].timestamp;
// Get the difficulty sum of the last DMA_WINDOW elements
let diff_sum: u64 = diff_data
.iter()
.skip(1)
.map(|dd| dd.difficulty.to_num())
.sum();
// adjust time delta toward goal subject to dampening and clamping
let adj_ts = clamp(
damp(ts_delta, BLOCK_TIME_WINDOW, DMA_DAMP_FACTOR),
BLOCK_TIME_WINDOW,
CLAMP_FACTOR,
);
// minimum difficulty avoids getting stuck due to dampening
let difficulty = max(MIN_DMA_DIFFICULTY, diff_sum * BLOCK_TIME_SEC / adj_ts);
HeaderInfo::from_diff_scaling(Difficulty::from_num(difficulty), sec_pow_scaling)
}
/// Difficulty calculation based on a Weighted Target Exponential Moving Average
/// of difficulty, using the ratio of the last block time over the target block time.
pub fn next_wtema_difficulty<T>(_height: u64, cursor: T) -> HeaderInfo
where
T: IntoIterator<Item = HeaderInfo>,
{
let mut last_headers = cursor.into_iter();
// last two headers
let last_header = last_headers.next().unwrap();
let prev_header = last_headers.next().unwrap();
let last_block_time: u64 = last_header.timestamp - prev_header.timestamp;
let last_diff = last_header.difficulty.to_num();
// wtema difficulty update
let next_diff =
last_diff * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - BLOCK_TIME_SEC + last_block_time);
// mainnet minimum difficulty at graph_weight(32) ensures difficulty increase on 59s block
// since 16384 * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - 1) > 16384
let difficulty = max(Difficulty::min_wtema(), Difficulty::from_num(next_diff));
HeaderInfo::from_diff_scaling(difficulty, 0) // no more secondary PoW
}
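// Illustrative sketch (not part of the upstream file): the wtema update rule in
// plain u64 arithmetic. A block exactly on target leaves difficulty unchanged,
// a faster block raises it and a slower block lowers it.
#[cfg(test)]
mod wtema_example {
	use super::*;

	#[test]
	fn wtema_update_sketch() {
		let last_diff: u64 = 1_000_000;
		let update = |block_time: u64| {
			last_diff * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - BLOCK_TIME_SEC + block_time)
		};
		assert_eq!(update(BLOCK_TIME_SEC), last_diff); // on-target block: unchanged
		assert!(update(BLOCK_TIME_SEC - 1) > last_diff); // 59s block: difficulty rises
		assert!(update(2 * BLOCK_TIME_SEC) < last_diff); // 120s block: difficulty falls
	}
}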
/// Count, in units of 1/100 (a percent), the number of "secondary" (AR) blocks in the provided window of blocks.
pub fn ar_count(_height: u64, diff_data: &[HeaderInfo]) -> u64 {
100 * diff_data.iter().filter(|n| n.is_secondary).count() as u64
}
/// The secondary proof-of-work factor is calculated along the same lines as in next_dma_difficulty,
/// as an adjustment on the deviation against the ideal value.
/// Factor by which the secondary proof of work difficulty will be adjusted
pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 {
// Get the scaling factor sum of the last DMA_WINDOW elements
let scale_sum: u64 = diff_data.iter().map(|dd| dd.secondary_scaling as u64).sum();
// compute ideal 2nd_pow_fraction in pct and across window
let target_pct = secondary_pow_ratio(height);
let target_count = DMA_WINDOW * target_pct;
// Get the secondary count across the window, adjusting count toward goal
// subject to dampening and clamping.
let adj_count = clamp(
damp(
ar_count(height, diff_data),
target_count,
AR_SCALE_DAMP_FACTOR,
),
target_count,
CLAMP_FACTOR,
);
let scale = scale_sum * target_pct / max(1, adj_count);
// minimum AR scale avoids getting stuck due to dampening
max(MIN_AR_SCALE, scale) as u32
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_graph_weight() {
global::set_local_chain_type(global::ChainTypes::Mainnet);
// initial weights
assert_eq!(graph_weight(1, 31), 256 * 31);
assert_eq!(graph_weight(1, 32), 512 * 32);
assert_eq!(graph_weight(1, 33), 1024 * 33);
// one year in, 31 starts going down, the rest stays the same
assert_eq!(graph_weight(YEAR_HEIGHT, 31), 256 * 30);
assert_eq!(graph_weight(YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(YEAR_HEIGHT, 33), 1024 * 33);
// 31 loses one factor per week
assert_eq!(graph_weight(YEAR_HEIGHT + WEEK_HEIGHT, 31), 256 * 29);
assert_eq!(graph_weight(YEAR_HEIGHT + 2 * WEEK_HEIGHT, 31), 256 * 28);
assert_eq!(graph_weight(YEAR_HEIGHT + 32 * WEEK_HEIGHT, 31), 0);
// 2 years in, 31 still at 0, 32 starts decreasing
assert_eq!(graph_weight(2 * YEAR_HEIGHT, 31), 0);
assert_eq!(graph_weight(2 * YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(2 * YEAR_HEIGHT, 33), 1024 * 33);
// 32 phaseout on hold
assert_eq!(
graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 32),
C32_GRAPH_WEIGHT
);
assert_eq!(graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 31), 0);
assert_eq!(
graph_weight(2 * YEAR_HEIGHT + 30 * WEEK_HEIGHT, 32),
C32_GRAPH_WEIGHT
);
assert_eq!(
graph_weight(2 * YEAR_HEIGHT + 31 * WEEK_HEIGHT, 32),
C32_GRAPH_WEIGHT
);
// 3 years in, nothing changes
assert_eq!(graph_weight(3 * YEAR_HEIGHT, 31), 0);
assert_eq!(graph_weight(3 * YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(3 * YEAR_HEIGHT, 33), 1024 * 33);
// 4 years in, still on hold
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 31), 0);
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33);
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33);
}
}
| damp | identifier_name |
consensus.rs | // Copyright 2020 The Grin Developers
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! All the rules required for a cryptocurrency to reach consensus across
//! the whole network are complex and hard to completely isolate. Some can be
//! simple parameters (like block reward), others complex algorithms (like
//! Merkle sum trees or reorg rules). However, as long as they're simple
//! enough, consensus-relevant constants and short functions should be kept
//! here.
// Proof of existence:
// txid: d043f5cc3e9e135e0bafb010521813668d5bc86eef27c0e30232287fd7f5a85f
// document hash: 9b6372224719c5531e0ee1fcc36e7c9e29def9edd22e61aa60c014927191e58a
use crate::core::block::HeaderVersion;
use crate::core::hash::{Hash, ZERO_HASH};
use crate::global;
use crate::pow::Difficulty;
use std::cmp::{max, min};
/// A grin is divisible to 10^9, following the SI prefixes
pub const GRIN_BASE: u64 = 1_000_000_000;
/// Milligrin, a thousandth of a grin
pub const MILLI_GRIN: u64 = GRIN_BASE / 1_000;
/// Microgrin, a thousandth of a milligrin
pub const MICRO_GRIN: u64 = MILLI_GRIN / 1_000;
/// Nanogrin, smallest unit, takes a billion to make a grin
pub const NANO_GRIN: u64 = 1;
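// Illustrative sketch (not part of the upstream file): the unit relations above
// spelled out, one grin being 10^9 nanogrin.
#[cfg(test)]
mod unit_example {
	use super::*;

	#[test]
	fn unit_relations() {
		assert_eq!(GRIN_BASE, 1_000 * MILLI_GRIN);
		assert_eq!(MILLI_GRIN, 1_000 * MICRO_GRIN);
		assert_eq!(GRIN_BASE, 1_000_000_000 * NANO_GRIN);
	}
}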
/// Block interval, in seconds, the network will tune its next_target for. Note
/// that we may reduce this value in the future as we get more data on mining
/// with Cuckoo Cycle, networks improve and block propagation is optimized
/// (adjusting the reward accordingly).
pub const BLOCK_TIME_SEC: u64 = 60;
/// Start at BTC block 717,000 (snapshot) which should occur around
/// Jan 3, 2022. This block will reward 6.25 BTC.
/// We allocate the remainder of the 6.25 block reward to our
/// "long tail" which will last 1000 years and start with 3.25.
/// So initial reward is 0.3125 BCMWs for 1,224,600 more blocks.
/// This is due to the 1-minute blocks instead of the 10-minute blocks of BTC.
/// This is approximately Bitcoin's halving schedule, until
/// the 8th halving, after which the long tail will distribute the
/// remainder of the BCMWs over 1000 years. At block 717,000 there will be
/// 19,246,875 BTC.
/// Note that pre-launch we may recalibrate these numbers
/// a little. The goal will be to get exactly 21m BCMWs, have
/// a 1000 year long tail, and do a snapshot on January 3, 2022.
/// Snapshot includes 18,918,750,000,000,000 NanoBCMWs
/// Genesis reward is 0.
pub const REWARD0: u64 = 0;
/// First reward 1,224,600 blocks
pub const REWARD1: u64 = 312_500_000; // 382,687,500,000,000 NanoBCMWs
/// Second reward for 2,100,000 blocks
pub const REWARD2: u64 = 156_250_000; // 328,125,000,000,000 NanoBCMWs
/// Third reward for 2,100,000 blocks
pub const REWARD3: u64 = 78_125_000; // 164,062,500,000,000 NanoBCMWs
/// Fourth reward for 2,100,000 blocks
pub const REWARD4: u64 = 39_062_500; // 82,031,250,000,000 NanoBCMWs
/// Fifth reward for 2,100,000 blocks
pub const REWARD5: u64 = 19_531_250; // 41,015,625,000,000 NanoBCMWs
/// Sixth reward for 2,100,000 blocks
pub const REWARD6: u64 = 9_765_625; // 20,507,812,500,000 NanoBCMWs
/// Seventh reward for 2,100,000 blocks
pub const REWARD7: u64 = 4_882_812; // 10,253,905,200,000 NanoBCMWs
/// Eighth reward for 525,600,000 blocks
pub const REWARD8: u64 = 2_000_000; // 1,051,200,000,000,000 NanoBCMWs
/// Actual block reward for a given total fee amount
pub fn reward(fee: u64, height: u64) -> u64 {
calc_block_reward(height).saturating_add(fee)
}
fn get_epoch_start(num: u64) -> u64 {
if num == 1 {
1
} else if num == 2 {
1_224_600
} else if num == 3 {
3_324_600
} else if num == 4 {
5_424_600
} else if num == 5 {
7_524_600
} else if num == 6 {
9_624_600
} else if num == 7 {
11_724_600
} else if num == 8 {
13_824_600
} else if num == 9 {
539_424_600
} else {
// shouldn't get here.
0
}
}
/// Calculate block reward based on height
pub fn calc_block_reward(height: u64) -> u64 {
if height == 0 {
// reward for genesis block
REWARD0
} else if height <= get_epoch_start(2) {
REWARD1
} else if height <= get_epoch_start(3) {
REWARD2
} else if height <= get_epoch_start(4) {
REWARD3
} else if height <= get_epoch_start(5) {
REWARD4
} else if height <= get_epoch_start(6) {
REWARD5
} else if height <= get_epoch_start(7) {
REWARD6
} else if height <= get_epoch_start(8) {
REWARD7
} else if height <= get_epoch_start(9) {
REWARD8
} else {
0 // no reward after this.
}
}
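// Illustrative sketch (not part of the upstream file): spot checks of the epoch
// schedule above. The per-block reward drops at each epoch boundary until the
// REWARD8 long tail, and the total fee is simply added on top.
#[cfg(test)]
mod reward_example {
	use super::*;

	#[test]
	fn reward_schedule_sketch() {
		assert_eq!(calc_block_reward(0), REWARD0); // genesis
		assert_eq!(calc_block_reward(1), REWARD1);
		assert_eq!(calc_block_reward(get_epoch_start(2)), REWARD1); // last block of epoch 1
		assert_eq!(calc_block_reward(get_epoch_start(2) + 1), REWARD2); // first block of epoch 2
		assert_eq!(calc_block_reward(get_epoch_start(9)), REWARD8);
		assert_eq!(calc_block_reward(get_epoch_start(9) + 1), 0); // past the long tail
		assert_eq!(reward(5, 1), REWARD1 + 5); // fees on top of the base reward
	}
}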
fn get_overage_offset_start_epoch(num: u64) -> u64 {
if num == 1 {
REWARD0
} else if num == 2 {
get_epoch_start(2) * REWARD1 + REWARD0
} else if num == 3 {
get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0
} else if num == 4 {
get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 5 {
get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 6 {
get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 7 {
get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 8 {
get_epoch_start(8) * REWARD7
+ get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 9 | else {
// should not get here
1
}
}
/// Calculate block overage based on height and claimed BTCUtxos
pub fn calc_block_overage(height: u64) -> u64 {
if height == 0 {
0
} else if height <= get_epoch_start(2) {
(REWARD1 * height) + get_overage_offset_start_epoch(1)
} else if height <= get_epoch_start(3) {
(REWARD2 * (height - get_epoch_start(2))) + get_overage_offset_start_epoch(2)
} else if height <= get_epoch_start(4) {
(REWARD3 * (height - get_epoch_start(3))) + get_overage_offset_start_epoch(3)
} else if height <= get_epoch_start(5) {
(REWARD4 * (height - get_epoch_start(4))) + get_overage_offset_start_epoch(4)
} else if height <= get_epoch_start(6) {
(REWARD5 * (height - get_epoch_start(5))) + get_overage_offset_start_epoch(5)
} else if height <= get_epoch_start(7) {
(REWARD6 * (height - get_epoch_start(6))) + get_overage_offset_start_epoch(6)
} else if height <= get_epoch_start(8) {
(REWARD7 * (height - get_epoch_start(7))) + get_overage_offset_start_epoch(7)
} else if height <= get_epoch_start(9) {
(REWARD8 * (height - get_epoch_start(8))) + get_overage_offset_start_epoch(8)
} else {
// we exit here. Up to future generations to decide
// how to handle.
std::process::exit(0);
}
}
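// Illustrative sketch (not part of the upstream file): within the first epoch
// the overage is simply height times the per-block reward, since the epoch-1
// offset is just the (zero) genesis reward, and at the first boundary it equals
// the epoch-2 offset.
#[cfg(test)]
mod overage_example {
	use super::*;

	#[test]
	fn overage_first_epoch_sketch() {
		assert_eq!(calc_block_overage(0), 0);
		assert_eq!(calc_block_overage(10), 10 * REWARD1);
		assert_eq!(
			calc_block_overage(get_epoch_start(2)),
			get_overage_offset_start_epoch(2)
		);
	}
}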
/// an hour in seconds
pub const HOUR_SEC: u64 = 60 * 60;
/// Nominal height for standard time intervals, hour is 60 blocks
pub const HOUR_HEIGHT: u64 = HOUR_SEC / BLOCK_TIME_SEC;
/// A day is 1440 blocks
pub const DAY_HEIGHT: u64 = 24 * HOUR_HEIGHT;
/// A week is 10_080 blocks
pub const WEEK_HEIGHT: u64 = 7 * DAY_HEIGHT;
/// A year is 524_160 blocks
pub const YEAR_HEIGHT: u64 = 52 * WEEK_HEIGHT;
/// Number of blocks before a coinbase matures and can be spent
pub const COINBASE_MATURITY: u64 = DAY_HEIGHT;
/// We use all C29d from the start
pub fn secondary_pow_ratio(_height: u64) -> u64 {
100
}
/// Cuckoo-cycle proof size (cycle length)
pub const PROOFSIZE: usize = 42;
/// Default Cuckatoo Cycle edge_bits, used for mining and validating.
pub const DEFAULT_MIN_EDGE_BITS: u8 = 31;
/// Cuckaroo* proof-of-work edge_bits, meant to be ASIC resistant.
pub const SECOND_POW_EDGE_BITS: u8 = 29;
/// Original reference edge_bits to compute difficulty factors for higher
/// Cuckoo graph sizes, changing this would hard fork
pub const BASE_EDGE_BITS: u8 = 24;
/// Default number of blocks in the past when cross-block cut-through will start
/// happening. Needs to be long enough to not overlap with a long reorg.
/// Rationale behind the value is that the longest bitcoin fork was about 30 blocks, so 5h. We
/// add an order of magnitude to be safe and round to 7x24h of blocks to make it
/// easier to reason about.
pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32;
/// Default number of blocks in the past to determine the height where we request
/// a txhashset (and full blocks from). Needs to be long enough to not overlap with
/// a long reorg.
/// Rationale behind the value is that the longest bitcoin fork was about 30 blocks, so 5h.
/// We add an order of magnitude to be safe and round to 2x24h of blocks to make it
/// easier to reason about.
pub const STATE_SYNC_THRESHOLD: u32 = 2 * DAY_HEIGHT as u32;
/// Weight of an input when counted against the max block weight capacity
pub const INPUT_WEIGHT: u64 = 1;
/// Weight of an output when counted against the max block weight capacity
pub const OUTPUT_WEIGHT: u64 = 21;
/// Weight of a kernel when counted against the max block weight capacity
pub const KERNEL_WEIGHT: u64 = 3;
/// Total maximum block weight. At current sizes, this means a maximum
/// theoretical size of:
/// * `(674 + 33 + 1) * (40_000 / 21) = 1_348_571` for a block with only outputs
/// * `(1 + 8 + 8 + 33 + 64) * (40_000 / 3) = 1_520_000` for a block with only kernels
/// * `(1 + 33) * 40_000 = 1_360_000` for a block with only inputs
///
/// Regardless of the relative numbers of inputs/outputs/kernels in a block the maximum
/// block size is around 1.5MB
/// For a block full of "average" txs (2 inputs, 2 outputs, 1 kernel) we have -
/// `(1 * 2) + (21 * 2) + (3 * 1) = 47` (weight per tx)
/// `40_000 / 47 = 851` (txs per block)
///
pub const MAX_BLOCK_WEIGHT: u64 = 40_000;
/// Fork every 6 months.
pub const HARD_FORK_INTERVAL: u64 = YEAR_HEIGHT / 2;
/// Testnet first hard fork height, set to happen around 2019-06-20
pub const TESTNET_FIRST_HARD_FORK: u64 = 185_040;
/// Testnet second hard fork height, set to happen around 2019-12-19
pub const TESTNET_SECOND_HARD_FORK: u64 = 298_080;
/// Testnet third hard fork height, set to happen around 2020-06-20
pub const TESTNET_THIRD_HARD_FORK: u64 = 552_960;
/// Testnet fourth hard fork height, set to happen around 2020-12-8
pub const TESTNET_FOURTH_HARD_FORK: u64 = 642_240;
/// Fork every 3 blocks
pub const TESTING_HARD_FORK_INTERVAL: u64 = 3;
/// Compute possible block version at a given height,
/// currently no hard forks.
pub fn header_version(_height: u64) -> HeaderVersion {
HeaderVersion(1)
}
/// Check whether the block version is valid at a given height, implements
/// 6 months interval scheduled hard forks for the first 2 years.
pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool {
version == header_version(height)
}
/// Number of blocks used to calculate difficulty adjustment by Damped Moving Average
pub const DMA_WINDOW: u64 = HOUR_HEIGHT;
/// Difficulty adjustment half life (actually, 60s * number of 0s-blocks to raise diff by factor e) is 4 hours
pub const WTEMA_HALF_LIFE: u64 = 4 * HOUR_SEC;
/// Average time span of the DMA difficulty adjustment window
pub const BLOCK_TIME_WINDOW: u64 = DMA_WINDOW * BLOCK_TIME_SEC;
/// Clamp factor to use for DMA difficulty adjustment
/// Limit value to within this factor of goal
pub const CLAMP_FACTOR: u64 = 2;
/// Dampening factor to use for DMA difficulty adjustment
pub const DMA_DAMP_FACTOR: u64 = 3;
/// Dampening factor to use for AR scale calculation.
pub const AR_SCALE_DAMP_FACTOR: u64 = 13;
/// Compute weight of a graph as number of siphash bits defining the graph
/// The height dependence allows a 30-week linear transition from C31+ to C32+ starting after 1 year
pub fn graph_weight(height: u64, edge_bits: u8) -> u64 {
let mut xpr_edge_bits = edge_bits as u64;
let expiry_height = YEAR_HEIGHT;
if edge_bits == 31 && height >= expiry_height {
xpr_edge_bits = xpr_edge_bits.saturating_sub(1 + (height - expiry_height) / WEEK_HEIGHT);
}
// For C31 xpr_edge_bits reaches 0 at height YEAR_HEIGHT + 30 * WEEK_HEIGHT
// 30 weeks after Jan 15, 2020 would be Aug 12, 2020
(2u64 << (edge_bits - global::base_edge_bits()) as u64) * xpr_edge_bits
}
/// Minimum solution difficulty after HardFork4, when the primary PoW becomes Cuckatoo32+ only
pub const C32_GRAPH_WEIGHT: u64 = (2u64 << (32 - BASE_EDGE_BITS) as u64) * 32; // 16384
/// Minimum difficulty, enforced in Damped Moving Average diff retargeting
/// avoids getting stuck when trying to increase difficulty subject to dampening
pub const MIN_DMA_DIFFICULTY: u64 = DMA_DAMP_FACTOR;
/// Minimum scaling factor for AR pow, enforced in diff retargeting
/// avoids getting stuck when trying to increase ar_scale subject to dampening
pub const MIN_AR_SCALE: u64 = AR_SCALE_DAMP_FACTOR;
/// unit difficulty, equal to graph_weight(SECOND_POW_EDGE_BITS)
pub const UNIT_DIFFICULTY: u64 =
((2 as u64) << (SECOND_POW_EDGE_BITS - BASE_EDGE_BITS)) * (SECOND_POW_EDGE_BITS as u64);
/// The initial difficulty at launch. This should be over-estimated
/// and difficulty should come down at launch rather than up
/// Currently grossly over-estimated at 10% of current
/// Ethereum GPUs (assuming 1 GPU can solve a block at diff 1 in one block interval)
pub const INITIAL_DIFFICULTY: u64 = 1_000_000 * UNIT_DIFFICULTY;
/// Minimal header information required for the Difficulty calculation to
/// take place
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct HeaderInfo {
/// Block hash, ZERO_HASH when this is a synthetic entry.
pub block_hash: Hash,
/// Timestamp of the header, 1 when not used (returned info)
pub timestamp: u64,
/// Network difficulty or next difficulty to use
pub difficulty: Difficulty,
/// Network secondary PoW factor or factor to use
pub secondary_scaling: u32,
/// Whether the header is a secondary proof of work
pub is_secondary: bool,
}
impl HeaderInfo {
/// Default constructor
pub fn new(
block_hash: Hash,
timestamp: u64,
difficulty: Difficulty,
secondary_scaling: u32,
is_secondary: bool,
) -> HeaderInfo {
HeaderInfo {
block_hash,
timestamp,
difficulty,
secondary_scaling,
is_secondary,
}
}
/// Constructor from a timestamp and difficulty, setting a default secondary
/// PoW factor
pub fn from_ts_diff(timestamp: u64, difficulty: Difficulty) -> HeaderInfo {
HeaderInfo {
block_hash: ZERO_HASH,
timestamp,
difficulty,
secondary_scaling: global::initial_graph_weight(),
is_secondary: true,
}
}
/// Constructor from a difficulty and secondary factor, setting a default
/// timestamp
pub fn from_diff_scaling(difficulty: Difficulty, secondary_scaling: u32) -> HeaderInfo {
HeaderInfo {
block_hash: ZERO_HASH,
timestamp: 1,
difficulty,
secondary_scaling,
is_secondary: true,
}
}
}
/// Move value linearly toward a goal
pub fn damp(actual: u64, goal: u64, damp_factor: u64) -> u64 {
(actual + (damp_factor - 1) * goal) / damp_factor
}
/// limit value to be within some factor from a goal
pub fn clamp(actual: u64, goal: u64, clamp_factor: u64) -> u64 {
max(goal / clamp_factor, min(actual, goal * clamp_factor))
}
/// Computes the proof-of-work difficulty that the next block should comply with.
/// Takes an iterator over past block headers information, from latest
/// (highest height) to oldest (lowest height).
/// Uses either the old dma DAA or, starting from HF4, the new wtema DAA
pub fn next_difficulty<T>(height: u64, cursor: T) -> HeaderInfo
where
T: IntoIterator<Item = HeaderInfo>,
{
if header_version(height) < HeaderVersion(5) {
next_dma_difficulty(height, cursor)
} else {
next_wtema_difficulty(height, cursor)
}
}
/// Difficulty calculation based on a Damped Moving Average
/// of difficulty over a window of DMA_WINDOW blocks.
/// The corresponding timespan is calculated
/// by using the difference between the timestamps at the beginning
/// and the end of the window, with a damping toward the target block time.
pub fn next_dma_difficulty<T>(height: u64, cursor: T) -> HeaderInfo
where
T: IntoIterator<Item = HeaderInfo>,
{
// Create vector of difficulty data running from earliest
// to latest, and pad with simulated pre-genesis data to allow earlier
// adjustment if there isn't enough window data; length will be
// DMA_WINDOW + 1 (for initial block time bound)
let diff_data = global::difficulty_data_to_vector(cursor);
// First, get the ratio of secondary PoW vs primary, skipping initial header
let sec_pow_scaling = secondary_pow_scaling(height, &diff_data[1..]);
// Get the timestamp delta across the window
let ts_delta: u64 = diff_data[DMA_WINDOW as usize].timestamp - diff_data[0].timestamp;
// Get the difficulty sum of the last DMA_WINDOW elements
let diff_sum: u64 = diff_data
.iter()
.skip(1)
.map(|dd| dd.difficulty.to_num())
.sum();
// adjust time delta toward goal subject to dampening and clamping
let adj_ts = clamp(
damp(ts_delta, BLOCK_TIME_WINDOW, DMA_DAMP_FACTOR),
BLOCK_TIME_WINDOW,
CLAMP_FACTOR,
);
// minimum difficulty avoids getting stuck due to dampening
let difficulty = max(MIN_DMA_DIFFICULTY, diff_sum * BLOCK_TIME_SEC / adj_ts);
HeaderInfo::from_diff_scaling(Difficulty::from_num(difficulty), sec_pow_scaling)
}
/// Difficulty calculation based on a Weighted Target Exponential Moving Average
/// of difficulty, using the ratio of the last block time over the target block time.
pub fn next_wtema_difficulty<T>(_height: u64, cursor: T) -> HeaderInfo
where
T: IntoIterator<Item = HeaderInfo>,
{
let mut last_headers = cursor.into_iter();
// last two headers
let last_header = last_headers.next().unwrap();
let prev_header = last_headers.next().unwrap();
let last_block_time: u64 = last_header.timestamp - prev_header.timestamp;
let last_diff = last_header.difficulty.to_num();
// wtema difficulty update
let next_diff =
last_diff * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - BLOCK_TIME_SEC + last_block_time);
// mainnet minimum difficulty at graph_weight(32) ensures difficulty increase on 59s block
// since 16384 * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - 1) > 16384
let difficulty = max(Difficulty::min_wtema(), Difficulty::from_num(next_diff));
HeaderInfo::from_diff_scaling(difficulty, 0) // no more secondary PoW
}
/// Count, in units of 1/100 (a percent), the number of "secondary" (AR) blocks in the provided window of blocks.
pub fn ar_count(_height: u64, diff_data: &[HeaderInfo]) -> u64 {
100 * diff_data.iter().filter(|n| n.is_secondary).count() as u64
}
/// The secondary proof-of-work factor is calculated along the same lines as in next_dma_difficulty,
/// as an adjustment on the deviation against the ideal value.
/// Factor by which the secondary proof of work difficulty will be adjusted
pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 {
// Get the scaling factor sum of the last DMA_WINDOW elements
let scale_sum: u64 = diff_data.iter().map(|dd| dd.secondary_scaling as u64).sum();
// compute ideal 2nd_pow_fraction in pct and across window
let target_pct = secondary_pow_ratio(height);
let target_count = DMA_WINDOW * target_pct;
// Get the secondary count across the window, adjusting count toward goal
// subject to dampening and clamping.
let adj_count = clamp(
damp(
ar_count(height, diff_data),
target_count,
AR_SCALE_DAMP_FACTOR,
),
target_count,
CLAMP_FACTOR,
);
let scale = scale_sum * target_pct / max(1, adj_count);
// minimum AR scale avoids getting stuck due to dampening
max(MIN_AR_SCALE, scale) as u32
}
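// Illustrative sketch (not part of the upstream file): with the 100% secondary
// target used here, a window in which every header is secondary keeps the scale
// at the window's average scaling factor, since the damped and clamped count
// exactly matches the target count.
#[cfg(test)]
mod secondary_scaling_example {
	use super::*;

	#[test]
	fn all_secondary_window_sketch() {
		let window: Vec<HeaderInfo> = (0..DMA_WINDOW)
			.map(|_| HeaderInfo::from_diff_scaling(Difficulty::from_num(1), 100))
			.collect();
		assert_eq!(ar_count(0, &window), 100 * DMA_WINDOW);
		assert_eq!(secondary_pow_scaling(0, &window), 100);
	}
}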
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_graph_weight() {
global::set_local_chain_type(global::ChainTypes::Mainnet);
// initial weights
assert_eq!(graph_weight(1, 31), 256 * 31);
assert_eq!(graph_weight(1, 32), 512 * 32);
assert_eq!(graph_weight(1, 33), 1024 * 33);
// one year in, 31 starts going down, the rest stays the same
assert_eq!(graph_weight(YEAR_HEIGHT, 31), 256 * 30);
assert_eq!(graph_weight(YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(YEAR_HEIGHT, 33), 1024 * 33);
// 31 loses one factor per week
assert_eq!(graph_weight(YEAR_HEIGHT + WEEK_HEIGHT, 31), 256 * 29);
assert_eq!(graph_weight(YEAR_HEIGHT + 2 * WEEK_HEIGHT, 31), 256 * 28);
assert_eq!(graph_weight(YEAR_HEIGHT + 32 * WEEK_HEIGHT, 31), 0);
// 2 years in, 31 still at 0, 32 starts decreasing
assert_eq!(graph_weight(2 * YEAR_HEIGHT, 31), 0);
assert_eq!(graph_weight(2 * YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(2 * YEAR_HEIGHT, 33), 1024 * 33);
// 32 phaseout on hold
assert_eq!(
graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 32),
C32_GRAPH_WEIGHT
);
assert_eq!(graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 31), 0);
assert_eq!(
graph_weight(2 * YEAR_HEIGHT + 30 * WEEK_HEIGHT, 32),
C32_GRAPH_WEIGHT
);
assert_eq!(
graph_weight(2 * YEAR_HEIGHT + 31 * WEEK_HEIGHT, 32),
C32_GRAPH_WEIGHT
);
// 3 years in, nothing changes
assert_eq!(graph_weight(3 * YEAR_HEIGHT, 31), 0);
assert_eq!(graph_weight(3 * YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(3 * YEAR_HEIGHT, 33), 1024 * 33);
// 4 years in, still on hold
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 31), 0);
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33);
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33);
}
}
| {
get_epoch_start(9) * REWARD8
+ get_epoch_start(8) * REWARD7
+ get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} | conditional_block |
consensus.rs | // Copyright 2020 The Grin Developers
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! All the rules required for a cryptocurrency to reach consensus across
//! the whole network are complex and hard to completely isolate. Some can be
//! simple parameters (like block reward), others complex algorithms (like
//! Merkle sum trees or reorg rules). However, as long as they're simple
//! enough, consensus-relevant constants and short functions should be kept
//! here.
// Proof of existence:
// txid: d043f5cc3e9e135e0bafb010521813668d5bc86eef27c0e30232287fd7f5a85f
// document hash: 9b6372224719c5531e0ee1fcc36e7c9e29def9edd22e61aa60c014927191e58a
use crate::core::block::HeaderVersion;
use crate::core::hash::{Hash, ZERO_HASH};
use crate::global;
use crate::pow::Difficulty;
use std::cmp::{max, min};
/// A grin is divisible to 10^9, following the SI prefixes
pub const GRIN_BASE: u64 = 1_000_000_000;
/// Milligrin, a thousandth of a grin
pub const MILLI_GRIN: u64 = GRIN_BASE / 1_000;
/// Microgrin, a thousandth of a milligrin
pub const MICRO_GRIN: u64 = MILLI_GRIN / 1_000;
/// Nanogrin, smallest unit, takes a billion to make a grin
pub const NANO_GRIN: u64 = 1;
/// Block interval, in seconds, the network will tune its next_target for. Note
/// that we may reduce this value in the future as we get more data on mining
/// with Cuckoo Cycle, networks improve and block propagation is optimized
/// (adjusting the reward accordingly).
pub const BLOCK_TIME_SEC: u64 = 60;
/// Start at BTC block 717,000 (snapshot) which should occur around
/// Jan 3, 2022. This block will reward 6.25 BTC.
/// We allocate the remainder of the 6.25 block reward to our
/// "long tail" which will last 1000 years and start with 3.25.
/// So initial reward is 0.3125 BCMWs for 1,224,600 more blocks.
/// This is due to the 1-minute blocks instead of the 10-minute blocks of BTC.
/// This is approximately Bitcoin's halving schedule, until
/// the 8th halving, after which the long tail will distribute the
/// remainder of the BCMWs over 1000 years. At block 717,000 there will be
/// 19,246,875 BTC.
/// Note that pre-launch we may recalibrate these numbers
/// a little. The goal will be to get exactly 21m BCMWs, have
/// a 1000 year long tail, and do a snapshot on January 3, 2022.
/// Snapshot includes 18,918,750,000,000,000 NanoBCMWs
/// Genesis reward is 0.
pub const REWARD0: u64 = 0;
/// First reward 1,224,600 blocks
pub const REWARD1: u64 = 312_500_000; // 382,687,500,000,000 NanoBCMWs
/// Second reward for 2,100,000 blocks
pub const REWARD2: u64 = 156_250_000; // 328,125,000,000,000 NanoBCMWs
/// Third reward for 2,100,000 blocks
pub const REWARD3: u64 = 78_125_000; // 164,062,500,000,000 NanoBCMWs
/// Fourth reward for 2,100,000 blocks
pub const REWARD4: u64 = 39_062_500; // 82,031,250,000,000 NanoBCMWs
/// Fifth reward for 2,100,000 blocks
pub const REWARD5: u64 = 19_531_250; // 41,015,625,000,000 NanoBCMWs
/// Sixth reward for 2,100,000 blocks
pub const REWARD6: u64 = 9_765_625; // 20,507,812,500,000 NanoBCMWs
/// Seventh reward for 2,100,000 blocks
pub const REWARD7: u64 = 4_882_812; // 10,253,905,200,000 NanoBCMWs
/// Eighth reward for 525,600,000 blocks
pub const REWARD8: u64 = 2_000_000; // 1,051,200,000,000,000 NanoBCMWs
/// Actual block reward for a given total fee amount
pub fn reward(fee: u64, height: u64) -> u64 {
calc_block_reward(height).saturating_add(fee)
}
fn get_epoch_start(num: u64) -> u64 {
if num == 1 {
1
} else if num == 2 {
1_224_600
} else if num == 3 {
3_324_600
} else if num == 4 {
5_424_600
} else if num == 5 {
7_524_600
} else if num == 6 {
9_624_600
} else if num == 7 {
11_724_600
} else if num == 8 {
13_824_600
} else if num == 9 {
539_424_600
} else {
// shouldn't get here.
0
}
}
/// Calculate block reward based on height
pub fn calc_block_reward(height: u64) -> u64 {
if height == 0 {
// reward for genesis block
REWARD0
} else if height <= get_epoch_start(2) {
REWARD1
} else if height <= get_epoch_start(3) {
REWARD2
} else if height <= get_epoch_start(4) {
REWARD3
} else if height <= get_epoch_start(5) {
REWARD4
} else if height <= get_epoch_start(6) {
REWARD5
} else if height <= get_epoch_start(7) {
REWARD6
} else if height <= get_epoch_start(8) {
REWARD7
} else if height <= get_epoch_start(9) {
REWARD8
} else {
0 // no reward after this.
}
}
fn get_overage_offset_start_epoch(num: u64) -> u64 {
if num == 1 {
REWARD0
} else if num == 2 {
get_epoch_start(2) * REWARD1 + REWARD0
} else if num == 3 {
get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0
} else if num == 4 {
get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 5 {
get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 6 {
get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 7 {
get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 8 {
get_epoch_start(8) * REWARD7
+ get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 9 {
get_epoch_start(9) * REWARD8
+ get_epoch_start(8) * REWARD7
+ get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else {
// should not get here
1
}
}
/// Calculate block overage based on height and claimed BTCUtxos
pub fn calc_block_overage(height: u64) -> u64 {
if height == 0 {
0
} else if height <= get_epoch_start(2) {
(REWARD1 * height) + get_overage_offset_start_epoch(1)
} else if height <= get_epoch_start(3) {
(REWARD2 * (height - get_epoch_start(2))) + get_overage_offset_start_epoch(2)
} else if height <= get_epoch_start(4) {
(REWARD3 * (height - get_epoch_start(3))) + get_overage_offset_start_epoch(3)
} else if height <= get_epoch_start(5) {
(REWARD4 * (height - get_epoch_start(4))) + get_overage_offset_start_epoch(4)
} else if height <= get_epoch_start(6) {
(REWARD5 * (height - get_epoch_start(5))) + get_overage_offset_start_epoch(5)
} else if height <= get_epoch_start(7) {
(REWARD6 * (height - get_epoch_start(6))) + get_overage_offset_start_epoch(6)
} else if height <= get_epoch_start(8) {
(REWARD7 * (height - get_epoch_start(7))) + get_overage_offset_start_epoch(7)
} else if height <= get_epoch_start(9) {
(REWARD8 * (height - get_epoch_start(8))) + get_overage_offset_start_epoch(8)
} else {
// we exit here. Up to future generations to decide
// how to handle.
std::process::exit(0);
}
}
/// an hour in seconds
pub const HOUR_SEC: u64 = 60 * 60;
/// Nominal height for standard time intervals, hour is 60 blocks
pub const HOUR_HEIGHT: u64 = HOUR_SEC / BLOCK_TIME_SEC;
/// A day is 1440 blocks
pub const DAY_HEIGHT: u64 = 24 * HOUR_HEIGHT;
/// A week is 10_080 blocks
pub const WEEK_HEIGHT: u64 = 7 * DAY_HEIGHT;
/// A year is 524_160 blocks
pub const YEAR_HEIGHT: u64 = 52 * WEEK_HEIGHT;
/// Number of blocks before a coinbase matures and can be spent
pub const COINBASE_MATURITY: u64 = DAY_HEIGHT;
/// We use all C29d from the start
pub fn secondary_pow_ratio(_height: u64) -> u64 {
100
}
/// Cuckoo-cycle proof size (cycle length)
pub const PROOFSIZE: usize = 42;
/// Default Cuckatoo Cycle edge_bits, used for mining and validating.
pub const DEFAULT_MIN_EDGE_BITS: u8 = 31;
/// Cuckaroo* proof-of-work edge_bits, meant to be ASIC resistant.
pub const SECOND_POW_EDGE_BITS: u8 = 29;
/// Original reference edge_bits to compute difficulty factors for higher
/// Cuckoo graph sizes, changing this would hard fork
pub const BASE_EDGE_BITS: u8 = 24;
/// Default number of blocks in the past when cross-block cut-through will start
/// happening. Needs to be long enough to not overlap with a long reorg.
/// Rationale behind the value is that the longest bitcoin fork was about 30 blocks, so 5h. We
/// add an order of magnitude to be safe and round to 7x24h of blocks to make it
/// easier to reason about.
pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32;
/// Default number of blocks in the past to determine the height where we request
/// a txhashset (and full blocks from). Needs to be long enough to not overlap with
/// a long reorg.
/// Rationale behind the value is that the longest bitcoin fork was about 30 blocks, so 5h.
/// We add an order of magnitude to be safe and round to 2x24h of blocks to make it
/// easier to reason about.
pub const STATE_SYNC_THRESHOLD: u32 = 2 * DAY_HEIGHT as u32;
/// Weight of an input when counted against the max block weight capacity
pub const INPUT_WEIGHT: u64 = 1;
/// Weight of an output when counted against the max block weight capacity
pub const OUTPUT_WEIGHT: u64 = 21;
/// Weight of a kernel when counted against the max block weight capacity
pub const KERNEL_WEIGHT: u64 = 3;
/// Total maximum block weight. At current sizes, this means a maximum
/// theoretical size of:
/// * `(674 + 33 + 1) * (40_000 / 21) = 1_348_571` for a block with only outputs
/// * `(1 + 8 + 8 + 33 + 64) * (40_000 / 3) = 1_520_000` for a block with only kernels
/// * `(1 + 33) * 40_000 = 1_360_000` for a block with only inputs
///
/// Regardless of the relative numbers of inputs/outputs/kernels in a block the maximum
/// block size is around 1.5MB
/// For a block full of "average" txs (2 inputs, 2 outputs, 1 kernel) we have -
/// `(1 * 2) + (21 * 2) + (3 * 1) = 47` (weight per tx)
/// `40_000 / 47 = 851` (txs per block)
///
pub const MAX_BLOCK_WEIGHT: u64 = 40_000;
/// Fork every 6 months.
pub const HARD_FORK_INTERVAL: u64 = YEAR_HEIGHT / 2;
/// Testnet first hard fork height, set to happen around 2019-06-20
pub const TESTNET_FIRST_HARD_FORK: u64 = 185_040;
/// Testnet second hard fork height, set to happen around 2019-12-19
pub const TESTNET_SECOND_HARD_FORK: u64 = 298_080;
/// Testnet third hard fork height, set to happen around 2020-06-20
pub const TESTNET_THIRD_HARD_FORK: u64 = 552_960;
/// Testnet fourth hard fork height, set to happen around 2020-12-8
pub const TESTNET_FOURTH_HARD_FORK: u64 = 642_240;
/// Fork every 3 blocks
pub const TESTING_HARD_FORK_INTERVAL: u64 = 3;
/// Compute possible block version at a given height,
/// currently no hard forks.
pub fn header_version(_height: u64) -> HeaderVersion {
HeaderVersion(1)
}
/// Check whether the block version is valid at a given height, implements
/// 6 months interval scheduled hard forks for the first 2 years.
pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool |
/// Number of blocks used to calculate difficulty adjustment by Damped Moving Average
pub const DMA_WINDOW: u64 = HOUR_HEIGHT;
/// Difficulty adjustment half life (actually, 60s * number of 0s-blocks to raise diff by factor e) is 4 hours
pub const WTEMA_HALF_LIFE: u64 = 4 * HOUR_SEC;
/// Average time span of the DMA difficulty adjustment window
pub const BLOCK_TIME_WINDOW: u64 = DMA_WINDOW * BLOCK_TIME_SEC;
/// Clamp factor to use for DMA difficulty adjustment
/// Limit value to within this factor of goal
pub const CLAMP_FACTOR: u64 = 2;
/// Dampening factor to use for DMA difficulty adjustment
pub const DMA_DAMP_FACTOR: u64 = 3;
/// Dampening factor to use for AR scale calculation.
pub const AR_SCALE_DAMP_FACTOR: u64 = 13;
/// Compute weight of a graph as number of siphash bits defining the graph
/// The height dependence allows a 30-week linear transition from C31+ to C32+ starting after 1 year
pub fn graph_weight(height: u64, edge_bits: u8) -> u64 {
let mut xpr_edge_bits = edge_bits as u64;
let expiry_height = YEAR_HEIGHT;
if edge_bits == 31 && height >= expiry_height {
xpr_edge_bits = xpr_edge_bits.saturating_sub(1 + (height - expiry_height) / WEEK_HEIGHT);
}
// For C31 xpr_edge_bits reaches 0 at height YEAR_HEIGHT + 30 * WEEK_HEIGHT
// 30 weeks after Jan 15, 2020 would be Aug 12, 2020
(2u64 << (edge_bits - global::base_edge_bits()) as u64) * xpr_edge_bits
}
/// Minimum solution difficulty after HardFork4, when the primary PoW becomes Cuckatoo32+ only
pub const C32_GRAPH_WEIGHT: u64 = (2u64 << (32 - BASE_EDGE_BITS) as u64) * 32; // 16384
/// Minimum difficulty, enforced in Damped Moving Average diff retargeting
/// avoids getting stuck when trying to increase difficulty subject to dampening
pub const MIN_DMA_DIFFICULTY: u64 = DMA_DAMP_FACTOR;
/// Minimum scaling factor for AR pow, enforced in diff retargeting
/// avoids getting stuck when trying to increase ar_scale subject to dampening
pub const MIN_AR_SCALE: u64 = AR_SCALE_DAMP_FACTOR;
/// unit difficulty, equal to graph_weight(SECOND_POW_EDGE_BITS)
pub const UNIT_DIFFICULTY: u64 =
((2 as u64) << (SECOND_POW_EDGE_BITS - BASE_EDGE_BITS)) * (SECOND_POW_EDGE_BITS as u64);
/// The initial difficulty at launch. This should be over-estimated
/// and difficulty should come down at launch rather than up
/// Currently grossly over-estimated at 10% of current
/// Ethereum GPUs (assuming 1 GPU can solve a block at diff 1 in one block interval)
pub const INITIAL_DIFFICULTY: u64 = 1_000_000 * UNIT_DIFFICULTY;
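// Illustrative sketch (not part of the upstream file): the constants above
// written out. C32_GRAPH_WEIGHT is (2 << 8) * 32 = 16_384, UNIT_DIFFICULTY is
// (2 << 5) * 29 = 1_856, so the launch difficulty is 1_856_000_000.
#[cfg(test)]
mod difficulty_constants_example {
	use super::*;

	#[test]
	fn constant_values_sketch() {
		assert_eq!(C32_GRAPH_WEIGHT, 16_384);
		assert_eq!(UNIT_DIFFICULTY, 1_856);
		assert_eq!(INITIAL_DIFFICULTY, 1_856_000_000);
	}
}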
/// Minimal header information required for the Difficulty calculation to
/// take place
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct HeaderInfo {
/// Block hash, ZERO_HASH when this is a synthetic entry.
pub block_hash: Hash,
/// Timestamp of the header, 1 when not used (returned info)
pub timestamp: u64,
/// Network difficulty or next difficulty to use
pub difficulty: Difficulty,
/// Network secondary PoW factor or factor to use
pub secondary_scaling: u32,
/// Whether the header is a secondary proof of work
pub is_secondary: bool,
}
impl HeaderInfo {
/// Default constructor
pub fn new(
block_hash: Hash,
timestamp: u64,
difficulty: Difficulty,
secondary_scaling: u32,
is_secondary: bool,
) -> HeaderInfo {
HeaderInfo {
block_hash,
timestamp,
difficulty,
secondary_scaling,
is_secondary,
}
}
/// Constructor from a timestamp and difficulty, setting a default secondary
/// PoW factor
pub fn from_ts_diff(timestamp: u64, difficulty: Difficulty) -> HeaderInfo {
HeaderInfo {
block_hash: ZERO_HASH,
timestamp,
difficulty,
secondary_scaling: global::initial_graph_weight(),
is_secondary: true,
}
}
/// Constructor from a difficulty and secondary factor, setting a default
/// timestamp
pub fn from_diff_scaling(difficulty: Difficulty, secondary_scaling: u32) -> HeaderInfo {
HeaderInfo {
block_hash: ZERO_HASH,
timestamp: 1,
difficulty,
secondary_scaling,
is_secondary: true,
}
}
}
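// Illustrative sketch (not part of the upstream file): the defaults each
// convenience constructor fills in, per the field docs above.
#[cfg(test)]
mod header_info_example {
	use super::*;

	#[test]
	fn constructor_defaults_sketch() {
		let hi = HeaderInfo::from_diff_scaling(Difficulty::from_num(7), 3);
		assert_eq!(hi.block_hash, ZERO_HASH);
		assert_eq!(hi.timestamp, 1); // placeholder timestamp
		assert_eq!(hi.secondary_scaling, 3);
		assert!(hi.is_secondary);
	}
}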
/// Move value linearly toward a goal
pub fn damp(actual: u64, goal: u64, damp_factor: u64) -> u64 {
(actual + (damp_factor - 1) * goal) / damp_factor
}
/// limit value to be within some factor from a goal
pub fn clamp(actual: u64, goal: u64, clamp_factor: u64) -> u64 {
max(goal / clamp_factor, min(actual, goal * clamp_factor))
}
/// Computes the proof-of-work difficulty that the next block should comply with.
/// Takes an iterator over past block headers information, from latest
/// (highest height) to oldest (lowest height).
/// Uses either the old dma DAA or, starting from HF4, the new wtema DAA
pub fn next_difficulty<T>(height: u64, cursor: T) -> HeaderInfo
where
T: IntoIterator<Item = HeaderInfo>,
{
if header_version(height) < HeaderVersion(5) {
next_dma_difficulty(height, cursor)
} else {
next_wtema_difficulty(height, cursor)
}
}
/// Difficulty calculation based on a Damped Moving Average
/// of difficulty over a window of DMA_WINDOW blocks.
/// The corresponding timespan is calculated
/// by using the difference between the timestamps at the beginning
/// and the end of the window, with a damping toward the target block time.
pub fn next_dma_difficulty<T>(height: u64, cursor: T) -> HeaderInfo
where
T: IntoIterator<Item = HeaderInfo>,
{
// Create vector of difficulty data running from earliest
// to latest, and pad with simulated pre-genesis data to allow earlier
// adjustment if there isn't enough window data; length will be
// DMA_WINDOW + 1 (for initial block time bound)
let diff_data = global::difficulty_data_to_vector(cursor);
// First, get the ratio of secondary PoW vs primary, skipping initial header
let sec_pow_scaling = secondary_pow_scaling(height, &diff_data[1..]);
// Get the timestamp delta across the window
let ts_delta: u64 = diff_data[DMA_WINDOW as usize].timestamp - diff_data[0].timestamp;
// Get the difficulty sum of the last DMA_WINDOW elements
let diff_sum: u64 = diff_data
.iter()
.skip(1)
.map(|dd| dd.difficulty.to_num())
.sum();
// adjust time delta toward goal subject to dampening and clamping
let adj_ts = clamp(
damp(ts_delta, BLOCK_TIME_WINDOW, DMA_DAMP_FACTOR),
BLOCK_TIME_WINDOW,
CLAMP_FACTOR,
);
// minimum difficulty avoids getting stuck due to dampening
let difficulty = max(MIN_DMA_DIFFICULTY, diff_sum * BLOCK_TIME_SEC / adj_ts);
HeaderInfo::from_diff_scaling(Difficulty::from_num(difficulty), sec_pow_scaling)
}
/// Difficulty calculation based on a Weighted Target Exponential Moving Average
/// of difficulty, using the ratio of the last block time over the target block time.
pub fn next_wtema_difficulty<T>(_height: u64, cursor: T) -> HeaderInfo
where
T: IntoIterator<Item = HeaderInfo>,
{
let mut last_headers = cursor.into_iter();
// last two headers
let last_header = last_headers.next().unwrap();
let prev_header = last_headers.next().unwrap();
let last_block_time: u64 = last_header.timestamp - prev_header.timestamp;
let last_diff = last_header.difficulty.to_num();
// wtema difficulty update
let next_diff =
last_diff * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - BLOCK_TIME_SEC + last_block_time);
// mainnet minimum difficulty at graph_weight(32) ensures difficulty increase on 59s block
// since 16384 * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - 1) > 16384
let difficulty = max(Difficulty::min_wtema(), Difficulty::from_num(next_diff));
HeaderInfo::from_diff_scaling(difficulty, 0) // no more secondary PoW
}
/// Count, in units of 1/100 (a percent), the number of "secondary" (AR) blocks in the provided window of blocks.
pub fn ar_count(_height: u64, diff_data: &[HeaderInfo]) -> u64 {
100 * diff_data.iter().filter(|n| n.is_secondary).count() as u64
}
/// The secondary proof-of-work factor is calculated along the same lines as in next_dma_difficulty,
/// as an adjustment on the deviation against the ideal value.
/// Factor by which the secondary proof of work difficulty will be adjusted
pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 {
// Get the scaling factor sum of the last DMA_WINDOW elements
let scale_sum: u64 = diff_data.iter().map(|dd| dd.secondary_scaling as u64).sum();
// compute ideal 2nd_pow_fraction in pct and across window
let target_pct = secondary_pow_ratio(height);
let target_count = DMA_WINDOW * target_pct;
// Get the secondary count across the window, adjusting count toward goal
// subject to dampening and clamping.
let adj_count = clamp(
damp(
ar_count(height, diff_data),
target_count,
AR_SCALE_DAMP_FACTOR,
),
target_count,
CLAMP_FACTOR,
);
let scale = scale_sum * target_pct / max(1, adj_count);
// minimum AR scale avoids getting stuck due to dampening
max(MIN_AR_SCALE, scale) as u32
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_graph_weight() {
global::set_local_chain_type(global::ChainTypes::Mainnet);
// initial weights
assert_eq!(graph_weight(1, 31), 256 * 31);
assert_eq!(graph_weight(1, 32), 512 * 32);
assert_eq!(graph_weight(1, 33), 1024 * 33);
// one year in, 31 starts going down, the rest stays the same
assert_eq!(graph_weight(YEAR_HEIGHT, 31), 256 * 30);
assert_eq!(graph_weight(YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(YEAR_HEIGHT, 33), 1024 * 33);
// 31 loses one factor per week
assert_eq!(graph_weight(YEAR_HEIGHT + WEEK_HEIGHT, 31), 256 * 29);
assert_eq!(graph_weight(YEAR_HEIGHT + 2 * WEEK_HEIGHT, 31), 256 * 28);
assert_eq!(graph_weight(YEAR_HEIGHT + 32 * WEEK_HEIGHT, 31), 0);
// 2 years in, 31 still at 0, 32 starts decreasing
assert_eq!(graph_weight(2 * YEAR_HEIGHT, 31), 0);
assert_eq!(graph_weight(2 * YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(2 * YEAR_HEIGHT, 33), 1024 * 33);
// 32 phaseout on hold
assert_eq!(
graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 32),
C32_GRAPH_WEIGHT
);
assert_eq!(graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 31), 0);
assert_eq!(
graph_weight(2 * YEAR_HEIGHT + 30 * WEEK_HEIGHT, 32),
C32_GRAPH_WEIGHT
);
assert_eq!(
graph_weight(2 * YEAR_HEIGHT + 31 * WEEK_HEIGHT, 32),
C32_GRAPH_WEIGHT
);
// 3 years in, nothing changes
assert_eq!(graph_weight(3 * YEAR_HEIGHT, 31), 0);
assert_eq!(graph_weight(3 * YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(3 * YEAR_HEIGHT, 33), 1024 * 33);
// 4 years in, still on hold
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 31), 0);
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 32), 512 * 32);
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33);
assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33);
}
}
| {
version == header_version(height)
} | identifier_body |
lib.rs | // Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![crate_name="rustcxx_common"]
#![feature(rustc_private, slice_patterns)]
extern crate syntax;
extern crate rustc;
mod types;
use std::borrow::Cow;
use std::hash::{SipHasher, Hash, Hasher};
use std::iter;
use syntax::abi::Abi;
use syntax::ast::{self, DUMMY_NODE_ID};
use syntax::codemap::{Span, Spanned, respan, spanned, DUMMY_SP};
use syntax::errors::Handler;
use syntax::ext::base::ExtCtxt;
use syntax::ext::build::AstBuilder;
use syntax::ext::quote::rt::ToTokens;
use syntax::parse::{token, PResult};
use syntax::parse::common::SeqSep;
use syntax::parse::parser::Parser;
use syntax::print::pprust::{token_to_string, tts_to_string};
use syntax::ptr::P;
use syntax::tokenstream::TokenTree;
/// Language-specific parsing.
///
/// The two macros, `cxx!` and `rust!`, share a similar syntax.
/// This trait differentiates the two, such that the rest of the parsing code can be reused.
pub trait Lang {
type Body: ToTokens;
type ArgValue;
fn parse_body<'a>(parser: &mut Parser<'a>) -> PResult<'a, Self::Body>;
fn parse_arg_value<'a>(ecx: &ExtCtxt,
parser: &mut Parser<'a>,
ident: ast::SpannedIdent) -> PResult<'a, Self::ArgValue>;
}
pub enum Rust {}
pub enum Cxx {}
impl Lang for Rust {
type Body = P<ast::Block>;
type ArgValue = Vec<Spanned<String>>;
fn parse_body<'a>(parser: &mut Parser<'a>) -> PResult<'a, Self::Body> {
parser.parse_block()
}
fn parse_arg_value<'a>(_ecx: &ExtCtxt,
parser: &mut Parser<'a>,
ident: ast::SpannedIdent) -> PResult<'a, Self::ArgValue> {
if parser.eat(&token::Eq) {
let mut tokens = Vec::new();
while !parser.check(&token::Comma) &&
!parser.check(&token::CloseDelim(token::Paren)) {
tokens.push(try!(parser.parse_token_tree()));
}
Ok(flatten_tts(&tokens))
} else {
Ok(vec![respan(ident.span, ident.node.to_string())])
}
}
}
impl Lang for Cxx {
type Body = Vec<TokenTree>;
type ArgValue = P<ast::Expr>;
fn parse_body<'a>(parser: &mut Parser<'a>) -> PResult<'a, Self::Body> {
try!(parser.expect(&token::OpenDelim(token::Brace)));
parser.parse_seq_to_end(
&token::CloseDelim(token::Brace),
SeqSep::none(),
|parser| parser.parse_token_tree())
}
fn parse_arg_value<'a>(ecx: &ExtCtxt,
parser: &mut Parser<'a>,
ident: ast::SpannedIdent) -> PResult<'a, Self::ArgValue> {
if parser.eat(&token::Eq) {
parser.parse_expr()
} else {
Ok(ecx.expr_ident(ident.span, ident.node))
}
}
}
pub struct Function<L: Lang> {
pub span: Span,
pub name: ast::Ident,
pub ret_ty: Option<P<ast::Ty>>,
pub args: Vec<ArgSpec<L>>,
pub body: L::Body,
}
impl <L: Lang> Function<L> {
pub fn parse<'a>(ecx: &ExtCtxt<'a>,
span: Span,
tts: &[TokenTree]) -> PResult<'a, Function<L>> {
let mut parser = ecx.new_parser_from_tts(tts);
let args = if parser.check(&token::OpenDelim(token::Paren)) {
Some(try!(Self::parse_args(ecx, &mut parser)))
} else {
None
};
let ret_ty = if args.is_some() && parser.check(&token::RArrow) {
Some(try!(Self::parse_ret_ty(&mut parser)))
} else {
None
};
let body = try!(L::parse_body(&mut parser));
let hash = {
let mut hasher = SipHasher::new();
tts_to_string(tts).hash(&mut hasher);
hasher.finish()
};
let name = ecx.ident_of(&format!("rustcxx_{:016x}", hash));
Ok(Function {
span: span,
name: name,
ret_ty: ret_ty,
args: args.unwrap_or_else(|| Vec::new()),
body: body,
})
}
fn parse_args<'a>(ecx: &ExtCtxt,
parser: &mut Parser<'a>) -> PResult<'a, Vec<ArgSpec<L>>> {
parser.parse_unspanned_seq(
&token::OpenDelim(token::Paren),
&token::CloseDelim(token::Paren),
SeqSep::trailing_allowed(token::Comma),
|parser| ArgSpec::parse(ecx, parser))
}
fn parse_ret_ty<'a>(parser: &mut Parser<'a>) -> PResult<'a, P<ast::Ty>> {
try!(parser.expect(&token::RArrow));
parser.parse_ty()
}
pub fn fn_decl(&self, ecx: &ExtCtxt) -> P<ast::FnDecl> {
let args = self.args.iter().map(|arg| {
ecx.arg(arg.ident.span, arg.ident.node, arg.ty.clone())
}).collect();
let ret_ty = self.ret_ty.clone()
.map(ast::FunctionRetTy::Ty)
.unwrap_or(ast::FunctionRetTy::Default(DUMMY_SP));
P(ast::FnDecl {
inputs: args,
output: ret_ty,
variadic: false
})
}
pub fn foreign_item(&self, ecx: &ExtCtxt) -> ast::ForeignItem {
let fn_decl = self.fn_decl(ecx);
ast::ForeignItem {
id: DUMMY_NODE_ID,
ident: self.name,
attrs: Vec::new(),
node: ast::ForeignItemKind::Fn(fn_decl, ast::Generics::default()),
vis: ast::Visibility::Inherited,
span: self.span,
}
}
pub fn cxx_args<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> {
let args = try!(self.args.iter().map(|arg| {
let ty = try!(arg.cxx_type(&ecx.parse_sess.span_diagnostic));
Ok(format!("{} const {}", ty, arg.ident.node))
}).collect::<PResult<Vec<String>>>());
Ok(args.join(", "))
}
pub fn cxx_ret_ty<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, Cow<'static, str>> {
self.ret_ty.as_ref().map(|ty| {
types::convert_ty_to_cxx(&ecx.parse_sess.span_diagnostic, &ty)
}).unwrap_or(Ok(Cow::from("void")))
}
}
#[derive(Debug)]
pub struct ArgSpec<L: Lang> {
pub ident: ast::SpannedIdent,
pub ty: P<ast::Ty>,
pub value: L::ArgValue,
}
impl <L: Lang> ArgSpec<L> {
pub fn parse<'a>(ecx: &ExtCtxt,
parser: &mut Parser<'a>) -> PResult<'a, ArgSpec<L>> {
let ident = {
let lo = parser.span.lo;
let ident = try!(parser.parse_ident());
let hi = parser.span.lo;
spanned(lo, hi, ident)
};
try!(parser.expect(&token::Colon));
let ty = try!(parser.parse_ty());
let value = try!(L::parse_arg_value(ecx, parser, ident));
Ok(ArgSpec {
ident: ident,
ty: ty,
value: value,
})
}
pub fn cxx_type<'a>(&self, handler: &'a Handler)
-> PResult<'a, Cow<'static, str>> {
types::convert_ty_to_cxx(handler, &self.ty)
}
}
impl Function<Cxx> {
pub fn call_expr<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, P<ast::Expr>> {
let name = self.name.clone();
let args = self.args.iter().map(|arg| arg.value.clone()).collect();
Ok(ecx.expr_call_ident(self.span, name, args))
}
pub fn cxx_code<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> {
let ret_ty = try!(self.cxx_ret_ty(ecx));
let args = try!(self.cxx_args(ecx));
let signature = format!(
"{span}\nextern \"C\" {ret_ty} {name}({args})",
span = span_to_cpp_directive(ecx, self.span),
ret_ty = ret_ty,
name = self.name,
args = args);
let mut body = tokens_to_cpp(ecx, &flatten_tts(&self.body));
if self.ret_ty.is_some() {
body = format!("return ({{\n{};\n}});", body);
}
Ok(format!("{} {{\n{}\n}}\n", signature, body))
}
}
// Calling rust from C++ is a bit trickier.
// We must declare the function before it can be used.
// However C++ requires the function to be declared outside the current function, but then we may
// miss type definitions which are in scope due to being in a namespace, or some includes.
//
// For example :
// ```c++
// #include <stdint.h>
// #include <stdio.h>
//
// void foo() {
// uint32_t a = 3;
// uint32_t doubled = rust![(a: uint32_t) -> uint32_t {
// a * 2
// }];
// printf("double: ", a);
// }
// ```
//
// Declaring the extern function before the includes would not work, as uint32_t is not defined at
// this point. Finding the right place to declare it would be complicated and would almost require
// a full C++ parser.
//
// Instead we use an alternative approach. The function's symbol is declared with an opaque type at
// the top of the file. This does not require argument types to be in scope.
// When invoking the function, the symbol is first cast into a function pointer of the correct type.
// This way, the same typing context as in the original source is used.
//
// The example above would be translated into the following :
//
// ```c++
// struct rustcxx_XXXXXXXX;
// extern "C" rustcxx_XXXXXXXX rustcxx_XXXXXXXX;
//
// #include <stdint.h>
// #include <stdio.h>
//
// void foo() {
// uint32_t a = 3;
// uint32_t doubled = ((uint32_t (*)(uint32_t a)) &rustcxx_XXXXXXXX)(a);
// printf("double: ", a);
// }
// ```
impl Function<Rust> {
pub fn cxx_decl<'a>(&self, _ecx: &'a ExtCtxt) -> PResult<'a, String> {
Ok(format!("struct {}; extern \"C\" {} {};", self.name, self.name, self.name))
}
pub fn cxx_call<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> {
let ret_ty = try!(self.cxx_ret_ty(ecx));
let args_sig = try!(self.cxx_args(ecx));
let arg_separator = respan(DUMMY_SP, String::from(","));
let args_value = self.args.iter().map(|arg| {
arg.value.clone()
}).collect::<Vec<_>>().join(&arg_separator);
let cast_ty = format!("{} (*) ({})", ret_ty, args_sig);
let fn_ptr = format!("( ({}) &{} )", cast_ty, self.name);
let call = format!("{} ({})", fn_ptr, tokens_to_cpp(ecx, &args_value));
Ok(call)
}
pub fn item<'a>(&self, ecx: &'a ExtCtxt) -> P<ast::Item> {
let decl = self.fn_decl(ecx);
// Function has to be no_mangle, otherwise it can't be called from C++
let no_mangle = ecx.meta_word(self.span, token::intern("no_mangle").as_str());
// The function has to be exported or it would be optimized out by the compiler.
// The compiler already prints an error, but it is easy to miss, so make it a hard error.
let deny = ecx.meta_list(
self.span,
token::intern("deny").as_str(),
vec![ecx.meta_word(self.span, token::intern("private_no_mangle_fns").as_str())]);
let attrs = vec![
ecx.attribute(self.span, no_mangle),
ecx.attribute(self.span, deny),
];
let fn_item = ast::ItemKind::Fn(
decl, ast::Unsafety::Unsafe, ast::Constness::NotConst,
Abi::C, ast::Generics::default(), self.body.clone());
P(ast::Item {
ident: self.name,
attrs: attrs,
id: ast::DUMMY_NODE_ID,
node: fn_item,
vis: ast::Visibility::Public,
span: self.span,
})
}
}
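// Illustrative sketch of the strings the two methods above would generate for a hashed
// name like `rustcxx_0123456789abcdef` taking a single `a: uint32_t` argument (name and
// type here are assumed for illustration, not taken from real output):
//   cxx_decl: struct rustcxx_0123456789abcdef; extern "C" rustcxx_0123456789abcdef rustcxx_0123456789abcdef;
//   cxx_call: ( (uint32_t (*) (uint32_t const a)) &rustcxx_0123456789abcdef ) (a)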
/// Find and replace uses of rust![ .. ] in a token tree stream.
///
/// The callback is invoked for every use of the rust! macro and its result is used to replace it.
pub fn parse_rust_macro<F>(tts: &[TokenTree], f: &mut F) -> Vec<Spanned<String>>
where F: FnMut(Span, &[TokenTree]) -> Vec<Spanned<String>> {
let mut result = Vec::new();
// Iterate over the tokens with 3 tokens of lookahead.
let mut i = 0;
loop {
match (tts.get(i), tts.get(i+1), tts.get(i+2)) {
(Some(&TokenTree::Token(_, token::Ident(ident))),
Some(&TokenTree::Token(_, token::Not)),
Some(&TokenTree::Delimited(span, ref contents)))
if ident.name.to_string() == "rust" => {
i += 2;
result.extend(f(span, &contents.tts));
}
(Some(&TokenTree::Delimited(_, ref contents)), _, _) => {
// Recursively look into the token tree
result.push(respan(contents.open_span, token_to_string(&contents.open_token())));
result.extend(parse_rust_macro(&contents.tts, f));
result.push(respan(contents.close_span, token_to_string(&contents.close_token())));
}
(Some(&TokenTree::Token(span, ref tok)), _, _) => {
result.push(respan(span, token_to_string(tok)));
}
(Some(&TokenTree::Sequence(..)), _, _) => unimplemented!(),
(None, _, _) => break,
}
i += 1;
}
result
}
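// Illustrative example (assumed input, not taken from a test): given the token trees for
//     foo(); rust![(a: u32) -> u32 { a * 2 }]; bar();
// the three-token lookahead matches the `rust` ident, the `!`, and the bracketed group,
// so only the bracketed tokens are handed to `f`; `foo ( ) ;` and `bar ( ) ;` are simply
// stringified and passed through unchanged.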
/// Flatten a token tree stream.
///
/// Each token is stringified and paired with its span.
pub fn flatten_tts(tts: &[TokenTree]) -> Vec<Spanned<String>> {
tts.iter().flat_map(|tt| {
match tt {
&TokenTree::Token(span, ref tok) => {
vec![respan(span, token_to_string(tok))]
}
&TokenTree::Delimited(_, ref delimited) => {
let open = respan(delimited.open_span, token_to_string(&delimited.open_token()));
let close = respan(delimited.close_span, token_to_string(&delimited.close_token()));
iter::once(open)
.chain(flatten_tts(&delimited.tts))
.chain(iter::once(close))
.collect()
}
&TokenTree::Sequence(..) => unimplemented!()
}
}).collect()
}
/// Join tokens, using `#line` C preprocessor directives to maintain span
/// information.
pub fn tokens_to_cpp(ecx: &ExtCtxt, tokens: &[Spanned<String>]) -> String {
let codemap = ecx.parse_sess.codemap();
let mut last_pos = codemap.lookup_char_pos(DUMMY_SP.lo);
let mut column = 0;
let mut contents = String::new();
for token in tokens {
if token.span != DUMMY_SP {
let pos = codemap.lookup_char_pos(token.span.lo);
if pos.file.name == last_pos.file.name && pos.line == last_pos.line + 1 {
contents.push('\n');
column = 0;
} else if pos.file.name != last_pos.file.name || pos.line != last_pos.line {
contents.push('\n');
contents.push_str(&span_to_cpp_directive(ecx, token.span));
contents.push('\n');
column = 0;
} | contents.push(' ');
column += 1;
}
last_pos = pos;
}
column += token.node.len();
contents.push_str(&token.node);
}
return contents;
}
pub fn span_to_cpp_directive(ecx: &ExtCtxt, span: Span) -> String {
let codemap = ecx.parse_sess.codemap();
let pos = codemap.lookup_char_pos(span.lo);
format!("#line {} {:?}", pos.line, pos.file.name)
} |
// Pad the code such that the token remains on the same column
while column < pos.col.0 { | random_line_split |
lib.rs | // Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![crate_name="rustcxx_common"]
#![feature(rustc_private, slice_patterns)]
extern crate syntax;
extern crate rustc;
mod types;
use std::borrow::Cow;
use std::hash::{SipHasher, Hash, Hasher};
use std::iter;
use syntax::abi::Abi;
use syntax::ast::{self, DUMMY_NODE_ID};
use syntax::codemap::{Span, Spanned, respan, spanned, DUMMY_SP};
use syntax::errors::Handler;
use syntax::ext::base::ExtCtxt;
use syntax::ext::build::AstBuilder;
use syntax::ext::quote::rt::ToTokens;
use syntax::parse::{token, PResult};
use syntax::parse::common::SeqSep;
use syntax::parse::parser::Parser;
use syntax::print::pprust::{token_to_string, tts_to_string};
use syntax::ptr::P;
use syntax::tokenstream::TokenTree;
/// Language specific parsing.
///
/// The two macros, `cxx!` and `rust!`, share a similar syntax.
/// This trait differentiates the two, such that the rest of the parsing code can be reused.
pub trait Lang {
type Body: ToTokens;
type ArgValue;
fn parse_body<'a>(parser: &mut Parser<'a>) -> PResult<'a, Self::Body>;
fn parse_arg_value<'a>(ecx: &ExtCtxt,
parser: &mut Parser<'a>,
ident: ast::SpannedIdent) -> PResult<'a, Self::ArgValue>;
}
pub enum Rust {}
pub enum Cxx {}
impl Lang for Rust {
type Body = P<ast::Block>;
type ArgValue = Vec<Spanned<String>>;
fn parse_body<'a>(parser: &mut Parser<'a>) -> PResult<'a, Self::Body> {
parser.parse_block()
}
fn parse_arg_value<'a>(_ecx: &ExtCtxt,
parser: &mut Parser<'a>,
ident: ast::SpannedIdent) -> PResult<'a, Self::ArgValue> {
if parser.eat(&token::Eq) {
let mut tokens = Vec::new();
while !parser.check(&token::Comma) &&
!parser.check(&token::CloseDelim(token::Paren)) {
tokens.push(try!(parser.parse_token_tree()));
}
Ok(flatten_tts(&tokens))
} else {
Ok(vec![respan(ident.span, ident.node.to_string())])
}
}
}
impl Lang for Cxx {
type Body = Vec<TokenTree>;
type ArgValue = P<ast::Expr>;
fn parse_body<'a>(parser: &mut Parser<'a>) -> PResult<'a, Self::Body> {
try!(parser.expect(&token::OpenDelim(token::Brace)));
parser.parse_seq_to_end(
&token::CloseDelim(token::Brace),
SeqSep::none(),
|parser| parser.parse_token_tree())
}
fn parse_arg_value<'a>(ecx: &ExtCtxt,
parser: &mut Parser<'a>,
ident: ast::SpannedIdent) -> PResult<'a, Self::ArgValue> {
if parser.eat(&token::Eq) {
parser.parse_expr()
} else {
Ok(ecx.expr_ident(ident.span, ident.node))
}
}
}
pub struct Function<L: Lang> {
pub span: Span,
pub name: ast::Ident,
pub ret_ty: Option<P<ast::Ty>>,
pub args: Vec<ArgSpec<L>>,
pub body: L::Body,
}
impl <L: Lang> Function<L> {
pub fn | <'a>(ecx: &ExtCtxt<'a>,
span: Span,
tts: &[TokenTree]) -> PResult<'a, Function<L>> {
let mut parser = ecx.new_parser_from_tts(tts);
let args = if parser.check(&token::OpenDelim(token::Paren)) {
Some(try!(Self::parse_args(ecx, &mut parser)))
} else {
None
};
let ret_ty = if args.is_some() && parser.check(&token::RArrow) {
Some(try!(Self::parse_ret_ty(&mut parser)))
} else {
None
};
let body = try!(L::parse_body(&mut parser));
let hash = {
let mut hasher = SipHasher::new();
tts_to_string(tts).hash(&mut hasher);
hasher.finish()
};
let name = ecx.ident_of(&format!("rustcxx_{:016x}", hash));
Ok(Function {
span: span,
name: name,
ret_ty: ret_ty,
args: args.unwrap_or_else(|| Vec::new()),
body: body,
})
}
fn parse_args<'a>(ecx: &ExtCtxt,
parser: &mut Parser<'a>) -> PResult<'a, Vec<ArgSpec<L>>> {
parser.parse_unspanned_seq(
&token::OpenDelim(token::Paren),
&token::CloseDelim(token::Paren),
SeqSep::trailing_allowed(token::Comma),
|parser| ArgSpec::parse(ecx, parser))
}
fn parse_ret_ty<'a>(parser: &mut Parser<'a>) -> PResult<'a, P<ast::Ty>> {
try!(parser.expect(&token::RArrow));
parser.parse_ty()
}
pub fn fn_decl(&self, ecx: &ExtCtxt) -> P<ast::FnDecl> {
let args = self.args.iter().map(|arg| {
ecx.arg(arg.ident.span, arg.ident.node, arg.ty.clone())
}).collect();
let ret_ty = self.ret_ty.clone()
.map(ast::FunctionRetTy::Ty)
.unwrap_or(ast::FunctionRetTy::Default(DUMMY_SP));
P(ast::FnDecl {
inputs: args,
output: ret_ty,
variadic: false
})
}
pub fn foreign_item(&self, ecx: &ExtCtxt) -> ast::ForeignItem {
let fn_decl = self.fn_decl(ecx);
ast::ForeignItem {
id: DUMMY_NODE_ID,
ident: self.name,
attrs: Vec::new(),
node: ast::ForeignItemKind::Fn(fn_decl, ast::Generics::default()),
vis: ast::Visibility::Inherited,
span: self.span,
}
}
pub fn cxx_args<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> {
let args = try!(self.args.iter().map(|arg| {
let ty = try!(arg.cxx_type(&ecx.parse_sess.span_diagnostic));
Ok(format!("{} const {}", ty, arg.ident.node))
}).collect::<PResult<Vec<String>>>());
Ok(args.join(", "))
}
pub fn cxx_ret_ty<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, Cow<'static, str>> {
self.ret_ty.as_ref().map(|ty| {
types::convert_ty_to_cxx(&ecx.parse_sess.span_diagnostic, &ty)
}).unwrap_or(Ok(Cow::from("void")))
}
}
#[derive(Debug)]
pub struct ArgSpec<L: Lang> {
pub ident: ast::SpannedIdent,
pub ty: P<ast::Ty>,
pub value: L::ArgValue,
}
impl <L: Lang> ArgSpec<L> {
pub fn parse<'a>(ecx: &ExtCtxt,
parser: &mut Parser<'a>) -> PResult<'a, ArgSpec<L>> {
let ident = {
let lo = parser.span.lo;
let ident = try!(parser.parse_ident());
let hi = parser.span.lo;
spanned(lo, hi, ident)
};
try!(parser.expect(&token::Colon));
let ty = try!(parser.parse_ty());
let value = try!(L::parse_arg_value(ecx, parser, ident));
Ok(ArgSpec {
ident: ident,
ty: ty,
value: value,
})
}
pub fn cxx_type<'a>(&self, handler: &'a Handler)
-> PResult<'a, Cow<'static, str>> {
types::convert_ty_to_cxx(handler, &self.ty)
}
}
impl Function<Cxx> {
pub fn call_expr<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, P<ast::Expr>> {
let name = self.name.clone();
let args = self.args.iter().map(|arg| arg.value.clone()).collect();
Ok(ecx.expr_call_ident(self.span, name, args))
}
pub fn cxx_code<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> {
let ret_ty = try!(self.cxx_ret_ty(ecx));
let args = try!(self.cxx_args(ecx));
let signature = format!(
"{span}\nextern \"C\" {ret_ty} {name}({args})",
span = span_to_cpp_directive(ecx, self.span),
ret_ty = ret_ty,
name = self.name,
args = args);
let mut body = tokens_to_cpp(ecx, &flatten_tts(&self.body));
if self.ret_ty.is_some() {
body = format!("return ({{\n{};\n}});", body);
}
Ok(format!("{} {{\n{}\n}}\n", signature, body))
}
}
// Calling rust from C++ is a bit trickier.
// We must declare the function before it can be used.
// However C++ requires the function to be declared outside the current function, but then we may
// miss type definitions which are in scope due to being in a namespace, or some includes.
//
// For example :
// ```c++
// #include <stdint.h>
// #include <stdio.h>
//
// void foo() {
// uint32_t a = 3;
// uint32_t doubled = rust![(a: uint32_t) -> uint32_t {
// a * 2
// }];
// printf("double: ", a);
// }
// ```
//
// Declaring the extern function before the includes would not work, as uint32_t is not defined at
// this point. Finding the right place to declare it would be complicated and would almost require
// a full C++ parser.
//
// Instead we use an alternative approach. The function's symbol is declared with an opaque type at
// the top of the file. This does not require argument types to be in scope.
// When invoking the function, the symbol is first cast into a function pointer of the correct type.
// This way, the same typing context as in the original source is used.
//
// The example above would be translated into the following :
//
// ```c++
// struct rustcxx_XXXXXXXX;
// extern "C" rustcxx_XXXXXXXX rustcxx_XXXXXXXX;
//
// #include <stdint.h>
// #include <stdio.h>
//
// void foo() {
// uint32_t a = 3;
// uint32_t doubled = ((uint32_t (*)(uint32_t a)) &rustcxx_XXXXXXXX)(a);
// printf("double: ", a);
// }
// ```
impl Function<Rust> {
pub fn cxx_decl<'a>(&self, _ecx: &'a ExtCtxt) -> PResult<'a, String> {
Ok(format!("struct {}; extern \"C\" {} {};", self.name, self.name, self.name))
}
pub fn cxx_call<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> {
let ret_ty = try!(self.cxx_ret_ty(ecx));
let args_sig = try!(self.cxx_args(ecx));
let arg_separator = respan(DUMMY_SP, String::from(","));
let args_value = self.args.iter().map(|arg| {
arg.value.clone()
}).collect::<Vec<_>>().join(&arg_separator);
let cast_ty = format!("{} (*) ({})", ret_ty, args_sig);
let fn_ptr = format!("( ({}) &{} )", cast_ty, self.name);
let call = format!("{} ({})", fn_ptr, tokens_to_cpp(ecx, &args_value));
Ok(call)
}
pub fn item<'a>(&self, ecx: &'a ExtCtxt) -> P<ast::Item> {
let decl = self.fn_decl(ecx);
// Function has to be no_mangle, otherwise it can't be called from C++
let no_mangle = ecx.meta_word(self.span, token::intern("no_mangle").as_str());
// The function has to be exported or it would be optimized out by the compiler.
// The compiler already prints an error, but it is easy to miss, so make it a hard error.
let deny = ecx.meta_list(
self.span,
token::intern("deny").as_str(),
vec![ecx.meta_word(self.span, token::intern("private_no_mangle_fns").as_str())]);
let attrs = vec![
ecx.attribute(self.span, no_mangle),
ecx.attribute(self.span, deny),
];
let fn_item = ast::ItemKind::Fn(
decl, ast::Unsafety::Unsafe, ast::Constness::NotConst,
Abi::C, ast::Generics::default(), self.body.clone());
P(ast::Item {
ident: self.name,
attrs: attrs,
id: ast::DUMMY_NODE_ID,
node: fn_item,
vis: ast::Visibility::Public,
span: self.span,
})
}
}
/// Find and replace uses of rust![ .. ] in a token tree stream.
///
/// The callback is invoked for every use of the rust! macro and its result is used to replace it.
pub fn parse_rust_macro<F>(tts: &[TokenTree], f: &mut F) -> Vec<Spanned<String>>
where F: FnMut(Span, &[TokenTree]) -> Vec<Spanned<String>> {
let mut result = Vec::new();
// Iterate over the tokens with 3 tokens of lookahead.
let mut i = 0;
loop {
match (tts.get(i), tts.get(i+1), tts.get(i+2)) {
(Some(&TokenTree::Token(_, token::Ident(ident))),
Some(&TokenTree::Token(_, token::Not)),
Some(&TokenTree::Delimited(span, ref contents)))
if ident.name.to_string() == "rust" => {
i += 2;
result.extend(f(span, &contents.tts));
}
(Some(&TokenTree::Delimited(_, ref contents)), _, _) => {
// Recursively look into the token tree
result.push(respan(contents.open_span, token_to_string(&contents.open_token())));
result.extend(parse_rust_macro(&contents.tts, f));
result.push(respan(contents.close_span, token_to_string(&contents.close_token())));
}
(Some(&TokenTree::Token(span, ref tok)), _, _) => {
result.push(respan(span, token_to_string(tok)));
}
(Some(&TokenTree::Sequence(..)), _, _) => unimplemented!(),
(None, _, _) => break,
}
i += 1;
}
result
}
/// Flatten a token tree stream.
///
/// Each token is stringified and paired with its span.
pub fn flatten_tts(tts: &[TokenTree]) -> Vec<Spanned<String>> {
tts.iter().flat_map(|tt| {
match tt {
&TokenTree::Token(span, ref tok) => {
vec![respan(span, token_to_string(tok))]
}
&TokenTree::Delimited(_, ref delimited) => {
let open = respan(delimited.open_span, token_to_string(&delimited.open_token()));
let close = respan(delimited.close_span, token_to_string(&delimited.close_token()));
iter::once(open)
.chain(flatten_tts(&delimited.tts))
.chain(iter::once(close))
.collect()
}
&TokenTree::Sequence(..) => unimplemented!()
}
}).collect()
}
/// Join tokens, using `#line` C preprocessor directives to maintain span
/// information.
pub fn tokens_to_cpp(ecx: &ExtCtxt, tokens: &[Spanned<String>]) -> String {
let codemap = ecx.parse_sess.codemap();
let mut last_pos = codemap.lookup_char_pos(DUMMY_SP.lo);
let mut column = 0;
let mut contents = String::new();
for token in tokens {
if token.span != DUMMY_SP {
let pos = codemap.lookup_char_pos(token.span.lo);
if pos.file.name == last_pos.file.name && pos.line == last_pos.line + 1 {
contents.push('\n');
column = 0;
} else if pos.file.name != last_pos.file.name || pos.line != last_pos.line {
contents.push('\n');
contents.push_str(&span_to_cpp_directive(ecx, token.span));
contents.push('\n');
column = 0;
}
// Pad the code such that the token remains on the same column
while column < pos.col.0 {
contents.push(' ');
column += 1;
}
last_pos = pos;
}
column += token.node.len();
contents.push_str(&token.node);
}
return contents;
}
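// Rough sketch of the output (file name and line numbers are assumed): tokens that came
// from lines 10 and 12 of foo.rs would be joined as
//     #line 10 "foo.rs"
//     a * 2
//     #line 12 "foo.rs"
//     b + 1
// so C++ compiler diagnostics point back at the original source locations, with spaces
// inserted to keep each token on its original column.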
pub fn span_to_cpp_directive(ecx: &ExtCtxt, span: Span) -> String {
let codemap = ecx.parse_sess.codemap();
let pos = codemap.lookup_char_pos(span.lo);
format!("#line {} {:?}", pos.line, pos.file.name)
}
| parse | identifier_name |
server.rs | use std::thread;
use std::sync::mpsc;
use websocket;
use websocket::{Message, Sender, Receiver};
use websocket::server::sender;
use websocket::stream::WebSocketStream;
use websocket::message::CloseData;
use std::io::prelude::*;
use std::fs::{OpenOptions, File};
use std::net::Shutdown;
use rustc_serialize::json::{Json, ToJson};
use cbor;
use hyper::header::Cookie;
use value::Value;
use relation::Change;
use flow::{Changes, Flow};
use client;
pub trait FromJson {
fn from_json(json: &Json) -> Self;
}
impl ToJson for Value {
fn to_json(&self) -> Json {
match *self {
Value::Null => panic!("Cannot allow the client to see nulls"),
Value::Bool(bool) => Json::Boolean(bool),
Value::String(ref string) => Json::String(string.clone()),
Value::Float(float) => Json::F64(float),
Value::Column(ref column) => Json::Array(column.iter().map(|v| v.to_json()).collect()),
}
}
}
impl FromJson for Value {
fn from_json(json: &Json) -> Self {
match *json {
Json::Boolean(bool) => Value::Bool(bool),
Json::String(ref string) => Value::String(string.clone()),
Json::F64(float) => Value::Float(float),
Json::I64(int) => Value::Float(int as f64),
Json::U64(uint) => Value::Float(uint as f64),
Json::Array(ref array) => Value::Column(array.iter().map(FromJson::from_json).collect()),
_ => panic!("Cannot decode {:?} as Value", json),
}
}
}
impl FromJson for String {
fn | (json: &Json) -> Self {
json.as_string().unwrap().to_owned()
}
}
impl<T: FromJson> FromJson for Vec<T> {
fn from_json(json: &Json) -> Self {
json.as_array().unwrap().iter().map(|t| FromJson::from_json(t)).collect()
}
}
#[derive(Debug, Clone)]
pub struct Event {
pub changes: Changes,
pub session: String,
}
impl ToJson for Event {
fn to_json(&self) -> Json {
Json::Object(vec![
("changes".to_string(), Json::Array(
self.changes.iter().map(|&(ref view_id, ref view_changes)| {
Json::Array(vec![
view_id.to_json(),
view_changes.fields.to_json(),
view_changes.insert.to_json(),
view_changes.remove.to_json(),
])
}).collect()
)
),
("session".to_string(), self.session.to_json()),
].into_iter().collect())
}
}
impl FromJson for Event {
fn from_json(json: &Json) -> Self {
Event{
changes: json.as_object().unwrap()["changes"]
.as_array().unwrap().iter().map(|change| {
let change = change.as_array().unwrap();
assert_eq!(change.len(), 4);
let view_id = FromJson::from_json(&change[0]);
let fields = FromJson::from_json(&change[1]);
let insert = FromJson::from_json(&change[2]);
let remove = FromJson::from_json(&change[3]);
(view_id, Change{fields:fields, insert: insert, remove: remove})
}).collect(),
session: "".to_string(),
}
}
}
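// On the wire an Event is the JSON object
//   {"changes": [[view_id, fields, insert, remove], ...], "session": "<session id>"}
// as produced by the ToJson impl above and mirrored by the FromJson impl.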
pub enum ServerEvent {
Change(Vec<u8>),
Sync((sender::Sender<WebSocketStream>,Option<String>)),
Terminate(Option<CloseData>),
}
// TODO holy crap why is everything blocking? this is a mess
pub fn server_events() -> mpsc::Receiver<ServerEvent> {
let (event_sender, event_receiver) = mpsc::channel();
thread::spawn(move || {
let server = websocket::Server::bind("0.0.0.0:2794").unwrap();
for connection in server {
let event_sender = event_sender.clone();
thread::spawn(move || {
// accept request
let request = connection.unwrap().read_request().unwrap();
request.validate().unwrap();
// Get the User ID from a cookie in the headers
let user_id = get_user_id(request.headers.get::<Cookie>());
let response = request.accept();
let (mut sender, mut receiver) = response.send().unwrap().split();
let ip = sender.get_mut().peer_addr().unwrap();
println!("Connection from {}", ip);
::std::io::stdout().flush().unwrap(); // TODO is this actually necessary?
// hand over sender
event_sender.send(ServerEvent::Sync((sender,user_id))).unwrap();
// handle messages
for message in receiver.incoming_messages() {
let message = match message {
Ok(m) => m,
Err(_) => return,
};
match message {
Message::Binary(bytes) => {
event_sender.send(ServerEvent::Change(bytes)).unwrap();
}
Message::Close(_) => {
let ip_addr = format!("{}", ip);
println!("Received close message from {}.",ip_addr);
let close_message = CloseData{status_code: 0, reason: ip_addr};
event_sender.send(ServerEvent::Terminate(Some(close_message))).unwrap();
}
_ => println!("Unknown message: {:?}", message)
}
}
});
}
});
event_receiver
}
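// server_events spawns one thread per websocket connection and funnels Sync, Change and
// Terminate notifications into a single mpsc channel, which run() below consumes serially.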
pub fn load(flow: &mut Flow, filename: &str) {
let mut events = OpenOptions::new().create(true).open(filename).unwrap();
let mut old_events = String::new();
events.read_to_string(&mut old_events).unwrap();
for line in old_events.lines() {
let json = Json::from_str(&line).unwrap();
let event: Event = FromJson::from_json(&json);
flow.quiesce(event.changes);
}
}
pub struct Server {
pub flow: Flow,
pub events: File,
pub senders: Vec<sender::Sender<WebSocketStream>>,
}
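// handle_event appends the raw event JSON to the on-disk log, applies its changes to the
// flow, diffs the new flow against the old one, and broadcasts the diff to every sender.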
pub fn handle_event(server: &mut Server, event: Event, event_json: Json) {
server.events.write_all(format!("{}", event_json).as_bytes()).unwrap();
server.events.write_all("\n".as_bytes()).unwrap();
server.events.flush().unwrap();
let old_flow = time!("cloning", {
server.flow.clone()
});
server.flow.quiesce(event.changes);
let changes = time!("diffing", {
server.flow.changes_from(old_flow)
});
for sender in server.senders.iter_mut() {
let session_id = format!("{}", sender.get_mut().peer_addr().unwrap());
let text = format!("{}", Event{changes: changes.clone(), session: session_id}.to_json());
match sender.send_message(Message::Text(text)) {
Ok(_) => (),
Err(error) => println!("Send error: {}", error),
};
}
}
pub fn run() {
let mut flow = Flow::new();
time!("reading saved state", {
load(&mut flow, "./bootstrap");
load(&mut flow, "./events");
});
let events = OpenOptions::new().write(true).append(true).open("./events").unwrap();
let senders: Vec<sender::Sender<WebSocketStream>> = Vec::new();
let mut server = Server{flow: flow, events: events, senders: senders};
for server_event in server_events() {
match server_event {
ServerEvent::Sync((mut sender,user_id)) => {
// Add a session to the session table
let session_id = format!("{}", sender.get_mut().peer_addr().unwrap());
let mut add_session = client::insert_fact(&"sessions",&vec!["id","status"],&vec![Value::String(session_id.clone()),
Value::Float(1f64)
],None);
// If we have a user ID, add a mapping from the session ID to the user ID
add_session = match user_id {
Some(user_id) => {
client::insert_fact(&"session id to user id",&vec!["session id","user id"],&vec![Value::String(session_id.clone()),
Value::String(user_id),
],Some(add_session))
},
None => add_session,
};
let json = add_session.to_json();
handle_event(&mut server, add_session, json);
let changes = server.flow.as_changes();
let text = format!("{}", Event{changes: changes, session: session_id}.to_json());
match sender.send_message(Message::Text(text)) {
Ok(_) => (),
Err(error) => println!("Send error: {}", error),
};
server.senders.push(sender)
}
ServerEvent::Change(input_bytes) => {
// TODO we throw cbor in here to avoid https://github.com/rust-lang/rustc-serialize/issues/113
let mut decoder = cbor::Decoder::from_bytes(&input_bytes[..]);
let cbor = decoder.items().next().unwrap().unwrap();
let json = cbor.to_json();
let event = FromJson::from_json(&json);
handle_event(&mut server, event, json);
}
ServerEvent::Terminate(m) => {
let terminate_ip = m.unwrap().reason;
println!("Closing connection from {}...",terminate_ip);
// Find the index of the connection's sender
let ip_ix = server.senders.iter_mut().position(|mut sender| {
let ip = format!("{}",sender.get_mut().peer_addr().unwrap());
ip == terminate_ip
});
// Properly clean up connections and the session table
match ip_ix {
Some(ix) => {
// Close the connection
let _ = server.senders[ix].send_message(Message::Close(None));
match server.senders[ix].get_mut().shutdown(Shutdown::Both) {
Ok(_) => println!("Connection from {} has closed successfully.",terminate_ip),
Err(e) => println!("Connection from {} failed to shut down properly: {}",terminate_ip,e),
}
server.senders.remove(ix);
// Update the session table
let sessions = server.flow.get_output("sessions").clone();
let ip_string = Value::String(terminate_ip.clone());
match sessions.find_maybe("id",&ip_string) {
Some(session) => {
let closed_session = session.clone();
let mut close_session_values = &mut closed_session.values.to_vec();
let status_ix = match closed_session.names.iter().position(|name| name == "status") {
Some(ix) => ix,
None => panic!("No field named \"status\""),
};
close_session_values[status_ix] = Value::Float(0f64);
let change = Change {
fields: sessions.fields.clone(),
insert: vec![close_session_values.clone()],
remove: vec![session.values.to_vec().clone()],
};
let event = Event{changes: vec![("sessions".to_string(),change)], session: "".to_string()};
let json = event.to_json();
handle_event(&mut server, event, json);
},
None => println!("No session found"),
}
},
None => panic!("IP address {} is not connected",terminate_ip),
}
}
}
}
}
pub fn get_user_id(cookies: Option<&Cookie>) -> Option<String> {
match cookies {
Some(cookies) => {
match cookies.iter().find(|cookie| cookie.name == "userid") {
Some(user_id) => Some(user_id.value.clone()),
None => None,
}
},
None => None,
}
} | from_json | identifier_name |
server.rs | use std::thread;
use std::sync::mpsc;
use websocket;
use websocket::{Message, Sender, Receiver};
use websocket::server::sender;
use websocket::stream::WebSocketStream;
use websocket::message::CloseData;
use std::io::prelude::*;
use std::fs::{OpenOptions, File};
use std::net::Shutdown;
use rustc_serialize::json::{Json, ToJson};
use cbor;
use hyper::header::Cookie;
use value::Value;
use relation::Change;
use flow::{Changes, Flow};
use client;
pub trait FromJson {
fn from_json(json: &Json) -> Self;
}
impl ToJson for Value {
fn to_json(&self) -> Json {
match *self {
Value::Null => panic!("Cannot allow the client to see nulls"),
Value::Bool(bool) => Json::Boolean(bool),
Value::String(ref string) => Json::String(string.clone()),
Value::Float(float) => Json::F64(float),
Value::Column(ref column) => Json::Array(column.iter().map(|v| v.to_json()).collect()),
}
}
}
impl FromJson for Value {
fn from_json(json: &Json) -> Self {
match *json {
Json::Boolean(bool) => Value::Bool(bool),
Json::String(ref string) => Value::String(string.clone()),
Json::F64(float) => Value::Float(float),
Json::I64(int) => Value::Float(int as f64),
Json::U64(uint) => Value::Float(uint as f64),
Json::Array(ref array) => Value::Column(array.iter().map(FromJson::from_json).collect()),
_ => panic!("Cannot decode {:?} as Value", json),
}
}
}
impl FromJson for String {
fn from_json(json: &Json) -> Self {
json.as_string().unwrap().to_owned()
}
}
impl<T: FromJson> FromJson for Vec<T> {
fn from_json(json: &Json) -> Self |
}
#[derive(Debug, Clone)]
pub struct Event {
pub changes: Changes,
pub session: String,
}
impl ToJson for Event {
fn to_json(&self) -> Json {
Json::Object(vec![
("changes".to_string(), Json::Array(
self.changes.iter().map(|&(ref view_id, ref view_changes)| {
Json::Array(vec![
view_id.to_json(),
view_changes.fields.to_json(),
view_changes.insert.to_json(),
view_changes.remove.to_json(),
])
}).collect()
)
),
("session".to_string(), self.session.to_json()),
].into_iter().collect())
}
}
impl FromJson for Event {
fn from_json(json: &Json) -> Self {
Event{
changes: json.as_object().unwrap()["changes"]
.as_array().unwrap().iter().map(|change| {
let change = change.as_array().unwrap();
assert_eq!(change.len(), 4);
let view_id = FromJson::from_json(&change[0]);
let fields = FromJson::from_json(&change[1]);
let insert = FromJson::from_json(&change[2]);
let remove = FromJson::from_json(&change[3]);
(view_id, Change{fields:fields, insert: insert, remove: remove})
}).collect(),
session: "".to_string(),
}
}
}
pub enum ServerEvent {
Change(Vec<u8>),
Sync((sender::Sender<WebSocketStream>,Option<String>)),
Terminate(Option<CloseData>),
}
// TODO holy crap why is everything blocking? this is a mess
pub fn server_events() -> mpsc::Receiver<ServerEvent> {
let (event_sender, event_receiver) = mpsc::channel();
thread::spawn(move || {
let server = websocket::Server::bind("0.0.0.0:2794").unwrap();
for connection in server {
let event_sender = event_sender.clone();
thread::spawn(move || {
// accept request
let request = connection.unwrap().read_request().unwrap();
request.validate().unwrap();
// Get the User ID from a cookie in the headers
let user_id = get_user_id(request.headers.get::<Cookie>());
let response = request.accept();
let (mut sender, mut receiver) = response.send().unwrap().split();
let ip = sender.get_mut().peer_addr().unwrap();
println!("Connection from {}", ip);
::std::io::stdout().flush().unwrap(); // TODO is this actually necessary?
// hand over sender
event_sender.send(ServerEvent::Sync((sender,user_id))).unwrap();
// handle messages
for message in receiver.incoming_messages() {
let message = match message {
Ok(m) => m,
Err(_) => return,
};
match message {
Message::Binary(bytes) => {
event_sender.send(ServerEvent::Change(bytes)).unwrap();
}
Message::Close(_) => {
let ip_addr = format!("{}", ip);
println!("Received close message from {}.",ip_addr);
let close_message = CloseData{status_code: 0, reason: ip_addr};
event_sender.send(ServerEvent::Terminate(Some(close_message))).unwrap();
}
_ => println!("Unknown message: {:?}", message)
}
}
});
}
});
event_receiver
}
pub fn load(flow: &mut Flow, filename: &str) {
let mut events = OpenOptions::new().create(true).open(filename).unwrap();
let mut old_events = String::new();
events.read_to_string(&mut old_events).unwrap();
for line in old_events.lines() {
let json = Json::from_str(&line).unwrap();
let event: Event = FromJson::from_json(&json);
flow.quiesce(event.changes);
}
}
pub struct Server {
pub flow: Flow,
pub events: File,
pub senders: Vec<sender::Sender<WebSocketStream>>,
}
pub fn handle_event(server: &mut Server, event: Event, event_json: Json) {
server.events.write_all(format!("{}", event_json).as_bytes()).unwrap();
server.events.write_all("\n".as_bytes()).unwrap();
server.events.flush().unwrap();
let old_flow = time!("cloning", {
server.flow.clone()
});
server.flow.quiesce(event.changes);
let changes = time!("diffing", {
server.flow.changes_from(old_flow)
});
for sender in server.senders.iter_mut() {
let session_id = format!("{}", sender.get_mut().peer_addr().unwrap());
let text = format!("{}", Event{changes: changes.clone(), session: session_id}.to_json());
match sender.send_message(Message::Text(text)) {
Ok(_) => (),
Err(error) => println!("Send error: {}", error),
};
}
}
pub fn run() {
let mut flow = Flow::new();
time!("reading saved state", {
load(&mut flow, "./bootstrap");
load(&mut flow, "./events");
});
let events = OpenOptions::new().write(true).append(true).open("./events").unwrap();
let senders: Vec<sender::Sender<WebSocketStream>> = Vec::new();
let mut server = Server{flow: flow, events: events, senders: senders};
for server_event in server_events() {
match server_event {
ServerEvent::Sync((mut sender,user_id)) => {
// Add a session to the session table
let session_id = format!("{}", sender.get_mut().peer_addr().unwrap());
let mut add_session = client::insert_fact(&"sessions",&vec!["id","status"],&vec![Value::String(session_id.clone()),
Value::Float(1f64)
],None);
// If we have a user ID, add a mapping from the session ID to the user ID
add_session = match user_id {
Some(user_id) => {
client::insert_fact(&"session id to user id",&vec!["session id","user id"],&vec![Value::String(session_id.clone()),
Value::String(user_id),
],Some(add_session))
},
None => add_session,
};
let json = add_session.to_json();
handle_event(&mut server, add_session, json);
let changes = server.flow.as_changes();
let text = format!("{}", Event{changes: changes, session: session_id}.to_json());
match sender.send_message(Message::Text(text)) {
Ok(_) => (),
Err(error) => println!("Send error: {}", error),
};
server.senders.push(sender)
}
ServerEvent::Change(input_bytes) => {
// TODO we throw cbor in here to avoid https://github.com/rust-lang/rustc-serialize/issues/113
let mut decoder = cbor::Decoder::from_bytes(&input_bytes[..]);
let cbor = decoder.items().next().unwrap().unwrap();
let json = cbor.to_json();
let event = FromJson::from_json(&json);
handle_event(&mut server, event, json);
}
ServerEvent::Terminate(m) => {
let terminate_ip = m.unwrap().reason;
println!("Closing connection from {}...",terminate_ip);
// Find the index of the connection's sender
let ip_ix = server.senders.iter_mut().position(|mut sender| {
let ip = format!("{}",sender.get_mut().peer_addr().unwrap());
ip == terminate_ip
});
// Properly clean up connections and the session table
match ip_ix {
Some(ix) => {
// Close the connection
let _ = server.senders[ix].send_message(Message::Close(None));
match server.senders[ix].get_mut().shutdown(Shutdown::Both) {
Ok(_) => println!("Connection from {} has closed successfully.",terminate_ip),
Err(e) => println!("Connection from {} failed to shut down properly: {}",terminate_ip,e),
}
server.senders.remove(ix);
// Update the session table
let sessions = server.flow.get_output("sessions").clone();
let ip_string = Value::String(terminate_ip.clone());
match sessions.find_maybe("id",&ip_string) {
Some(session) => {
let closed_session = session.clone();
let mut close_session_values = &mut closed_session.values.to_vec();
let status_ix = match closed_session.names.iter().position(|name| name == "status") {
Some(ix) => ix,
None => panic!("No field named \"status\""),
};
close_session_values[status_ix] = Value::Float(0f64);
let change = Change {
fields: sessions.fields.clone(),
insert: vec![close_session_values.clone()],
remove: vec![session.values.to_vec().clone()],
};
let event = Event{changes: vec![("sessions".to_string(),change)], session: "".to_string()};
let json = event.to_json();
handle_event(&mut server, event, json);
},
None => println!("No session found"),
}
},
None => panic!("IP address {} is not connected",terminate_ip),
}
}
}
}
}
pub fn get_user_id(cookies: Option<&Cookie>) -> Option<String> {
match cookies {
Some(cookies) => {
match cookies.iter().find(|cookie| cookie.name == "userid") {
Some(user_id) => Some(user_id.value.clone()),
None => None,
}
},
None => None,
}
} | {
json.as_array().unwrap().iter().map(|t| FromJson::from_json(t)).collect()
} | identifier_body |
server.rs | use std::thread;
use std::sync::mpsc;
use websocket;
use websocket::{Message, Sender, Receiver};
use websocket::server::sender;
use websocket::stream::WebSocketStream;
use websocket::message::CloseData;
use std::io::prelude::*;
use std::fs::{OpenOptions, File};
use std::net::Shutdown;
use rustc_serialize::json::{Json, ToJson};
use cbor;
use hyper::header::Cookie;
use value::Value;
use relation::Change;
use flow::{Changes, Flow};
use client;
pub trait FromJson {
fn from_json(json: &Json) -> Self;
}
impl ToJson for Value {
fn to_json(&self) -> Json {
match *self {
Value::Null => panic!("Cannot allow the client to see nulls"),
Value::Bool(bool) => Json::Boolean(bool),
Value::String(ref string) => Json::String(string.clone()),
Value::Float(float) => Json::F64(float),
Value::Column(ref column) => Json::Array(column.iter().map(|v| v.to_json()).collect()),
}
}
}
impl FromJson for Value {
fn from_json(json: &Json) -> Self {
match *json {
Json::Boolean(bool) => Value::Bool(bool),
Json::String(ref string) => Value::String(string.clone()),
Json::F64(float) => Value::Float(float),
Json::I64(int) => Value::Float(int as f64),
Json::U64(uint) => Value::Float(uint as f64),
Json::Array(ref array) => Value::Column(array.iter().map(FromJson::from_json).collect()),
_ => panic!("Cannot decode {:?} as Value", json),
}
}
}
impl FromJson for String {
fn from_json(json: &Json) -> Self {
json.as_string().unwrap().to_owned()
}
}
impl<T: FromJson> FromJson for Vec<T> {
fn from_json(json: &Json) -> Self {
json.as_array().unwrap().iter().map(|t| FromJson::from_json(t)).collect()
}
}
#[derive(Debug, Clone)]
pub struct Event {
pub changes: Changes,
pub session: String,
}
impl ToJson for Event {
fn to_json(&self) -> Json { | ("changes".to_string(), Json::Array(
self.changes.iter().map(|&(ref view_id, ref view_changes)| {
Json::Array(vec![
view_id.to_json(),
view_changes.fields.to_json(),
view_changes.insert.to_json(),
view_changes.remove.to_json(),
])
}).collect()
)
),
("session".to_string(), self.session.to_json()),
].into_iter().collect())
}
}
impl FromJson for Event {
fn from_json(json: &Json) -> Self {
Event{
changes: json.as_object().unwrap()["changes"]
.as_array().unwrap().iter().map(|change| {
let change = change.as_array().unwrap();
assert_eq!(change.len(), 4);
let view_id = FromJson::from_json(&change[0]);
let fields = FromJson::from_json(&change[1]);
let insert = FromJson::from_json(&change[2]);
let remove = FromJson::from_json(&change[3]);
(view_id, Change{fields:fields, insert: insert, remove: remove})
}).collect(),
session: "".to_string(),
}
}
}
pub enum ServerEvent {
Change(Vec<u8>),
Sync((sender::Sender<WebSocketStream>,Option<String>)),
Terminate(Option<CloseData>),
}
// TODO holy crap why is everything blocking? this is a mess
pub fn server_events() -> mpsc::Receiver<ServerEvent> {
let (event_sender, event_receiver) = mpsc::channel();
thread::spawn(move || {
let server = websocket::Server::bind("0.0.0.0:2794").unwrap();
for connection in server {
let event_sender = event_sender.clone();
thread::spawn(move || {
// accept request
let request = connection.unwrap().read_request().unwrap();
request.validate().unwrap();
// Get the User ID from a cookie in the headers
let user_id = get_user_id(request.headers.get::<Cookie>());
let response = request.accept();
let (mut sender, mut receiver) = response.send().unwrap().split();
let ip = sender.get_mut().peer_addr().unwrap();
println!("Connection from {}", ip);
::std::io::stdout().flush().unwrap(); // TODO is this actually necessary?
// hand over sender
event_sender.send(ServerEvent::Sync((sender,user_id))).unwrap();
// handle messages
for message in receiver.incoming_messages() {
let message = match message {
Ok(m) => m,
Err(_) => return,
};
match message {
Message::Binary(bytes) => {
event_sender.send(ServerEvent::Change(bytes)).unwrap();
}
Message::Close(_) => {
let ip_addr = format!("{}", ip);
println!("Received close message from {}.",ip_addr);
let close_message = CloseData{status_code: 0, reason: ip_addr};
event_sender.send(ServerEvent::Terminate(Some(close_message))).unwrap();
}
_ => println!("Unknown message: {:?}", message)
}
}
});
}
});
event_receiver
}
pub fn load(flow: &mut Flow, filename: &str) {
let mut events = OpenOptions::new().create(true).open(filename).unwrap();
let mut old_events = String::new();
events.read_to_string(&mut old_events).unwrap();
for line in old_events.lines() {
let json = Json::from_str(&line).unwrap();
let event: Event = FromJson::from_json(&json);
flow.quiesce(event.changes);
}
}
pub struct Server {
pub flow: Flow,
pub events: File,
pub senders: Vec<sender::Sender<WebSocketStream>>,
}
pub fn handle_event(server: &mut Server, event: Event, event_json: Json) {
server.events.write_all(format!("{}", event_json).as_bytes()).unwrap();
server.events.write_all("\n".as_bytes()).unwrap();
server.events.flush().unwrap();
let old_flow = time!("cloning", {
server.flow.clone()
});
server.flow.quiesce(event.changes);
let changes = time!("diffing", {
server.flow.changes_from(old_flow)
});
for sender in server.senders.iter_mut() {
let session_id = format!("{}", sender.get_mut().peer_addr().unwrap());
let text = format!("{}", Event{changes: changes.clone(), session: session_id}.to_json());
match sender.send_message(Message::Text(text)) {
Ok(_) => (),
Err(error) => println!("Send error: {}", error),
};
}
}
pub fn run() {
let mut flow = Flow::new();
time!("reading saved state", {
load(&mut flow, "./bootstrap");
load(&mut flow, "./events");
});
let events = OpenOptions::new().write(true).append(true).open("./events").unwrap();
let senders: Vec<sender::Sender<WebSocketStream>> = Vec::new();
let mut server = Server{flow: flow, events: events, senders: senders};
for server_event in server_events() {
match server_event {
ServerEvent::Sync((mut sender,user_id)) => {
// Add a session to the session table
let session_id = format!("{}", sender.get_mut().peer_addr().unwrap());
let mut add_session = client::insert_fact(&"sessions",&vec!["id","status"],&vec![Value::String(session_id.clone()),
Value::Float(1f64)
],None);
// If we have a user ID, add a mapping from the session ID to the user ID
add_session = match user_id {
Some(user_id) => {
client::insert_fact(&"session id to user id",&vec!["session id","user id"],&vec![Value::String(session_id.clone()),
Value::String(user_id),
],Some(add_session))
},
None => add_session,
};
let json = add_session.to_json();
handle_event(&mut server, add_session, json);
let changes = server.flow.as_changes();
let text = format!("{}", Event{changes: changes, session: session_id}.to_json());
match sender.send_message(Message::Text(text)) {
Ok(_) => (),
Err(error) => println!("Send error: {}", error),
};
server.senders.push(sender)
}
ServerEvent::Change(input_bytes) => {
// TODO we throw cbor in here to avoid https://github.com/rust-lang/rustc-serialize/issues/113
let mut decoder = cbor::Decoder::from_bytes(&input_bytes[..]);
let cbor = decoder.items().next().unwrap().unwrap();
let json = cbor.to_json();
let event = FromJson::from_json(&json);
handle_event(&mut server, event, json);
}
ServerEvent::Terminate(m) => {
let terminate_ip = m.unwrap().reason;
println!("Closing connection from {}...",terminate_ip);
// Find the index of the connection's sender
let ip_ix = server.senders.iter_mut().position(|mut sender| {
let ip = format!("{}",sender.get_mut().peer_addr().unwrap());
ip == terminate_ip
});
// Properly clean up connections and the session table
match ip_ix {
Some(ix) => {
// Close the connection
let _ = server.senders[ix].send_message(Message::Close(None));
match server.senders[ix].get_mut().shutdown(Shutdown::Both) {
Ok(_) => println!("Connection from {} has closed successfully.",terminate_ip),
Err(e) => println!("Connection from {} failed to shut down properly: {}",terminate_ip,e),
}
server.senders.remove(ix);
// Update the session table
let sessions = server.flow.get_output("sessions").clone();
let ip_string = Value::String(terminate_ip.clone());
match sessions.find_maybe("id",&ip_string) {
Some(session) => {
let closed_session = session.clone();
let mut close_session_values = &mut closed_session.values.to_vec();
let status_ix = match closed_session.names.iter().position(|name| name == "status") {
Some(ix) => ix,
None => panic!("No field named \"status\""),
};
close_session_values[status_ix] = Value::Float(0f64);
let change = Change {
fields: sessions.fields.clone(),
insert: vec![close_session_values.clone()],
remove: vec![session.values.to_vec().clone()],
};
let event = Event{changes: vec![("sessions".to_string(),change)], session: "".to_string()};
let json = event.to_json();
handle_event(&mut server, event, json);
},
None => println!("No session found"),
}
},
None => panic!("IP address {} is not connected",terminate_ip),
}
}
}
}
}
pub fn get_user_id(cookies: Option<&Cookie>) -> Option<String> {
match cookies {
Some(cookies) => {
match cookies.iter().find(|cookie| cookie.name == "userid") {
Some(user_id) => Some(user_id.value.clone()),
None => None,
}
},
None => None,
}
} | Json::Object(vec![ | random_line_split |
index.js | import { app, BrowserWindow, ipcMain, shell } from 'electron'
import Config from '../../static/js/config'
const Store = require('electron-store')
let store = new Store()
const path = require('path')
const { Parser } = require('m3u8-parser')
const fs = require('fs')
const async = require('async')
const dateFormat = require('dateformat')
const crypto = require('crypto')
const got = require('got')
const ffmpeg = require('fluent-ffmpeg')
const download = require('download')
const httpTimeout = {socket: 300000, request: 300000, response:300000};
const ffmpegPath = process.env.NODE_ENV == 'development' ? path.resolve(__dirname, '../../ffmpeg.exe') : path.resolve(__dirname, '../../../ffmpeg.exe')
ffmpeg.setFfmpegPath(ffmpegPath)
let mainWindow = null
var configVideos = [];
let globalCond = {};
const globalConfigDir = app.getPath('userData');
const globalConfigVideoPath = path.join(globalConfigDir,'config_videos.json');
if (process.env.NODE_ENV !== 'development') {
global.__static = require('path').join(__dirname, '/static').replace(/\\/g, '\\\\')
}
const winURL = process.env.NODE_ENV === 'development'
? `http://localhost:9080`
: `file://${__dirname}/index.html`
function createWindow () {
/**
* Initial window options
*/
mainWindow = new BrowserWindow({
height: 550,
width: 700,
useContentSize: true,
minWidth:700,
minHeight:550,
maxWidth:700,
maxHeight:550,
frame: false,
show:false,
webPreferences: {
nodeIntegration: true
}
})
new Config(mainWindow)
mainWindow.loadURL(winURL)
mainWindow.on('ready-to-show', function () {
mainWindow.show() // show the window only after initialization is complete
})
mainWindow.on('closed', () => {
mainWindow = null
})
}
let playerWindow = null
function createPlayerWindow(src) {
if(playerWindow == null)
{
// Create the browser window
playerWindow = new BrowserWindow({
width: 1024,
height: 620,
skipTaskbar: false,
transparent: false,
frame: false,
resizable: true,
webPreferences: {
nodeIntegration: true
},
alwaysOnTop: false,
hasShadow: false,
parent: mainWindow
});
playerWindow.setMenu(null)
playerWindow.on('closed', () => {
// Dereference the window object. If your app supports multiple windows,
// they are usually kept in an array; this is where the corresponding
// element should be removed.
playerWindow = null;
})
}
// Load the player HTML file
let playerSrc = process.env.NODE_ENV == 'development' ? path.resolve(__dirname, '../../static/player.html') : path.resolve(__dirname, './static/player.html')
playerWindow.loadFile(playerSrc, {search:"src="+src});
}
app.on('ready', createWindow)
app.allowRendererProcessReuse = true
app.on('window-all-closed', () => {
if (process.platform !== 'darwin') {
app.quit()
}
})
app.on('activate', () => {
if (mainWindow === null) {
createWindow()
}
})
ipcMain.on('opendir', function (event, arg) {
shell.openExternal(arg);
});
ipcMain.on('playvideo', function (event, arg) {
createPlayerWindow(arg);
});
ipcMain.on('delvideo', function (event, id) {
configVideos.forEach(Element=>{
if(Element.id==id)
{
try {
if(fs.existsSync(Element.dir)) {
var files = fs.readdirSync(Element.dir)
files.forEach(e=>{
fs.unlinkSync(path.join(Element.dir,e) );
})
fs.rmdirSync(Element.dir,{recursive :true})
}
var nIdx = configVideos.indexOf(Element);
if( nIdx > -1)
{
configVideos.splice(nIdx,1);
fs.writeFileSync(globalConfigVideoPath,JSON.stringify(configVideos));
}
event.sender.send("delvideo-reply",Element);
} catch (error) {
console.log(error)
}
}
});
});
ipcMain.on('StartOrStop', function (event, arg) {
let id = Number.parseInt(arg);
if(globalCond[id] == null)
{
console.log("不存在此任务")
return;
}
globalCond[id] = !globalCond[id];
if(globalCond[id] == true)
{
configVideos.forEach(Element=>{
if(Element.id==id)
{
if(Element.isLiving == true)
{
startDownloadLive(Element.url, Element.headers, id);
}
else
{
startDownload(Element.url, Element.headers, id);
}
}
});
}
});
ipcMain.on('task-add', async function (event, arg, headers) {
let src = arg;
let _headers = {};
if(headers != '')
{
let __ = headers.match(/(.*?): ?(.*?)(\n|\r|$)/g);
__ && __.forEach((_)=>{
let ___ = _.match(/(.*?): ?(.*?)(\n|\r|$)/i);
___ && (_headers[___[1]] = ___[2]);
});
}
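// If the caller did not supply Origin/Referer headers, default them below to the host of
// the m3u8 URL, since many servers check them before serving segments.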
let mes = src.match(/^https?:\/\/[^/]*/);
let _hosts = '';
if(mes && mes.length >= 1)
{
_hosts = mes[0];
}
if(_headers['Origin'] == null && _headers['origin'] == null)
{
_headers['Origin'] = _hosts;
}
if(_headers['Referer'] == null && _headers['referer'] == null)
{
_headers['Referer'] = _hosts;
}
const response = await got(src, {headers: _headers, timeout: httpTimeout, https: {rejectUnauthorized: false}}).catch((error) => { console.log(error) })
{
let info = '';
let code = 0;
code = -1;
info = '解析资源失败!';
if (response && response.body != null
&& response.body != '')
{
let parser = new Parser();
parser.push(response.body);
parser.end();
let count_seg = parser.manifest.segments.length;
if (count_seg > 0) {
code = 0;
if (parser.manifest.endList) {
let duration = 0;
parser.manifest.segments.forEach(segment => {
duration += segment.duration;
});
info = `资源解析成功,有 ${count_seg} 个片段,时长:${formatTime(duration)},即将开始缓存...`;
startDownload(src, _headers);
} else {
info = `直播资源解析成功,即将开始缓存...`;
startDownloadLive(src, _headers);
}
}
}
event.sender.send('task-add-reply', { code: code, message: info });
}
})
ipcMain.on('local-task-add', function (event, arg) {
let path = arg
let info = '';
let code = 0;
code = -1;
info = '解析资源失败!';
try{
const response = fs.readFileSync(path, 'utf-8')
if (response){
let parser = new Parser();
parser.push(response);
parser.end();
let count_seg = parser.manifest.segments.length;
if (count_seg > 0) {
code = 0;
if (parser.manifest.endList) {
let duration = 0;
parser.manifest.segments.forEach(segment => {
duration += segment.duration;
});
info = `资源解析成功,有 ${count_seg} 个片段,时长:${formatTime(duration)},即将开始缓存...`;
startDownloadLocal(path);
} else {
info = `解析资源失败!`;
}
}
}
}catch(error) {
console.log(error)
}
event.sender.send('task-add-reply', { code: code, message: info });
})
function formatTime(duration) {
let sec = Math.floor(duration % 60).toLocaleString();
let min = Math.floor(duration / 60 % 60).toLocaleString();
let hour = Math.floor(duration / 3600 % 60).toLocaleString();
if (sec.length != 2) sec = '0' + sec;
if (min.length != 2) min = '0' + min;
if (hour.length != 2) hour = '0' + hour;
return hour + ":" + min + ":" + sec;
}
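// Example: formatTime(3725) === "01:02:05" (zero-padded hours:minutes:seconds).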
class QueueObject {
constructor() {
this.segment = null;
this.url = '';
this.headers = '';
this.id = 0;
this.idx = 0;
this.dir = '';
this.then = this.catch = null;
}
async callback( _callback ) {
try{
if(!globalCond[this.id])
{
console.log(`globalCond[this.id] does not exist.`);
return;
}
let partent_uri = this.url.replace(/([^\/]*\?.*$)|([^\/]*$)/g, '');
let segment = this.segment;
let uri_ts = '';
if (/^http.*/.test(segment.uri)) {
uri_ts = segment.uri;
}
else if(/^\/.*/.test(segment.uri))
{
let mes = this.url.match(/^https?:\/\/[^/]*/);
if(mes && mes.length >= 1)
{
uri_ts = mes[0] + segment.uri;
}
else
{
uri_ts = partent_uri + segment.uri;
}
}
else
{
uri_ts = partent_uri + segment.uri;
}
let filename = `${ ((this.idx + 1) +'').padStart(6,'0')}.ts`;
let filpath = path.join(this.dir, filename);
let filpath_dl = path.join(this.dir, filename+".dl");
// console.log(`2 ${segment.uri}`,`${filename}`);
// Check whether the segment file already exists (retry up to 3 times if not)
for (let index = 0; index < 3 && !fs.existsSync(filpath); index++) {
// Download to a temporary file with a ".dl" suffix, then rename it once the download completes
let that = this;
await download (uri_ts, that.dir, {filename:filename + ".dl",timeout:httpTimeout,headers:that.headers}).catch((err)=>{
console.log(err)
if(fs.existsSync(filpath_dl)) fs.unlinkSync( filpath_dl);
});
if(!fs.existsSync(filpath_dl)) continue;
if( fs.statSync(filpath_dl).size <= 0 )
{
fs.unlinkSync(filpath_dl);
}
if(segment.key != null && segment.key.method != null)
{
// Standard decryption of the TS stream
let aes_path = path.join(this.dir, "aes.key" );
if(!fs.existsSync( aes_path ))
{
let key_uri = segment.key.uri;
if (! /^http.*/.test(segment.key.uri)) {
key_uri = partent_uri + segment.key.uri;
}
else if(/^\/.*/.test(key_uri))
{
let mes = this.url.match(/^https?:\/\/[^/]*/);
if(mes && mes.length >= 1)
{
key_uri = mes[0] + segment.key.uri;
}
else
{
key_uri = partent_uri + segment.key.uri;
}
}
await download (key_uri, that.dir, { filename: "aes.key" }).catch(console.error);
}
if(fs.existsSync( aes_path ))
{
try {
let key_ = fs.readFileSync( aes_path );
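// Per the HLS spec (RFC 8216), when EXT-X-KEY carries no IV the media sequence number is
// used as a big-endian 128-bit IV; the segment index serves as that fallback here.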
let iv_ = segment.key.iv != null ? Buffer.from(segment.key.iv.buffer)
:Buffer.from(that.idx.toString(16).padStart(32,'0') ,'hex' );
let cipher = crypto.createDecipheriv((segment.key.method+"-cbc").toLowerCase(), key_, iv_);
cipher.on('error', (error) => {console.log(error)});
let inputData = fs.readFileSync( filpath_dl );
let outputData = Buffer.concat([cipher.update(inputData),cipher.final()]);
fs.writeFileSync(filpath,outputData);
if(fs.existsSync(filpath_dl)) fs.unlinkSync(filpath_dl);
that.then && that.then();
} catch (error) {
console.log(error)
if(fs.existsSync( filpath_dl ))
fs.unlinkSync(filpath_dl);
}
return;
}
}
else
{
fs.renameSync(filpath_dl , filpath);
break;
}
}
if(fs.existsSync(filpath))
{
this.then && this.then();
}
else
{
this.catch && this.catch();
}
}
catch(e)
{
console.log(e);
}
finally
{
_callback();
}
}
}
function queue_callback(that, callback)
{
that.callback(callback);
}
async function startDownload(url, headers = null, nId = null) {
let id = nId == null ? new Date().getTime() : nId;
let dir = path.join(store.get('downloadPath'), '/'+id);
let filesegments = [];
if(!fs.existsSync(dir))
{
fs.mkdirSync(dir, { recursive: true });
}
const response = await got(url, {headers: headers | pTimeout, https: {rejectUnauthorized: false}}).catch((error) => { console.log(error) })
if(response == null || response.body == null || response.body == '')
{
return;
}
let parser = new Parser();
parser.push(response.body);
parser.end();
    // Download segments with a concurrency of 3
var tsQueues = async.queue(queue_callback, 3);
let count_seg = parser.manifest.segments.length;
let count_downloaded = 0;
var video = {
id:id,
url:url,
dir:dir,
segment_total:count_seg,
segment_downloaded:count_downloaded,
time: dateFormat(new Date(),"yyyy-mm-dd HH:MM:ss"),
status:'初始化...',
isLiving:false,
headers:headers,
videopath:''
};
if(nId == null)
{
mainWindow.webContents.send('task-notify-create',video);
}
globalCond[id] = true;
let segments = parser.manifest.segments;
for (let iSeg = 0; iSeg < segments.length; iSeg++) {
let qo = new QueueObject();
qo.dir = dir;
qo.idx = iSeg;
qo.id = id;
qo.url = url;
qo.headers = headers;
qo.segment = segments[iSeg];
qo.then = function(){
count_downloaded = count_downloaded + 1
video.segment_downloaded = count_downloaded;
video.status = `下载中...${count_downloaded}/${count_seg}`
mainWindow.webContents.send('task-notify-update',video);
};
tsQueues.push(qo);
}
tsQueues.drain(()=>{
console.log('download success');
video.status = "已完成,合并中..."
mainWindow.webContents.send('task-notify-end',video);
let indexData = '';
for (let iSeg = 0; iSeg < segments.length; iSeg++) {
let filpath = path.join(dir, `${ ((iSeg + 1) +'').padStart(6,'0') }.ts`);
indexData += `file '${filpath}'\r\n`;
filesegments.push(filpath);
}
fs.writeFileSync(path.join(dir,'index.txt'),indexData);
let outPathMP4 = path.join(dir,id+'.mp4');
if(fs.existsSync(ffmpegPath))
{
ffmpeg()
.input(`${path.join(dir,'index.txt')}`)
.inputOptions(['-f concat', '-safe 0'])
.outputOptions('-c copy')
.output(`${outPathMP4}`)
.on('start', function (commandLine) {
console.log('Spawned Ffmpeg with command: ' + commandLine)
})
.on('codecData', function (data) {
console.log('Input is ' + data.audio + ' audio ' + 'with ' + data.video + ' video')
})
.on('progress', function (progress) {
console.log(progress.percent)
})
.on('error', function (err, stdout, stderr) {
console.log('Cannot process video: ' + err.message)
video.videopath = outPathMP4;
video.status = "合成失败,可能是非标准加密视频源,请联系客服定制。"
mainWindow.webContents.send('task-notify-end',video);
})
.on('end', function (stdout, stderr) {
video.videopath = outPathMP4;
video.status = "已完成"
mainWindow.webContents.send('task-notify-end',video);
let index_path = path.join(dir,'index.txt');
let key_path = path.join(dir,'aes.key');
if(fs.existsSync(index_path))
{
fs.unlinkSync(index_path);
}
if(fs.existsSync(key_path))
{
fs.unlinkSync(key_path);
}
filesegments.forEach(fileseg=>{
if(fs.existsSync(fileseg))
{
fs.unlinkSync(fileseg);
}
});
})
.run()
configVideos.push(video);
}else{
video.videopath = outPathMP4;
video.status = "已完成,未发现本地FFMPEG,不进行合成。"
mainWindow.webContents.send('task-notify-end',video);
}
});
console.log("drain over");
}
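// Hedged usage sketch (the URL and headers below are made-up placeholders): startDownload is
// normally reached through the 'task-add' IPC handler, but a direct call looks like this.
// Passing an existing task id as the third argument re-uses that task's directory and renderer
// entry instead of creating a new one.
//
//   startDownload('https://example.com/hls/index.m3u8', {
//       Origin: 'https://example.com',
//       Referer: 'https://example.com'
//   });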
function startDownloadLocal(filepath, nId = null) {
let id = nId == null ? new Date().getTime() : nId;
let dir = path.join(store.get('downloadPath'), '/'+id);
let filesegments = [];
if(!fs.existsSync(dir))
{
fs.mkdirSync(dir, { recursive: true });
}
try{
const response = fs.readFileSync(filepath, 'utf-8')
if(response == '')
{
return;
}
let parser = new Parser();
parser.push(response);
parser.end();
    // Download segments with a concurrency of 3
var tsQueues = async.queue(queue_callback, 3);
let count_seg = parser.manifest.segments.length;
let count_downloaded = 0;
var video = {
id:id,
url:filepath,
dir:dir,
segment_total:count_seg,
segment_downloaded:count_downloaded,
time: dateFormat(new Date(),"yyyy-mm-dd HH:MM:ss"),
status:'初始化...',
isLiving:false,
videopath:''
};
if(nId == null)
{
mainWindow.webContents.send('task-notify-create',video);
}
globalCond[id] = true;
let segments = parser.manifest.segments;
for (let iSeg = 0; iSeg < segments.length; iSeg++) {
let qo = new QueueObject();
qo.dir = dir;
qo.idx = iSeg;
qo.id = id;
qo.url = filepath;
qo.segment = segments[iSeg];
qo.then = function(){
count_downloaded = count_downloaded + 1
video.segment_downloaded = count_downloaded;
video.status = `下载中...${count_downloaded}/${count_seg}`
mainWindow.webContents.send('task-notify-update',video);
};
tsQueues.push(qo);
}
tsQueues.drain(()=>{
console.log('download success');
video.status = "已完成,合并中..."
mainWindow.webContents.send('task-notify-end',video);
let indexData = '';
for (let iSeg = 0; iSeg < segments.length; iSeg++) {
let filpath = path.join(dir, `${ ((iSeg + 1) +'').padStart(6,'0') }.ts`);
indexData += `file '${filpath}'\r\n`;
filesegments.push(filpath);
}
fs.writeFileSync(path.join(dir,'index.txt'),indexData);
let outPathMP4 = path.join(dir,id+'.mp4');
if(fs.existsSync(ffmpegPath))
{
ffmpeg()
.input(`${path.join(dir,'index.txt')}`)
.inputOptions(['-f concat', '-safe 0'])
.outputOptions('-c copy')
.output(`${outPathMP4}`)
.on('start', function (commandLine) {
console.log('Spawned Ffmpeg with command: ' + commandLine)
})
.on('codecData', function (data) {
console.log('Input is ' + data.audio + ' audio ' + 'with ' + data.video + ' video')
})
.on('progress', function (progress) {
console.log(progress.percent)
})
.on('error', function (err, stdout, stderr) {
console.log('Cannot process video: ' + err.message)
video.videopath = outPathMP4;
video.status = "合成失败,可能是非标准加密视频源,请联系客服定制。"
mainWindow.webContents.send('task-notify-end',video);
})
.on('end', function (stdout, stderr) {
video.videopath = outPathMP4;
video.status = "已完成"
mainWindow.webContents.send('task-notify-end',video);
let index_path = path.join(dir,'index.txt');
let key_path = path.join(dir,'aes.key');
if(fs.existsSync(index_path))
{
fs.unlinkSync(index_path);
}
if(fs.existsSync(key_path))
{
fs.unlinkSync(key_path);
}
filesegments.forEach(fileseg=>{
if(fs.existsSync(fileseg))
{
fs.unlinkSync(fileseg);
}
});
})
.run()
configVideos.push(video);
}else{
video.videopath = outPathMP4;
video.status = "已完成,未发现本地FFMPEG,不进行合成。"
mainWindow.webContents.send('task-notify-end',video);
}
});
console.log("drain over");
}catch(error) {
console.log(error)
}
} | , timeout: htt | identifier_name |
index.js | import { app, BrowserWindow, ipcMain, shell } from 'electron'
import Config from '../../static/js/config'
const Store = require('electron-store')
let store = new Store()
const path = require('path')
const { Parser } = require('m3u8-parser')
const fs = require('fs')
const async = require('async')
const dateFormat = require('dateformat')
const crypto = require('crypto')
const got = require('got')
const ffmpeg = require('fluent-ffmpeg')
const download = require('download')
const httpTimeout = {socket: 300000, request: 300000, response:300000};
const ffmpegPath = process.env.NODE_ENV == 'development' ? path.resolve(__dirname, '../../ffmpeg.exe') : path.resolve(__dirname, '../../../ffmpeg.exe')
ffmpeg.setFfmpegPath(ffmpegPath)
let mainWindow = null
var configVideos = [];
let globalCond = {};
const globalConfigDir = app.getPath('userData');
const globalConfigVideoPath = path.join(globalConfigDir,'config_videos.json');
if (process.env.NODE_ENV !== 'development') {
global.__static = require('path').join(__dirname, '/static').replace(/\\/g, '\\\\')
}
const winURL = process.env.NODE_ENV === 'development'
? `http://localhost:9080`
: `file://${__dirname}/index.html`
function createWindow () {
/**
* Initial window options
*/
mainWindow = new BrowserWindow({
height: 550,
width: 700,
useContentSize: true,
minWidth:700,
minHeight:550,
maxWidth:700,
maxHeight:550,
frame: false,
show:false,
webPreferences: {
nodeIntegration: true
}
})
new Config(mainWindow)
mainWindow.loadURL(winURL)
mainWindow.on('ready-to-show', function () {
        mainWindow.show() // show the window only after it has finished initializing
})
mainWindow.on('closed', () => {
mainWindow = null
})
}
let playerWindow = null
function createPlayerWindow(src) {
if(playerWindow == null)
{
        // Create the player browser window
playerWindow = new BrowserWindow({
width: 1024,
height: 620,
skipTaskbar: false,
transparent: false,
frame: false,
resizable: true,
webPreferences: {
nodeIntegration: true
},
alwaysOnTop: false,
hasShadow: false,
parent: mainWindow
});
playerWindow.setMenu(null)
playerWindow.on('closed', () => {
            // Dereference the window object. If your app supports multiple windows,
            // you would usually keep them in an array and
            // delete the corresponding element here.
playerWindow = null;
})
}
    // Load the player page (player.html)
let playerSrc = process.env.NODE_ENV == 'development' ? path.resolve(__dirname, '../../static/player.html') : path.resolve(__dirname, './static/player.html')
playerWindow.loadFile(playerSrc, {search:"src="+src});
}
app.on('ready', createWindow)
app.allowRendererProcessReuse = true
app.on('window-all-closed', () => {
if (process.platform !== 'darwin') {
app.quit()
}
})
app.on('activate', () => {
if (mainWindow === null) {
createWindow()
}
})
ipcMain.on('opendir', function (event, arg) {
shell.openExternal(arg);
});
ipcMain.on('playvideo', function (event, arg) {
createPlayerWindow(arg);
});
ipcMain.on('delvideo', function (event, id) {
configVideos.forEach(Element=>{
if(Element.id==id)
{
try {
if(fs.existsSync(Element.dir)) {
var files = fs.readdirSync(Element.dir)
files.forEach(e=>{
fs.unlinkSync(path.join(Element.dir,e) );
})
fs.rmdirSync(Element.dir,{recursive :true})
}
var nIdx = configVideos.indexOf(Element);
if( nIdx > -1)
{
configVideos.splice(nIdx,1);
fs.writeFileSync(globalConfigVideoPath,JSON.stringify(configVideos));
}
event.sender.send("delvideo-reply",Element);
} catch (error) {
console.log(error)
}
}
});
});
ipcMain.on('StartOrStop', function (event, arg) {
let id = Number.parseInt(arg);
if(globalCond[id] == null)
{
console.log("不存在此任务")
return;
}
globalCond[id] = !globalCond[id];
if(globalCond[id] == true)
{
configVideos.forEach(Element=>{
if(Element.id==id)
{
if(Element.isLiving == true)
{
startDownloadLive(Element.url, Element.headers, id);
}
else
{
startDownload(Element.url, Element.headers, id);
}
}
});
}
});
ipcMain.on('task-add', async function (event, arg, headers) {
let src = arg;
let _headers = {};
if(headers != '')
{
let __ = headers.match(/(.*?): ?(.*?)(\n|\r|$)/g);
__ && __.forEach((_)=>{
let ___ = _.match(/(.*?): ?(.*?)(\n|\r|$)/i);
___ && (_headers[___[1]] = ___[2]);
});
}
let mes = src.match(/^https?:\/\/[^/]*/);
let _hosts = '';
if(mes && mes.length >= 1)
{
_hosts = mes[0];
}
if(_headers['Origin'] == null && _headers['origin'] == null)
{
_headers['Origin'] = _hosts;
}
if(_headers['Referer'] == null && _headers['referer'] == null)
{
_headers['Referer'] = _hosts;
}
const response = await got(src, {headers: _headers, timeout: httpTimeout, https: {rejectUnauthorized: false}}).catch((error) => { console.log(error) })
{
let info = '';
let code = 0;
code = -1;
info = '解析资源失败!';
if (response && response.body != null
&& response.body != '')
{
let parser = new Parser();
parser.push(response.body);
parser.end();
let count_seg = parser.manifest.segments.length;
if (count_seg > 0) {
code = 0;
if (parser.manifest.endList) {
let duration = 0;
parser.manifest.segments.forEach(segment => {
duration += segment.duration;
});
info = `资源解析成功,有 ${count_seg} 个片段,时长:${formatTime(duration)},即将开始缓存...`;
startDownload(src, _headers);
} else {
info = `直播资源解析成功,即将开始缓存...`;
startDownloadLive(src, _headers);
}
}
}
event.sender.send('task-add-reply', { code: code, message: info });
}
})
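// Hedged sketch of the renderer-side half of the 'task-add' round trip (channel names match the
// handler above; the URL and header text are placeholders, and this snippet is not part of the
// original file -- it belongs in renderer code, not in this main-process file):
//
//   const { ipcRenderer } = require('electron');
//   ipcRenderer.send('task-add', 'https://example.com/hls/index.m3u8', 'Referer: https://example.com');
//   ipcRenderer.on('task-add-reply', (event, reply) => {
//       console.log(reply.code === 0 ? reply.message : `failed: ${reply.message}`);
//   });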
ipcMain.on('local-task-add', function (event, arg) {
let path = arg
let info = '';
let code = 0;
code = -1;
info = '解析资源失败!';
try{
const response = fs.readFileSync(path, 'utf-8')
if (response){
let parser = new Parser();
parser.push(response);
parser.end();
let count_seg = parser.manifest.segments.length;
if (count_seg > 0) {
code = 0;
if (parser.manifest.endList) {
let duration = 0;
parser.manifest.segments.forEach(segment => {
duration += segment.duration;
});
info = `资源解析成功,有 ${count_seg} 个片段,时长:${formatTime(duration)},即将开始缓存...`;
startDownloadLocal(path);
} else {
info = `解析资源失败!`;
}
}
}
}catch(error) {
console.log(error)
}
event.sender.send('task-add-reply', { code: code, message: info });
})
function formatTime(duration) {
let sec = Math.floor(duration % 60).toLocaleString();
let min = Math.floor(duration / 60 % 60).toLocaleString();
let hour = Math.floor(duration / 3600 % 60).toLocaleString();
if (sec.length != 2) sec = '0' + sec;
if (min.length != 2) min = '0' + min;
if (hour.length != 2) hour = '0' + hour;
return hour + ":" + min + ":" + sec;
}
class QueueObject {
constructor() {
this.segment = null;
this.url = '';
this.headers = '';
this.id = 0;
this.idx = 0;
this.dir = '';
this.then = this.catch = null;
}
async callback( _callback ) {
try{
if(!globalCond[this.id])
{
                console.log(`globalCond[${this.id}] does not exist.`);
return;
}
let partent_uri = this.url.replace(/([^\/]*\?.*$)|([^\/]*$)/g, '');
let segment = this.segment;
let uri_ts = '';
if (/^http.*/.test(segment.uri)) {
uri_ts = segment.uri;
}
else if(/^\/.*/.test(segment.uri))
{
let mes = this.url.match(/^https?:\/\/[^/]*/);
if(mes && mes.length >= 1)
{
uri_ts = mes[0] + segment.uri;
}
else
{
uri_ts = partent_uri + segment.uri;
}
}
else
{
uri_ts = partent_uri + segment.uri;
}
let filename = `${ ((this.idx + 1) +'').padStart(6,'0')}.ts`;
let filpath = path.join(this.dir, filename);
let filpath_dl = path.join(this.dir, filename+".dl");
// console.log(`2 ${segment.uri}`,`${filename}`);
            // Check whether the segment file already exists
for (let index = 0; index < 3 && !fs.existsSync(filpath); index++) {
                // Download to a temporary .dl-suffixed file, then rename it once the download completes
let that = this;
await download (uri_ts, that.dir, {filename:filename + ".dl",timeout:httpTimeout,headers:that.headers}).catch((err)=>{
console.log(err)
if(fs.existsSync(filpath_dl)) fs.unlinkSync( filpath_dl);
});
if(!fs.existsSync(filpath_dl)) continue;
                if( fs.statSync(filpath_dl).size <= 0 )
                {
                    fs.unlinkSync(filpath_dl);
                    continue; // empty download: drop it and retry this segment
                }
if(segment.key != null && segment.key.method != null)
{
                    // Standard AES decryption of the TS stream
let aes_path = path.join(this.dir, "aes.key" );
if(!fs.existsSync( aes_path ))
{
let key_uri = segment.key.uri;
if (! /^http.*/.test(segment.key.uri)) {
key_uri = partent_uri + segment.key.uri;
}
else if(/^\/.*/.test(key_uri))
{
let mes = this.url.match(/^https?:\/\/[^/]*/);
if(mes && mes.length >= 1)
{
key_uri = mes[0] + segment.key.uri;
} | else
{
key_uri = partent_uri + segment.key.uri;
}
}
await download (key_uri, that.dir, { filename: "aes.key" }).catch(console.error);
}
if(fs.existsSync( aes_path ))
{
try {
let key_ = fs.readFileSync( aes_path );
let iv_ = segment.key.iv != null ? Buffer.from(segment.key.iv.buffer)
:Buffer.from(that.idx.toString(16).padStart(32,'0') ,'hex' );
let cipher = crypto.createDecipheriv((segment.key.method+"-cbc").toLowerCase(), key_, iv_);
cipher.on('error', (error) => {console.log(error)});
let inputData = fs.readFileSync( filpath_dl );
let outputData = Buffer.concat([cipher.update(inputData),cipher.final()]);
fs.writeFileSync(filpath,outputData);
if(fs.existsSync(filpath_dl)) fs.unlinkSync(filpath_dl);
that.then && that.then();
} catch (error) {
console.log(error)
if(fs.existsSync( filpath_dl ))
fs.unlinkSync(filpath_dl);
}
return;
}
}
else
{
fs.renameSync(filpath_dl , filpath);
break;
}
}
if(fs.existsSync(filpath))
{
this.then && this.then();
}
else
{
this.catch && this.catch();
}
}
catch(e)
{
console.log(e);
}
finally
{
_callback();
}
}
}
function queue_callback(that, callback)
{
that.callback(callback);
}
async function startDownload(url, headers = null, nId = null) {
let id = nId == null ? new Date().getTime() : nId;
let dir = path.join(store.get('downloadPath'), '/'+id);
let filesegments = [];
if(!fs.existsSync(dir))
{
fs.mkdirSync(dir, { recursive: true });
}
const response = await got(url, {headers: headers, timeout: httpTimeout, https: {rejectUnauthorized: false}}).catch((error) => { console.log(error) })
if(response == null || response.body == null || response.body == '')
{
return;
}
let parser = new Parser();
parser.push(response.body);
parser.end();
    // Download segments with a concurrency of 3
var tsQueues = async.queue(queue_callback, 3);
let count_seg = parser.manifest.segments.length;
let count_downloaded = 0;
var video = {
id:id,
url:url,
dir:dir,
segment_total:count_seg,
segment_downloaded:count_downloaded,
time: dateFormat(new Date(),"yyyy-mm-dd HH:MM:ss"),
status:'初始化...',
isLiving:false,
headers:headers,
videopath:''
};
if(nId == null)
{
mainWindow.webContents.send('task-notify-create',video);
}
globalCond[id] = true;
let segments = parser.manifest.segments;
for (let iSeg = 0; iSeg < segments.length; iSeg++) {
let qo = new QueueObject();
qo.dir = dir;
qo.idx = iSeg;
qo.id = id;
qo.url = url;
qo.headers = headers;
qo.segment = segments[iSeg];
qo.then = function(){
count_downloaded = count_downloaded + 1
video.segment_downloaded = count_downloaded;
video.status = `下载中...${count_downloaded}/${count_seg}`
mainWindow.webContents.send('task-notify-update',video);
};
tsQueues.push(qo);
}
tsQueues.drain(()=>{
console.log('download success');
video.status = "已完成,合并中..."
mainWindow.webContents.send('task-notify-end',video);
let indexData = '';
for (let iSeg = 0; iSeg < segments.length; iSeg++) {
let filpath = path.join(dir, `${ ((iSeg + 1) +'').padStart(6,'0') }.ts`);
indexData += `file '${filpath}'\r\n`;
filesegments.push(filpath);
}
fs.writeFileSync(path.join(dir,'index.txt'),indexData);
let outPathMP4 = path.join(dir,id+'.mp4');
if(fs.existsSync(ffmpegPath))
{
ffmpeg()
.input(`${path.join(dir,'index.txt')}`)
.inputOptions(['-f concat', '-safe 0'])
.outputOptions('-c copy')
.output(`${outPathMP4}`)
.on('start', function (commandLine) {
console.log('Spawned Ffmpeg with command: ' + commandLine)
})
.on('codecData', function (data) {
console.log('Input is ' + data.audio + ' audio ' + 'with ' + data.video + ' video')
})
.on('progress', function (progress) {
console.log(progress.percent)
})
.on('error', function (err, stdout, stderr) {
console.log('Cannot process video: ' + err.message)
video.videopath = outPathMP4;
video.status = "合成失败,可能是非标准加密视频源,请联系客服定制。"
mainWindow.webContents.send('task-notify-end',video);
})
.on('end', function (stdout, stderr) {
video.videopath = outPathMP4;
video.status = "已完成"
mainWindow.webContents.send('task-notify-end',video);
let index_path = path.join(dir,'index.txt');
let key_path = path.join(dir,'aes.key');
if(fs.existsSync(index_path))
{
fs.unlinkSync(index_path);
}
if(fs.existsSync(key_path))
{
fs.unlinkSync(key_path);
}
filesegments.forEach(fileseg=>{
if(fs.existsSync(fileseg))
{
fs.unlinkSync(fileseg);
}
});
})
.run()
configVideos.push(video);
}else{
video.videopath = outPathMP4;
video.status = "已完成,未发现本地FFMPEG,不进行合成。"
mainWindow.webContents.send('task-notify-end',video);
}
});
console.log("drain over");
}
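// Hedged note on the queue pattern used above (names below are placeholders): async.queue invokes
// queue_callback(task, done) for each pushed QueueObject with a concurrency of 3, and the drain
// handler fires once every queued task has called its done callback.
//
//   const demoQueue = async.queue(queue_callback, 3);
//   demoQueue.drain(() => console.log('all segments processed'));
//   // demoQueue.push(someQueueObject);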
function startDownloadLocal(filepath, nId = null) {
let id = nId == null ? new Date().getTime() : nId;
let dir = path.join(store.get('downloadPath'), '/'+id);
let filesegments = [];
if(!fs.existsSync(dir))
{
fs.mkdirSync(dir, { recursive: true });
}
try{
const response = fs.readFileSync(filepath, 'utf-8')
if(response == '')
{
return;
}
let parser = new Parser();
parser.push(response);
parser.end();
    // Download segments with a concurrency of 3
var tsQueues = async.queue(queue_callback, 3);
let count_seg = parser.manifest.segments.length;
let count_downloaded = 0;
var video = {
id:id,
url:filepath,
dir:dir,
segment_total:count_seg,
segment_downloaded:count_downloaded,
time: dateFormat(new Date(),"yyyy-mm-dd HH:MM:ss"),
status:'初始化...',
isLiving:false,
videopath:''
};
if(nId == null)
{
mainWindow.webContents.send('task-notify-create',video);
}
globalCond[id] = true;
let segments = parser.manifest.segments;
for (let iSeg = 0; iSeg < segments.length; iSeg++) {
let qo = new QueueObject();
qo.dir = dir;
qo.idx = iSeg;
qo.id = id;
qo.url = filepath;
qo.segment = segments[iSeg];
qo.then = function(){
count_downloaded = count_downloaded + 1
video.segment_downloaded = count_downloaded;
video.status = `下载中...${count_downloaded}/${count_seg}`
mainWindow.webContents.send('task-notify-update',video);
};
tsQueues.push(qo);
}
tsQueues.drain(()=>{
console.log('download success');
video.status = "已完成,合并中..."
mainWindow.webContents.send('task-notify-end',video);
let indexData = '';
for (let iSeg = 0; iSeg < segments.length; iSeg++) {
let filpath = path.join(dir, `${ ((iSeg + 1) +'').padStart(6,'0') }.ts`);
indexData += `file '${filpath}'\r\n`;
filesegments.push(filpath);
}
fs.writeFileSync(path.join(dir,'index.txt'),indexData);
let outPathMP4 = path.join(dir,id+'.mp4');
if(fs.existsSync(ffmpegPath))
{
ffmpeg()
.input(`${path.join(dir,'index.txt')}`)
.inputOptions(['-f concat', '-safe 0'])
.outputOptions('-c copy')
.output(`${outPathMP4}`)
.on('start', function (commandLine) {
console.log('Spawned Ffmpeg with command: ' + commandLine)
})
.on('codecData', function (data) {
console.log('Input is ' + data.audio + ' audio ' + 'with ' + data.video + ' video')
})
.on('progress', function (progress) {
console.log(progress.percent)
})
.on('error', function (err, stdout, stderr) {
console.log('Cannot process video: ' + err.message)
video.videopath = outPathMP4;
video.status = "合成失败,可能是非标准加密视频源,请联系客服定制。"
mainWindow.webContents.send('task-notify-end',video);
})
.on('end', function (stdout, stderr) {
video.videopath = outPathMP4;
video.status = "已完成"
mainWindow.webContents.send('task-notify-end',video);
let index_path = path.join(dir,'index.txt');
let key_path = path.join(dir,'aes.key');
if(fs.existsSync(index_path))
{
fs.unlinkSync(index_path);
}
if(fs.existsSync(key_path))
{
fs.unlinkSync(key_path);
}
filesegments.forEach(fileseg=>{
if(fs.existsSync(fileseg))
{
fs.unlinkSync(fileseg);
}
});
})
.run()
configVideos.push(video);
}else{
video.videopath = outPathMP4;
video.status = "已完成,未发现本地FFMPEG,不进行合成。"
mainWindow.webContents.send('task-notify-end',video);
}
});
console.log("drain over");
}catch(error) {
console.log(error)
}
} | random_line_split |
|
index.js | import { app, BrowserWindow, ipcMain, shell } from 'electron'
import Config from '../../static/js/config'
const Store = require('electron-store')
let store = new Store()
const path = require('path')
const { Parser } = require('m3u8-parser')
const fs = require('fs')
const async = require('async')
const dateFormat = require('dateformat')
const crypto = require('crypto')
const got = require('got')
const ffmpeg = require('fluent-ffmpeg')
const download = require('download')
const httpTimeout = {socket: 300000, request: 300000, response:300000};
const ffmpegPath = process.env.NODE_ENV == 'development' ? path.resolve(__dirname, '../../ffmpeg.exe') : path.resolve(__dirname, '../../../ffmpeg.exe')
ffmpeg.setFfmpegPath(ffmpegPath)
let mainWindow = null
var configVideos = [];
let globalCond = {};
const globalConfigDir = app.getPath('userData');
const globalConfigVideoPath = path.join(globalConfigDir,'config_videos.json');
if (process.env.NODE_ENV !== 'development') {
global.__static = require('path').join(__dirname, '/static').replace(/\\/g, '\\\\')
}
const winURL = process.env.NODE_ENV === 'development'
? `http://localhost:9080`
: `file://${__dirname}/index.html`
function createWindow () {
/**
* Initial window options
*/
mainWindow = new BrowserWindow({
height: 550,
width: 700,
useContentSize: true,
minWidth:700,
minHeight:550,
maxWidth:700,
maxHeight:550,
frame: false,
show:false,
webPreferences: {
nodeIntegration: true
}
})
new Config(mainWindow)
mainWindow.loadURL(winURL)
mainWindow.on('ready-to-show', function () {
        mainWindow.show() // show the window only after it has finished initializing
})
mainWindow.on('closed', () => {
mainWindow = null
})
}
let playerWindow = null
function createPlayerWindow(src) {
if(playerWindow == null)
{
        // Create the player browser window
playerWindow = new BrowserWindow({
width: 1024,
height: 620,
skipTaskbar: false,
transparent: false,
frame: false,
resizable: true,
webPreferences: {
nodeIntegration: true
},
alwaysOnTop: false,
hasShadow: false,
parent: mainWindow
});
playerWindow.setMenu(null)
playerWindow.on('closed', () => {
            // Dereference the window object. If your app supports multiple windows,
            // you would usually keep them in an array and
            // delete the corresponding element here.
playerWindow = null;
})
}
    // Load the player page (player.html)
let playerSrc = process.env.NODE_ENV == 'development' ? path.resolve(__dirname, '../../static/player.html') : path.resolve(__dirname, './static/player.html')
playerWindow.loadFile(playerSrc, {search:"src="+src});
}
app.on('ready', createWindow)
app.allowRendererProcessReuse = true
app.on('window-all-closed', () => {
if (process.platform !== 'darwin') {
app.quit()
}
})
app.on('activate', () => {
if (mainWindow === null) {
createWindow()
}
})
ipcMain.on('opendir', function (event, arg) {
shell.openExternal(arg);
});
ipcMain.on('playvideo', function (event, arg) {
createPlayerWindow(arg);
});
ipcMain.on('delvideo', function (event, id) {
configVideos.forEach(Element=>{
if(Element.id==id)
{
try {
if(fs.existsSync(Element.dir)) {
var files = fs.readdirSync(Element.dir)
files.forEach(e=>{
fs.unlinkSync(path.join(Element.dir,e) );
})
fs.rmdirSync(Element.dir,{recursive :true})
}
var nIdx = configVideos.indexOf(Element);
if( nIdx > -1)
{
configVideos.splice(nIdx,1);
fs.writeFileSync(globalConfigVideoPath,JSON.stringify(configVideos));
}
event.sender.send("delvideo-reply",Element);
} catch (error) {
console.log(error)
}
}
});
});
ipcMain.on('StartOrStop', function (event, arg) {
let id = Number.parseInt(arg);
if(globalCond[id] == null)
{
console.log("不存在此任务")
return;
}
globalCond[id] = !globalCond[id];
if(globalCond[id] == true)
{
configVideos.forEach(Element=>{
if(Element.id==id)
{
if(Element.isLiving == true)
{
startDownloadLive(Element.url, Element.headers, id);
}
else
{
startDownload(Element.url, Element.headers, id);
}
}
});
}
});
ipcMain.on('task-add', async function (event, arg, headers) {
let src = arg;
let _headers = {};
if(headers != '')
{
let __ = headers.match(/(.*?): ?(.*?)(\n|\r|$)/g);
__ && __.forEach((_)=>{
let ___ = _.match(/(.*?): ?(.*?)(\n|\r|$)/i);
___ && (_headers[___[1]] = ___[2]);
});
}
let mes = src.match(/^https?:\/\/[^/]*/);
let _hosts = '';
if(mes && mes.length >= 1)
{
_hosts = mes[0];
}
if(_headers['Origin'] == null && _headers['origin'] == null)
{
_headers['Origin'] = _hosts;
}
if(_headers['Referer'] == null && _headers['referer'] == null)
{
_headers['Referer'] = _hosts;
}
const response = await got(src, {headers: _headers, timeout: httpTimeout, https: {rejectUnauthorized: false}}).catch((error) => { console.log(error) })
{
let info = '';
let code = 0;
code = -1;
info = '解析资源失败!';
if (response && response.body != null
&& response.body != '')
{
let parser = new Parser();
parser.push(response.body);
parser.end();
let count_seg = parser.manifest.segments.length;
if (count_seg > 0) {
code = 0;
if (parser.manifest.endList) {
let duration = 0;
parser.manifest.segments.forEach(segment => {
duration += segment.duration;
});
info = `资源解析成功,有 ${count_seg} 个片段,时长:${formatTime(duration)},即将开始缓存...`;
startDownload(src, _headers);
} else {
info = `直播资源解析成功,即将开始缓存...`;
startDownloadLive(src, _headers);
}
}
}
event.sender.send('task-add-reply', { code: code, message: info });
}
})
ipcMain.on('local-task-add', function (event, arg) {
let path = arg
let info = '';
let code = 0;
code = -1;
info = '解析资源失败!';
try{
const response = fs.readFileSync(path, 'utf-8')
if (response){
let parser = new Parser();
parser.push(response);
parser.end();
let count_seg = parser.manifest.segments.length;
if (count_seg > 0) {
code = 0;
if (parser.manifest.endList) {
let duration = 0;
parser.manifest.segments.forEach(segment => {
duration += segment.duration;
});
info = `资源解析成功,有 ${count_seg} 个片段,时长:${formatTime(duration)},即将开始缓存...`;
startDownloadLocal(path);
} else {
info = `解析资源失败!`;
}
}
}
}catch(error) {
console.log(error)
}
event.sender.send('task-add-reply', { code: code, message: info });
})
function formatTime(duration) {
let sec = Math.floor(duration % 60).toLocaleString();
let min = Math.floor(duration / 60 % 60).toLocaleString();
let hour = Math.floor(duration / 3600 % 60).toLocaleString();
if (sec.length != 2) sec = '0' + sec;
if (min.length != 2) min = '0' + min;
if (hour.length != 2) hour = '0' + hour;
return hour + ":" + min + ":" + sec;
}
class QueueObject {
constructor() {
this.segment = null;
this.url = '';
this.headers = '';
this.id = 0;
this.idx = 0;
this.dir = '';
this.then = this.catch = null;
}
async callback( _callback ) {
try{
if(!globalCond[this.id])
{
                console.log(`globalCond[${this.id}] does not exist.`);
return;
}
let partent_uri = this.url.replace(/([^\/]*\?.*$)|([^\/]*$)/g, '');
let segment = this.segment;
let uri_ts = '';
if (/^http.*/.test(segment.uri)) {
uri_ts = segment.uri;
}
else if(/^\/.*/.test(segment.uri))
{
let mes = this.url.match(/^https?:\/\/[^/]*/);
if(mes && mes.length >= 1)
{
uri_ts = mes[0] + segment.uri;
}
else
{
uri_ts = partent_uri + segment.uri;
}
}
else
{
uri_ts = partent_uri + segment.uri;
}
let filename = `${ ((this.idx + 1) +'').padStart(6,'0')}.ts`;
let filpath = path.join(this.dir, filename);
let filpath_dl = path.join(this.dir, filename+".dl");
// console.log(`2 ${segment.uri}`,`${filename}`);
            // Check whether the segment file already exists
for (let index = 0; index < 3 && !fs.existsSync(filpath); index++) {
                // Download to a temporary .dl-suffixed file, then rename it once the download completes
let that = this;
await download (uri_ts, that.dir, {filename:filename + ".dl",timeout:httpTimeout,headers:that.headers}).catch((err)=>{
console.log(err)
if(fs.existsSync(filpath_dl)) fs.unlinkSync( filpath_dl);
});
if(!fs.existsSync(filpath_dl)) continue;
                if( fs.statSync(filpath_dl).size <= 0 )
                {
                    fs.unlinkSync(filpath_dl);
                    continue; // empty download: drop it and retry this segment
                }
if(segment.key != null && segment.key.method != null)
{
                    // Standard AES decryption of the TS stream
let aes_path = path.join(this.dir, "aes.key" );
if(!fs.existsSync( aes_path ))
{
let key_uri = segment.key.uri;
                        if (/^\/.*/.test(key_uri))
                        {
                            // root-relative key path: resolve it against the playlist host
                            let mes = this.url.match(/^https?:\/\/[^/]*/);
                            if(mes && mes.length >= 1)
                            {
                                key_uri = mes[0] + segment.key.uri;
                            }
                            else
                            {
                                key_uri = partent_uri + segment.key.uri;
                            }
                        }
                        else if (! /^http.*/.test(key_uri)) {
                            // relative key path: resolve it against the playlist directory
                            key_uri = partent_uri + segment.key.uri;
                        }
await download (key_uri, that.dir, { filename: "aes.key" }).catch(console.error);
}
if(fs.existsSync( aes_path ))
{
try {
let key_ = fs.readFileSync( aes_path );
let iv_ = segment.key.iv != null ? Buffer.from(segment.key.iv.buffer)
:Buffer.from(that.idx.toString(16).padStart(32,'0') ,'hex' );
let cipher = crypto.createDecipheriv((segment.key.method+"-cbc").toLowerCase(), key_, iv_);
cipher.on('error', (error) => {console.log(error)});
let inputData = fs.readFileSync( filpath_dl );
let outputData = Buffer.concat([cipher.update(inputData),cipher.final()]);
fs.writeFileSync(filpath,outputData);
if(fs.existsSync(filpath_dl)) fs.unlinkSync(filpath_dl);
that.then && that.then();
} catch (error) {
console.log(error)
if(fs.existsSync( filpath_dl ))
fs.unlinkSync(filpath_dl);
}
return;
}
}
else
{
fs.renameSync(filpath_dl , filpath);
break;
}
}
if(fs.existsSync(filpath))
{
this.then && this.then();
}
else
{
this.catch && this.catch();
}
}
catch(e)
{
console.log(e);
}
finally
{
_callback();
}
}
}
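// Hedged aside (not part of the original file): the manual host/prefix splicing in
// QueueObject.callback for segment and key URIs can also be expressed with Node's WHATWG URL
// resolution. The function name is a placeholder.
//
//   function resolveAgainstPlaylist(playlistUrl, maybeRelativeUri) {
//       return new URL(maybeRelativeUri, playlistUrl).toString();
//   }
//   // resolveAgainstPlaylist('https://example.com/hls/index.m3u8', 'seg001.ts')
//   //   -> 'https://example.com/hls/seg001.ts'
//   // resolveAgainstPlaylist('https://example.com/hls/index.m3u8', '/keys/aes.key')
//   //   -> 'https://example.com/keys/aes.key'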
function queue_callback(that, callback)
{
that.callback(callback);
}
async function startDownload(url, headers = null, nId = null) {
let id = nId == null ? new Date().getTime() : nId;
let dir = path.join(store.get('downloadPath'), '/'+id);
let filesegments = | .mkdirSync(dir, { recursive: true });
}
const response = await got(url, {headers: headers, timeout: httpTimeout, https: {rejectUnauthorized: false}}).catch((error) => { console.log(error) })
if(response == null || response.body == null || response.body == '')
{
return;
}
let parser = new Parser();
parser.push(response.body);
parser.end();
    // Download segments with a concurrency of 3
var tsQueues = async.queue(queue_callback, 3);
let count_seg = parser.manifest.segments.length;
let count_downloaded = 0;
var video = {
id:id,
url:url,
dir:dir,
segment_total:count_seg,
segment_downloaded:count_downloaded,
time: dateFormat(new Date(),"yyyy-mm-dd HH:MM:ss"),
status:'初始化...',
isLiving:false,
headers:headers,
videopath:''
};
if(nId == null)
{
mainWindow.webContents.send('task-notify-create',video);
}
globalCond[id] = true;
let segments = parser.manifest.segments;
for (let iSeg = 0; iSeg < segments.length; iSeg++) {
let qo = new QueueObject();
qo.dir = dir;
qo.idx = iSeg;
qo.id = id;
qo.url = url;
qo.headers = headers;
qo.segment = segments[iSeg];
qo.then = function(){
count_downloaded = count_downloaded + 1
video.segment_downloaded = count_downloaded;
video.status = `下载中...${count_downloaded}/${count_seg}`
mainWindow.webContents.send('task-notify-update',video);
};
tsQueues.push(qo);
}
tsQueues.drain(()=>{
console.log('download success');
video.status = "已完成,合并中..."
mainWindow.webContents.send('task-notify-end',video);
let indexData = '';
for (let iSeg = 0; iSeg < segments.length; iSeg++) {
let filpath = path.join(dir, `${ ((iSeg + 1) +'').padStart(6,'0') }.ts`);
indexData += `file '${filpath}'\r\n`;
filesegments.push(filpath);
}
fs.writeFileSync(path.join(dir,'index.txt'),indexData);
let outPathMP4 = path.join(dir,id+'.mp4');
if(fs.existsSync(ffmpegPath))
{
ffmpeg()
.input(`${path.join(dir,'index.txt')}`)
.inputOptions(['-f concat', '-safe 0'])
.outputOptions('-c copy')
.output(`${outPathMP4}`)
.on('start', function (commandLine) {
console.log('Spawned Ffmpeg with command: ' + commandLine)
})
.on('codecData', function (data) {
console.log('Input is ' + data.audio + ' audio ' + 'with ' + data.video + ' video')
})
.on('progress', function (progress) {
console.log(progress.percent)
})
.on('error', function (err, stdout, stderr) {
console.log('Cannot process video: ' + err.message)
video.videopath = outPathMP4;
video.status = "合成失败,可能是非标准加密视频源,请联系客服定制。"
mainWindow.webContents.send('task-notify-end',video);
})
.on('end', function (stdout, stderr) {
video.videopath = outPathMP4;
video.status = "已完成"
mainWindow.webContents.send('task-notify-end',video);
let index_path = path.join(dir,'index.txt');
let key_path = path.join(dir,'aes.key');
if(fs.existsSync(index_path))
{
fs.unlinkSync(index_path);
}
if(fs.existsSync(key_path))
{
fs.unlinkSync(key_path);
}
filesegments.forEach(fileseg=>{
if(fs.existsSync(fileseg))
{
fs.unlinkSync(fileseg);
}
});
})
.run()
configVideos.push(video);
}else{
video.videopath = outPathMP4;
video.status = "已完成,未发现本地FFMPEG,不进行合成。"
mainWindow.webContents.send('task-notify-end',video);
}
});
console.log("drain over");
}
function startDownloadLocal(filepath, nId = null) {
let id = nId == null ? new Date().getTime() : nId;
let dir = path.join(store.get('downloadPath'), '/'+id);
let filesegments = [];
if(!fs.existsSync(dir))
{
fs.mkdirSync(dir, { recursive: true });
}
try{
const response = fs.readFileSync(filepath, 'utf-8')
if(response == '')
{
return;
}
let parser = new Parser();
parser.push(response);
parser.end();
    // Download segments with a concurrency of 3
var tsQueues = async.queue(queue_callback, 3);
let count_seg = parser.manifest.segments.length;
let count_downloaded = 0;
var video = {
id:id,
url:filepath,
dir:dir,
segment_total:count_seg,
segment_downloaded:count_downloaded,
time: dateFormat(new Date(),"yyyy-mm-dd HH:MM:ss"),
status:'初始化...',
isLiving:false,
videopath:''
};
if(nId == null)
{
mainWindow.webContents.send('task-notify-create',video);
}
globalCond[id] = true;
let segments = parser.manifest.segments;
for (let iSeg = 0; iSeg < segments.length; iSeg++) {
let qo = new QueueObject();
qo.dir = dir;
qo.idx = iSeg;
qo.id = id;
qo.url = filepath;
qo.segment = segments[iSeg];
qo.then = function(){
count_downloaded = count_downloaded + 1
video.segment_downloaded = count_downloaded;
video.status = `下载中...${count_downloaded}/${count_seg}`
mainWindow.webContents.send('task-notify-update',video);
};
tsQueues.push(qo);
}
tsQueues.drain(()=>{
console.log('download success');
video.status = "已完成,合并中..."
mainWindow.webContents.send('task-notify-end',video);
let indexData = '';
for (let iSeg = 0; iSeg < segments.length; iSeg++) {
let filpath = path.join(dir, `${ ((iSeg + 1) +'').padStart(6,'0') }.ts`);
indexData += `file '${filpath}'\r\n`;
filesegments.push(filpath);
}
fs.writeFileSync(path.join(dir,'index.txt'),indexData);
let outPathMP4 = path.join(dir,id+'.mp4');
if(fs.existsSync(ffmpegPath))
{
ffmpeg()
.input(`${path.join(dir,'index.txt')}`)
.inputOptions(['-f concat', '-safe 0'])
.outputOptions('-c copy')
.output(`${outPathMP4}`)
.on('start', function (commandLine) {
console.log('Spawned Ffmpeg with command: ' + commandLine)
})
.on('codecData', function (data) {
console.log('Input is ' + data.audio + ' audio ' + 'with ' + data.video + ' video')
})
.on('progress', function (progress) {
console.log(progress.percent)
})
.on('error', function (err, stdout, stderr) {
console.log('Cannot process video: ' + err.message)
video.videopath = outPathMP4;
video.status = "合成失败,可能是非标准加密视频源,请联系客服定制。"
mainWindow.webContents.send('task-notify-end',video);
})
.on('end', function (stdout, stderr) {
video.videopath = outPathMP4;
video.status = "已完成"
mainWindow.webContents.send('task-notify-end',video);
let index_path = path.join(dir,'index.txt');
let key_path = path.join(dir,'aes.key');
if(fs.existsSync(index_path))
{
fs.unlinkSync(index_path);
}
if(fs.existsSync(key_path))
{
fs.unlinkSync(key_path);
}
filesegments.forEach(fileseg=>{
if(fs.existsSync(fileseg))
{
fs.unlinkSync(fileseg);
}
});
})
.run()
configVideos.push(video);
}else{
video.videopath = outPathMP4;
video.status = "已完成,未发现本地FFMPEG,不进行合成。"
mainWindow.webContents.send('task-notify-end',video);
}
});
console.log("drain over");
}catch(error) {
console.log(error)
}
} | [];
if(!fs.existsSync(dir))
{
fs | conditional_block |
state.go | package views
import (
"errors"
"regexp"
"strings"
"sync"
"sync/atomic"
)
// excessStops matches runs of consecutive full stops in a state address, which get collapsed into a single full stop
var excessStops = regexp.MustCompile(`\.+`)
// ErrStateNotFound is returned when the state address is inaccurate and a state was not found in the path
var ErrStateNotFound = errors.New("State Not Found")
// ErrInvalidStateAddr is returned when a state address has the wrong length or format
var ErrInvalidStateAddr = errors.New("Invalid State Address")
// StateResponse defines the function type used by state in response to a state call
type StateResponse func()
//StateValidator defines a function type used in the state validator process
type StateValidator func(string, string) bool
// States represent the interface defining a state type
type States interface {
Active() bool
// Tag() string
Engine() *StateEngine
Activate()
Deactivate()
UseActivator(StateResponse) States
UseDeactivator(StateResponse) States
OverrideValidator(StateValidator) States
acceptable(string, string) bool
}
// State represents a single state of with a specific tag and address
// where the address is a single piece 'home' item in the '.home.files' style state address
type State struct {
// atomic bit used to indicate active state or inactive state
active int64
// tag represent the identifier key used in a super-state
// tag string
// activator and deactivator provide actions to occur when the state is set to be active
activator StateResponse
deactivator StateResponse
	// optionalValidator is an optional argument which also takes part in validating whether the state matches a given state address
optionalValidator StateValidator
//internal engine that allows sub-states from a root state
engine *StateEngine
// the parent state this is connected to
// parent States
vo, ro, do sync.Mutex
}
// NewState builds a new state to be registered under a single address point, e.g. home or files
func NewState() *State {
ns := State{}
ns.engine = BuildStateEngine(&ns)
return &ns
}
// Active returns true/false if this state is active
func (s *State) Active() bool {
return atomic.LoadInt64(&s.active) == 1
}
// Engine returns the internal nested StateEngine
func (s *State) Engine() *StateEngine {
return s.engine
}
// UseDeactivator assigns the state a new deactivation response handler
func (s *State) UseDeactivator(so StateResponse) States {
s.do.Lock()
s.deactivator = so
s.do.Unlock()
return s
}
// UseActivator assigns the state a new activation response handler
func (s *State) UseActivator(so StateResponse) States {
s.ro.Lock()
s.activator = so
s.ro.Unlock()
return s
}
// OverrideValidator assigns a validator to perform custom matching of the state
func (s *State) OverrideValidator(so StateValidator) States {
s.vo.Lock()
s.optionalValidator = so
s.vo.Unlock()
return s
}
// Activate activates the state
func (s *State) Activate() {
if s.active > 1 {
return
}
atomic.StoreInt64(&s.active, 1)
subs := s.engine.diffOnlySubs()
//activate all the subroot states first so they can
//do any population they want
for _, ko := range subs {
ko.Activate()
}
s.ro.Lock()
if s.activator != nil {
s.activator()
}
s.ro.Unlock()
}
// Deactivate deactivates the state
func (s *State) Deactivate() {
if s.active < 1 {
return
}
atomic.StoreInt64(&s.active, 0)
s.do.Lock()
if s.deactivator != nil {
s.deactivator()
}
s.do.Unlock()
}
// acceptable checks if the state matches the current point
func (s *State) acceptable(addr string, point string) bool {
if s.optionalValidator == nil {
if addr == point {
return true
}
return false
}
s.vo.Lock()
state := s.optionalValidator(addr, point)
s.vo.Unlock()
return state
}
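// Hedged illustration (not part of the original file): acceptable falls back to exact string
// matching, so anything fancier -- prefix matching, regex matching, parameterised points -- has
// to come in through OverrideValidator. A minimal sketch, assuming a `strings` import:
//
//	s := NewState()
//	s.OverrideValidator(func(addr, point string) bool {
//		// treat the state as a match for any point that begins with its address
//		return strings.HasPrefix(point, addr)
//	})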
// StateEngine represents the engine that handles the state machine based operations for state-address based states
type StateEngine struct {
rw sync.RWMutex
states map[States]string
owner States
curr States
}
// NewStateEngine returns a new engine with a default empty state
func NewStateEngine() *StateEngine {
return BuildStateEngine(nil)
}
// BuildStateEngine returns a new StateEngine instance set with a particular state as its owner
func BuildStateEngine(s States) *StateEngine {
es := StateEngine{
states: make(map[States]string),
owner: s,
}
return &es
}
// AddState adds a new state into the engine under the given address; the address is a single address point, e.g. home or files, not the full extended address such as .root.home.files
func (se *StateEngine) AddState(addr string) States {
sa := NewState()
se.add(addr, sa)
return sa
}
// UseState adds a state into the StateEngine with a specific tag, the state address point is still used in matching against it
func (se *StateEngine) UseState(addr string, s States) States {
if addr == "" {
addr = "."
}
se.add(addr, s)
return s
}
// ShallowState returns the current state of the engine and not the final state i.e with a state address of '.home.files' from its root, it will return State(:home) object
func (se *StateEngine) ShallowState() States {
if se.curr == nil {
return nil
}
return se.curr
}
// State returns the current last state of the engine with respect to any nested state that is with the state address of '.home.files', it will return State(:files) object
func (se *StateEngine) | () States {
co := se.curr
if co == nil {
// return se.owner
return nil
}
return co.Engine().State()
}
// Partial renders the partial of the last state of the state address
func (se *StateEngine) Partial(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, true)
}
// All renders the full state chain of the given state address, activating every state along the path
func (se *StateEngine) All(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, false)
}
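// Hedged usage sketch (the state names are placeholders): a root engine with one nested level,
// driven by a dotted state address. All activates every state along the path, while Partial
// would activate only the final one.
//
//	engine := NewStateEngine()
//	home := engine.AddState("home")
//	files := home.Engine().AddState("files")
//	files.UseActivator(func() { /* render the files view */ })
//	if err := engine.All(".home.files"); err != nil {
//		// err is ErrStateNotFound or ErrInvalidStateAddr
//	}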
// DeactivateAll deactivates all states connected to this engine
func (se *StateEngine) DeactivateAll() {
se.eachState(func(so States, tag string, _ func()) {
so.Deactivate()
})
}
func (se *StateEngine) eachState(fx func(States, string, func())) {
if fx == nil {
return
}
se.rw.RLock()
defer se.rw.RUnlock()
var stop bool
for so, addr := range se.states {
if stop {
break
}
fx(so, addr, func() {
stop = true
})
}
}
func (se *StateEngine) getAddr(s States) string {
se.rw.RLock()
defer se.rw.RUnlock()
return se.states[s]
}
func (se *StateEngine) get(addr string) States {
se.rw.RLock()
defer se.rw.RUnlock()
for sm, ao := range se.states {
if ao != addr {
continue
}
return sm
}
return nil
}
func (se *StateEngine) add(addr string, s States) {
se.rw.RLock()
_, ok := se.states[s]
se.rw.RUnlock()
if ok {
return
}
se.rw.Lock()
se.states[s] = addr
se.rw.Unlock()
}
// trajectory is the real engine: it walks the address points, passes the remaining points down to the sub-states and determines whether this is a full or a partial view
func (se *StateEngine) trajectory(points []string, partial bool) error {
subs, nosubs := se.diffSubs()
	//are we out of points to walk through? if so then fire active and tell the others to be inactive
if len(points) < 1 {
//deactivate all the non-subroot states
for _, ko := range nosubs {
ko.Deactivate()
}
//if the engine has a root state activate it since in doing so,it will activate its own children else manually activate the children
if se.owner != nil {
se.owner.Activate()
} else {
//activate all the subroot states first so they can
			//be ready for the root. We call this here in case the StateEngine has no root state
for _, ko := range subs {
ko.Activate()
}
}
return nil
}
	//cache the first point so we don't lose it
point := points[0]
var state = se.get(point)
if state == nil {
// for _, ko := range nosubs {
// if sko.acceptable(se.getAddr(ko), point, so) {
// state = ko
// break
// }
// }
//
// if state == nil {
return ErrStateNotFound
// }
}
//set this state as the current active state
se.curr = state
//shift the list one more bit for the points
points = points[1:]
//we pass down the points since that will handle the loadup downwards
err := state.Engine().trajectory(points, partial)
if err != nil {
return err
}
if !partial {
// //activate all the subroot states first so they can
// //do any population they want
// for _, ko := range subs {
// ko.Activate(so)
// }
if se.owner != nil {
se.owner.Activate()
}
}
return nil
}
// prepare breaks the state address into a list of walk points
func (se *StateEngine) prepare(addr string) ([]string, error) {
var points []string
var polen int
if addr != "." {
addr = excessStops.ReplaceAllString(addr, ".")
points = strings.Split(addr, ".")
polen = len(points)
} else {
polen = 1
points = []string{""}
}
//check if the length is below 1 then return appropriately
if polen < 1 {
return nil, ErrInvalidStateAddr
}
//if the first is an empty string, meaning the '.' root was supplied, then we shift so we just start from the first state point else we ignore and use the list as-is.
if points[0] == "" {
points = points[1:]
}
return points, nil
}
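// For reference, a hedged walk-through of prepare derived from the logic above:
//
//	prepare(".")            // -> [] : the bare root; trajectory then activates this engine's own states
//	prepare(".home.files")  // -> ["home", "files"]
//	prepare("home.files")   // -> ["home", "files"] : a missing leading dot changes nothing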
// diffOnlySubs returns all states registered under the '.' root state address
func (se *StateEngine) diffOnlySubs() []States {
var subs []States
se.eachState(func(so States, addr string, _ func()) {
if addr == "." {
subs = append(subs, so)
}
})
return subs
}
// diffOnlyNotSubs returns all states not with a '.' root state address
func (se *StateEngine) diffOnlyNotSubs() []States {
var subs []States
se.eachState(func(so States, addr string, _ func()) {
if addr != "." {
subs = append(subs, so)
}
})
return subs
}
func (se *StateEngine) diffSubs() ([]States, []States) {
var nosubs, subs []States
se.eachState(func(so States, addr string, _ func()) {
if addr == "." {
subs = append(subs, so)
} else {
nosubs = append(nosubs, so)
}
})
return subs, nosubs
}
| State | identifier_name |
state.go | package views
import (
"errors"
"regexp"
"strings"
"sync"
"sync/atomic"
)
// excessStops matches runs of consecutive full stops in a state address, which get collapsed into a single full stop
var excessStops = regexp.MustCompile(`\.+`)
// ErrStateNotFound is returned when the state address is inaccurate and a state was not found in the path
var ErrStateNotFound = errors.New("State Not Found")
// ErrInvalidStateAddr is returned when a state address has the wrong length or format
var ErrInvalidStateAddr = errors.New("Invalid State Address")
// StateResponse defines the function type used by state in response to a state call
type StateResponse func()
//StateValidator defines a function type used in the state validator process
type StateValidator func(string, string) bool
// States represent the interface defining a state type
type States interface {
Active() bool
// Tag() string
Engine() *StateEngine
Activate()
Deactivate()
UseActivator(StateResponse) States
UseDeactivator(StateResponse) States
OverrideValidator(StateValidator) States
acceptable(string, string) bool
}
// State represents a single state with a specific tag and address
// where the address is a single piece 'home' item in the '.home.files' style state address
type State struct {
// atomic bit used to indicate active state or inactive state
active int64
// tag represent the identifier key used in a super-state
// tag string
// activator and deactivator provide actions to occur when the state is set to be active
activator StateResponse
deactivator StateResponse
	// optionalValidator is an optional argument which also takes part in validating whether the state matches a given state address
optionalValidator StateValidator
//internal engine that allows sub-states from a root state
engine *StateEngine
// the parent state this is connected to
// parent States
vo, ro, do sync.Mutex
}
// NewState builds a new state to be registered under a single address point, e.g. home or files
func NewState() *State {
ns := State{}
ns.engine = BuildStateEngine(&ns)
return &ns
}
// Active returns true/false if this state is active
func (s *State) Active() bool {
return atomic.LoadInt64(&s.active) == 1
}
// Engine returns the internal nested StateEngine
func (s *State) Engine() *StateEngine {
return s.engine
}
// UseDeactivator assigns the state a new deactivation response handler
func (s *State) UseDeactivator(so StateResponse) States {
s.do.Lock()
s.deactivator = so
s.do.Unlock()
return s
}
// UseActivator assigns the state a new activation response handler
func (s *State) UseActivator(so StateResponse) States {
s.ro.Lock()
s.activator = so
s.ro.Unlock()
return s
}
// OverrideValidator assigns a validator to perform custom matching of the state
func (s *State) OverrideValidator(so StateValidator) States {
s.vo.Lock()
s.optionalValidator = so
s.vo.Unlock()
return s
}
// Activate activates the state
func (s *State) Activate() {
if s.active > 1 {
return
}
atomic.StoreInt64(&s.active, 1)
subs := s.engine.diffOnlySubs()
//activate all the subroot states first so they can
//do any population they want
for _, ko := range subs {
ko.Activate()
}
s.ro.Lock()
if s.activator != nil {
s.activator()
}
s.ro.Unlock()
}
// Deactivate deactivates the state
func (s *State) Deactivate() {
if s.active < 1 |
atomic.StoreInt64(&s.active, 0)
s.do.Lock()
if s.deactivator != nil {
s.deactivator()
}
s.do.Unlock()
}
// acceptable checks if the state matches the current point
func (s *State) acceptable(addr string, point string) bool {
if s.optionalValidator == nil {
if addr == point {
return true
}
return false
}
s.vo.Lock()
state := s.optionalValidator(addr, point)
s.vo.Unlock()
return state
}
// StateEngine represents the engine that handles the state machine based operations for state-address based states
type StateEngine struct {
rw sync.RWMutex
states map[States]string
owner States
curr States
}
// NewStateEngine returns a new engine with a default empty state
func NewStateEngine() *StateEngine {
return BuildStateEngine(nil)
}
// BuildStateEngine returns a new StateEngine instance set with a particular state as its owner
func BuildStateEngine(s States) *StateEngine {
es := StateEngine{
states: make(map[States]string),
owner: s,
}
return &es
}
// AddState adds a new state into the engine under the given address; the address is a single address point, e.g. home or files, not the full extended address such as .root.home.files
func (se *StateEngine) AddState(addr string) States {
sa := NewState()
se.add(addr, sa)
return sa
}
// UseState adds a state into the StateEngine with a specific tag, the state address point is still used in matching against it
func (se *StateEngine) UseState(addr string, s States) States {
if addr == "" {
addr = "."
}
se.add(addr, s)
return s
}
// ShallowState returns the current state of the engine and not the final state i.e with a state address of '.home.files' from its root, it will return State(:home) object
func (se *StateEngine) ShallowState() States {
if se.curr == nil {
return nil
}
return se.curr
}
// State returns the current last state of the engine with respect to any nested state that is with the state address of '.home.files', it will return State(:files) object
func (se *StateEngine) State() States {
co := se.curr
if co == nil {
// return se.owner
return nil
}
return co.Engine().State()
}
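// Hedged example (names are placeholders): after engine.All(".home.files") succeeds,
//
//	shallow := engine.ShallowState() // the state registered as "home" on this engine
//	deep := engine.State()           // the nested "files" state at the end of the chain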
// Partial renders the partial of the last state of the state address
func (se *StateEngine) Partial(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, true)
}
// All renders the full state chain of the given state address, activating every state along the path
func (se *StateEngine) All(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, false)
}
// DeactivateAll deactivates all states connected to this engine
func (se *StateEngine) DeactivateAll() {
se.eachState(func(so States, tag string, _ func()) {
so.Deactivate()
})
}
func (se *StateEngine) eachState(fx func(States, string, func())) {
if fx == nil {
return
}
se.rw.RLock()
defer se.rw.RUnlock()
var stop bool
for so, addr := range se.states {
if stop {
break
}
fx(so, addr, func() {
stop = true
})
}
}
func (se *StateEngine) getAddr(s States) string {
se.rw.RLock()
defer se.rw.RUnlock()
return se.states[s]
}
func (se *StateEngine) get(addr string) States {
se.rw.RLock()
defer se.rw.RUnlock()
for sm, ao := range se.states {
if ao != addr {
continue
}
return sm
}
return nil
}
func (se *StateEngine) add(addr string, s States) {
se.rw.RLock()
_, ok := se.states[s]
se.rw.RUnlock()
if ok {
return
}
se.rw.Lock()
se.states[s] = addr
se.rw.Unlock()
}
// trajectory is the real engine: it walks the address points, passes the remaining points down to the sub-states and determines whether this is a full or a partial view
func (se *StateEngine) trajectory(points []string, partial bool) error {
subs, nosubs := se.diffSubs()
	//are we out of points to walk through? if so then fire active and tell the others to be inactive
if len(points) < 1 {
//deactivate all the non-subroot states
for _, ko := range nosubs {
ko.Deactivate()
}
//if the engine has a root state activate it since in doing so,it will activate its own children else manually activate the children
if se.owner != nil {
se.owner.Activate()
} else {
//activate all the subroot states first so they can
			//be ready for the root. We call this here in case the StateEngine has no root state
for _, ko := range subs {
ko.Activate()
}
}
return nil
}
	//cache the first point so we don't lose it
point := points[0]
var state = se.get(point)
if state == nil {
// for _, ko := range nosubs {
// if sko.acceptable(se.getAddr(ko), point, so) {
// state = ko
// break
// }
// }
//
// if state == nil {
return ErrStateNotFound
// }
}
//set this state as the current active state
se.curr = state
//shift the list one more bit for the points
points = points[1:]
//we pass down the points since that will handle the loadup downwards
err := state.Engine().trajectory(points, partial)
if err != nil {
return err
}
if !partial {
// //activate all the subroot states first so they can
// //do any population they want
// for _, ko := range subs {
// ko.Activate(so)
// }
if se.owner != nil {
se.owner.Activate()
}
}
return nil
}
// prepare breaks the state address into a list of walk points
func (se *StateEngine) prepare(addr string) ([]string, error) {
var points []string
var polen int
if addr != "." {
addr = excessStops.ReplaceAllString(addr, ".")
points = strings.Split(addr, ".")
polen = len(points)
} else {
polen = 1
points = []string{""}
}
//check if the length is below 1 then return appropriately
if polen < 1 {
return nil, ErrInvalidStateAddr
}
//if the first is an empty string, meaning the '.' root was supplied, then we shift so we just start from the first state point else we ignore and use the list as-is.
if points[0] == "" {
points = points[1:]
}
return points, nil
}
// diffOnlySubs returns all states registered under the '.' root state address
func (se *StateEngine) diffOnlySubs() []States {
var subs []States
se.eachState(func(so States, addr string, _ func()) {
if addr == "." {
subs = append(subs, so)
}
})
return subs
}
// diffOnlyNotSubs returns all states not with a '.' root state address
func (se *StateEngine) diffOnlyNotSubs() []States {
var subs []States
se.eachState(func(so States, addr string, _ func()) {
if addr != "." {
subs = append(subs, so)
}
})
return subs
}
func (se *StateEngine) diffSubs() ([]States, []States) {
var nosubs, subs []States
se.eachState(func(so States, addr string, _ func()) {
if addr == "." {
subs = append(subs, so)
} else {
nosubs = append(nosubs, so)
}
})
return subs, nosubs
}
| {
return
} | conditional_block |
state.go | package views
import (
"errors"
"regexp"
"strings"
"sync"
"sync/atomic"
)
// excessStops matches runs of consecutive full stops in a state address, which get collapsed into a single full stop
var excessStops = regexp.MustCompile(`\.+`)
// ErrStateNotFound is returned when the state address is inaccurate and a state was not found in the path
var ErrStateNotFound = errors.New("State Not Found")
// ErrInvalidStateAddr is returned when a wrong length or format state address is found
var ErrInvalidStateAddr = errors.New("Invalid State Address")
// StateResponse defines the function type used by state in response to a state call
type StateResponse func()
//StateValidator defines a function type used in the state validator process
type StateValidator func(string, string) bool
// States represent the interface defining a state type
type States interface {
Active() bool
// Tag() string
Engine() *StateEngine
Activate()
Deactivate()
UseActivator(StateResponse) States
UseDeactivator(StateResponse) States
OverrideValidator(StateValidator) States
acceptable(string, string) bool
}
// State represents a single state with a specific tag and address
// where the address is a single piece 'home' item in the '.home.files' style state address
type State struct {
// atomic bit used to indicate active state or inactive state
active int64
// tag represent the identifier key used in a super-state
// tag string
// activator and deactivator provide actions to occur when the state is set to be active
activator StateResponse
deactivator StateResponse
// validator represents an optional argument which also takes part in the validation process of a state, validating its rightness/wrongness against a given state address
optionalValidator StateValidator
//internal engine that allows sub-states from a root state
engine *StateEngine
// the parent state this is connected to
// parent States
vo, ro, do sync.Mutex
}
// NewState builds a new state with a tag and single address point e.g. home or files etc.
func NewState() *State {
ns := State{}
ns.engine = BuildStateEngine(&ns)
return &ns
}
// Active returns true/false if this state is active
func (s *State) Active() bool {
return atomic.LoadInt64(&s.active) == 1
}
// Engine returns the internal nested StateEngine
func (s *State) Engine() *StateEngine {
return s.engine
}
// UseDeactivator assigns the state a new deactivate response handler
func (s *State) UseDeactivator(so StateResponse) States |
// UseActivator assigns the state a new active response handler
func (s *State) UseActivator(so StateResponse) States {
s.ro.Lock()
s.activator = so
s.ro.Unlock()
return s
}
// OverrideValidator assigns a validator to perform custom matching of the state
func (s *State) OverrideValidator(so StateValidator) States {
s.vo.Lock()
s.optionalValidator = so
s.vo.Unlock()
return s
}
// Activate activates the state
func (s *State) Activate() {
if s.active >= 1 {
return
}
atomic.StoreInt64(&s.active, 1)
subs := s.engine.diffOnlySubs()
//activate all the subroot states first so they can
//do any population they want
for _, ko := range subs {
ko.Activate()
}
s.ro.Lock()
if s.activator != nil {
s.activator()
}
s.ro.Unlock()
}
// Deactivate deactivates the state
func (s *State) Deactivate() {
if s.active < 1 {
return
}
atomic.StoreInt64(&s.active, 0)
s.do.Lock()
if s.deactivator != nil {
s.deactivator()
}
s.do.Unlock()
}
// acceptable checks if the state matches the current point
func (s *State) acceptable(addr string, point string) bool {
if s.optionalValidator == nil {
if addr == point {
return true
}
return false
}
s.vo.Lock()
state := s.optionalValidator(addr, point)
s.vo.Unlock()
return state
}
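// Illustrative sketch (not part of the original source; names are hypothetical):
// a custom validator lets a state accept points that don't literally equal its
// registered address, for example a prefix match.
//
//	s := NewState()
//	s.OverrideValidator(func(addr, point string) bool {
//		return strings.HasPrefix(point, addr)
//	})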
// StateEngine represents the engine that handles the state machine based operations for state-address based states
type StateEngine struct {
rw sync.RWMutex
states map[States]string
owner States
curr States
}
// NewStateEngine returns a new engine with a default empty state
func NewStateEngine() *StateEngine {
return BuildStateEngine(nil)
}
// BuildStateEngine returns a new StateEngine instance set with a particular state as its owner
func BuildStateEngine(s States) *StateEngine {
es := StateEngine{
states: make(map[States]string),
owner: s,
}
return &es
}
// AddState adds a new state into the engine with the tag used to identify the state; if the address is an empty string then the address receives the tag as its value. Remember the address is a single address point e.g. home or files and not the full extended address e.g. .root.home.files
func (se *StateEngine) AddState(addr string) States {
sa := NewState()
se.add(addr, sa)
return sa
}
// UseState adds a state into the StateEngine with a specific tag, the state address point is still used in matching against it
func (se *StateEngine) UseState(addr string, s States) States {
if addr == "" {
addr = "."
}
se.add(addr, s)
return s
}
// ShallowState returns the current state of the engine and not the final state i.e with a state address of '.home.files' from its root, it will return State(:home) object
func (se *StateEngine) ShallowState() States {
if se.curr == nil {
return nil
}
return se.curr
}
// State returns the current last state of the engine with respect to any nested state that is with the state address of '.home.files', it will return State(:files) object
func (se *StateEngine) State() States {
co := se.curr
if co == nil {
// return se.owner
return nil
}
return co.Engine().State()
}
// Partial renders only the last state of the state address
func (se *StateEngine) Partial(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, true)
}
// All renders the full chain of states of the state address
func (se *StateEngine) All(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, false)
}
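// Illustrative sketch (not part of the original source; names are hypothetical):
// All activates every state along the resolved path, while Partial activates
// only the final state in the chain.
//
//	engine := NewStateEngine()
//	home := engine.AddState("home")
//	home.UseActivator(func() { /* render the home frame */ })
//	home.Engine().AddState("files").UseActivator(func() { /* render the file list */ })
//	_ = engine.All(".home.files")     // activates :home and :files
//	_ = engine.Partial(".home.files") // activates only :files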
// DeactivateAll deactivates all states connected to this engine
func (se *StateEngine) DeactivateAll() {
se.eachState(func(so States, tag string, _ func()) {
so.Deactivate()
})
}
func (se *StateEngine) eachState(fx func(States, string, func())) {
if fx == nil {
return
}
se.rw.RLock()
defer se.rw.RUnlock()
var stop bool
for so, addr := range se.states {
if stop {
break
}
fx(so, addr, func() {
stop = true
})
}
}
func (se *StateEngine) getAddr(s States) string {
se.rw.RLock()
defer se.rw.RUnlock()
return se.states[s]
}
func (se *StateEngine) get(addr string) States {
se.rw.RLock()
defer se.rw.RUnlock()
for sm, ao := range se.states {
if ao != addr {
continue
}
return sm
}
return nil
}
func (se *StateEngine) add(addr string, s States) {
se.rw.RLock()
_, ok := se.states[s]
se.rw.RUnlock()
if ok {
return
}
se.rw.Lock()
se.states[s] = addr
se.rw.Unlock()
}
// trajectory is the real engine which checks the path and passes down the StateStat to the sub-states and determines whether it's a full view or partial view
func (se *StateEngine) trajectory(points []string, partial bool) error {
subs, nosubs := se.diffSubs()
//are we out of points to walk through? if so then fire active and tell others to be inactive
if len(points) < 1 {
//deactivate all the non-subroot states
for _, ko := range nosubs {
ko.Deactivate()
}
//if the engine has a root state activate it since in doing so, it will activate its own children else manually activate the children
if se.owner != nil {
se.owner.Activate()
} else {
//activate all the subroot states first so they can
//be ready for the root. We call this here in case the StateEngine has no root state
for _, ko := range subs {
ko.Activate()
}
}
return nil
}
//cache the first point so we don't lose it
point := points[0]
var state = se.get(point)
if state == nil {
// for _, ko := range nosubs {
// if sko.acceptable(se.getAddr(ko), point, so) {
// state = ko
// break
// }
// }
//
// if state == nil {
return ErrStateNotFound
// }
}
//set this state as the current active state
se.curr = state
//shift the list one more bit for the points
points = points[1:]
//we pass down the points since that will handle the loadup downwards
err := state.Engine().trajectory(points, partial)
if err != nil {
return err
}
if !partial {
// //activate all the subroot states first so they can
// //do any population they want
// for _, ko := range subs {
// ko.Activate(so)
// }
if se.owner != nil {
se.owner.Activate()
}
}
return nil
}
// prepare prepares the state address into a list of walk points
func (se *StateEngine) prepare(addr string) ([]string, error) {
var points []string
var polen int
if addr != "." {
addr = excessStops.ReplaceAllString(addr, ".")
points = strings.Split(addr, ".")
polen = len(points)
} else {
polen = 1
points = []string{""}
}
//check if the length is below 1 then return appropriately
if polen < 1 {
return nil, ErrInvalidStateAddr
}
//if the first is an empty string, meaning the '.' root was supplied, then we shift so we just start from the first state point else we ignore and use the list as-is.
if points[0] == "" {
points = points[1:]
}
return points, nil
}
// diffOnlySubs returns all states with a '.' root state address
func (se *StateEngine) diffOnlySubs() []States {
var subs []States
se.eachState(func(so States, addr string, _ func()) {
if addr == "." {
subs = append(subs, so)
}
})
return subs
}
// diffOnlyNotSubs returns all states not with a '.' root state address
func (se *StateEngine) diffOnlyNotSubs() []States {
var subs []States
se.eachState(func(so States, addr string, _ func()) {
if addr != "." {
subs = append(subs, so)
}
})
return subs
}
func (se *StateEngine) diffSubs() ([]States, []States) {
var nosubs, subs []States
se.eachState(func(so States, addr string, _ func()) {
if addr == "." {
subs = append(subs, so)
} else {
nosubs = append(nosubs, so)
}
})
return subs, nosubs
}
| {
s.do.Lock()
s.deactivator = so
s.do.Unlock()
return s
} | identifier_body |
state.go | package views
import (
"errors"
"regexp"
"strings"
"sync"
"sync/atomic"
)
// excessStops is a regexp for matching more than one fullstop in the state address, which then gets replaced with a single fullstop
var excessStops = regexp.MustCompile(`\.+`)
// ErrStateNotFound is returned when the state address is inaccurate and a state was not found in the path
var ErrStateNotFound = errors.New("State Not Found")
// ErrInvalidStateAddr is returned when a wrong length or format state address is found
var ErrInvalidStateAddr = errors.New("Invalid State Address")
// StateResponse defines the function type used by state in response to a state call
type StateResponse func()
//StateValidator defines a function type used in the state validator process
type StateValidator func(string, string) bool
// States represent the interface defining a state type
type States interface {
Active() bool
// Tag() string
Engine() *StateEngine
Activate()
Deactivate()
UseActivator(StateResponse) States
UseDeactivator(StateResponse) States
OverrideValidator(StateValidator) States
acceptable(string, string) bool
}
// State represents a single state with a specific tag and address
// where the address is a single piece 'home' item in the '.home.files' style state address
type State struct {
// atomic bit used to indicate active state or inactive state
active int64
// tag represent the identifier key used in a super-state
// tag string
// activator and deactivator provide actions to occur when the state is set to be active
activator StateResponse
deactivator StateResponse
// validator represents an optional argument which also takes part in the validation process of a state, validating its rightness/wrongness against a given state address
optionalValidator StateValidator
//internal engine that allows sub-states from a root state
engine *StateEngine
// the parent state this is connected to
// parent States
vo, ro, do sync.Mutex
}
// NewState builds a new state with a tag and single address point e.g. home or files etc.
func NewState() *State {
ns := State{}
ns.engine = BuildStateEngine(&ns)
return &ns
}
// Active returns true/false if this state is active
func (s *State) Active() bool {
return atomic.LoadInt64(&s.active) == 1
}
// Engine returns the internal nested StateEngine
func (s *State) Engine() *StateEngine {
return s.engine
}
// UseDeactivator assigns the state a new deactivate response handler
func (s *State) UseDeactivator(so StateResponse) States {
s.do.Lock()
s.deactivator = so
s.do.Unlock()
return s
}
// UseActivator assigns the state a new active response handler
func (s *State) UseActivator(so StateResponse) States {
s.ro.Lock()
s.activator = so
s.ro.Unlock()
return s
}
// OverrideValidator assigns a validator to perform custom matching of the state
func (s *State) OverrideValidator(so StateValidator) States {
s.vo.Lock()
s.optionalValidator = so
s.vo.Unlock()
return s
}
// Activate activates the state
func (s *State) Activate() {
if s.active >= 1 {
return
}
atomic.StoreInt64(&s.active, 1)
subs := s.engine.diffOnlySubs()
//activate all the subroot states first so they can
//do any population they want
for _, ko := range subs {
ko.Activate()
}
s.ro.Lock()
if s.activator != nil {
s.activator()
}
s.ro.Unlock()
}
// Deactivate deactivates the state
func (s *State) Deactivate() {
if s.active < 1 {
return
}
atomic.StoreInt64(&s.active, 0)
s.do.Lock()
if s.deactivator != nil {
s.deactivator()
}
s.do.Unlock()
}
// acceptable checks if the state matches the current point
func (s *State) acceptable(addr string, point string) bool {
if s.optionalValidator == nil {
if addr == point {
return true
}
return false
}
s.vo.Lock()
state := s.optionalValidator(addr, point)
s.vo.Unlock()
return state
}
// StateEngine represents the engine that handles the state machine based operations for state-address based states
type StateEngine struct {
rw sync.RWMutex
states map[States]string
owner States
curr States
}
// NewStateEngine returns a new engine with a default empty state
func NewStateEngine() *StateEngine {
return BuildStateEngine(nil)
}
// BuildStateEngine returns a new StateEngine instance set with a particular state as its owner
func BuildStateEngine(s States) *StateEngine {
es := StateEngine{
states: make(map[States]string),
owner: s,
}
return &es
}
// AddState adds a new state into the engine with the tag used to identify the state; if the address is an empty string then the address receives the tag as its value. Remember the address is a single address point e.g. home or files and not the full extended address e.g. .root.home.files
func (se *StateEngine) AddState(addr string) States {
sa := NewState()
se.add(addr, sa)
return sa
}
// UseState adds a state into the StateEngine with a specific tag, the state address point is still used in matching against it
func (se *StateEngine) UseState(addr string, s States) States {
if addr == "" {
addr = "."
}
se.add(addr, s)
return s
}
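// Illustrative sketch (not part of the original source; names are hypothetical):
// a state registered at the "." root address acts as a sub-root: it is never
// matched against a walk point, but is activated whenever its owner activates.
//
//	root := NewState()
//	header := NewState()
//	root.Engine().UseState(".", header) // header activates alongside root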
// ShallowState returns the current state of the engine and not the final state i.e with a state address of '.home.files' from its root, it will return State(:home) object
func (se *StateEngine) ShallowState() States {
if se.curr == nil {
return nil
}
return se.curr
}
// State returns the current last state of the engine with respect to any nested state that is with the state address of '.home.files', it will return State(:files) object
func (se *StateEngine) State() States {
co := se.curr
if co == nil {
// return se.owner
return nil
}
return co.Engine().State()
}
// Partial renders only the last state of the state address
func (se *StateEngine) Partial(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, true)
}
// All renders the full chain of states of the state address
func (se *StateEngine) All(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, false)
}
// DeactivateAll deactivates all states connected to this engine
func (se *StateEngine) DeactivateAll() {
se.eachState(func(so States, tag string, _ func()) {
so.Deactivate()
})
}
func (se *StateEngine) eachState(fx func(States, string, func())) {
if fx == nil {
return
}
se.rw.RLock()
defer se.rw.RUnlock()
var stop bool
for so, addr := range se.states {
if stop {
break
}
fx(so, addr, func() { | func (se *StateEngine) getAddr(s States) string {
se.rw.RLock()
defer se.rw.RUnlock()
return se.states[s]
}
func (se *StateEngine) get(addr string) States {
se.rw.RLock()
defer se.rw.RUnlock()
for sm, ao := range se.states {
if ao != addr {
continue
}
return sm
}
return nil
}
func (se *StateEngine) add(addr string, s States) {
se.rw.RLock()
_, ok := se.states[s]
se.rw.RUnlock()
if ok {
return
}
se.rw.Lock()
se.states[s] = addr
se.rw.Unlock()
}
// trajectory is the real engine which checks the path and passes down the StateStat to the sub-states and determines whether it's a full view or partial view
func (se *StateEngine) trajectory(points []string, partial bool) error {
subs, nosubs := se.diffSubs()
//are we out of points to walk through? if so then fire active and tell others to be inactive
if len(points) < 1 {
//deactivate all the non-subroot states
for _, ko := range nosubs {
ko.Deactivate()
}
//if the engine has a root state activate it since in doing so, it will activate its own children else manually activate the children
if se.owner != nil {
se.owner.Activate()
} else {
//activate all the subroot states first so they can
//be ready for the root. We call this here in case the StateEngine has no root state
for _, ko := range subs {
ko.Activate()
}
}
return nil
}
//cache the first point so we don't lose it
point := points[0]
var state = se.get(point)
if state == nil {
// for _, ko := range nosubs {
// if sko.acceptable(se.getAddr(ko), point, so) {
// state = ko
// break
// }
// }
//
// if state == nil {
return ErrStateNotFound
// }
}
//set this state as the current active state
se.curr = state
//shift the list one more bit for the points
points = points[1:]
//we pass down the points since that will handle the loadup downwards
err := state.Engine().trajectory(points, partial)
if err != nil {
return err
}
if !partial {
// //activate all the subroot states first so they can
// //do any population they want
// for _, ko := range subs {
// ko.Activate(so)
// }
if se.owner != nil {
se.owner.Activate()
}
}
return nil
}
// prepare prepares the state address into a list of walk points
func (se *StateEngine) prepare(addr string) ([]string, error) {
var points []string
var polen int
if addr != "." {
addr = excessStops.ReplaceAllString(addr, ".")
points = strings.Split(addr, ".")
polen = len(points)
} else {
polen = 1
points = []string{""}
}
//check if the length is below 1 then return appropriately
if polen < 1 {
return nil, ErrInvalidStateAddr
}
//if the first is an empty string, meaning the '.' root was supplied, then we shift so we just start from the first state point else we ignore and use the list as-is.
if points[0] == "" {
points = points[1:]
}
return points, nil
}
// diffOnlySubs returns all states with a '.' root state address
func (se *StateEngine) diffOnlySubs() []States {
var subs []States
se.eachState(func(so States, addr string, _ func()) {
if addr == "." {
subs = append(subs, so)
}
})
return subs
}
// diffOnlyNotSubs returns all states not with a '.' root state address
func (se *StateEngine) diffOnlyNotSubs() []States {
var subs []States
se.eachState(func(so States, addr string, _ func()) {
if addr != "." {
subs = append(subs, so)
}
})
return subs
}
func (se *StateEngine) diffSubs() ([]States, []States) {
var nosubs, subs []States
se.eachState(func(so States, addr string, _ func()) {
if addr == "." {
subs = append(subs, so)
} else {
nosubs = append(nosubs, so)
}
})
return subs, nosubs
} | stop = true
})
}
}
| random_line_split |
mpsc.rs | //! A multi-producer, single-consumer, futures-aware, FIFO queue with back
//! pressure, for use communicating between tasks on the same thread.
//!
//! These queues are the same as those in `futures::sync`, except they're not
//! intended to be sent across threads.
use std::any::Any;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::error::Error;
use std::fmt;
use std::mem;
use std::rc::{Rc, Weak};
use task::{self, Task};
use future::Executor;
use sink::SendAll;
use resultstream::{self, Results};
use unsync::oneshot;
use {Async, AsyncSink, Future, Poll, StartSend, Sink, Stream};
/// Creates a bounded in-memory channel with buffered storage.
///
/// This method creates concrete implementations of the `Stream` and `Sink`
/// traits which can be used to communicate a stream of values between tasks
/// with backpressure. The channel capacity is exactly `buffer`. On average,
/// sending a message through this channel performs no dynamic allocation.
pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) {
channel_(Some(buffer))
}
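// Illustrative usage sketch (not part of the original source; follows the
// futures 0.1 combinator API, variable names are hypothetical): a bounded
// channel passes values between tasks on the same thread and applies
// backpressure once `buffer` items are queued.
//
//     let (tx, rx) = channel::<u32>(2);
//     let work = tx.send(1)                                   // Sink::send future
//         .map_err(|_| ())
//         .and_then(|_tx| rx.into_future().map_err(|_| ()))   // take the first item
//         .map(|(first, _rx)| assert_eq!(first, Some(1)));
//     // `work` is then driven to completion on a single-threaded executor.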
fn channel_<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) {
let shared = Rc::new(RefCell::new(Shared {
buffer: VecDeque::new(),
capacity: buffer,
blocked_senders: VecDeque::new(),
blocked_recv: None,
}));
let sender = Sender { shared: Rc::downgrade(&shared) };
let receiver = Receiver { state: State::Open(shared) };
(sender, receiver)
}
#[derive(Debug)]
struct Shared<T> {
buffer: VecDeque<T>,
capacity: Option<usize>,
blocked_senders: VecDeque<Task>,
blocked_recv: Option<Task>,
}
/// The transmission end of a channel.
///
/// This is created by the `channel` function.
#[derive(Debug)]
pub struct Sender<T> {
shared: Weak<RefCell<Shared<T>>>,
}
impl<T> Sender<T> {
fn do_send(&self, msg: T) -> StartSend<T, SendError<T>> {
let shared = match self.shared.upgrade() {
Some(shared) => shared,
None => return Err(SendError(msg)), // receiver was dropped
};
let mut shared = shared.borrow_mut();
match shared.capacity {
Some(capacity) if shared.buffer.len() == capacity => {
shared.blocked_senders.push_back(task::current());
Ok(AsyncSink::NotReady(msg))
}
_ => {
shared.buffer.push_back(msg);
if let Some(task) = shared.blocked_recv.take() {
task.notify();
}
Ok(AsyncSink::Ready)
}
}
}
}
impl<T> Clone for Sender<T> {
fn clone(&self) -> Self {
Sender { shared: self.shared.clone() }
}
}
impl<T> Sink for Sender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn | (&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.do_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
let shared = match self.shared.upgrade() {
Some(shared) => shared,
None => return,
};
// The number of existing `Weak` indicates if we are possibly the last
// `Sender`. If we are the last, we possibly must notify a blocked
// `Receiver`. `self.shared` is always one of the `Weak` to this shared
// data. Therefore the smallest possible Rc::weak_count(&shared) is 1.
if Rc::weak_count(&shared) == 1 {
if let Some(task) = shared.borrow_mut().blocked_recv.take() {
// Wake up receiver as its stream has ended
task.notify();
}
}
}
}
/// The receiving end of a channel which implements the `Stream` trait.
///
/// This is created by the `channel` function.
#[derive(Debug)]
pub struct Receiver<T> {
state: State<T>,
}
/// Possible states of a receiver. We're either Open (can receive more messages)
/// or we're closed with a list of messages we have left to receive.
#[derive(Debug)]
enum State<T> {
Open(Rc<RefCell<Shared<T>>>),
Closed(VecDeque<T>),
}
impl<T> Receiver<T> {
/// Closes the receiving half
///
/// This prevents any further messages from being sent on the channel while
/// still enabling the receiver to drain messages that are buffered.
pub fn close(&mut self) {
let (blockers, items) = match self.state {
State::Open(ref state) => {
let mut state = state.borrow_mut();
let items = mem::replace(&mut state.buffer, VecDeque::new());
let blockers = mem::replace(&mut state.blocked_senders, VecDeque::new());
(blockers, items)
}
State::Closed(_) => return,
};
self.state = State::Closed(items);
for task in blockers {
task.notify();
}
}
}
impl<T> Stream for Receiver<T> {
type Item = T;
type Error = ();
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
let me = match self.state {
State::Open(ref mut me) => me,
State::Closed(ref mut items) => {
return Ok(Async::Ready(items.pop_front()))
}
};
if let Some(shared) = Rc::get_mut(me) {
// All senders have been dropped, so drain the buffer and end the
// stream.
return Ok(Async::Ready(shared.borrow_mut().buffer.pop_front()));
}
let mut shared = me.borrow_mut();
if let Some(msg) = shared.buffer.pop_front() {
if let Some(task) = shared.blocked_senders.pop_front() {
drop(shared);
task.notify();
}
Ok(Async::Ready(Some(msg)))
} else {
shared.blocked_recv = Some(task::current());
Ok(Async::NotReady)
}
}
}
impl<T> Drop for Receiver<T> {
fn drop(&mut self) {
self.close();
}
}
/// The transmission end of an unbounded channel.
///
/// This is created by the `unbounded` function.
#[derive(Debug)]
pub struct UnboundedSender<T>(Sender<T>);
impl<T> Clone for UnboundedSender<T> {
fn clone(&self) -> Self {
UnboundedSender(self.0.clone())
}
}
impl<T> Sink for UnboundedSender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.0.start_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<'a, T> Sink for &'a UnboundedSender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.0.do_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<T> UnboundedSender<T> {
/// Sends the provided message along this channel.
///
/// This is an unbounded sender, so this function differs from `Sink::send`
/// by ensuring the return type reflects that the channel is always ready to
/// receive messages.
#[deprecated(note = "renamed to `unbounded_send`")]
#[doc(hidden)]
pub fn send(&self, msg: T) -> Result<(), SendError<T>> {
self.unbounded_send(msg)
}
/// Sends the provided message along this channel.
///
/// This is an unbounded sender, so this function differs from `Sink::send`
/// by ensuring the return type reflects that the channel is always ready to
/// receive messages.
pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> {
let shared = match self.0.shared.upgrade() {
Some(shared) => shared,
None => return Err(SendError(msg)),
};
let mut shared = shared.borrow_mut();
shared.buffer.push_back(msg);
if let Some(task) = shared.blocked_recv.take() {
drop(shared);
task.notify();
}
Ok(())
}
}
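// Illustrative sketch (not part of the original source; names are hypothetical):
// `unbounded_send` never applies backpressure, so it can be called outside of a
// task and only fails once the receiving end has been dropped.
//
//     let (tx, rx) = unbounded::<&str>();
//     tx.unbounded_send("hello").expect("receiver still alive");
//     drop(tx);                                          // all senders gone => stream ends
//     let items: Vec<_> = rx.collect().wait().unwrap();  // vec!["hello"]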
/// The receiving end of an unbounded channel.
///
/// This is created by the `unbounded` function.
#[derive(Debug)]
pub struct UnboundedReceiver<T>(Receiver<T>);
impl<T> UnboundedReceiver<T> {
/// Closes the receiving half
///
/// This prevents any further messages from being sent on the channel while
/// still enabling the receiver to drain messages that are buffered.
pub fn close(&mut self) {
self.0.close();
}
}
impl<T> Stream for UnboundedReceiver<T> {
type Item = T;
type Error = ();
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.0.poll()
}
}
/// Creates an unbounded in-memory channel with buffered storage.
///
/// Identical semantics to `channel`, except with no limit to buffer size.
pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) {
let (send, recv) = channel_(None);
(UnboundedSender(send), UnboundedReceiver(recv))
}
/// Error type for sending, used when the receiving end of a channel is
/// dropped
pub struct SendError<T>(T);
impl<T> fmt::Debug for SendError<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_tuple("SendError")
.field(&"...")
.finish()
}
}
impl<T> fmt::Display for SendError<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "send failed because receiver is gone")
}
}
impl<T: Any> Error for SendError<T> {
fn description(&self) -> &str {
"send failed because receiver is gone"
}
}
impl<T> SendError<T> {
/// Returns the message that was attempted to be sent but failed.
pub fn into_inner(self) -> T {
self.0
}
}
/// Handle returned from the `spawn` function.
///
/// This handle is a stream that proxies a stream on a separate `Executor`.
/// Created through the `mpsc::spawn` function, this handle will produce
/// the same values as the proxied stream, as they are produced in the executor,
/// and uses a limited buffer to exert back-pressure on the remote stream.
///
/// If this handle is dropped, then the stream will no longer be polled and is
/// scheduled to be dropped.
pub struct SpawnHandle<Item, Error> {
inner: Receiver<Result<Item, Error>>,
_cancel_tx: oneshot::Sender<()>,
}
/// Type of future which `Executor` instances must be able to execute for `spawn`.
pub struct Execute<S: Stream> {
inner: SendAll<Sender<Result<S::Item, S::Error>>, Results<S, SendError<Result<S::Item, S::Error>>>>,
cancel_rx: oneshot::Receiver<()>,
}
/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
/// returning a handle representing the remote stream.
///
/// The `stream` will be canceled if the `SpawnHandle` is dropped.
///
/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
/// When `stream` has additional items available, then the `SpawnHandle`
/// will have those same items available.
///
/// At most `buffer + 1` elements will be buffered at a time. If the buffer
/// is full, then `stream` will stop progressing until more space is available.
/// This allows the `SpawnHandle` to exert backpressure on the `stream`.
///
/// # Panics
///
/// This function will panic if `executor` is unable to spawn a `Future` containing
/// the entirety of the `stream`.
pub fn spawn<S, E>(stream: S, executor: &E, buffer: usize) -> SpawnHandle<S::Item, S::Error>
where S: Stream,
E: Executor<Execute<S>>
{
let (cancel_tx, cancel_rx) = oneshot::channel();
let (tx, rx) = channel(buffer);
executor.execute(Execute {
inner: tx.send_all(resultstream::new(stream)),
cancel_rx: cancel_rx,
}).expect("failed to spawn stream");
SpawnHandle {
inner: rx,
_cancel_tx: cancel_tx,
}
}
/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
/// returning a handle representing the remote stream, with unbounded buffering.
///
/// The `stream` will be canceled if the `SpawnHandle` is dropped.
///
/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
/// When `stream` has additional items available, then the `SpawnHandle`
/// will have those same items available.
///
/// An unbounded buffer is used, which means that values will be buffered as
/// fast as `stream` can produce them, without any backpressure. Therefore, if
/// `stream` is an infinite stream, it can use an unbounded amount of memory, and
/// potentially hog CPU resources. In particular, if `stream` is infinite
/// and doesn't ever yield (by returning `Async::NotReady` from `poll`), it
/// will result in an infinite loop.
///
/// # Panics
///
/// This function will panic if `executor` is unable to spawn a `Future` containing
/// the entirety of the `stream`.
pub fn spawn_unbounded<S,E>(stream: S, executor: &E) -> SpawnHandle<S::Item, S::Error>
where S: Stream,
E: Executor<Execute<S>>
{
let (cancel_tx, cancel_rx) = oneshot::channel();
let (tx, rx) = channel_(None);
executor.execute(Execute {
inner: tx.send_all(resultstream::new(stream)),
cancel_rx: cancel_rx,
}).expect("failed to spawn stream");
SpawnHandle {
inner: rx,
_cancel_tx: cancel_tx,
}
}
impl<I, E> Stream for SpawnHandle<I, E> {
type Item = I;
type Error = E;
fn poll(&mut self) -> Poll<Option<I>, E> {
match self.inner.poll() {
Ok(Async::Ready(Some(Ok(t)))) => Ok(Async::Ready(Some(t.into()))),
Ok(Async::Ready(Some(Err(e)))) => Err(e),
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(_) => unreachable!("mpsc::Receiver should never return Err"),
}
}
}
impl<I, E> fmt::Debug for SpawnHandle<I, E> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("SpawnHandle")
.finish()
}
}
impl<S: Stream> Future for Execute<S> {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<(), ()> {
match self.cancel_rx.poll() {
Ok(Async::NotReady) => (),
_ => return Ok(Async::Ready(())),
}
match self.inner.poll() {
Ok(Async::NotReady) => Ok(Async::NotReady),
_ => Ok(Async::Ready(()))
}
}
}
impl<S: Stream> fmt::Debug for Execute<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Execute")
.finish()
}
}
| start_send | identifier_name |
mpsc.rs | //! A multi-producer, single-consumer, futures-aware, FIFO queue with back
//! pressure, for use communicating between tasks on the same thread.
//!
//! These queues are the same as those in `futures::sync`, except they're not
//! intended to be sent across threads.
use std::any::Any;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::error::Error;
use std::fmt;
use std::mem;
use std::rc::{Rc, Weak};
use task::{self, Task};
use future::Executor;
use sink::SendAll;
use resultstream::{self, Results};
use unsync::oneshot;
use {Async, AsyncSink, Future, Poll, StartSend, Sink, Stream};
/// Creates a bounded in-memory channel with buffered storage.
///
/// This method creates concrete implementations of the `Stream` and `Sink`
/// traits which can be used to communicate a stream of values between tasks
/// with backpressure. The channel capacity is exactly `buffer`. On average,
/// sending a message through this channel performs no dynamic allocation.
pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) {
channel_(Some(buffer))
}
fn channel_<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) {
let shared = Rc::new(RefCell::new(Shared {
buffer: VecDeque::new(),
capacity: buffer,
blocked_senders: VecDeque::new(),
blocked_recv: None,
}));
let sender = Sender { shared: Rc::downgrade(&shared) };
let receiver = Receiver { state: State::Open(shared) };
(sender, receiver)
}
#[derive(Debug)]
struct Shared<T> {
buffer: VecDeque<T>,
capacity: Option<usize>,
blocked_senders: VecDeque<Task>,
blocked_recv: Option<Task>,
}
/// The transmission end of a channel.
///
/// This is created by the `channel` function.
#[derive(Debug)]
pub struct Sender<T> {
shared: Weak<RefCell<Shared<T>>>,
}
impl<T> Sender<T> {
fn do_send(&self, msg: T) -> StartSend<T, SendError<T>> {
let shared = match self.shared.upgrade() {
Some(shared) => shared,
None => return Err(SendError(msg)), // receiver was dropped
};
let mut shared = shared.borrow_mut();
match shared.capacity {
Some(capacity) if shared.buffer.len() == capacity => {
shared.blocked_senders.push_back(task::current());
Ok(AsyncSink::NotReady(msg))
}
_ => {
shared.buffer.push_back(msg);
if let Some(task) = shared.blocked_recv.take() {
task.notify();
}
Ok(AsyncSink::Ready)
}
}
}
}
impl<T> Clone for Sender<T> {
fn clone(&self) -> Self {
Sender { shared: self.shared.clone() }
}
}
impl<T> Sink for Sender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.do_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
let shared = match self.shared.upgrade() {
Some(shared) => shared,
None => return,
};
// The number of existing `Weak` indicates if we are possibly the last
// `Sender`. If we are the last, we possibly must notify a blocked
// `Receiver`. `self.shared` is always one of the `Weak` to this shared
// data. Therefore the smallest possible Rc::weak_count(&shared) is 1.
if Rc::weak_count(&shared) == 1 {
if let Some(task) = shared.borrow_mut().blocked_recv.take() {
// Wake up receiver as its stream has ended
task.notify();
}
}
}
}
/// The receiving end of a channel which implements the `Stream` trait.
///
/// This is created by the `channel` function.
#[derive(Debug)]
pub struct Receiver<T> {
state: State<T>,
}
/// Possible states of a receiver. We're either Open (can receive more messages)
/// or we're closed with a list of messages we have left to receive.
#[derive(Debug)]
enum State<T> {
Open(Rc<RefCell<Shared<T>>>),
Closed(VecDeque<T>),
}
impl<T> Receiver<T> {
/// Closes the receiving half
///
/// This prevents any further messages from being sent on the channel while
/// still enabling the receiver to drain messages that are buffered.
pub fn close(&mut self) {
let (blockers, items) = match self.state {
State::Open(ref state) => {
let mut state = state.borrow_mut();
let items = mem::replace(&mut state.buffer, VecDeque::new());
let blockers = mem::replace(&mut state.blocked_senders, VecDeque::new());
(blockers, items)
}
State::Closed(_) => return,
};
self.state = State::Closed(items);
for task in blockers {
task.notify();
}
}
}
impl<T> Stream for Receiver<T> {
type Item = T;
type Error = ();
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
let me = match self.state {
State::Open(ref mut me) => me,
State::Closed(ref mut items) => {
return Ok(Async::Ready(items.pop_front()))
}
};
if let Some(shared) = Rc::get_mut(me) {
// All senders have been dropped, so drain the buffer and end the
// stream.
return Ok(Async::Ready(shared.borrow_mut().buffer.pop_front()));
}
let mut shared = me.borrow_mut();
if let Some(msg) = shared.buffer.pop_front() {
if let Some(task) = shared.blocked_senders.pop_front() {
drop(shared);
task.notify();
}
Ok(Async::Ready(Some(msg)))
} else {
shared.blocked_recv = Some(task::current());
Ok(Async::NotReady)
}
}
}
impl<T> Drop for Receiver<T> {
fn drop(&mut self) {
self.close();
}
}
/// The transmission end of an unbounded channel.
///
/// This is created by the `unbounded` function.
#[derive(Debug)]
pub struct UnboundedSender<T>(Sender<T>);
impl<T> Clone for UnboundedSender<T> {
fn clone(&self) -> Self {
UnboundedSender(self.0.clone())
}
}
impl<T> Sink for UnboundedSender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.0.start_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<'a, T> Sink for &'a UnboundedSender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.0.do_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<T> UnboundedSender<T> {
/// Sends the provided message along this channel.
///
/// This is an unbounded sender, so this function differs from `Sink::send`
/// by ensuring the return type reflects that the channel is always ready to
/// receive messages.
#[deprecated(note = "renamed to `unbounded_send`")]
#[doc(hidden)]
pub fn send(&self, msg: T) -> Result<(), SendError<T>> {
self.unbounded_send(msg)
}
/// Sends the provided message along this channel.
///
/// This is an unbounded sender, so this function differs from `Sink::send`
/// by ensuring the return type reflects that the channel is always ready to
/// receive messages.
pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> {
let shared = match self.0.shared.upgrade() {
Some(shared) => shared,
None => return Err(SendError(msg)),
};
let mut shared = shared.borrow_mut();
shared.buffer.push_back(msg);
if let Some(task) = shared.blocked_recv.take() {
drop(shared);
task.notify();
}
Ok(())
}
}
/// The receiving end of an unbounded channel.
///
/// This is created by the `unbounded` function.
#[derive(Debug)]
pub struct UnboundedReceiver<T>(Receiver<T>);
impl<T> UnboundedReceiver<T> {
/// Closes the receiving half
///
/// This prevents any further messages from being sent on the channel while
/// still enabling the receiver to drain messages that are buffered.
pub fn close(&mut self) {
self.0.close();
}
}
impl<T> Stream for UnboundedReceiver<T> {
type Item = T;
type Error = ();
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.0.poll()
}
}
/// Creates an unbounded in-memory channel with buffered storage.
///
/// Identical semantics to `channel`, except with no limit to buffer size.
pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) {
let (send, recv) = channel_(None);
(UnboundedSender(send), UnboundedReceiver(recv))
}
/// Error type for sending, used when the receiving end of a channel is
/// dropped
pub struct SendError<T>(T);
impl<T> fmt::Debug for SendError<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_tuple("SendError")
.field(&"...")
.finish()
}
}
impl<T> fmt::Display for SendError<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "send failed because receiver is gone")
}
}
impl<T: Any> Error for SendError<T> {
fn description(&self) -> &str {
"send failed because receiver is gone"
}
}
impl<T> SendError<T> {
/// Returns the message that was attempted to be sent but failed.
pub fn into_inner(self) -> T {
self.0
}
}
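// Illustrative sketch (not part of the original source; names are hypothetical):
// recovering a value that could not be delivered because the receiver is gone.
//
//     let (tx, rx) = unbounded::<String>();
//     drop(rx);                                            // receiver dropped
//     let err = tx.unbounded_send("lost".into()).unwrap_err();
//     let value: String = err.into_inner();                // get the message back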
/// Handle returned from the `spawn` function.
///
/// This handle is a stream that proxies a stream on a separate `Executor`.
/// Created through the `mpsc::spawn` function, this handle will produce
/// the same values as the proxied stream, as they are produced in the executor,
/// and uses a limited buffer to exert back-pressure on the remote stream.
///
/// If this handle is dropped, then the stream will no longer be polled and is
/// scheduled to be dropped.
pub struct SpawnHandle<Item, Error> {
inner: Receiver<Result<Item, Error>>,
_cancel_tx: oneshot::Sender<()>,
}
/// Type of future which `Executor` instances must be able to execute for `spawn`.
pub struct Execute<S: Stream> {
inner: SendAll<Sender<Result<S::Item, S::Error>>, Results<S, SendError<Result<S::Item, S::Error>>>>,
cancel_rx: oneshot::Receiver<()>,
}
/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
/// returning a handle representing the remote stream.
///
/// The `stream` will be canceled if the `SpawnHandle` is dropped.
///
/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
/// When `stream` has additional items available, then the `SpawnHandle`
/// will have those same items available.
///
/// At most `buffer + 1` elements will be buffered at a time. If the buffer
/// is full, then `stream` will stop progressing until more space is available.
/// This allows the `SpawnHandle` to exert backpressure on the `stream`.
///
/// # Panics
///
/// This function will panic if `executor` is unable to spawn a `Future` containing
/// the entirety of the `stream`.
pub fn spawn<S, E>(stream: S, executor: &E, buffer: usize) -> SpawnHandle<S::Item, S::Error>
where S: Stream,
E: Executor<Execute<S>>
{
let (cancel_tx, cancel_rx) = oneshot::channel();
let (tx, rx) = channel(buffer);
executor.execute(Execute {
inner: tx.send_all(resultstream::new(stream)),
cancel_rx: cancel_rx,
}).expect("failed to spawn stream");
SpawnHandle {
inner: rx,
_cancel_tx: cancel_tx,
}
}
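// Illustrative sketch (not part of the original source): `exec` stands for any
// `Executor<Execute<S>>` implementation, e.g. a current-thread executor; the
// stream and variable names are hypothetical.
//
//     let source = futures::stream::iter_ok::<_, ()>(vec![1, 2, 3]);
//     let handle = spawn(source, &exec, 4); // at most 4 + 1 items buffered
//     // `handle` yields 1, 2, 3 as the proxied stream runs on `exec`;
//     // dropping `handle` cancels the spawned stream.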
/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
/// returning a handle representing the remote stream, with unbounded buffering.
///
/// The `stream` will be canceled if the `SpawnHandle` is dropped.
///
/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself. | ///
/// An unbounded buffer is used, which means that values will be buffered as
/// fast as `stream` can produce them, without any backpressure. Therefore, if
/// `stream` is an infinite stream, it can use an unbounded amount of memory, and
/// potentially hog CPU resources. In particular, if `stream` is infinite
/// and doesn't ever yield (by returning `Async::NotReady` from `poll`), it
/// will result in an infinite loop.
///
/// # Panics
///
/// This function will panic if `executor` is unable to spawn a `Future` containing
/// the entirety of the `stream`.
pub fn spawn_unbounded<S,E>(stream: S, executor: &E) -> SpawnHandle<S::Item, S::Error>
where S: Stream,
E: Executor<Execute<S>>
{
let (cancel_tx, cancel_rx) = oneshot::channel();
let (tx, rx) = channel_(None);
executor.execute(Execute {
inner: tx.send_all(resultstream::new(stream)),
cancel_rx: cancel_rx,
}).expect("failed to spawn stream");
SpawnHandle {
inner: rx,
_cancel_tx: cancel_tx,
}
}
impl<I, E> Stream for SpawnHandle<I, E> {
type Item = I;
type Error = E;
fn poll(&mut self) -> Poll<Option<I>, E> {
match self.inner.poll() {
Ok(Async::Ready(Some(Ok(t)))) => Ok(Async::Ready(Some(t.into()))),
Ok(Async::Ready(Some(Err(e)))) => Err(e),
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(_) => unreachable!("mpsc::Receiver should never return Err"),
}
}
}
impl<I, E> fmt::Debug for SpawnHandle<I, E> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("SpawnHandle")
.finish()
}
}
impl<S: Stream> Future for Execute<S> {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<(), ()> {
match self.cancel_rx.poll() {
Ok(Async::NotReady) => (),
_ => return Ok(Async::Ready(())),
}
match self.inner.poll() {
Ok(Async::NotReady) => Ok(Async::NotReady),
_ => Ok(Async::Ready(()))
}
}
}
impl<S: Stream> fmt::Debug for Execute<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Execute")
.finish()
}
} | /// When `stream` has additional items available, then the `SpawnHandle`
/// will have those same items available. | random_line_split |
mpsc.rs | //! A multi-producer, single-consumer, futures-aware, FIFO queue with back
//! pressure, for use communicating between tasks on the same thread.
//!
//! These queues are the same as those in `futures::sync`, except they're not
//! intended to be sent across threads.
use std::any::Any;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::error::Error;
use std::fmt;
use std::mem;
use std::rc::{Rc, Weak};
use task::{self, Task};
use future::Executor;
use sink::SendAll;
use resultstream::{self, Results};
use unsync::oneshot;
use {Async, AsyncSink, Future, Poll, StartSend, Sink, Stream};
/// Creates a bounded in-memory channel with buffered storage.
///
/// This method creates concrete implementations of the `Stream` and `Sink`
/// traits which can be used to communicate a stream of values between tasks
/// with backpressure. The channel capacity is exactly `buffer`. On average,
/// sending a message through this channel performs no dynamic allocation.
pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) {
channel_(Some(buffer))
}
fn channel_<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) {
let shared = Rc::new(RefCell::new(Shared {
buffer: VecDeque::new(),
capacity: buffer,
blocked_senders: VecDeque::new(),
blocked_recv: None,
}));
let sender = Sender { shared: Rc::downgrade(&shared) };
let receiver = Receiver { state: State::Open(shared) };
(sender, receiver)
}
#[derive(Debug)]
struct Shared<T> {
buffer: VecDeque<T>,
capacity: Option<usize>,
blocked_senders: VecDeque<Task>,
blocked_recv: Option<Task>,
}
/// The transmission end of a channel.
///
/// This is created by the `channel` function.
#[derive(Debug)]
pub struct Sender<T> {
shared: Weak<RefCell<Shared<T>>>,
}
impl<T> Sender<T> {
fn do_send(&self, msg: T) -> StartSend<T, SendError<T>> {
let shared = match self.shared.upgrade() {
Some(shared) => shared,
None => return Err(SendError(msg)), // receiver was dropped
};
let mut shared = shared.borrow_mut();
match shared.capacity {
Some(capacity) if shared.buffer.len() == capacity => {
shared.blocked_senders.push_back(task::current());
Ok(AsyncSink::NotReady(msg))
}
_ => {
shared.buffer.push_back(msg);
if let Some(task) = shared.blocked_recv.take() {
task.notify();
}
Ok(AsyncSink::Ready)
}
}
}
}
impl<T> Clone for Sender<T> {
fn clone(&self) -> Self {
Sender { shared: self.shared.clone() }
}
}
impl<T> Sink for Sender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.do_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
let shared = match self.shared.upgrade() {
Some(shared) => shared,
None => return,
};
// The number of existing `Weak` indicates if we are possibly the last
// `Sender`. If we are the last, we possibly must notify a blocked
// `Receiver`. `self.shared` is always one of the `Weak` to this shared
// data. Therefore the smallest possible Rc::weak_count(&shared) is 1.
if Rc::weak_count(&shared) == 1 {
if let Some(task) = shared.borrow_mut().blocked_recv.take() {
// Wake up receiver as its stream has ended
task.notify();
}
}
}
}
/// The receiving end of a channel which implements the `Stream` trait.
///
/// This is created by the `channel` function.
#[derive(Debug)]
pub struct Receiver<T> {
state: State<T>,
}
/// Possible states of a receiver. We're either Open (can receive more messages)
/// or we're closed with a list of messages we have left to receive.
#[derive(Debug)]
enum State<T> {
Open(Rc<RefCell<Shared<T>>>),
Closed(VecDeque<T>),
}
impl<T> Receiver<T> {
/// Closes the receiving half
///
/// This prevents any further messages from being sent on the channel while
/// still enabling the receiver to drain messages that are buffered.
pub fn close(&mut self) {
let (blockers, items) = match self.state {
State::Open(ref state) => {
let mut state = state.borrow_mut();
let items = mem::replace(&mut state.buffer, VecDeque::new());
let blockers = mem::replace(&mut state.blocked_senders, VecDeque::new());
(blockers, items)
}
State::Closed(_) => return,
};
self.state = State::Closed(items);
for task in blockers {
task.notify();
}
}
}
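// Illustrative sketch (not part of the original source; names are hypothetical):
// closing the receiver rejects further sends, but messages already buffered can
// still be drained before the stream ends.
//
//     let (mut tx, mut rx) = channel::<u8>(4);
//     let _ = tx.start_send(1); // buffer one value (Sink::start_send)
//     rx.close();               // no further messages will be accepted
//     // rx.poll() => Ok(Async::Ready(Some(1))), then Ok(Async::Ready(None))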
impl<T> Stream for Receiver<T> {
type Item = T;
type Error = ();
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
let me = match self.state {
State::Open(ref mut me) => me,
State::Closed(ref mut items) => {
return Ok(Async::Ready(items.pop_front()))
}
};
if let Some(shared) = Rc::get_mut(me) {
// All senders have been dropped, so drain the buffer and end the
// stream.
return Ok(Async::Ready(shared.borrow_mut().buffer.pop_front()));
}
let mut shared = me.borrow_mut();
if let Some(msg) = shared.buffer.pop_front() {
if let Some(task) = shared.blocked_senders.pop_front() {
drop(shared);
task.notify();
}
Ok(Async::Ready(Some(msg)))
} else {
shared.blocked_recv = Some(task::current());
Ok(Async::NotReady)
}
}
}
impl<T> Drop for Receiver<T> {
fn drop(&mut self) {
self.close();
}
}
/// The transmission end of an unbounded channel.
///
/// This is created by the `unbounded` function.
#[derive(Debug)]
pub struct UnboundedSender<T>(Sender<T>);
impl<T> Clone for UnboundedSender<T> {
fn clone(&self) -> Self {
UnboundedSender(self.0.clone())
}
}
impl<T> Sink for UnboundedSender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.0.start_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<'a, T> Sink for &'a UnboundedSender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.0.do_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<T> UnboundedSender<T> {
/// Sends the provided message along this channel.
///
/// This is an unbounded sender, so this function differs from `Sink::send`
/// by ensuring the return type reflects that the channel is always ready to
/// receive messages.
#[deprecated(note = "renamed to `unbounded_send`")]
#[doc(hidden)]
pub fn send(&self, msg: T) -> Result<(), SendError<T>> {
self.unbounded_send(msg)
}
/// Sends the provided message along this channel.
///
/// This is an unbounded sender, so this function differs from `Sink::send`
/// by ensuring the return type reflects that the channel is always ready to
/// receive messages.
pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> {
let shared = match self.0.shared.upgrade() {
Some(shared) => shared,
None => return Err(SendError(msg)),
};
let mut shared = shared.borrow_mut();
shared.buffer.push_back(msg);
if let Some(task) = shared.blocked_recv.take() {
drop(shared);
task.notify();
}
Ok(())
}
}
/// The receiving end of an unbounded channel.
///
/// This is created by the `unbounded` function.
#[derive(Debug)]
pub struct UnboundedReceiver<T>(Receiver<T>);
impl<T> UnboundedReceiver<T> {
/// Closes the receiving half
///
/// This prevents any further messages from being sent on the channel while
/// still enabling the receiver to drain messages that are buffered.
pub fn close(&mut self) |
}
impl<T> Stream for UnboundedReceiver<T> {
type Item = T;
type Error = ();
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.0.poll()
}
}
/// Creates an unbounded in-memory channel with buffered storage.
///
/// Identical semantics to `channel`, except with no limit to buffer size.
pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) {
let (send, recv) = channel_(None);
(UnboundedSender(send), UnboundedReceiver(recv))
}
/// Error type for sending, used when the receiving end of a channel is
/// dropped
pub struct SendError<T>(T);
impl<T> fmt::Debug for SendError<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_tuple("SendError")
.field(&"...")
.finish()
}
}
impl<T> fmt::Display for SendError<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "send failed because receiver is gone")
}
}
impl<T: Any> Error for SendError<T> {
fn description(&self) -> &str {
"send failed because receiver is gone"
}
}
impl<T> SendError<T> {
/// Returns the message that was attempted to be sent but failed.
pub fn into_inner(self) -> T {
self.0
}
}
/// Handle returned from the `spawn` function.
///
/// This handle is a stream that proxies a stream on a separate `Executor`.
/// Created through the `mpsc::spawn` function, this handle will produce
/// the same values as the proxied stream, as they are produced in the executor,
/// and uses a limited buffer to exert back-pressure on the remote stream.
///
/// If this handle is dropped, then the stream will no longer be polled and is
/// scheduled to be dropped.
pub struct SpawnHandle<Item, Error> {
inner: Receiver<Result<Item, Error>>,
_cancel_tx: oneshot::Sender<()>,
}
/// Type of future which `Executor` instances must be able to execute for `spawn`.
pub struct Execute<S: Stream> {
inner: SendAll<Sender<Result<S::Item, S::Error>>, Results<S, SendError<Result<S::Item, S::Error>>>>,
cancel_rx: oneshot::Receiver<()>,
}
/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
/// returning a handle representing the remote stream.
///
/// The `stream` will be canceled if the `SpawnHandle` is dropped.
///
/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
/// When `stream` has additional items available, then the `SpawnHandle`
/// will have those same items available.
///
/// At most `buffer + 1` elements will be buffered at a time. If the buffer
/// is full, then `stream` will stop progressing until more space is available.
/// This allows the `SpawnHandle` to exert backpressure on the `stream`.
///
/// # Panics
///
/// This function will panic if `executor` is unable to spawn a `Future` containing
/// the entirety of the `stream`.
pub fn spawn<S, E>(stream: S, executor: &E, buffer: usize) -> SpawnHandle<S::Item, S::Error>
where S: Stream,
E: Executor<Execute<S>>
{
let (cancel_tx, cancel_rx) = oneshot::channel();
let (tx, rx) = channel(buffer);
executor.execute(Execute {
inner: tx.send_all(resultstream::new(stream)),
cancel_rx: cancel_rx,
}).expect("failed to spawn stream");
SpawnHandle {
inner: rx,
_cancel_tx: cancel_tx,
}
}
/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
/// returning a handle representing the remote stream, with unbounded buffering.
///
/// The `stream` will be canceled if the `SpawnHandle` is dropped.
///
/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
/// When `stream` has additional items available, then the `SpawnHandle`
/// will have those same items available.
///
/// An unbounded buffer is used, which means that values will be buffered as
/// fast as `stream` can produce them, without any backpressure. Therefore, if
/// `stream` is an infinite stream, it can use an unbounded amount of memory, and
/// potentially hog CPU resources. In particular, if `stream` is infinite
/// and doesn't ever yield (by returning `Async::NotReady` from `poll`), it
/// will result in an infinite loop.
///
/// # Panics
///
/// This function will panic if `executor` is unable spawn a `Future` containing
/// the entirety of the `stream`.
pub fn spawn_unbounded<S,E>(stream: S, executor: &E) -> SpawnHandle<S::Item, S::Error>
where S: Stream,
E: Executor<Execute<S>>
{
let (cancel_tx, cancel_rx) = oneshot::channel();
let (tx, rx) = channel_(None);
executor.execute(Execute {
inner: tx.send_all(resultstream::new(stream)),
cancel_rx: cancel_rx,
}).expect("failed to spawn stream");
SpawnHandle {
inner: rx,
_cancel_tx: cancel_tx,
}
}
impl<I, E> Stream for SpawnHandle<I, E> {
type Item = I;
type Error = E;
fn poll(&mut self) -> Poll<Option<I>, E> {
match self.inner.poll() {
Ok(Async::Ready(Some(Ok(t)))) => Ok(Async::Ready(Some(t.into()))),
Ok(Async::Ready(Some(Err(e)))) => Err(e),
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(_) => unreachable!("mpsc::Receiver should never return Err"),
}
}
}
impl<I, E> fmt::Debug for SpawnHandle<I, E> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("SpawnHandle")
.finish()
}
}
impl<S: Stream> Future for Execute<S> {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<(), ()> {
match self.cancel_rx.poll() {
Ok(Async::NotReady) => (),
_ => return Ok(Async::Ready(())),
}
match self.inner.poll() {
Ok(Async::NotReady) => Ok(Async::NotReady),
_ => Ok(Async::Ready(()))
}
}
}
impl<S: Stream> fmt::Debug for Execute<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Execute")
.finish()
}
}
| {
self.0.close();
} | identifier_body |
drafts.go | /*
* Copyright 2018-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package drafts
import (
"archive/tar"
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"text/template"
"github.com/BurntSushi/toml"
"github.com/buildpacks/libcnb"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/google"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-github/v43/github"
"github.com/paketo-buildpacks/libpak"
"github.com/paketo-buildpacks/pipeline-builder/actions"
"github.com/pkg/errors"
)
type Payload struct {
PrimaryBuildpack Buildpack
Builder Builder
NestedBuildpacks []Buildpack
Release Release
}
type Buildpack struct {
libcnb.Buildpack
OrderGroups []libcnb.BuildpackOrder `toml:"order"`
Dependencies []libpak.BuildpackDependency
}
type Builder struct {
Description string
Buildpacks []struct {
URI string
}
OrderGroups []libcnb.BuildpackOrder `toml:"order"`
Stack BuilderStack `toml:"stack"`
}
type BuilderStack struct {
ID string `toml:"id"`
BuildImage string `toml:"build-image"`
RunImage string `toml:"run-image"`
}
func (b Builder) Flatten() []string {
tmp := []string{}
for _, bp := range b.Buildpacks {
tmp = append(tmp, strings.TrimPrefix(bp.URI, "docker://"))
}
return tmp
}
type Package struct {
Dependencies []struct {
URI string `toml:"uri"`
}
}
func (b Package) Flatten() []string {
tmp := []string{}
for _, bp := range b.Dependencies {
tmp = append(tmp, strings.TrimPrefix(bp.URI, "docker://"))
}
return tmp
}
type Release struct {
ID string
Name string
Body string
Tag string
}
//go:generate mockery --name BuildpackLoader --case=underscore
type BuildpackLoader interface {
LoadBuildpack(id string) (Buildpack, error)
LoadBuildpacks(uris []string) ([]Buildpack, error)
}
type Drafter struct {
Loader BuildpackLoader
}
func (d Drafter) BuildAndWriteReleaseToFileDraftFromTemplate(outputPath, templateContents string, context interface{}) error {
fp, err := os.Create(outputPath)
if err != nil {
return fmt.Errorf("unable to create file %s\n%w", outputPath, err)
}
defer fp.Close()
return d.BuildAndWriteReleaseDraftFromTemplate(fp, templateContents, context)
}
func (d Drafter) BuildAndWriteReleaseDraftFromTemplate(output io.Writer, templateContents string, context interface{}) error {
tmpl, err := template.New("draft").Parse(templateContents)
if err != nil {
return fmt.Errorf("unable to parse template %q\n%w", templateContents, err)
}
err = tmpl.Execute(output, context)
if err != nil {
return fmt.Errorf("unable to execute template %q\n%w", templateContents, err)
}
return nil
}
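// For orientation, a release-notes template handed to the drafter might
// reference the Payload fields roughly like this (illustrative sketch only,
// not a template shipped with this project):
//
//	## {{ .PrimaryBuildpack.Info.Name }} {{ .Release.Tag }}
//
//	{{ range .PrimaryBuildpack.Dependencies }}* {{ .Name }} {{ .Version }}
//	{{ end }}
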
func (d Drafter) CreatePayload(inputs actions.Inputs, buildpackPath string) (Payload, error) {
release := Release{
ID: inputs["release_id"],
Name: inputs["release_name"],
Body: inputs["release_body"],
Tag: inputs["release_tag_name"],
}
builder, err := loadBuilderTOML(buildpackPath)
if err != nil {
return Payload{}, err
}
if builder != nil {
bps, err := d.Loader.LoadBuildpacks(builder.Flatten())
if err != nil {
return Payload{}, fmt.Errorf("unable to load buildpacks\n%w", err)
}
return Payload{
PrimaryBuildpack: Buildpack{},
Builder: *builder,
NestedBuildpacks: bps,
Release: release,
}, nil
}
bp, err := loadBuildpackTOMLFromFile(buildpackPath)
if err != nil {
return Payload{}, err
}
pkg, err := loadPackage(buildpackPath)
if err != nil {
return Payload{}, err
}
if bp != nil && pkg == nil { // component
return Payload{
PrimaryBuildpack: *bp,
Release: release,
}, nil
} else if bp != nil && pkg != nil { // composite
bps, err := d.Loader.LoadBuildpacks(pkg.Flatten())
if err != nil {
return Payload{}, fmt.Errorf("unable to load buildpacks\n%w", err)
}
return Payload{
NestedBuildpacks: bps,
PrimaryBuildpack: *bp,
Release: release,
}, nil
}
return Payload{}, fmt.Errorf("unable to generate payload, need buildpack.toml or buildpack.toml + package.toml or builder.toml")
}
func loadBuildpackTOMLFromFile(buildpackPath string) (*Buildpack, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "buildpack.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read buildpack toml\n%w", err)
}
return loadBuildpackTOML(rawTOML)
}
func loadBuildpackTOML(TOML []byte) (*Buildpack, error) {
bp := &Buildpack{}
if err := toml.Unmarshal(TOML, bp); err != nil {
return nil, fmt.Errorf("unable to parse buildpack TOML\n%w", err)
}
sort.Slice(bp.Stacks, func(i, j int) bool {
return strings.ToLower(bp.Stacks[i].ID) < strings.ToLower(bp.Stacks[j].ID)
})
if deps, found := bp.Metadata["dependencies"]; found {
if depList, ok := deps.([]map[string]interface{}); ok {
for _, dep := range depList {
bpDep := libpak.BuildpackDependency{
ID: asString(dep, "id"),
Name: asString(dep, "name"),
Version: asString(dep, "version"),
URI: asString(dep, "uri"),
SHA256: asString(dep, "sha256"),
PURL: asString(dep, "purl"),
}
if stacks, ok := dep["stacks"].([]interface{}); ok {
for _, stack := range stacks {
if stack, ok := stack.(string); ok {
bpDep.Stacks = append(bpDep.Stacks, stack)
}
}
}
if cpes, ok := dep["cpes"].([]interface{}); ok {
for _, cpe := range cpes {
if cpe, ok := cpe.(string); ok {
bpDep.CPEs = append(bpDep.CPEs, cpe)
}
}
}
if licenses, ok := dep["licenses"].([]map[string]interface{}); ok {
for _, license := range licenses {
bpDep.Licenses = append(bpDep.Licenses, libpak.BuildpackDependencyLicense{
Type: asString(license, "type"),
URI: asString(license, "uri"),
})
}
}
bp.Dependencies = append(bp.Dependencies, bpDep)
}
} else {
return nil, fmt.Errorf("unable to read dependencies from %v", bp.Metadata)
}
sort.Slice(bp.Dependencies, func(i, j int) bool {
return strings.ToLower(bp.Dependencies[i].Name) < strings.ToLower(bp.Dependencies[j].Name)
})
}
return bp, nil
}
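// loadBuildpackTOML (above) expects dependency metadata shaped roughly like
// the following buildpack.toml fragment (illustrative values only):
//
//	[[metadata.dependencies]]
//	id = "example-dep"
//	name = "Example Dependency"
//	version = "1.2.3"
//	uri = "https://example.invalid/example-dep-1.2.3.tgz"
//	sha256 = "0000000000000000000000000000000000000000000000000000000000000000"
//	purl = "pkg:generic/example-dep@1.2.3"
//	stacks = ["io.buildpacks.stacks.bionic"]
//	cpes = ["cpe:2.3:a:example:example-dep:1.2.3:*:*:*:*:*:*:*"]
//
//	[[metadata.dependencies.licenses]]
//	type = "Apache-2.0"
//	uri = "https://example.invalid/LICENSE"
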
func asString(m map[string]interface{}, key string) string {
if tmp, ok := m[key].(string); ok {
return tmp
}
return ""
}
func loadPackage(buildpackPath string) (*Package, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "package.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read package toml\n%w", err)
}
pkg := &Package{}
if err := toml.Unmarshal(rawTOML, pkg); err != nil {
return nil, fmt.Errorf("unable to parse package TOML\n%w", err)
}
return pkg, nil
}
func loadBuilderTOML(buildpackPath string) (*Builder, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "builder.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read builder toml\n%w", err)
}
builder := &Builder{}
if err := toml.Unmarshal(rawTOML, builder); err != nil {
return nil, fmt.Errorf("unable to parse builder TOML\n%w", err)
}
return builder, nil
}
type GithubBuildpackLoader struct {
GithubClient *github.Client
RegexMappers []string
}
func (g GithubBuildpackLoader) LoadBuildpacks(uris []string) ([]Buildpack, error) {
buildpacks := []Buildpack{}
for _, uri := range uris {
bp, err := g.LoadBuildpack(uri)
if err != nil {
return []Buildpack{}, fmt.Errorf("unable to process %s\n%w", uri, err)
}
buildpacks = append(buildpacks, bp)
}
sort.Slice(buildpacks, func(i, j int) bool {
return strings.ToLower(buildpacks[i].Info.Name) < strings.ToLower(buildpacks[j].Info.Name)
})
return buildpacks, nil
}
func (g GithubBuildpackLoader) LoadBuildpack(imgUri string) (Buildpack, error) {
uris, err := g.mapURIs(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map URIs\n%w", err)
}
origOrg, origRepo, _, err := parseRepoOrgVersionFromImageUri(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse original image uri\n%w", err)
}
for _, uri := range uris {
org, repo, version, err := parseRepoOrgVersionFromImageUri(uri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse image uri\n%w", err)
}
paths, err := g.mapBuildpackTOMLPath(origOrg, origRepo)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map buildpack toml path\n%w", err)
}
if regexp.MustCompile(`^\d+\.\d+\.\d+$`).MatchString(version) {
version = fmt.Sprintf("v%s", version)
}
for _, path := range paths {
tomlBytes, err := g.fetchTOMLFile(org, repo, version, path)
if err != nil {
var apiErr *github.ErrorResponse
if errors.As(err, &apiErr) && apiErr.Response.StatusCode == 404 {
fmt.Println("skipping 404", apiErr)
continue
}
return Buildpack{}, fmt.Errorf("unable to fetch toml\n%w", err)
}
if len(tomlBytes) > 0 {
bp, err := loadBuildpackTOML(tomlBytes)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load buildpack toml from image\n%w", err)
}
bp.Info.Version = version
return *bp, nil
}
}
}
return Buildpack{}, fmt.Errorf("unable to load buildpack.toml for %s", imgUri)
}
func (g GithubBuildpackLoader) LoadPackages(imgUri string) (Package, error) {
uris, err := g.mapURIs(imgUri)
if err != nil {
return Package{}, fmt.Errorf("unable to map URIs\n%w", err)
}
for _, uri := range uris {
uriPattern := regexp.MustCompile(`.*\/(.*)\/(.*):(.*)`)
parts := uriPattern.FindStringSubmatch(uri)
if len(parts) != 4 {
return Package{}, fmt.Errorf("unable to parse %s, found %q", uri, parts)
}
org := parts[1]
repo := parts[2]
version := parts[3]
if regexp.MustCompile(`\d+\.\d+\.\d+`).MatchString(version) {
version = fmt.Sprintf("v%s", version)
}
tomlBytes, err := g.fetchTOMLFile(org, repo, version, "/package.toml")
if err != nil {
var apiErr *github.ErrorResponse
if errors.As(err, &apiErr) && apiErr.Response.StatusCode == 404 {
fmt.Println("skipping 404", apiErr)
continue
}
return Package{}, fmt.Errorf("unable to fetch toml\n%w", err)
}
if len(tomlBytes) > 0 {
pkg := &Package{}
if err := toml.Unmarshal(tomlBytes, pkg); err != nil {
return Package{}, fmt.Errorf("unable to parse package TOML\n%w", err)
}
return *pkg, nil
}
}
return Package{}, fmt.Errorf("unable to load package.toml for %s", imgUri)
}
func (g GithubBuildpackLoader) mapURIs(uri string) ([]string, error) {
possibilities := []string{uri}
for _, mapper := range g.RegexMappers {
if len(mapper) <= 3 {
continue
}
splitCh := string(mapper[0])
parts := strings.SplitN(mapper[1:len(mapper)-1], splitCh, 2)
expr, err := regexp.Compile(parts[0])
if err != nil {
return []string{}, fmt.Errorf("unable to parse regex %s\n%w", mapper, err)
}
possibilities = append(possibilities, expr.ReplaceAllString(uri, parts[1]))
}
return possibilities, nil
}
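// Note on mapURIs (above): each RegexMappers entry is a delimiter-wrapped
// pattern/replacement pair whose first character is the delimiter. A
// hypothetical entry such as
//
//	|^gcr.io/example-org/(.*)$|ghcr.io/example-org/$1|
//
// adds a ghcr.io candidate for a matching gcr.io URI, while the original URI
// is always kept as the first possibility.
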
func (g GithubBuildpackLoader) mapBuildpackTOMLPath(org, repo string) ([]string, error) {
paths := []string{
"/buildpack.toml",
}
org = strings.ToUpper(strings.ReplaceAll(org, "-", "_"))
repo = strings.ToUpper(strings.ReplaceAll(repo, "-", "_"))
if p, found := os.LookupEnv(fmt.Sprintf("BP_TOML_PATH_%s_%s", org, repo)); found {
if !strings.HasSuffix(p, "/buildpack.toml") {
p = fmt.Sprintf("%s/buildpack.toml", p)
}
return []string{p}, nil
}
return paths, nil
}
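// Note on mapBuildpackTOMLPath (above): a non-default buildpack.toml location
// can be supplied per repository through an environment variable derived from
// the org and repo names, e.g. (hypothetical values)
//
//	BP_TOML_PATH_EXAMPLE_ORG_EXAMPLE_REPO=/sub/dir
//
// which resolves to /sub/dir/buildpack.toml for example-org/example-repo.
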
func (g GithubBuildpackLoader) fetchTOMLFile(org, repo, version, path string) ([]byte, error) {
fmt.Println("Fetching from org:", org, "repo:", repo, "version:", version, "path:", path)
body, _, err := g.GithubClient.Repositories.DownloadContents(
context.Background(),
org,
repo,
path,
&github.RepositoryContentGetOptions{Ref: version})
if err != nil {
return []byte{}, fmt.Errorf("unable to download file\n%w", err)
}
buf := &bytes.Buffer{}
_, err = io.Copy(buf, body)
if err != nil {
return []byte{}, fmt.Errorf("unable to read downloaded file\n%w", err)
}
return buf.Bytes(), nil
}
type RegistryBuildpackLoader struct {
GCRToken string
}
func (r RegistryBuildpackLoader) LoadBuildpacks(uris []string) ([]Buildpack, error) {
buildpacks := []Buildpack{}
for _, uri := range uris {
fmt.Println("Loading buildpack info from:", uri)
bp, err := r.LoadBuildpack(uri)
if err != nil {
return []Buildpack{}, fmt.Errorf("unable to process %s\n%w", uri, err)
}
buildpacks = append(buildpacks, bp)
}
return buildpacks, nil
}
func (r RegistryBuildpackLoader) LoadBuildpack(uri string) (Buildpack, error) {
// Permissions are octal: 0o1777 gives the conventional sticky, world-writable /tmp
// (a bare decimal 1777 here would set unintended mode bits).
if err := os.MkdirAll("/tmp", 0o1777); err != nil {
return Buildpack{}, fmt.Errorf("unable to create /tmp\n%w", err)
}
tarFile, err := ioutil.TempFile("/tmp", "tarfiles")
if err != nil {
return Buildpack{}, fmt.Errorf("unable to create tempfile\n%w", err)
}
defer os.Remove(tarFile.Name())
err = r.loadBuildpackImage(uri, tarFile)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load %s\n%w", uri, err)
}
_, err = tarFile.Seek(0, 0)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to reset file pointer\n%w", err)
}
bpTOML, err := readBuildpackTOML(tarFile)
if err != nil {
return Buildpack{}, err
}
bp, err := loadBuildpackTOML(bpTOML)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load buildpack toml from image\n%w", err)
}
| return *bp, nil
}
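// Illustrative use of RegistryBuildpackLoader (placeholder names; an empty
// GCRToken falls back to anonymous pulls):
//
//	loader := RegistryBuildpackLoader{GCRToken: os.Getenv("GCR_SERVICE_ACCOUNT_JSON")}
//	bp, err := loader.LoadBuildpack("gcr.io/example-org/example-bp:1.2.3")
//	if err != nil {
//		// handle the error
//	}
//	fmt.Println(bp.Info.Name, bp.Info.Version)
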
func (r RegistryBuildpackLoader) loadBuildpackImage(ref string, to io.Writer) error {
reference, err := name.ParseReference(ref)
if err != nil {
return fmt.Errorf("unable to parse reference for existing buildpack tag\n%w", err)
}
auth := authn.Anonymous
if r.GCRToken != "" {
auth = google.NewJSONKeyAuthenticator(r.GCRToken)
}
img, err := remote.Image(reference, remote.WithAuth(auth))
if err != nil {
return fmt.Errorf("unable to fetch remote image\n%w", err)
}
layers, err := img.Layers()
if err != nil {
return fmt.Errorf("unable to fetch layer\n%w", err)
}
if len(layers) == 1 {
l := layers[0]
rc, err := l.Uncompressed()
if err != nil {
return fmt.Errorf("unable to get uncompressed reader\n%w", err)
}
_, err = io.Copy(to, rc)
return err
}
fs := mutate.Extract(img)
_, err = io.Copy(to, fs)
return err
}
func readBuildpackTOML(tarFile *os.File) ([]byte, error) {
t := tar.NewReader(tarFile)
for {
f, err := t.Next()
if err != nil && err == io.EOF {
break
} else if err != nil {
return []byte{}, fmt.Errorf("unable to read TAR file\n%w", err)
}
if strings.HasSuffix(f.Name, "buildpack.toml") {
info := f.FileInfo()
if info.IsDir() || (info.Mode()&os.ModeSymlink != 0) {
return []byte{}, fmt.Errorf("unable to read buildpack.toml, unexpected file type (directory or symlink)")
}
buf := &bytes.Buffer{}
_, err := io.Copy(buf, t)
if err != nil {
return []byte{}, fmt.Errorf("unable to read buildpack.toml\n%w", err)
}
return buf.Bytes(), nil
}
}
return []byte{}, fmt.Errorf("unable to find buildpack.toml in image")
}
func parseRepoOrgVersionFromImageUri(imgUri string) (string, string, string, error) {
uriPattern := regexp.MustCompile(`.*\/(.*)\/(.*):(.*)`)
parts := uriPattern.FindStringSubmatch(imgUri)
if len(parts) != 4 {
return "", "", "", fmt.Errorf("unable to parse %s, found %q", imgUri, parts)
}
return parts[1], parts[2], parts[3], nil
} | random_line_split |
|
drafts.go | /*
* Copyright 2018-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package drafts
import (
"archive/tar"
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"text/template"
"github.com/BurntSushi/toml"
"github.com/buildpacks/libcnb"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/google"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-github/v43/github"
"github.com/paketo-buildpacks/libpak"
"github.com/paketo-buildpacks/pipeline-builder/actions"
"github.com/pkg/errors"
)
type Payload struct {
PrimaryBuildpack Buildpack
Builder Builder
NestedBuildpacks []Buildpack
Release Release
}
type Buildpack struct {
libcnb.Buildpack
OrderGroups []libcnb.BuildpackOrder `toml:"order"`
Dependencies []libpak.BuildpackDependency
}
type Builder struct {
Description string
Buildpacks []struct {
URI string
}
OrderGroups []libcnb.BuildpackOrder `toml:"order"`
Stack BuilderStack `toml:"stack"`
}
type BuilderStack struct {
ID string `toml:"id"`
BuildImage string `toml:"build-image"`
RunImage string `toml:"run-image"`
}
func (b Builder) Flatten() []string {
tmp := []string{}
for _, bp := range b.Buildpacks {
tmp = append(tmp, strings.TrimPrefix(bp.URI, "docker://"))
}
return tmp
}
type Package struct {
Dependencies []struct {
URI string `toml:"uri"`
}
}
func (b Package) Flatten() []string {
tmp := []string{}
for _, bp := range b.Dependencies {
tmp = append(tmp, strings.TrimPrefix(bp.URI, "docker://"))
}
return tmp
}
type Release struct {
ID string
Name string
Body string
Tag string
}
//go:generate mockery --name BuildpackLoader --case=underscore
type BuildpackLoader interface {
LoadBuildpack(id string) (Buildpack, error)
LoadBuildpacks(uris []string) ([]Buildpack, error)
}
type Drafter struct {
Loader BuildpackLoader
}
func (d Drafter) BuildAndWriteReleaseToFileDraftFromTemplate(outputPath, templateContents string, context interface{}) error {
fp, err := os.Create(outputPath)
if err != nil {
return fmt.Errorf("unable to create file %s\n%w", outputPath, err)
}
defer fp.Close()
return d.BuildAndWriteReleaseDraftFromTemplate(fp, templateContents, context)
}
func (d Drafter) BuildAndWriteReleaseDraftFromTemplate(output io.Writer, templateContents string, context interface{}) error {
tmpl, err := template.New("draft").Parse(templateContents)
if err != nil {
return fmt.Errorf("unable to parse template %q\n%w", templateContents, err)
}
err = tmpl.Execute(output, context)
if err != nil {
return fmt.Errorf("unable to execute template %q\n%w", templateContents, err)
}
return nil
}
func (d Drafter) CreatePayload(inputs actions.Inputs, buildpackPath string) (Payload, error) {
release := Release{
ID: inputs["release_id"],
Name: inputs["release_name"],
Body: inputs["release_body"],
Tag: inputs["release_tag_name"],
}
builder, err := loadBuilderTOML(buildpackPath)
if err != nil {
return Payload{}, err
}
if builder != nil {
bps, err := d.Loader.LoadBuildpacks(builder.Flatten())
if err != nil {
return Payload{}, fmt.Errorf("unable to load buildpacks\n%w", err)
}
return Payload{
PrimaryBuildpack: Buildpack{},
Builder: *builder,
NestedBuildpacks: bps,
Release: release,
}, nil
}
bp, err := loadBuildpackTOMLFromFile(buildpackPath)
if err != nil {
return Payload{}, err
}
pkg, err := loadPackage(buildpackPath)
if err != nil {
return Payload{}, err
}
if bp != nil && pkg == nil { // component
return Payload{
PrimaryBuildpack: *bp,
Release: release,
}, nil
} else if bp != nil && pkg != nil { // composite
bps, err := d.Loader.LoadBuildpacks(pkg.Flatten())
if err != nil {
return Payload{}, fmt.Errorf("unable to load buildpacks\n%w", err)
}
return Payload{
NestedBuildpacks: bps,
PrimaryBuildpack: *bp,
Release: release,
}, nil
}
return Payload{}, fmt.Errorf("unable to generate payload, need buildpack.toml or buildpack.toml + package.toml or builder.toml")
}
func loadBuildpackTOMLFromFile(buildpackPath string) (*Buildpack, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "buildpack.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read buildpack toml\n%w", err)
}
return loadBuildpackTOML(rawTOML)
}
func loadBuildpackTOML(TOML []byte) (*Buildpack, error) {
bp := &Buildpack{}
if err := toml.Unmarshal(TOML, bp); err != nil {
return nil, fmt.Errorf("unable to parse buildpack TOML\n%w", err)
}
sort.Slice(bp.Stacks, func(i, j int) bool {
return strings.ToLower(bp.Stacks[i].ID) < strings.ToLower(bp.Stacks[j].ID)
})
if deps, found := bp.Metadata["dependencies"]; found {
if depList, ok := deps.([]map[string]interface{}); ok {
for _, dep := range depList {
bpDep := libpak.BuildpackDependency{
ID: asString(dep, "id"),
Name: asString(dep, "name"),
Version: asString(dep, "version"),
URI: asString(dep, "uri"),
SHA256: asString(dep, "sha256"),
PURL: asString(dep, "purl"),
}
if stacks, ok := dep["stacks"].([]interface{}); ok {
for _, stack := range stacks {
if stack, ok := stack.(string); ok {
bpDep.Stacks = append(bpDep.Stacks, stack)
}
}
}
if cpes, ok := dep["cpes"].([]interface{}); ok {
for _, cpe := range cpes {
if cpe, ok := cpe.(string); ok {
bpDep.CPEs = append(bpDep.CPEs, cpe)
}
}
}
if licenses, ok := dep["licenses"].([]map[string]interface{}); ok {
for _, license := range licenses {
bpDep.Licenses = append(bpDep.Licenses, libpak.BuildpackDependencyLicense{
Type: asString(license, "type"),
URI: asString(license, "uri"),
})
}
}
bp.Dependencies = append(bp.Dependencies, bpDep)
}
} else {
return nil, fmt.Errorf("unable to read dependencies from %v", bp.Metadata)
}
sort.Slice(bp.Dependencies, func(i, j int) bool {
return strings.ToLower(bp.Dependencies[i].Name) < strings.ToLower(bp.Dependencies[j].Name)
})
}
return bp, nil
}
func asString(m map[string]interface{}, key string) string {
if tmp, ok := m[key].(string); ok {
return tmp
}
return ""
}
func loadPackage(buildpackPath string) (*Package, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "package.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read package toml\n%w", err)
}
pkg := &Package{}
if err := toml.Unmarshal(rawTOML, pkg); err != nil {
return nil, fmt.Errorf("unable to parse package TOML\n%w", err)
}
return pkg, nil
}
func loadBuilderTOML(buildpackPath string) (*Builder, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "builder.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read builder toml\n%w", err)
}
builder := &Builder{}
if err := toml.Unmarshal(rawTOML, builder); err != nil {
return nil, fmt.Errorf("unable to parse builder TOML\n%w", err)
}
return builder, nil
}
type GithubBuildpackLoader struct {
GithubClient *github.Client
RegexMappers []string
}
func (g GithubBuildpackLoader) LoadBuildpacks(uris []string) ([]Buildpack, error) {
buildpacks := []Buildpack{}
for _, uri := range uris {
bp, err := g.LoadBuildpack(uri)
if err != nil {
return []Buildpack{}, fmt.Errorf("unable to process %s\n%w", uri, err)
}
buildpacks = append(buildpacks, bp)
}
sort.Slice(buildpacks, func(i, j int) bool {
return strings.ToLower(buildpacks[i].Info.Name) < strings.ToLower(buildpacks[j].Info.Name)
})
return buildpacks, nil
}
func (g GithubBuildpackLoader) LoadBuildpack(imgUri string) (Buildpack, error) {
uris, err := g.mapURIs(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map URIs\n%w", err)
}
origOrg, origRepo, _, err := parseRepoOrgVersionFromImageUri(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse original image uri\n%w", err)
}
for _, uri := range uris {
org, repo, version, err := parseRepoOrgVersionFromImageUri(uri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse image uri\n%w", err)
}
paths, err := g.mapBuildpackTOMLPath(origOrg, origRepo)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map buildpack toml path\n%w", err)
}
if regexp.MustCompile(`^\d+\.\d+\.\d+$`).MatchString(version) {
version = fmt.Sprintf("v%s", version)
}
for _, path := range paths {
tomlBytes, err := g.fetchTOMLFile(org, repo, version, path)
if err != nil {
var apiErr *github.ErrorResponse
if errors.As(err, &apiErr) && apiErr.Response.StatusCode == 404 {
fmt.Println("skipping 404", apiErr)
continue
}
return Buildpack{}, fmt.Errorf("unable to fetch toml\n%w", err)
}
if len(tomlBytes) > 0 {
bp, err := loadBuildpackTOML(tomlBytes)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load buildpack toml from image\n%w", err)
}
bp.Info.Version = version
return *bp, nil
}
}
}
return Buildpack{}, fmt.Errorf("unable to load buildpack.toml for %s", imgUri)
}
func (g GithubBuildpackLoader) LoadPackages(imgUri string) (Package, error) {
uris, err := g.mapURIs(imgUri)
if err != nil {
return Package{}, fmt.Errorf("unable to map URIs\n%w", err)
}
for _, uri := range uris {
uriPattern := regexp.MustCompile(`.*\/(.*)\/(.*):(.*)`)
parts := uriPattern.FindStringSubmatch(uri)
if len(parts) != 4 {
return Package{}, fmt.Errorf("unable to parse %s, found %q", uri, parts)
}
org := parts[1]
repo := parts[2]
version := parts[3]
if regexp.MustCompile(`\d+\.\d+\.\d+`).MatchString(version) {
version = fmt.Sprintf("v%s", version)
}
tomlBytes, err := g.fetchTOMLFile(org, repo, version, "/package.toml")
if err != nil {
var apiErr *github.ErrorResponse
if errors.As(err, &apiErr) && apiErr.Response.StatusCode == 404 {
fmt.Println("skipping 404", apiErr)
continue
}
return Package{}, fmt.Errorf("unable to fetch toml\n%w", err)
}
if len(tomlBytes) > 0 {
pkg := &Package{}
if err := toml.Unmarshal(tomlBytes, pkg); err != nil {
return Package{}, fmt.Errorf("unable to parse package TOML\n%w", err)
}
return *pkg, nil
}
}
return Package{}, fmt.Errorf("unable to load package.toml for %s", imgUri)
}
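// Note on LoadPackages (above): the GitHub ref is derived from the image tag,
// with bare semver tags prefixed by "v"; a hypothetical
// "gcr.io/example-org/example-bp:1.2.3" is therefore fetched at ref "v1.2.3",
// and the file requested is /package.toml.
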
func (g GithubBuildpackLoader) mapURIs(uri string) ([]string, error) {
possibilities := []string{uri}
for _, mapper := range g.RegexMappers {
if len(mapper) <= 3 {
continue
}
splitCh := string(mapper[0])
parts := strings.SplitN(mapper[1:len(mapper)-1], splitCh, 2)
expr, err := regexp.Compile(parts[0])
if err != nil {
return []string{}, fmt.Errorf("unable to parse regex %s\n%w", mapper, err)
}
possibilities = append(possibilities, expr.ReplaceAllString(uri, parts[1]))
}
return possibilities, nil
}
func (g GithubBuildpackLoader) mapBuildpackTOMLPath(org, repo string) ([]string, error) |
func (g GithubBuildpackLoader) fetchTOMLFile(org, repo, version, path string) ([]byte, error) {
fmt.Println("Fetching from org:", org, "repo:", repo, "version:", version, "path:", path)
body, _, err := g.GithubClient.Repositories.DownloadContents(
context.Background(),
org,
repo,
path,
&github.RepositoryContentGetOptions{Ref: version})
if err != nil {
return []byte{}, fmt.Errorf("unable to download file\n%w", err)
}
buf := &bytes.Buffer{}
_, err = io.Copy(buf, body)
if err != nil {
return []byte{}, fmt.Errorf("unable to read downloaded file\n%w", err)
}
return buf.Bytes(), nil
}
type RegistryBuildpackLoader struct {
GCRToken string
}
func (r RegistryBuildpackLoader) LoadBuildpacks(uris []string) ([]Buildpack, error) {
buildpacks := []Buildpack{}
for _, uri := range uris {
fmt.Println("Loading buildpack info from:", uri)
bp, err := r.LoadBuildpack(uri)
if err != nil {
return []Buildpack{}, fmt.Errorf("unable to process %s\n%w", uri, err)
}
buildpacks = append(buildpacks, bp)
}
return buildpacks, nil
}
func (r RegistryBuildpackLoader) LoadBuildpack(uri string) (Buildpack, error) {
if err := os.MkdirAll("/tmp", 0o1777); err != nil {
return Buildpack{}, fmt.Errorf("unable to create /tmp\n%w", err)
}
tarFile, err := ioutil.TempFile("/tmp", "tarfiles")
if err != nil {
return Buildpack{}, fmt.Errorf("unable to create tempfile\n%w", err)
}
defer os.Remove(tarFile.Name())
err = r.loadBuildpackImage(uri, tarFile)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load %s\n%w", uri, err)
}
_, err = tarFile.Seek(0, 0)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to reset file pointer\n%w", err)
}
bpTOML, err := readBuildpackTOML(tarFile)
if err != nil {
return Buildpack{}, err
}
bp, err := loadBuildpackTOML(bpTOML)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load buildpack toml from image\n%w", err)
}
return *bp, nil
}
func (r RegistryBuildpackLoader) loadBuildpackImage(ref string, to io.Writer) error {
reference, err := name.ParseReference(ref)
if err != nil {
return fmt.Errorf("unable to parse reference for existing buildpack tag\n%w", err)
}
auth := authn.Anonymous
if r.GCRToken != "" {
auth = google.NewJSONKeyAuthenticator(r.GCRToken)
}
img, err := remote.Image(reference, remote.WithAuth(auth))
if err != nil {
return fmt.Errorf("unable to fetch remote image\n%w", err)
}
layers, err := img.Layers()
if err != nil {
return fmt.Errorf("unable to fetch layer\n%w", err)
}
if len(layers) == 1 {
l := layers[0]
rc, err := l.Uncompressed()
if err != nil {
return fmt.Errorf("unable to get uncompressed reader\n%w", err)
}
_, err = io.Copy(to, rc)
return err
}
fs := mutate.Extract(img)
_, err = io.Copy(to, fs)
return err
}
func readBuildpackTOML(tarFile *os.File) ([]byte, error) {
t := tar.NewReader(tarFile)
for {
f, err := t.Next()
if err != nil && err == io.EOF {
break
} else if err != nil {
return []byte{}, fmt.Errorf("unable to read TAR file\n%w", err)
}
if strings.HasSuffix(f.Name, "buildpack.toml") {
info := f.FileInfo()
if info.IsDir() || (info.Mode()&os.ModeSymlink != 0) {
return []byte{}, fmt.Errorf("unable to read buildpack.toml, unexpected file type (directory or symlink)")
}
buf := &bytes.Buffer{}
_, err := io.Copy(buf, t)
if err != nil {
return []byte{}, fmt.Errorf("unable to read buildpack.toml\n%w", err)
}
return buf.Bytes(), nil
}
}
return []byte{}, fmt.Errorf("unable to find buildpack.toml in image")
}
func parseRepoOrgVersionFromImageUri(imgUri string) (string, string, string, error) {
uriPattern := regexp.MustCompile(`.*\/(.*)\/(.*):(.*)`)
parts := uriPattern.FindStringSubmatch(imgUri)
if len(parts) != 4 {
return "", "", "", fmt.Errorf("unable to parse %s, found %q", imgUri, parts)
}
return parts[1], parts[2], parts[3], nil
}
| {
paths := []string{
"/buildpack.toml",
}
org = strings.ToUpper(strings.ReplaceAll(org, "-", "_"))
repo = strings.ToUpper(strings.ReplaceAll(repo, "-", "_"))
if p, found := os.LookupEnv(fmt.Sprintf("BP_TOML_PATH_%s_%s", org, repo)); found {
if !strings.HasSuffix(p, "/buildpack.toml") {
p = fmt.Sprintf("%s/buildpack.toml", p)
}
return []string{p}, nil
}
return paths, nil
} | identifier_body |
drafts.go | /*
* Copyright 2018-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package drafts
import (
"archive/tar"
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"text/template"
"github.com/BurntSushi/toml"
"github.com/buildpacks/libcnb"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/google"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-github/v43/github"
"github.com/paketo-buildpacks/libpak"
"github.com/paketo-buildpacks/pipeline-builder/actions"
"github.com/pkg/errors"
)
type Payload struct {
PrimaryBuildpack Buildpack
Builder Builder
NestedBuildpacks []Buildpack
Release Release
}
type Buildpack struct {
libcnb.Buildpack
OrderGroups []libcnb.BuildpackOrder `toml:"order"`
Dependencies []libpak.BuildpackDependency
}
type Builder struct {
Description string
Buildpacks []struct {
URI string
}
OrderGroups []libcnb.BuildpackOrder `toml:"order"`
Stack BuilderStack `toml:"stack"`
}
type BuilderStack struct {
ID string `toml:"id"`
BuildImage string `toml:"build-image"`
RunImage string `toml:"run-image"`
}
func (b Builder) Flatten() []string {
tmp := []string{}
for _, bp := range b.Buildpacks {
tmp = append(tmp, strings.TrimPrefix(bp.URI, "docker://"))
}
return tmp
}
type Package struct {
Dependencies []struct {
URI string `toml:"uri"`
}
}
func (b Package) Flatten() []string {
tmp := []string{}
for _, bp := range b.Dependencies {
tmp = append(tmp, strings.TrimPrefix(bp.URI, "docker://"))
}
return tmp
}
type Release struct {
ID string
Name string
Body string
Tag string
}
//go:generate mockery --name BuildpackLoader --case=underscore
type BuildpackLoader interface {
LoadBuildpack(id string) (Buildpack, error)
LoadBuildpacks(uris []string) ([]Buildpack, error)
}
type Drafter struct {
Loader BuildpackLoader
}
func (d Drafter) BuildAndWriteReleaseToFileDraftFromTemplate(outputPath, templateContents string, context interface{}) error {
fp, err := os.Create(outputPath)
if err != nil {
return fmt.Errorf("unable to create file %s\n%w", outputPath, err)
}
defer fp.Close()
return d.BuildAndWriteReleaseDraftFromTemplate(fp, templateContents, context)
}
func (d Drafter) BuildAndWriteReleaseDraftFromTemplate(output io.Writer, templateContents string, context interface{}) error {
tmpl, err := template.New("draft").Parse(templateContents)
if err != nil {
return fmt.Errorf("unable to parse template %q\n%w", templateContents, err)
}
err = tmpl.Execute(output, context)
if err != nil {
return fmt.Errorf("unable to execute template %q\n%w", templateContents, err)
}
return nil
}
func (d Drafter) CreatePayload(inputs actions.Inputs, buildpackPath string) (Payload, error) {
release := Release{
ID: inputs["release_id"],
Name: inputs["release_name"],
Body: inputs["release_body"],
Tag: inputs["release_tag_name"],
}
builder, err := loadBuilderTOML(buildpackPath)
if err != nil {
return Payload{}, err
}
if builder != nil {
bps, err := d.Loader.LoadBuildpacks(builder.Flatten())
if err != nil {
return Payload{}, fmt.Errorf("unable to load buildpacks\n%w", err)
}
return Payload{
PrimaryBuildpack: Buildpack{},
Builder: *builder,
NestedBuildpacks: bps,
Release: release,
}, nil
}
bp, err := loadBuildpackTOMLFromFile(buildpackPath)
if err != nil {
return Payload{}, err
}
pkg, err := loadPackage(buildpackPath)
if err != nil {
return Payload{}, err
}
if bp != nil && pkg == nil { // component
return Payload{
PrimaryBuildpack: *bp,
Release: release,
}, nil
} else if bp != nil && pkg != nil { // composite
bps, err := d.Loader.LoadBuildpacks(pkg.Flatten())
if err != nil {
return Payload{}, fmt.Errorf("unable to load buildpacks\n%w", err)
}
return Payload{
NestedBuildpacks: bps,
PrimaryBuildpack: *bp,
Release: release,
}, nil
}
return Payload{}, fmt.Errorf("unable to generate payload, need buildpack.toml or buildpack.toml + package.toml or builder.toml")
}
func loadBuildpackTOMLFromFile(buildpackPath string) (*Buildpack, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "buildpack.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read buildpack toml\n%w", err)
}
return loadBuildpackTOML(rawTOML)
}
func loadBuildpackTOML(TOML []byte) (*Buildpack, error) {
bp := &Buildpack{}
if err := toml.Unmarshal(TOML, bp); err != nil {
return nil, fmt.Errorf("unable to parse buildpack TOML\n%w", err)
}
sort.Slice(bp.Stacks, func(i, j int) bool {
return strings.ToLower(bp.Stacks[i].ID) < strings.ToLower(bp.Stacks[j].ID)
})
if deps, found := bp.Metadata["dependencies"]; found {
if depList, ok := deps.([]map[string]interface{}); ok {
for _, dep := range depList {
bpDep := libpak.BuildpackDependency{
ID: asString(dep, "id"),
Name: asString(dep, "name"),
Version: asString(dep, "version"),
URI: asString(dep, "uri"),
SHA256: asString(dep, "sha256"),
PURL: asString(dep, "purl"),
}
if stacks, ok := dep["stacks"].([]interface{}); ok {
for _, stack := range stacks {
if stack, ok := stack.(string); ok {
bpDep.Stacks = append(bpDep.Stacks, stack)
}
}
}
if cpes, ok := dep["cpes"].([]interface{}); ok {
for _, cpe := range cpes {
if cpe, ok := cpe.(string); ok {
bpDep.CPEs = append(bpDep.CPEs, cpe)
}
}
}
if licenses, ok := dep["licenses"].([]map[string]interface{}); ok {
for _, license := range licenses {
bpDep.Licenses = append(bpDep.Licenses, libpak.BuildpackDependencyLicense{
Type: asString(license, "type"),
URI: asString(license, "uri"),
})
}
}
bp.Dependencies = append(bp.Dependencies, bpDep)
}
} else {
return nil, fmt.Errorf("unable to read dependencies from %v", bp.Metadata)
}
sort.Slice(bp.Dependencies, func(i, j int) bool {
return strings.ToLower(bp.Dependencies[i].Name) < strings.ToLower(bp.Dependencies[j].Name)
})
}
return bp, nil
}
func asString(m map[string]interface{}, key string) string {
if tmp, ok := m[key].(string); ok {
return tmp
}
return ""
}
func loadPackage(buildpackPath string) (*Package, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "package.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read package toml\n%w", err)
}
pkg := &Package{}
if err := toml.Unmarshal(rawTOML, pkg); err != nil {
return nil, fmt.Errorf("unable to parse package TOML\n%w", err)
}
return pkg, nil
}
func loadBuilderTOML(buildpackPath string) (*Builder, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "builder.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read builder toml\n%w", err)
}
builder := &Builder{}
if err := toml.Unmarshal(rawTOML, builder); err != nil {
return nil, fmt.Errorf("unable to parse builder TOML\n%w", err)
}
return builder, nil
}
type GithubBuildpackLoader struct {
GithubClient *github.Client
RegexMappers []string
}
func (g GithubBuildpackLoader) LoadBuildpacks(uris []string) ([]Buildpack, error) {
buildpacks := []Buildpack{}
for _, uri := range uris {
bp, err := g.LoadBuildpack(uri)
if err != nil {
return []Buildpack{}, fmt.Errorf("unable to process %s\n%w", uri, err)
}
buildpacks = append(buildpacks, bp)
}
sort.Slice(buildpacks, func(i, j int) bool {
return strings.ToLower(buildpacks[i].Info.Name) < strings.ToLower(buildpacks[j].Info.Name)
})
return buildpacks, nil
}
func (g GithubBuildpackLoader) LoadBuildpack(imgUri string) (Buildpack, error) {
uris, err := g.mapURIs(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map URIs\n%w", err)
}
origOrg, origRepo, _, err := parseRepoOrgVersionFromImageUri(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse original image uri\n%w", err)
}
for _, uri := range uris {
org, repo, version, err := parseRepoOrgVersionFromImageUri(uri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse image uri\n%w", err)
}
paths, err := g.mapBuildpackTOMLPath(origOrg, origRepo)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map buildpack toml path\n%w", err)
}
if regexp.MustCompile(`^\d+\.\d+\.\d+$`).MatchString(version) {
version = fmt.Sprintf("v%s", version)
}
for _, path := range paths {
tomlBytes, err := g.fetchTOMLFile(org, repo, version, path)
if err != nil {
var apiErr *github.ErrorResponse
if errors.As(err, &apiErr) && apiErr.Response.StatusCode == 404 {
fmt.Println("skipping 404", apiErr)
continue
}
return Buildpack{}, fmt.Errorf("unable to fetch toml\n%w", err)
}
if len(tomlBytes) > 0 {
bp, err := loadBuildpackTOML(tomlBytes)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load buildpack toml from image\n%w", err)
}
bp.Info.Version = version
return *bp, nil
}
}
}
return Buildpack{}, fmt.Errorf("unable to load buildpack.toml for %s", imgUri)
}
func (g GithubBuildpackLoader) LoadPackages(imgUri string) (Package, error) {
uris, err := g.mapURIs(imgUri)
if err != nil {
return Package{}, fmt.Errorf("unable to map URIs\n%w", err)
}
for _, uri := range uris {
uriPattern := regexp.MustCompile(`.*\/(.*)\/(.*):(.*)`)
parts := uriPattern.FindStringSubmatch(uri)
if len(parts) != 4 {
return Package{}, fmt.Errorf("unable to parse %s, found %q", uri, parts)
}
org := parts[1]
repo := parts[2]
version := parts[3]
if regexp.MustCompile(`\d+\.\d+\.\d+`).MatchString(version) {
version = fmt.Sprintf("v%s", version)
}
tomlBytes, err := g.fetchTOMLFile(org, repo, version, "/package.toml")
if err != nil {
var apiErr *github.ErrorResponse
if errors.As(err, &apiErr) && apiErr.Response.StatusCode == 404 {
fmt.Println("skipping 404", apiErr)
continue
}
return Package{}, fmt.Errorf("unable to fetch toml\n%w", err)
}
if len(tomlBytes) > 0 {
pkg := &Package{}
if err := toml.Unmarshal(tomlBytes, pkg); err != nil {
return Package{}, fmt.Errorf("unable to parse package TOML\n%w", err)
}
return *pkg, nil
}
}
return Package{}, fmt.Errorf("unable to load package.toml for %s", imgUri)
}
func (g GithubBuildpackLoader) mapURIs(uri string) ([]string, error) {
possibilities := []string{uri}
for _, mapper := range g.RegexMappers {
if len(mapper) <= 3 {
continue
}
splitCh := string(mapper[0])
parts := strings.SplitN(mapper[1:len(mapper)-1], splitCh, 2)
expr, err := regexp.Compile(parts[0])
if err != nil {
return []string{}, fmt.Errorf("unable to parse regex %s\n%w", mapper, err)
}
possibilities = append(possibilities, expr.ReplaceAllString(uri, parts[1]))
}
return possibilities, nil
}
func (g GithubBuildpackLoader) mapBuildpackTOMLPath(org, repo string) ([]string, error) {
paths := []string{
"/buildpack.toml",
}
org = strings.ToUpper(strings.ReplaceAll(org, "-", "_"))
repo = strings.ToUpper(strings.ReplaceAll(repo, "-", "_"))
if p, found := os.LookupEnv(fmt.Sprintf("BP_TOML_PATH_%s_%s", org, repo)); found {
if !strings.HasSuffix(p, "/buildpack.toml") {
p = fmt.Sprintf("%s/buildpack.toml", p)
}
return []string{p}, nil
}
return paths, nil
}
func (g GithubBuildpackLoader) fetchTOMLFile(org, repo, version, path string) ([]byte, error) {
fmt.Println("Fetching from org:", org, "repo:", repo, "version:", version, "path:", path)
body, _, err := g.GithubClient.Repositories.DownloadContents(
context.Background(),
org,
repo,
path,
&github.RepositoryContentGetOptions{Ref: version})
if err != nil {
return []byte{}, fmt.Errorf("unable to download file\n%w", err)
}
buf := &bytes.Buffer{}
_, err = io.Copy(buf, body)
if err != nil {
return []byte{}, fmt.Errorf("unable to read downloaded file\n%w", err)
}
return buf.Bytes(), nil
}
type RegistryBuildpackLoader struct {
GCRToken string
}
func (r RegistryBuildpackLoader) LoadBuildpacks(uris []string) ([]Buildpack, error) {
buildpacks := []Buildpack{}
for _, uri := range uris {
fmt.Println("Loading buildpack info from:", uri)
bp, err := r.LoadBuildpack(uri)
if err != nil {
return []Buildpack{}, fmt.Errorf("unable to process %s\n%w", uri, err)
}
buildpacks = append(buildpacks, bp)
}
return buildpacks, nil
}
func (r RegistryBuildpackLoader) LoadBuildpack(uri string) (Buildpack, error) {
if err := os.MkdirAll("/tmp", 0o1777); err != nil {
return Buildpack{}, fmt.Errorf("unable to create /tmp\n%w", err)
}
tarFile, err := ioutil.TempFile("/tmp", "tarfiles")
if err != nil {
return Buildpack{}, fmt.Errorf("unable to create tempfile\n%w", err)
}
defer os.Remove(tarFile.Name())
err = r.loadBuildpackImage(uri, tarFile)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load %s\n%w", uri, err)
}
_, err = tarFile.Seek(0, 0)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to reset file pointer\n%w", err)
}
bpTOML, err := readBuildpackTOML(tarFile)
if err != nil {
return Buildpack{}, err
}
bp, err := loadBuildpackTOML(bpTOML)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load buildpack toml from image\n%w", err)
}
return *bp, nil
}
func (r RegistryBuildpackLoader) | (ref string, to io.Writer) error {
reference, err := name.ParseReference(ref)
if err != nil {
return fmt.Errorf("unable to parse reference for existing buildpack tag\n%w", err)
}
auth := authn.Anonymous
if r.GCRToken != "" {
auth = google.NewJSONKeyAuthenticator(r.GCRToken)
}
img, err := remote.Image(reference, remote.WithAuth(auth))
if err != nil {
return fmt.Errorf("unable to fetch remote image\n%w", err)
}
layers, err := img.Layers()
if err != nil {
return fmt.Errorf("unable to fetch layer\n%w", err)
}
if len(layers) == 1 {
l := layers[0]
rc, err := l.Uncompressed()
if err != nil {
return fmt.Errorf("unable to get uncompressed reader\n%w", err)
}
_, err = io.Copy(to, rc)
return err
}
fs := mutate.Extract(img)
_, err = io.Copy(to, fs)
return err
}
func readBuildpackTOML(tarFile *os.File) ([]byte, error) {
t := tar.NewReader(tarFile)
for {
f, err := t.Next()
if err != nil && err == io.EOF {
break
} else if err != nil {
return []byte{}, fmt.Errorf("unable to read TAR file\n%w", err)
}
if strings.HasSuffix(f.Name, "buildpack.toml") {
info := f.FileInfo()
if info.IsDir() || (info.Mode()&os.ModeSymlink != 0) {
return []byte{}, fmt.Errorf("unable to read buildpack.toml, unexpected file type (directory or symlink)")
}
buf := &bytes.Buffer{}
_, err := io.Copy(buf, t)
if err != nil {
return []byte{}, fmt.Errorf("unable to read buildpack.toml\n%w", err)
}
return buf.Bytes(), nil
}
}
return []byte{}, fmt.Errorf("unable to find buildpack.toml in image")
}
func parseRepoOrgVersionFromImageUri(imgUri string) (string, string, string, error) {
uriPattern := regexp.MustCompile(`.*\/(.*)\/(.*):(.*)`)
parts := uriPattern.FindStringSubmatch(imgUri)
if len(parts) != 4 {
return "", "", "", fmt.Errorf("unable to parse %s, found %q", imgUri, parts)
}
return parts[1], parts[2], parts[3], nil
}
| loadBuildpackImage | identifier_name |
drafts.go | /*
* Copyright 2018-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package drafts
import (
"archive/tar"
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"text/template"
"github.com/BurntSushi/toml"
"github.com/buildpacks/libcnb"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/google"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-github/v43/github"
"github.com/paketo-buildpacks/libpak"
"github.com/paketo-buildpacks/pipeline-builder/actions"
"github.com/pkg/errors"
)
type Payload struct {
PrimaryBuildpack Buildpack
Builder Builder
NestedBuildpacks []Buildpack
Release Release
}
type Buildpack struct {
libcnb.Buildpack
OrderGroups []libcnb.BuildpackOrder `toml:"order"`
Dependencies []libpak.BuildpackDependency
}
type Builder struct {
Description string
Buildpacks []struct {
URI string
}
OrderGroups []libcnb.BuildpackOrder `toml:"order"`
Stack BuilderStack `toml:"stack"`
}
type BuilderStack struct {
ID string `toml:"id"`
BuildImage string `toml:"build-image"`
RunImage string `toml:"run-image"`
}
func (b Builder) Flatten() []string {
tmp := []string{}
for _, bp := range b.Buildpacks {
tmp = append(tmp, strings.TrimPrefix(bp.URI, "docker://"))
}
return tmp
}
type Package struct {
Dependencies []struct {
URI string `toml:"uri"`
}
}
func (b Package) Flatten() []string {
tmp := []string{}
for _, bp := range b.Dependencies {
tmp = append(tmp, strings.TrimPrefix(bp.URI, "docker://"))
}
return tmp
}
type Release struct {
ID string
Name string
Body string
Tag string
}
//go:generate mockery --name BuildpackLoader --case=underscore
type BuildpackLoader interface {
LoadBuildpack(id string) (Buildpack, error)
LoadBuildpacks(uris []string) ([]Buildpack, error)
}
type Drafter struct {
Loader BuildpackLoader
}
func (d Drafter) BuildAndWriteReleaseToFileDraftFromTemplate(outputPath, templateContents string, context interface{}) error {
fp, err := os.Create(outputPath)
if err != nil {
return fmt.Errorf("unable to create file %s\n%w", outputPath, err)
}
defer fp.Close()
return d.BuildAndWriteReleaseDraftFromTemplate(fp, templateContents, context)
}
func (d Drafter) BuildAndWriteReleaseDraftFromTemplate(output io.Writer, templateContents string, context interface{}) error {
tmpl, err := template.New("draft").Parse(templateContents)
if err != nil {
return fmt.Errorf("unable to parse template %q\n%w", templateContents, err)
}
err = tmpl.Execute(output, context)
if err != nil {
return fmt.Errorf("unable to execute template %q\n%w", templateContents, err)
}
return nil
}
func (d Drafter) CreatePayload(inputs actions.Inputs, buildpackPath string) (Payload, error) {
release := Release{
ID: inputs["release_id"],
Name: inputs["release_name"],
Body: inputs["release_body"],
Tag: inputs["release_tag_name"],
}
builder, err := loadBuilderTOML(buildpackPath)
if err != nil {
return Payload{}, err
}
if builder != nil {
bps, err := d.Loader.LoadBuildpacks(builder.Flatten())
if err != nil {
return Payload{}, fmt.Errorf("unable to load buildpacks\n%w", err)
}
return Payload{
PrimaryBuildpack: Buildpack{},
Builder: *builder,
NestedBuildpacks: bps,
Release: release,
}, nil
}
bp, err := loadBuildpackTOMLFromFile(buildpackPath)
if err != nil {
return Payload{}, err
}
pkg, err := loadPackage(buildpackPath)
if err != nil {
return Payload{}, err
}
if bp != nil && pkg == nil { // component
return Payload{
PrimaryBuildpack: *bp,
Release: release,
}, nil
} else if bp != nil && pkg != nil { // composite
bps, err := d.Loader.LoadBuildpacks(pkg.Flatten())
if err != nil {
return Payload{}, fmt.Errorf("unable to load buildpacks\n%w", err)
}
return Payload{
NestedBuildpacks: bps,
PrimaryBuildpack: *bp,
Release: release,
}, nil
}
return Payload{}, fmt.Errorf("unable to generate payload, need buildpack.toml or buildpack.toml + package.toml or builder.toml")
}
func loadBuildpackTOMLFromFile(buildpackPath string) (*Buildpack, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "buildpack.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read buildpack toml\n%w", err)
}
return loadBuildpackTOML(rawTOML)
}
func loadBuildpackTOML(TOML []byte) (*Buildpack, error) {
bp := &Buildpack{}
if err := toml.Unmarshal(TOML, bp); err != nil {
return nil, fmt.Errorf("unable to parse buildpack TOML\n%w", err)
}
sort.Slice(bp.Stacks, func(i, j int) bool {
return strings.ToLower(bp.Stacks[i].ID) < strings.ToLower(bp.Stacks[j].ID)
})
if deps, found := bp.Metadata["dependencies"]; found {
if depList, ok := deps.([]map[string]interface{}); ok {
for _, dep := range depList {
bpDep := libpak.BuildpackDependency{
ID: asString(dep, "id"),
Name: asString(dep, "name"),
Version: asString(dep, "version"),
URI: asString(dep, "uri"),
SHA256: asString(dep, "sha256"),
PURL: asString(dep, "purl"),
}
if stacks, ok := dep["stacks"].([]interface{}); ok {
for _, stack := range stacks {
if stack, ok := stack.(string); ok {
bpDep.Stacks = append(bpDep.Stacks, stack)
}
}
}
if cpes, ok := dep["cpes"].([]interface{}); ok {
for _, cpe := range cpes {
if cpe, ok := cpe.(string); ok {
bpDep.CPEs = append(bpDep.CPEs, cpe)
}
}
}
if licenses, ok := dep["licenses"].([]map[string]interface{}); ok {
for _, license := range licenses {
bpDep.Licenses = append(bpDep.Licenses, libpak.BuildpackDependencyLicense{
Type: asString(license, "type"),
URI: asString(license, "uri"),
})
}
}
bp.Dependencies = append(bp.Dependencies, bpDep)
}
} else {
return nil, fmt.Errorf("unable to read dependencies from %v", bp.Metadata)
}
sort.Slice(bp.Dependencies, func(i, j int) bool {
return strings.ToLower(bp.Dependencies[i].Name) < strings.ToLower(bp.Dependencies[j].Name)
})
}
return bp, nil
}
func asString(m map[string]interface{}, key string) string {
if tmp, ok := m[key].(string); ok {
return tmp
}
return ""
}
func loadPackage(buildpackPath string) (*Package, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "package.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read package toml\n%w", err)
}
pkg := &Package{}
if err := toml.Unmarshal(rawTOML, pkg); err != nil {
return nil, fmt.Errorf("unable to parse package TOML\n%w", err)
}
return pkg, nil
}
func loadBuilderTOML(buildpackPath string) (*Builder, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "builder.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read builder toml\n%w", err)
}
builder := &Builder{}
if err := toml.Unmarshal(rawTOML, builder); err != nil {
return nil, fmt.Errorf("unable to parse builder TOML\n%w", err)
}
return builder, nil
}
type GithubBuildpackLoader struct {
GithubClient *github.Client
RegexMappers []string
}
func (g GithubBuildpackLoader) LoadBuildpacks(uris []string) ([]Buildpack, error) {
buildpacks := []Buildpack{}
for _, uri := range uris {
bp, err := g.LoadBuildpack(uri)
if err != nil {
return []Buildpack{}, fmt.Errorf("unable to process %s\n%w", uri, err)
}
buildpacks = append(buildpacks, bp)
}
sort.Slice(buildpacks, func(i, j int) bool {
return strings.ToLower(buildpacks[i].Info.Name) < strings.ToLower(buildpacks[j].Info.Name)
})
return buildpacks, nil
}
func (g GithubBuildpackLoader) LoadBuildpack(imgUri string) (Buildpack, error) {
uris, err := g.mapURIs(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map URIs\n%w", err)
}
origOrg, origRepo, _, err := parseRepoOrgVersionFromImageUri(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse original image uri\n%w", err)
}
for _, uri := range uris {
org, repo, version, err := parseRepoOrgVersionFromImageUri(uri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse image uri\n%w", err)
}
paths, err := g.mapBuildpackTOMLPath(origOrg, origRepo)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map buildpack toml path\n%w", err)
}
if regexp.MustCompile(`^\d+\.\d+\.\d+$`).MatchString(version) {
version = fmt.Sprintf("v%s", version)
}
for _, path := range paths |
}
return Buildpack{}, fmt.Errorf("unable to load buildpack.toml for %s", imgUri)
}
func (g GithubBuildpackLoader) LoadPackages(imgUri string) (Package, error) {
uris, err := g.mapURIs(imgUri)
if err != nil {
return Package{}, fmt.Errorf("unable to map URIs\n%w", err)
}
for _, uri := range uris {
uriPattern := regexp.MustCompile(`.*\/(.*)\/(.*):(.*)`)
parts := uriPattern.FindStringSubmatch(uri)
if len(parts) != 4 {
return Package{}, fmt.Errorf("unable to parse %s, found %q", uri, parts)
}
org := parts[1]
repo := parts[2]
version := parts[3]
if regexp.MustCompile(`\d+\.\d+\.\d+`).MatchString(version) {
version = fmt.Sprintf("v%s", version)
}
tomlBytes, err := g.fetchTOMLFile(org, repo, version, "/package.toml")
if err != nil {
var apiErr *github.ErrorResponse
if errors.As(err, &apiErr) && apiErr.Response.StatusCode == 404 {
fmt.Println("skipping 404", apiErr)
continue
}
return Package{}, fmt.Errorf("unable to fetch toml\n%w", err)
}
if len(tomlBytes) > 0 {
pkg := &Package{}
if err := toml.Unmarshal(tomlBytes, pkg); err != nil {
return Package{}, fmt.Errorf("unable to parse package TOML\n%w", err)
}
return *pkg, nil
}
}
return Package{}, fmt.Errorf("unable to load package.toml for %s", imgUri)
}
func (g GithubBuildpackLoader) mapURIs(uri string) ([]string, error) {
possibilities := []string{uri}
for _, mapper := range g.RegexMappers {
if len(mapper) <= 3 {
continue
}
splitCh := string(mapper[0])
parts := strings.SplitN(mapper[1:len(mapper)-1], splitCh, 2)
expr, err := regexp.Compile(parts[0])
if err != nil {
return []string{}, fmt.Errorf("unable to parse regex %s\n%w", mapper, err)
}
possibilities = append(possibilities, expr.ReplaceAllString(uri, parts[1]))
}
return possibilities, nil
}
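// Illustrative note (not part of the original source): each RegexMappers entry uses its
// first character as the delimiter between a pattern and its replacement, and the original
// URI is always kept as the first candidate. A hypothetical mapper and call:
//
//	loader := GithubBuildpackLoader{RegexMappers: []string{`|^docker\.io/(.*)$|ghcr.io/$1|`}}
//	uris, _ := loader.mapURIs("docker.io/example-org/example-bp:1.2.3")
//	// uris == ["docker.io/example-org/example-bp:1.2.3", "ghcr.io/example-org/example-bp:1.2.3"]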
func (g GithubBuildpackLoader) mapBuildpackTOMLPath(org, repo string) ([]string, error) {
paths := []string{
"/buildpack.toml",
}
org = strings.ToUpper(strings.ReplaceAll(org, "-", "_"))
repo = strings.ToUpper(strings.ReplaceAll(repo, "-", "_"))
if p, found := os.LookupEnv(fmt.Sprintf("BP_TOML_PATH_%s_%s", org, repo)); found {
if !strings.HasSuffix(p, "/buildpack.toml") {
p = fmt.Sprintf("%s/buildpack.toml", p)
}
return []string{p}, nil
}
return paths, nil
}
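// Illustrative note (not part of the original source): the override key is built from the
// org and repo upper-cased with dashes replaced by underscores, so for a hypothetical
// example-org/example-repo image, setting
//
//	BP_TOML_PATH_EXAMPLE_ORG_EXAMPLE_REPO=sub/module
//
// makes the loader fetch "sub/module/buildpack.toml" instead of the repository root "/buildpack.toml".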
func (g GithubBuildpackLoader) fetchTOMLFile(org, repo, version, path string) ([]byte, error) {
fmt.Println("Fetching from org:", org, "repo:", repo, "version:", version, "path:", path)
body, _, err := g.GithubClient.Repositories.DownloadContents(
context.Background(),
org,
repo,
path,
&github.RepositoryContentGetOptions{Ref: version})
if err != nil {
return []byte{}, fmt.Errorf("unable to download file\n%w", err)
}
buf := &bytes.Buffer{}
_, err = io.Copy(buf, body)
if err != nil {
return []byte{}, fmt.Errorf("unable to read downloaded file\n%w", err)
}
return buf.Bytes(), nil
}
type RegistryBuildpackLoader struct {
GCRToken string
}
func (r RegistryBuildpackLoader) LoadBuildpacks(uris []string) ([]Buildpack, error) {
buildpacks := []Buildpack{}
for _, uri := range uris {
fmt.Println("Loading buildpack info from:", uri)
bp, err := r.LoadBuildpack(uri)
if err != nil {
return []Buildpack{}, fmt.Errorf("unable to process %s\n%w", uri, err)
}
buildpacks = append(buildpacks, bp)
}
return buildpacks, nil
}
func (r RegistryBuildpackLoader) LoadBuildpack(uri string) (Buildpack, error) {
if err := os.MkdirAll("/tmp", 1777); err != nil {
return Buildpack{}, fmt.Errorf("unable to create /tmp\n%w", err)
}
tarFile, err := ioutil.TempFile("/tmp", "tarfiles")
if err != nil {
return Buildpack{}, fmt.Errorf("unable to create tempfile\n%w", err)
}
defer os.Remove(tarFile.Name())
err = r.loadBuildpackImage(uri, tarFile)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load %s\n%w", uri, err)
}
_, err = tarFile.Seek(0, 0)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to reset file pointer\n%w", err)
}
bpTOML, err := readBuildpackTOML(tarFile)
if err != nil {
return Buildpack{}, err
}
bp, err := loadBuildpackTOML(bpTOML)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load buildpack toml from image\n%w", err)
}
return *bp, nil
}
func (r RegistryBuildpackLoader) loadBuildpackImage(ref string, to io.Writer) error {
reference, err := name.ParseReference(ref)
if err != nil {
return fmt.Errorf("unable to parse reference for existing buildpack tag\n%w", err)
}
auth := authn.Anonymous
if r.GCRToken != "" {
auth = google.NewJSONKeyAuthenticator(r.GCRToken)
}
img, err := remote.Image(reference, remote.WithAuth(auth))
if err != nil {
return fmt.Errorf("unable to fetch remote image\n%w", err)
}
layers, err := img.Layers()
if err != nil {
return fmt.Errorf("unable to fetch layer\n%w", err)
}
if len(layers) == 1 {
l := layers[0]
rc, err := l.Uncompressed()
if err != nil {
return fmt.Errorf("unable to get uncompressed reader\n%w", err)
}
_, err = io.Copy(to, rc)
return err
}
fs := mutate.Extract(img)
_, err = io.Copy(to, fs)
return err
}
func readBuildpackTOML(tarFile *os.File) ([]byte, error) {
t := tar.NewReader(tarFile)
for {
f, err := t.Next()
if err != nil && err == io.EOF {
break
} else if err != nil {
return []byte{}, fmt.Errorf("unable to read TAR file\n%w", err)
}
if strings.HasSuffix(f.Name, "buildpack.toml") {
info := f.FileInfo()
if info.IsDir() || (info.Mode()&os.ModeSymlink != 0) {
return []byte{}, fmt.Errorf("unable to read buildpack.toml, unexpected file type (directory or symlink)")
}
buf := &bytes.Buffer{}
_, err := io.Copy(buf, t)
if err != nil {
return []byte{}, fmt.Errorf("unable to read buildpack.toml\n%w", err)
}
return buf.Bytes(), nil
}
}
return []byte{}, fmt.Errorf("unable to find buildpack.toml in image")
}
func parseRepoOrgVersionFromImageUri(imgUri string) (string, string, string, error) {
uriPattern := regexp.MustCompile(`.*\/(.*)\/(.*):(.*)`)
parts := uriPattern.FindStringSubmatch(imgUri)
if len(parts) != 4 {
return "", "", "", fmt.Errorf("unable to parse %s, found %q", imgUri, parts)
}
return parts[1], parts[2], parts[3], nil
}
| {
tomlBytes, err := g.fetchTOMLFile(org, repo, version, path)
if err != nil {
var apiErr *github.ErrorResponse
if errors.As(err, &apiErr) && apiErr.Response.StatusCode == 404 {
fmt.Println("skipping 404", apiErr)
continue
}
return Buildpack{}, fmt.Errorf("unable to fetch toml\n%w", err)
}
if len(tomlBytes) > 0 {
bp, err := loadBuildpackTOML(tomlBytes)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load buildpack toml from image\n%w", err)
}
bp.Info.Version = version
return *bp, nil
}
} | conditional_block |
utility.js | //............................................................................Send AjaxRequest Without Object/parameter
//...................................................Recommended for Get Request
SendAjaxRequest = (ApiUrl, RequestType) =>
{
let ajaxConfig = {
url: ApiBaseUrl(ApiUrl),
type: RequestType,
contentType: 'application/json; charset=utf-8',
dataType: "json",
async: false,
beforeSend: function (xhr) {
xhr.setRequestHeader('Authorization', 'Bearer ' + localStorage.getItem("UserToken"));
},
success: (response) => {
return response;
},
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
//............................................................................Send AjaxRequest With Object/parameter
//...................................................Recommended for all other requests except Get Request
SendAjaxRequestWithObject = (ApiUrl, RequestType , Object) => {
let ajaxConfig = {
url: ApiBaseUrl(ApiUrl),
type: RequestType,
contentType: 'application/json; charset=utf-8',
dataType: "json",
data: JSON.stringify(Object),
async: false,
beforeSend: function (xhr) {
xhr.setRequestHeader('Authorization', 'Bearer ' + localStorage.getItem("UserToken"));
},
success: (response) => {
return response;
},
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
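//..................Example usage (illustrative only; the endpoints and payload below are hypothetical)
// SendAjaxRequest("api/Employee/GetAll", "GET")
// SendAjaxRequestWithObject("api/Employee/Create", "POST", { Name: "Ali", Contact: "0300-1234567" })
// Note: as written, neither wrapper returns the response; callers must read it inside the success
// callback or capture it in an outer variable.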
//............................................................................Section Textbox/Labels/div handling
//..................return Textbox Value
let getTextboxValue = (selector) => {
return $.trim($(selector).val())
}
//..................Put Value into Textbox
let putValueIntoTextbox = (selector, value) => {
$(selector).val(value)
}
//..................Reset Textbox value
let resetTextbox = (selector) => {
$(selector).val('')
}
//..................Reset Textbox value with value assign by user
let resetTextboxWithCustomValue = (selector, value) => {
$(selector).val(value)
}
//...........................................select Value From dropdown
let selectValueFromDropdown = (selector) => {
return $(`${selector} option:selected`).val()
}
//..................Change Main Page Title
let CurrentPageTitle = (title, openPage) => {
$("#currentPage").text(title)
if (openPage != null) {
$("#slash").removeAttr("hidden")
$("#openPage").text(openPage)
}
}
//..................Put Value into Label
let putValueIntoLabels = (selector, value) => {
$(selector).text(value)
}
//..................Get Value From Label
let getValueFromLabel = (selector) => {
return $(selector).text()
}
//............................................................................Section Notifications
//..................Show Error Message
let showErrorMessage = (selector, message) => {
$(selector).removeAttr('hidden')
$(selector).text(message)
setTimeout(() => {
$(selector).text('');
$(selector).attr('hidden', 'hidden');
}, 2000);
}
//..................Redirect according to the Ajax error response
let RedirectAccordingToError = (errorThrown) => {
if (errorThrown == "Unauthorized") {
window.location = "/Account/Accounts/UnAuthorized"
} else if (errorThrown == "Not Found") {
window.location = "/Account/Accounts/Login"
} else if (errorThrown == "Forbidden") {
window.location = "/Account/Accounts/UnAuthorized"
}
else {
console.log(errorThrown);
}
}
//............................................................................Section Date Time
//................. Datepicker format like Dec 17 2019
$('.datepickerDEC')[0] && $('.datepickerDEC').each(function () {
$('.datepickerDEC').datepicker({
disableTouchKeyboard: true,
autoclose: true,
format: "M dd yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepicker1')[1] && $('.datepicker1').each(function () {
$('.datepicker1').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepickerGetMonth')[1] && $('.datepickerGetMonth').each(function () {
$('.datepickerGetMonth').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//.............Get Time Using Moment
let momentTime = (value) => {
return moment(value).format("LT")
}
//.............Get date Using MomentFormat
let momentDate = (value) => {
return moment(value).format("MMM DD YYYY")
}
//.............moment human readable Date
let momentHumanDate = (value) => {
return moment(value).fromNow();
}
//.............Get date Using MomentFormat like Dec
let momentMonth = (value) => {
return moment(value).format("MMM")
}
//..............................Get coming month with year
let commingMonth = () => {
var now = new Date();
if (now.getMonth() == 11) {
var current = new Date(now.getFullYear() + 1, 0, 1);
return current;
} else {
var current = new Date(now.getFullYear(), now.getMonth() + 1, 1);
return current;
}
}
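// Example (illustrative): if today is Dec 17 2019, commingMonth() returns a Date for Jan 1 2020;
// for any other month it returns the first day of the next month of the same year.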
//............................................................................Section Custom validations (Email/Mobile)
//...............Email Format Verification
let email_validate = (value) => {
let email = value
var regexPattern = new RegExp(/^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$/); // regular expression pattern
let isValid = regexPattern.test(email);
if (!isValid) {
| return true;
}
//..........................Contact validation
let contact_validate = (value) => {
let contact = value
var regexPattern = new RegExp(/^\d{4}[- ]?\d{7}$/); // regular expression pattern
let isValid = regexPattern.test(contact);
if (!isValid) {
return false;
}
return true;
}
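// Examples (illustrative): contact_validate("0300-1234567") and contact_validate("03001234567") return true,
// while email_validate("user@example") returns false because the pattern requires a 2-3 letter TLD after a dot.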
//............................................................................Section Table Pagination
// .................... pager
function Pager(tableName, itemsPerPage) {
this.tableName = tableName;
this.itemsPerPage = itemsPerPage;
this.currentPage = 1;
this.pages = 0;
this.inited = false;
this.showRecords = function (from, to) {
var rows = document.getElementById(tableName).rows;
// i starts from 1 to skip table header row
for (var i = 1; i <= rows.length - 1; i++) {
if (i < from || i > to)
rows[i].style.display = 'none';
else
rows[i].style.display = '';
}
}
this.showPage = function (pageNumber) {
if (!this.inited) {
alert("not inited");
return;
}
var oldPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
oldPageAnchor.className = 'page-item';
this.currentPage = pageNumber;
var newPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
newPageAnchor.className = 'page-item active';
var from = (pageNumber - 1) * itemsPerPage + 1;
var to = from + itemsPerPage - 1;
this.showRecords(from, to);
}
this.prev = function () {
if (this.currentPage > 1)
this.showPage(this.currentPage - 1);
}
this.next = function () {
if (this.currentPage < this.pages) {
this.showPage(this.currentPage + 1);
}
}
this.init = function () {
var rows = document.getElementById(tableName).rows;
var records = (rows.length - 1);
this.pages = Math.ceil(records / itemsPerPage);
this.inited = true;
}
this.showPageNav = function (pagerName, positionId) {
if (!this.inited) {
alert("not inited");
return;
}
var element = document.getElementById(positionId);
var pagerHtml = `<li class="page-pre" title="Previous" style="cursor:pointer" onclick="${pagerName}.prev();">
<a class="page-link">‹</a>
</li>
`
for (var page = 1; page <= this.pages; page++)
pagerHtml += `<li style="cursor:pointer" class="page-number" id="pg${tableName}${page}" onclick="${pagerName}.showPage(${page});"><a class="page-link">${page}</a></li>`
pagerHtml += `<li style="cursor:pointer" title="Next" class="page-next" onclick="${pagerName}.next();">
<a class="page-link">›</a>
</li >`
element.innerHTML = pagerHtml;
}
}
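//..................Example usage (illustrative only; the table id, pager variable and nav element id are hypothetical)
// var demoPager = new Pager("employeeTable", 10);
// demoPager.init();
// demoPager.showPageNav("demoPager", "employeeTablePagination");
// demoPager.showPage(1);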
//...........................................................................Section Days Name With Id's
//.................get today by using day id
let getToday = (day) => {
switch (day) {
case 1:
return "Monday"
break;
case 2:
return "Tuesday"
break;
case 3:
return "Wednesday"
break;
case 4:
return "Thursday"
break;
case 5:
return "Friday"
break;
case 6:
return "Saturday";
break;
case 7:
return "Sunday"
break;
}
}
//................................ReturnDayId by Using Day Name
let getDayID = (dayName) => {
switch (dayName) {
case "monday":
return 1
break;
case "tuesday":
return 2
break;
case "wednesday":
return 3
break;
case "thursday":
return 4
break;
case "friday":
return 5
break;
case "saturday":
return 6;
break;
case "sunday":
return 7
break;
}
}
//............................................................................Section General
//..................show Modal
let showModal = (selector) => {
$(selector).modal("show")
}
//..................Hide Modal
let hideModal = (selector) => {
$(selector).modal("hide")
}
//.............add class active
let addClassActive = (selector) => {
$(selector).addClass("active")
}
//.............Api Base Url
let ApiBaseUrl = (url) => {
return `https://localhost:44376/${url}`
// return `https://localhost:44346/${url}`
}
//$(`#TimeInInputHours option[value='${timeinhours}']`).attr("selected", "selected");
//$(`#TimeInInputMinutes option[value='${timeinmin}']`).attr("selected", "selected");
//$("#TimeInInputHours").prop('selectedIndex', timeinhours);
//$("#TimeInInputMinutes").prop('selectedIndex', timeinmin); | return false;
}
| conditional_block |
utility.js | //............................................................................Send AjaxRequest Without Object/parameter
//...................................................Recommended for Get Request
SendAjaxRequest = (ApiUrl, RequestType) =>
{
let ajaxConfig = {
url: ApiBaseUrl(ApiUrl),
type: RequestType,
contentType: 'application/json; charset=utf-8',
dataType: "json",
async: false,
beforeSend: function (xhr) {
xhr.setRequestHeader('Authorization', 'Bearer ' + localStorage.getItem("UserToken"));
},
success: (response) => {
return response;
},
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
//............................................................................Send AjaxRequest With Object/parameter
//...................................................Recommended for all other requests except Get Request
SendAjaxRequestWithObject = (ApiUrl, RequestType , Object) => {
let ajaxConfig = {
url: ApiBaseUrl(ApiUrl),
type: RequestType,
contentType: 'application/json; charset=utf-8',
dataType: "json",
data: JSON.stringify(Object),
async: false,
beforeSend: function (xhr) {
xhr.setRequestHeader('Authorization', 'Bearer ' + localStorage.getItem("UserToken"));
},
success: (response) => {
return response;
},
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
//............................................................................Section Textbox/Labels/div handling
//..................return Textbox Value
let getTextboxValue = (selector) => {
return $.trim($(selector).val())
}
//..................Put Value into Textbox
let putValueIntoTextbox = (selector, value) => {
$(selector).val(value)
}
//..................Reset Textbox value
let resetTextbox = (selector) => {
$(selector).val('')
}
//..................Reset Textbox value with value assign by user
let resetTextboxWithCustomValue = (selector, value) => {
$(selector).val(value)
}
//...........................................select Value From dropdown
let selectValueFromDropdown = (selector) => {
return $(`${selector} option:selected`).val()
}
//..................Change Main Page Title
let CurrentPageTitle = (title, openPage) => {
$("#currentPage").text(title)
if (openPage != null) {
$("#slash").removeAttr("hidden")
$("#openPage").text(openPage)
}
}
//..................Put Value into Label
let putValueIntoLabels = (selector, value) => {
$(selector).text(value)
}
//..................Get Value From Label
let getValueFromLabel = (selector) => {
return $(selector).text()
}
//............................................................................Section Notifications
//..................Show Error Message
let showErrorMessage = (selector, message) => {
$(selector).removeAttr('hidden')
$(selector).text(message)
setTimeout(() => {
$(selector).text('');
$(selector).attr('hidden', 'hidden');
}, 2000);
}
//..................Redirect according to the Ajax error response
let RedirectAccordingToError = (errorThrown) => {
if (errorThrown == "Unauthorized") {
window.location = "/Account/Accounts/UnAuthorized"
} else if (errorThrown == "Not Found") {
window.location = "/Account/Accounts/Login"
} else if (errorThrown == "Forbidden") {
window.location = "/Account/Accounts/UnAuthorized"
}
else {
console.log(errorThrown);
}
}
//............................................................................Section Date Time
//................. Datepicker format like Dec 17 2019
$('.datepickerDEC')[0] && $('.datepickerDEC').each(function () {
$('.datepickerDEC').datepicker({
disableTouchKeyboard: true,
autoclose: true,
format: "M dd yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepicker1')[1] && $('.datepicker1').each(function () {
$('.datepicker1').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepickerGetMonth')[1] && $('.datepickerGetMonth').each(function () {
$('.datepickerGetMonth').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//.............Get Time Using Moment
let momentTime = (value) => {
return moment(value).format("LT")
}
//.............Get date Using MomentFormat
let momentDate = (value) => {
return moment(value).format("MMM DD YYYY")
}
//.............moment human readable Date
let momentHumanDate = (value) => {
return moment(value).fromNow();
}
//.............Get date Using MomentFormat like Dec
let momentMonth = (value) => {
return moment(value).format("MMM")
}
//..............................Get coming month with year
let commingMonth = () => {
var now = new Date();
if (now.getMonth() == 11) {
var current = new Date(now.getFullYear() + 1, 0, 1);
return current;
} else {
var current = new Date(now.getFullYear(), now.getMonth() + 1, 1);
return current;
}
}
//............................................................................Section Custom validations (Email/Mobile)
//...............Email Format Verification
let email_validate = (value) => {
let email = value
var regexPattern = new RegExp(/^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$/); // regular expression pattern
let isValid = regexPattern.test(email);
if (!isValid) {
return false;
}
return true;
}
//..........................Contact validation
let contact_validate = (value) => {
let contact = value
var regexPattern = new RegExp(/^\d{4}[- ]?\d{7}$/); // regular expression pattern
let isValid = regexPattern.test(contact);
if (!isValid) {
return false;
}
return true;
}
//............................................................................Section Table Pagination
// .................... pager
function Pa | ableName, itemsPerPage) {
this.tableName = tableName;
this.itemsPerPage = itemsPerPage;
this.currentPage = 1;
this.pages = 0;
this.inited = false;
this.showRecords = function (from, to) {
var rows = document.getElementById(tableName).rows;
// i starts from 1 to skip table header row
for (var i = 1; i <= rows.length - 1; i++) {
if (i < from || i > to)
rows[i].style.display = 'none';
else
rows[i].style.display = '';
}
}
this.showPage = function (pageNumber) {
if (!this.inited) {
alert("not inited");
return;
}
var oldPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
oldPageAnchor.className = 'page-item';
this.currentPage = pageNumber;
var newPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
newPageAnchor.className = 'page-item active';
var from = (pageNumber - 1) * itemsPerPage + 1;
var to = from + itemsPerPage - 1;
this.showRecords(from, to);
}
this.prev = function () {
if (this.currentPage > 1)
this.showPage(this.currentPage - 1);
}
this.next = function () {
if (this.currentPage < this.pages) {
this.showPage(this.currentPage + 1);
}
}
this.init = function () {
var rows = document.getElementById(tableName).rows;
var records = (rows.length - 1);
this.pages = Math.ceil(records / itemsPerPage);
this.inited = true;
}
this.showPageNav = function (pagerName, positionId) {
if (!this.inited) {
alert("not inited");
return;
}
var element = document.getElementById(positionId);
var pagerHtml = `<li class="page-pre" title="Previous" style="cursor:pointer" onclick="${pagerName}.prev();">
<a class="page-link">‹</a>
</li>
`
for (var page = 1; page <= this.pages; page++)
pagerHtml += `<li style="cursor:pointer" class="page-number" id="pg${tableName}${page}" onclick="${pagerName}.showPage(${page});"><a class="page-link">${page}</a></li>`
pagerHtml += `<li style="cursor:pointer" title="Next" class="page-next" onclick="${pagerName}.next();">
<a class="page-link">›</a>
</li >`
element.innerHTML = pagerHtml;
}
}
//...........................................................................Section Days Name With Id's
//.................get today by using day id
let getToday = (day) => {
switch (day) {
case 1:
return "Monday"
break;
case 2:
return "Tuesday"
break;
case 3:
return "Wednesday"
break;
case 4:
return "Thursday"
break;
case 5:
return "Friday"
break;
case 6:
return "Saturday";
break;
case 7:
return "Sunday"
break;
}
}
//................................ReturnDayId by Using Day Name
let getDayID = (dayName) => {
switch (dayName) {
case "monday":
return 1
break;
case "tuesday":
return 2
break;
case "wednesday":
return 3
break;
case "thursday":
return 4
break;
case "friday":
return 5
break;
case "saturday":
return 6;
break;
case "sunday":
return 7
break;
}
}
//............................................................................Section General
//..................show Modal
let showModal = (selector) => {
$(selector).modal("show")
}
//..................Hide Modal
let hideModal = (selector) => {
$(selector).modal("hide")
}
//.............add class active
let addClassActive = (selector) => {
$(selector).addClass("active")
}
//.............Api Base Url
let ApiBaseUrl = (url) => {
return `https://localhost:44376/${url}`
// return `https://localhost:44346/${url}`
}
//$(`#TimeInInputHours option[value='${timeinhours}']`).attr("selected", "selected");
//$(`#TimeInInputMinutes option[value='${timeinmin}']`).attr("selected", "selected");
//$("#TimeInInputHours").prop('selectedIndex', timeinhours);
//$("#TimeInInputMinutes").prop('selectedIndex', timeinmin); | ger(t | identifier_name |
utility.js | //............................................................................Send AjaxRequest Without Object/parameter
//...................................................Recommended for Get Request
SendAjaxRequest = (ApiUrl, RequestType) =>
{
let ajaxConfig = {
url: ApiBaseUrl(ApiUrl),
type: RequestType,
contentType: 'application/json; charset=utf-8',
dataType: "json",
async: false,
beforeSend: function (xhr) {
xhr.setRequestHeader('Authorization', 'Bearer ' + localStorage.getItem("UserToken"));
},
success: (response) => {
return response;
},
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
//............................................................................Send AjaxRequest With Object/parameter
//...................................................Recommended for all other requests except Get Request
SendAjaxRequestWithObject = (ApiUrl, RequestType , Object) => {
let ajaxConfig = {
url: ApiBaseUrl(ApiUrl),
type: RequestType,
contentType: 'application/json; charset=utf-8',
dataType: "json",
data: JSON.stringify(Object),
async: false,
beforeSend: function (xhr) {
xhr.setRequestHeader('Authorization', 'Bearer ' + localStorage.getItem("UserToken"));
},
success: (response) => {
return response; | },
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
//............................................................................Section Textbox/Labels/div handling
//..................return Textbox Value
let getTextboxValue = (selector) => {
return $.trim($(selector).val())
}
//..................Put Value into Textbox
let putValueIntoTextbox = (selector, value) => {
$(selector).val(value)
}
//..................Reset Textbox value
let resetTextbox = (selector) => {
$(selector).val('')
}
//..................Reset Textbox value with value assign by user
let resetTextboxWithCustomValue = (selector, value) => {
$(selector).val(value)
}
//...........................................select Value From dropdown
let selectValueFromDropdown = (selector) => {
return $(`${selector} option:selected`).val()
}
//..................Change Main Page Title
let CurrentPageTitle = (title, openPage) => {
$("#currentPage").text(title)
if (openPage != null) {
$("#slash").removeAttr("hidden")
$("#openPage").text(openPage)
}
}
//..................Put Value into Label
let putValueIntoLabels = (selector, value) => {
$(selector).text(value)
}
//..................Get Value From Label
let getValueFromLabel = (selector) => {
return $(selector).text()
}
//............................................................................Section Notifications
//..................Show Error Message
let showErrorMessage = (selector, message) => {
$(selector).removeAttr('hidden')
$(selector).text(message)
setTimeout(() => {
$(selector).text('');
$(selector).attr('hidden', 'hidden');
}, 2000);
}
//..................Redirect according to the Ajax error response
let RedirectAccordingToError = (errorThrown) => {
if (errorThrown == "Unauthorized") {
window.location = "/Account/Accounts/UnAuthorized"
} else if (errorThrown == "Not Found") {
window.location = "/Account/Accounts/Login"
} else if (errorThrown == "Forbidden") {
window.location = "/Account/Accounts/UnAuthorized"
}
else {
console.log(errorThrown);
}
}
//............................................................................Section Date Time
//................. Datepicker format like Dec 17 2019
$('.datepickerDEC')[0] && $('.datepickerDEC').each(function () {
$('.datepickerDEC').datepicker({
disableTouchKeyboard: true,
autoclose: true,
format: "M dd yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepicker1')[1] && $('.datepicker1').each(function () {
$('.datepicker1').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepickerGetMonth')[1] && $('.datepickerGetMonth').each(function () {
$('.datepickerGetMonth').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//.............Get Time Using Moment
let momentTime = (value) => {
return moment(value).format("LT")
}
//.............Get date Using MomentFormat
let momentDate = (value) => {
return moment(value).format("MMM DD YYYY")
}
//.............moment human readable Date
let momentHumanDate = (value) => {
return moment(value).fromNow();
}
//.............Get date Using MomentFormat like Dec
let momentMonth = (value) => {
return moment(value).format("MMM")
}
//..............................Get coming month with year
let commingMonth = () => {
var now = new Date();
if (now.getMonth() == 11) {
var current = new Date(now.getFullYear() + 1, 0, 1);
return current;
} else {
var current = new Date(now.getFullYear(), now.getMonth() + 1, 1);
return current;
}
}
//............................................................................Section Custom validations (Email/Mobile)
//...............Email Format Verification
let email_validate = (value) => {
let email = value
var regexPattern = new RegExp(/^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$/); // regular expression pattern
let isValid = regexPattern.test(email);
if (!isValid) {
return false;
}
return true;
}
//..........................Contact validation
let contact_validate = (value) => {
let contact = value
var regexPattern = new RegExp(/^\d{4}[- ]?\d{7}$/); // regular expression pattern
let isValid = regexPattern.test(contact);
if (!isValid) {
return false;
}
return true;
}
//............................................................................Section Table Pagination
// .................... pager
function Pager(tableName, itemsPerPage) {
this.tableName = tableName;
this.itemsPerPage = itemsPerPage;
this.currentPage = 1;
this.pages = 0;
this.inited = false;
this.showRecords = function (from, to) {
var rows = document.getElementById(tableName).rows;
// i starts from 1 to skip table header row
for (var i = 1; i <= rows.length - 1; i++) {
if (i < from || i > to)
rows[i].style.display = 'none';
else
rows[i].style.display = '';
}
}
this.showPage = function (pageNumber) {
if (!this.inited) {
alert("not inited");
return;
}
var oldPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
oldPageAnchor.className = 'page-item';
this.currentPage = pageNumber;
var newPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
newPageAnchor.className = 'page-item active';
var from = (pageNumber - 1) * itemsPerPage + 1;
var to = from + itemsPerPage - 1;
this.showRecords(from, to);
}
this.prev = function () {
if (this.currentPage > 1)
this.showPage(this.currentPage - 1);
}
this.next = function () {
if (this.currentPage < this.pages) {
this.showPage(this.currentPage + 1);
}
}
this.init = function () {
var rows = document.getElementById(tableName).rows;
var records = (rows.length - 1);
this.pages = Math.ceil(records / itemsPerPage);
this.inited = true;
}
this.showPageNav = function (pagerName, positionId) {
if (!this.inited) {
alert("not inited");
return;
}
var element = document.getElementById(positionId);
var pagerHtml = `<li class="page-pre" title="Previous" style="cursor:pointer" onclick="${pagerName}.prev();">
<a class="page-link">‹</a>
</li>
`
for (var page = 1; page <= this.pages; page++)
pagerHtml += `<li style="cursor:pointer" class="page-number" id="pg${tableName}${page}" onclick="${pagerName}.showPage(${page});"><a class="page-link">${page}</a></li>`
pagerHtml += `<li style="cursor:pointer" title="Next" class="page-next" onclick="${pagerName}.next();">
<a class="page-link">›</a>
</li >`
element.innerHTML = pagerHtml;
}
}
//...........................................................................Section Days Name With Id's
//.................get today by using day id
let getToday = (day) => {
switch (day) {
case 1:
return "Monday"
break;
case 2:
return "Tuesday"
break;
case 3:
return "Wednesday"
break;
case 4:
return "Thursday"
break;
case 5:
return "Friday"
break;
case 6:
return "Saturday";
break;
case 7:
return "Sunday"
break;
}
}
//................................ReturnDayId by Using Day Name
let getDayID = (dayName) => {
switch (dayName) {
case "monday":
return 1
break;
case "tuesday":
return 2
break;
case "wednesday":
return 3
break;
case "thursday":
return 4
break;
case "friday":
return 5
break;
case "saturday":
return 6;
break;
case "sunday":
return 7
break;
}
}
//............................................................................Section General
//..................show Modal
let showModal = (selector) => {
$(selector).modal("show")
}
//..................Hide Modal
let hideModal = (selector) => {
$(selector).modal("hide")
}
//.............add class active
let addClassActive = (selector) => {
$(selector).addClass("active")
}
//.............Api Base Url
let ApiBaseUrl = (url) => {
return `https://localhost:44376/${url}`
// return `https://localhost:44346/${url}`
}
//$(`#TimeInInputHours option[value='${timeinhours}']`).attr("selected", "selected");
//$(`#TimeInInputMinutes option[value='${timeinmin}']`).attr("selected", "selected");
//$("#TimeInInputHours").prop('selectedIndex', timeinhours);
//$("#TimeInInputMinutes").prop('selectedIndex', timeinmin); | random_line_split |
|
utility.js | //............................................................................Send AjaxRequest Without Object/parameter
//...................................................Recommended for Get Request
SendAjaxRequest = (ApiUrl, RequestType) =>
{
let ajaxConfig = {
url: ApiBaseUrl(ApiUrl),
type: RequestType,
contentType: 'application/json; charset=utf-8',
dataType: "json",
async: false,
beforeSend: function (xhr) {
xhr.setRequestHeader('Authorization', 'Bearer ' + localStorage.getItem("UserToken"));
},
success: (response) => {
return response;
},
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
//............................................................................Send AjaxRequest With Object/parameter
//...................................................Recommended for all other requests except Get Request
SendAjaxRequestWithObject = (ApiUrl, RequestType , Object) => {
let ajaxConfig = {
url: ApiBaseUrl(ApiUrl),
type: RequestType,
contentType: 'application/json; charset=utf-8',
dataType: "json",
data: JSON.stringify(Object),
async: false,
beforeSend: function (xhr) {
xhr.setRequestHeader('Authorization', 'Bearer ' + localStorage.getItem("UserToken"));
},
success: (response) => {
return response;
},
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
//............................................................................Section Textbox/Labels/div handling
//..................return Textbox Value
let getTextboxValue = (selector) => {
return $.trim($(selector).val())
}
//..................Put Value into Textbox
let putValueIntoTextbox = (selector, value) => {
$(selector).val(value)
}
//..................Reset Textbox value
let resetTextbox = (selector) => {
$(selector).val('')
}
//..................Reset Textbox value with value assign by user
let resetTextboxWithCustomValue = (selector, value) => {
$(selector).val(value)
}
//...........................................select Value From dropdown
let selectValueFromDropdown = (selector) => {
return $(`${selector} option:selected`).val()
}
//..................Change Main Page Title
let CurrentPageTitle = (title, openPage) => {
$("#currentPage").text(title)
if (openPage != null) {
$("#slash").removeAttr("hidden")
$("#openPage").text(openPage)
}
}
//..................Put Value into Label
let putValueIntoLabels = (selector, value) => {
$(selector).text(value)
}
//..................Get Value From Label
let getValueFromLabel = (selector) => {
return $(selector).text()
}
//............................................................................Section Notifications
//..................Show Error Message
let showErrorMessage = (selector, message) => {
$(selector).removeAttr('hidden')
$(selector).text(message)
setTimeout(() => {
$(selector).text('');
$(selector).attr('hidden', 'hidden');
}, 2000);
}
//..................Redirect according to the Ajax error response
let RedirectAccordingToError = (errorThrown) => {
if (errorThrown == "Unauthorized") {
window.location = "/Account/Accounts/UnAuthorized"
} else if (errorThrown == "Not Found") {
window.location = "/Account/Accounts/Login"
} else if (errorThrown == "Forbidden") {
window.location = "/Account/Accounts/UnAuthorized"
}
else {
console.log(errorThrown);
}
}
//............................................................................Section Date Time
//................. Datepicker format like Dec 17 2019
$('.datepickerDEC')[0] && $('.datepickerDEC').each(function () {
$('.datepickerDEC').datepicker({
disableTouchKeyboard: true,
autoclose: true,
format: "M dd yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepicker1')[1] && $('.datepicker1').each(function () {
$('.datepicker1').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepickerGetMonth')[1] && $('.datepickerGetMonth').each(function () {
$('.datepickerGetMonth').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//.............Get Time Using Moment
let momentTime = (value) => {
return moment(value).format("LT")
}
//.............Get date Using MomentFormat
let momentDate = (value) => {
return moment(value).format("MMM DD YYYY")
}
//.............moment human readable Date
let momentHumanDate = (value) => {
return moment(value).fromNow();
}
//.............Get date Using MomentFormat like Dec
let momentMonth = (value) => {
return moment(value).format("MMM")
}
//..............................Get coming month with year
let commingMonth = () => {
var now = new Date();
if (now.getMonth() == 11) {
var current = new Date(now.getFullYear() + 1, 0, 1);
return current;
} else {
var current = new Date(now.getFullYear(), now.getMonth() + 1, 1);
return current;
}
}
//............................................................................Section Custom validations (Email/Mobile)
//...............Email Format Verification
let email_validate = (value) => {
let email = value
var regexPattern = new RegExp(/^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$/); // regular expression pattern
let isValid = regexPattern.test(email);
if (!isValid) {
return false;
}
return true;
}
//..........................Contact validation
let contact_validate = (value) => {
let contact = value
var regexPattern = new RegExp(/^\d{4}[- ]?\d{7}$/); // regular expression pattern
let isValid = regexPattern.test(contact);
if (!isValid) {
return false;
}
return true;
}
//............................................................................Section Table Pagination
// .................... pager
function Pager(tableName, itemsPerPage) {
| .........................................................................Section Days Name With Id's
//.................get today by using day id
let getToday = (day) => {
switch (day) {
case 1:
return "Monday"
break;
case 2:
return "Tuesday"
break;
case 3:
return "Wednesday"
break;
case 4:
return "Thursday"
break;
case 5:
return "Friday"
break;
case 6:
return "Saturday";
break;
case 7:
return "Sunday"
break;
}
}
//................................ReturnDayId by Using Day Name
let getDayID = (dayName) => {
switch (dayName) {
case "monday":
return 1
break;
case "tuesday":
return 2
break;
case "wednesday":
return 3
break;
case "thursday":
return 4
break;
case "friday":
return 5
break;
case "saturday":
return 6;
break;
case "sunday":
return 7
break;
}
}
//............................................................................Section General
//..................show Modal
let showModal = (selector) => {
$(selector).modal("show")
}
//..................Hide Modal
let hideModal = (selector) => {
$(selector).modal("hide")
}
//.............add class active
let addClassActive = (selector) => {
$(selector).addClass("active")
}
//.............Api Base Url
let ApiBaseUrl = (url) => {
return `https://localhost:44376/${url}`
// return `https://localhost:44346/${url}`
}
//$(`#TimeInInputHours option[value='${timeinhours}']`).attr("selected", "selected");
//$(`#TimeInInputMinutes option[value='${timeinmin}']`).attr("selected", "selected");
//$("#TimeInInputHours").prop('selectedIndex', timeinhours);
//$("#TimeInInputMinutes").prop('selectedIndex', timeinmin); |
this.tableName = tableName;
this.itemsPerPage = itemsPerPage;
this.currentPage = 1;
this.pages = 0;
this.inited = false;
this.showRecords = function (from, to) {
var rows = document.getElementById(tableName).rows;
// i starts from 1 to skip table header row
for (var i = 1; i <= rows.length - 1; i++) {
if (i < from || i > to)
rows[i].style.display = 'none';
else
rows[i].style.display = '';
}
}
this.showPage = function (pageNumber) {
if (!this.inited) {
alert("not inited");
return;
}
var oldPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
oldPageAnchor.className = 'page-item';
this.currentPage = pageNumber;
var newPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
newPageAnchor.className = 'page-item active';
var from = (pageNumber - 1) * itemsPerPage + 1;
var to = from + itemsPerPage - 1;
this.showRecords(from, to);
}
this.prev = function () {
if (this.currentPage > 1)
this.showPage(this.currentPage - 1);
}
this.next = function () {
if (this.currentPage < this.pages) {
this.showPage(this.currentPage + 1);
}
}
this.init = function () {
var rows = document.getElementById(tableName).rows;
var records = (rows.length - 1);
this.pages = Math.ceil(records / itemsPerPage);
this.inited = true;
}
this.showPageNav = function (pagerName, positionId) {
if (!this.inited) {
alert("not inited");
return;
}
var element = document.getElementById(positionId);
var pagerHtml = `<li class="page-pre" title="Previous" style="cursor:pointer" onclick="${pagerName}.prev();">
<a class="page-link">‹</a>
</li>
`
for (var page = 1; page <= this.pages; page++)
pagerHtml += `<li style="cursor:pointer" class="page-number" id="pg${tableName}${page}" onclick="${pagerName}.showPage(${page});"><a class="page-link">${page}</a></li>`
pagerHtml += `<li style="cursor:pointer" title="Next" class="page-next" onclick="${pagerName}.next();">
<a class="page-link">›</a>
</li >`
element.innerHTML = pagerHtml;
}
}
//.. | identifier_body |
Server.go | package server
import (
networkapi "github.com/juzi5201314/MineGopher/api/network"
raknetapi "github.com/juzi5201314/MineGopher/api/network/raknet"
"github.com/juzi5201314/MineGopher/api/player"
api "github.com/juzi5201314/MineGopher/api/server"
"github.com/juzi5201314/MineGopher/level"
"github.com/juzi5201314/MineGopher/network"
raknet "github.com/juzi5201314/MineGopher/network/raknet/server"
"github.com/juzi5201314/MineGopher/network/webconsole"
"github.com/juzi5201314/MineGopher/utils"
"os"
"strconv"
"time"
"github.com/juzi5201314/MineGopher/plugin"
)
const (
ServerName = "MineGopher"
ServerVersion = "0.0.1"
)
type Server struct {
isRunning bool
tick int64
logger *utils.Logger
pluginPath string
playersPath string
themePath string
worldsPath string
behaviorPacksPath string
resourecePackPath string
serverPath string
config *utils.Config
network networkapi.NetWork
ip string
port int
raknetServer raknetapi.RaknetServer
pluginLoader *plugin.PluginLoader
levels map[string]*level.Level
defaultLevel string
}
func New(serverPath string, config *utils.Config, logger *utils.Logger) *Server {
server := new(Server)
api.SetServer(server)
server.serverPath = serverPath
server.config = config
server.logger = logger
server.pluginPath = serverPath + "/plugins/"
server.themePath = serverPath + "/theme/"
server.playersPath = serverPath + "/players/"
server.worldsPath = serverPath + "/worlds/"
server.resourecePackPath = serverPath + "/resoureces_pack/"
server.levels = map[string]*level.Level{}
server.ip = config.Get("server-ip", "0.0.0.0").(string)
server.port = config.Get("server-port", 19132).(int)
server.pluginLoader = plugin.NewLoader(server.pluginPath)
//s.LevelManager = level.NewManager(serverPath)
//server.CommandManager = commands.NewManager()
//server.CommandReader = command.NewCommandReader(os.Stdin)
/*
s.SessionManager = packet.NewSessionManager()
s.NetworkAdapter = packet.NewNetworkAdapter(s.SessionManager)
s.NetworkAdapter.GetRakLibManager().PongData = s.GeneratePongData()
s.NetworkAdapter.GetRakLibManager().RawPacketFunction = s.HandleRaw
s.NetworkAdapter.GetRakLibManager().DisconnectFunction = s.HandleDisconnect
s.RegisterDefaultProtocols()
s.PackManager = packs.NewManager(serverPath)
s.PermissionManager = permissions.NewManager()
s.PluginManager = NewPluginManager(s)
s.QueryManager = query.NewManager()
if config.UseEncryption {
var curve = elliptic.P384()
var err error
s.privateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
text.DefaultLogger.LogError(err)
if !curve.IsOnCurve(s.privateKey.X, s.privateKey.Y) {
text.DefaultLogger.Error("Invalid private key generated")
}
var token = make([]byte, 128)
rand.Read(token)
s.token = token
}
return s
*/
return server
}
func (server *Server) IsRunning() bool {
return server.isRunning
}
func (server *Server) Start() {
if server.isRunning {
panic("The server has beem started!")
}
server.mkdirs()
server.logger.Info("MineGopher " + ServerVersion + ", running on " + server.serverPath)
server.isRunning = true
server.defaultLevel = server.config.Get("level-name", "world").(string)
dl := level.NewLevel(server.worldsPath+server.defaultLevel, server.defaultLevel)
server.levels[server.defaultLevel] = dl
server.network = network.New()
server.network.SetName(server.config.Get("motd", "MineGopher Server For Minecraft: PE").(string))
server.raknetServer = raknet.New(server.GetIp(), server.GetPort())
server.raknetServer.Start()
server.logger.Info("RakNetServer Listen " + server.GetIp() + ":" + strconv.Itoa(server.GetPort()))
if server.config.Get("webconsole", true).(bool) {
webconsole.Start()
}
server.pluginLoader.LoadPlugins()
server.config.Save()
}
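// Illustrative note (not part of the original source): between New and Start the server reads the
// following configuration keys, with these defaults when a key is missing:
//
//	server-ip:   "0.0.0.0"
//	server-port: 19132
//	level-name:  "world"
//	motd:        "MineGopher Server For Minecraft: PE"
//	webconsole:  true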
func (server *Server) Shutdown() {
if !server.isRunning {
return
}
for _, l := range server.levels {
l.GetDimension().Save()
}
server.logger.Info("Server stopped.")
server.isRunning = false
server.logger.Close()
}
func (server *Server) GetConfig() *utils.Config {
return server.config
}
func (server *Server) GetAllPlayer() map[string]player.Player {
return server.raknetServer.GetPlayers()
}
func (server *Server) GetNetWork() networkapi.NetWork {
return server.network
}
func (server *Server) GetRaknetServer() raknetapi.RaknetServer {
return server.raknetServer
}
func (server *Server) GetName() string {
return ServerName
}
func (server *Server) GetLogger() *utils.Logger {
return server.logger
}
func (server *Server) GetLevels() map[string]*level.Level {
return server.levels
}
func (server *Server) GetLevel(name string) *level.Level {
return server.levels[name]
}
func (server *Server) GetDefaultLevel() *level.Level {
return server.GetLevel(server.defaultLevel)
}
func (server *Server) GetPath() string {
return server.serverPath
}
func (server *Server) ScheduleRepeatingTask(fn func(), d time.Duration) *time.Ticker {
ticker := time.NewTicker(d)
go func() {
for range ticker.C {
fn()
}
}()
return ticker
}
func (server *Server) ScheduleDelayedTask(fn func(), d time.Duration) *time.Timer {
return time.AfterFunc(d, fn)
}
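// Illustrative note (not part of the original source): both helpers run their callback on a
// background goroutine. Assuming s is a *Server, a caller could write:
//
//	ticker := s.ScheduleRepeatingTask(func() { s.GetLogger().Info("autosave tick") }, time.Minute)
//	defer ticker.Stop()
//	s.ScheduleDelayedTask(func() { s.GetLogger().Info("delayed warm-up done") }, 5*time.Second)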
/*
// GetMinecraftVersion returns the latest Minecraft game version.
// It is prefixed with a 'v', for example: "v1.2.10.1"
func (server *Server) GetMinecraftVersion() string {
return info.LatestGameVersion
}
// GetMinecraftNetworkVersion returns the latest Minecraft network version.
// For example: "1.2.10.1"
func (server *Server) GetMinecraftNetworkVersion() string {
return info.LatestGameVersionNetwork
}
// HasPermission returns if the server has a given permission.
// Always returns true to satisfy the ICommandSender interface.
func (server *Server) HasPermission(string) bool {
return true
}
// SendMessage sends a message to the server to satisfy the ICommandSender interface.
func (server *Server) SendMessage(message ...interface{}) {
text.DefaultLogger.Notice(message)
}
// GetEngineName returns 'minegopher'.
func (server *Server) GetEngineName() string {
return minegopherName
}
// GetName returns the LAN name of the server specified in the configuration.
func (server *Server) GetName() string {
return server.Config.ServerName
}
// GetPort returns the port of the server specified in the configuration.
func (server *Server) GetPort() uint16 {
return server.Config.ServerPort
}
// GetAddress returns the IP address specified in the configuration.
func (server *Server) GetAddress() string {
return server.Config.ServerIp
}
// GetMaximumPlayers returns the maximum amount of players on the server.
func (server *Server) GetMaximumPlayers() uint {
return server.Config.MaximumPlayers
}
// Returns the Message Of The Day of the server.
func (server *Server) GetMotd() string {
return server.Config.ServerMotd
}
// GetCurrentTick returns the current tick the server is on.
func (server *Server) GetCurrentTick() int64 {
return server.tick
}
// BroadcastMessageTo broadcasts a message to all receivers.
func (server *Server) BroadcastMessageTo(receivers []*packet.MinecraftSession, message ...interface{}) {
for _, session := range receivers {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// Broadcast broadcasts a message to all players and the console in the server.
func (server *Server) BroadcastMessage(message ...interface{}) {
for _, session := range server.SessionManager.GetSessions() {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// GetPrivateKey returns the ECDSA private key of the server.
func (server *Server) GetPrivateKey() *ecdsa.PrivateKey {
return server.privateKey
}
// GetPublicKey returns the ECDSA public key of the private key of the server.
func (server *Server) GetPublicKey() *ecdsa.PublicKey {
return &server.privateKey.PublicKey
}
// GetServerToken returns the server token byte sequence.
func (server *Server) GetServerToken() []byte {
return server.token
}
// GenerateQueryResult returns the query data of the server in a byte array.
func (server *Server) GenerateQueryResult() query.Result {
var plugs []string
for _, plug := range server.PluginManager.GetPlugins() {
plugs = append(plugs, plug.GetName()+" v"+plug.GetVersion())
}
var ps []string
for name := range server.SessionManager.GetSessions() {
ps = append(ps, name)
}
var result = query.Result{
MOTD: server.GetMotd(),
ListPlugins: server.Config.AllowPluginQuery,
PluginNames: plugs,
PlayerNames: ps,
GameMode: "SMP",
Version: server.GetMinecraftVersion(),
ServerEngine: server.GetEngineName(),
WorldName: server.LevelManager.GetDefaultLevel().GetName(),
OnlinePlayers: int(server.SessionManager.GetSessionCount()),
MaximumPlayers: int(server.Config.MaximumPlayers),
Whitelist: "off",
Port: server.Config.ServerPort,
Address: server.Config.ServerIp,
}
return result
}
// HandleRaw handles a raw packet, for instance a query packet.
func (server *Server) HandleRaw(packet []byte, addr *net2.UDPAddr) {
if string(packet[0:2]) == string(query.Header) {
if !server.Config.AllowQuery {
return
}
var q = query.NewFromRaw(packet, addr)
q.DecodeServer()
server.QueryManager.HandleQuery(q)
return
}
text.DefaultLogger.Debug("Unhandled raw packet:", hex.EncodeToString(packet))
}
// HandleDisconnect handles a disconnection from a session.
func (server *Server) HandleDisconnect(s *server.Session) {
text.DefaultLogger.Debug(s, "disconnected!")
session, ok := server.SessionManager.GetSessionByRakNetSession(s)
server.SessionManager.RemoveMinecraftSession(session)
if !ok {
return
}
if session.GetPlayer().Dimension != nil {
for _, online := range server.SessionManager.GetSessions() {
online.SendPlayerList(data.ListTypeRemove, map[string]protocol.PlayerListEntry{online.GetPlayer().GetName(): online.GetPlayer()})
}
session.GetPlayer().DespawnFromAll()
session.GetPlayer().Close()
server.BroadcastMessage(text.Yellow+session.GetDisplayName(), "has left the server")
}
}
// GeneratePongData generates the raknet pong data for the UnconnectedPong RakNet packet.
func (server *Server) GeneratePongData() string {
return fmt.Sprint("MCPE;", server.GetMotd(), ";", info.LatestProtocol, ";", server.GetMinecraftNetworkVersion(), ";", server.SessionManager.GetSessionCount(), ";", server.Config.MaximumPlayers, ";", server.NetworkAdapter.GetRakLibManager().ServerId, ";", server.GetEngineName(), ";Creative;")
}
// Tick ticks the entire server. (Levels, scheduler, raknet server etc.)
// Internal. Not to be used by plugins.
func (server *Server) Tick() {
if !server.isRunning {
return
} | }
for _, session := range server.SessionManager.GetSessions() {
session.Tick()
}
for range server.LevelManager.GetLevels() {
//level.Tick()
}
server.tick++
}
func (server *Server) GetCommandManager() command {
return server.CommandManager
}
**/
func (server *Server) Tick() {
for _, p := range server.GetAllPlayer() {
p.Tick()
}
}
func (server *Server) mkdirs() {
os.Mkdir(server.playersPath, 0700)
os.Mkdir(server.pluginPath, 0700)
//os.Mkdir(server.behaviorPacksPath, 0700)
os.Mkdir(server.resourecePackPath, 0700)
os.Mkdir(server.worldsPath, 0700)
os.Mkdir(server.themePath, 0700)
}
func (server *Server) GetIp() string {
return server.ip
}
func (server *Server) GetPort() int {
return server.port
} | if server.tick%20 == 0 {
server.QueryManager.SetQueryResult(server.GenerateQueryResult())
server.NetworkAdapter.GetRakLibManager().PongData = server.GeneratePongData() | random_line_split |
Server.go | package server
import (
networkapi "github.com/juzi5201314/MineGopher/api/network"
raknetapi "github.com/juzi5201314/MineGopher/api/network/raknet"
"github.com/juzi5201314/MineGopher/api/player"
api "github.com/juzi5201314/MineGopher/api/server"
"github.com/juzi5201314/MineGopher/level"
"github.com/juzi5201314/MineGopher/network"
raknet "github.com/juzi5201314/MineGopher/network/raknet/server"
"github.com/juzi5201314/MineGopher/network/webconsole"
"github.com/juzi5201314/MineGopher/utils"
"os"
"strconv"
"time"
"github.com/juzi5201314/MineGopher/plugin"
)
const (
ServerName = "MineGopher"
ServerVersion = "0.0.1"
)
type Server struct {
isRunning bool
tick int64
logger *utils.Logger
pluginPath string
playersPath string
themePath string
worldsPath string
behaviorPacksPath string
resourecePackPath string
serverPath string
config *utils.Config
network networkapi.NetWork
ip string
port int
raknetServer raknetapi.RaknetServer
pluginLoader *plugin.PluginLoader
levels map[string]*level.Level
defaultLevel string
}
func New(serverPath string, config *utils.Config, logger *utils.Logger) *Server {
server := new(Server)
api.SetServer(server)
server.serverPath = serverPath
server.config = config
server.logger = logger
server.pluginPath = serverPath + "/plugins/"
server.themePath = serverPath + "/theme/"
server.playersPath = serverPath + "/players/"
server.worldsPath = serverPath + "/worlds/"
server.resourecePackPath = serverPath + "/resoureces_pack/"
server.levels = map[string]*level.Level{}
server.ip = config.Get("server-ip", "0.0.0.0").(string)
server.port = config.Get("server-port", 19132).(int)
server.pluginLoader = plugin.NewLoader(server.pluginPath)
//s.LevelManager = level.NewManager(serverPath)
//server.CommandManager = commands.NewManager()
//server.CommandReader = command.NewCommandReader(os.Stdin)
/*
s.SessionManager = packet.NewSessionManager()
s.NetworkAdapter = packet.NewNetworkAdapter(s.SessionManager)
s.NetworkAdapter.GetRakLibManager().PongData = s.GeneratePongData()
s.NetworkAdapter.GetRakLibManager().RawPacketFunction = s.HandleRaw
s.NetworkAdapter.GetRakLibManager().DisconnectFunction = s.HandleDisconnect
s.RegisterDefaultProtocols()
s.PackManager = packs.NewManager(serverPath)
s.PermissionManager = permissions.NewManager()
s.PluginManager = NewPluginManager(s)
s.QueryManager = query.NewManager()
if config.UseEncryption {
var curve = elliptic.P384()
var err error
s.privateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
text.DefaultLogger.LogError(err)
if !curve.IsOnCurve(s.privateKey.X, s.privateKey.Y) {
text.DefaultLogger.Error("Invalid private key generated")
}
var token = make([]byte, 128)
rand.Read(token)
s.token = token
}
return s
*/
return server
}
func (server *Server) IsRunning() bool {
return server.isRunning
}
func (server *Server) Start() {
if server.isRunning {
panic("The server has beem started!")
}
server.mkdirs()
server.logger.Info("MineGopher " + ServerVersion + ", running on " + server.serverPath)
server.isRunning = true
server.defaultLevel = server.config.Get("level-name", "world").(string)
dl := level.NewLevel(server.worldsPath+server.defaultLevel, server.defaultLevel)
server.levels[server.defaultLevel] = dl
server.network = network.New()
server.network.SetName(server.config.Get("motd", "MineGopher Server For Minecraft: PE").(string))
server.raknetServer = raknet.New(server.GetIp(), server.GetPort())
server.raknetServer.Start()
server.logger.Info("RakNetServer Listen " + server.GetIp() + ":" + strconv.Itoa(server.GetPort()))
if server.config.Get("webconsole", true).(bool) {
webconsole.Start()
}
server.pluginLoader.LoadPlugins()
server.config.Save()
}
func (server *Server) Shutdown() {
if !server.isRunning {
return
}
for _, l := range server.levels {
l.GetDimension().Save()
}
server.logger.Info("Server stopped.")
server.isRunning = false
server.logger.Close()
}
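// Usage sketch (assumption): how New, Start and Shutdown are expected to be
// wired together from an entry point. The config/logger constructors shown
// here are hypothetical; only the Server API itself comes from this package.
//
//    func main() {
//        path, _ := os.Getwd()
//        config := utils.NewConfig(path + "/minegopher.yml") // assumed helper
//        logger := utils.NewLogger(path + "/minegopher.log") // assumed helper
//        srv := server.New(path, config, logger)
//        srv.Start()
//        defer srv.Shutdown()
//        srv.ScheduleRepeatingTask(srv.Tick, 50*time.Millisecond) // drive the tick loop
//        select {} // block; real code would wait for a shutdown signal instead
//    }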
func (server *Server) GetConfig() *utils.Config {
return server.config
}
func (server *Server) GetAllPlayer() map[string]player.Player {
return server.raknetServer.GetPlayers()
}
func (server *Server) GetNetWork() networkapi.NetWork {
return server.network
}
func (server *Server) GetRaknetServer() raknetapi.RaknetServer {
return server.raknetServer
}
func (server *Server) GetName() string {
return ServerName
}
func (server *Server) GetLogger() *utils.Logger {
return server.logger
}
func (server *Server) GetLevels() map[string]*level.Level {
return server.levels
}
func (server *Server) GetLevel(name string) *level.Level {
return server.levels[name]
}
func (server *Server) GetDefaultLevel() *level.Level {
return server.GetLevel(server.defaultLevel)
}
func (server *Server) GetPath() string {
return server.serverPath
}
func (server *Server) ScheduleRepeatingTask(fn func(), d time.Duration) *time.Ticker {
ticker := time.NewTicker(d)
go func() {
for range ticker.C {
fn()
}
}()
return ticker
}
func (server *Server) ScheduleDelayedTask(fn func(), d time.Duration) *time.Timer {
return time.AfterFunc(d, fn)
}
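// Example (sketch): periodic and one-shot work scheduled outside the main loop.
// Stopping the returned ticker stops further callbacks; note that the goroutine
// launched above then blocks on the channel rather than exiting, because
// Stop does not close ticker.C.
//
//    saveTicker := server.ScheduleRepeatingTask(func() {
//        server.GetDefaultLevel().GetDimension().Save()
//    }, 30*time.Second)
//    defer saveTicker.Stop()
//
//    server.ScheduleDelayedTask(func() {
//        server.GetLogger().Info("delayed task fired")
//    }, 5*time.Second)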
/*
// GetMinecraftVersion returns the latest Minecraft game version.
// It is prefixed with a 'v', for example: "v1.2.10.1"
func (server *Server) GetMinecraftVersion() string {
return info.LatestGameVersion
}
// GetMinecraftNetworkVersion returns the latest Minecraft network version.
// For example: "1.2.10.1"
func (server *Server) GetMinecraftNetworkVersion() string {
return info.LatestGameVersionNetwork
}
// HasPermission returns if the server has a given permission.
// Always returns true to satisfy the ICommandSender interface.
func (server *Server) HasPermission(string) bool {
return true
}
// SendMessage sends a message to the server to satisfy the ICommandSender interface.
func (server *Server) SendMessage(message ...interface{}) {
text.DefaultLogger.Notice(message)
}
// GetEngineName returns 'minegopher'.
func (server *Server) GetEngineName() string {
return minegopherName
}
// GetName returns the LAN name of the server specified in the configuration.
func (server *Server) GetName() string {
return server.Config.ServerName
}
// GetPort returns the port of the server specified in the configuration.
func (server *Server) GetPort() uint16 {
return server.Config.ServerPort
}
// GetAddress returns the IP address specified in the configuration.
func (server *Server) GetAddress() string {
return server.Config.ServerIp
}
// GetMaximumPlayers returns the maximum amount of players on the server.
func (server *Server) GetMaximumPlayers() uint {
return server.Config.MaximumPlayers
}
// Returns the Message Of The Day of the server.
func (server *Server) GetMotd() string {
return server.Config.ServerMotd
}
// GetCurrentTick returns the current tick the server is on.
func (server *Server) GetCurrentTick() int64 {
return server.tick
}
// BroadcastMessageTo broadcasts a message to all receivers.
func (server *Server) BroadcastMessageTo(receivers []*packet.MinecraftSession, message ...interface{}) {
for _, session := range receivers {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// Broadcast broadcasts a message to all players and the console in the server.
func (server *Server) BroadcastMessage(message ...interface{}) {
for _, session := range server.SessionManager.GetSessions() {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// GetPrivateKey returns the ECDSA private key of the server.
func (server *Server) GetPrivateKey() *ecdsa.PrivateKey {
return server.privateKey
}
// GetPublicKey returns the ECDSA public key of the private key of the server.
func (server *Server) GetPublicKey() *ecdsa.PublicKey {
return &server.privateKey.PublicKey
}
// GetServerToken returns the server token byte sequence.
func (server *Server) GetServerToken() []byte {
return server.token
}
// GenerateQueryResult returns the query data of the server in a byte array.
func (server *Server) GenerateQueryResult() query.Result {
var plugs []string
for _, plug := range server.PluginManager.GetPlugins() {
plugs = append(plugs, plug.GetName()+" v"+plug.GetVersion())
}
var ps []string
for name := range server.SessionManager.GetSessions() {
ps = append(ps, name)
}
var result = query.Result{
MOTD: server.GetMotd(),
ListPlugins: server.Config.AllowPluginQuery,
PluginNames: plugs,
PlayerNames: ps,
GameMode: "SMP",
Version: server.GetMinecraftVersion(),
ServerEngine: server.GetEngineName(),
WorldName: server.LevelManager.GetDefaultLevel().GetName(),
OnlinePlayers: int(server.SessionManager.GetSessionCount()),
MaximumPlayers: int(server.Config.MaximumPlayers),
Whitelist: "off",
Port: server.Config.ServerPort,
Address: server.Config.ServerIp,
}
return result
}
// HandleRaw handles a raw packet, for instance a query packet.
func (server *Server) HandleRaw(packet []byte, addr *net2.UDPAddr) {
if string(packet[0:2]) == string(query.Header) {
if !server.Config.AllowQuery {
return
}
var q = query.NewFromRaw(packet, addr)
q.DecodeServer()
server.QueryManager.HandleQuery(q)
return
}
text.DefaultLogger.Debug("Unhandled raw packet:", hex.EncodeToString(packet))
}
// HandleDisconnect handles a disconnection from a session.
func (server *Server) HandleDisconnect(s *server.Session) {
text.DefaultLogger.Debug(s, "disconnected!")
session, ok := server.SessionManager.GetSessionByRakNetSession(s)
server.SessionManager.RemoveMinecraftSession(session)
if !ok {
return
}
if session.GetPlayer().Dimension != nil {
for _, online := range server.SessionManager.GetSessions() {
online.SendPlayerList(data.ListTypeRemove, map[string]protocol.PlayerListEntry{online.GetPlayer().GetName(): online.GetPlayer()})
}
session.GetPlayer().DespawnFromAll()
session.GetPlayer().Close()
server.BroadcastMessage(text.Yellow+session.GetDisplayName(), "has left the server")
}
}
// GeneratePongData generates the raknet pong data for the UnconnectedPong RakNet packet.
func (server *Server) GeneratePongData() string {
return fmt.Sprint("MCPE;", server.GetMotd(), ";", info.LatestProtocol, ";", server.GetMinecraftNetworkVersion(), ";", server.SessionManager.GetSessionCount(), ";", server.Config.MaximumPlayers, ";", server.NetworkAdapter.GetRakLibManager().ServerId, ";", server.GetEngineName(), ";Creative;")
}
// Tick ticks the entire server. (Levels, scheduler, raknet server etc.)
// Internal. Not to be used by plugins.
func (server *Server) Tick() {
if !server.isRunning {
return
}
if server.tick%20 == 0 {
server.QueryManager.SetQueryResult(server.GenerateQueryResult())
server.NetworkAdapter.GetRakLibManager().PongData = server.GeneratePongData()
}
for _, session := range server.SessionManager.GetSessions() {
session.Tick()
}
for range server.LevelManager.GetLevels() {
//level.Tick()
}
server.tick++
}
func (server *Server) GetCommandManager() command {
return server.CommandManager
}
**/
func (server *Server) | () {
for _, p := range server.GetAllPlayer() {
p.Tick()
}
}
func (server *Server) mkdirs() {
os.Mkdir(server.playersPath, 0700)
os.Mkdir(server.pluginPath, 0700)
//os.Mkdir(server.behaviorPacksPath, 0700)
os.Mkdir(server.resourecePackPath, 0700)
os.Mkdir(server.worldsPath, 0700)
os.Mkdir(server.themePath, 0700)
}
func (server *Server) GetIp() string {
return server.ip
}
func (server *Server) GetPort() int {
return server.port
}
| Tick | identifier_name |
Server.go | package server
import (
networkapi "github.com/juzi5201314/MineGopher/api/network"
raknetapi "github.com/juzi5201314/MineGopher/api/network/raknet"
"github.com/juzi5201314/MineGopher/api/player"
api "github.com/juzi5201314/MineGopher/api/server"
"github.com/juzi5201314/MineGopher/level"
"github.com/juzi5201314/MineGopher/network"
raknet "github.com/juzi5201314/MineGopher/network/raknet/server"
"github.com/juzi5201314/MineGopher/network/webconsole"
"github.com/juzi5201314/MineGopher/utils"
"os"
"strconv"
"time"
"github.com/juzi5201314/MineGopher/plugin"
)
const (
ServerName = "MineGopher"
ServerVersion = "0.0.1"
)
type Server struct {
isRunning bool
tick int64
logger *utils.Logger
pluginPath string
playersPath string
themePath string
worldsPath string
behaviorPacksPath string
resourecePackPath string
serverPath string
config *utils.Config
network networkapi.NetWork
ip string
port int
raknetServer raknetapi.RaknetServer
pluginLoader *plugin.PluginLoader
levels map[string]*level.Level
defaultLevel string
}
func New(serverPath string, config *utils.Config, logger *utils.Logger) *Server {
server := new(Server)
api.SetServer(server)
server.serverPath = serverPath
server.config = config
server.logger = logger
server.pluginPath = serverPath + "/plugins/"
server.themePath = serverPath + "/theme/"
server.playersPath = serverPath + "/players/"
server.worldsPath = serverPath + "/worlds/"
server.resourecePackPath = serverPath + "/resoureces_pack/"
server.levels = map[string]*level.Level{}
server.ip = config.Get("server-ip", "0.0.0.0").(string)
server.port = config.Get("server-port", 19132).(int)
server.pluginLoader = plugin.NewLoader(server.pluginPath)
//s.LevelManager = level.NewManager(serverPath)
//server.CommandManager = commands.NewManager()
//server.CommandReader = command.NewCommandReader(os.Stdin)
/*
s.SessionManager = packet.NewSessionManager()
s.NetworkAdapter = packet.NewNetworkAdapter(s.SessionManager)
s.NetworkAdapter.GetRakLibManager().PongData = s.GeneratePongData()
s.NetworkAdapter.GetRakLibManager().RawPacketFunction = s.HandleRaw
s.NetworkAdapter.GetRakLibManager().DisconnectFunction = s.HandleDisconnect
s.RegisterDefaultProtocols()
s.PackManager = packs.NewManager(serverPath)
s.PermissionManager = permissions.NewManager()
s.PluginManager = NewPluginManager(s)
s.QueryManager = query.NewManager()
if config.UseEncryption {
var curve = elliptic.P384()
var err error
s.privateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
text.DefaultLogger.LogError(err)
if !curve.IsOnCurve(s.privateKey.X, s.privateKey.Y) {
text.DefaultLogger.Error("Invalid private key generated")
}
var token = make([]byte, 128)
rand.Read(token)
s.token = token
}
return s
*/
return server
}
func (server *Server) IsRunning() bool {
return server.isRunning
}
func (server *Server) Start() {
if server.isRunning {
panic("The server has beem started!")
}
server.mkdirs()
server.logger.Info("MineGopher " + ServerVersion + ", running on " + server.serverPath)
server.isRunning = true
server.defaultLevel = server.config.Get("level-name", "world").(string)
dl := level.NewLevel(server.worldsPath+server.defaultLevel, server.defaultLevel)
server.levels[server.defaultLevel] = dl
server.network = network.New()
server.network.SetName(server.config.Get("motd", "MineGopher Server For Minecraft: PE").(string))
server.raknetServer = raknet.New(server.GetIp(), server.GetPort())
server.raknetServer.Start()
server.logger.Info("RakNetServer Listen " + server.GetIp() + ":" + strconv.Itoa(server.GetPort()))
if server.config.Get("webconsole", true).(bool) {
webconsole.Start()
}
server.pluginLoader.LoadPlugins()
server.config.Save()
}
func (server *Server) Shutdown() {
if !server.isRunning {
return
}
for _, l := range server.levels {
l.GetDimension().Save()
}
server.logger.Info("Server stopped.")
server.isRunning = false
server.logger.Close()
}
func (server *Server) GetConfig() *utils.Config {
return server.config
}
func (server *Server) GetAllPlayer() map[string]player.Player {
return server.raknetServer.GetPlayers()
}
func (server *Server) GetNetWork() networkapi.NetWork {
return server.network
}
func (server *Server) GetRaknetServer() raknetapi.RaknetServer {
return server.raknetServer
}
func (server *Server) GetName() string {
return ServerName
}
func (server *Server) GetLogger() *utils.Logger {
return server.logger
}
func (server *Server) GetLevels() map[string]*level.Level |
func (server *Server) GetLevel(name string) *level.Level {
return server.levels[name]
}
func (server *Server) GetDefaultLevel() *level.Level {
return server.GetLevel(server.defaultLevel)
}
func (server *Server) GetPath() string {
return server.serverPath
}
func (server *Server) ScheduleRepeatingTask(fn func(), d time.Duration) *time.Ticker {
ticker := time.NewTicker(d)
go func() {
for range ticker.C {
fn()
}
}()
return ticker
}
func (server *Server) ScheduleDelayedTask(fn func(), d time.Duration) *time.Timer {
return time.AfterFunc(d, fn)
}
/*
// GetMinecraftVersion returns the latest Minecraft game version.
// It is prefixed with a 'v', for example: "v1.2.10.1"
func (server *Server) GetMinecraftVersion() string {
return info.LatestGameVersion
}
// GetMinecraftNetworkVersion returns the latest Minecraft network version.
// For example: "1.2.10.1"
func (server *Server) GetMinecraftNetworkVersion() string {
return info.LatestGameVersionNetwork
}
// HasPermission returns if the server has a given permission.
// Always returns true to satisfy the ICommandSender interface.
func (server *Server) HasPermission(string) bool {
return true
}
// SendMessage sends a message to the server to satisfy the ICommandSender interface.
func (server *Server) SendMessage(message ...interface{}) {
text.DefaultLogger.Notice(message)
}
// GetEngineName returns 'minegopher'.
func (server *Server) GetEngineName() string {
return minegopherName
}
// GetName returns the LAN name of the server specified in the configuration.
func (server *Server) GetName() string {
return server.Config.ServerName
}
// GetPort returns the port of the server specified in the configuration.
func (server *Server) GetPort() uint16 {
return server.Config.ServerPort
}
// GetAddress returns the IP address specified in the configuration.
func (server *Server) GetAddress() string {
return server.Config.ServerIp
}
// GetMaximumPlayers returns the maximum amount of players on the server.
func (server *Server) GetMaximumPlayers() uint {
return server.Config.MaximumPlayers
}
// Returns the Message Of The Day of the server.
func (server *Server) GetMotd() string {
return server.Config.ServerMotd
}
// GetCurrentTick returns the current tick the server is on.
func (server *Server) GetCurrentTick() int64 {
return server.tick
}
// BroadcastMessageTo broadcasts a message to all receivers.
func (server *Server) BroadcastMessageTo(receivers []*packet.MinecraftSession, message ...interface{}) {
for _, session := range receivers {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// Broadcast broadcasts a message to all players and the console in the server.
func (server *Server) BroadcastMessage(message ...interface{}) {
for _, session := range server.SessionManager.GetSessions() {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// GetPrivateKey returns the ECDSA private key of the server.
func (server *Server) GetPrivateKey() *ecdsa.PrivateKey {
return server.privateKey
}
// GetPublicKey returns the ECDSA public key of the private key of the server.
func (server *Server) GetPublicKey() *ecdsa.PublicKey {
return &server.privateKey.PublicKey
}
// GetServerToken returns the server token byte sequence.
func (server *Server) GetServerToken() []byte {
return server.token
}
// GenerateQueryResult returns the query data of the server in a byte array.
func (server *Server) GenerateQueryResult() query.Result {
var plugs []string
for _, plug := range server.PluginManager.GetPlugins() {
plugs = append(plugs, plug.GetName()+" v"+plug.GetVersion())
}
var ps []string
for name := range server.SessionManager.GetSessions() {
ps = append(ps, name)
}
var result = query.Result{
MOTD: server.GetMotd(),
ListPlugins: server.Config.AllowPluginQuery,
PluginNames: plugs,
PlayerNames: ps,
GameMode: "SMP",
Version: server.GetMinecraftVersion(),
ServerEngine: server.GetEngineName(),
WorldName: server.LevelManager.GetDefaultLevel().GetName(),
OnlinePlayers: int(server.SessionManager.GetSessionCount()),
MaximumPlayers: int(server.Config.MaximumPlayers),
Whitelist: "off",
Port: server.Config.ServerPort,
Address: server.Config.ServerIp,
}
return result
}
// HandleRaw handles a raw packet, for instance a query packet.
func (server *Server) HandleRaw(packet []byte, addr *net2.UDPAddr) {
if string(packet[0:2]) == string(query.Header) {
if !server.Config.AllowQuery {
return
}
var q = query.NewFromRaw(packet, addr)
q.DecodeServer()
server.QueryManager.HandleQuery(q)
return
}
text.DefaultLogger.Debug("Unhandled raw packet:", hex.EncodeToString(packet))
}
// HandleDisconnect handles a disconnection from a session.
func (server *Server) HandleDisconnect(s *server.Session) {
text.DefaultLogger.Debug(s, "disconnected!")
session, ok := server.SessionManager.GetSessionByRakNetSession(s)
server.SessionManager.RemoveMinecraftSession(session)
if !ok {
return
}
if session.GetPlayer().Dimension != nil {
for _, online := range server.SessionManager.GetSessions() {
online.SendPlayerList(data.ListTypeRemove, map[string]protocol.PlayerListEntry{online.GetPlayer().GetName(): online.GetPlayer()})
}
session.GetPlayer().DespawnFromAll()
session.GetPlayer().Close()
server.BroadcastMessage(text.Yellow+session.GetDisplayName(), "has left the server")
}
}
// GeneratePongData generates the raknet pong data for the UnconnectedPong RakNet packet.
func (server *Server) GeneratePongData() string {
return fmt.Sprint("MCPE;", server.GetMotd(), ";", info.LatestProtocol, ";", server.GetMinecraftNetworkVersion(), ";", server.SessionManager.GetSessionCount(), ";", server.Config.MaximumPlayers, ";", server.NetworkAdapter.GetRakLibManager().ServerId, ";", server.GetEngineName(), ";Creative;")
}
// Tick ticks the entire server. (Levels, scheduler, raknet server etc.)
// Internal. Not to be used by plugins.
func (server *Server) Tick() {
if !server.isRunning {
return
}
if server.tick%20 == 0 {
server.QueryManager.SetQueryResult(server.GenerateQueryResult())
server.NetworkAdapter.GetRakLibManager().PongData = server.GeneratePongData()
}
for _, session := range server.SessionManager.GetSessions() {
session.Tick()
}
for range server.LevelManager.GetLevels() {
//level.Tick()
}
server.tick++
}
func (server *Server) GetCommandManager() command {
return server.CommandManager
}
**/
func (server *Server) Tick() {
for _, p := range server.GetAllPlayer() {
p.Tick()
}
}
func (server *Server) mkdirs() {
os.Mkdir(server.playersPath, 0700)
os.Mkdir(server.pluginPath, 0700)
//os.Mkdir(server.behaviorPacksPath, 0700)
os.Mkdir(server.resourecePackPath, 0700)
os.Mkdir(server.worldsPath, 0700)
os.Mkdir(server.themePath, 0700)
}
func (server *Server) GetIp() string {
return server.ip
}
func (server *Server) GetPort() int {
return server.port
}
| {
return server.levels
} | identifier_body |
Server.go | package server
import (
networkapi "github.com/juzi5201314/MineGopher/api/network"
raknetapi "github.com/juzi5201314/MineGopher/api/network/raknet"
"github.com/juzi5201314/MineGopher/api/player"
api "github.com/juzi5201314/MineGopher/api/server"
"github.com/juzi5201314/MineGopher/level"
"github.com/juzi5201314/MineGopher/network"
raknet "github.com/juzi5201314/MineGopher/network/raknet/server"
"github.com/juzi5201314/MineGopher/network/webconsole"
"github.com/juzi5201314/MineGopher/utils"
"os"
"strconv"
"time"
"github.com/juzi5201314/MineGopher/plugin"
)
const (
ServerName = "MineGopher"
ServerVersion = "0.0.1"
)
type Server struct {
isRunning bool
tick int64
logger *utils.Logger
pluginPath string
playersPath string
themePath string
worldsPath string
behaviorPacksPath string
resourecePackPath string
serverPath string
config *utils.Config
network networkapi.NetWork
ip string
port int
raknetServer raknetapi.RaknetServer
pluginLoader *plugin.PluginLoader
levels map[string]*level.Level
defaultLevel string
}
func New(serverPath string, config *utils.Config, logger *utils.Logger) *Server {
server := new(Server)
api.SetServer(server)
server.serverPath = serverPath
server.config = config
server.logger = logger
server.pluginPath = serverPath + "/plugins/"
server.themePath = serverPath + "/theme/"
server.playersPath = serverPath + "/players/"
server.worldsPath = serverPath + "/worlds/"
server.resourecePackPath = serverPath + "/resoureces_pack/"
server.levels = map[string]*level.Level{}
server.ip = config.Get("server-ip", "0.0.0.0").(string)
server.port = config.Get("server-port", 19132).(int)
server.pluginLoader = plugin.NewLoader(server.pluginPath)
//s.LevelManager = level.NewManager(serverPath)
//server.CommandManager = commands.NewManager()
//server.CommandReader = command.NewCommandReader(os.Stdin)
/*
s.SessionManager = packet.NewSessionManager()
s.NetworkAdapter = packet.NewNetworkAdapter(s.SessionManager)
s.NetworkAdapter.GetRakLibManager().PongData = s.GeneratePongData()
s.NetworkAdapter.GetRakLibManager().RawPacketFunction = s.HandleRaw
s.NetworkAdapter.GetRakLibManager().DisconnectFunction = s.HandleDisconnect
s.RegisterDefaultProtocols()
s.PackManager = packs.NewManager(serverPath)
s.PermissionManager = permissions.NewManager()
s.PluginManager = NewPluginManager(s)
s.QueryManager = query.NewManager()
if config.UseEncryption {
var curve = elliptic.P384()
var err error
s.privateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
text.DefaultLogger.LogError(err)
if !curve.IsOnCurve(s.privateKey.X, s.privateKey.Y) {
text.DefaultLogger.Error("Invalid private key generated")
}
var token = make([]byte, 128)
rand.Read(token)
s.token = token
}
return s
*/
return server
}
func (server *Server) IsRunning() bool {
return server.isRunning
}
func (server *Server) Start() {
if server.isRunning {
panic("The server has beem started!")
}
server.mkdirs()
server.logger.Info("MineGopher " + ServerVersion + ", running on " + server.serverPath)
server.isRunning = true
server.defaultLevel = server.config.Get("level-name", "world").(string)
dl := level.NewLevel(server.worldsPath+server.defaultLevel, server.defaultLevel)
server.levels[server.defaultLevel] = dl
server.network = network.New()
server.network.SetName(server.config.Get("motd", "MineGopher Server For Minecraft: PE").(string))
server.raknetServer = raknet.New(server.GetIp(), server.GetPort())
server.raknetServer.Start()
server.logger.Info("RakNetServer Listen " + server.GetIp() + ":" + strconv.Itoa(server.GetPort()))
if server.config.Get("webconsole", true).(bool) {
webconsole.Start()
}
server.pluginLoader.LoadPlugins()
server.config.Save()
}
func (server *Server) Shutdown() {
if !server.isRunning {
return
}
for _, l := range server.levels {
l.GetDimension().Save()
}
server.logger.Info("Server stopped.")
server.isRunning = false
server.logger.Close()
}
func (server *Server) GetConfig() *utils.Config {
return server.config
}
func (server *Server) GetAllPlayer() map[string]player.Player {
return server.raknetServer.GetPlayers()
}
func (server *Server) GetNetWork() networkapi.NetWork {
return server.network
}
func (server *Server) GetRaknetServer() raknetapi.RaknetServer {
return server.raknetServer
}
func (server *Server) GetName() string {
return ServerName
}
func (server *Server) GetLogger() *utils.Logger {
return server.logger
}
func (server *Server) GetLevels() map[string]*level.Level {
return server.levels
}
func (server *Server) GetLevel(name string) *level.Level {
return server.levels[name]
}
func (server *Server) GetDefaultLevel() *level.Level {
return server.GetLevel(server.defaultLevel)
}
func (server *Server) GetPath() string {
return server.serverPath
}
func (server *Server) ScheduleRepeatingTask(fn func(), d time.Duration) *time.Ticker {
ticker := time.NewTicker(d)
go func() {
for range ticker.C |
}()
return ticker
}
func (server *Server) ScheduleDelayedTask(fn func(), d time.Duration) *time.Timer {
return time.AfterFunc(d, fn)
}
/*
// GetMinecraftVersion returns the latest Minecraft game version.
// It is prefixed with a 'v', for example: "v1.2.10.1"
func (server *Server) GetMinecraftVersion() string {
return info.LatestGameVersion
}
// GetMinecraftNetworkVersion returns the latest Minecraft network version.
// For example: "1.2.10.1"
func (server *Server) GetMinecraftNetworkVersion() string {
return info.LatestGameVersionNetwork
}
// HasPermission returns if the server has a given permission.
// Always returns true to satisfy the ICommandSender interface.
func (server *Server) HasPermission(string) bool {
return true
}
// SendMessage sends a message to the server to satisfy the ICommandSender interface.
func (server *Server) SendMessage(message ...interface{}) {
text.DefaultLogger.Notice(message)
}
// GetEngineName returns 'minegopher'.
func (server *Server) GetEngineName() string {
return minegopherName
}
// GetName returns the LAN name of the server specified in the configuration.
func (server *Server) GetName() string {
return server.Config.ServerName
}
// GetPort returns the port of the server specified in the configuration.
func (server *Server) GetPort() uint16 {
return server.Config.ServerPort
}
// GetAddress returns the IP address specified in the configuration.
func (server *Server) GetAddress() string {
return server.Config.ServerIp
}
// GetMaximumPlayers returns the maximum amount of players on the server.
func (server *Server) GetMaximumPlayers() uint {
return server.Config.MaximumPlayers
}
// Returns the Message Of The Day of the server.
func (server *Server) GetMotd() string {
return server.Config.ServerMotd
}
// GetCurrentTick returns the current tick the server is on.
func (server *Server) GetCurrentTick() int64 {
return server.tick
}
// BroadcastMessageTo broadcasts a message to all receivers.
func (server *Server) BroadcastMessageTo(receivers []*packet.MinecraftSession, message ...interface{}) {
for _, session := range receivers {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// Broadcast broadcasts a message to all players and the console in the server.
func (server *Server) BroadcastMessage(message ...interface{}) {
for _, session := range server.SessionManager.GetSessions() {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// GetPrivateKey returns the ECDSA private key of the server.
func (server *Server) GetPrivateKey() *ecdsa.PrivateKey {
return server.privateKey
}
// GetPublicKey returns the ECDSA public key of the private key of the server.
func (server *Server) GetPublicKey() *ecdsa.PublicKey {
return &server.privateKey.PublicKey
}
// GetServerToken returns the server token byte sequence.
func (server *Server) GetServerToken() []byte {
return server.token
}
// GenerateQueryResult returns the query data of the server in a byte array.
func (server *Server) GenerateQueryResult() query.Result {
var plugs []string
for _, plug := range server.PluginManager.GetPlugins() {
plugs = append(plugs, plug.GetName()+" v"+plug.GetVersion())
}
var ps []string
for name := range server.SessionManager.GetSessions() {
ps = append(ps, name)
}
var result = query.Result{
MOTD: server.GetMotd(),
ListPlugins: server.Config.AllowPluginQuery,
PluginNames: plugs,
PlayerNames: ps,
GameMode: "SMP",
Version: server.GetMinecraftVersion(),
ServerEngine: server.GetEngineName(),
WorldName: server.LevelManager.GetDefaultLevel().GetName(),
OnlinePlayers: int(server.SessionManager.GetSessionCount()),
MaximumPlayers: int(server.Config.MaximumPlayers),
Whitelist: "off",
Port: server.Config.ServerPort,
Address: server.Config.ServerIp,
}
return result
}
// HandleRaw handles a raw packet, for instance a query packet.
func (server *Server) HandleRaw(packet []byte, addr *net2.UDPAddr) {
if string(packet[0:2]) == string(query.Header) {
if !server.Config.AllowQuery {
return
}
var q = query.NewFromRaw(packet, addr)
q.DecodeServer()
server.QueryManager.HandleQuery(q)
return
}
text.DefaultLogger.Debug("Unhandled raw packet:", hex.EncodeToString(packet))
}
// HandleDisconnect handles a disconnection from a session.
func (server *Server) HandleDisconnect(s *server.Session) {
text.DefaultLogger.Debug(s, "disconnected!")
session, ok := server.SessionManager.GetSessionByRakNetSession(s)
server.SessionManager.RemoveMinecraftSession(session)
if !ok {
return
}
if session.GetPlayer().Dimension != nil {
for _, online := range server.SessionManager.GetSessions() {
online.SendPlayerList(data.ListTypeRemove, map[string]protocol.PlayerListEntry{online.GetPlayer().GetName(): online.GetPlayer()})
}
session.GetPlayer().DespawnFromAll()
session.GetPlayer().Close()
server.BroadcastMessage(text.Yellow+session.GetDisplayName(), "has left the server")
}
}
// GeneratePongData generates the raknet pong data for the UnconnectedPong RakNet packet.
func (server *Server) GeneratePongData() string {
return fmt.Sprint("MCPE;", server.GetMotd(), ";", info.LatestProtocol, ";", server.GetMinecraftNetworkVersion(), ";", server.SessionManager.GetSessionCount(), ";", server.Config.MaximumPlayers, ";", server.NetworkAdapter.GetRakLibManager().ServerId, ";", server.GetEngineName(), ";Creative;")
}
// Tick ticks the entire server. (Levels, scheduler, raknet server etc.)
// Internal. Not to be used by plugins.
func (server *Server) Tick() {
if !server.isRunning {
return
}
if server.tick%20 == 0 {
server.QueryManager.SetQueryResult(server.GenerateQueryResult())
server.NetworkAdapter.GetRakLibManager().PongData = server.GeneratePongData()
}
for _, session := range server.SessionManager.GetSessions() {
session.Tick()
}
for range server.LevelManager.GetLevels() {
//level.Tick()
}
server.tick++
}
func (server *Server) GetCommandManager() command {
return server.CommandManager
}
**/
func (server *Server) Tick() {
for _, p := range server.GetAllPlayer() {
p.Tick()
}
}
func (server *Server) mkdirs() {
os.Mkdir(server.playersPath, 0700)
os.Mkdir(server.pluginPath, 0700)
//os.Mkdir(server.behaviorPacksPath, 0700)
os.Mkdir(server.resourecePackPath, 0700)
os.Mkdir(server.worldsPath, 0700)
os.Mkdir(server.themePath, 0700)
}
func (server *Server) GetIp() string {
return server.ip
}
func (server *Server) GetPort() int {
return server.port
}
| {
fn()
} | conditional_block |
stopwords.rs | use lazy_static::lazy_static;
use std::collections::{HashMap, HashSet};
lazy_static! {
/// ignore these as keywords
pub(crate) static ref STOPWORDS: HashSet<&'static str> = [
"a", "sys", "ffi", "placeholder", "app", "loops", "master", "library", "rs",
"accidentally", "additional", "adds", "against", "all", "allow", "allows",
"already", "also", "alternative", "always", "an", "and", "any", "appropriate",
"arbitrary", "are", "as", "at", "available", "based", "be", "because", "been",
"both", "but", "by", "can", "certain", "changes", "comes", "contains", "code", "core", "cost",
"crate", "crates.io", "current", "currently", "custom", "dependencies",
"dependency", "developers", "do", "don't", "e.g", "easily", "easy", "either",
"enables", "etc", "even", "every", "example", "examples", "features", "feel",
"files", "fast", "for", "from", "fully", "function", "get", "given", "had", "has",
"hacktoberfest",
"have", "here", "if", "implementing", "implements", "implementation", "in", "includes",
"including", "incurring", "installation", "interested", "into", "is", "it",
"it's", "its", "itself", "just", "known", "large", "later", "library",
"license", "lightweight", "like", "made", "main", "make", "makes", "many",
"may", "me", "means", "method", "minimal", "mit", "more", "mostly", "much",
"need", "needed", "never", "new", "no", "noop", "not", "of", "on", "one",
"only", "or", "other", "over", "plausible", "please", "possible", "program", "project",
"provides", "put", "readme", "release", "runs", "rust", "rust's", "same",
"see", "selected", "should", "similar", "simple", "simply", "since", "small", "so",
"some", "specific", "still", "stuff", "such", "take", "than", "that", "the",
"their", "them", "then", "there", "therefore", "these", "they", "things",
"this", "those", "to", "todo", "too", "took", "travis", "two", "under", "us",
"usable", "use", "used", "useful", "using", "usage", "v1", "v2", "v3", "v4", "various",
"very", "via", "want", "way", "well", "we'll", "what", "when", "where", "which",
"while", "will", "wip", "with", "without", "working", "works", "writing",
"written", "yet", "you", "your", "build status", "meritbadge", "common",
"file was generated", "easy to use", "general-purpose", "fundamental",
].iter().copied().collect();
/// If one is present, ignore the others
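/// (`Some(list)` means: when the key is present, drop the listed co-occurring
/// keywords; `None` means: drop every other keyword, used for spam and
/// name-squatting terms.)
///
/// Intended lookup, as a sketch (the consuming code is not in this file):
///
/// ```ignore
/// if let Some(rule) = COND_STOPWORDS.get(keyword) {
///     match rule {
///         Some(ignored) => keywords.retain(|k| !ignored.contains(k)),
///         None => keywords.clear(),
///     }
/// }
/// ```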
pub(crate) static ref COND_STOPWORDS: HashMap<&'static str, Option<&'static [&'static str]>> = [
("game-engine", Some(&["game", "ffi"][..])),
("game-engines", Some(&["game", "ffi"])),
("game-development", Some(&["game", "ffi"])),
("game-dev", Some(&["game", "games"])),
("gamedev", Some(&["game", "games"])),
("game", Some(&["wasm", "webassembly"])), // wasm games are nice, but should be in games category
("contract", Some(&["wasm", "webassembly"])), // that's crypto-babble-nothingburger
("opengl", Some(&["terminal", "console"])),
("protocol", Some(&["game", "games", "container"])),
("framework", Some(&["game", "games"])),
("engine", Some(&["ffi"])),
("mock", Some(&["macro", "derive", "plugin", "cargo"])),
("ffi", Some(&["api-bindings"])),
("caching", Some(&["allocator"])),
("distributed", Some(&["filesystem", "file"])),
("container", Some(&["filesystem", "file"])),
("aws", Some(&["ecs"])), // not game engine
("raspberry", Some(&["osx", "windows"])),
("linux", Some(&["windows", "winsdk", "macos", "mac", "osx"])),
("cross-platform", Some(&["windows", "winsdk", "macos", "mac", "osx", "linux", "unix", "gnu"])),
("portable", Some(&["windows", "winsdk", "macos", "mac", "osx", "linux", "unix", "gnu"])),
("winapi", Some(&["target", "windows", "gnu", "x86", "i686", "64", "pc"])),
("windows", Some(&["gnu"])),
("compile-time", Some(&["time"])),
("constant-time", Some(&["time"])),
("real-time", Some(&["time"])),
("time-series", Some(&["time"])),
("execution", Some(&["time"])),
("iterator", Some(&["window", "windows"])),
("buffer", Some(&["window", "windows"])),
("sliding", Some(&["window", "windows"])),
("web", Some(&["windows", "macos", "mac", "osx", "linux"])),
("error", Some(&["color"])),
("pretty-print", Some(&["color"])),
("pretty-printer", Some(&["color"])),
("ios", Some(&["core"])),
("macos", Some(&["core"])),
("osx", Some(&["core"])),
("mac", Some(&["core"])),
("module", Some(&["core"])),
("wasm", Some(&["embedded", "javascript", "no-std", "no_std", "feature:no_std", "deploy"])),
("javascript", Some(&["embedded", "no-std", "no_std", "feature:no_std"])),
("webassembly", Some(&["embedded", "javascript", "no-std", "no_std", "feature:no_std"])),
("deep-learning", Some(&["math", "statistics"])),
("machine-learning", Some(&["math", "statistics"])),
("neural-networks", Some(&["math", "statistics", "network"])),
("neural", Some(&["network"])),
("fantasy", Some(&["console"])),
("learning", Some(&["network"])),
("safe", Some(&["network"])),
("database", Some(&["embedded"])),
("robotics", Some(&["localization"])),
("thread", Some(&["storage"])),
("exchange", Some(&["twitch", "animation"])),
("animation", Some(&["kraken"])),
("bitcoin", Some(&["http", "day", "database", "key-value", "network", "wasm", "secp256k1", "client", "rpc", "websocket"])),
("solana", Some(&["http", "day", "database", "key-value", "network", "wasm", "secp256k1", "client", "cryptographic", "gfx", "sdk"])),
("exonum", Some(&["http", "day", "database", "key-value", "network", "wasm", "client"])),
("blockchain", Some(&["database", "key-value", "network", "wasm", "nosql", "orm", "driver", "fun", "rpc", "client", "server", "p2p", "networking", "websocket"])),
("cryptocurrencies", Some(&["database", "key-value", "network", "wasm", "nosql", "orm", "driver", "fun", "rpc", "client", "server", "p2p", "networking", "websocket"])),
("cryptocurrency", Some(&["database", "key-value", "network", "wasm", "nosql", "orm", "driver", "fun", "rpc", "client", "server", "p2p", "networking", "websocket", "twitch"])),
("ethereum", Some(&["http", "day", "nosql", "eth", "log", "generic", "network", "wasm", "key-value", "orm", "client", "database", "secp256k1", "websocket", "parity"])),
("iter", Some(&["math"])),
("ethernet", Some(&["eth"])),
("macro", Some(&["no-std", "no_std", "feature:no_std"])),
("macros", Some(&["no-std", "no_std", "feature:no_std"])),
("embedded", Some(&["no-std", "no_std", "feature:no_std"])),
("arm", Some(&["no-std", "no_std", "feature:no_std"])),
("float", Some(&["math"])),
("c64", Some(&["terminal", "core"])),
("emulator", Some(&["6502", "core", "gpu", "color", "timer"])),
("garbage", Some(&["tracing"])),
("terminal", Some(&["math", "emulator"])),
("terminal-emulator", Some(&["math", "emulator"])),
("editor", Some(&["terminal"])),
("build", Some(&["logic"])), // confuses categorization
("messaging", Some(&["matrix"])), // confuses categorization
("led", Some(&["matrix"])), // confuses categorization
("rgb", Some(&["matrix"])), // confuses categorization
("chat", Some(&["matrix"])), // confuses categorization
("math", Some(&["num", "symbolic", "algorithms", "algorithm", "utils"])), // confuses categorization
("mathematics", Some(&["num", "numeric", "symbolic", "algorithms", "algorithm", "utils"])), // confuses categorization
("cuda", Some(&["nvidia"])), // confuses categorization
("subcommand", Some(&["plugin"])),
("lint", Some(&["plugin"])),
("email", Some(&["validator", "validation"])),
("e-mail", Some(&["validator", "validation"])),
("template", Some(&["derive"])),
("dsl", Some(&["template"])),
("syn", Some(&["nom"])),
("cargo", Some(&["plugin"])),
("git", Some(&["terminal"])),
("nzxt", Some(&["kraken"])),
("wide", Some(&["windows", "win32"])),
("i18n", Some(&["text", "format", "message", "json", "ffi"])),
("l10n", Some(&["text", "format", "message", "json", "ffi"])),
("unicode", Some(&["text"])),
("parity", Some(&["fun", "backend"])),
("secp256k1", Some(&["fun", "backend", "alloc", "ecc"])),
("font", Some(&["text", "bitmap"])),
("freetype", Some(&["text", "bitmap"])),
("tex", Some(&["font"])),
("regex", Some(&["text", "linear", "time", "search"])),
("language", Some(&["server"])),
("server", Some(&["files"])),
("medical", Some(&["image"])),
("social", Some(&["media"])),
("codegen", Some(&["backend"])),
("game", Some(&["simulator", "simulation"])),
("vkontakte", Some(&["vk"])),
("vulkan", Some(&["vk"])),
("2d", Some(&["path", "paths"])),
("video", Some(&["audio"])), // have to pick one…
("sound", Some(&["3d", "windows"])),
("memory", Some(&["os", "system", "storage"])), // too generic
("data-structure", Some(&["no-std", "no_std"])), // it's a nice feature, but not defining one
("crypto", Some(&["no-std", "no_std"])), // it's a nice feature, but not defining one
("macro", Some(&["no-std", "no_std"])), // it's a nice feature, but not defining one
("parser", Some(&["no-std", "no_std", "game"])), // it's a nice feature, but not defining one
("cryptography", Some(&["no-std", "no_std"])), // it's a nice feature, but not defining one
("websocket", Some(&["http", "cli", "tokio", "client", "io", "network", "servo", "web"])), // there's a separate category for it
("rest", Some(&["api"])),
("cargo-subcommand", None),
("substrate", None),
("twitch", Some(&["kraken"])),
("chess", Some(&["bot"])),
("lichess", Some(&["bot"])),
("nftables", Some(&["nft"])),
("placeholder", None), // spam
("reserved", None), // spam
("name-squatting", None), // spam
("parked", None), // spam
("squatting", None), // spam
("malware", None), // spam
("unfinished", None), // spam | } | ].iter().copied().collect(); | random_line_split |
rasterbackend.rs | use crate::aabb::*;
use crate::mesh::*;
use crate::picture::*;
use crate::zbuffer::*;
use std::f32::consts::PI;
use std::time::{Duration, Instant};
#[derive(Debug)]
pub struct RenderOptions {
pub view_pos: Vec3,
pub light_pos: Vec3,
pub light_color: Vec3,
pub ambient_color: Vec3,
pub model_color: Vec3,
pub grid_color: Vec3,
pub background_color: Vec4,
pub zoom: f32,
pub grid_visible: bool,
pub draw_size_hint: bool,
}
impl Default for RenderOptions {
fn default() -> Self {
Self {
view_pos: Vec3::new(-1.0, 1.0, -1.0).normalize(),
light_pos: Vec3::new(-1.0, 0.5, -0.5),
light_color: Vec3::new(0.6, 0.6, 0.6),
ambient_color: Vec3::new(0.4, 0.4, 0.4),
model_color: Vec3::new(0.0, 0.45, 1.0),
grid_color: Vec3::new(0.1, 0.1, 0.1),
background_color: Vec4::new(1.0, 1.0, 1.0, 1.0),
grid_visible: true,
zoom: 1.0,
draw_size_hint: true,
}
}
}
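// The render options above are plain public data: construct a backend and then
// overwrite the fields you care about before calling `render`, e.g. (sketch):
//
//     let mut backend = RasterBackend::new(256, 256);
//     backend.render_options.grid_visible = false;
//     backend.render_options.zoom = 1.2;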
#[derive(Debug)]
pub struct RasterBackend {
pub render_options: RenderOptions,
width: u32,
height: u32,
aspect_ratio: f32,
}
impl RasterBackend {
pub fn new(width: u32, height: u32) -> Self {
Self {
render_options: RenderOptions::default(),
width,
height,
aspect_ratio: width as f32 / height as f32,
}
}
fn view_projection(&self, zoom: f32) -> Mat4 {
// calculate view projection matrix
let proj = glm::ortho(
zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5,
zoom * 0.5,
0.0,
1.0,
);
let view = glm::look_at(
&self.render_options.view_pos,
&Vec3::new(0.0, 0.0, 0.0),
&Vec3::new(0.0, 0.0, -1.0),
);
proj * view
}
pub fn fit_mesh_scale(&self, mesh: impl IntoIterator<Item = Triangle> + Copy) -> (AABB, f32) {
let aabb = AABB::from_iterable(mesh);
let vp = self.view_projection(1.0);
// scale the model such that it fills the entire canvas
(aabb, scale_for_unitsize(&vp, &aabb))
}
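// Typical call sequence (sketch, mesh loading happens elsewhere):
//
//     let backend = RasterBackend::new(512, 512);
//     let (aabb, scale) = backend.fit_mesh_scale(mesh);
//     let pic = backend.render(mesh, scale, &aabb, Some(Duration::from_secs(5)));
//
// On timeout, `render` returns the partially rasterized picture instead of failing.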
pub fn render(
&self,
mesh: impl IntoIterator<Item = Triangle> + Copy,
model_scale: f32,
aabb: &AABB,
timeout: Option<Duration>,
) -> Picture {
let start_time = Instant::now();
let mut pic = Picture::new(self.width, self.height);
let mut zbuf = ZBuffer::new(self.width, self.height);
let mut scaled_aabb = *aabb;
pic.fill(&(&self.render_options.background_color).into());
let vp = self.view_projection(self.render_options.zoom);
// calculate transforms taking the new model scale into account
let model = Mat4::identity()
.append_translation(&-aabb.center())
.append_scaling(model_scale);
let mvp = vp * model;
// let the AABB match the transformed model
scaled_aabb.apply_transform(&model);
// eye normal pointing towards the camera in world space
let eye_normal = self.render_options.view_pos.normalize();
// grid in x and y direction
if self.render_options.grid_visible {
draw_grid(
&mut pic,
&vp,
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
draw_grid(
&mut pic,
&(vp * glm::rotation(PI / 2.0, &Vec3::new(0.0, 0.0, 1.0))),
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
}
for t in mesh {
// timed out?
if let Some(timeout) = timeout {
let dt = Instant::now() - start_time;
if dt > timeout {
// abort
println!("... timeout!");
return pic;
}
}
let normal = -t.normal;
// backface culling
if glm::dot(&eye_normal, &normal) < 0.0 {
continue;
}
let v = &t.vertices;
let v0 = matmul(&mvp, &v[0]);
let v1 = matmul(&mvp, &v[1]);
let v2 = matmul(&mvp, &v[2]);
let v0m = matmul(&model, &v[0]);
let v1m = matmul(&model, &v[1]);
let v2m = matmul(&model, &v[2]);
// triangle bounding box
let min_x = v0.x.min(v1.x).min(v2.x);
let min_y = v0.y.min(v1.y).min(v2.y);
let max_x = v0.x.max(v1.x).max(v2.x);
let max_y = v0.y.max(v1.y).max(v2.y);
// triangle bounding box in screen space
let smin_x = 0.max(((min_x + 1.0) / 2.0 * pic.width() as f32) as u32);
let smin_y = 0.max(((min_y + 1.0) / 2.0 * pic.height() as f32) as u32);
let smax_x = 0.max(pic.width().min(((max_x + 1.0) / 2.0 * pic.width() as f32) as u32));
let smax_y = 0.max(pic.height().min(((max_y + 1.0) / 2.0 * pic.height() as f32) as u32));
for y in smin_y..=smax_y {
for x in smin_x..=smax_x {
// normalized screen coordinates [-1,1]
let nx = 2.0 * ((x as f32 / pic.width() as f32) - 0.5);
let ny = 2.0 * ((y as f32 / pic.height() as f32) - 0.5);
let p = Vec2::new(nx, ny);
let p0 = v0.xy();
let p1 = v1.xy();
let p2 = v2.xy();
let inside =
edge_fn(&p, &p0, &p1) <= 0.0 && edge_fn(&p, &p1, &p2) <= 0.0 && edge_fn(&p, &p2, &p0) <= 0.0;
if inside {
// calculate barycentric coordinates
let area = edge_fn(&p0, &p1, &p2);
let w0 = edge_fn(&p1, &p2, &p) / area;
let w1 = edge_fn(&p2, &p0, &p) / area;
let w2 = edge_fn(&p0, &p1, &p) / area;
// fragment position in screen space
let frag_pos = Vec3::new(
w0 * v0.x + w1 * v1.x + w2 * v2.x, | w0 * v0.y + w1 * v1.y + w2 * v2.y,
w0 * v0.z + w1 * v1.z + w2 * v2.z,
);
// fragment position in world space
let fp = Vec3::new(
w0 * v0m.x + w1 * v1m.x + w2 * v2m.x,
w0 * v0m.y + w1 * v1m.y + w2 * v2m.y,
w0 * v0m.z + w1 * v1m.z + w2 * v2m.z,
);
//let fp = matmul(&mvp_inv, &frag_pos);
if zbuf.test_and_set(x, y, frag_pos.z) {
// calculate lighting (Phong-style diffuse + specular)
let light_normal = (self.render_options.light_pos - fp).normalize(); // normal frag pos to light (world space)
let view_normal = (self.render_options.view_pos - fp).normalize(); // normal frag pos to view (world space)
let reflect_dir = glm::reflect_vec(&-light_normal, &normal);
// diffuse
let diff_color =
glm::dot(&normal, &light_normal).max(0.0) * self.render_options.light_color * 1.0;
// specular
let spec_color = (glm::dot(&view_normal, &reflect_dir).powf(16.0) * 0.7)
* self.render_options.light_color;
// merge
let mut color = self.render_options.ambient_color + diff_color + spec_color;
color.x *= self.render_options.model_color.x;
color.y *= self.render_options.model_color.y;
color.z *= self.render_options.model_color.z;
pic.set(x, y, &(color.x, color.y, color.z, 1.0).into());
}
}
}
}
}
if self.render_options.draw_size_hint {
let margin = 3;
let text_to_height_ratio = 16;
let text = format!(
"{}x{}x{}",
aabb.size().x as i32,
aabb.size().y as i32,
aabb.size().z as i32
);
let text_size = pic.height() / text_to_height_ratio;
pic.fill_rect(
0,
pic.height() as i32 - (text_size + margin * 2) as i32,
pic.width() as i32,
pic.height() as i32,
&"333333FF".into(),
);
pic.stroke_string(
margin,
pic.height() - text_size - margin,
&text,
text_size as f32,
&"FFFFFFFF".into(),
);
}
pic
}
}
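// 2D cross product of (c - a) with (b - a): the sign tells on which side of
// the edge a->b the point c lies, and the magnitude is twice the triangle
// area; this is what the inside test and the barycentric weights above rely on.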
fn edge_fn(a: &Vec2, b: &Vec2, c: &Vec2) -> f32 {
(c.x - a.x) * (b.y - a.y) - (c.y - a.y) * (b.x - a.x)
}
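// Projects the eight AABB corners with the given view-projection matrix and
// returns the scale that makes the larger projected X/Y extent fit the
// two-unit-wide clip range.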
fn scale_for_unitsize(mvp: &Mat4, aabb: &AABB) -> f32 {
let edges = [
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.upper.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.upper.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.upper.z)),
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.upper.y, aabb.upper.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.upper.z)),
];
let mut min = Vec3::new(f32::MAX, f32::MAX, f32::MAX);
let mut max = Vec3::new(f32::MIN, f32::MIN, f32::MIN);
for e in &edges {
min.x = min.x.min(e.x);
min.y = min.y.min(e.y);
max.x = max.x.max(e.x);
max.y = max.y.max(e.y);
}
1.0 / ((f32::abs(max.x - min.x)).max(f32::abs(max.y - min.y)) / 2.0)
}
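// Draws one family of parallel grid lines (10 mm spacing in model units,
// scaled into clip space) at height z; the caller rotates the
// view-projection matrix by 90 degrees to get the perpendicular family.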
fn draw_grid(pic: &mut Picture, vp: &Mat4, z: f32, color: &Vec3, model_size: Vec3, scale: f32) {
// draw grid
let max_xy = model_size.x.max(model_size.y);
let grid_color = (color.x, color.y, color.z, 1.0).into();
let grid_size = 10.0; // mm
let grid_count = ((max_xy * scale) / scale / grid_size + 1.0) as i32;
let grid_spacing = grid_size * scale as f32;
let ox = grid_count as f32 * grid_spacing / 2.0;
for x in 0..=grid_count {
let p0 = Vec3::new(grid_spacing * x as f32 - ox, grid_count as f32 * grid_spacing * 0.5, z);
let p1 = Vec3::new(p0.x, -grid_count as f32 * grid_spacing * 0.5, z);
// to screen space
let sp0 = matmul(&vp, &p0).xy();
let sp1 = matmul(&vp, &p1).xy();
pic.thick_line(
((sp0.x + 1.0) / 2.0 * pic.width() as f32) as i32,
((sp0.y + 1.0) / 2.0 * pic.height() as f32) as i32,
((sp1.x + 1.0) / 2.0 * pic.width() as f32) as i32,
((sp1.y + 1.0) / 2.0 * pic.height() as f32) as i32,
&grid_color,
1.0,
);
}
} | random_line_split |
|
rasterbackend.rs | use crate::aabb::*;
use crate::mesh::*;
use crate::picture::*;
use crate::zbuffer::*;
use std::f32::consts::PI;
use std::time::{Duration, Instant};
#[derive(Debug)]
pub struct RenderOptions {
pub view_pos: Vec3,
pub light_pos: Vec3,
pub light_color: Vec3,
pub ambient_color: Vec3,
pub model_color: Vec3,
pub grid_color: Vec3,
pub background_color: Vec4,
pub zoom: f32,
pub grid_visible: bool,
pub draw_size_hint: bool,
}
impl Default for RenderOptions {
fn default() -> Self {
Self {
view_pos: Vec3::new(-1.0, 1.0, -1.0).normalize(),
light_pos: Vec3::new(-1.0, 0.5, -0.5),
light_color: Vec3::new(0.6, 0.6, 0.6),
ambient_color: Vec3::new(0.4, 0.4, 0.4),
model_color: Vec3::new(0.0, 0.45, 1.0),
grid_color: Vec3::new(0.1, 0.1, 0.1),
background_color: Vec4::new(1.0, 1.0, 1.0, 1.0),
grid_visible: true,
zoom: 1.0,
draw_size_hint: true,
}
}
}
#[derive(Debug)]
pub struct RasterBackend {
pub render_options: RenderOptions,
width: u32,
height: u32,
aspect_ratio: f32,
}
impl RasterBackend {
pub fn new(width: u32, height: u32) -> Self |
fn view_projection(&self, zoom: f32) -> Mat4 {
// calculate view projection matrix
let proj = glm::ortho(
zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5,
zoom * 0.5,
0.0,
1.0,
);
let view = glm::look_at(
&self.render_options.view_pos,
&Vec3::new(0.0, 0.0, 0.0),
&Vec3::new(0.0, 0.0, -1.0),
);
proj * view
}
pub fn fit_mesh_scale(&self, mesh: impl IntoIterator<Item = Triangle> + Copy) -> (AABB, f32) {
let aabb = AABB::from_iterable(mesh);
let vp = self.view_projection(1.0);
// scale the model such that it fills the entire canvas
(aabb, scale_for_unitsize(&vp, &aabb))
}
pub fn render(
&self,
mesh: impl IntoIterator<Item = Triangle> + Copy,
model_scale: f32,
aabb: &AABB,
timeout: Option<Duration>,
) -> Picture {
let start_time = Instant::now();
let mut pic = Picture::new(self.width, self.height);
let mut zbuf = ZBuffer::new(self.width, self.height);
let mut scaled_aabb = *aabb;
pic.fill(&(&self.render_options.background_color).into());
let vp = self.view_projection(self.render_options.zoom);
// calculate transforms taking the new model scale into account
let model = Mat4::identity()
.append_translation(&-aabb.center())
.append_scaling(model_scale);
let mvp = vp * model;
// let the AABB match the transformed model
scaled_aabb.apply_transform(&model);
// eye normal pointing towards the camera in world space
let eye_normal = self.render_options.view_pos.normalize();
// grid in x and y direction
if self.render_options.grid_visible {
draw_grid(
&mut pic,
&vp,
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
draw_grid(
&mut pic,
&(vp * glm::rotation(PI / 2.0, &Vec3::new(0.0, 0.0, 1.0))),
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
}
for t in mesh {
// timed out?
if let Some(timeout) = timeout {
let dt = Instant::now() - start_time;
if dt > timeout {
// abort
println!("... timeout!");
return pic;
}
}
let normal = -t.normal;
// backface culling
if glm::dot(&eye_normal, &normal) < 0.0 {
continue;
}
let v = &t.vertices;
let v0 = matmul(&mvp, &v[0]);
let v1 = matmul(&mvp, &v[1]);
let v2 = matmul(&mvp, &v[2]);
let v0m = matmul(&model, &v[0]);
let v1m = matmul(&model, &v[1]);
let v2m = matmul(&model, &v[2]);
// triangle bounding box
let min_x = v0.x.min(v1.x).min(v2.x);
let min_y = v0.y.min(v1.y).min(v2.y);
let max_x = v0.x.max(v1.x).max(v2.x);
let max_y = v0.y.max(v1.y).max(v2.y);
// triangle bounding box in screen space
let smin_x = 0.max(((min_x + 1.0) / 2.0 * pic.width() as f32) as u32);
let smin_y = 0.max(((min_y + 1.0) / 2.0 * pic.height() as f32) as u32);
let smax_x = 0.max(pic.width().min(((max_x + 1.0) / 2.0 * pic.width() as f32) as u32));
let smax_y = 0.max(pic.height().min(((max_y + 1.0) / 2.0 * pic.height() as f32) as u32));
for y in smin_y..=smax_y {
for x in smin_x..=smax_x {
// normalized screen coordinates [-1,1]
let nx = 2.0 * ((x as f32 / pic.width() as f32) - 0.5);
let ny = 2.0 * ((y as f32 / pic.height() as f32) - 0.5);
let p = Vec2::new(nx, ny);
let p0 = v0.xy();
let p1 = v1.xy();
let p2 = v2.xy();
let inside =
edge_fn(&p, &p0, &p1) <= 0.0 && edge_fn(&p, &p1, &p2) <= 0.0 && edge_fn(&p, &p2, &p0) <= 0.0;
if inside {
// calculate barycentric coordinates
let area = edge_fn(&p0, &p1, &p2);
let w0 = edge_fn(&p1, &p2, &p) / area;
let w1 = edge_fn(&p2, &p0, &p) / area;
let w2 = edge_fn(&p0, &p1, &p) / area;
// fragment position in screen space
let frag_pos = Vec3::new(
w0 * v0.x + w1 * v1.x + w2 * v2.x,
w0 * v0.y + w1 * v1.y + w2 * v2.y,
w0 * v0.z + w1 * v1.z + w2 * v2.z,
);
// fragment position in world space
let fp = Vec3::new(
w0 * v0m.x + w1 * v1m.x + w2 * v2m.x,
w0 * v0m.y + w1 * v1m.y + w2 * v2m.y,
w0 * v0m.z + w1 * v1m.z + w2 * v2m.z,
);
//let fp = matmul(&mvp_inv, &frag_pos);
if zbuf.test_and_set(x, y, frag_pos.z) {
// calculate lighting (Phong-style diffuse + specular)
let light_normal = (self.render_options.light_pos - fp).normalize(); // normal frag pos to light (world space)
let view_normal = (self.render_options.view_pos - fp).normalize(); // normal frag pos to view (world space)
let reflect_dir = glm::reflect_vec(&-light_normal, &normal);
// diffuse
let diff_color =
glm::dot(&normal, &light_normal).max(0.0) * self.render_options.light_color * 1.0;
// specular
let spec_color = (glm::dot(&view_normal, &reflect_dir).powf(16.0) * 0.7)
* self.render_options.light_color;
// merge
let mut color = self.render_options.ambient_color + diff_color + spec_color;
color.x *= self.render_options.model_color.x;
color.y *= self.render_options.model_color.y;
color.z *= self.render_options.model_color.z;
pic.set(x, y, &(color.x, color.y, color.z, 1.0).into());
}
}
}
}
}
if self.render_options.draw_size_hint {
let margin = 3;
let text_to_height_ratio = 16;
let text = format!(
"{}x{}x{}",
aabb.size().x as i32,
aabb.size().y as i32,
aabb.size().z as i32
);
let text_size = pic.height() / text_to_height_ratio;
pic.fill_rect(
0,
pic.height() as i32 - (text_size + margin * 2) as i32,
pic.width() as i32,
pic.height() as i32,
&"333333FF".into(),
);
pic.stroke_string(
margin,
pic.height() - text_size - margin,
&text,
text_size as f32,
&"FFFFFFFF".into(),
);
}
pic
}
}
fn edge_fn(a: &Vec2, b: &Vec2, c: &Vec2) -> f32 {
(c.x - a.x) * (b.y - a.y) - (c.y - a.y) * (b.x - a.x)
}
fn scale_for_unitsize(mvp: &Mat4, aabb: &AABB) -> f32 {
let edges = [
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.upper.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.upper.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.upper.z)),
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.upper.y, aabb.upper.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.upper.z)),
];
let mut min = Vec3::new(f32::MAX, f32::MAX, f32::MAX);
let mut max = Vec3::new(f32::MIN, f32::MIN, f32::MIN);
for e in &edges {
min.x = min.x.min(e.x);
min.y = min.y.min(e.y);
max.x = max.x.max(e.x);
max.y = max.y.max(e.y);
}
1.0 / ((f32::abs(max.x - min.x)).max(f32::abs(max.y - min.y)) / 2.0)
}
fn draw_grid(pic: &mut Picture, vp: &Mat4, z: f32, color: &Vec3, model_size: Vec3, scale: f32) {
// draw grid
let max_xy = model_size.x.max(model_size.y);
let grid_color = (color.x, color.y, color.z, 1.0).into();
let grid_size = 10.0; // mm
let grid_count = ((max_xy * scale) / scale / grid_size + 1.0) as i32;
let grid_spacing = grid_size * scale as f32;
let ox = grid_count as f32 * grid_spacing / 2.0;
for x in 0..=grid_count {
let p0 = Vec3::new(grid_spacing * x as f32 - ox, grid_count as f32 * grid_spacing * 0.5, z);
let p1 = Vec3::new(p0.x, -grid_count as f32 * grid_spacing * 0.5, z);
// to screen space
let sp0 = matmul(&vp, &p0).xy();
let sp1 = matmul(&vp, &p1).xy();
pic.thick_line(
((sp0.x + 1.0) / 2.0 * pic.width() as f32) as i32,
((sp0.y + 1.0) / 2.0 * pic.height() as f32) as i32,
((sp1.x + 1.0) / 2.0 * pic.width() as f32) as i32,
((sp1.y + 1.0) / 2.0 * pic.height() as f32) as i32,
&grid_color,
1.0,
);
}
}
| {
Self {
render_options: RenderOptions::default(),
width,
height,
aspect_ratio: width as f32 / height as f32,
}
} | identifier_body |
rasterbackend.rs | use crate::aabb::*;
use crate::mesh::*;
use crate::picture::*;
use crate::zbuffer::*;
use std::f32::consts::PI;
use std::time::{Duration, Instant};
#[derive(Debug)]
pub struct RenderOptions {
pub view_pos: Vec3,
pub light_pos: Vec3,
pub light_color: Vec3,
pub ambient_color: Vec3,
pub model_color: Vec3,
pub grid_color: Vec3,
pub background_color: Vec4,
pub zoom: f32,
pub grid_visible: bool,
pub draw_size_hint: bool,
}
impl Default for RenderOptions {
fn default() -> Self {
Self {
view_pos: Vec3::new(-1.0, 1.0, -1.0).normalize(),
light_pos: Vec3::new(-1.0, 0.5, -0.5),
light_color: Vec3::new(0.6, 0.6, 0.6),
ambient_color: Vec3::new(0.4, 0.4, 0.4),
model_color: Vec3::new(0.0, 0.45, 1.0),
grid_color: Vec3::new(0.1, 0.1, 0.1),
background_color: Vec4::new(1.0, 1.0, 1.0, 1.0),
grid_visible: true,
zoom: 1.0,
draw_size_hint: true,
}
}
}
#[derive(Debug)]
pub struct RasterBackend {
pub render_options: RenderOptions,
width: u32,
height: u32,
aspect_ratio: f32,
}
impl RasterBackend {
pub fn new(width: u32, height: u32) -> Self {
Self {
render_options: RenderOptions::default(),
width,
height,
aspect_ratio: width as f32 / height as f32,
}
}
fn view_projection(&self, zoom: f32) -> Mat4 {
// calculate view projection matrix
let proj = glm::ortho(
zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5,
zoom * 0.5,
0.0,
1.0,
);
let view = glm::look_at(
&self.render_options.view_pos,
&Vec3::new(0.0, 0.0, 0.0),
&Vec3::new(0.0, 0.0, -1.0),
);
proj * view
}
pub fn fit_mesh_scale(&self, mesh: impl IntoIterator<Item = Triangle> + Copy) -> (AABB, f32) {
let aabb = AABB::from_iterable(mesh);
let vp = self.view_projection(1.0);
        // scale the model such that it fills the entire canvas
(aabb, scale_for_unitsize(&vp, &aabb))
}
pub fn | (
&self,
mesh: impl IntoIterator<Item = Triangle> + Copy,
model_scale: f32,
aabb: &AABB,
timeout: Option<Duration>,
) -> Picture {
let start_time = Instant::now();
let mut pic = Picture::new(self.width, self.height);
let mut zbuf = ZBuffer::new(self.width, self.height);
let mut scaled_aabb = *aabb;
pic.fill(&(&self.render_options.background_color).into());
let vp = self.view_projection(self.render_options.zoom);
// calculate transforms taking the new model scale into account
let model = Mat4::identity()
.append_translation(&-aabb.center())
.append_scaling(model_scale);
let mvp = vp * model;
// let the AABB match the transformed model
scaled_aabb.apply_transform(&model);
// eye normal pointing towards the camera in world space
let eye_normal = self.render_options.view_pos.normalize();
// grid in x and y direction
if self.render_options.grid_visible {
draw_grid(
&mut pic,
&vp,
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
draw_grid(
&mut pic,
&(vp * glm::rotation(PI / 2.0, &Vec3::new(0.0, 0.0, 1.0))),
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
}
for t in mesh {
// timed out?
if let Some(timeout) = timeout {
let dt = Instant::now() - start_time;
if dt > timeout {
// abort
println!("... timeout!");
return pic;
}
}
let normal = -t.normal;
// backface culling
if glm::dot(&eye_normal, &normal) < 0.0 {
continue;
}
let v = &t.vertices;
let v0 = matmul(&mvp, &v[0]);
let v1 = matmul(&mvp, &v[1]);
let v2 = matmul(&mvp, &v[2]);
let v0m = matmul(&model, &v[0]);
let v1m = matmul(&model, &v[1]);
let v2m = matmul(&model, &v[2]);
// triangle bounding box
let min_x = v0.x.min(v1.x).min(v2.x);
let min_y = v0.y.min(v1.y).min(v2.y);
let max_x = v0.x.max(v1.x).max(v2.x);
let max_y = v0.y.max(v1.y).max(v2.y);
// triangle bounding box in screen space
let smin_x = 0.max(((min_x + 1.0) / 2.0 * pic.width() as f32) as u32);
let smin_y = 0.max(((min_y + 1.0) / 2.0 * pic.height() as f32) as u32);
let smax_x = 0.max(pic.width().min(((max_x + 1.0) / 2.0 * pic.width() as f32) as u32));
let smax_y = 0.max(pic.height().min(((max_y + 1.0) / 2.0 * pic.height() as f32) as u32));
for y in smin_y..=smax_y {
for x in smin_x..=smax_x {
// normalized screen coordinates [-1,1]
let nx = 2.0 * ((x as f32 / pic.width() as f32) - 0.5);
let ny = 2.0 * ((y as f32 / pic.height() as f32) - 0.5);
let p = Vec2::new(nx, ny);
let p0 = v0.xy();
let p1 = v1.xy();
let p2 = v2.xy();
let inside =
edge_fn(&p, &p0, &p1) <= 0.0 && edge_fn(&p, &p1, &p2) <= 0.0 && edge_fn(&p, &p2, &p0) <= 0.0;
if inside {
// calculate barycentric coordinates
let area = edge_fn(&p0, &p1, &p2);
let w0 = edge_fn(&p1, &p2, &p) / area;
let w1 = edge_fn(&p2, &p0, &p) / area;
let w2 = edge_fn(&p0, &p1, &p) / area;
// fragment position in screen space
let frag_pos = Vec3::new(
w0 * v0.x + w1 * v1.x + w2 * v2.x,
w0 * v0.y + w1 * v1.y + w2 * v2.y,
w0 * v0.z + w1 * v1.z + w2 * v2.z,
);
// fragment position in world space
let fp = Vec3::new(
w0 * v0m.x + w1 * v1m.x + w2 * v2m.x,
w0 * v0m.y + w1 * v1m.y + w2 * v2m.y,
w0 * v0m.z + w1 * v1m.z + w2 * v2m.z,
);
//let fp = matmul(&mvp_inv, &frag_pos);
if zbuf.test_and_set(x, y, frag_pos.z) {
                            // calculate lighting
let light_normal = (self.render_options.light_pos - fp).normalize(); // normal frag pos to light (world space)
let view_normal = (self.render_options.view_pos - fp).normalize(); // normal frag pos to view (world space)
let reflect_dir = glm::reflect_vec(&-light_normal, &normal);
// diffuse
let diff_color =
glm::dot(&normal, &light_normal).max(0.0) * self.render_options.light_color * 1.0;
// specular
let spec_color = (glm::dot(&view_normal, &reflect_dir).powf(16.0) * 0.7)
* self.render_options.light_color;
// merge
let mut color = self.render_options.ambient_color + diff_color + spec_color;
color.x *= self.render_options.model_color.x;
color.y *= self.render_options.model_color.y;
color.z *= self.render_options.model_color.z;
pic.set(x, y, &(color.x, color.y, color.z, 1.0).into());
}
}
}
}
}
if self.render_options.draw_size_hint {
let margin = 3;
let text_to_height_ratio = 16;
let text = format!(
"{}x{}x{}",
aabb.size().x as i32,
aabb.size().y as i32,
aabb.size().z as i32
);
let text_size = pic.height() / text_to_height_ratio;
pic.fill_rect(
0,
pic.height() as i32 - (text_size + margin * 2) as i32,
pic.width() as i32,
pic.height() as i32,
&"333333FF".into(),
);
pic.stroke_string(
margin,
pic.height() - text_size - margin,
&text,
text_size as f32,
&"FFFFFFFF".into(),
);
}
pic
}
}
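// edge function used by the rasterizer: the 2D cross product (c - a) x (b - a); its sign tells
// on which side of the edge a->b the point c lies, and ratios of edge functions over the full
// triangle area yield the barycentric weights used above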
fn edge_fn(a: &Vec2, b: &Vec2, c: &Vec2) -> f32 {
(c.x - a.x) * (b.y - a.y) - (c.y - a.y) * (b.x - a.x)
}
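// projects the eight AABB corners with the given MVP and returns the scale that makes the larger
// of the projected x/y extents span the [-1, 1] clip range (i.e. fill the canvas)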
fn scale_for_unitsize(mvp: &Mat4, aabb: &AABB) -> f32 {
let edges = [
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.upper.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.upper.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.upper.z)),
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.upper.y, aabb.upper.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.upper.z)),
];
let mut min = Vec3::new(f32::MAX, f32::MAX, f32::MAX);
let mut max = Vec3::new(f32::MIN, f32::MIN, f32::MIN);
for e in &edges {
min.x = min.x.min(e.x);
min.y = min.y.min(e.y);
max.x = max.x.max(e.x);
max.y = max.y.max(e.y);
}
1.0 / ((f32::abs(max.x - min.x)).max(f32::abs(max.y - min.y)) / 2.0)
}
fn draw_grid(pic: &mut Picture, vp: &Mat4, z: f32, color: &Vec3, model_size: Vec3, scale: f32) {
// draw grid
let max_xy = model_size.x.max(model_size.y);
let grid_color = (color.x, color.y, color.z, 1.0).into();
let grid_size = 10.0; // mm
let grid_count = ((max_xy * scale) / scale / grid_size + 1.0) as i32;
let grid_spacing = grid_size * scale as f32;
let ox = grid_count as f32 * grid_spacing / 2.0;
for x in 0..=grid_count {
let p0 = Vec3::new(grid_spacing * x as f32 - ox, grid_count as f32 * grid_spacing * 0.5, z);
let p1 = Vec3::new(p0.x, -grid_count as f32 * grid_spacing * 0.5, z);
// to screen space
let sp0 = matmul(&vp, &p0).xy();
let sp1 = matmul(&vp, &p1).xy();
pic.thick_line(
((sp0.x + 1.0) / 2.0 * pic.width() as f32) as i32,
((sp0.y + 1.0) / 2.0 * pic.height() as f32) as i32,
((sp1.x + 1.0) / 2.0 * pic.width() as f32) as i32,
((sp1.y + 1.0) / 2.0 * pic.height() as f32) as i32,
&grid_color,
1.0,
);
}
}
| render | identifier_name |
rasterbackend.rs | use crate::aabb::*;
use crate::mesh::*;
use crate::picture::*;
use crate::zbuffer::*;
use std::f32::consts::PI;
use std::time::{Duration, Instant};
#[derive(Debug)]
pub struct RenderOptions {
pub view_pos: Vec3,
pub light_pos: Vec3,
pub light_color: Vec3,
pub ambient_color: Vec3,
pub model_color: Vec3,
pub grid_color: Vec3,
pub background_color: Vec4,
pub zoom: f32,
pub grid_visible: bool,
pub draw_size_hint: bool,
}
impl Default for RenderOptions {
fn default() -> Self {
Self {
view_pos: Vec3::new(-1.0, 1.0, -1.0).normalize(),
light_pos: Vec3::new(-1.0, 0.5, -0.5),
light_color: Vec3::new(0.6, 0.6, 0.6),
ambient_color: Vec3::new(0.4, 0.4, 0.4),
model_color: Vec3::new(0.0, 0.45, 1.0),
grid_color: Vec3::new(0.1, 0.1, 0.1),
background_color: Vec4::new(1.0, 1.0, 1.0, 1.0),
grid_visible: true,
zoom: 1.0,
draw_size_hint: true,
}
}
}
#[derive(Debug)]
pub struct RasterBackend {
pub render_options: RenderOptions,
width: u32,
height: u32,
aspect_ratio: f32,
}
impl RasterBackend {
pub fn new(width: u32, height: u32) -> Self {
Self {
render_options: RenderOptions::default(),
width,
height,
aspect_ratio: width as f32 / height as f32,
}
}
fn view_projection(&self, zoom: f32) -> Mat4 {
// calculate view projection matrix
let proj = glm::ortho(
zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5,
zoom * 0.5,
0.0,
1.0,
);
let view = glm::look_at(
&self.render_options.view_pos,
&Vec3::new(0.0, 0.0, 0.0),
&Vec3::new(0.0, 0.0, -1.0),
);
proj * view
}
pub fn fit_mesh_scale(&self, mesh: impl IntoIterator<Item = Triangle> + Copy) -> (AABB, f32) {
let aabb = AABB::from_iterable(mesh);
let vp = self.view_projection(1.0);
        // scale the model such that it fills the entire canvas
(aabb, scale_for_unitsize(&vp, &aabb))
}
pub fn render(
&self,
mesh: impl IntoIterator<Item = Triangle> + Copy,
model_scale: f32,
aabb: &AABB,
timeout: Option<Duration>,
) -> Picture {
let start_time = Instant::now();
let mut pic = Picture::new(self.width, self.height);
let mut zbuf = ZBuffer::new(self.width, self.height);
let mut scaled_aabb = *aabb;
pic.fill(&(&self.render_options.background_color).into());
let vp = self.view_projection(self.render_options.zoom);
// calculate transforms taking the new model scale into account
let model = Mat4::identity()
.append_translation(&-aabb.center())
.append_scaling(model_scale);
let mvp = vp * model;
// let the AABB match the transformed model
scaled_aabb.apply_transform(&model);
// eye normal pointing towards the camera in world space
let eye_normal = self.render_options.view_pos.normalize();
// grid in x and y direction
if self.render_options.grid_visible {
draw_grid(
&mut pic,
&vp,
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
draw_grid(
&mut pic,
&(vp * glm::rotation(PI / 2.0, &Vec3::new(0.0, 0.0, 1.0))),
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
}
for t in mesh {
// timed out?
if let Some(timeout) = timeout {
let dt = Instant::now() - start_time;
if dt > timeout {
// abort
println!("... timeout!");
return pic;
}
}
let normal = -t.normal;
// backface culling
if glm::dot(&eye_normal, &normal) < 0.0 {
continue;
}
let v = &t.vertices;
let v0 = matmul(&mvp, &v[0]);
let v1 = matmul(&mvp, &v[1]);
let v2 = matmul(&mvp, &v[2]);
let v0m = matmul(&model, &v[0]);
let v1m = matmul(&model, &v[1]);
let v2m = matmul(&model, &v[2]);
// triangle bounding box
let min_x = v0.x.min(v1.x).min(v2.x);
let min_y = v0.y.min(v1.y).min(v2.y);
let max_x = v0.x.max(v1.x).max(v2.x);
let max_y = v0.y.max(v1.y).max(v2.y);
// triangle bounding box in screen space
let smin_x = 0.max(((min_x + 1.0) / 2.0 * pic.width() as f32) as u32);
let smin_y = 0.max(((min_y + 1.0) / 2.0 * pic.height() as f32) as u32);
let smax_x = 0.max(pic.width().min(((max_x + 1.0) / 2.0 * pic.width() as f32) as u32));
let smax_y = 0.max(pic.height().min(((max_y + 1.0) / 2.0 * pic.height() as f32) as u32));
for y in smin_y..=smax_y {
for x in smin_x..=smax_x {
// normalized screen coordinates [-1,1]
let nx = 2.0 * ((x as f32 / pic.width() as f32) - 0.5);
let ny = 2.0 * ((y as f32 / pic.height() as f32) - 0.5);
let p = Vec2::new(nx, ny);
let p0 = v0.xy();
let p1 = v1.xy();
let p2 = v2.xy();
let inside =
edge_fn(&p, &p0, &p1) <= 0.0 && edge_fn(&p, &p1, &p2) <= 0.0 && edge_fn(&p, &p2, &p0) <= 0.0;
if inside {
// calculate barycentric coordinates
let area = edge_fn(&p0, &p1, &p2);
let w0 = edge_fn(&p1, &p2, &p) / area;
let w1 = edge_fn(&p2, &p0, &p) / area;
let w2 = edge_fn(&p0, &p1, &p) / area;
// fragment position in screen space
let frag_pos = Vec3::new(
w0 * v0.x + w1 * v1.x + w2 * v2.x,
w0 * v0.y + w1 * v1.y + w2 * v2.y,
w0 * v0.z + w1 * v1.z + w2 * v2.z,
);
// fragment position in world space
let fp = Vec3::new(
w0 * v0m.x + w1 * v1m.x + w2 * v2m.x,
w0 * v0m.y + w1 * v1m.y + w2 * v2m.y,
w0 * v0m.z + w1 * v1m.z + w2 * v2m.z,
);
//let fp = matmul(&mvp_inv, &frag_pos);
if zbuf.test_and_set(x, y, frag_pos.z) {
                            // calculate lighting
let light_normal = (self.render_options.light_pos - fp).normalize(); // normal frag pos to light (world space)
let view_normal = (self.render_options.view_pos - fp).normalize(); // normal frag pos to view (world space)
let reflect_dir = glm::reflect_vec(&-light_normal, &normal);
// diffuse
let diff_color =
glm::dot(&normal, &light_normal).max(0.0) * self.render_options.light_color * 1.0;
// specular
let spec_color = (glm::dot(&view_normal, &reflect_dir).powf(16.0) * 0.7)
* self.render_options.light_color;
// merge
let mut color = self.render_options.ambient_color + diff_color + spec_color;
color.x *= self.render_options.model_color.x;
color.y *= self.render_options.model_color.y;
color.z *= self.render_options.model_color.z;
pic.set(x, y, &(color.x, color.y, color.z, 1.0).into());
}
}
}
}
}
if self.render_options.draw_size_hint |
pic
}
}
fn edge_fn(a: &Vec2, b: &Vec2, c: &Vec2) -> f32 {
(c.x - a.x) * (b.y - a.y) - (c.y - a.y) * (b.x - a.x)
}
fn scale_for_unitsize(mvp: &Mat4, aabb: &AABB) -> f32 {
let edges = [
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.upper.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.lower.z)),
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.upper.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.upper.z)),
matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.upper.y, aabb.upper.z)),
matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.upper.z)),
];
let mut min = Vec3::new(f32::MAX, f32::MAX, f32::MAX);
let mut max = Vec3::new(f32::MIN, f32::MIN, f32::MIN);
for e in &edges {
min.x = min.x.min(e.x);
min.y = min.y.min(e.y);
max.x = max.x.max(e.x);
max.y = max.y.max(e.y);
}
1.0 / ((f32::abs(max.x - min.x)).max(f32::abs(max.y - min.y)) / 2.0)
}
fn draw_grid(pic: &mut Picture, vp: &Mat4, z: f32, color: &Vec3, model_size: Vec3, scale: f32) {
// draw grid
let max_xy = model_size.x.max(model_size.y);
let grid_color = (color.x, color.y, color.z, 1.0).into();
let grid_size = 10.0; // mm
let grid_count = ((max_xy * scale) / scale / grid_size + 1.0) as i32;
let grid_spacing = grid_size * scale as f32;
let ox = grid_count as f32 * grid_spacing / 2.0;
for x in 0..=grid_count {
let p0 = Vec3::new(grid_spacing * x as f32 - ox, grid_count as f32 * grid_spacing * 0.5, z);
let p1 = Vec3::new(p0.x, -grid_count as f32 * grid_spacing * 0.5, z);
// to screen space
let sp0 = matmul(&vp, &p0).xy();
let sp1 = matmul(&vp, &p1).xy();
pic.thick_line(
((sp0.x + 1.0) / 2.0 * pic.width() as f32) as i32,
((sp0.y + 1.0) / 2.0 * pic.height() as f32) as i32,
((sp1.x + 1.0) / 2.0 * pic.width() as f32) as i32,
((sp1.y + 1.0) / 2.0 * pic.height() as f32) as i32,
&grid_color,
1.0,
);
}
}
| {
let margin = 3;
let text_to_height_ratio = 16;
let text = format!(
"{}x{}x{}",
aabb.size().x as i32,
aabb.size().y as i32,
aabb.size().z as i32
);
let text_size = pic.height() / text_to_height_ratio;
pic.fill_rect(
0,
pic.height() as i32 - (text_size + margin * 2) as i32,
pic.width() as i32,
pic.height() as i32,
&"333333FF".into(),
);
pic.stroke_string(
margin,
pic.height() - text_size - margin,
&text,
text_size as f32,
&"FFFFFFFF".into(),
);
} | conditional_block |
mod.rs | use crate::{
data::{Key, Metakey, Value},
error::*,
Aggregator, AggregatorState, Backend, Handle, MapState, Reducer, ReducerState, ValueState,
VecState,
};
use rocksdb::{
checkpoint::Checkpoint, ColumnFamily, ColumnFamilyDescriptor, DBPinnableSlice, Options,
SliceTransform, WriteBatch, WriteOptions, DB,
};
use std::{
cell::UnsafeCell,
collections::HashSet,
fs,
path::{Path, PathBuf},
};
unsafe impl Send for Rocks {}
unsafe impl Sync for Rocks {}
#[derive(Debug)]
pub struct Rocks {
inner: UnsafeCell<DB>,
restored: bool,
name: String,
}
// we use epochs, so WAL is useless for us
fn default_write_opts() -> WriteOptions {
let mut res = WriteOptions::default();
res.disable_wal(true);
res
}
impl Rocks {
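    // note: the DB lives in an UnsafeCell so that create_column_family below can register new
    // column families through &self; the unsafe Send/Sync impls above rely on callers not
    // aliasing these interior mutable borrows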
#[inline(always)]
#[allow(clippy::mut_from_ref)]
fn db_mut(&self) -> &mut DB {
unsafe { &mut (*self.inner.get()) }
}
#[inline(always)]
fn db(&self) -> &DB {
unsafe { &(*self.inner.get()) }
}
#[inline]
fn get_cf_handle(&self, cf_name: impl AsRef<str>) -> Result<&ColumnFamily> {
let cf_name = cf_name.as_ref();
self.db()
.cf_handle(cf_name)
.with_context(|| RocksMissingColumnFamily {
cf_name: cf_name.to_string(),
})
}
#[inline]
fn get(
&self,
cf_name: impl AsRef<str>,
key: impl AsRef<[u8]>,
) -> Result<Option<DBPinnableSlice>> {
let cf = self.get_cf_handle(cf_name)?;
Ok(self.db().get_pinned_cf(cf, key)?)
}
#[inline]
fn put(
&self,
cf_name: impl AsRef<str>,
key: impl AsRef<[u8]>,
value: impl AsRef<[u8]>,
) -> Result<()> {
let cf = self.get_cf_handle(cf_name)?;
Ok(self
.db()
.put_cf_opt(cf, key, value, &default_write_opts())?)
}
#[inline]
fn remove(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<()> {
let cf = self.get_cf_handle(cf)?;
Ok(self.db().delete_cf_opt(cf, key, &default_write_opts())?)
}
fn remove_prefix(&self, cf: impl AsRef<str>, prefix: impl AsRef<[u8]>) -> Result<()> {
let prefix = prefix.as_ref();
let cf_name = cf.as_ref();
let cf = self.get_cf_handle(cf_name)?;
// NOTE: this only works assuming the column family is lexicographically ordered (which is
// the default, so we don't explicitly set it, see Options::set_comparator)
let start = prefix;
        // delete_range deletes all the entries in the [start, end) range, so we can just increment the
// least significant byte of the prefix
let mut end = start.to_vec();
*end.last_mut()
.expect("unreachable, the empty case is covered a few lines above") += 1;
let mut wb = WriteBatch::default();
wb.delete_range_cf(cf, start, &end);
self.db().write_opt(wb, &default_write_opts())?;
Ok(())
}
#[inline]
fn contains(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<bool> {
let cf = self.get_cf_handle(cf.as_ref())?;
Ok(self.db().get_pinned_cf(cf, key)?.is_some())
}
fn | (&self, cf_name: &str, opts: Options) -> Result<()> {
if self.db().cf_handle(cf_name).is_none() {
self.db_mut().create_cf(cf_name, &opts)?;
}
Ok(())
}
}
fn common_options<IK, N>() -> Options
where
IK: Metakey,
N: Metakey,
{
let prefix_size = IK::SIZE + N::SIZE;
let mut opts = Options::default();
    // needed for map state to work properly, but useful for all the states, so the bloom filters get
    // populated
opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(prefix_size as usize));
opts
}
impl Backend for Rocks {
fn name(&self) -> &str {
self.name.as_str()
}
fn create(path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
let mut opts = Options::default();
opts.create_if_missing(true);
let path: PathBuf = path.into();
if !path.exists() {
fs::create_dir_all(&path)?;
}
let column_families: HashSet<String> = match DB::list_cf(&opts, &path) {
Ok(cfs) => cfs.into_iter().filter(|n| n != "default").collect(),
            // TODO: possibly platform-dependent error message check
Err(e) if e.to_string().contains("No such file or directory") => HashSet::new(),
Err(e) => return Err(e.into()),
};
let cfds = if !column_families.is_empty() {
column_families
.into_iter()
.map(|name| ColumnFamilyDescriptor::new(name, Options::default()))
.collect()
} else {
vec![ColumnFamilyDescriptor::new("default", Options::default())]
};
Ok(Rocks {
inner: UnsafeCell::new(DB::open_cf_descriptors(&opts, &path, cfds)?),
restored: false,
name,
})
}
fn restore(live_path: &Path, checkpoint_path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
fs::create_dir_all(live_path)?;
ensure!(
fs::read_dir(live_path)?.next().is_none(),
RocksRestoreDirNotEmpty { dir: &(*live_path) }
);
let mut target_path: PathBuf = live_path.into();
target_path.push("__DUMMY"); // the file name is replaced inside the loop below
for entry in fs::read_dir(checkpoint_path)? {
let entry = entry?;
assert!(entry
.file_type()
.expect("Cannot read entry metadata")
.is_file());
let source_path = entry.path();
            // replaces the __DUMMY placeholder set above the loop
target_path.set_file_name(
source_path
.file_name()
.expect("directory entry with no name?"),
);
fs::copy(&source_path, &target_path)?;
}
Rocks::create(live_path, name).map(|mut r| {
r.restored = true;
r
})
}
fn was_restored(&self) -> bool {
self.restored
}
fn checkpoint(&self, checkpoint_path: &Path) -> Result<()> {
let db = self.db();
db.flush()?;
let checkpointer = Checkpoint::new(db)?;
if checkpoint_path.exists() {
// TODO: add a warning log here
// warn!(logger, "Checkpoint path {:?} exists, deleting");
fs::remove_dir_all(checkpoint_path)?
}
checkpointer.create_checkpoint(checkpoint_path)?;
Ok(())
}
fn register_value_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ValueState<T>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_map_handle<'s, K: Key, V: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<MapState<K, V>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_vec_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<VecState<T>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
opts.set_merge_operator_associative("vec_merge", vec_ops::vec_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_reducer_handle<'s, T: Value, F: Reducer<T>, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ReducerState<T, F>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let reducer_merge = reducer_ops::make_reducer_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("reducer_merge", reducer_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_aggregator_handle<'s, A: Aggregator, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<AggregatorState<A>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let aggregator_merge = aggregator_ops::make_aggregator_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("aggregator_merge", aggregator_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
}
mod aggregator_ops;
mod map_ops;
mod reducer_ops;
mod value_ops;
mod vec_ops;
#[cfg(test)]
pub mod tests {
use super::*;
use std::{
ops::{Deref, DerefMut},
sync::Arc,
};
use tempfile::TempDir;
#[derive(Debug)]
pub struct TestDb {
rocks: Arc<Rocks>,
dir: TempDir,
}
impl TestDb {
#[allow(clippy::new_without_default)]
pub fn new() -> TestDb {
let dir = TempDir::new().unwrap();
let mut dir_path = dir.path().to_path_buf();
dir_path.push("rocks");
fs::create_dir(&dir_path).unwrap();
let rocks = Rocks::create(&dir_path, "testDB".to_string()).unwrap();
TestDb {
rocks: Arc::new(rocks),
dir,
}
}
pub fn checkpoint(&mut self) -> PathBuf {
let mut checkpoint_dir: PathBuf = self.dir.path().into();
checkpoint_dir.push("checkpoint");
self.rocks.checkpoint(&checkpoint_dir).unwrap();
checkpoint_dir
}
pub fn from_checkpoint(checkpoint_dir: &str) -> TestDb {
let dir = TempDir::new().unwrap();
let mut dir_path = dir.path().to_path_buf();
dir_path.push("rocks");
let rocks =
Rocks::restore(&dir_path, checkpoint_dir.as_ref(), "testDB".to_string()).unwrap();
TestDb {
rocks: Arc::new(rocks),
dir,
}
}
}
impl Deref for TestDb {
type Target = Arc<Rocks>;
fn deref(&self) -> &Self::Target {
&self.rocks
}
}
impl DerefMut for TestDb {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.rocks
}
}
#[test]
fn simple_rocksdb_test() {
let db = TestDb::new();
let key = "key";
let value = "test";
let column_family = "default";
db.put(column_family, key.as_bytes(), value.as_bytes())
.expect("put");
{
let v = db.get(column_family, key.as_bytes()).unwrap().unwrap();
assert_eq!(value, String::from_utf8_lossy(&v));
}
db.remove(column_family, key.as_bytes()).expect("remove");
let v = db.get(column_family, key.as_bytes()).unwrap();
assert!(v.is_none());
}
#[test]
fn checkpoint_rocksdb_raw_test() {
let tmp_dir = TempDir::new().unwrap();
let checkpoints_dir = TempDir::new().unwrap();
let restore_dir = TempDir::new().unwrap();
let dir_path = tmp_dir.path();
let mut checkpoints_dir_path = checkpoints_dir.path().to_path_buf();
checkpoints_dir_path.push("chkp0");
let mut restore_dir_path = restore_dir.path().to_path_buf();
restore_dir_path.push("chkp0");
let db = Rocks::create(dir_path, "testDB".to_string()).unwrap();
let key: &[u8] = b"key";
let initial_value: &[u8] = b"value";
let new_value: &[u8] = b"new value";
let column_family = "default";
db.put(column_family, key, initial_value)
.expect("put failed");
db.checkpoint(&checkpoints_dir_path)
.expect("checkpoint failed");
db.put(column_family, key, new_value)
.expect("second put failed");
let db_from_checkpoint = Rocks::restore(
&restore_dir_path,
&checkpoints_dir_path,
"testDB".to_string(),
)
.expect("Could not open checkpointed db");
assert_eq!(
new_value,
db.get(column_family, key)
.expect("Could not get from the original db")
.unwrap()
.as_ref()
);
assert_eq!(
initial_value,
db_from_checkpoint
.get(column_family, key)
.expect("Could not get from the checkpoint")
.unwrap()
.as_ref()
);
}
#[test]
fn checkpoint_restore_state_test() {
let mut original_test = TestDb::new();
let mut a_handle = Handle::value("a");
original_test.register_value_handle(&mut a_handle);
let checkpoint_dir = {
let mut a = a_handle.activate(original_test.clone());
a.set(420).unwrap();
let checkpoint_dir = original_test.checkpoint();
assert_eq!(a.get().unwrap().unwrap(), 420);
a.set(69).unwrap();
assert_eq!(a.get().unwrap().unwrap(), 69);
checkpoint_dir
};
let restored = TestDb::from_checkpoint(&checkpoint_dir.to_string_lossy());
{
let mut a_handle = Handle::value("a");
restored.register_value_handle(&mut a_handle);
let mut a_restored = a_handle.activate(restored.clone());
// TODO: serialize value state metadata (type names, serialization, etc.) into rocksdb, so
// that type mismatches are caught early. Right now it would be possible to, let's say,
// store an integer, and then read a float from the restored state backend
assert_eq!(a_restored.get().unwrap().unwrap(), 420);
a_restored.set(1337).unwrap();
assert_eq!(a_restored.get().unwrap().unwrap(), 1337);
}
}
common_state_tests!(TestDb::new());
}
| create_column_family | identifier_name |
mod.rs | use crate::{
data::{Key, Metakey, Value},
error::*,
Aggregator, AggregatorState, Backend, Handle, MapState, Reducer, ReducerState, ValueState,
VecState,
};
use rocksdb::{
checkpoint::Checkpoint, ColumnFamily, ColumnFamilyDescriptor, DBPinnableSlice, Options,
SliceTransform, WriteBatch, WriteOptions, DB,
};
use std::{
cell::UnsafeCell,
collections::HashSet,
fs,
path::{Path, PathBuf},
};
unsafe impl Send for Rocks {}
unsafe impl Sync for Rocks {}
#[derive(Debug)]
pub struct Rocks {
inner: UnsafeCell<DB>,
restored: bool,
name: String,
}
// we use epochs, so WAL is useless for us
fn default_write_opts() -> WriteOptions {
let mut res = WriteOptions::default();
res.disable_wal(true);
res
}
impl Rocks {
#[inline(always)]
#[allow(clippy::mut_from_ref)]
fn db_mut(&self) -> &mut DB {
unsafe { &mut (*self.inner.get()) }
}
#[inline(always)]
fn db(&self) -> &DB {
unsafe { &(*self.inner.get()) }
}
#[inline]
fn get_cf_handle(&self, cf_name: impl AsRef<str>) -> Result<&ColumnFamily> {
let cf_name = cf_name.as_ref();
self.db()
.cf_handle(cf_name)
.with_context(|| RocksMissingColumnFamily {
cf_name: cf_name.to_string(),
})
}
#[inline]
fn get(
&self,
cf_name: impl AsRef<str>,
key: impl AsRef<[u8]>,
) -> Result<Option<DBPinnableSlice>> {
let cf = self.get_cf_handle(cf_name)?;
Ok(self.db().get_pinned_cf(cf, key)?)
}
#[inline]
fn put(
&self,
cf_name: impl AsRef<str>,
key: impl AsRef<[u8]>,
value: impl AsRef<[u8]>,
) -> Result<()> {
let cf = self.get_cf_handle(cf_name)?;
Ok(self
.db()
.put_cf_opt(cf, key, value, &default_write_opts())?)
}
#[inline]
fn remove(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<()> {
let cf = self.get_cf_handle(cf)?;
Ok(self.db().delete_cf_opt(cf, key, &default_write_opts())?)
}
fn remove_prefix(&self, cf: impl AsRef<str>, prefix: impl AsRef<[u8]>) -> Result<()> {
let prefix = prefix.as_ref();
let cf_name = cf.as_ref();
let cf = self.get_cf_handle(cf_name)?;
// NOTE: this only works assuming the column family is lexicographically ordered (which is
// the default, so we don't explicitly set it, see Options::set_comparator)
let start = prefix;
        // delete_range deletes all the entries in the [start, end) range, so we can just increment the
// least significant byte of the prefix
let mut end = start.to_vec();
*end.last_mut()
.expect("unreachable, the empty case is covered a few lines above") += 1;
let mut wb = WriteBatch::default();
wb.delete_range_cf(cf, start, &end);
self.db().write_opt(wb, &default_write_opts())?;
Ok(())
}
#[inline]
fn contains(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<bool> {
let cf = self.get_cf_handle(cf.as_ref())?;
Ok(self.db().get_pinned_cf(cf, key)?.is_some())
}
fn create_column_family(&self, cf_name: &str, opts: Options) -> Result<()> {
if self.db().cf_handle(cf_name).is_none() {
self.db_mut().create_cf(cf_name, &opts)?;
}
Ok(())
}
}
fn common_options<IK, N>() -> Options
where
IK: Metakey,
N: Metakey,
{
let prefix_size = IK::SIZE + N::SIZE;
let mut opts = Options::default();
    // needed for map state to work properly, but useful for all the states, so the bloom filters get
    // populated
opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(prefix_size as usize));
opts
}
impl Backend for Rocks {
fn name(&self) -> &str {
self.name.as_str()
}
fn create(path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
let mut opts = Options::default();
opts.create_if_missing(true);
let path: PathBuf = path.into();
if !path.exists() {
fs::create_dir_all(&path)?;
}
let column_families: HashSet<String> = match DB::list_cf(&opts, &path) {
Ok(cfs) => cfs.into_iter().filter(|n| n != "default").collect(),
            // TODO: possibly platform-dependent error message check
Err(e) if e.to_string().contains("No such file or directory") => HashSet::new(),
Err(e) => return Err(e.into()),
};
let cfds = if !column_families.is_empty() {
column_families
.into_iter()
.map(|name| ColumnFamilyDescriptor::new(name, Options::default()))
.collect()
} else {
vec![ColumnFamilyDescriptor::new("default", Options::default())]
};
Ok(Rocks {
inner: UnsafeCell::new(DB::open_cf_descriptors(&opts, &path, cfds)?),
restored: false,
name,
})
}
fn restore(live_path: &Path, checkpoint_path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
fs::create_dir_all(live_path)?;
ensure!(
fs::read_dir(live_path)?.next().is_none(),
RocksRestoreDirNotEmpty { dir: &(*live_path) }
);
let mut target_path: PathBuf = live_path.into();
target_path.push("__DUMMY"); // the file name is replaced inside the loop below
for entry in fs::read_dir(checkpoint_path)? {
let entry = entry?;
assert!(entry
.file_type()
.expect("Cannot read entry metadata")
.is_file());
let source_path = entry.path();
            // replaces the __DUMMY placeholder set above the loop
target_path.set_file_name(
source_path
.file_name()
.expect("directory entry with no name?"),
);
fs::copy(&source_path, &target_path)?;
}
Rocks::create(live_path, name).map(|mut r| {
r.restored = true;
r
})
}
fn was_restored(&self) -> bool {
self.restored
}
fn checkpoint(&self, checkpoint_path: &Path) -> Result<()> {
let db = self.db();
db.flush()?;
let checkpointer = Checkpoint::new(db)?;
if checkpoint_path.exists() |
checkpointer.create_checkpoint(checkpoint_path)?;
Ok(())
}
fn register_value_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ValueState<T>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_map_handle<'s, K: Key, V: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<MapState<K, V>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_vec_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<VecState<T>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
opts.set_merge_operator_associative("vec_merge", vec_ops::vec_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_reducer_handle<'s, T: Value, F: Reducer<T>, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ReducerState<T, F>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let reducer_merge = reducer_ops::make_reducer_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("reducer_merge", reducer_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_aggregator_handle<'s, A: Aggregator, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<AggregatorState<A>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let aggregator_merge = aggregator_ops::make_aggregator_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("aggregator_merge", aggregator_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
}
mod aggregator_ops;
mod map_ops;
mod reducer_ops;
mod value_ops;
mod vec_ops;
#[cfg(test)]
pub mod tests {
use super::*;
use std::{
ops::{Deref, DerefMut},
sync::Arc,
};
use tempfile::TempDir;
#[derive(Debug)]
pub struct TestDb {
rocks: Arc<Rocks>,
dir: TempDir,
}
impl TestDb {
#[allow(clippy::new_without_default)]
pub fn new() -> TestDb {
let dir = TempDir::new().unwrap();
let mut dir_path = dir.path().to_path_buf();
dir_path.push("rocks");
fs::create_dir(&dir_path).unwrap();
let rocks = Rocks::create(&dir_path, "testDB".to_string()).unwrap();
TestDb {
rocks: Arc::new(rocks),
dir,
}
}
pub fn checkpoint(&mut self) -> PathBuf {
let mut checkpoint_dir: PathBuf = self.dir.path().into();
checkpoint_dir.push("checkpoint");
self.rocks.checkpoint(&checkpoint_dir).unwrap();
checkpoint_dir
}
pub fn from_checkpoint(checkpoint_dir: &str) -> TestDb {
let dir = TempDir::new().unwrap();
let mut dir_path = dir.path().to_path_buf();
dir_path.push("rocks");
let rocks =
Rocks::restore(&dir_path, checkpoint_dir.as_ref(), "testDB".to_string()).unwrap();
TestDb {
rocks: Arc::new(rocks),
dir,
}
}
}
impl Deref for TestDb {
type Target = Arc<Rocks>;
fn deref(&self) -> &Self::Target {
&self.rocks
}
}
impl DerefMut for TestDb {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.rocks
}
}
#[test]
fn simple_rocksdb_test() {
let db = TestDb::new();
let key = "key";
let value = "test";
let column_family = "default";
db.put(column_family, key.as_bytes(), value.as_bytes())
.expect("put");
{
let v = db.get(column_family, key.as_bytes()).unwrap().unwrap();
assert_eq!(value, String::from_utf8_lossy(&v));
}
db.remove(column_family, key.as_bytes()).expect("remove");
let v = db.get(column_family, key.as_bytes()).unwrap();
assert!(v.is_none());
}
#[test]
fn checkpoint_rocksdb_raw_test() {
let tmp_dir = TempDir::new().unwrap();
let checkpoints_dir = TempDir::new().unwrap();
let restore_dir = TempDir::new().unwrap();
let dir_path = tmp_dir.path();
let mut checkpoints_dir_path = checkpoints_dir.path().to_path_buf();
checkpoints_dir_path.push("chkp0");
let mut restore_dir_path = restore_dir.path().to_path_buf();
restore_dir_path.push("chkp0");
let db = Rocks::create(dir_path, "testDB".to_string()).unwrap();
let key: &[u8] = b"key";
let initial_value: &[u8] = b"value";
let new_value: &[u8] = b"new value";
let column_family = "default";
db.put(column_family, key, initial_value)
.expect("put failed");
db.checkpoint(&checkpoints_dir_path)
.expect("checkpoint failed");
db.put(column_family, key, new_value)
.expect("second put failed");
let db_from_checkpoint = Rocks::restore(
&restore_dir_path,
&checkpoints_dir_path,
"testDB".to_string(),
)
.expect("Could not open checkpointed db");
assert_eq!(
new_value,
db.get(column_family, key)
.expect("Could not get from the original db")
.unwrap()
.as_ref()
);
assert_eq!(
initial_value,
db_from_checkpoint
.get(column_family, key)
.expect("Could not get from the checkpoint")
.unwrap()
.as_ref()
);
}
#[test]
fn checkpoint_restore_state_test() {
let mut original_test = TestDb::new();
let mut a_handle = Handle::value("a");
original_test.register_value_handle(&mut a_handle);
let checkpoint_dir = {
let mut a = a_handle.activate(original_test.clone());
a.set(420).unwrap();
let checkpoint_dir = original_test.checkpoint();
assert_eq!(a.get().unwrap().unwrap(), 420);
a.set(69).unwrap();
assert_eq!(a.get().unwrap().unwrap(), 69);
checkpoint_dir
};
let restored = TestDb::from_checkpoint(&checkpoint_dir.to_string_lossy());
{
let mut a_handle = Handle::value("a");
restored.register_value_handle(&mut a_handle);
let mut a_restored = a_handle.activate(restored.clone());
// TODO: serialize value state metadata (type names, serialization, etc.) into rocksdb, so
// that type mismatches are caught early. Right now it would be possible to, let's say,
// store an integer, and then read a float from the restored state backend
assert_eq!(a_restored.get().unwrap().unwrap(), 420);
a_restored.set(1337).unwrap();
assert_eq!(a_restored.get().unwrap().unwrap(), 1337);
}
}
common_state_tests!(TestDb::new());
}
| {
// TODO: add a warning log here
// warn!(logger, "Checkpoint path {:?} exists, deleting");
fs::remove_dir_all(checkpoint_path)?
} | conditional_block |
mod.rs | use crate::{
data::{Key, Metakey, Value},
error::*,
Aggregator, AggregatorState, Backend, Handle, MapState, Reducer, ReducerState, ValueState,
VecState,
};
use rocksdb::{
checkpoint::Checkpoint, ColumnFamily, ColumnFamilyDescriptor, DBPinnableSlice, Options,
SliceTransform, WriteBatch, WriteOptions, DB,
};
use std::{
cell::UnsafeCell,
collections::HashSet,
fs,
path::{Path, PathBuf},
};
unsafe impl Send for Rocks {}
unsafe impl Sync for Rocks {}
#[derive(Debug)]
pub struct Rocks {
inner: UnsafeCell<DB>,
restored: bool,
name: String,
}
// we use epochs, so WAL is useless for us
fn default_write_opts() -> WriteOptions {
let mut res = WriteOptions::default();
res.disable_wal(true);
res
}
impl Rocks {
#[inline(always)]
#[allow(clippy::mut_from_ref)]
fn db_mut(&self) -> &mut DB {
unsafe { &mut (*self.inner.get()) }
}
#[inline(always)]
fn db(&self) -> &DB {
unsafe { &(*self.inner.get()) }
}
#[inline]
fn get_cf_handle(&self, cf_name: impl AsRef<str>) -> Result<&ColumnFamily> {
let cf_name = cf_name.as_ref();
self.db()
.cf_handle(cf_name)
.with_context(|| RocksMissingColumnFamily {
cf_name: cf_name.to_string(),
})
}
#[inline]
fn get(
&self,
cf_name: impl AsRef<str>,
key: impl AsRef<[u8]>,
) -> Result<Option<DBPinnableSlice>> {
let cf = self.get_cf_handle(cf_name)?;
Ok(self.db().get_pinned_cf(cf, key)?)
}
#[inline]
fn put(
&self,
cf_name: impl AsRef<str>,
key: impl AsRef<[u8]>,
value: impl AsRef<[u8]>,
) -> Result<()> {
let cf = self.get_cf_handle(cf_name)?;
Ok(self
.db()
.put_cf_opt(cf, key, value, &default_write_opts())?)
}
#[inline]
fn remove(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<()> {
let cf = self.get_cf_handle(cf)?;
Ok(self.db().delete_cf_opt(cf, key, &default_write_opts())?)
}
fn remove_prefix(&self, cf: impl AsRef<str>, prefix: impl AsRef<[u8]>) -> Result<()> {
let prefix = prefix.as_ref();
let cf_name = cf.as_ref();
let cf = self.get_cf_handle(cf_name)?;
// NOTE: this only works assuming the column family is lexicographically ordered (which is
// the default, so we don't explicitly set it, see Options::set_comparator)
let start = prefix;
        // delete_range deletes all the entries in the [start, end) range, so we can just increment the
// least significant byte of the prefix
let mut end = start.to_vec();
*end.last_mut()
.expect("unreachable, the empty case is covered a few lines above") += 1;
let mut wb = WriteBatch::default();
wb.delete_range_cf(cf, start, &end);
self.db().write_opt(wb, &default_write_opts())?;
Ok(())
}
#[inline]
fn contains(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<bool> {
let cf = self.get_cf_handle(cf.as_ref())?;
Ok(self.db().get_pinned_cf(cf, key)?.is_some())
}
fn create_column_family(&self, cf_name: &str, opts: Options) -> Result<()> {
if self.db().cf_handle(cf_name).is_none() {
self.db_mut().create_cf(cf_name, &opts)?;
}
Ok(())
}
}
fn common_options<IK, N>() -> Options
where
IK: Metakey,
N: Metakey,
|
impl Backend for Rocks {
fn name(&self) -> &str {
self.name.as_str()
}
fn create(path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
let mut opts = Options::default();
opts.create_if_missing(true);
let path: PathBuf = path.into();
if !path.exists() {
fs::create_dir_all(&path)?;
}
let column_families: HashSet<String> = match DB::list_cf(&opts, &path) {
Ok(cfs) => cfs.into_iter().filter(|n| n != "default").collect(),
// TODO: possibly platform-dependant error message check
Err(e) if e.to_string().contains("No such file or directory") => HashSet::new(),
Err(e) => return Err(e.into()),
};
let cfds = if !column_families.is_empty() {
column_families
.into_iter()
.map(|name| ColumnFamilyDescriptor::new(name, Options::default()))
.collect()
} else {
vec![ColumnFamilyDescriptor::new("default", Options::default())]
};
Ok(Rocks {
inner: UnsafeCell::new(DB::open_cf_descriptors(&opts, &path, cfds)?),
restored: false,
name,
})
}
fn restore(live_path: &Path, checkpoint_path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
fs::create_dir_all(live_path)?;
ensure!(
fs::read_dir(live_path)?.next().is_none(),
RocksRestoreDirNotEmpty { dir: &(*live_path) }
);
let mut target_path: PathBuf = live_path.into();
target_path.push("__DUMMY"); // the file name is replaced inside the loop below
for entry in fs::read_dir(checkpoint_path)? {
let entry = entry?;
assert!(entry
.file_type()
.expect("Cannot read entry metadata")
.is_file());
let source_path = entry.path();
            // replaces the __DUMMY placeholder set above the loop
target_path.set_file_name(
source_path
.file_name()
.expect("directory entry with no name?"),
);
fs::copy(&source_path, &target_path)?;
}
Rocks::create(live_path, name).map(|mut r| {
r.restored = true;
r
})
}
fn was_restored(&self) -> bool {
self.restored
}
fn checkpoint(&self, checkpoint_path: &Path) -> Result<()> {
let db = self.db();
db.flush()?;
let checkpointer = Checkpoint::new(db)?;
if checkpoint_path.exists() {
// TODO: add a warning log here
// warn!(logger, "Checkpoint path {:?} exists, deleting");
fs::remove_dir_all(checkpoint_path)?
}
checkpointer.create_checkpoint(checkpoint_path)?;
Ok(())
}
fn register_value_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ValueState<T>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_map_handle<'s, K: Key, V: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<MapState<K, V>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_vec_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<VecState<T>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
opts.set_merge_operator_associative("vec_merge", vec_ops::vec_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_reducer_handle<'s, T: Value, F: Reducer<T>, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ReducerState<T, F>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let reducer_merge = reducer_ops::make_reducer_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("reducer_merge", reducer_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_aggregator_handle<'s, A: Aggregator, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<AggregatorState<A>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let aggregator_merge = aggregator_ops::make_aggregator_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("aggregator_merge", aggregator_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
}
mod aggregator_ops;
mod map_ops;
mod reducer_ops;
mod value_ops;
mod vec_ops;
#[cfg(test)]
pub mod tests {
use super::*;
use std::{
ops::{Deref, DerefMut},
sync::Arc,
};
use tempfile::TempDir;
#[derive(Debug)]
pub struct TestDb {
rocks: Arc<Rocks>,
dir: TempDir,
}
impl TestDb {
#[allow(clippy::new_without_default)]
pub fn new() -> TestDb {
let dir = TempDir::new().unwrap();
let mut dir_path = dir.path().to_path_buf();
dir_path.push("rocks");
fs::create_dir(&dir_path).unwrap();
let rocks = Rocks::create(&dir_path, "testDB".to_string()).unwrap();
TestDb {
rocks: Arc::new(rocks),
dir,
}
}
pub fn checkpoint(&mut self) -> PathBuf {
let mut checkpoint_dir: PathBuf = self.dir.path().into();
checkpoint_dir.push("checkpoint");
self.rocks.checkpoint(&checkpoint_dir).unwrap();
checkpoint_dir
}
pub fn from_checkpoint(checkpoint_dir: &str) -> TestDb {
let dir = TempDir::new().unwrap();
let mut dir_path = dir.path().to_path_buf();
dir_path.push("rocks");
let rocks =
Rocks::restore(&dir_path, checkpoint_dir.as_ref(), "testDB".to_string()).unwrap();
TestDb {
rocks: Arc::new(rocks),
dir,
}
}
}
impl Deref for TestDb {
type Target = Arc<Rocks>;
fn deref(&self) -> &Self::Target {
&self.rocks
}
}
impl DerefMut for TestDb {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.rocks
}
}
#[test]
fn simple_rocksdb_test() {
let db = TestDb::new();
let key = "key";
let value = "test";
let column_family = "default";
db.put(column_family, key.as_bytes(), value.as_bytes())
.expect("put");
{
let v = db.get(column_family, key.as_bytes()).unwrap().unwrap();
assert_eq!(value, String::from_utf8_lossy(&v));
}
db.remove(column_family, key.as_bytes()).expect("remove");
let v = db.get(column_family, key.as_bytes()).unwrap();
assert!(v.is_none());
}
#[test]
fn checkpoint_rocksdb_raw_test() {
let tmp_dir = TempDir::new().unwrap();
let checkpoints_dir = TempDir::new().unwrap();
let restore_dir = TempDir::new().unwrap();
let dir_path = tmp_dir.path();
let mut checkpoints_dir_path = checkpoints_dir.path().to_path_buf();
checkpoints_dir_path.push("chkp0");
let mut restore_dir_path = restore_dir.path().to_path_buf();
restore_dir_path.push("chkp0");
let db = Rocks::create(dir_path, "testDB".to_string()).unwrap();
let key: &[u8] = b"key";
let initial_value: &[u8] = b"value";
let new_value: &[u8] = b"new value";
let column_family = "default";
db.put(column_family, key, initial_value)
.expect("put failed");
db.checkpoint(&checkpoints_dir_path)
.expect("checkpoint failed");
db.put(column_family, key, new_value)
.expect("second put failed");
let db_from_checkpoint = Rocks::restore(
&restore_dir_path,
&checkpoints_dir_path,
"testDB".to_string(),
)
.expect("Could not open checkpointed db");
assert_eq!(
new_value,
db.get(column_family, key)
.expect("Could not get from the original db")
.unwrap()
.as_ref()
);
assert_eq!(
initial_value,
db_from_checkpoint
.get(column_family, key)
.expect("Could not get from the checkpoint")
.unwrap()
.as_ref()
);
}
#[test]
fn checkpoint_restore_state_test() {
let mut original_test = TestDb::new();
let mut a_handle = Handle::value("a");
original_test.register_value_handle(&mut a_handle);
let checkpoint_dir = {
let mut a = a_handle.activate(original_test.clone());
a.set(420).unwrap();
let checkpoint_dir = original_test.checkpoint();
assert_eq!(a.get().unwrap().unwrap(), 420);
a.set(69).unwrap();
assert_eq!(a.get().unwrap().unwrap(), 69);
checkpoint_dir
};
let restored = TestDb::from_checkpoint(&checkpoint_dir.to_string_lossy());
{
let mut a_handle = Handle::value("a");
restored.register_value_handle(&mut a_handle);
let mut a_restored = a_handle.activate(restored.clone());
// TODO: serialize value state metadata (type names, serialization, etc.) into rocksdb, so
// that type mismatches are caught early. Right now it would be possible to, let's say,
// store an integer, and then read a float from the restored state backend
assert_eq!(a_restored.get().unwrap().unwrap(), 420);
a_restored.set(1337).unwrap();
assert_eq!(a_restored.get().unwrap().unwrap(), 1337);
}
}
common_state_tests!(TestDb::new());
}
| {
let prefix_size = IK::SIZE + N::SIZE;
let mut opts = Options::default();
    // needed for map state to work properly, but useful for all the states, so the bloom filters get
    // populated
opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(prefix_size as usize));
opts
} | identifier_body |
mod.rs | use crate::{
data::{Key, Metakey, Value},
error::*,
Aggregator, AggregatorState, Backend, Handle, MapState, Reducer, ReducerState, ValueState,
VecState,
};
use rocksdb::{
checkpoint::Checkpoint, ColumnFamily, ColumnFamilyDescriptor, DBPinnableSlice, Options,
SliceTransform, WriteBatch, WriteOptions, DB,
};
use std::{
cell::UnsafeCell,
collections::HashSet,
fs,
path::{Path, PathBuf},
};
unsafe impl Send for Rocks {}
unsafe impl Sync for Rocks {}
#[derive(Debug)]
pub struct Rocks {
inner: UnsafeCell<DB>,
restored: bool,
name: String,
}
// we use epochs, so WAL is useless for us
fn default_write_opts() -> WriteOptions {
let mut res = WriteOptions::default();
res.disable_wal(true);
res
}
impl Rocks {
#[inline(always)]
#[allow(clippy::mut_from_ref)]
fn db_mut(&self) -> &mut DB {
unsafe { &mut (*self.inner.get()) }
}
#[inline(always)]
fn db(&self) -> &DB {
unsafe { &(*self.inner.get()) }
}
#[inline]
fn get_cf_handle(&self, cf_name: impl AsRef<str>) -> Result<&ColumnFamily> {
let cf_name = cf_name.as_ref();
self.db()
.cf_handle(cf_name)
.with_context(|| RocksMissingColumnFamily {
cf_name: cf_name.to_string(),
})
}
#[inline]
fn get(
&self,
cf_name: impl AsRef<str>,
key: impl AsRef<[u8]>,
) -> Result<Option<DBPinnableSlice>> {
let cf = self.get_cf_handle(cf_name)?;
Ok(self.db().get_pinned_cf(cf, key)?)
}
#[inline]
fn put(
&self,
cf_name: impl AsRef<str>,
key: impl AsRef<[u8]>,
value: impl AsRef<[u8]>,
) -> Result<()> {
let cf = self.get_cf_handle(cf_name)?;
Ok(self
.db()
.put_cf_opt(cf, key, value, &default_write_opts())?)
}
#[inline]
fn remove(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<()> {
let cf = self.get_cf_handle(cf)?;
Ok(self.db().delete_cf_opt(cf, key, &default_write_opts())?)
}
fn remove_prefix(&self, cf: impl AsRef<str>, prefix: impl AsRef<[u8]>) -> Result<()> {
let prefix = prefix.as_ref();
let cf_name = cf.as_ref();
let cf = self.get_cf_handle(cf_name)?;
// NOTE: this only works assuming the column family is lexicographically ordered (which is
// the default, so we don't explicitly set it, see Options::set_comparator)
let start = prefix;
        // delete_range deletes all the entries in the [start, end) range, so we can just increment the
// least significant byte of the prefix
let mut end = start.to_vec();
*end.last_mut()
.expect("unreachable, the empty case is covered a few lines above") += 1;
let mut wb = WriteBatch::default();
wb.delete_range_cf(cf, start, &end);
self.db().write_opt(wb, &default_write_opts())?;
Ok(())
}
#[inline]
fn contains(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<bool> {
let cf = self.get_cf_handle(cf.as_ref())?;
Ok(self.db().get_pinned_cf(cf, key)?.is_some())
}
fn create_column_family(&self, cf_name: &str, opts: Options) -> Result<()> {
if self.db().cf_handle(cf_name).is_none() {
self.db_mut().create_cf(cf_name, &opts)?;
}
Ok(())
}
}
fn common_options<IK, N>() -> Options
where
IK: Metakey,
N: Metakey,
{
let prefix_size = IK::SIZE + N::SIZE;
let mut opts = Options::default();
    // needed for map state to work properly, but useful for all the states, so the bloom filters get
    // populated
opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(prefix_size as usize));
opts
}
impl Backend for Rocks {
fn name(&self) -> &str {
self.name.as_str()
}
fn create(path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
let mut opts = Options::default();
opts.create_if_missing(true);
let path: PathBuf = path.into();
if !path.exists() {
fs::create_dir_all(&path)?;
}
let column_families: HashSet<String> = match DB::list_cf(&opts, &path) {
Ok(cfs) => cfs.into_iter().filter(|n| n != "default").collect(),
            // TODO: possibly platform-dependent error message check
Err(e) if e.to_string().contains("No such file or directory") => HashSet::new(),
Err(e) => return Err(e.into()),
};
let cfds = if !column_families.is_empty() {
column_families
.into_iter()
.map(|name| ColumnFamilyDescriptor::new(name, Options::default()))
.collect()
} else {
vec![ColumnFamilyDescriptor::new("default", Options::default())]
};
Ok(Rocks {
inner: UnsafeCell::new(DB::open_cf_descriptors(&opts, &path, cfds)?),
restored: false,
name,
})
}
fn restore(live_path: &Path, checkpoint_path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
fs::create_dir_all(live_path)?;
ensure!(
fs::read_dir(live_path)?.next().is_none(),
RocksRestoreDirNotEmpty { dir: &(*live_path) }
);
let mut target_path: PathBuf = live_path.into();
target_path.push("__DUMMY"); // the file name is replaced inside the loop below
for entry in fs::read_dir(checkpoint_path)? {
let entry = entry?;
assert!(entry
.file_type()
.expect("Cannot read entry metadata")
.is_file());
let source_path = entry.path();
            // replaces the __DUMMY placeholder set above the loop
target_path.set_file_name(
source_path
.file_name()
.expect("directory entry with no name?"),
);
fs::copy(&source_path, &target_path)?;
}
Rocks::create(live_path, name).map(|mut r| {
r.restored = true;
r
})
}
fn was_restored(&self) -> bool {
self.restored
}
fn checkpoint(&self, checkpoint_path: &Path) -> Result<()> {
let db = self.db();
db.flush()?;
let checkpointer = Checkpoint::new(db)?;
if checkpoint_path.exists() { | }
checkpointer.create_checkpoint(checkpoint_path)?;
Ok(())
}
fn register_value_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ValueState<T>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_map_handle<'s, K: Key, V: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<MapState<K, V>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_vec_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<VecState<T>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
opts.set_merge_operator_associative("vec_merge", vec_ops::vec_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_reducer_handle<'s, T: Value, F: Reducer<T>, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ReducerState<T, F>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let reducer_merge = reducer_ops::make_reducer_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("reducer_merge", reducer_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_aggregator_handle<'s, A: Aggregator, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<AggregatorState<A>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let aggregator_merge = aggregator_ops::make_aggregator_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("aggregator_merge", aggregator_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
}
mod aggregator_ops;
mod map_ops;
mod reducer_ops;
mod value_ops;
mod vec_ops;
#[cfg(test)]
pub mod tests {
use super::*;
use std::{
ops::{Deref, DerefMut},
sync::Arc,
};
use tempfile::TempDir;
#[derive(Debug)]
pub struct TestDb {
rocks: Arc<Rocks>,
dir: TempDir,
}
impl TestDb {
#[allow(clippy::new_without_default)]
pub fn new() -> TestDb {
let dir = TempDir::new().unwrap();
let mut dir_path = dir.path().to_path_buf();
dir_path.push("rocks");
fs::create_dir(&dir_path).unwrap();
let rocks = Rocks::create(&dir_path, "testDB".to_string()).unwrap();
TestDb {
rocks: Arc::new(rocks),
dir,
}
}
pub fn checkpoint(&mut self) -> PathBuf {
let mut checkpoint_dir: PathBuf = self.dir.path().into();
checkpoint_dir.push("checkpoint");
self.rocks.checkpoint(&checkpoint_dir).unwrap();
checkpoint_dir
}
pub fn from_checkpoint(checkpoint_dir: &str) -> TestDb {
let dir = TempDir::new().unwrap();
let mut dir_path = dir.path().to_path_buf();
dir_path.push("rocks");
let rocks =
Rocks::restore(&dir_path, checkpoint_dir.as_ref(), "testDB".to_string()).unwrap();
TestDb {
rocks: Arc::new(rocks),
dir,
}
}
}
impl Deref for TestDb {
type Target = Arc<Rocks>;
fn deref(&self) -> &Self::Target {
&self.rocks
}
}
impl DerefMut for TestDb {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.rocks
}
}
#[test]
fn simple_rocksdb_test() {
let db = TestDb::new();
let key = "key";
let value = "test";
let column_family = "default";
db.put(column_family, key.as_bytes(), value.as_bytes())
.expect("put");
{
let v = db.get(column_family, key.as_bytes()).unwrap().unwrap();
assert_eq!(value, String::from_utf8_lossy(&v));
}
db.remove(column_family, key.as_bytes()).expect("remove");
let v = db.get(column_family, key.as_bytes()).unwrap();
assert!(v.is_none());
}
#[test]
fn checkpoint_rocksdb_raw_test() {
let tmp_dir = TempDir::new().unwrap();
let checkpoints_dir = TempDir::new().unwrap();
let restore_dir = TempDir::new().unwrap();
let dir_path = tmp_dir.path();
let mut checkpoints_dir_path = checkpoints_dir.path().to_path_buf();
checkpoints_dir_path.push("chkp0");
let mut restore_dir_path = restore_dir.path().to_path_buf();
restore_dir_path.push("chkp0");
let db = Rocks::create(dir_path, "testDB".to_string()).unwrap();
let key: &[u8] = b"key";
let initial_value: &[u8] = b"value";
let new_value: &[u8] = b"new value";
let column_family = "default";
db.put(column_family, key, initial_value)
.expect("put failed");
db.checkpoint(&checkpoints_dir_path)
.expect("checkpoint failed");
db.put(column_family, key, new_value)
.expect("second put failed");
let db_from_checkpoint = Rocks::restore(
&restore_dir_path,
&checkpoints_dir_path,
"testDB".to_string(),
)
.expect("Could not open checkpointed db");
assert_eq!(
new_value,
db.get(column_family, key)
.expect("Could not get from the original db")
.unwrap()
.as_ref()
);
assert_eq!(
initial_value,
db_from_checkpoint
.get(column_family, key)
.expect("Could not get from the checkpoint")
.unwrap()
.as_ref()
);
}
#[test]
fn checkpoint_restore_state_test() {
let mut original_test = TestDb::new();
let mut a_handle = Handle::value("a");
original_test.register_value_handle(&mut a_handle);
let checkpoint_dir = {
let mut a = a_handle.activate(original_test.clone());
a.set(420).unwrap();
let checkpoint_dir = original_test.checkpoint();
assert_eq!(a.get().unwrap().unwrap(), 420);
a.set(69).unwrap();
assert_eq!(a.get().unwrap().unwrap(), 69);
checkpoint_dir
};
let restored = TestDb::from_checkpoint(&checkpoint_dir.to_string_lossy());
{
let mut a_handle = Handle::value("a");
restored.register_value_handle(&mut a_handle);
let mut a_restored = a_handle.activate(restored.clone());
// TODO: serialize value state metadata (type names, serialization, etc.) into rocksdb, so
// that type mismatches are caught early. Right now it would be possible to, let's say,
// store an integer, and then read a float from the restored state backend
assert_eq!(a_restored.get().unwrap().unwrap(), 420);
a_restored.set(1337).unwrap();
assert_eq!(a_restored.get().unwrap().unwrap(), 1337);
}
}
common_state_tests!(TestDb::new());
} | // TODO: add a warning log here
// warn!(logger, "Checkpoint path {:?} exists, deleting");
fs::remove_dir_all(checkpoint_path)? | random_line_split |
index.ts | import transport from './transport'
import modules, { RokkaApi } from './apis'
import RokkaResponse, {
RokkaResponse as RokkaResponseInterface,
} from './response'
import { stringify } from 'query-string'
import FormData from 'form-data'
import user, {
ApiTokenGetCallback,
ApiTokenPayload,
ApiTokenSetCallback,
RequestQueryParamsNewToken,
} from './apis/user'
import { _getTokenPayload, _isTokenExpiring, _tokenValidFor } from './utils'
export interface Config {
apiKey?: string
apiHost?: string // default: https://api.rokka.io
apiVersion?: number | string // default: 1
apiTokenGetCallback?: ApiTokenGetCallback
apiTokenSetCallback?: ApiTokenSetCallback
apiTokenRefreshTime?: number
apiTokenOptions?: RequestQueryParamsNewToken | null
renderHost?: string // default: https://{organization}.rokka.io
debug?: boolean // default: false
transport?: {
requestTimeout?: number // milliseconds to wait for rokka server response (default: 30000)
retries?: number // number of retries when API response is 429 (default: 10) | maxTimeout?: number // maximum milliseconds between retries (default: 10000)
randomize?: boolean // randomize time between retries (default: true)
agent?: any
}
}
interface RequestOptions {
headers?: object
noAuthHeaders?: boolean
fallBackToText?: boolean
form?: boolean
multipart?: boolean
forceUseApiKey?: boolean
noTokenRefresh?: boolean
host?: string
}
const defaults = {
apiHost: 'https://api.rokka.io',
renderHost: 'https://{organization}.rokka.io',
apiVersion: 1,
transport: {
requestTimeout: 30000,
retries: 10,
minTimeout: 1000,
maxTimeout: 10000,
randomize: true,
factor: 2,
debug: false,
},
}
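// Pick the body representation: JSON when the content-type says so, text for
// 201/204 responses or when the caller asked to fall back to text, raw body otherwise.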
const getResponseBody = async (response: Response, fallbackToText = false) => {
if (response.headers && response.json) {
if (response.headers.get('content-type') === 'application/json') {
return response.json()
}
if (response.status === 204 || response.status === 201 || fallbackToText) {
return response.text()
}
return response.body
}
return response.body
}
interface Request {
method: string
headers: { 'Api-Version'?: string | number; 'Api-Key'?: string }
timeout: number | undefined
retries: number | undefined | any
retryDelay: (attempt: number) => number
form: {}
json: boolean
body: any
agent?: any
}
export interface RequestQueryParams {
[key: string]: string | number | boolean | undefined | null
}
export interface State {
apiKey: string | undefined
apiHost: string
apiVersion: number | string
renderHost: string
transportOptions: any
apiTokenGetCallback?: ApiTokenGetCallback
apiTokenSetCallback?: ApiTokenSetCallback
apiTokenPayload: ApiTokenPayload | null
apiTokenOptions?: RequestQueryParamsNewToken | null
apiTokenRefreshTime: number
request(
method: string,
path: string,
payload?: any | null | undefined,
queryParams?: RequestQueryParams | null,
options?: RequestOptions | undefined | null,
): Promise<RokkaResponseInterface>
}
/**
* Initializing the rokka client.
*
* ```js
* const rokka = require('rokka')({
* apiKey: 'apikey', // required for certain operations
* apiTokenGetCallback?: <() => string> // return JWT token instead of API Key
* apiTokenSetCallback?: <((token: string, payload?: object|null) => void)> // Stores a newly retrieved JWT token
* apiTokenOptions?: <object> // The rokka.user.getNewToken query parameter options, default: {}
 * apiTokenRefreshTime?: <number> // how many seconds before expiry the token should be refreshed, default: 3600
* apiHost: '<url>', // default: https://api.rokka.io
* apiVersion: <number>, // default: 1
* renderHost: '<url>', // default: https://{organization}.rokka.io
* debug: true, // default: false
* transport: {
* requestTimeout: <number>, // milliseconds to wait for rokka server response (default: 30000)
* retries: <number>, // number of retries when API response is 429 (default: 10)
* minTimeout: <number>, // minimum milliseconds between retries (default: 1000)
* maxTimeout: <number>, // maximum milliseconds between retries (default: 10000)
* randomize: <boolean> // randomize time between retries (default: true)
* agent?: <any> // an agent to be used with node-fetch, eg. if you need a proxy (default: undefined)
* }
* });
* ```
*
* All properties are optional since certain calls don't require credentials.
*
* If you need to use a proxy, you can do the following
*
* ```js
* import { HttpsProxyAgent } from 'https-proxy-agent'
*
* const rokka = require('rokka')({
* apiKey: 'apikey'
* transport: {agent: new HttpsProxyAgent(proxy)}
* });
* ```
*
* @param {Object} [config={}] configuration properties
* @return {Object}
*
* @module rokka
*/
export default (config: Config = {}): RokkaApi => {
const state: State = {
// config
apiKey: config.apiKey,
apiHost: config.apiHost || defaults.apiHost,
apiTokenGetCallback: config.apiTokenGetCallback || null,
apiTokenSetCallback: config.apiTokenSetCallback || null,
apiTokenPayload: null,
apiTokenOptions: config.apiTokenOptions || {},
apiTokenRefreshTime: config.apiTokenRefreshTime || 3600,
apiVersion: config.apiVersion || defaults.apiVersion,
renderHost: config.renderHost || defaults.renderHost,
transportOptions: Object.assign(defaults.transport, config.transport),
// functions
async request(
method: string,
path: string,
payload: any | null = null,
queryParams: {
[key: string]: string | number | boolean
} | null = null,
options: RequestOptions = {
noAuthHeaders: false,
fallBackToText: false,
forceUseApiKey: false,
noTokenRefresh: false,
host: undefined,
},
): Promise<RokkaResponseInterface> {
let uri = [options.host || state.apiHost, path].join('/')
if (
queryParams &&
!(
Object.entries(queryParams).length === 0 &&
queryParams.constructor === Object
)
) {
uri += '?' + stringify(queryParams)
}
const headers: {
'Api-Version'?: string | number
'Api-Key'?: string
Authorization?: string
} = options.headers || {}
headers['Api-Version'] = state.apiVersion
if (options.noAuthHeaders !== true) {
if (!options.forceUseApiKey && state.apiTokenGetCallback) {
let apiToken = state.apiTokenGetCallback()
          // fill apiTokenPayload if it's not set yet; this happens when you load a page, for example
if (!state.apiTokenPayload) {
state.apiTokenPayload = _getTokenPayload(apiToken)
}
// get a new token, when it's somehow almost expired, but should still be valid
const isTokenValid =
apiToken &&
state.apiTokenPayload?.rn === true &&
_tokenValidFor(state.apiTokenPayload?.exp, apiToken) > 0
// if it's not valid, it's also not expiring...
const isTokenExpiring =
isTokenValid &&
_isTokenExpiring(
state.apiTokenPayload?.exp,
apiToken,
state.apiTokenRefreshTime,
)
if (
(!options.noTokenRefresh && isTokenValid && isTokenExpiring) ||
(!isTokenValid && state.apiKey) //or do we have an apiKey
) {
try {
apiToken = (await user(state).user.getNewToken(state.apiKey)).body
.token
} catch (e: any) {
            // clear the api token so that we can enforce a new login;
            // usually a 403 means that we couldn't get a new token (e.g. when trying to get a longer expiry time)
if (e && e.statusCode === 403 && state.apiTokenSetCallback) {
state.apiTokenSetCallback('', null)
}
}
}
if (!apiToken) {
const code = 401
throw {
error: {
code,
message: 'No API token (or renewing it did not work correctly)',
},
status: code,
}
}
          // set apiTokenExpiry, if not set, to avoid having to decode it all the time
headers['Authorization'] = `Bearer ${apiToken}`
} else {
if (!state.apiKey) {
return Promise.reject(
new Error('Missing required property `apiKey`'),
)
}
headers['Api-Key'] = state.apiKey
}
}
const retryDelay = (attempt: number) => {
// from https://github.com/tim-kos/node-retry/blob/master/lib/retry.js
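        // exponential backoff: minTimeout * factor^attempt, optionally scaled by a random
        // factor in [1, 2), capped at maxTimeout (with the defaults: ~1s, ~2s, ~4s, ... up to 10s)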
const random = state.transportOptions.randomize ? Math.random() + 1 : 1
const timeout = Math.round(
random *
state.transportOptions.minTimeout *
Math.pow(state.transportOptions.factor, attempt),
)
return Math.min(timeout, state.transportOptions.maxTimeout)
}
const requestOptions: Request = {
method: method,
headers: headers,
timeout: state.transportOptions.requestTimeout,
retries: state.transportOptions.retries,
retryDelay,
form: {},
json: false,
body: undefined,
agent: state.transportOptions.agent,
}
if (options.form === true) {
const formData = payload || {}
const requestData = new FormData()
Object.keys(formData).forEach(function (meta) {
requestData.append(meta, formData[meta])
})
requestOptions.body = requestData
} else if (options.multipart !== true) {
requestOptions.json = true
requestOptions.body = payload
} else {
const formData = payload.formData || {}
const requestData = new FormData()
requestData.append(payload.name, payload.contents, payload.filename)
Object.keys(formData).forEach(function (meta) {
requestData.append(meta, JSON.stringify(formData[meta]))
})
requestOptions.body = requestData
}
if (
requestOptions.json &&
requestOptions.body &&
typeof requestOptions.body === 'object'
) {
requestOptions.body = JSON.stringify(requestOptions.body)
}
const t = transport(uri, requestOptions)
return t.then(
async (response: Response): Promise<RokkaResponseInterface> => {
const rokkaResponse = RokkaResponse(response)
rokkaResponse.body = await getResponseBody(
response,
options.fallBackToText,
)
if (response.status >= 400) {
rokkaResponse.error = rokkaResponse.body
rokkaResponse.message =
response.status + ' - ' + JSON.stringify(rokkaResponse.body)
// if response is a 401 and we have apiTokenSetCallback, clear the token
if (
response.status === 401 &&
state.apiTokenSetCallback &&
state.apiTokenGetCallback
) {
// but not when the authorization header changed in the meantime
if (
headers['Authorization'] ===
'Bearer ' + state.apiTokenGetCallback()
) {
state.apiTokenSetCallback('', null)
state.apiTokenPayload = null
}
}
throw rokkaResponse
}
return rokkaResponse
},
)
},
}
return Object.assign({}, modules(state))
} | minTimeout?: number // minimum milliseconds between retries (default: 1000) | random_line_split |
index.ts | import transport from './transport'
import modules, { RokkaApi } from './apis'
import RokkaResponse, {
RokkaResponse as RokkaResponseInterface,
} from './response'
import { stringify } from 'query-string'
import FormData from 'form-data'
import user, {
ApiTokenGetCallback,
ApiTokenPayload,
ApiTokenSetCallback,
RequestQueryParamsNewToken,
} from './apis/user'
import { _getTokenPayload, _isTokenExpiring, _tokenValidFor } from './utils'
export interface Config {
apiKey?: string
apiHost?: string // default: https://api.rokka.io
apiVersion?: number | string // default: 1
apiTokenGetCallback?: ApiTokenGetCallback
apiTokenSetCallback?: ApiTokenSetCallback
apiTokenRefreshTime?: number
apiTokenOptions?: RequestQueryParamsNewToken | null
renderHost?: string // default: https://{organization}.rokka.io
debug?: boolean // default: false
transport?: {
requestTimeout?: number // milliseconds to wait for rokka server response (default: 30000)
retries?: number // number of retries when API response is 429 (default: 10)
minTimeout?: number // minimum milliseconds between retries (default: 1000)
maxTimeout?: number // maximum milliseconds between retries (default: 10000)
randomize?: boolean // randomize time between retries (default: true)
agent?: any
}
}
interface RequestOptions {
headers?: object
noAuthHeaders?: boolean
fallBackToText?: boolean
form?: boolean
multipart?: boolean
forceUseApiKey?: boolean
noTokenRefresh?: boolean
host?: string
}
const defaults = {
apiHost: 'https://api.rokka.io',
renderHost: 'https://{organization}.rokka.io',
apiVersion: 1,
transport: {
requestTimeout: 30000,
retries: 10,
minTimeout: 1000,
maxTimeout: 10000,
randomize: true,
factor: 2,
debug: false,
},
}
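// Pick the body representation: JSON when the content-type says so, text for
// 201/204 responses or when the caller asked to fall back to text, raw body otherwise.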
const getResponseBody = async (response: Response, fallbackToText = false) => {
if (response.headers && response.json) {
if (response.headers.get('content-type') === 'application/json') {
return response.json()
}
if (response.status === 204 || response.status === 201 || fallbackToText) {
return response.text()
}
return response.body
}
return response.body
}
interface Request {
method: string
headers: { 'Api-Version'?: string | number; 'Api-Key'?: string }
timeout: number | undefined
retries: number | undefined | any
retryDelay: (attempt: number) => number
form: {}
json: boolean
body: any
agent?: any
}
export interface RequestQueryParams {
[key: string]: string | number | boolean | undefined | null
}
export interface State {
apiKey: string | undefined
apiHost: string
apiVersion: number | string
renderHost: string
transportOptions: any
apiTokenGetCallback?: ApiTokenGetCallback
apiTokenSetCallback?: ApiTokenSetCallback
apiTokenPayload: ApiTokenPayload | null
apiTokenOptions?: RequestQueryParamsNewToken | null
apiTokenRefreshTime: number
request(
method: string,
path: string,
payload?: any | null | undefined,
queryParams?: RequestQueryParams | null,
options?: RequestOptions | undefined | null,
): Promise<RokkaResponseInterface>
}
/**
* Initializing the rokka client.
*
* ```js
* const rokka = require('rokka')({
* apiKey: 'apikey', // required for certain operations
* apiTokenGetCallback?: <() => string> // return JWT token instead of API Key
* apiTokenSetCallback?: <((token: string, payload?: object|null) => void)> // Stores a newly retrieved JWT token
* apiTokenOptions?: <object> // The rokka.user.getNewToken query parameter options, default: {}
 * apiTokenRefreshTime?: <number> // how many seconds before expiry the token should be refreshed, default: 3600
* apiHost: '<url>', // default: https://api.rokka.io
* apiVersion: <number>, // default: 1
* renderHost: '<url>', // default: https://{organization}.rokka.io
* debug: true, // default: false
* transport: {
* requestTimeout: <number>, // milliseconds to wait for rokka server response (default: 30000)
* retries: <number>, // number of retries when API response is 429 (default: 10)
* minTimeout: <number>, // minimum milliseconds between retries (default: 1000)
* maxTimeout: <number>, // maximum milliseconds between retries (default: 10000)
* randomize: <boolean> // randomize time between retries (default: true)
* agent?: <any> // an agent to be used with node-fetch, eg. if you need a proxy (default: undefined)
* }
* });
* ```
*
* All properties are optional since certain calls don't require credentials.
*
* If you need to use a proxy, you can do the following
*
* ```js
* import { HttpsProxyAgent } from 'https-proxy-agent'
*
* const rokka = require('rokka')({
* apiKey: 'apikey'
* transport: {agent: new HttpsProxyAgent(proxy)}
* });
* ```
*
* @param {Object} [config={}] configuration properties
* @return {Object}
*
* @module rokka
*/
export default (config: Config = {}): RokkaApi => {
const state: State = {
// config
apiKey: config.apiKey,
apiHost: config.apiHost || defaults.apiHost,
apiTokenGetCallback: config.apiTokenGetCallback || null,
apiTokenSetCallback: config.apiTokenSetCallback || null,
apiTokenPayload: null,
apiTokenOptions: config.apiTokenOptions || {},
apiTokenRefreshTime: config.apiTokenRefreshTime || 3600,
apiVersion: config.apiVersion || defaults.apiVersion,
renderHost: config.renderHost || defaults.renderHost,
transportOptions: Object.assign(defaults.transport, config.transport),
// functions
async request(
method: string,
path: string,
payload: any | null = null,
queryParams: {
[key: string]: string | number | boolean
} | null = null,
options: RequestOptions = {
noAuthHeaders: false,
fallBackToText: false,
forceUseApiKey: false,
noTokenRefresh: false,
host: undefined,
},
): Promise<RokkaResponseInterface> | ,
}
return Object.assign({}, modules(state))
}
| {
let uri = [options.host || state.apiHost, path].join('/')
if (
queryParams &&
!(
Object.entries(queryParams).length === 0 &&
queryParams.constructor === Object
)
) {
uri += '?' + stringify(queryParams)
}
const headers: {
'Api-Version'?: string | number
'Api-Key'?: string
Authorization?: string
} = options.headers || {}
headers['Api-Version'] = state.apiVersion
if (options.noAuthHeaders !== true) {
if (!options.forceUseApiKey && state.apiTokenGetCallback) {
let apiToken = state.apiTokenGetCallback()
        // fill apiTokenPayload if it's not set yet; this happens when you load a page, for example
if (!state.apiTokenPayload) {
state.apiTokenPayload = _getTokenPayload(apiToken)
}
// get a new token, when it's somehow almost expired, but should still be valid
const isTokenValid =
apiToken &&
state.apiTokenPayload?.rn === true &&
_tokenValidFor(state.apiTokenPayload?.exp, apiToken) > 0
// if it's not valid, it's also not expiring...
const isTokenExpiring =
isTokenValid &&
_isTokenExpiring(
state.apiTokenPayload?.exp,
apiToken,
state.apiTokenRefreshTime,
)
if (
(!options.noTokenRefresh && isTokenValid && isTokenExpiring) ||
(!isTokenValid && state.apiKey) //or do we have an apiKey
) {
try {
apiToken = (await user(state).user.getNewToken(state.apiKey)).body
.token
} catch (e: any) {
          // clear the api token so that we can enforce a new login;
          // usually a 403 means that we couldn't get a new token (e.g. when trying to get a longer expiry time)
if (e && e.statusCode === 403 && state.apiTokenSetCallback) {
state.apiTokenSetCallback('', null)
}
}
}
if (!apiToken) {
const code = 401
throw {
error: {
code,
message: 'No API token (or renewing it did not work correctly)',
},
status: code,
}
}
        // set apiTokenExpiry, if not set, to avoid having to decode it all the time
headers['Authorization'] = `Bearer ${apiToken}`
} else {
if (!state.apiKey) {
return Promise.reject(
new Error('Missing required property `apiKey`'),
)
}
headers['Api-Key'] = state.apiKey
}
}
const retryDelay = (attempt: number) => {
// from https://github.com/tim-kos/node-retry/blob/master/lib/retry.js
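      // exponential backoff: minTimeout * factor^attempt, optionally scaled by a random
      // factor in [1, 2), capped at maxTimeout (with the defaults: ~1s, ~2s, ~4s, ... up to 10s)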
const random = state.transportOptions.randomize ? Math.random() + 1 : 1
const timeout = Math.round(
random *
state.transportOptions.minTimeout *
Math.pow(state.transportOptions.factor, attempt),
)
return Math.min(timeout, state.transportOptions.maxTimeout)
}
const requestOptions: Request = {
method: method,
headers: headers,
timeout: state.transportOptions.requestTimeout,
retries: state.transportOptions.retries,
retryDelay,
form: {},
json: false,
body: undefined,
agent: state.transportOptions.agent,
}
if (options.form === true) {
const formData = payload || {}
const requestData = new FormData()
Object.keys(formData).forEach(function (meta) {
requestData.append(meta, formData[meta])
})
requestOptions.body = requestData
} else if (options.multipart !== true) {
requestOptions.json = true
requestOptions.body = payload
} else {
const formData = payload.formData || {}
const requestData = new FormData()
requestData.append(payload.name, payload.contents, payload.filename)
Object.keys(formData).forEach(function (meta) {
requestData.append(meta, JSON.stringify(formData[meta]))
})
requestOptions.body = requestData
}
if (
requestOptions.json &&
requestOptions.body &&
typeof requestOptions.body === 'object'
) {
requestOptions.body = JSON.stringify(requestOptions.body)
}
const t = transport(uri, requestOptions)
return t.then(
async (response: Response): Promise<RokkaResponseInterface> => {
const rokkaResponse = RokkaResponse(response)
rokkaResponse.body = await getResponseBody(
response,
options.fallBackToText,
)
if (response.status >= 400) {
rokkaResponse.error = rokkaResponse.body
rokkaResponse.message =
response.status + ' - ' + JSON.stringify(rokkaResponse.body)
// if response is a 401 and we have apiTokenSetCallback, clear the token
if (
response.status === 401 &&
state.apiTokenSetCallback &&
state.apiTokenGetCallback
) {
// but not when the authorization header changed in the meantime
if (
headers['Authorization'] ===
'Bearer ' + state.apiTokenGetCallback()
) {
state.apiTokenSetCallback('', null)
state.apiTokenPayload = null
}
}
throw rokkaResponse
}
return rokkaResponse
},
)
} | identifier_body |
index.ts | import transport from './transport'
import modules, { RokkaApi } from './apis'
import RokkaResponse, {
RokkaResponse as RokkaResponseInterface,
} from './response'
import { stringify } from 'query-string'
import FormData from 'form-data'
import user, {
ApiTokenGetCallback,
ApiTokenPayload,
ApiTokenSetCallback,
RequestQueryParamsNewToken,
} from './apis/user'
import { _getTokenPayload, _isTokenExpiring, _tokenValidFor } from './utils'
export interface Config {
apiKey?: string
apiHost?: string // default: https://api.rokka.io
apiVersion?: number | string // default: 1
apiTokenGetCallback?: ApiTokenGetCallback
apiTokenSetCallback?: ApiTokenSetCallback
apiTokenRefreshTime?: number
apiTokenOptions?: RequestQueryParamsNewToken | null
renderHost?: string // default: https://{organization}.rokka.io
debug?: boolean // default: false
transport?: {
requestTimeout?: number // milliseconds to wait for rokka server response (default: 30000)
retries?: number // number of retries when API response is 429 (default: 10)
minTimeout?: number // minimum milliseconds between retries (default: 1000)
maxTimeout?: number // maximum milliseconds between retries (default: 10000)
randomize?: boolean // randomize time between retries (default: true)
agent?: any
}
}
interface RequestOptions {
headers?: object
noAuthHeaders?: boolean
fallBackToText?: boolean
form?: boolean
multipart?: boolean
forceUseApiKey?: boolean
noTokenRefresh?: boolean
host?: string
}
const defaults = {
apiHost: 'https://api.rokka.io',
renderHost: 'https://{organization}.rokka.io',
apiVersion: 1,
transport: {
requestTimeout: 30000,
retries: 10,
minTimeout: 1000,
maxTimeout: 10000,
randomize: true,
factor: 2,
debug: false,
},
}
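// Pick the body representation: JSON when the content-type says so, text for
// 201/204 responses or when the caller asked to fall back to text, raw body otherwise.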
const getResponseBody = async (response: Response, fallbackToText = false) => {
if (response.headers && response.json) {
if (response.headers.get('content-type') === 'application/json') {
return response.json()
}
if (response.status === 204 || response.status === 201 || fallbackToText) {
return response.text()
}
return response.body
}
return response.body
}
interface Request {
method: string
headers: { 'Api-Version'?: string | number; 'Api-Key'?: string }
timeout: number | undefined
retries: number | undefined | any
retryDelay: (attempt: number) => number
form: {}
json: boolean
body: any
agent?: any
}
export interface RequestQueryParams {
[key: string]: string | number | boolean | undefined | null
}
export interface State {
apiKey: string | undefined
apiHost: string
apiVersion: number | string
renderHost: string
transportOptions: any
apiTokenGetCallback?: ApiTokenGetCallback
apiTokenSetCallback?: ApiTokenSetCallback
apiTokenPayload: ApiTokenPayload | null
apiTokenOptions?: RequestQueryParamsNewToken | null
apiTokenRefreshTime: number
request(
method: string,
path: string,
payload?: any | null | undefined,
queryParams?: RequestQueryParams | null,
options?: RequestOptions | undefined | null,
): Promise<RokkaResponseInterface>
}
/**
* Initializing the rokka client.
*
* ```js
* const rokka = require('rokka')({
* apiKey: 'apikey', // required for certain operations
* apiTokenGetCallback?: <() => string> // return JWT token instead of API Key
* apiTokenSetCallback?: <((token: string, payload?: object|null) => void)> // Stores a newly retrieved JWT token
* apiTokenOptions?: <object> // The rokka.user.getNewToken query parameter options, default: {}
 * apiTokenRefreshTime?: <number> // how many seconds before expiry the token should be refreshed, default: 3600
* apiHost: '<url>', // default: https://api.rokka.io
* apiVersion: <number>, // default: 1
* renderHost: '<url>', // default: https://{organization}.rokka.io
* debug: true, // default: false
* transport: {
* requestTimeout: <number>, // milliseconds to wait for rokka server response (default: 30000)
* retries: <number>, // number of retries when API response is 429 (default: 10)
* minTimeout: <number>, // minimum milliseconds between retries (default: 1000)
* maxTimeout: <number>, // maximum milliseconds between retries (default: 10000)
* randomize: <boolean> // randomize time between retries (default: true)
* agent?: <any> // an agent to be used with node-fetch, eg. if you need a proxy (default: undefined)
* }
* });
* ```
*
* All properties are optional since certain calls don't require credentials.
*
* If you need to use a proxy, you can do the following
*
* ```js
* import { HttpsProxyAgent } from 'https-proxy-agent'
*
* const rokka = require('rokka')({
* apiKey: 'apikey'
* transport: {agent: new HttpsProxyAgent(proxy)}
* });
* ```
*
* @param {Object} [config={}] configuration properties
* @return {Object}
*
* @module rokka
*/
export default (config: Config = {}): RokkaApi => {
const state: State = {
// config
apiKey: config.apiKey,
apiHost: config.apiHost || defaults.apiHost,
apiTokenGetCallback: config.apiTokenGetCallback || null,
apiTokenSetCallback: config.apiTokenSetCallback || null,
apiTokenPayload: null,
apiTokenOptions: config.apiTokenOptions || {},
apiTokenRefreshTime: config.apiTokenRefreshTime || 3600,
apiVersion: config.apiVersion || defaults.apiVersion,
renderHost: config.renderHost || defaults.renderHost,
transportOptions: Object.assign(defaults.transport, config.transport),
// functions
async | (
method: string,
path: string,
payload: any | null = null,
queryParams: {
[key: string]: string | number | boolean
} | null = null,
options: RequestOptions = {
noAuthHeaders: false,
fallBackToText: false,
forceUseApiKey: false,
noTokenRefresh: false,
host: undefined,
},
): Promise<RokkaResponseInterface> {
let uri = [options.host || state.apiHost, path].join('/')
if (
queryParams &&
!(
Object.entries(queryParams).length === 0 &&
queryParams.constructor === Object
)
) {
uri += '?' + stringify(queryParams)
}
const headers: {
'Api-Version'?: string | number
'Api-Key'?: string
Authorization?: string
} = options.headers || {}
headers['Api-Version'] = state.apiVersion
if (options.noAuthHeaders !== true) {
if (!options.forceUseApiKey && state.apiTokenGetCallback) {
let apiToken = state.apiTokenGetCallback()
          // fill apiTokenPayload if it's not set yet; this happens when you load a page, for example
if (!state.apiTokenPayload) {
state.apiTokenPayload = _getTokenPayload(apiToken)
}
// get a new token, when it's somehow almost expired, but should still be valid
const isTokenValid =
apiToken &&
state.apiTokenPayload?.rn === true &&
_tokenValidFor(state.apiTokenPayload?.exp, apiToken) > 0
// if it's not valid, it's also not expiring...
const isTokenExpiring =
isTokenValid &&
_isTokenExpiring(
state.apiTokenPayload?.exp,
apiToken,
state.apiTokenRefreshTime,
)
if (
(!options.noTokenRefresh && isTokenValid && isTokenExpiring) ||
(!isTokenValid && state.apiKey) //or do we have an apiKey
) {
try {
apiToken = (await user(state).user.getNewToken(state.apiKey)).body
.token
} catch (e: any) {
            // clear the api token so that we can enforce a new login;
            // usually a 403 means that we couldn't get a new token (e.g. when trying to get a longer expiry time)
if (e && e.statusCode === 403 && state.apiTokenSetCallback) {
state.apiTokenSetCallback('', null)
}
}
}
if (!apiToken) {
const code = 401
throw {
error: {
code,
message: 'No API token (or renewing it did not work correctly)',
},
status: code,
}
}
          // set apiTokenExpiry, if not set, to avoid having to decode it all the time
headers['Authorization'] = `Bearer ${apiToken}`
} else {
if (!state.apiKey) {
return Promise.reject(
new Error('Missing required property `apiKey`'),
)
}
headers['Api-Key'] = state.apiKey
}
}
const retryDelay = (attempt: number) => {
// from https://github.com/tim-kos/node-retry/blob/master/lib/retry.js
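        // exponential backoff: minTimeout * factor^attempt, optionally scaled by a random
        // factor in [1, 2), capped at maxTimeout (with the defaults: ~1s, ~2s, ~4s, ... up to 10s)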
const random = state.transportOptions.randomize ? Math.random() + 1 : 1
const timeout = Math.round(
random *
state.transportOptions.minTimeout *
Math.pow(state.transportOptions.factor, attempt),
)
return Math.min(timeout, state.transportOptions.maxTimeout)
}
const requestOptions: Request = {
method: method,
headers: headers,
timeout: state.transportOptions.requestTimeout,
retries: state.transportOptions.retries,
retryDelay,
form: {},
json: false,
body: undefined,
agent: state.transportOptions.agent,
}
if (options.form === true) {
const formData = payload || {}
const requestData = new FormData()
Object.keys(formData).forEach(function (meta) {
requestData.append(meta, formData[meta])
})
requestOptions.body = requestData
} else if (options.multipart !== true) {
requestOptions.json = true
requestOptions.body = payload
} else {
const formData = payload.formData || {}
const requestData = new FormData()
requestData.append(payload.name, payload.contents, payload.filename)
Object.keys(formData).forEach(function (meta) {
requestData.append(meta, JSON.stringify(formData[meta]))
})
requestOptions.body = requestData
}
if (
requestOptions.json &&
requestOptions.body &&
typeof requestOptions.body === 'object'
) {
requestOptions.body = JSON.stringify(requestOptions.body)
}
const t = transport(uri, requestOptions)
return t.then(
async (response: Response): Promise<RokkaResponseInterface> => {
const rokkaResponse = RokkaResponse(response)
rokkaResponse.body = await getResponseBody(
response,
options.fallBackToText,
)
if (response.status >= 400) {
rokkaResponse.error = rokkaResponse.body
rokkaResponse.message =
response.status + ' - ' + JSON.stringify(rokkaResponse.body)
// if response is a 401 and we have apiTokenSetCallback, clear the token
if (
response.status === 401 &&
state.apiTokenSetCallback &&
state.apiTokenGetCallback
) {
// but not when the authorization header changed in the meantime
if (
headers['Authorization'] ===
'Bearer ' + state.apiTokenGetCallback()
) {
state.apiTokenSetCallback('', null)
state.apiTokenPayload = null
}
}
throw rokkaResponse
}
return rokkaResponse
},
)
},
}
return Object.assign({}, modules(state))
}
| request | identifier_name |
index.ts | import transport from './transport'
import modules, { RokkaApi } from './apis'
import RokkaResponse, {
RokkaResponse as RokkaResponseInterface,
} from './response'
import { stringify } from 'query-string'
import FormData from 'form-data'
import user, {
ApiTokenGetCallback,
ApiTokenPayload,
ApiTokenSetCallback,
RequestQueryParamsNewToken,
} from './apis/user'
import { _getTokenPayload, _isTokenExpiring, _tokenValidFor } from './utils'
export interface Config {
apiKey?: string
apiHost?: string // default: https://api.rokka.io
apiVersion?: number | string // default: 1
apiTokenGetCallback?: ApiTokenGetCallback
apiTokenSetCallback?: ApiTokenSetCallback
apiTokenRefreshTime?: number
apiTokenOptions?: RequestQueryParamsNewToken | null
renderHost?: string // default: https://{organization}.rokka.io
debug?: boolean // default: false
transport?: {
requestTimeout?: number // milliseconds to wait for rokka server response (default: 30000)
retries?: number // number of retries when API response is 429 (default: 10)
minTimeout?: number // minimum milliseconds between retries (default: 1000)
maxTimeout?: number // maximum milliseconds between retries (default: 10000)
randomize?: boolean // randomize time between retries (default: true)
agent?: any
}
}
interface RequestOptions {
headers?: object
noAuthHeaders?: boolean
fallBackToText?: boolean
form?: boolean
multipart?: boolean
forceUseApiKey?: boolean
noTokenRefresh?: boolean
host?: string
}
const defaults = {
apiHost: 'https://api.rokka.io',
renderHost: 'https://{organization}.rokka.io',
apiVersion: 1,
transport: {
requestTimeout: 30000,
retries: 10,
minTimeout: 1000,
maxTimeout: 10000,
randomize: true,
factor: 2,
debug: false,
},
}
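// Pick the body representation: JSON when the content-type says so, text for
// 201/204 responses or when the caller asked to fall back to text, raw body otherwise.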
const getResponseBody = async (response: Response, fallbackToText = false) => {
if (response.headers && response.json) {
if (response.headers.get('content-type') === 'application/json') {
return response.json()
}
if (response.status === 204 || response.status === 201 || fallbackToText) {
return response.text()
}
return response.body
}
return response.body
}
interface Request {
method: string
headers: { 'Api-Version'?: string | number; 'Api-Key'?: string }
timeout: number | undefined
retries: number | undefined | any
retryDelay: (attempt: number) => number
form: {}
json: boolean
body: any
agent?: any
}
export interface RequestQueryParams {
[key: string]: string | number | boolean | undefined | null
}
export interface State {
apiKey: string | undefined
apiHost: string
apiVersion: number | string
renderHost: string
transportOptions: any
apiTokenGetCallback?: ApiTokenGetCallback
apiTokenSetCallback?: ApiTokenSetCallback
apiTokenPayload: ApiTokenPayload | null
apiTokenOptions?: RequestQueryParamsNewToken | null
apiTokenRefreshTime: number
request(
method: string,
path: string,
payload?: any | null | undefined,
queryParams?: RequestQueryParams | null,
options?: RequestOptions | undefined | null,
): Promise<RokkaResponseInterface>
}
/**
* Initializing the rokka client.
*
* ```js
* const rokka = require('rokka')({
* apiKey: 'apikey', // required for certain operations
* apiTokenGetCallback?: <() => string> // return JWT token instead of API Key
* apiTokenSetCallback?: <((token: string, payload?: object|null) => void)> // Stores a newly retrieved JWT token
* apiTokenOptions?: <object> // The rokka.user.getNewToken query parameter options, default: {}
 * apiTokenRefreshTime?: <number> // how many seconds before expiry the token should be refreshed, default: 3600
* apiHost: '<url>', // default: https://api.rokka.io
* apiVersion: <number>, // default: 1
* renderHost: '<url>', // default: https://{organization}.rokka.io
* debug: true, // default: false
* transport: {
* requestTimeout: <number>, // milliseconds to wait for rokka server response (default: 30000)
* retries: <number>, // number of retries when API response is 429 (default: 10)
* minTimeout: <number>, // minimum milliseconds between retries (default: 1000)
* maxTimeout: <number>, // maximum milliseconds between retries (default: 10000)
* randomize: <boolean> // randomize time between retries (default: true)
* agent?: <any> // an agent to be used with node-fetch, eg. if you need a proxy (default: undefined)
* }
* });
* ```
*
* All properties are optional since certain calls don't require credentials.
*
* If you need to use a proxy, you can do the following
*
* ```js
* import { HttpsProxyAgent } from 'https-proxy-agent'
*
* const rokka = require('rokka')({
* apiKey: 'apikey'
* transport: {agent: new HttpsProxyAgent(proxy)}
* });
* ```
*
* @param {Object} [config={}] configuration properties
* @return {Object}
*
* @module rokka
*/
export default (config: Config = {}): RokkaApi => {
const state: State = {
// config
apiKey: config.apiKey,
apiHost: config.apiHost || defaults.apiHost,
apiTokenGetCallback: config.apiTokenGetCallback || null,
apiTokenSetCallback: config.apiTokenSetCallback || null,
apiTokenPayload: null,
apiTokenOptions: config.apiTokenOptions || {},
apiTokenRefreshTime: config.apiTokenRefreshTime || 3600,
apiVersion: config.apiVersion || defaults.apiVersion,
renderHost: config.renderHost || defaults.renderHost,
transportOptions: Object.assign(defaults.transport, config.transport),
// functions
async request(
method: string,
path: string,
payload: any | null = null,
queryParams: {
[key: string]: string | number | boolean
} | null = null,
options: RequestOptions = {
noAuthHeaders: false,
fallBackToText: false,
forceUseApiKey: false,
noTokenRefresh: false,
host: undefined,
},
): Promise<RokkaResponseInterface> {
let uri = [options.host || state.apiHost, path].join('/')
if (
queryParams &&
!(
Object.entries(queryParams).length === 0 &&
queryParams.constructor === Object
)
) {
uri += '?' + stringify(queryParams)
}
const headers: {
'Api-Version'?: string | number
'Api-Key'?: string
Authorization?: string
} = options.headers || {}
headers['Api-Version'] = state.apiVersion
if (options.noAuthHeaders !== true) {
if (!options.forceUseApiKey && state.apiTokenGetCallback) {
let apiToken = state.apiTokenGetCallback()
          // fill apiTokenPayload if it's not set yet; this happens when you load a page, for example
if (!state.apiTokenPayload) {
state.apiTokenPayload = _getTokenPayload(apiToken)
}
// get a new token, when it's somehow almost expired, but should still be valid
const isTokenValid =
apiToken &&
state.apiTokenPayload?.rn === true &&
_tokenValidFor(state.apiTokenPayload?.exp, apiToken) > 0
// if it's not valid, it's also not expiring...
const isTokenExpiring =
isTokenValid &&
_isTokenExpiring(
state.apiTokenPayload?.exp,
apiToken,
state.apiTokenRefreshTime,
)
if (
(!options.noTokenRefresh && isTokenValid && isTokenExpiring) ||
(!isTokenValid && state.apiKey) //or do we have an apiKey
) {
try {
apiToken = (await user(state).user.getNewToken(state.apiKey)).body
.token
} catch (e: any) {
            // clear the api token so that we can enforce a new login;
            // usually a 403 means that we couldn't get a new token (e.g. when trying to get a longer expiry time)
if (e && e.statusCode === 403 && state.apiTokenSetCallback) {
state.apiTokenSetCallback('', null)
}
}
}
if (!apiToken) {
const code = 401
throw {
error: {
code,
message: 'No API token (or renewing it did not work correctly)',
},
status: code,
}
}
          // set apiTokenExpiry, if not set, to avoid having to decode it all the time
headers['Authorization'] = `Bearer ${apiToken}`
} else {
if (!state.apiKey) {
return Promise.reject(
new Error('Missing required property `apiKey`'),
)
}
headers['Api-Key'] = state.apiKey
}
}
const retryDelay = (attempt: number) => {
// from https://github.com/tim-kos/node-retry/blob/master/lib/retry.js
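        // exponential backoff: minTimeout * factor^attempt, optionally scaled by a random
        // factor in [1, 2), capped at maxTimeout (with the defaults: ~1s, ~2s, ~4s, ... up to 10s)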
const random = state.transportOptions.randomize ? Math.random() + 1 : 1
const timeout = Math.round(
random *
state.transportOptions.minTimeout *
Math.pow(state.transportOptions.factor, attempt),
)
return Math.min(timeout, state.transportOptions.maxTimeout)
}
const requestOptions: Request = {
method: method,
headers: headers,
timeout: state.transportOptions.requestTimeout,
retries: state.transportOptions.retries,
retryDelay,
form: {},
json: false,
body: undefined,
agent: state.transportOptions.agent,
}
if (options.form === true) {
const formData = payload || {}
const requestData = new FormData()
Object.keys(formData).forEach(function (meta) {
requestData.append(meta, formData[meta])
})
requestOptions.body = requestData
} else if (options.multipart !== true) {
requestOptions.json = true
requestOptions.body = payload
} else |
if (
requestOptions.json &&
requestOptions.body &&
typeof requestOptions.body === 'object'
) {
requestOptions.body = JSON.stringify(requestOptions.body)
}
const t = transport(uri, requestOptions)
return t.then(
async (response: Response): Promise<RokkaResponseInterface> => {
const rokkaResponse = RokkaResponse(response)
rokkaResponse.body = await getResponseBody(
response,
options.fallBackToText,
)
if (response.status >= 400) {
rokkaResponse.error = rokkaResponse.body
rokkaResponse.message =
response.status + ' - ' + JSON.stringify(rokkaResponse.body)
// if response is a 401 and we have apiTokenSetCallback, clear the token
if (
response.status === 401 &&
state.apiTokenSetCallback &&
state.apiTokenGetCallback
) {
// but not when the authorization header changed in the meantime
if (
headers['Authorization'] ===
'Bearer ' + state.apiTokenGetCallback()
) {
state.apiTokenSetCallback('', null)
state.apiTokenPayload = null
}
}
throw rokkaResponse
}
return rokkaResponse
},
)
},
}
return Object.assign({}, modules(state))
}
| {
const formData = payload.formData || {}
const requestData = new FormData()
requestData.append(payload.name, payload.contents, payload.filename)
Object.keys(formData).forEach(function (meta) {
requestData.append(meta, JSON.stringify(formData[meta]))
})
requestOptions.body = requestData
} | conditional_block |
test-checkpoint.py | {
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# Dependencies\n",
"from bs4 import BeautifulSoup\n",
"import requests\n",
"import pandas as pd\n",
"import pymongo\n",
"import time\n",
"import numpy as np\n",
"from splinter import Browser\n",
"from webdriver_manager.chrome import ChromeDriverManager"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Step 1 - Scraping\n",
"<hr>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### NASA Mars News Site\n",
"<p> Scrape NASA Mars news site for latest headlines and preview text"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[WDM] - ====== WebDriver manager ======\n",
"[WDM] - Current google-chrome version is 88.0.4324\n",
"[WDM] - Get LATEST driver version for 88.0.4324\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"[WDM] - Driver [C:\\Users\\WelanR_01\\.wdm\\drivers\\chromedriver\\win32\\88.0.4324.96\\chromedriver.exe] found in cache\n"
]
}
],
"source": [
"executable_path = {'executable_path': ChromeDriverManager().install()}\n",
"browser = Browser('chrome', **executable_path, headless=False)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# Scrape the NASA Mars News Site and collect the latest News Title and Paragraph Text. \n",
"# Assign the text to variables for later reference.\n",
"\n",
"# URL of page to be scraped\n",
"url = 'https://mars.nasa.gov/news/'\n",
"\n",
"# Retrieve page in splinter browser\n",
"browser.visit(url)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# HTML object\n",
"html = browser.html\n",
"\n",
"# Retrieve the parent divs for all articles\n",
"soup = BeautifulSoup(html, 'lxml')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Minor gotcha: website needs to be run through Splinter before scraping with BeautifulSoup, otherwise it won't appear in BeautifulSoup's scrape"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [], | "source": [
"# Retrieve the parent divs for all headlines and preview text\n",
"results = soup.find_all(\"div\", class_=\"list_text\")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"-----------------\n",
"Testing Proves Its Worth With Successful Mars Parachute Deployment\n",
"The giant canopy that helped land Perseverance on Mars was tested here on Earth at NASA’s Wallops Flight Facility in Virginia.\n",
"-----------------\n",
"NASA's Perseverance Rover Gives High-Definition Panoramic View of Landing Site\n",
"A 360-degree panorama taken by the rover’s Mastcam-Z instrument will be discussed during a public video chat this Thursday.\n",
"-----------------\n",
"Nearly 11 Million Names of Earthlings are on Mars Perseverance\n",
"When the Perseverance rover safely touched down on the Martian surface, inside Jezero Crater, on Feb. 18, 2021, it was also a safe landing for the nearly 11 million names on board.\n",
"-----------------\n",
"NASA's Mars Perseverance Rover Provides Front-Row Seat to Landing, First Audio Recording of Red Planet \n",
"The agency’s newest rover captured first-of-its kind footage of its Feb. 18 touchdown and has recorded audio of Martian wind.\n",
"\n",
"\n",
"-----------------\n",
"NASA to Reveal New Video, Images From Mars Perseverance Rover\n",
"First-of-its kind footage from the agency’s newest rover will be presented during a briefing this morning.\n",
"-----------------\n",
"NASA's Mars Helicopter Reports In \n",
"The technology demonstration has phoned home from where it is attached to the belly of NASA’s Perseverance rover. \n",
"-----------------\n",
"NASA's Perseverance Rover Sends Sneak Peek of Mars Landing\n",
"The six-wheeled robot’s latest data since touching down yesterday include a hi-res image captured as the rover’s jetpack lowered it to the ground.\n",
"-----------------\n",
"Touchdown! NASA's Mars Perseverance Rover Safely Lands on Red Planet\n",
"The agency’s latest and most complex mission to the Red Planet has touched down at Jezero Crater. Now it’s time to begin testing the health of the rover. \n",
"-----------------\n",
"Searching for Life in NASA's Perseverance Mars Samples\n",
"When the agency’s newest rover mission searches for fossilized microscopic life on the Red Planet, how will scientists know whether they’ve found it?\n",
"-----------------\n",
"The Mars Relay Network Connects Us to NASA's Martian Explorers\n",
"A tightly choreographed dance between NASA’s Deep Space Network and Mars orbiters will keep the agency’s Perseverance in touch with Earth during landing and beyond.\n",
"-----------------\n",
"NASA's Next Mars Rover Is Ready for the Most Precise Landing Yet\n",
"What to expect when the Mars 2020 Perseverance rover arrives at the Red Planet on Feb. 18, 2021.\n",
"-----------------\n",
"Sensors Prepare to Collect Data as Perseverance Enters Mars' Atmosphere\n",
"Technology will collect critical data about the harsh entry environment during Perseverance’s entry next Thursday.\n",
"-----------------\n",
"InSight Is Meeting the Challenge of Winter on Dusty Mars\n",
"As dust collects on the solar panels and winter comes to Elysium Planitia, the team is following a plan to reduce science operations in order to keep the lander safe.\n",
"-----------------\n",
"NASA Invites Public to Share Thrill of Mars Perseverance Rover Landing\n",
"Mark your calendars for live landing commentary, news briefings, livestreamed Q&As, virtual watch parties, student activities, and more.\n",
"-----------------\n",
"Tricky Terrain: Helping to Assure a Safe Rover Landing\n",
"How two new technologies will help Perseverance, NASA’s most sophisticated rover yet, touch down onto the surface of Mars this month.\n",
"-----------------\n",
"Where Should Future Astronauts Land on Mars? Follow the Water\n",
"A new NASA paper provides the most detailed map to date of near-surface water ice on the Red Planet.\n",
"-----------------\n",
"NASA's Perseverance Pays Off Back Home\n",
"Even as the Perseverance rover approaches Mars, technology on board is paying off on Earth.\n",
"-----------------\n",
"Could the Surface of Phobos Reveal Secrets of the Martian Past?\n",
"The Martian moon Phobos orbits through a stream of charged atoms and molecules that flow off the Red Planet’s atmosphere, new research shows.\n",
"-----------------\n",
"NASA's MAVEN Continues to Advance Mars Science and Telecommunications Relay Efforts\n",
"With a suite of new national and international spacecraft primed to explore the Red Planet after their arrival next month, NASA’s MAVEN mission is ready to provide support and continue its study of the Martian atmosphere.\n",
"-----------------\n",
"NASA's Perseverance Rover 22 Days From Mars Landing\n",
"Seven minutes of harrowing descent to the Red Planet is in the not-so-distant future for the agency’s Mars 2020 mission. \n",
"-----------------\n",
"6 Things to Know About NASA's Mars Helicopter on Its Way to Mars\n",
"Ingenuity, a technology experiment, is preparing to attempt the first powered, controlled flight on the Red Planet.\n",
"-----------------\n",
"NASA to Host Virtual Briefing on February Perseverance Mars Rover Landing\n",
"NASA leadership and members of the mission will discuss the agency’s latest rover, which touches down on the Red Planet on Feb. 18.\n",
"-----------------\n",
"NASA InSight's ‘Mole' Ends Its Journey on Mars\n",
"The heat probe hasn’t been able to gain the friction it needs to dig, but the mission has been granted an extension to carry on with its other science.\n",
"-----------------\n",
"Mars 2020 Perseverance Rover to Capture Sounds From the Red Planet\n",
"Audio gathered by the mission may not sound quite the same on Mars as it would to our ears on Earth. A new interactive online experience lets you sample the difference.\n",
"-----------------\n",
"NASA's Curiosity Rover Reaches Its 3,000th Day on Mars\n",
"As the rover has continued to ascend Mount Sharp, it’s found distinctive benchlike rock formations.\n",
"-----------------\n",
"Celebrate the Perseverance Rover Landing With NASA's Student Challenge\n",
"The rover touches down on the Red Planet next month, and students are invited to join the excitement by designing, building, and landing their own Mars mission. NASA can help.\n",
"-----------------\n",
"NASA Extends Exploration for Two Planetary Science Missions\n",
"The missions – Juno and InSight – have each increased our understanding of our solar system, as well as spurred new sets of diverse questions.\n",
"-----------------\n",
"7 Things to Know About the NASA Rover About to Land on Mars\n",
"The Mars 2020 Perseverance rover, which has started its approach to the Red Planet, will help answer the next logical question in Mars exploration.\n",
"-----------------\n",
"A Martian Roundtrip: NASA's Perseverance Rover Sample Tubes\n",
"Marvels of engineering, the rover's sample tubes must be tough enough to safely bring Red Planet samples on the long journey back to Earth in immaculate condition. \n",
"-----------------\n",
"NASA Moves Forward With Campaign to Return Mars Samples to Earth\n",
"During this next phase, the program will mature critical technologies and make critical design decisions as well as assess industry partnerships.\n",
"-----------------\n",
"3 Things We've Learned From NASA's Mars InSight \n",
"Scientists are finding new mysteries since the geophysics mission landed two years ago.\n",
"-----------------\n",
"From JPL's Mailroom to Mars and Beyond\n",
"Bill Allen has thrived as the mechanical systems design lead for three Mars rover missions, but he got his start as a teenager sorting letters for the NASA center.\n",
"-----------------\n",
"5 Hidden Gems Are Riding Aboard NASA's Perseverance Rover\n",
"The symbols, mottos, and small objects added to the agency's newest Mars rover serve a variety of purposes, from functional to decorative.\n",
"-----------------\n",
"MOXIE Could Help Future Rockets Launch Off Mars\n",
"NASA's Perseverance rover carries a device to convert Martian air into oxygen that, if produced on a larger scale, could be used not just for breathing, but also for fuel.\n",
"-----------------\n",
"Hear Audio From NASA's Perseverance As It Travels Through Deep Space\n",
"The first to be rigged with microphones, the agency's latest Mars rover picked up the subtle sounds of its own inner workings during interplanetary flight.\n",
"-----------------\n",
"Mars Is Getting a New Robotic Meteorologist\n",
"Sensors on NASA's Perseverance will help prepare for future human exploration by taking weather measurements and studying dust particles.\n",
"-----------------\n",
"Heat and Dust Help Launch Martian Water Into Space, Scientists Find\n",
"Scientists using an instrument aboard NASA’s Mars Atmosphere and Volatile EvolutioN, or MAVEN, spacecraft have discovered that water vapor near the surface of the Red Planet is lofted higher into the atmosphere than anyone expected was possible. \n",
"-----------------\n",
"NASA's Curiosity Takes Selfie With 'Mary Anning' on the Red Planet\n",
"The Mars rover has drilled three samples of rock in this clay-enriched region since arriving in July.\n",
"-----------------\n",
"Independent Review Indicates NASA Prepared for Mars Sample Return Campaign\n",
"NASA released an independent review report Tuesday indicating the agency is well positioned for its Mars Sample Return campaign to bring pristine samples from Mars to Earth for scientific study.\n",
"-----------------\n",
"NASA's Perseverance Rover 100 Days Out\n",
"Mark your calendars: The agency's latest rover has only about 8,640,000 seconds to go before it touches down on the Red Planet, becoming history's next Mars car.\n"
]
}
],
"source": [
"# Create empty lists to append with headlines and preview text data\n",
"headers = list()\n",
"preview_texts = list()\n",
"\n",
"# loop over results to get headlines and preview text data\n",
"for result in results:\n",
" # scrape the article header \n",
" header = result.find(\"div\", class_=\"content_title\").text\n",
" \n",
" # scrape the article subheader\n",
" preview_text = result.find(\"div\", class_=\"article_teaser_body\").text\n",
" \n",
" # print article data\n",
" print('-----------------')\n",
" print(header)\n",
" print(preview_text)\n",
"\n",
" # Append lists with headlines and preview texts\n",
" headers.append(header)\n",
" preview_texts.append(preview_text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### NASA JPL Images\n",
"<p> Scrape NASA Mars news site for latest headlines and preview text"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"# URL of JPL page to be scraped\n",
"jpl_url = \"https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html\"\n",
"\n",
"# Retrieve page in splinter browser\n",
"browser.visit(jpl_url)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"# HTML object\n",
"jpl_html = browser.html\n",
"\n",
"# Retrieve the parent divs for all articles\n",
"jpl_soup = BeautifulSoup(jpl_html, 'lxml')"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"# Retrieve featured image relative URL \n",
"featured_image = jpl_soup.find(\"img\", class_=\"headerimage\")[\"src\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Minor gotcha: JPL url needs to have the \"index.html\" suffix removed before appending the image's relative URL"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"# Clean up the webpage URL to combine with \n",
"# featured image relative URL to create full URL\n",
"clean_url = jpl_url.split(\"index.html\")"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"# Concatenate URLs to create full featured image URL\n",
"featured_image_url = clean_url[0] + featured_image"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/image/featured/mars2.jpg'"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"featured_image_url"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Mars Facts\n",
"<p> Scrape Space Facts website for space facts"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"# URL of Space Facts page to be scraped\n",
"facts_url = \"https://space-facts.com/mars/\""
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[ 0 1\n",
" 0 Equatorial Diameter: 6,792 km\n",
" 1 Polar Diameter: 6,752 km\n",
" 2 Mass: 6.39 × 10^23 kg (0.11 Earths)\n",
" 3 Moons: 2 (Phobos & Deimos)\n",
" 4 Orbit Distance: 227,943,824 km (1.38 AU)\n",
" 5 Orbit Period: 687 days (1.9 years)\n",
" 6 Surface Temperature: -87 to -5 °C\n",
" 7 First Record: 2nd millennium BC\n",
" 8 Recorded By: Egyptian astronomers,\n",
" Mars - Earth Comparison Mars Earth\n",
" 0 Diameter: 6,779 km 12,742 km\n",
" 1 Mass: 6.39 × 10^23 kg 5.97 × 10^24 kg\n",
" 2 Moons: 2 1\n",
" 3 Distance from Sun: 227,943,824 km 149,598,262 km\n",
" 4 Length of Year: 687 Earth days 365.24 days\n",
" 5 Temperature: -87 to -5 °C -88 to 58°C,\n",
" 0 1\n",
" 0 Equatorial Diameter: 6,792 km\n",
" 1 Polar Diameter: 6,752 km\n",
" 2 Mass: 6.39 × 10^23 kg (0.11 Earths)\n",
" 3 Moons: 2 (Phobos & Deimos)\n",
" 4 Orbit Distance: 227,943,824 km (1.38 AU)\n",
" 5 Orbit Period: 687 days (1.9 years)\n",
" 6 Surface Temperature: -87 to -5 °C\n",
" 7 First Record: 2nd millennium BC\n",
" 8 Recorded By: Egyptian astronomers]"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Get all tables in URL via pandas\n",
"tables = pd.read_html(facts_url)\n",
"tables"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"# Save relevant table as DataFrame\n",
"df = tables[0]"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
"# Rename column headers to appropriate names\n",
"df = df.rename(columns={0:\"Description\",1:\"Mars\"})"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"# Export table as HTML string, remove superfluous index\n",
"df.to_html('table.html', index=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Mars Hemispheres\n",
"<p> Scrape USGS Astrogeology website for Mars hemispheres facts"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"hemi_url = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n",
"\n",
"# Retrieve page in splinter browser\n",
"browser.visit(hemi_url)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"hemisphere_image_urls = list()\n",
"\n",
"for image in images:\n",
" click on links with partial match \"Enhanced\"\n",
" title = soup.find(\"h2\", class_=\"title\").text\n",
" img_url = soup.find(\"div\", class_=\"downloads\").find(\"a\")[i][\"href\"]\n",
" \n",
" title_strip = title.strip(\" \")\n",
" title_clean = f\"{title_strip[0]} {title_strip[1]}\"\n",
" \n",
" hemisphere_image_urls.append({\"title\":title_clean, \"img_url\":img_url})\n",
" click back\n",
" \n"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
"# HTML object\n",
"hemi_html = browser.html\n",
"\n",
"# Retrieve the parent divs for all articles\n",
"hemi_soup = BeautifulSoup(hemi_html, 'lxml')"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"4"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"hemi_results = hemi_soup.find_all(\"div\", class_=\"item\")"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['Cerberus', 'Schiaparelli', 'Syrtis', 'Valles']"
]
},
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"hemispheres = list()\n",
"\n",
"for r in hemi_results:\n",
" hemisphere = r.find(\"h3\").text\n",
" hemi_split = hemisphere.split(\" \")\n",
" hemispheres.append(hemi_split[0])\n",
" \n",
"hemispheres"
]
},
{
"cell_type": "code",
"execution_count": 38,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Cerberus Hemisphere\n",
"https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/cerberus_enhanced.tif/full.jpg\n",
"Schiaparelli Hemisphere\n",
"https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/schiaparelli_enhanced.tif/full.jpg\n",
"Syrtis Major\n",
"https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/syrtis_major_enhanced.tif/full.jpg\n",
"Valles Marineris\n",
"https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/valles_marineris_enhanced.tif/full.jpg\n"
]
}
],
"source": [
"hemisphere_image_urls = list()\n",
"\n",
"for hemi in hemispheres:\n",
" try:\n",
" # click on links with partial match \"Enhanced\"\n",
" browser.click_link_by_partial_text(f\"{hemi}\")\n",
" \n",
" # get full image webpage URL and save as BeautifulSoup\n",
" hemi_image_html = browser.html\n",
" image_soup = BeautifulSoup(hemi_image_html, 'lxml')\n",
" \n",
" # find full image name and URL\n",
" title = image_soup.find(\"h2\", class_=\"title\").text\n",
" img_url = image_soup.find(\"div\", class_=\"downloads\").find(\"a\")[\"href\"]\n",
"\n",
" # remove word \"Enhanced\" from hemisphere name\n",
" title_split = title.split(\" \")\n",
" title_clean = f\"{title_split[0]} {title_split[1]}\"\n",
"\n",
" # print to check the code works\n",
" print(title_clean)\n",
" print(img_url)\n",
" \n",
" # append list with dictionary\n",
" hemisphere_image_urls.append({\"title\":title_clean, \"img_url\":img_url})\n",
" \n",
" time.sleep(2)\n",
" \n",
" # click back\n",
" browser.back()\n",
" \n",
" # stop code if all images scraped \n",
" except ElementDoesNotExist:\n",
" print(\"Scraping Complete\")"
]
},
{
"cell_type": "code",
"execution_count": 39,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'title': 'Cerberus Hemisphere',\n",
" 'img_url': 'https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/cerberus_enhanced.tif/full.jpg'},\n",
" {'title': 'Schiaparelli Hemisphere',\n",
" 'img_url': 'https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/schiaparelli_enhanced.tif/full.jpg'},\n",
" {'title': 'Syrtis Major',\n",
" 'img_url': 'https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/syrtis_major_enhanced.tif/full.jpg'},\n",
" {'title': 'Valles Marineris',\n",
" 'img_url': 'https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/valles_marineris_enhanced.tif/full.jpg'}]"
]
},
"execution_count": 39,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"hemisphere_image_urls"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Step 2 - MongoDB and Flask Application\n",
"<hr>"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python [conda env:PythonData] *",
"language": "python",
"name": "conda-env-PythonData-py"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.10"
}
},
"nbformat": 4,
"nbformat_minor": 4
} | random_line_split |
|
app.component.ts | import { Component } from '@angular/core';
import { MatFormFieldControl, MatFormField } from '@angular/material';
import { SearchService } from './search.service';
declare var google: any;
@Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.scss'],
})
export class AppComponent {
title = 'trainasyougo';
script = document.createElement("script");
map;
input;
searchBox;
markers = [];
places;
searchText = "";
myMarker;
radius = 500;
searchresults = [];
selectedNo = -1;
additionalFilter = "no";
currentLat;
currentLng;
detailService;
isLoading = false;
directionsService;
directionsDisplay;
geocoder;
myLocationAddress;
travelMode = "DRIVING";
currentToDir = "";
searchFitnessService = "GYM";
infowindow;
constructor(private searchservice: SearchService) {
// this.search()
}
ngOnInit() {
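// load the Google Maps JS API dynamically and initialize the map once the script has loaded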
this.script.src = "https://maps.googleapis.com/maps/api/js?key=AIzaSyDz7iXxtwOMovXzKaaWLzStFo1tDLP5PEg&libraries=places";
this.script.onload = () => {
this.map = new google.maps.Map(document.getElementById('map'), {
center: {lat: -33.8688, lng: 151.2195},
zoom: 13,
mapTypeId: 'roadmap',
gestureHandling: 'greedy'
});
this.input = document.getElementById('pac-input');
this.searchBox = new google.maps.places.SearchBox(this.input);
var me = this;
this.map.addListener('bounds_changed', function() {
me.searchBox.setBounds(me.map.getBounds());
});
var bounds = new google.maps.LatLngBounds();
if (navigator.geolocation) {
navigator.geolocation.getCurrentPosition(function(position) {
bounds = new google.maps.LatLngBounds(new google.maps.LatLng(position.coords.latitude, position.coords.longitude));
me.map.fitBounds(bounds);
me.currentLat = position.coords.latitude;
me.currentLng = position.coords.longitude;
me.myMarker = new google.maps.Marker({
map: me.map,
title: "My location",
position: new google.maps.LatLng(position.coords.latitude, position.coords.longitude)
});
var latlng = {lat: me.currentLat, lng: me.currentLng};
me.geocoder.geocode({'location': latlng}, function(results, status) {
if (status === 'OK') {
if (results[0]) {
me.myLocationAddress = results[0].formatted_address;
} else {
window.alert('No results found');
}
} else {
window.alert('Geocoder failed due to: ' + status);
}
});
});
} else { }
this.detailService = new google.maps.places.PlacesService(this.map);
this.directionsService = new google.maps.DirectionsService;
this.directionsDisplay = new google.maps.DirectionsRenderer;
this.directionsDisplay.setMap(this.map);
this.geocoder = new google.maps.Geocoder;
this.searchBox.addListener('places_changed', function() {
me.places = me.searchBox.getPlaces();
if (me.places.length == 0) {
return;
}
// Clear out the old markers.
me.markers.forEach(function(marker) {
marker.setMap(null);
});
me.markers = [];
// For each place, get the icon, name and location.
bounds = new google.maps.LatLngBounds();
var place = me.places[0];
me.currentLat = place.geometry.location.lat();
me.currentLng = place.geometry.location.lng();
if (!place.geometry) {
console.log("Returned place contains no geometry");
return;
}
if (place.geometry.viewport) {
// Only geocodes have viewport.
bounds.union(place.geometry.viewport);
} else {
bounds.extend(place.geometry.location);
}
me.map.fitBounds(bounds);
me.search();
});
}
document.body.appendChild(this.script);
}
search() |
async select(i) {
this.selectedNo = i;
var place = this.searchresults[i];
var newMarker = this.markers[i];
var me = this;
var img = './assets/images/service.jpg';
if( place.photos && place.photos.length > 0) {
img = place.photos[0].getUrl();
}
let placeDeatil : any = await me.getPlaceDetails(place.place_id);
var open_hours = '';
if(placeDeatil.opening_hours) {
placeDeatil.opening_hours.weekday_text.forEach(t => {
open_hours += t + "<br>";
})
}
// debugger;
var contentString =
`<div class="infowindow">
<div>
<img class="thumb" src="` + img + `">
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/pin.svg">
<div class="info-text">` + placeDeatil.formatted_address +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/open_in_new.svg">
<a class="info-text" target="_blank" href="`+placeDeatil.website+`">` + placeDeatil.website +`</a>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/phone.svg">
<div class="info-text">` + placeDeatil.formatted_phone_number +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/timeline.svg">
<div class="info-text">` + open_hours +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/bookmark.svg">
<div class="info-text">Add to my favorite</div>
</div>
</div>`;
if(me.infowindow) {
me.infowindow.close();
}
me.infowindow = new google.maps.InfoWindow({
content: contentString
});
me.infowindow.open(me.map, newMarker);
}
onChangeAdditionalFilter(value) {
if(value == "no") {return;}
// alert(this.additionalFilter);
this.search();
}
changedRadius(value){
if(value == "") {return;}
this.search();
}
async getPlacesDetails() {
var me = this;
var count = 0;
this.searchresults.forEach(async function(place) {
if(place.detail) return true;
count ++;
var placeDetail = await me.getPlaceDetails(place.place_id);
place.detail = placeDetail;
});
if(count != 0) {
var timer = setTimeout(function(){ me.getPlacesDetails(); }, 200);
} else {
this.isLoading = false;
}
}
getPlaceDetails(place_id) {
var me = this;
var request = {
placeId: place_id,
fields: ['name', 'formatted_address', 'formatted_phone_number', 'website', 'opening_hours', 'place_id']
};
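// wrap the callback-based PlacesService.getDetails call in a Promise so callers can simply await it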
return new Promise((resolve, reject) => {
me.detailService.getDetails(request, function(place1, status) {
if (status === google.maps.places.PlacesServiceStatus.OK) {
resolve(place1);
}
});
});
}
directionToHere(to) {
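// request a route from the user's current address to the selected place and render it on the map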
var me = this;
this.currentToDir = to;
this.directionsService.route({
origin: me.myLocationAddress,
destination: me.currentToDir,
travelMode: me.travelMode
}, function(response, status) {
if (status === 'OK') {
me.directionsDisplay.setDirections(response);
} else {
window.alert('Directions request failed due to ' + status);
}
});
}
onChangeTravelMode() {
if(this.searchresults.length > 0) {
this.directionToHere(this.currentToDir)
}
}
}
| {
var me = this;
var pyrmont = new google.maps.LatLng(this.currentLat, this.currentLng);
var request = {
location: pyrmont,
radius: this.radius,
type: ['fitness'],
query: this.additionalFilter + ' ' + this.searchFitnessService
};
// this.isLoading = true;
this.searchresults = [];
var service = new google.maps.places.PlacesService(this.map);
var i = 0;
me.markers.forEach(function(marker) {
marker.setMap(null);
});
me.markers = [];
var bounds = new google.maps.LatLngBounds();
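// textSearch returns results in pages; the callback fires once per page, so keep requesting until pagination is exhausted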
service.textSearch(request, function(results, status, pagination) {
if (status == google.maps.places.PlacesServiceStatus.OK) {
results.forEach(function(place) {
i++;
var icon = {
url:"https://cdn.mapmarker.io/api/v1/pin?size=120&background=%230C797D&text=" + i + "&color=%23FFFFFF&voffset=2&hoffset=1&", //"assets/pins/number_" + i + ".png",
size: new google.maps.Size(150, 150),
origin: new google.maps.Point(0, 0),
anchor: new google.maps.Point(17, 75),
scaledSize: new google.maps.Size(50, 50)
};
var newMarker = new google.maps.Marker({
map: me.map,
icon: icon,
title: place.name,
position: place.geometry.location
});
me.markers.push(newMarker);
newMarker.addListener('click', async function() {
var img = './assets/images/service.jpg';
if( place.photos && place.photos.length > 0) {
img = place.photos[0].getUrl();
}
let placeDeatil : any = await me.getPlaceDetails(place.place_id);
var open_hours = '';
if(placeDeatil.opening_hours) {
placeDeatil.opening_hours.weekday_text.forEach(t => {
open_hours += t + "<br>";
})
}
// debugger;
var contentString =
`<div class="infowindow">
<div>
<img class="thumb" src="` + img + `">
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/pin.svg">
<div class="info-text">` + placeDeatil.formatted_address +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/open_in_new.svg">
<a class="info-text" target="_blank" href="`+placeDeatil.website+`">` + placeDeatil.website +`</a>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/phone.svg">
<div class="info-text">` + placeDeatil.formatted_phone_number +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/timeline.svg">
<div class="info-text">` + open_hours +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/bookmark.svg">
<div class="info-text">Add to my favorite</div>
</div>
</div>`;
if(me.infowindow) {
me.infowindow.close();
}
me.infowindow = new google.maps.InfoWindow({
content: contentString
});
me.infowindow.open(me.map, newMarker);
});
if (place.geometry.viewport) {
// Only geocodes have viewport.
bounds.union(place.geometry.viewport);
} else {
bounds.extend(place.geometry.location);
}
me.map.fitBounds(bounds);
});
me.searchresults = me.searchresults.concat(results);
if(pagination.hasNextPage) {
pagination.nextPage();
}
}
});
} | identifier_body |
app.component.ts | import { Component } from '@angular/core';
import { MatFormFieldControl, MatFormField } from '@angular/material';
import { SearchService } from './search.service';
declare var google: any;
@Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.scss'],
})
export class AppComponent {
title = 'trainasyougo';
script = document.createElement("script");
map;
input;
searchBox;
markers = [];
places;
searchText = "";
myMarker;
radius = 500;
searchresults = [];
selectedNo = -1;
additionalFilter = "no";
currentLat;
currentLng;
detailService;
isLoading = false;
directionsService;
directionsDisplay;
geocoder;
myLocationAddress;
travelMode = "DRIVING";
currentToDir = "";
searchFitnessService = "GYM";
infowindow;
constructor(private searchservice: SearchService) {
// this.search()
}
ngOnInit() {
this.script.src = "https://maps.googleapis.com/maps/api/js?key=AIzaSyDz7iXxtwOMovXzKaaWLzStFo1tDLP5PEg&libraries=places";
this.script.onload = () => {
this.map = new google.maps.Map(document.getElementById('map'), {
center: {lat: -33.8688, lng: 151.2195},
zoom: 13,
mapTypeId: 'roadmap',
gestureHandling: 'greedy'
});
this.input = document.getElementById('pac-input');
this.searchBox = new google.maps.places.SearchBox(this.input);
var me = this;
this.map.addListener('bounds_changed', function() {
me.searchBox.setBounds(me.map.getBounds());
});
var bounds = new google.maps.LatLngBounds();
if (navigator.geolocation) {
navigator.geolocation.getCurrentPosition(function(position) {
bounds = new google.maps.LatLngBounds(new google.maps.LatLng(position.coords.latitude, position.coords.longitude));
me.map.fitBounds(bounds);
me.currentLat = position.coords.latitude;
me.currentLng = position.coords.longitude;
me.myMarker = new google.maps.Marker({
map: me.map,
title: "My location",
position: new google.maps.LatLng(position.coords.latitude, position.coords.longitude)
});
var latlng = {lat: me.currentLat, lng: me.currentLng};
me.geocoder.geocode({'location': latlng}, function(results, status) {
if (status === 'OK') {
if (results[0]) {
me.myLocationAddress = results[0].formatted_address;
} else {
window.alert('No results found');
}
} else {
window.alert('Geocoder failed due to: ' + status);
}
});
});
} else { }
this.detailService = new google.maps.places.PlacesService(this.map);
this.directionsService = new google.maps.DirectionsService;
this.directionsDisplay = new google.maps.DirectionsRenderer;
this.directionsDisplay.setMap(this.map);
this.geocoder = new google.maps.Geocoder;
this.searchBox.addListener('places_changed', function() {
me.places = me.searchBox.getPlaces();
if (me.places.length == 0) {
return;
}
// Clear out the old markers.
me.markers.forEach(function(marker) {
marker.setMap(null);
});
me.markers = [];
// For each place, get the icon, name and location.
bounds = new google.maps.LatLngBounds();
var place = me.places[0];
me.currentLat = place.geometry.location.lat();
me.currentLng = place.geometry.location.lng();
if (!place.geometry) {
console.log("Returned place contains no geometry");
return;
}
if (place.geometry.viewport) {
// Only geocodes have viewport.
bounds.union(place.geometry.viewport);
} else {
bounds.extend(place.geometry.location);
}
me.map.fitBounds(bounds);
me.search();
});
}
document.body.appendChild(this.script);
}
search() {
var me = this;
var pyrmont = new google.maps.LatLng(this.currentLat, this.currentLng);
var request = {
location: pyrmont,
radius: this.radius,
type: ['fitness'],
query: this.additionalFilter + ' ' + this.searchFitnessService
};
// this.isLoading = true;
this.searchresults = [];
var service = new google.maps.places.PlacesService(this.map);
var i = 0;
me.markers.forEach(function(marker) {
marker.setMap(null);
});
me.markers = [];
var bounds = new google.maps.LatLngBounds();
service.textSearch(request, function(results, status, pagination) {
if (status == google.maps.places.PlacesServiceStatus.OK) {
results.forEach(function(place) {
i++;
var icon = {
url:"https://cdn.mapmarker.io/api/v1/pin?size=120&background=%230C797D&text=" + i + "&color=%23FFFFFF&voffset=2&hoffset=1&", //"assets/pins/number_" + i + ".png",
size: new google.maps.Size(150, 150),
origin: new google.maps.Point(0, 0),
anchor: new google.maps.Point(17, 75),
scaledSize: new google.maps.Size(50, 50)
};
var newMarker = new google.maps.Marker({
map: me.map,
icon: icon,
title: place.name,
position: place.geometry.location
});
me.markers.push(newMarker);
newMarker.addListener('click', async function() {
var img = './assets/images/service.jpg';
if( place.photos && place.photos.length > 0) {
img = place.photos[0].getUrl();
}
let placeDeatil : any = await me.getPlaceDetails(place.place_id);
var open_hours = '';
if(placeDeatil.opening_hours) {
placeDeatil.opening_hours.weekday_text.forEach(t => {
open_hours += t + "<br>";
})
}
// debugger;
var contentString =
`<div class="infowindow">
<div>
<img class="thumb" src="` + img + `">
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/pin.svg">
<div class="info-text">` + placeDeatil.formatted_address +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/open_in_new.svg">
<a class="info-text" target="_blank" href="`+placeDeatil.website+`">` + placeDeatil.website +`</a>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/phone.svg">
<div class="info-text">` + placeDeatil.formatted_phone_number +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/timeline.svg">
<div class="info-text">` + open_hours +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/bookmark.svg">
<div class="info-text">Add to my favorite</div>
</div>
</div>`;
if(me.infowindow) {
me.infowindow.close();
}
me.infowindow = new google.maps.InfoWindow({
content: contentString
});
me.infowindow.open(me.map, newMarker);
});
if (place.geometry.viewport) {
// Only geocodes have viewport.
bounds.union(place.geometry.viewport);
} else {
bounds.extend(place.geometry.location);
}
me.map.fitBounds(bounds);
});
me.searchresults = me.searchresults.concat(results);
if(pagination.hasNextPage) {
pagination.nextPage();
}
}
});
}
async select(i) {
this.selectedNo = i;
var place = this.searchresults[i];
var newMarker = this.markers[i];
var me = this;
var img = './assets/images/service.jpg';
if( place.photos && place.photos.length > 0) {
img = place.photos[0].getUrl();
}
let placeDeatil : any = await me.getPlaceDetails(place.place_id);
var open_hours = '';
if(placeDeatil.opening_hours) {
placeDeatil.opening_hours.weekday_text.forEach(t => {
open_hours += t + "<br>";
})
}
// debugger;
var contentString =
`<div class="infowindow">
<div>
<img class="thumb" src="` + img + `">
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/pin.svg">
<div class="info-text">` + placeDeatil.formatted_address +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/open_in_new.svg">
<a class="info-text" target="_blank" href="`+placeDeatil.website+`">` + placeDeatil.website +`</a>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/phone.svg">
<div class="info-text">` + placeDeatil.formatted_phone_number +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/timeline.svg">
<div class="info-text">` + open_hours +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/bookmark.svg">
<div class="info-text">Add to my favorite</div>
</div>
</div>`;
if(me.infowindow) {
me.infowindow.close();
}
me.infowindow = new google.maps.InfoWindow({
content: contentString
});
me.infowindow.open(me.map, newMarker);
}
| (value) {
if(value == "no") {return;}
// alert(this.additionalFilter);
this.search();
}
changedRadius(value){
if(value == "") {return;}
this.search();
}
async getPlacesDetails() {
var me = this;
var count = 0;
this.searchresults.forEach(async function(place) {
if(place.detail) return true;
count ++;
var placeDetail = await me.getPlaceDetails(place.place_id);
place.detail = placeDetail;
});
if(count != 0) {
var timer = setTimeout(function(){ me.getPlacesDetails(); }, 200);
} else {
this.isLoading = false;
}
}
getPlaceDetails(place_id) {
var me = this;
var request = {
placeId: place_id,
fields: ['name', 'formatted_address', 'formatted_phone_number', 'website', 'opening_hours', 'place_id']
};
return new Promise((resolve, reject) => {
me.detailService.getDetails(request, function(place1, status) {
if (status === google.maps.places.PlacesServiceStatus.OK) {
resolve(place1);
}
});
});
}
directionToHere(to) {
var me = this;
this.currentToDir = to;
this.directionsService.route({
origin: me.myLocationAddress,
destination: me.currentToDir,
travelMode: me.travelMode
}, function(response, status) {
if (status === 'OK') {
me.directionsDisplay.setDirections(response);
} else {
window.alert('Directions request failed due to ' + status);
}
});
}
onChangeTravelMode() {
if(this.searchresults.length > 0) {
this.directionToHere(this.currentToDir)
}
}
}
| onChangeAdditionalFilter | identifier_name |
app.component.ts | import { Component } from '@angular/core';
import { MatFormFieldControl, MatFormField } from '@angular/material';
import { SearchService } from './search.service';
declare var google: any;
@Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.scss'],
})
export class AppComponent {
title = 'trainasyougo';
script = document.createElement("script");
map;
input;
searchBox;
markers = [];
places;
searchText = "";
myMarker;
radius = 500;
searchresults = [];
selectedNo = -1;
additionalFilter = "no";
currentLat;
currentLng;
detailService;
isLoading = false;
directionsService;
directionsDisplay;
geocoder;
myLocationAddress;
travelMode = "DRIVING";
currentToDir = "";
searchFitnessService = "GYM";
infowindow;
constructor(private searchservice: SearchService) {
// this.search()
}
ngOnInit() {
this.script.src = "https://maps.googleapis.com/maps/api/js?key=AIzaSyDz7iXxtwOMovXzKaaWLzStFo1tDLP5PEg&libraries=places";
this.script.onload = () => {
this.map = new google.maps.Map(document.getElementById('map'), {
center: {lat: -33.8688, lng: 151.2195},
zoom: 13,
mapTypeId: 'roadmap',
gestureHandling: 'greedy'
});
this.input = document.getElementById('pac-input');
this.searchBox = new google.maps.places.SearchBox(this.input);
var me = this;
this.map.addListener('bounds_changed', function() {
me.searchBox.setBounds(me.map.getBounds());
});
var bounds = new google.maps.LatLngBounds();
if (navigator.geolocation) {
navigator.geolocation.getCurrentPosition(function(position) {
bounds = new google.maps.LatLngBounds(new google.maps.LatLng(position.coords.latitude, position.coords.longitude));
me.map.fitBounds(bounds);
me.currentLat = position.coords.latitude;
me.currentLng = position.coords.longitude;
me.myMarker = new google.maps.Marker({
map: me.map,
title: "My location",
position: new google.maps.LatLng(position.coords.latitude, position.coords.longitude)
});
var latlng = {lat: me.currentLat, lng: me.currentLng};
me.geocoder.geocode({'location': latlng}, function(results, status) {
if (status === 'OK') {
if (results[0]) {
me.myLocationAddress = results[0].formatted_address;
} else {
window.alert('No results found');
}
} else {
window.alert('Geocoder failed due to: ' + status);
}
});
});
} else { }
this.detailService = new google.maps.places.PlacesService(this.map);
this.directionsService = new google.maps.DirectionsService;
this.directionsDisplay = new google.maps.DirectionsRenderer;
this.directionsDisplay.setMap(this.map);
this.geocoder = new google.maps.Geocoder;
this.searchBox.addListener('places_changed', function() {
me.places = me.searchBox.getPlaces();
if (me.places.length == 0) {
return;
}
// Clear out the old markers.
me.markers.forEach(function(marker) {
marker.setMap(null);
});
me.markers = [];
// For each place, get the icon, name and location.
bounds = new google.maps.LatLngBounds();
var place = me.places[0];
me.currentLat = place.geometry.location.lat();
me.currentLng = place.geometry.location.lng();
if (!place.geometry) {
console.log("Returned place contains no geometry");
return;
}
if (place.geometry.viewport) {
// Only geocodes have viewport.
bounds.union(place.geometry.viewport);
} else {
bounds.extend(place.geometry.location);
}
me.map.fitBounds(bounds);
me.search();
});
}
document.body.appendChild(this.script);
}
search() {
var me = this;
var pyrmont = new google.maps.LatLng(this.currentLat, this.currentLng);
var request = {
location: pyrmont,
radius: this.radius,
type: ['fitness'],
query: this.additionalFilter + ' ' + this.searchFitnessService
};
// this.isLoading = true;
this.searchresults = [];
var service = new google.maps.places.PlacesService(this.map);
var i = 0;
me.markers.forEach(function(marker) {
marker.setMap(null);
});
me.markers = [];
var bounds = new google.maps.LatLngBounds();
service.textSearch(request, function(results, status, pagination) {
if (status == google.maps.places.PlacesServiceStatus.OK) {
results.forEach(function(place) {
i++;
var icon = {
url:"https://cdn.mapmarker.io/api/v1/pin?size=120&background=%230C797D&text=" + i + "&color=%23FFFFFF&voffset=2&hoffset=1&", //"assets/pins/number_" + i + ".png",
size: new google.maps.Size(150, 150),
origin: new google.maps.Point(0, 0),
anchor: new google.maps.Point(17, 75),
scaledSize: new google.maps.Size(50, 50)
};
var newMarker = new google.maps.Marker({
map: me.map,
icon: icon,
title: place.name,
position: place.geometry.location
});
me.markers.push(newMarker);
newMarker.addListener('click', async function() {
var img = './assets/images/service.jpg';
if( place.photos && place.photos.length > 0) {
img = place.photos[0].getUrl();
}
let placeDeatil : any = await me.getPlaceDetails(place.place_id);
var open_hours = '';
if(placeDeatil.opening_hours) {
placeDeatil.opening_hours.weekday_text.forEach(t => {
open_hours += t + "<br>";
})
}
// debugger;
var contentString =
`<div class="infowindow">
<div>
<img class="thumb" src="` + img + `">
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/pin.svg">
<div class="info-text">` + placeDeatil.formatted_address +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/open_in_new.svg">
<a class="info-text" target="_blank" href="`+placeDeatil.website+`">` + placeDeatil.website +`</a>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/phone.svg">
<div class="info-text">` + placeDeatil.formatted_phone_number +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/timeline.svg">
<div class="info-text">` + open_hours +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/bookmark.svg">
<div class="info-text">Add to my favorite</div>
</div>
</div>`;
if(me.infowindow) {
me.infowindow.close();
}
me.infowindow = new google.maps.InfoWindow({
content: contentString
});
me.infowindow.open(me.map, newMarker);
});
if (place.geometry.viewport) {
// Only geocodes have viewport.
bounds.union(place.geometry.viewport);
} else {
bounds.extend(place.geometry.location);
}
me.map.fitBounds(bounds);
});
me.searchresults = me.searchresults.concat(results);
if(pagination.hasNextPage) {
pagination.nextPage();
}
}
});
}
async select(i) {
this.selectedNo = i;
var place = this.searchresults[i];
var newMarker = this.markers[i];
var me = this;
var img = './assets/images/service.jpg';
if( place.photos && place.photos.length > 0) {
img = place.photos[0].getUrl();
}
let placeDeatil : any = await me.getPlaceDetails(place.place_id);
var open_hours = '';
if(placeDeatil.opening_hours) {
placeDeatil.opening_hours.weekday_text.forEach(t => {
open_hours += t + "<br>";
})
}
// debugger;
var contentString =
`<div class="infowindow">
<div>
<img class="thumb" src="` + img + `">
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/pin.svg">
<div class="info-text">` + placeDeatil.formatted_address +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/open_in_new.svg">
<a class="info-text" target="_blank" href="`+placeDeatil.website+`">` + placeDeatil.website +`</a>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/phone.svg">
<div class="info-text">` + placeDeatil.formatted_phone_number +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/timeline.svg">
<div class="info-text">` + open_hours +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/bookmark.svg">
<div class="info-text">Add to my favorite</div>
</div>
</div>`;
if(me.infowindow) {
me.infowindow.close();
}
me.infowindow = new google.maps.InfoWindow({
content: contentString
});
me.infowindow.open(me.map, newMarker);
}
onChangeAdditionalFilter(value) {
if(value == "no") {return;}
// alert(this.additionalFilter);
this.search();
}
changedRadius(value){ |
async getPlacesDetails() {
var me = this;
var count = 0;
this.searchresults.forEach(async function(place) {
if(place.detail) return true;
count ++;
var placeDetail = await me.getPlaceDetails(place.place_id);
place.detail = placeDetail;
});
if(count != 0) {
var timer = setTimeout(function(){ me.getPlacesDetails(); }, 200);
} else {
this.isLoading = false;
}
}
getPlaceDetails(place_id) {
var me = this;
var request = {
placeId: place_id,
fields: ['name', 'formatted_address', 'formatted_phone_number', 'website', 'opening_hours', 'place_id']
};
return new Promise((resolve, reject) => {
me.detailService.getDetails(request, function(place1, status) {
if (status === google.maps.places.PlacesServiceStatus.OK) {
resolve(place1);
}
});
});
}
directionToHere(to) {
var me = this;
this.currentToDir = to;
this.directionsService.route({
origin: me.myLocationAddress,
destination: me.currentToDir,
travelMode: me.travelMode
}, function(response, status) {
if (status === 'OK') {
me.directionsDisplay.setDirections(response);
} else {
window.alert('Directions request failed due to ' + status);
}
});
}
onChangeTravelMode() {
if(this.searchresults.length > 0) {
this.directionToHere(this.currentToDir)
}
}
} | if(value == "") {return;}
this.search();
} | random_line_split |
reading.rs | use crate::{protocols::ReturnableConnection, Pea2Pea};
use async_trait::async_trait;
use tokio::{
io::{AsyncRead, AsyncReadExt},
sync::mpsc,
time::sleep,
};
use tracing::*;
use std::{io, net::SocketAddr, time::Duration};
/// Can be used to specify and enable reading, i.e. receiving inbound messages.
/// If handshaking is enabled too, it goes into force only after the handshake has been concluded.
#[async_trait]
pub trait Reading: Pea2Pea
where
Self: Clone + Send + Sync + 'static,
{
/// The final (deserialized) type of inbound messages.
type Message: Send;
/// Prepares the node to receive messages; failures to read from a connection's stream are penalized by a timeout
/// defined in `NodeConfig`, while broken/unreadable messages result in an immediate disconnect (in order to avoid
/// accidentally reading "borked" messages).
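///
/// # Usage (illustrative sketch)
///
/// Assuming a `MyNode` type that already implements `Pea2Pea`, a minimal `Reading`
/// implementation might look like this (the trivial framing below is purely hypothetical):
///
/// ```ignore
/// #[async_trait]
/// impl Reading for MyNode {
///     type Message = Vec<u8>;
///
///     fn read_message(&self, _src: SocketAddr, buf: &[u8]) -> io::Result<Option<(Self::Message, usize)>> {
///         // trivial framing: consume everything that has arrived as a single message
///         if buf.is_empty() { Ok(None) } else { Ok(Some((buf.to_vec(), buf.len()))) }
///     }
/// }
///
/// // my_node.enable_reading(); // inbound messages are parsed and processed from this point on
/// ```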
fn enable_reading(&self) {
let (conn_sender, mut conn_receiver) = mpsc::channel::<ReturnableConnection>(
self.node().config().protocol_handler_queue_depth,
);
// the main task spawning per-connection tasks reading messages from their streams
let self_clone = self.clone();
let reading_task = tokio::spawn(async move {
trace!(parent: self_clone.node().span(), "spawned the Reading handler task");
loop {
// these objects are sent from `Node::adapt_stream`
if let Some((mut conn, conn_returner)) = conn_receiver.recv().await {
let addr = conn.addr;
let mut reader = conn.reader.take().unwrap(); // safe; it is available at this point
let mut buffer = vec![0; self_clone.node().config().conn_read_buffer_size]
.into_boxed_slice();
let (inbound_message_sender, mut inbound_message_receiver) =
mpsc::channel(self_clone.node().config().conn_inbound_queue_depth);
// the task for processing parsed messages
let processing_clone = self_clone.clone();
let inbound_processing_task = tokio::spawn(async move {
let node = processing_clone.node();
trace!(parent: node.span(), "spawned a task for processing messages from {}", addr);
loop {
if let Some(msg) = inbound_message_receiver.recv().await {
if let Err(e) = processing_clone.process_message(addr, msg).await {
error!(parent: node.span(), "can't process an inbound message: {}", e);
node.known_peers().register_failure(addr);
}
} else {
node.disconnect(addr);
break;
}
}
});
conn.tasks.push(inbound_processing_task);
// the task for reading messages from a stream
let reader_clone = self_clone.clone();
let reader_task = tokio::spawn(async move {
let node = reader_clone.node();
trace!(parent: node.span(), "spawned a task for reading messages from {}", addr);
// postpone reads until the connection is fully established; if the process fails,
// this task gets aborted, so there is no need for a dedicated timeout
while !node.connected_addrs().contains(&addr) {
sleep(Duration::from_millis(5)).await;
}
let mut carry = 0;
loop {
match reader_clone
.read_from_stream(
addr,
&mut buffer,
&mut reader,
carry,
&inbound_message_sender,
)
.await
{
Ok(leftover) => {
carry = leftover;
}
Err(e) => {
node.known_peers().register_failure(addr);
if node.config().fatal_io_errors.contains(&e.kind()) {
node.disconnect(addr);
break;
} else {
sleep(Duration::from_secs(
node.config().invalid_read_delay_secs,
))
.await;
}
}
}
}
});
conn.tasks.push(reader_task);
// return the Connection to the Node, resuming Node::adapt_stream
if conn_returner.send(Ok(conn)).is_err() {
unreachable!("could't return a Connection to the Node");
}
} else {
error!("the Reading protocol is down!");
break;
}
}
});
self.node().tasks.lock().push(reading_task);
// register the ReadingHandler with the Node
self.node().set_reading_handler(conn_sender.into());
}
/// Performs a read from the given reader. The default implementation is buffered; it sacrifices a bit of
/// simplicity for better performance. Read messages are sent to a message processing task in order to enable
/// faster reads. Returns the number of pending bytes left in the buffer in case of an incomplete read; they
/// should be provided to the method on the next call as `carry`.
async fn read_from_stream<R: AsyncRead + Unpin + Send>(
&self,
addr: SocketAddr,
buffer: &mut [u8],
reader: &mut R,
carry: usize,
message_sender: &mpsc::Sender<Self::Message>,
) -> io::Result<usize> {
// perform a read from the stream, being careful not to overwrite any bytes carried over from the previous read
match reader.read(&mut buffer[carry..]).await {
Ok(0) => return Err(io::ErrorKind::UnexpectedEof.into()),
Ok(n) => {
trace!(parent: self.node().span(), "read {}B from {}", n, addr);
let mut processed = 0;
let mut left = carry + n;
// several messages could have been read at once; process the contents of the buffer
loop {
// try to read a single message from the buffer
match self.read_message(addr, &buffer[processed..processed + left]) {
// a full message was read successfully
Ok(Some((msg, len))) => {
// advance the counters
processed += len;
left -= len;
trace!(
parent: self.node().span(),
"isolated {}B as a message from {}; {}B left to process",
len,
addr,
left
);
self.node()
.known_peers()
.register_received_message(addr, len);
self.node().stats().register_received_message(len);
// send the message for further processing
if message_sender.send(msg).await.is_err() {
error!(parent: self.node().span(), "the inbound message channel is closed");
return Err(io::ErrorKind::BrokenPipe.into());
}
// if the read is exhausted, reset the carry and return
if left == 0 {
return Ok(0);
}
}
// the message in the buffer is incomplete
Ok(None) => {
// forbid messages that are larger than the read buffer
if left >= buffer.len() {
error!(parent: self.node().span(), "a message from {} is too large", addr);
return Err(io::ErrorKind::InvalidData.into());
}
trace!(
parent: self.node().span(),
"a message from {} is incomplete; carrying {}B over",
addr,
left
);
// move the leftover bytes to the beginning of the buffer; the next read will append bytes
// starting from where the leftover ones end, allowing the message to be completed
buffer.copy_within(processed..processed + left, 0);
return Ok(left);
}
// an erroneous message (e.g. an unexpected zero-length payload)
Err(_) => {
error!(parent: self.node().span(), "a message from {} is invalid", addr);
return Err(io::ErrorKind::InvalidData.into());
}
}
}
}
// a stream read error
Err(e) => {
error!(parent: self.node().span(), "can't read from {}: {}", addr, e);
Err(e)
}
}
}
/// Reads a single message from the given buffer; `Ok(None)` indicates that the message is
/// incomplete, i.e. further reads from the stream must be performed in order to produce the whole message.
/// Alongside the message it returns the number of bytes the read message occupied in the buffer. An `Err`
/// returned here will result in the associated connection being dropped.
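///
/// # Example (illustrative sketch)
///
/// A possible implementation for 2-byte big-endian length-prefixed messages, assuming
/// `Message = Vec<u8>` (the framing scheme is an assumption, not part of the crate):
///
/// ```ignore
/// fn read_message(&self, _source: SocketAddr, buffer: &[u8]) -> io::Result<Option<(Vec<u8>, usize)>> {
///     // need at least the 2-byte length prefix before anything can be parsed
///     if buffer.len() < 2 {
///         return Ok(None);
///     }
///     let len = u16::from_be_bytes([buffer[0], buffer[1]]) as usize;
///     if len == 0 {
///         // reject unexpected zero-length payloads
///         return Err(io::ErrorKind::InvalidData.into());
///     }
///     if buffer.len() < 2 + len {
///         // the payload hasn't fully arrived yet; carry the bytes over to the next read
///         return Ok(None);
///     }
///     Ok(Some((buffer[2..2 + len].to_vec(), 2 + len)))
/// }
/// ```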
fn read_message(
&self,
source: SocketAddr,
buffer: &[u8],
) -> io::Result<Option<(Self::Message, usize)>>;
/// Processes an inbound message. Can be used to update state, send replies etc.
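///
/// # Example (illustrative sketch, assuming `Message = Vec<u8>`)
///
/// ```ignore
/// async fn process_message(&self, source: SocketAddr, message: Self::Message) -> io::Result<()> {
///     // e.g. just record the payload size; a real node might update state or send a reply here
///     debug!(parent: self.node().span(), "got a {}B message from {}", message.len(), source);
///     Ok(())
/// }
/// ```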
#[allow(unused_variables)]
async fn process_message(&self, source: SocketAddr, message: Self::Message) -> io::Result<()> |
}
| {
// don't do anything by default
Ok(())
} | identifier_body |
reading.rs | use crate::{protocols::ReturnableConnection, Pea2Pea};
use async_trait::async_trait;
use tokio::{
io::{AsyncRead, AsyncReadExt},
sync::mpsc,
time::sleep,
};
use tracing::*;
use std::{io, net::SocketAddr, time::Duration};
/// Can be used to specify and enable reading, i.e. receiving inbound messages.
/// If handshaking is enabled too, it goes into force only after the handshake has been concluded.
#[async_trait]
pub trait Reading: Pea2Pea
where
Self: Clone + Send + Sync + 'static,
{
/// The final (deserialized) type of inbound messages.
type Message: Send;
/// Prepares the node to receive messages; failures to read from a connection's stream are penalized by a timeout
/// defined in `NodeConfig`, while broken/unreadable messages result in an immediate disconnect (in order to avoid
/// accidentally reading "borked" messages).
fn enable_reading(&self) {
let (conn_sender, mut conn_receiver) = mpsc::channel::<ReturnableConnection>(
self.node().config().protocol_handler_queue_depth,
);
// the main task spawning per-connection tasks reading messages from their streams
let self_clone = self.clone();
let reading_task = tokio::spawn(async move {
trace!(parent: self_clone.node().span(), "spawned the Reading handler task");
loop {
// these objects are sent from `Node::adapt_stream`
if let Some((mut conn, conn_returner)) = conn_receiver.recv().await {
let addr = conn.addr;
let mut reader = conn.reader.take().unwrap(); // safe; it is available at this point
let mut buffer = vec![0; self_clone.node().config().conn_read_buffer_size]
.into_boxed_slice();
let (inbound_message_sender, mut inbound_message_receiver) =
mpsc::channel(self_clone.node().config().conn_inbound_queue_depth);
// the task for processing parsed messages
let processing_clone = self_clone.clone();
let inbound_processing_task = tokio::spawn(async move {
let node = processing_clone.node();
trace!(parent: node.span(), "spawned a task for processing messages from {}", addr);
loop {
if let Some(msg) = inbound_message_receiver.recv().await {
if let Err(e) = processing_clone.process_message(addr, msg).await {
error!(parent: node.span(), "can't process an inbound message: {}", e);
node.known_peers().register_failure(addr);
}
} else {
node.disconnect(addr);
break;
}
}
});
conn.tasks.push(inbound_processing_task);
// the task for reading messages from a stream
let reader_clone = self_clone.clone();
let reader_task = tokio::spawn(async move {
let node = reader_clone.node();
trace!(parent: node.span(), "spawned a task for reading messages from {}", addr);
// postpone reads until the connection is fully established; if the process fails,
// this task gets aborted, so there is no need for a dedicated timeout
while !node.connected_addrs().contains(&addr) {
sleep(Duration::from_millis(5)).await;
}
let mut carry = 0;
loop {
match reader_clone
.read_from_stream(
addr,
&mut buffer,
&mut reader,
carry,
&inbound_message_sender,
)
.await
{
Ok(leftover) => {
carry = leftover;
}
Err(e) => {
node.known_peers().register_failure(addr);
if node.config().fatal_io_errors.contains(&e.kind()) {
node.disconnect(addr);
break;
} else {
sleep(Duration::from_secs(
node.config().invalid_read_delay_secs,
))
.await;
}
}
}
}
});
conn.tasks.push(reader_task);
// return the Connection to the Node, resuming Node::adapt_stream
if conn_returner.send(Ok(conn)).is_err() {
unreachable!("could't return a Connection to the Node");
}
} else {
error!("the Reading protocol is down!");
break;
}
}
});
self.node().tasks.lock().push(reading_task);
// register the ReadingHandler with the Node
self.node().set_reading_handler(conn_sender.into());
}
/// Performs a read from the given reader. The default implementation is buffered; it sacrifices a bit of
/// simplicity for better performance. Read messages are sent to a message processing task in order to enable
/// faster reads. Returns the number of pending bytes left in the buffer in case of an incomplete read; they
/// should be provided to the method on the next call as `carry`.
async fn read_from_stream<R: AsyncRead + Unpin + Send>(
&self,
addr: SocketAddr,
buffer: &mut [u8],
reader: &mut R,
carry: usize,
message_sender: &mpsc::Sender<Self::Message>,
) -> io::Result<usize> {
// perform a read from the stream, being careful not to overwrite any bytes carried over from the previous read
match reader.read(&mut buffer[carry..]).await {
Ok(0) => return Err(io::ErrorKind::UnexpectedEof.into()),
Ok(n) => {
trace!(parent: self.node().span(), "read {}B from {}", n, addr);
let mut processed = 0;
let mut left = carry + n;
// several messages could have been read at once; process the contents of the buffer
loop {
// try to read a single message from the buffer
match self.read_message(addr, &buffer[processed..processed + left]) {
// a full message was read successfully
Ok(Some((msg, len))) => {
// advance the counters
processed += len;
left -= len;
trace!(
parent: self.node().span(),
"isolated {}B as a message from {}; {}B left to process",
len,
addr,
left
);
self.node()
.known_peers()
.register_received_message(addr, len);
self.node().stats().register_received_message(len);
// send the message for further processing
if message_sender.send(msg).await.is_err() {
error!(parent: self.node().span(), "the inbound message channel is closed");
return Err(io::ErrorKind::BrokenPipe.into());
}
// if the read is exhausted, reset the carry and return
if left == 0 {
return Ok(0);
}
}
// the message in the buffer is incomplete
Ok(None) => {
// forbid messages that are larger than the read buffer
if left >= buffer.len() {
error!(parent: self.node().span(), "a message from {} is too large", addr);
return Err(io::ErrorKind::InvalidData.into());
}
trace!(
parent: self.node().span(),
"a message from {} is incomplete; carrying {}B over",
addr,
left
);
// move the leftover bytes to the beginning of the buffer; the next read will append bytes
// starting from where the leftover ones end, allowing the message to be completed
buffer.copy_within(processed..processed + left, 0);
return Ok(left);
}
// an erroneous message (e.g. an unexpected zero-length payload)
Err(_) => {
error!(parent: self.node().span(), "a message from {} is invalid", addr);
return Err(io::ErrorKind::InvalidData.into());
}
}
}
}
// a stream read error
Err(e) => {
error!(parent: self.node().span(), "can't read from {}: {}", addr, e);
Err(e)
}
}
}
/// Reads a single message from the given buffer; `Ok(None)` indicates that the message is
/// incomplete, i.e. further reads from the stream must be performed in order to produce the whole message.
/// Alongside the message it returns the number of bytes the read message occupied in the buffer. An `Err`
/// returned here will result in the associated connection being dropped.
fn read_message(
&self,
source: SocketAddr,
buffer: &[u8],
) -> io::Result<Option<(Self::Message, usize)>>;
/// Processes an inbound message. Can be used to update state, send replies etc.
#[allow(unused_variables)]
async fn | (&self, source: SocketAddr, message: Self::Message) -> io::Result<()> {
// don't do anything by default
Ok(())
}
}
| process_message | identifier_name |
reading.rs | use crate::{protocols::ReturnableConnection, Pea2Pea};
use async_trait::async_trait;
use tokio::{
io::{AsyncRead, AsyncReadExt},
sync::mpsc,
time::sleep,
};
use tracing::*;
use std::{io, net::SocketAddr, time::Duration};
/// Can be used to specify and enable reading, i.e. receiving inbound messages.
/// If handshaking is enabled too, it goes into force only after the handshake has been concluded.
#[async_trait]
pub trait Reading: Pea2Pea
where
Self: Clone + Send + Sync + 'static,
{
/// The final (deserialized) type of inbound messages.
type Message: Send;
/// Prepares the node to receive messages; failures to read from a connection's stream are penalized by a timeout
/// defined in `NodeConfig`, while broken/unreadable messages result in an immediate disconnect (in order to avoid
/// accidentally reading "borked" messages).
fn enable_reading(&self) {
let (conn_sender, mut conn_receiver) = mpsc::channel::<ReturnableConnection>(
self.node().config().protocol_handler_queue_depth,
);
// the main task spawning per-connection tasks reading messages from their streams
let self_clone = self.clone();
let reading_task = tokio::spawn(async move {
trace!(parent: self_clone.node().span(), "spawned the Reading handler task");
loop {
// these objects are sent from `Node::adapt_stream`
if let Some((mut conn, conn_returner)) = conn_receiver.recv().await {
let addr = conn.addr;
let mut reader = conn.reader.take().unwrap(); // safe; it is available at this point
let mut buffer = vec![0; self_clone.node().config().conn_read_buffer_size]
.into_boxed_slice();
let (inbound_message_sender, mut inbound_message_receiver) =
mpsc::channel(self_clone.node().config().conn_inbound_queue_depth);
// the task for processing parsed messages
let processing_clone = self_clone.clone();
let inbound_processing_task = tokio::spawn(async move {
let node = processing_clone.node();
trace!(parent: node.span(), "spawned a task for processing messages from {}", addr);
loop {
if let Some(msg) = inbound_message_receiver.recv().await {
if let Err(e) = processing_clone.process_message(addr, msg).await {
error!(parent: node.span(), "can't process an inbound message: {}", e);
node.known_peers().register_failure(addr);
}
} else {
node.disconnect(addr);
break;
}
}
});
conn.tasks.push(inbound_processing_task);
// the task for reading messages from a stream
let reader_clone = self_clone.clone();
let reader_task = tokio::spawn(async move {
let node = reader_clone.node();
trace!(parent: node.span(), "spawned a task for reading messages from {}", addr);
// postpone reads until the connection is fully established; if the process fails,
// this task gets aborted, so there is no need for a dedicated timeout
while !node.connected_addrs().contains(&addr) {
sleep(Duration::from_millis(5)).await;
}
let mut carry = 0;
loop {
match reader_clone
.read_from_stream(
addr,
&mut buffer,
&mut reader,
carry,
&inbound_message_sender,
)
.await
{
Ok(leftover) => {
carry = leftover;
}
Err(e) => {
node.known_peers().register_failure(addr);
if node.config().fatal_io_errors.contains(&e.kind()) {
node.disconnect(addr);
break;
} else {
sleep(Duration::from_secs(
node.config().invalid_read_delay_secs,
))
.await;
}
}
}
}
});
conn.tasks.push(reader_task);
// return the Connection to the Node, resuming Node::adapt_stream
if conn_returner.send(Ok(conn)).is_err() {
unreachable!("could't return a Connection to the Node");
}
} else {
error!("the Reading protocol is down!");
break;
}
}
});
self.node().tasks.lock().push(reading_task);
// register the ReadingHandler with the Node
self.node().set_reading_handler(conn_sender.into());
}
/// Performs a read from the given reader. The default implementation is buffered; it sacrifices a bit of
/// simplicity for better performance. Read messages are sent to a message processing task in order to enable
/// faster reads. Returns the number of pending bytes left in the buffer in case of an incomplete read; they
/// should be provided to the method on the next call as `carry`.
async fn read_from_stream<R: AsyncRead + Unpin + Send>(
&self,
addr: SocketAddr,
buffer: &mut [u8],
reader: &mut R,
carry: usize,
message_sender: &mpsc::Sender<Self::Message>,
) -> io::Result<usize> {
// perform a read from the stream, being careful not to overwrite any bytes carried over from the previous read
match reader.read(&mut buffer[carry..]).await {
Ok(0) => return Err(io::ErrorKind::UnexpectedEof.into()),
Ok(n) => {
trace!(parent: self.node().span(), "read {}B from {}", n, addr);
let mut processed = 0;
let mut left = carry + n;
// several messages could have been read at once; process the contents of the buffer
loop {
// try to read a single message from the buffer
match self.read_message(addr, &buffer[processed..processed + left]) {
// a full message was read successfully
Ok(Some((msg, len))) => {
// advance the counters
processed += len;
left -= len;
trace!(
parent: self.node().span(),
"isolated {}B as a message from {}; {}B left to process",
len,
addr,
left
);
self.node()
.known_peers()
.register_received_message(addr, len);
self.node().stats().register_received_message(len);
// send the message for further processing
if message_sender.send(msg).await.is_err() {
error!(parent: self.node().span(), "the inbound message channel is closed");
return Err(io::ErrorKind::BrokenPipe.into());
}
// if the read is exhausted, reset the carry and return
if left == 0 {
return Ok(0);
}
}
// the message in the buffer is incomplete
Ok(None) => {
// forbid messages that are larger than the read buffer
if left >= buffer.len() {
error!(parent: self.node().span(), "a message from {} is too large", addr);
return Err(io::ErrorKind::InvalidData.into());
}
trace!(
parent: self.node().span(),
"a message from {} is incomplete; carrying {}B over",
addr,
left
);
// move the leftover bytes to the beginning of the buffer; the next read will append bytes
// starting from where the leftover ones end, allowing the message to be completed
buffer.copy_within(processed..processed + left, 0);
return Ok(left);
}
// an erroneous message (e.g. an unexpected zero-length payload)
Err(_) => {
error!(parent: self.node().span(), "a message from {} is invalid", addr);
return Err(io::ErrorKind::InvalidData.into());
}
}
}
}
// a stream read error
Err(e) => {
error!(parent: self.node().span(), "can't read from {}: {}", addr, e);
Err(e)
}
}
}
/// Reads a single message from the given buffer; `Ok(None)` indicates that the message is
/// incomplete, i.e. further reads from the stream must be performed in order to produce the whole message.
/// Alongside the message it returns the number of bytes the read message occupied in the buffer. An `Err`
/// returned here will result in the associated connection being dropped.
fn read_message( | buffer: &[u8],
) -> io::Result<Option<(Self::Message, usize)>>;
/// Processes an inbound message. Can be used to update state, send replies etc.
#[allow(unused_variables)]
async fn process_message(&self, source: SocketAddr, message: Self::Message) -> io::Result<()> {
// don't do anything by default
Ok(())
}
}
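To make the trait's contract concrete, here is a minimal sketch of a length-prefixed `read_message` implementation. The `EchoNode` type, its `String` message, and the `pea2pea` import paths are illustrative assumptions, not part of the crate itself.

use std::{io, net::SocketAddr};

use async_trait::async_trait;
use pea2pea::{protocols::Reading, Node, Pea2Pea};

// Hypothetical node type used only for illustration.
#[derive(Clone)]
struct EchoNode {
    node: Node,
}

impl Pea2Pea for EchoNode {
    fn node(&self) -> &Node {
        &self.node
    }
}

#[async_trait]
impl Reading for EchoNode {
    type Message = String;

    fn read_message(
        &self,
        _source: SocketAddr,
        buffer: &[u8],
    ) -> io::Result<Option<(Self::Message, usize)>> {
        // Wait until the 2-byte big-endian length prefix has arrived.
        if buffer.len() < 2 {
            return Ok(None);
        }
        let len = u16::from_be_bytes([buffer[0], buffer[1]]) as usize;
        // Treat zero-length payloads as a protocol violation; this drops the connection.
        if len == 0 {
            return Err(io::ErrorKind::InvalidData.into());
        }
        // The payload is incomplete; the caller carries the pending bytes over.
        if buffer.len() < 2 + len {
            return Ok(None);
        }
        let msg = String::from_utf8(buffer[2..2 + len].to_vec())
            .map_err(|_| io::Error::from(io::ErrorKind::InvalidData))?;
        // Report the message and the number of buffer bytes it consumed (prefix + payload).
        Ok(Some((msg, 2 + len)))
    }
}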
reading.rs
use crate::{protocols::ReturnableConnection, Pea2Pea};
use async_trait::async_trait;
use tokio::{
io::{AsyncRead, AsyncReadExt},
sync::mpsc,
time::sleep,
};
use tracing::*;
use std::{io, net::SocketAddr, time::Duration};
/// Can be used to specify and enable reading, i.e. receiving inbound messages.
/// If handshaking is enabled too, it goes into force only after the handshake has been concluded.
#[async_trait]
pub trait Reading: Pea2Pea
where
Self: Clone + Send + Sync + 'static,
{
/// The final (deserialized) type of inbound messages.
type Message: Send;
/// Prepares the node to receive messages; failures to read from a connection's stream are penalized by a timeout
/// defined in `NodeConfig`, while broken/unreadable messages result in an immediate disconnect (in order to avoid
/// accidentally reading "borked" messages).
fn enable_reading(&self) {
let (conn_sender, mut conn_receiver) = mpsc::channel::<ReturnableConnection>(
self.node().config().protocol_handler_queue_depth,
);
// the main task spawning per-connection tasks reading messages from their streams
let self_clone = self.clone();
let reading_task = tokio::spawn(async move {
trace!(parent: self_clone.node().span(), "spawned the Reading handler task");
loop {
// these objects are sent from `Node::adapt_stream`
if let Some((mut conn, conn_returner)) = conn_receiver.recv().await {
let addr = conn.addr;
let mut reader = conn.reader.take().unwrap(); // safe; it is available at this point
let mut buffer = vec![0; self_clone.node().config().conn_read_buffer_size]
.into_boxed_slice();
let (inbound_message_sender, mut inbound_message_receiver) =
mpsc::channel(self_clone.node().config().conn_inbound_queue_depth);
// the task for processing parsed messages
let processing_clone = self_clone.clone();
let inbound_processing_task = tokio::spawn(async move {
let node = processing_clone.node();
trace!(parent: node.span(), "spawned a task for processing messages from {}", addr);
loop {
if let Some(msg) = inbound_message_receiver.recv().await {
if let Err(e) = processing_clone.process_message(addr, msg).await {
error!(parent: node.span(), "can't process an inbound message: {}", e);
node.known_peers().register_failure(addr);
}
} else {
node.disconnect(addr);
break;
}
}
});
conn.tasks.push(inbound_processing_task);
// the task for reading messages from a stream
let reader_clone = self_clone.clone();
let reader_task = tokio::spawn(async move {
let node = reader_clone.node();
trace!(parent: node.span(), "spawned a task for reading messages from {}", addr);
// postpone reads until the connection is fully established; if the process fails,
// this task gets aborted, so there is no need for a dedicated timeout
while !node.connected_addrs().contains(&addr) {
sleep(Duration::from_millis(5)).await;
}
let mut carry = 0;
loop {
match reader_clone
.read_from_stream(
addr,
&mut buffer,
&mut reader,
carry,
&inbound_message_sender,
)
.await
{
Ok(leftover) => {
carry = leftover;
}
Err(e) => {
node.known_peers().register_failure(addr);
if node.config().fatal_io_errors.contains(&e.kind()) {
node.disconnect(addr);
break;
} else {
sleep(Duration::from_secs(
node.config().invalid_read_delay_secs,
))
.await;
}
}
}
}
});
conn.tasks.push(reader_task);
// return the Connection to the Node, resuming Node::adapt_stream
if conn_returner.send(Ok(conn)).is_err() {
unreachable!("couldn't return a Connection to the Node");
}
} else {
error!("the Reading protocol is down!");
break;
}
}
});
self.node().tasks.lock().push(reading_task);
// register the ReadingHandler with the Node
self.node().set_reading_handler(conn_sender.into());
}
/// Performs a read from the given reader. The default implementation is buffered; it sacrifices a bit of
/// simplicity for better performance. Read messages are sent to a message processing task in order to enable
/// faster reads. Returns the number of pending bytes left in the buffer in case of an incomplete read; they
/// should be provided to the method on the next call as `carry`.
async fn read_from_stream<R: AsyncRead + Unpin + Send>(
&self,
addr: SocketAddr,
buffer: &mut [u8],
reader: &mut R,
carry: usize,
message_sender: &mpsc::Sender<Self::Message>,
) -> io::Result<usize> {
// perform a read from the stream, being careful not to overwrite any bytes carried over from the previous read
match reader.read(&mut buffer[carry..]).await {
Ok(0) => return Err(io::ErrorKind::UnexpectedEof.into()),
Ok(n) => {
trace!(parent: self.node().span(), "read {}B from {}", n, addr);
let mut processed = 0;
let mut left = carry + n;
// several messages could have been read at once; process the contents of the buffer
loop {
// try to read a single message from the buffer
match self.read_message(addr, &buffer[processed..processed + left]) {
// a full message was read successfully
Ok(Some((msg, len))) => {
// advance the counters
processed += len;
left -= len;
trace!(
parent: self.node().span(),
"isolated {}B as a message from {}; {}B left to process",
len,
addr,
left
);
self.node()
.known_peers()
.register_received_message(addr, len);
self.node().stats().register_received_message(len);
// send the message for further processing
if message_sender.send(msg).await.is_err() {
error!(parent: self.node().span(), "the inbound message channel is closed");
return Err(io::ErrorKind::BrokenPipe.into());
}
// if the read is exhausted, reset the carry and return
if left == 0 {
return Ok(0);
}
}
// the message in the buffer is incomplete
Ok(None) => {
// forbid messages that are larger than the read buffer
if left >= buffer.len() {
error!(parent: self.node().span(), "a message from {} is too large", addr);
return Err(io::ErrorKind::InvalidData.into());
}
trace!(
parent: self.node().span(),
"a message from {} is incomplete; carrying {}B over",
addr,
left
);
// move the leftover bytes to the beginning of the buffer; the next read will append bytes
// starting from where the leftover ones end, allowing the message to be completed
buffer.copy_within(processed..processed + left, 0);
return Ok(left);
}
// an erroneous message (e.g. an unexpected zero-length payload)
Err(_) => {
error!(parent: self.node().span(), "a message from {} is invalid", addr);
return Err(io::ErrorKind::InvalidData.into());
}
}
}
}
// a stream read error
Err(e) => {
error!(parent: self.node().span(), "can't read from {}: {}", addr, e);
Err(e)
}
}
}
/// Reads a single message from the given buffer; `Ok(None)` indicates that the message is
/// incomplete, i.e. further reads from the stream must be performed in order to produce the whole message.
/// Alongside the message it returns the number of bytes the read message occupied in the buffer. An `Err`
/// returned here will result in the associated connection being dropped.
fn read_message(
&self,
source: SocketAddr,
buffer: &[u8],
) -> io::Result<Option<(Self::Message, usize)>>;
/// Processes an inbound message. Can be used to update state, send replies etc.
#[allow(unused_variables)]
async fn process_message(&self, source: SocketAddr, message: Self::Message) -> io::Result<()> {
// don't do anything by default
Ok(())
}
}
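The default `process_message` is a no-op; a sketch of overriding it is shown below. The `ChatNode` type, its `seen` counter, the newline-delimited codec, and the import paths are made-up examples with the same caveats as the sketch after the previous listing.

use std::{
    io,
    net::SocketAddr,
    sync::{atomic::{AtomicUsize, Ordering}, Arc},
};

use async_trait::async_trait;
use pea2pea::{protocols::Reading, Node, Pea2Pea};

// Hypothetical node that counts newline-delimited text messages.
#[derive(Clone)]
struct ChatNode {
    node: Node,
    seen: Arc<AtomicUsize>,
}

impl Pea2Pea for ChatNode {
    fn node(&self) -> &Node {
        &self.node
    }
}

#[async_trait]
impl Reading for ChatNode {
    type Message = String;

    fn read_message(
        &self,
        _source: SocketAddr,
        buffer: &[u8],
    ) -> io::Result<Option<(Self::Message, usize)>> {
        // One message per '\n'-terminated line; incomplete lines are carried over.
        match buffer.iter().position(|&b| b == b'\n') {
            Some(pos) => {
                let line = String::from_utf8_lossy(&buffer[..pos]).into_owned();
                Ok(Some((line, pos + 1)))
            }
            None => Ok(None),
        }
    }

    async fn process_message(&self, source: SocketAddr, message: Self::Message) -> io::Result<()> {
        // Update application state instead of the default no-op.
        let n = self.seen.fetch_add(1, Ordering::Relaxed) + 1;
        tracing::info!("message #{} from {}: {}", n, source, message);
        Ok(())
    }
}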
lib.rs
// Copyright 2018 Torsten Weber
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # LHEF
//!
//! The `lhef` library is a [`rust`] library to read and write files in
//! the [`LesHouchesEvents`] format.
//! It can be used to just read the common blocks specified by the
//! standard, but is flexible enough to also handle the additional
//! information that is allowed by the standard.
//! This can be done either by reading them as `String`s or by parsing
//! them into custom data structures.
//! Reading common blocks has been tested for event files generated by
//! [`MG5_aMC@NLO`] and [`HELAC_NLO`].
//! Specialized data structures for the reweighting information written
//! by `HELAC_NLO` are included.
//!
//! ## Usage examples
//!
//! ### Reading a file and ignoring all extra information:
//!
//! ```rust,ignore
//! use lhef::ReadLhe;
//! use lhef::plain::LheFile;
//!
//! let lhe = LheFile::read_lhe_from_file(&"events.lhe").unwrap();
//!
//! // Energy of beam 1
//! let beam_1_energy = lhe.init.beam_1_energy;
//!
//! // pz of the 4th particle in the 7th event
//! let pz = lhe.events[6].particles[3].momentum.pz;
//! ```
//!
//! ### Reading a file while keeping the extra information as strings:
//!
//! Specialized data structures for e.g. Madgraph do not exist, but the
//! additional information stored in the event files written by it can
//! still be extracted as strings:
//!
//! ```rust,ignore
//! use lhef::ReadLhe;
//! use lhef::string::{LheFile, EventExtra};
//!
//! let lhe = LheFile::read_lhe_from_file(&"events.lhe").unwrap();
//!
//! // extra information of the 5th event
//! let EventExtra(ref extra) = lhe.events[4].extra;
//! ```
//!
//! ### Reading a file generated by `HELAC-NLO`
//!
//! This library comes with a module containing special data structures
//! for the additional information contained in event files generated by
//! `HELAC-NLO`.
//! Therefore event files generated by `HELAC` can be read directly into
//! the appropriate structures:
//!
//! ```rust,ignore
//! use lhef::ReadLhe;
//! use lhef::helac::LheFileRS;
//!
//! let lhe = LheFileRS::read_lhe_from_file(&"events.lhe").unwrap();
//!
//! // x1 of the 5th event
//! let extra = lhe.events[4].extra.pdf.x1;
//! ```
//!
//! ## Supported file types
//!
//! This library comes with three specialization modules to handle extra
//! information contained in event files:
//!
//! ### plain
//!
//! The [`plain`] module allows reading `lhe` files without taking any
//! extra information into account.
//! The [`plain::LheFile`] struct contains only the information that is
//! guaranteed to be present in all `lhe` files.
//! The `extra` fields on the file, init and event objects are still
//! present, but only return dummy objects that do not contain any
//! information.
//! The `comment` and the `header` are also dummy objects.
//!
//!
//! ### string
//!
//! The [`string`] module allows reading `lhe` files while keeping all the
//! extra information in the files as unparsed strings.
//! The `comment` and the `header` are kept as strings, without the
//! start and end tags.
//! All extra information has leading and trailing whitespace removed.
//! Whitespace (including linebreaks) within the strings is conserved.
//!
//!
//! ### helac
//!
//! The [`helac`] module contains specialized structs that the extra
//! information contained in `lhe` files generated by `HELAC-NLO` is
//! parsed into.
//! The comment is kept as a string, and since `HELAC` `lhe` files do
//! not contain a header, the header is a dummy object.
//!
//!
//! ### Adding support for new file types
//!
//! To add new file types, you need to add types that implement the
//! `ReadLhe` and `WriteLhe` traits for the additional information
//! stored in the file type.
//! The type signature of the `read_lhe` function of the `ReadLhe`
//! trait means that you should use [`nom`] to parse your type.
//! Your implementations need to parse the opening and end tags for
//! comments (`<!--` and `-->`) and the header (`<header>` and
//! `</header>`) respectively, but must leave the tags for the init
//! section and for events alone.
//! With these implementations you can then use `LheFileGeneric` with
//! your types to read and write `lhe` files.
//!
//!
//! [`rust`]: https://www.rust-lang.org
//! [`LesHouchesEvents`]: https://arxiv.org/abs/hep-ph/0609017
//! [`MG5_aMC@NLO`]: https://launchpad.net/mg5amcnlo
//! [`HELAC_NLO`]: http://helac-phegas.web.cern.ch/helac-phegas/
//! [`nom`]: https://github.com/Geal/nom
//! [`plain`]: plain/index.html
//! [`string`]: string/index.html
//! [`helac`]: helac/index.html
extern crate lorentz_vector;
#[macro_use]
extern crate nom;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
#[cfg(test)]
#[macro_use]
extern crate serde;
#[cfg(test)]
extern crate serde_json;
#[macro_use]
pub mod nom_util;
pub mod generic;
pub mod helac;
pub mod plain;
pub mod string;
use lorentz_vector::LorentzVector;
use std::error;
use std::fmt;
use std::fs;
use std::io;
use std::io::Read;
use std::marker;
use std::path::Path;
#[cfg(test)]
use quickcheck::Arbitrary;
#[cfg(test)]
use quickcheck::Gen;
use nom_util::{parse_f64, parse_i64};
/// A type to use for pdg ids
///
/// See the [Particle Data Group] website for more information.
/// A list of all particle numbers can be found [here].
///
/// [Particle Data Group]: http://pdg.lbl.gov/
/// [here]: http://pdg.lbl.gov/2017/reviews/rpp2017-rev-monte-carlo-numbering.pdf
pub type PdgId = i64;
/// A trait to read (parts of) lhe files
///
/// This trait needs to be implemented for a type to be able to use it
/// in [`LheFileGeneric`] to hold extra information.
///
/// [`LheFileGeneric`]: generic/struct.LheFileGeneric.html
pub trait ReadLhe
where
Self: marker::Sized,
{
/// Read an lhe object from a byte string
///
/// The input to this function is the remaining input in the file
/// (or just a chunk of it) and if successful, it should return the
/// parsed object and the input left after parsing the object.
/// See the [`nom documentation`] for more information.
///
/// [`nom documentation`]: http://rust.unhandledexpression.com/nom/
fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], Self>;
/// Read an lhe object from a file
fn read_lhe_from_file<P: AsRef<Path>>(path: &P) -> Result<Self, ReadError> {
let mut file = fs::File::open(path)?;
let mut contents = Vec::new();
file.read_to_end(&mut contents)?;
Self::read_lhe(&contents)
.to_full_result()
.map_err(ReadError::Nom)
}
}
/// A trait to write (parts of) lhe files
///
/// This trait needs to be implemented for a type to be able to use it
/// in [`LheFileGeneric`] to hold extra information.
///
/// [`LheFileGeneric`]: generic/struct.LheFileGeneric.html
pub trait WriteLhe {
/// Write the object to a writer
fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;
/// Write the object to a file
fn write_lhe_to_file<P: AsRef<Path>>(&self, path: P) -> io::Result<()> {
let mut file = fs::File::create(path)?;
self.write_lhe(&mut file)
}
}
/// Errors that may occur when reading lhe objects from files
#[derive(Debug)]
pub enum ReadError {
/// An io error occurred
Io(io::Error),
/// A parse error occurred
Nom(nom::IError),
}
impl From<io::Error> for ReadError {
fn from(err: io::Error) -> ReadError {
ReadError::Io(err)
}
}
impl fmt::Display for ReadError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ReadError::Io(ref err) => {
write!(f, "Failed to read the lhe file with an IO error: {}", err)
}
ReadError::Nom(ref err) => write!(
f,
"Failed to read the lhe file with a parse error: {:?}",
err
),
}
}
}
impl error::Error for ReadError {
fn description(&self) -> &str {
match *self {
ReadError::Io(..) => &"Failed to read the lhe file with an IO error",
ReadError::Nom(..) => &"Failed to read the lhe file with a parse error",
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
ReadError::Io(ref err) => Some(err),
ReadError::Nom(_) => None,
}
}
}
/// A struct for process information
///
/// This is the per process information contained in the `init` section
/// of `lhe` files.
/// When reading a file, the `Init` struct will contain `NPRUP`
/// `ProcInfo` objects.
/// `ProcInfo` is part of the compulsory initialization information.
///
/// For more information on the fields, see the [`lhe`] paper and the
/// documentation of the [`LHA common blocks`].
/// The names in parentheses are the names of the fields in these
/// papers.
///
/// # Examples
///
/// ```rust
/// use lhef::{ProcInfo, ReadLhe};
/// use lhef::plain::LheFile;
///
/// let bytes = b"\
/// <LesHouchesEvents version=\"1.0\">
/// <init>
/// 2212 2212 6500 6500 0 0 13100 13100 3 2
/// 2.1 3.2E-03 1.0E+00 1
/// 4.0 7.4E-03 1.0E+00 2
/// </init>
/// </LesHouchesEvents>";
/// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap();
/// assert_eq!(lhe.init.process_info.len(), 2);
/// assert_eq!(lhe.init.process_info[0].xsect, 2.1);
/// assert_eq!(lhe.init.process_info[1].xsect_err, 0.0074);
/// ```
///
/// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017
/// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(test, derive(Serialize, Deserialize))]
pub struct ProcInfo {
/// The cross section of the process (`XSECUP`)
pub xsect: f64,
/// The cross section error of the process (`XERRUP`)
pub xsect_err: f64,
/// The maximum weight of the events of the process (`XMAXUP`)
pub maximum_weight: f64,
/// The process id (`LPRUP`)
pub process_id: i64,
}
impl ReadLhe for ProcInfo {
fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], ProcInfo> {
do_parse!(
input,
xsect: ws!(parse_f64) >> xsect_err: ws!(parse_f64) >> maximum_weight: ws!(parse_f64)
>> process_id: ws!(parse_i64) >> (ProcInfo {
xsect,
xsect_err,
maximum_weight,
process_id,
})
)
}
}
impl WriteLhe for ProcInfo {
fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writeln!(
writer,
"{:e} {:e} {:e} {}",
self.xsect, self.xsect_err, self.maximum_weight, self.process_id
)
}
}
#[cfg(test)]
impl Arbitrary for ProcInfo {
fn arbitrary<G: Gen>(gen: &mut G) -> ProcInfo {
ProcInfo {
xsect: Arbitrary::arbitrary(gen),
xsect_err: Arbitrary::arbitrary(gen),
maximum_weight: Arbitrary::arbitrary(gen),
process_id: Arbitrary::arbitrary(gen),
}
}
}
/// A particle in lhe format
///
/// An event will contain as many `Particle`s as there are particles in
/// the event.
/// `Particle` is part of the compulsory event information.
///
/// For more information on the fields, see the [`lhe`] paper and the
/// documentation of the [`LHA common blocks`].
/// The names in parentheses are the names of the fields in these
/// papers.
///
/// # Examples
///
/// ```rust
/// use lhef::{Particle, ReadLhe};
/// use lhef::plain::LheFile;
///
/// let bytes = b"\
/// <LesHouchesEvents version=\"1.0\">
/// <init>
/// 2212 2212 6500 6500 0 0 13100 13100 3 1
/// 2.1 3.2E-03 1.0E+00 1
/// </init>
/// <event>
/// 4 1 +1.04e-01 1.00e+03 7.54e-03 8.68e-02
/// -11 -1 0 0 0 0 +0.00e+00 +0.00e+00 +5.00e+02 5.00e+02 0.00e+00 0.00e+00 -1.00e+00
/// 11 -1 0 0 0 0 -0.00e+00 -0.00e+00 -5.00e+02 5.00e+02 0.00e+00 0.00e+00 1.00e+00
/// -13 1 1 2 0 0 -1.97e+02 -4.52e+02 -7.94e+01 5.00e+02 0.00e+00 0.00e+00 -1.00e+00
/// 13 1 1 2 0 0 +1.97e+02 +4.52e+02 +7.94e+01 5.00e+02 0.00e+00 0.00e+00 1.00e+00
/// </event>
/// </LesHouchesEvents>";
///
/// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap();
/// let event = &lhe.events[0];
/// assert_eq!(event.particles.len(), 4);
/// assert_eq!(event.particles[0].pdg_id, -11);
/// assert_eq!(event.particles[3].momentum.py, 452.);
/// ```
///
/// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017
/// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(test, derive(Serialize, Deserialize))]
pub struct Particle {
/// The pdg id of the particle (`IDUP`)
pub pdg_id: PdgId,
/// The status code of the particle (`ISTUP`)
pub status: i64,
/// The id of the first mother of the particle (`MOTHUP(1)`).
/// This isn't a pdg id, but a (1 based) index into the particles vector.
pub mother_1_id: i64,
/// The id of the second mother of the particle (`MOTHUP(2)`).
/// This isn't a pdg id, but a (1 based) index into the particles vector.
pub mother_2_id: i64,
/// The color of the particle (`ICOLUP(1)`)
pub color_1: i64,
/// The color of the particle (`ICOLUP(2)`)
pub color_2: i64,
/// The four momentum of the particle (`PUP` 1 - 4)
pub momentum: LorentzVector,
/// The mass of the particle (`PUP(5)`)
pub mass: f64,
/// The proper lifetime of the particle (`VTIMUP`)
pub proper_lifetime: f64,
/// The spin of the particle (`SPINUP`)
pub spin: f64,
}
impl ReadLhe for Particle {
fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], Particle> {
do_parse!(
input,
pdg_id: ws!(parse_i64) >> status: ws!(parse_i64) >> mother_1_id: ws!(parse_i64)
>> mother_2_id: ws!(parse_i64) >> color_1: ws!(parse_i64)
>> color_2: ws!(parse_i64) >> px: ws!(parse_f64) >> py: ws!(parse_f64)
>> pz: ws!(parse_f64) >> e: ws!(parse_f64) >> mass: ws!(parse_f64)
>> proper_lifetime: ws!(parse_f64) >> spin: ws!(parse_f64)
>> (Particle {
pdg_id,
status,
mother_1_id,
mother_2_id,
color_1,
color_2,
momentum: LorentzVector { e, px, py, pz },
mass,
proper_lifetime,
spin,
})
)
}
}
impl WriteLhe for Particle {
fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writeln!(
writer,
"{} {} {} {} {} {} {:e} {:e} {:e} {:e} {:e} {:e} {:e}",
self.pdg_id,
self.status,
self.mother_1_id,
self.mother_2_id,
self.color_1,
self.color_2,
self.momentum.px,
self.momentum.py,
self.momentum.pz,
self.momentum.e,
self.mass,
self.proper_lifetime,
self.spin
)
}
}
#[cfg(test)]
impl Arbitrary for Particle {
fn arbitrary<G: Gen>(gen: &mut G) -> Particle {
let momentum = LorentzVector {
e: Arbitrary::arbitrary(gen),
px: Arbitrary::arbitrary(gen),
py: Arbitrary::arbitrary(gen),
pz: Arbitrary::arbitrary(gen),
};
Particle {
pdg_id: Arbitrary::arbitrary(gen),
status: Arbitrary::arbitrary(gen),
mother_1_id: Arbitrary::arbitrary(gen),
mother_2_id: Arbitrary::arbitrary(gen),
color_1: Arbitrary::arbitrary(gen),
color_2: Arbitrary::arbitrary(gen),
momentum,
mass: Arbitrary::arbitrary(gen),
proper_lifetime: Arbitrary::arbitrary(gen),
spin: Arbitrary::arbitrary(gen),
}
}
}
#[cfg(test)]
mod tests {
use lorentz_vector::LorentzVector;
use super::{ReadLhe, WriteLhe};
use super::{Particle, ProcInfo};
#[test]
fn read_procinfo() {
let bytes = b"1. 2. 3. 4\n";
let expected = ProcInfo {
xsect: 1.,
xsect_err: 2.,
maximum_weight: 3.,
process_id: 4,
};
let result = ProcInfo::read_lhe(bytes).to_full_result().unwrap();
assert_eq!(result, expected);
}
#[test]
fn read_particle() {
let bytes = b"1 2 3 4 5 6 7. 8. 9. 10. 11. 12. 13.\n";
let expected = Particle {
pdg_id: 1,
status: 2,
mother_1_id: 3,
mother_2_id: 4,
color_1: 5,
color_2: 6,
momentum: LorentzVector {
px: 7.,
py: 8.,
pz: 9.,
e: 10.,
},
mass: 11.,
proper_lifetime: 12.,
spin: 13.,
};
let result = Particle::read_lhe(bytes).to_full_result().unwrap();
assert_eq!(result, expected);
}
quickcheck! {
fn proc_info_roundtrip_qc(p: ProcInfo) -> bool {
let mut bytes = Vec::new();
p.write_lhe(&mut bytes).unwrap();
let round = match ProcInfo::read_lhe(&bytes).to_full_result() {
Ok(r) => r,
Err(err) => panic!("Failed to read roundtrip: {:?}", err),
};
p == round
}
}
quickcheck! {
fn particle_roundtrip_qc(m: Particle) -> bool {
let mut bytes = Vec::new();
m.write_lhe(&mut bytes).unwrap();
let round = match Particle::read_lhe(&bytes).to_full_result() {
Ok(r) => r,
Err(err) => panic!("Failed to read roundtrip: {:?}", err),
};
m == round
}
}
}
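Following the "Adding support for new file types" notes in the module docs above, a minimal sketch of a custom extra-information type is shown next. `EventWeight` and its `weight` field are invented for illustration; the sketch assumes it lives where the crate's `parse_f64` helper and the nom macros are in scope, and a type like this would plug into `LheFileGeneric` as the event extra.

// A custom extra-info type in the same style as ProcInfo above (illustrative only).
#[derive(Clone, Debug, PartialEq)]
pub struct EventWeight {
    pub weight: f64,
}

impl ReadLhe for EventWeight {
    fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], EventWeight> {
        // A single floating point number per event, parsed with the crate's helpers.
        do_parse!(input, weight: ws!(parse_f64) >> (EventWeight { weight }))
    }
}

impl WriteLhe for EventWeight {
    fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
        writeln!(writer, "{:e}", self.weight)
    }
}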
lib.rs
// Copyright 2018 Torsten Weber
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # LHEF
//!
//! The `lhef` library is a [`rust`] library to read and write files in
//! the [`LesHouchesEvents`] format.
//! It can be used to just read the common blocks specified by the
//! standard, but is flexible enough to also handle the additional
//! information that is allowed by the standard.
//! This can be done either by reading them as `String`s or by parsing
//! them into custom data structures.
//! Reading common blocks has been tested for event files generated by
//! [`MG5_aMC@NLO`] and [`HELAC_NLO`].
//! Specialized data structures for the reweighting information written
//! by `HELAC_NLO` are included.
//!
//! ## Usage examples
//!
//! ### Reading a file and ignoring all extra information:
//!
//! ```rust,ignore
//! use lhef::ReadLhe;
//! use lhef::plain::LheFile;
//!
//! let lhe = LheFile::read_lhe_from_file(&"events.lhe").unwrap();
//!
//! // Energy of beam 1
//! let beam_1_energy = lhe.init.beam_1_energy;
//!
//! // pz of the 4th particle in the 7th event
//! let pz = lhe.events[6].particles[3].momentum.pz;
//! ```
//!
//! ### Reading a file while keeping the extra information as strings:
//!
//! Specialized data structures for e.g. Madgraph do not exist, but the
//! additional information stored in the event files written by it can
//! still be extracted as strings:
//!
//! ```rust,ignore
//! use lhef::ReadLhe;
//! use lhef::string::{LheFile, EventExtra};
//!
//! let lhe = LheFile::read_lhe_from_file(&"events.lhe").unwrap();
//!
//! // extra information of the 5th event
//! let EventExtra(ref extra) = lhe.events[4].extra;
//! ```
//!
//! ### Reading a file generated by `HELAC-NLO`
//!
//! This library comes with a module containing special data structures
//! for the additional information contained in event files generated by
//! `HELAC-NLO`.
//! Therefore event files generated by `HELAC` can be read directly into
//! the appropriate structures:
//!
//! ```rust,ignore
//! use lhef::ReadLhe;
//! use lhef::helac::LheFileRS;
//!
//! let lhe = LheFileRS::read_lhe_from_file(&"events.lhe").unwrap();
//!
//! // x1 of the 5th event
//! let extra = lhe.events[4].extra.pdf.x1;
//! ```
//!
//! ## Supported file types
//!
//! This library comes with three specialization modules to handle extra
//! information contained in event files:
//!
//! ### plain
//!
//! The [`plain`] module allows reading `lhe` files without taking any
//! extra information into account.
//! The [`plain::LheFile`] struct contains only the information that is
//! guaranteed to be present in all `lhe` files.
//! The `extra` fields on the file, init and event objects are still
//! present, but only return dummy objects that do not contain any
//! information.
//! The `comment` and the `header` are also dummy objects.
//!
//!
//! ### string
//!
//! The [`string`] module allows reading `lhe` files while keeping all the
//! extra information in the files as unparsed strings.
//! The `comment` and the `header` are kept as strings, without the
//! start and end tags.
//! All extra information has leading and trailing whitespace removed.
//! Whitespace (including linebreaks) within the strings is conserved.
//!
//!
//! ### helac
//!
//! The [`helac`] module contains specialized structs that the extra
//! information contained in `lhe` files generated by `HELAC-NLO` is
//! parsed into.
//! The comment is kept as a string, and since `HELAC` `lhe` files do
//! not contain a header, the header is a dummy object.
//!
//!
//! ### Adding support for new file types
//!
//! To add new file types, you need to add types that implement the
//! `ReadLhe` and `WriteLhe` traits for the additional information
//! stored in the file type.
//! The type signature of the `read_lhe` function of the `ReadLhe`
//! trait means that you should use [`nom`] to parse your type.
//! Your implementations need to parse the opening and end tags for
//! comments (`<!--` and `-->`) and the header (`<header>` and
//! `</header>`) respectively, but must leave the tags for the init
//! section and for events alone.
//! With these implementations you can then use `LheFileGeneric` with
//! your types to read and write `lhe` files.
//!
//!
//! [`rust`]: https://www.rust-lang.org
//! [`LesHouchesEvents`]: https://arxiv.org/abs/hep-ph/0609017
//! [`MG5_aMC@NLO`]: https://launchpad.net/mg5amcnlo
//! [`HELAC_NLO`]: http://helac-phegas.web.cern.ch/helac-phegas/
//! [`nom`]: https://github.com/Geal/nom
//! [`plain`]: plain/index.html
//! [`string`]: string/index.html
//! [`helac`]: helac/index.html
extern crate lorentz_vector;
#[macro_use]
extern crate nom;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
#[cfg(test)]
#[macro_use]
extern crate serde;
#[cfg(test)]
extern crate serde_json;
#[macro_use]
pub mod nom_util;
pub mod generic;
pub mod helac;
pub mod plain;
pub mod string;
use lorentz_vector::LorentzVector;
use std::error;
use std::fmt;
use std::fs;
use std::io;
use std::io::Read;
use std::marker;
use std::path::Path;
#[cfg(test)]
use quickcheck::Arbitrary;
#[cfg(test)]
use quickcheck::Gen;
use nom_util::{parse_f64, parse_i64};
/// A type to use for pdg ids
///
/// See the [Particle Data Group] website for more information.
/// A list of all particle numbers can be found [here].
///
/// [Particle Data Group]: http://pdg.lbl.gov/
/// [here]: http://pdg.lbl.gov/2017/reviews/rpp2017-rev-monte-carlo-numbering.pdf
pub type PdgId = i64;
/// A trait to read (parts of) lhe files
///
/// This trait needs to be implemented for a type to be able to use it
/// in [`LheFileGeneric`] to hold extra information.
///
/// [`LheFileGeneric`]: generic/struct.LheFileGeneric.html
pub trait ReadLhe
where
Self: marker::Sized,
{
/// Read an lhe object from a byte string
///
/// The input to this function is the remaining input in the file
/// (or just a chunk of it) and if successful, it should return the
/// parsed object and the input left after parsing the object.
/// See the [`nom documentation`] for more information.
///
/// [`nom documentation`]: http://rust.unhandledexpression.com/nom/
fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], Self>;
/// Read an lhe object from a file
fn read_lhe_from_file<P: AsRef<Path>>(path: &P) -> Result<Self, ReadError> {
let mut file = fs::File::open(path)?;
let mut contents = Vec::new();
file.read_to_end(&mut contents)?;
Self::read_lhe(&contents)
.to_full_result()
.map_err(ReadError::Nom)
}
}
/// A trait to write (parts of) lhe files
///
/// This trait needs to be implemented for a type to be able to use it
/// in [`LheFileGeneric`] to hold extra information.
///
/// [`LheFileGeneric`]: generic/struct.LheFileGeneric.html
pub trait WriteLhe {
/// Write the object to a writer
fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;
/// Write the object to a file
fn write_lhe_to_file<P: AsRef<Path>>(&self, path: P) -> io::Result<()> {
let mut file = fs::File::create(path)?;
self.write_lhe(&mut file)
}
}
/// Errors that may occur when reading lhe objects from files
#[derive(Debug)]
pub enum ReadError {
/// An io error occurred
Io(io::Error),
/// A parse error occurred
Nom(nom::IError),
}
impl From<io::Error> for ReadError {
fn from(err: io::Error) -> ReadError {
ReadError::Io(err)
}
}
impl fmt::Display for ReadError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ReadError::Io(ref err) => {
write!(f, "Failed to read the lhe file with an IO error: {}", err)
}
ReadError::Nom(ref err) => write!(
f,
"Failed to read the lhe file with a parse error: {:?}",
err
),
}
}
}
impl error::Error for ReadError {
fn description(&self) -> &str {
match *self {
ReadError::Io(..) => &"Failed to read the lhe file with an IO error",
ReadError::Nom(..) => &"Failed to read the lhe file with a parse error",
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
ReadError::Io(ref err) => Some(err),
ReadError::Nom(_) => None,
}
}
}
/// A struct for process information
///
/// This is the per process information contained in the `init` section
/// of `lhe` files.
/// When reading a file, the `Init` struct will contain `NPRUP`
/// `ProcInfo` objects.
/// `ProcInfo` is part of the compulsory initialization information.
///
/// For more information on the fields, see the [`lhe`] paper and the
/// documentation of the [`LHA common blocks`].
/// The names in parentheses are the names of the fields in these
/// papers.
///
/// # Examples
///
/// ```rust
/// use lhef::{ProcInfo, ReadLhe};
/// use lhef::plain::LheFile;
///
/// let bytes = b"\
/// <LesHouchesEvents version=\"1.0\">
/// <init>
/// 2212 2212 6500 6500 0 0 13100 13100 3 2
/// 2.1 3.2E-03 1.0E+00 1
/// 4.0 7.4E-03 1.0E+00 2
/// </init>
/// </LesHouchesEvents>";
/// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap();
/// assert_eq!(lhe.init.process_info.len(), 2);
/// assert_eq!(lhe.init.process_info[0].xsect, 2.1);
/// assert_eq!(lhe.init.process_info[1].xsect_err, 0.0074);
/// ```
///
/// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017
/// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(test, derive(Serialize, Deserialize))]
pub struct ProcInfo {
/// The cross section of the process (`XSECUP`)
pub xsect: f64,
/// The cross section error of the process (`XERRUP`)
pub xsect_err: f64,
/// The maximum weight of the events of the process (`XMAXUP`)
pub maximum_weight: f64,
/// The process id (`LPRUP`)
pub process_id: i64,
}
impl ReadLhe for ProcInfo {
fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], ProcInfo> {
do_parse!(
input,
xsect: ws!(parse_f64) >> xsect_err: ws!(parse_f64) >> maximum_weight: ws!(parse_f64)
>> process_id: ws!(parse_i64) >> (ProcInfo {
xsect,
xsect_err,
maximum_weight,
process_id,
})
)
}
}
impl WriteLhe for ProcInfo {
fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writeln!(
writer,
"{:e} {:e} {:e} {}",
self.xsect, self.xsect_err, self.maximum_weight, self.process_id
)
}
}
#[cfg(test)]
impl Arbitrary for ProcInfo {
fn arbitrary<G: Gen>(gen: &mut G) -> ProcInfo {
ProcInfo {
xsect: Arbitrary::arbitrary(gen),
xsect_err: Arbitrary::arbitrary(gen),
maximum_weight: Arbitrary::arbitrary(gen),
process_id: Arbitrary::arbitrary(gen),
}
}
}
/// A particle in lhe format
///
/// An event will contain as many `Particle`s as there are particles in
/// the event.
/// `Particle` is part of the compulsory event information.
///
/// For more information on the fields, see the [`lhe`] paper and the
/// documentation of the [`LHA common blocks`].
/// The names in parentheses are the names of the fields in these
/// papers.
///
/// # Examples
///
/// ```rust
/// use lhef::{Particle, ReadLhe};
/// use lhef::plain::LheFile;
///
/// let bytes = b"\
/// <LesHouchesEvents version=\"1.0\">
/// <init>
/// 2212 2212 6500 6500 0 0 13100 13100 3 1
/// 2.1 3.2E-03 1.0E+00 1
/// </init>
/// <event>
/// 4 1 +1.04e-01 1.00e+03 7.54e-03 8.68e-02
/// -11 -1 0 0 0 0 +0.00e+00 +0.00e+00 +5.00e+02 5.00e+02 0.00e+00 0.00e+00 -1.00e+00
/// 11 -1 0 0 0 0 -0.00e+00 -0.00e+00 -5.00e+02 5.00e+02 0.00e+00 0.00e+00 1.00e+00
/// -13 1 1 2 0 0 -1.97e+02 -4.52e+02 -7.94e+01 5.00e+02 0.00e+00 0.00e+00 -1.00e+00
/// 13 1 1 2 0 0 +1.97e+02 +4.52e+02 +7.94e+01 5.00e+02 0.00e+00 0.00e+00 1.00e+00
/// </event>
/// </LesHouchesEvents>";
///
/// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap();
/// let event = &lhe.events[0];
/// assert_eq!(event.particles.len(), 4);
/// assert_eq!(event.particles[0].pdg_id, -11);
/// assert_eq!(event.particles[3].momentum.py, 452.);
/// ```
///
/// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017
/// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(test, derive(Serialize, Deserialize))]
pub struct Particle {
/// The pdg id of the particle (`IDUP`)
pub pdg_id: PdgId,
/// The status code of the particle (`ISTUP`)
pub status: i64,
/// The id of the first mother of the particle (`MOTHUP(1)`).
/// This isn't a pdg id, but a (1 based) index into the particles vector.
pub mother_1_id: i64,
/// The id of the second mother of the particle (`MOTHUP(2)`).
/// This isn't a pdg id, but a (1 based) index into the particles vector.
pub mother_2_id: i64,
/// The color of the particle (`ICOLUP(1)`)
pub color_1: i64,
/// The color of the particle (`ICOLUP(2)`)
pub color_2: i64,
/// The four momentum of the particle (`PUP` 1 - 4)
pub momentum: LorentzVector,
/// The mass of the particle (`PUP(5)`)
pub mass: f64,
/// The proper lifetime of the particle (`VTIMUP`)
pub proper_lifetime: f64,
/// The spin of the particle (`SPINUP`)
pub spin: f64,
}
impl ReadLhe for Particle {
fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], Particle> {
do_parse!(
input,
pdg_id: ws!(parse_i64) >> status: ws!(parse_i64) >> mother_1_id: ws!(parse_i64)
>> mother_2_id: ws!(parse_i64) >> color_1: ws!(parse_i64)
>> color_2: ws!(parse_i64) >> px: ws!(parse_f64) >> py: ws!(parse_f64)
>> pz: ws!(parse_f64) >> e: ws!(parse_f64) >> mass: ws!(parse_f64)
>> proper_lifetime: ws!(parse_f64) >> spin: ws!(parse_f64)
>> (Particle {
pdg_id,
status,
mother_1_id,
mother_2_id,
color_1,
color_2,
momentum: LorentzVector { e, px, py, pz },
mass,
proper_lifetime,
spin,
})
)
}
}
impl WriteLhe for Particle {
fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writeln!(
writer,
"{} {} {} {} {} {} {:e} {:e} {:e} {:e} {:e} {:e} {:e}",
self.pdg_id,
self.status,
self.mother_1_id,
self.mother_2_id,
self.color_1,
self.color_2,
self.momentum.px,
self.momentum.py,
self.momentum.pz,
self.momentum.e,
self.mass,
self.proper_lifetime,
self.spin
)
}
}
#[cfg(test)]
impl Arbitrary for Particle {
fn arbitrary<G: Gen>(gen: &mut G) -> Particle {
let momentum = LorentzVector {
e: Arbitrary::arbitrary(gen),
px: Arbitrary::arbitrary(gen),
py: Arbitrary::arbitrary(gen),
pz: Arbitrary::arbitrary(gen),
};
Particle {
pdg_id: Arbitrary::arbitrary(gen),
status: Arbitrary::arbitrary(gen),
mother_1_id: Arbitrary::arbitrary(gen),
mother_2_id: Arbitrary::arbitrary(gen),
color_1: Arbitrary::arbitrary(gen),
color_2: Arbitrary::arbitrary(gen),
momentum,
mass: Arbitrary::arbitrary(gen),
proper_lifetime: Arbitrary::arbitrary(gen),
spin: Arbitrary::arbitrary(gen),
}
}
}
#[cfg(test)]
mod tests {
use lorentz_vector::LorentzVector;
use super::{ReadLhe, WriteLhe};
use super::{Particle, ProcInfo};
#[test]
fn read_procinfo() {
let bytes = b"1. 2. 3. 4\n";
let expected = ProcInfo {
xsect: 1.,
xsect_err: 2.,
maximum_weight: 3.,
process_id: 4,
};
let result = ProcInfo::read_lhe(bytes).to_full_result().unwrap();
assert_eq!(result, expected);
}
#[test]
fn read_particle() {
let bytes = b"1 2 3 4 5 6 7. 8. 9. 10. 11. 12. 13.\n";
let expected = Particle {
pdg_id: 1,
status: 2,
mother_1_id: 3,
mother_2_id: 4,
color_1: 5,
color_2: 6,
momentum: LorentzVector {
px: 7.,
py: 8.,
pz: 9.,
e: 10.,
},
mass: 11.,
proper_lifetime: 12.,
spin: 13.,
};
let result = Particle::read_lhe(bytes).to_full_result().unwrap();
assert_eq!(result, expected);
}
quickcheck! {
fn proc_info_roundtrip_qc(p: ProcInfo) -> bool {
let mut bytes = Vec::new();
p.write_lhe(&mut bytes).unwrap();
let round = match ProcInfo::read_lhe(&bytes).to_full_result() {
Ok(r) => r,
Err(err) => panic!("Failed to read roundtrip: {:?}", err),
};
p == round
}
}
quickcheck! {
fn particle_roundtrip_qc(m: Particle) -> bool {
let mut bytes = Vec::new();
m.write_lhe(&mut bytes).unwrap();
let round = match Particle::read_lhe(&bytes).to_full_result() {
Ok(r) => r,
Err(err) => panic!("Failed to read roundtrip: {:?}", err),
};
m == round
}
}
}
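A short usage sketch for the error type defined above; the `load` helper, the `events.lhe` path, and the printed messages are placeholders, not part of the crate.

use lhef::plain::LheFile;
use lhef::{ReadError, ReadLhe};

// Load a file and report the two error cases separately.
fn load(path: &str) -> Option<LheFile> {
    match LheFile::read_lhe_from_file(&path) {
        Ok(lhe) => Some(lhe),
        Err(ReadError::Io(err)) => {
            eprintln!("could not read {}: {}", path, err);
            None
        }
        Err(ReadError::Nom(err)) => {
            eprintln!("could not parse {}: {:?}", path, err);
            None
        }
    }
}

fn main() {
    if let Some(lhe) = load("events.lhe") {
        println!("{} events", lhe.events.len());
    }
}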
lib.rs
// Copyright 2018 Torsten Weber
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # LHEF
//!
//! The `lhef` library is a [`rust`] library to read and write files in
//! the [`LesHouchesEvents`] format.
//! It can be used to just read the common blocks specified by the
//! standard, but is flexible enough to also handle the additional
//! information that is allowed by the standard.
//! This can be done either by reading them as `String`s or by parsing
//! them into custom data structures.
//! Reading common blocks has been tested for event files generated by
//! [`MG5_aMC@NLO`] and [`HELAC_NLO`].
//! Specialized data structures for the reweighting information written
//! by `HELAC_NLO` are included.
//!
//! ## Usage examples
//!
//! ### Reading a file and ignoring all extra information:
//!
//! ```rust,ignore
//! use lhef::ReadLhe;
//! use lhef::plain::LheFile;
//!
//! let lhe = LheFile::read_lhe_from_file(&"events.lhe").unwrap();
//!
//! // Energy of beam 1
//! let beam_1_energy = lhe.init.beam_1_energy;
//!
//! // pz of the 4th particle in the 7th event
//! let pz = lhe.events[6].particles[3].momentum.pz;
//! ```
//!
//! ### Reading a file while keeping the extra information as strings:
//!
//! Specialized data structures for e.g. Madgraph do not exist, but the
//! additional information stored in the event files written by it can
//! still be extracted as strings:
//!
//! ```rust,ignore
//! use lhef::ReadLhe;
//! use lhef::string::{LheFile, EventExtra};
//!
//! let lhe = LheFile::read_lhe_from_file(&"events.lhe").unwrap();
//!
//! // extra information of the 5th event
//! let EventExtra(ref extra) = lhe.events[4].extra;
//! ```
//!
//! ### Reading a file generated by `HELAC-NLO`
//!
//! This library comes with a module containing special data structures
//! for the additional information contained in event files generated by
//! `HELAC-NLO`.
//! Therefore event files generated by `HELAC` can be read directly into
//! the appropriate structures:
//!
//! ```rust,ignore
//! use lhef::ReadLhe;
//! use lhef::helac::LheFileRS;
//!
//! let lhe = LheFileRS::read_lhe_from_file(&"events.lhe").unwrap();
//!
//! // x1 of the 5th event
//! let extra = lhe.events[4].extra.pdf.x1;
//! ```
//!
//! ## Supported file types
//!
//! This library comes with three specialization modules to handle extra
//! information contained in event files:
//!
//! ### plain
//!
//! The [`plain`] module allows reading `lhe` files without taking any
//! extra information into account.
//! The [`plain::LheFile`] struct contains only the information that is
//! guaranteed to be present in all `lhe` files.
//! The `extra` fields on the file, init and event objects are still
//! present, but only return dummy objects that do not contain any
//! information.
//! The `comment` and the `header` are also dummy objects.
//!
//!
//! ### string
//!
//! The [`string`] module allows reading `lhe` files while keeping all the
//! extra information in the files as unparsed strings.
//! The `comment` and the `header` are kept as strings, without the
//! start and end tags.
//! All extra information has leading and trailing whitespace removed.
//! Whitespace (including linebreaks) within the strings is conserved.
//!
//!
//! ### helac
//!
//! The [`helac`] module contains specialized structs that the extra
//! information contained in `lhe` files generated by `HELAC-NLO` is
//! parsed into.
//! The comment is kept as a string, and since `HELAC` `lhe` files do
//! not contain a header, the header is a dummy object.
//!
//!
//! ### Adding support for new file types
//!
//! To add new file types, you need to add types that implement the
//! `ReadLhe` and `WriteLhe` traits for the additional information
//! stored in the file type.
//! The type signature of the `read_lhe` function of the `ReadLhe`
//! trait means that you should use [`nom`] to parse your type.
//! Your implementations need to parse the opening and end tags for
//! comments (`<!--` and `-->`) and the header (`<header>` and
//! `</header>`) respectively, but must leave the tags for the init
//! section and for events alone.
//! With these implementations you can then use `LheFileGeneric` with
//! your types to read and write `lhe` files.
//!
//!
//! [`rust`]: https://www.rust-lang.org
//! [`LesHouchesEvents`]: https://arxiv.org/abs/hep-ph/0609017
//! [`MG5_aMC@NLO`]: https://launchpad.net/mg5amcnlo
//! [`HELAC_NLO`]: http://helac-phegas.web.cern.ch/helac-phegas/
//! [`nom`]: https://github.com/Geal/nom
//! [`plain`]: plain/index.html
//! [`string`]: string/index.html
//! [`helac`]: helac/index.html
extern crate lorentz_vector;
#[macro_use]
extern crate nom;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
#[cfg(test)]
#[macro_use]
extern crate serde;
#[cfg(test)]
extern crate serde_json;
#[macro_use]
pub mod nom_util;
pub mod generic;
pub mod helac;
pub mod plain;
pub mod string;
use lorentz_vector::LorentzVector;
use std::error;
use std::fmt;
use std::fs;
use std::io;
use std::io::Read;
use std::marker;
use std::path::Path;
#[cfg(test)]
use quickcheck::Arbitrary;
#[cfg(test)]
use quickcheck::Gen;
use nom_util::{parse_f64, parse_i64};
/// A type to use for pdg ids
///
/// See the [Particle Data Group] website for more information.
/// A list of all particle numbers can be found [here].
///
/// [Particle Data Group]: http://pdg.lbl.gov/
/// [here]: http://pdg.lbl.gov/2017/reviews/rpp2017-rev-monte-carlo-numbering.pdf
pub type PdgId = i64;
/// A trait to read (parts of) lhe files
///
/// This trait needs to be implemented for a type to be able to use it
/// in [`LheFileGeneric`] to hold extra information.
///
/// [`LheFileGeneric`]: generic/struct.LheFileGeneric.html
pub trait ReadLhe
where
Self: marker::Sized,
{
/// Read an lhe object from a byte string
///
/// The input to this function is the remaining input in the file
/// (or just a chunk of it) and if successful, it should return the
/// parsed object and the input left after parsing the object.
/// See the [`nom documentation`] for more information.
///
/// [`nom documentation`]: http://rust.unhandledexpression.com/nom/
fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], Self>;
/// Read an lhe object from a file
fn read_lhe_from_file<P: AsRef<Path>>(path: &P) -> Result<Self, ReadError> {
let mut file = fs::File::open(path)?;
let mut contents = Vec::new();
file.read_to_end(&mut contents)?;
Self::read_lhe(&contents)
.to_full_result()
.map_err(ReadError::Nom)
}
}
/// A trait to write (parts of) lhe files
///
/// This trait needs to be implemented for a type to be able to use it
/// in [`LheFileGeneric`] to hold extra information.
///
/// [`LheFileGeneric`]: generic/struct.LheFileGeneric.html
pub trait WriteLhe {
/// Write the object to a writer
fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;
/// Write the object to a file
fn write_lhe_to_file<P: AsRef<Path>>(&self, path: P) -> io::Result<()> {
let mut file = fs::File::create(path)?;
self.write_lhe(&mut file)
}
}
/// Errors that may occur when reading lhe objects from files
#[derive(Debug)]
pub enum ReadError {
/// An io error occurred
Io(io::Error),
/// A parse error occurred
Nom(nom::IError),
}
impl From<io::Error> for ReadError {
fn from(err: io::Error) -> ReadError {
ReadError::Io(err)
}
}
impl fmt::Display for ReadError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ReadError::Io(ref err) => {
write!(f, "Failed to read the lhe file with an IO error: {}", err)
}
ReadError::Nom(ref err) => write!(
f,
"Failed to read the lhe file with a parse error: {:?}",
err
),
}
}
}
impl error::Error for ReadError {
fn description(&self) -> &str {
match *self {
ReadError::Io(..) => &"Failed to read the lhe file with an IO error",
ReadError::Nom(..) => &"Failed to read the lhe file with a parse error",
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
ReadError::Io(ref err) => Some(err),
ReadError::Nom(_) => None,
}
}
}
/// A struct for process information
///
/// This is the per process information contained in the `init` section
/// of `lhe` files.
/// When reading a file, the `Init` struct will contain `NPRUP`
/// `ProcInfo` objects.
/// `ProcInfo` is part of the compulsory initialization information.
///
/// For more information on the fields, see the [`lhe`] paper and the
/// documentation of the [`LHA common blocks`].
/// The names in parentheses are the names of the fields in these
/// papers.
///
/// # Examples
///
/// ```rust
/// use lhef::{ProcInfo, ReadLhe};
/// use lhef::plain::LheFile;
///
/// let bytes = b"\
/// <LesHouchesEvents version=\"1.0\">
/// <init>
/// 2212 2212 6500 6500 0 0 13100 13100 3 2
/// 2.1 3.2E-03 1.0E+00 1
/// 4.0 7.4E-03 1.0E+00 2
/// </init>
/// </LesHouchesEvents>";
/// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap();
/// assert_eq!(lhe.init.process_info.len(), 2);
/// assert_eq!(lhe.init.process_info[0].xsect, 2.1);
/// assert_eq!(lhe.init.process_info[1].xsect_err, 0.0074);
/// ```
///
/// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017
/// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(test, derive(Serialize, Deserialize))]
pub struct ProcInfo {
/// The cross section of the process (`XSECUP`)
pub xsect: f64,
/// The cross section error of the process (`XERRUP`)
pub xsect_err: f64,
/// The maximum weight of the events of the process (`XMAXUP`)
pub maximum_weight: f64,
/// The process id (`LPRUP`)
pub process_id: i64,
}
impl ReadLhe for ProcInfo {
fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], ProcInfo> {
do_parse!(
input,
xsect: ws!(parse_f64) >> xsect_err: ws!(parse_f64) >> maximum_weight: ws!(parse_f64)
>> process_id: ws!(parse_i64) >> (ProcInfo {
xsect,
xsect_err,
maximum_weight,
process_id,
})
)
}
}
impl WriteLhe for ProcInfo {
fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writeln!(
writer,
"{:e} {:e} {:e} {}",
self.xsect, self.xsect_err, self.maximum_weight, self.process_id
)
}
}
#[cfg(test)]
impl Arbitrary for ProcInfo {
fn arbitrary<G: Gen>(gen: &mut G) -> ProcInfo {
ProcInfo {
xsect: Arbitrary::arbitrary(gen),
xsect_err: Arbitrary::arbitrary(gen),
maximum_weight: Arbitrary::arbitrary(gen),
process_id: Arbitrary::arbitrary(gen),
}
}
}
/// A particle in lhe format
///
/// An event will contain as many `Particle`s as there are particles in
/// the event.
/// `Particle` is part of the compulsory event information.
///
/// For more information on the fields, see the [`lhe`] paper and the
/// documentation of the [`LHA common blocks`].
/// The names in parentheses are the names of the fields in these
/// papers.
///
/// # Examples
///
/// ```rust
/// use lhef::{Particle, ReadLhe};
/// use lhef::plain::LheFile;
///
/// let bytes = b"\
/// <LesHouchesEvents version=\"1.0\">
/// <init>
/// 2212 2212 6500 6500 0 0 13100 13100 3 1
/// 2.1 3.2E-03 1.0E+00 1
/// </init>
/// <event>
/// 4 1 +1.04e-01 1.00e+03 7.54e-03 8.68e-02
/// -11 -1 0 0 0 0 +0.00e+00 +0.00e+00 +5.00e+02 5.00e+02 0.00e+00 0.00e+00 -1.00e+00
/// 11 -1 0 0 0 0 -0.00e+00 -0.00e+00 -5.00e+02 5.00e+02 0.00e+00 0.00e+00 1.00e+00
/// -13 1 1 2 0 0 -1.97e+02 -4.52e+02 -7.94e+01 5.00e+02 0.00e+00 0.00e+00 -1.00e+00
/// 13 1 1 2 0 0 +1.97e+02 +4.52e+02 +7.94e+01 5.00e+02 0.00e+00 0.00e+00 1.00e+00
/// </event>
/// </LesHouchesEvents>";
///
/// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap();
/// let event = &lhe.events[0];
/// assert_eq!(event.particles.len(), 4);
/// assert_eq!(event.particles[0].pdg_id, -11);
/// assert_eq!(event.particles[3].momentum.py, 452.);
/// ```
///
/// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017
/// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(test, derive(Serialize, Deserialize))]
pub struct Particle {
/// The pdg id of the particle (`IDUP`)
pub pdg_id: PdgId,
/// The status code of the particle (`ISTUP`)
pub status: i64,
/// The id of the first mother of the particle (`MOTHUP(1)`).
/// This isn't a pdg id, but a (1 based) index into the particles vector.
pub mother_1_id: i64,
/// The id of the second mother of the particle (`MOTHUP(2)`).
/// This isn't a pdg id, but a (1 based) index into the particles vector.
pub mother_2_id: i64,
/// The color of the particle (`ICOLUP(1)`)
pub color_1: i64,
/// The color of the particle (`ICOLUP(2)`)
pub color_2: i64,
/// The four momentum of the particle (`PUP` 1 - 4)
pub momentum: LorentzVector,
/// The mass of the particle (`PUP(5)`)
pub mass: f64,
/// The proper lifetime of the particle (`VTIMUP`)
pub proper_lifetime: f64,
/// The spin of the particle (`SPINUP`)
pub spin: f64,
}
impl ReadLhe for Particle {
fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], Particle> {
do_parse!(
input,
pdg_id: ws!(parse_i64) >> status: ws!(parse_i64) >> mother_1_id: ws!(parse_i64)
>> mother_2_id: ws!(parse_i64) >> color_1: ws!(parse_i64)
>> color_2: ws!(parse_i64) >> px: ws!(parse_f64) >> py: ws!(parse_f64)
>> pz: ws!(parse_f64) >> e: ws!(parse_f64) >> mass: ws!(parse_f64)
>> proper_lifetime: ws!(parse_f64) >> spin: ws!(parse_f64)
>> (Particle {
pdg_id,
status,
mother_1_id,
mother_2_id,
color_1,
color_2,
momentum: LorentzVector { e, px, py, pz },
mass,
proper_lifetime,
spin,
})
)
}
}
impl WriteLhe for Particle {
fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writeln!(
writer,
"{} {} {} {} {} {} {:e} {:e} {:e} {:e} {:e} {:e} {:e}",
self.pdg_id,
self.status,
self.mother_1_id,
self.mother_2_id,
self.color_1,
self.color_2,
self.momentum.px,
self.momentum.py,
self.momentum.pz,
self.momentum.e,
self.mass,
self.proper_lifetime,
self.spin
)
}
}
#[cfg(test)]
impl Arbitrary for Particle {
fn arbitrary<G: Gen>(gen: &mut G) -> Particle {
let momentum = LorentzVector {
e: Arbitrary::arbitrary(gen),
px: Arbitrary::arbitrary(gen),
py: Arbitrary::arbitrary(gen),
pz: Arbitrary::arbitrary(gen),
};
Particle {
pdg_id: Arbitrary::arbitrary(gen),
status: Arbitrary::arbitrary(gen),
mother_1_id: Arbitrary::arbitrary(gen),
mother_2_id: Arbitrary::arbitrary(gen),
color_1: Arbitrary::arbitrary(gen),
color_2: Arbitrary::arbitrary(gen),
momentum,
mass: Arbitrary::arbitrary(gen),
proper_lifetime: Arbitrary::arbitrary(gen),
spin: Arbitrary::arbitrary(gen),
}
}
}
#[cfg(test)]
mod tests {
use lorentz_vector::LorentzVector;
use super::{ReadLhe, WriteLhe};
use super::{Particle, ProcInfo};
#[test]
fn read_procinfo() {
let bytes = b"1. 2. 3. 4\n";
let expected = ProcInfo {
xsect: 1.,
xsect_err: 2.,
maximum_weight: 3.,
process_id: 4,
};
let result = ProcInfo::read_lhe(bytes).to_full_result().unwrap();
assert_eq!(result, expected);
}
#[test]
fn read_particle() {
let bytes = b"1 2 3 4 5 6 7. 8. 9. 10. 11. 12. 13.\n";
let expected = Particle {
pdg_id: 1,
status: 2,
mother_1_id: 3,
mother_2_id: 4,
color_1: 5,
color_2: 6,
momentum: LorentzVector {
px: 7.,
py: 8.,
pz: 9.,
e: 10.,
},
mass: 11.,
proper_lifetime: 12.,
spin: 13.,
};
let result = Particle::read_lhe(bytes).to_full_result().unwrap();
assert_eq!(result, expected);
}
quickcheck! {
fn proc_info_roundtrip_qc(p: ProcInfo) -> bool {
let mut bytes = Vec::new();
p.write_lhe(&mut bytes).unwrap();
let round = match ProcInfo::read_lhe(&bytes).to_full_result() {
Ok(r) => r,
Err(err) => panic!("Failed to read roundtrip: {:?}", err),
};
p == round
}
}
quickcheck! {
fn particle_roundtrip_qc(m: Particle) -> bool {
let mut bytes = Vec::new();
m.write_lhe(&mut bytes).unwrap();
let round = match Particle::read_lhe(&bytes).to_full_result() {
Ok(r) => r,
Err(err) => panic!("Failed to read roundtrip: {:?}", err),
};
m == round
}
}
}
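To mirror the quickcheck round-trip tests above in plain code, a small sketch follows; the kinematic values are arbitrary examples.

use lhef::{Particle, ReadLhe, WriteLhe};
use lorentz_vector::LorentzVector;

fn main() {
    // An electron-like entry with made-up kinematics.
    let p = Particle {
        pdg_id: 11,
        status: 1,
        mother_1_id: 0,
        mother_2_id: 0,
        color_1: 0,
        color_2: 0,
        momentum: LorentzVector { e: 5.0, px: 3.0, py: 0.0, pz: 4.0 },
        mass: 0.0,
        proper_lifetime: 0.0,
        spin: 1.0,
    };
    let mut bytes = Vec::new();
    p.write_lhe(&mut bytes).unwrap();
    // Parsing the serialized line yields the original struct back.
    let parsed = Particle::read_lhe(&bytes).to_full_result().unwrap();
    assert_eq!(p, parsed);
}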
TEST.py
import sys
if not hasattr(sys, 'argv'):
sys.argv = ['']
from CNN import model as model
from UTILS import *
# from cnn_n import saveImg
from shutil import copyfile
tplt1 = "{0:^30}\t{1:^10}\t{2:^10}\t{3:^10}\t{4:^10}" #\t{4:^10}\t{5:^10}
tplt2 = "{0:^30}\t{1:^10}\t{2:^10}"
# models used for filtering, keyed by frame type and QP
model_set = {
"CNN2_I_QP37": r"progressive_cnn_filter\models\firstCNN\HEVC\QP37\CNN2\CNN2_I_QP37.ckpt",
}
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction =1
config.gpu_options.allow_growth = True
global cnn17, cnn27, cnn37, cnn47, cnn7, cnn57
def prepare_test_data(fileOrDir):
original_ycbcr = []
imgCbCr = []
gt_y = []
fileName_list = []
# The input is a single file.
if type(fileOrDir) is str:
fileName_list.append(fileOrDir)
# w, h = getWH(fileOrDir)
# imgY = getYdata(fileOrDir, [w, h])
imgY = c_getYdata(fileOrDir)
imgY = normalize(imgY)
imgY = np.resize(imgY, (1, imgY.shape[0], imgY.shape[1], 1))
original_ycbcr.append([imgY, imgCbCr])
##The input is one directory of test images.
elif len(fileOrDir) == 1:
fileName_list = load_file_list(fileOrDir)
for path in fileName_list:
# w, h = getWH(path)
# imgY = getYdata(path, [w, h])
imgY = c_getYdata(path)
imgY = normalize(imgY)
imgY = np.resize(imgY, (1, imgY.shape[0], imgY.shape[1], 1))
original_ycbcr.append([imgY, imgCbCr])
##The input is two directories, including ground truth.
elif len(fileOrDir) == 2:
fileName_list = load_file_list(fileOrDir[0])
gt_name_list = load_file_list(fileOrDir[1])
for lr_path, gt_path in zip(fileName_list, gt_name_list):
imgY = c_getYdata(lr_path)
imgY = normalize(imgY)
imgY = np.resize(imgY, (1, imgY.shape[0], imgY.shape[1], 1))
original_ycbcr.append([imgY, imgCbCr])
gt_y.append(c_getYdata(gt_path))
else:
print("Invalid Inputs.")
exit(0)
return original_ycbcr, gt_y, fileName_list
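# Usage sketch for the three input modes handled above (paths are hypothetical examples):
#   data, gt, names = prepare_test_data(r"test_set\qp37\foo.yuv")            # single file
#   data, gt, names = prepare_test_data([r"test_set\qp37"])                  # one directory
#   data, gt, names = prepare_test_data([r"test_set\qp37", r"test_set\ori"]) # degraded + ground truth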
class Predict:
input_tensor = None
output_tensor = None
model = None
def __init__(self, model, modelpath):
self.graph = tf.Graph()  # create a separate graph for each instance
self.model = model
with self.graph.as_default():
self.input_tensor = tf.placeholder(tf.float32, shape=(1, None, None, 1))
#self.output_tensor = tf.make_template('input_scope', self.model)(self.input_tensor)
self.output_tensor = model(self.input_tensor)
self.output_tensor = tf.clip_by_value(self.output_tensor, 0., 1.)
self.output_tensor = tf.multiply(self.output_tensor, 255)
self.saver = tf.train.Saver()
self.sess = tf.Session(graph=self.graph, config=config)  # create a new session for this graph
with self.sess.as_default():
with self.graph.as_default():
self.sess.run(tf.global_variables_initializer())
self.saver.restore(self.sess, modelpath)  # restore parameters from the checkpoint
print(modelpath)
def predict(self, fileOrDir):
#print("------------")
if (isinstance(fileOrDir, str)):
original_ycbcr, gt_y, fileName_list = prepare_test_data(fileOrDir)
imgY = original_ycbcr[0][0]
elif type(fileOrDir) is np.ndarray:
imgY = fileOrDir
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
elif (isinstance(fileOrDir, list)):
fileOrDir = np.asarray(fileOrDir, dtype='float32')
# fileOrDir = fileOrDir / 255
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
else:
imgY=None
with self.sess.as_default():
with self.sess.graph.as_default():
out = self.sess.run(self.output_tensor, feed_dict={self.input_tensor: imgY})
out = np.reshape(out, (out.shape[1], out.shape[2]))
out = np.around(out)
out = out.astype('int')
out = out.tolist()
return out
def init(sliceType, QP):
# print("init !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
global cnn17,cnn27,cnn37,cnn47,cnn7,cnn57
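# Only the QP37 checkpoint is listed in model_set above, so the other lookups below
# (CNN_I_QP7, CNN_I_QP17, ...) raise KeyError unless model_set is extended first.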
cnn7 =Predict(model,model_set["CNN_I_QP7"])
cnn17=Predict(model,model_set["CNN_I_QP17"])
cnn27=Predict(model,model_set["CNN_I_QP27"])
cnn37=Predict(model,model_set["CNN2_I_QP37"])
cnn47=Predict(model,model_set["CNN_I_QP47"])
cnn57 = Predict(model, model_set["CNN_I_QP57"])
def predict(file, QP, frame_type):
global cnn17, cnn27, cnn37, cnn47, cnn7, cnn57
if QP < 17:
R = cnn7.predict(file)
elif 17 <= QP < 27:
R = cnn17.predict(file)
elif 27 <= QP < 37:
R = cnn27.predict(file)
elif 37 <= QP < 47:
R = cnn37.predict(file)
elif 47 <= QP < 57:
R = cnn47.predict(file)
else:
R = cnn57.predict(file)
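return R  # assumed intent: hand the filtered frame back to the caller (the original never returned it)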
def showImg(inp):
h, w = inp[0], inp[1]
tem = np.asarray(inp, dtype='uint8')
#np.save(r"H:\KONG\cnn_2K%f" % time.time(),tem)
tem = Image.fromarray(tem, 'L')
tem.show()
#tem.save("D:/rec/FromPython%f.jpg" % time.time())
def test_all_ckpt(modelPath):
low_img = r"test_set\qp37"
# low_img = r"H:\KONG\FRAME_!\AV!_deblock_nocdefLr\QP53"
heigh_img = r"test_set\ori"
NUM_CNN=3 # number of CNN filtering passes
original_ycbcr, gt_y, fileName_list = prepare_test_data([low_img,heigh_img])
total_imgs = len(fileName_list)
tem = [f for f in os.listdir(modelPath) if 'data' in f]
ckptFiles = sorted([r.split('.data')[0] for r in tem])
max_psnr=0
max_epoch=0
max_ckpt_psnr = 0
for ckpt in ckptFiles:
cur_ckpt_psnr=0
#epoch = int(ckpt.split('_')[3])
# print(ckpt.split('.')[0].split('_'))
epoch = int(ckpt.split('.')[0].split('_')[-2])
# loss =int(ckpt.split('.')[0].split('_')[-1])
#
# if epoch <1000:
# continue
#print(epoch)
print(os.path.join(modelPath, ckpt))
predictor = Predict(model, os.path.join(modelPath, ckpt))
img_index = [14, 17, 4, 2, 7, 10, 12, 3, 0, 13, 16, 5, 6, 1, 15, 8, 9, 11]
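# fixed visiting order over the 18 test images (indices 0-17)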
for i in img_index:
# if i>5:
# continue
imgY = original_ycbcr[i][0]
gtY = gt_y[i] if gt_y else 0
#showImg(rec)
#print(np.shape(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])))
#cur_psnr[cnnTime]=psnr(denormalize(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])),np.reshape(gtY, [np.shape(imgY)[1],np.shape(imgY)[2]]))
cur_psnr=[]
rec = predictor.predict(imgY)
cur_psnr.append(psnr(rec,np.reshape(gtY, np.shape(rec))))
for cc in range(2,NUM_CNN+1):
rec = predictor.predict(rec)
cur_psnr.append(psnr(rec, np.reshape(gtY, np.shape(rec))))
# print(cur_psnr)
#print(len(cur_psnr))
cur_ckpt_psnr=cur_ckpt_psnr+np.mean(cur_psnr)
# print(tplt2.format(os.path.basename(fileName_list[i]), cur_psnr,psnr(denormalize(np.reshape(imgY, np.shape(rec))),np.reshape(gtY, np.shape(rec)))))
print("%30s"%os.path.basename(fileName_list[i]),end="")
for cc in cur_psnr:
print(" %.5f"%cc,end="")
print(" %.5f" % np.mean(cur_psnr), end="")
print(" %.5f"%psnr(denormalize(np.reshape(imgY, np.shape(rec))),np.reshape(gtY, np.shape(rec))))
if(cur_ckpt_psnr/total_imgs>max_ckpt_psnr):
max_ckpt_psnr=cur_ckpt_psnr/total_imgs
max_epoch=epoch
print("______________________________________________________________")
print(epoch,cur_ckpt_psnr/total_imgs,max_epoch,max_ckpt_psnr)
def build_trainset():
global mModel
reconDir = r"train_set\hevc_div2k_train_noFilter_yuv"
saveDir=r"train_set\hevc_div2k_train_noFilter_yuv_cnn" #cnn1 cnn2 cnn3
NUM_CNN = 3 # number of CNN filtering passes
for i in range(1,NUM_CNN+1):
if os.path.exists(saveDir+str(i))==0:
os.mkdir(saveDir+str(i))
mModel = Predict(model,
r"progressive_cnn_filter\models\firstCNN\HEVC\QP37\CNN2\CNN2_I_QP37.ckpt")
for dir in [y for y in os.listdir(reconDir) if os.path.isdir(os.path.join(reconDir, y)) ]:#
# print(dir)
if 36<int(dir.split("qp")[1])<47: # adjust to the QP range matching this model
for i in range(1,NUM_CNN+1):
sub_save_dir=os.path.join(saveDir+str(i),dir)
# print(sub_save_dir.replace("cnn1","cnn2"))
if os.path.exists(sub_save_dir)==0:
os.mkdir(sub_save_dir)
file_list=load_file_list(os.path.join(reconDir,dir))
# shuffle(file_list)
for file in file_list:
# print(file)
# exit(0)
# qp=int(dir[-2:])
# print(os.path.join(sub_save_dir ,os.path.basename(file[0])),qp)
# input_img=c_getYdata(file[0])
# qp_img=np.ones(np.shape(input_img),dtype="int8")*qp*4
# input_img=np.dstack((input_img,qp_img))
rec= mModel.predict(c_getYdata(file))
saveImg(os.path.join(saveDir+"1" ,dir, os.path.basename(file)), rec)
for i in range(2,NUM_CNN+1):
rec = mModel.predict(rec)
saveImg(os.path.join(saveDir+str(i),dir, os.path.basename(file)), rec)
if __name__ == '__main__':
# low_img = r"D:\konglingyi\test_result\E19091301\BasketballPass_416x240_50_l.yuv"
# heigh_img=r"D:\konglingyi\test_result\E19091301\BasketballPass_416x240_50_h.yuv"
# init(0,37)
# original_ycbcr, gt_y, fileName_list=prepare_test_data(heigh_img)
# rec1 = predict(low_img, 160,0)
#showImg(rec1)
#rec2 = predict(file, 212,2)
#print(psnr(denormalize(np.reshape(original_ycbcr[0][0],np.shape(rec1))), rec1))
# test_all_ckpt(r"progressive_cnn_filter\models\firstCNN\HEVC\QP37\CNN2")
build_trainset()
# for i in range(1,1):
# print(i) | _list = load_file_list(fileOrDir[0])
test_list = get_train_list(load_file_list(fileOrDir[0]), load_file_list(fileOrDir[1]))
for pair in test_list:
filesize = os.path.getsize(pair[0])
picsize = getWH(pair[0])[0]*getWH(pair[0])[0] * 3 // 2
numFrames = filesize // picsize
# if numFrames ==1:
or_imgY = c_getYdata(pair[0])
gt_imgY = c_getYdata(pair[1])
# normalize
or_imgY = normalize(or_imgY)
or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
## act as a placeholder
or_imgCbCr = 0
original_ycbcr.append([or_imgY, or_imgCbCr])
gt_y.append(gt_imgY)
# else:
# while numFrames>0:
# or_imgY =getOneFrameY(pair[0])
# gt_imgY =getOneFrameY(pair[1])
# # normalize
# or_imgY = normalize(or_imgY)
#
# or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
# gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
#
# ## act as a placeholder
# or_imgCbCr = 0
# original_ycbcr.append([or_imgY, or_imgCbCr])
# gt_y.append(gt_imgY)
els | conditional_block |
TEST.py | import sys
if not hasattr(sys, 'argv'):
sys.argv = ['']
from CNN import model as model
from UTILS import *
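# NOTE: the star import above is assumed to provide tf (TensorFlow), np (NumPy), os, Image (PIL)
# and the helpers used below (c_getYdata, getWH, normalize, denormalize, psnr, load_file_list,
# get_train_list, saveImg); nothing else in this script imports them.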
# from cnn_n import saveImg
from shutil import copyfile
tplt1 = "{0:^30}\t{1:^10}\t{2:^10}\t{3:^10}\t{4:^10}" #\t{4:^10}\t{5:^10}
tplt2 = "{0:^30}\t{1:^10}\t{2:^10}"
model_set = {
# models in use (checkpoint paths)
"CNN2_I_QP37":r"progressive_cnn_filter\models\firstCNN\HEVC\QP37\CNN2\CNN2_I_QP37.ckpt",}
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction =1
config.gpu_options.allow_growth = True
global cnn17, cnn27, cnn37, cnn47, cnn7, cnn57
def prepare_test_data(fileOrDir):
original_ycbcr = []
imgCbCr = []
gt_y = []
fileName_list = []
# The input is a single file.
if type(fileOrDir) is str:
fileName_list.append(fileOrDir)
# w, h = getWH(fileOrDir)
# imgY = getYdata(fileOrDir, [w, h])
imgY = c_getYdata(fileOrDir)
imgY = normalize(imgY)
imgY = np.resize(imgY, (1, imgY.shape[0], imgY.shape[1], 1))
original_ycbcr.append([imgY, imgCbCr])
##The input is one directory of test images.
elif len(fileOrDir) == 1:
fileName_list = load_file_list(fileOrDir)
for path in fileName_list:
# w, h = getWH(path)
# imgY = getYdata(path, [w, h])
imgY = c_getYdata(path)
imgY = normalize(imgY)
imgY = np.resize(imgY, (1, imgY.shape[0], imgY.shape[1], 1))
original_ycbcr.append([imgY, imgCbCr])
##The input is two directories, including ground truth.
elif len(fileOrDir) == 2:
fileName_list = load_file_list(fileOrDir[0])
test_list = get_train_list(load_file_list(fileOrDir[0]), load_file_list(fileOrDir[1]))
for pair in test_list:
filesize = os.path.getsize(pair[0])
picsize = getWH(pair[0])[0]*getWH(pair[0])[0] * 3 // 2
numFrames = filesize // picsize
# if numFrames ==1:
or_imgY = c_getYdata(pair[0])
gt_imgY = c_getYdata(pair[1])
# normalize
or_imgY = normalize(or_imgY)
or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
## act as a placeholder
or_imgCbCr = 0
original_ycbcr.append([or_imgY, or_imgCbCr])
gt_y.append(gt_imgY)
# else:
# while numFrames>0:
# or_imgY =getOneFrameY(pair[0])
# gt_imgY =getOneFrameY(pair[1])
# # normalize
# or_imgY = normalize(or_imgY)
#
# or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
# gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
#
# ## act as a placeholder
# or_imgCbCr = 0
# original_ycbcr.append([or_imgY, or_imgCbCr])
# gt_y.append(gt_imgY)
else:
print("Invalid Inputs.")
exit(0)
return original_ycbcr, gt_y, fileName_list
class Predict:
input_tensor = None
output_tensor = None
model = None
def __init__(self, model, modelpath):
self.graph = tf.Graph() # create a separate graph for each Predict instance
self.model = model
with self.graph.as_default():
self.input_tensor = tf.placeholder(tf.float32, shape=(1, None, None, 1))
#self.output_tensor = tf.make_template('input_scope', self.model)(self.input_tensor)
self.output_tensor = model(self.input_tensor)
self.output_tensor = tf.clip_by_value(self.output_tensor, 0., 1.)
self.output_tensor = tf.multiply(self.output_tensor, 255)
self.saver = tf.train.Saver()
self.sess = tf.Session(graph=self.graph,config=config) # create a new session for this graph
with self.sess.as_default():
with self.graph.as_default():
self.sess.run(tf.global_variables_initializer())
self.saver.restore(self.sess, modelpath) # restore parameters from the checkpoint
print(modelpath)
def predict(self, fileOrDir):
#print("------------")
if (isinstance(fileOrDir, str)):
original_ycbcr, gt_y, fileName_list = prepare_test_data(fileOrDir)
imgY = original_ycbcr[0][0]
elif type(fileOrDir) is np.ndarray:
imgY = fileOrDir
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
elif (isinstance(fileOrDir, list)):
fileOrDir = np.asarray(fileOrDir, dtype='float32')
# fileOrDir = fileOrDir / 255
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
else:
imgY=None
with self.sess.as_default():
with self.sess.graph.as_default():
out = self.sess.run(self.output_tensor, feed_dict={self.input_tensor: imgY})
out = np.reshape(out, (out.shape[1], out.shape[2]))
out = np.around(out)
out = out.astype('int')
out = out.tolist()
return out
def init(sliceType, QP):
# print("init !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
global cnn17,cnn27,cnn37,cnn47,cnn7,cnn57
cnn7 =Pre | n17, cnn27, cnn37, cnn47, cnn7, cnn57
if QP < 17:
R = cnn7.predict(file)
elif 17 <= QP < 27:
R = cnn17.predict(file)
elif 27 <= QP < 37:
R = cnn27.predict(file)
elif 37 <= QP < 47:
R = cnn37.predict(file)
elif 47 <= QP < 57:
R = cnn47.predict(file)
else:
R = cnn57.predict(file)
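return R  # assumed intent: hand the filtered frame back to the caller (the original never returned it)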
def showImg(inp):
h, w = inp[0], inp[1]
tem = np.asarray(inp, dtype='uint8')
#np.save(r"H:\KONG\cnn_2K%f" % time.time(),tem)
tem = Image.fromarray(tem, 'L')
tem.show()
#tem.save("D:/rec/FromPython%f.jpg" % time.time())
def test_all_ckpt(modelPath):
low_img = r"test_set\qp37"
# low_img = r"H:\KONG\FRAME_!\AV!_deblock_nocdefLr\QP53"
heigh_img = r"test_set\ori"
NUM_CNN=3 # number of CNN filtering passes
original_ycbcr, gt_y, fileName_list = prepare_test_data([low_img,heigh_img])
total_imgs = len(fileName_list)
tem = [f for f in os.listdir(modelPath) if 'data' in f]
ckptFiles = sorted([r.split('.data')[0] for r in tem])
max_psnr=0
max_epoch=0
max_ckpt_psnr = 0
for ckpt in ckptFiles:
cur_ckpt_psnr=0
#epoch = int(ckpt.split('_')[3])
# print(ckpt.split('.')[0].split('_'))
epoch = int(ckpt.split('.')[0].split('_')[-2])
# loss =int(ckpt.split('.')[0].split('_')[-1])
#
# if epoch <1000:
# continue
#print(epoch)
print(os.path.join(modelPath, ckpt))
predictor = Predict(model, os.path.join(modelPath, ckpt))
img_index = [14, 17, 4, 2, 7, 10, 12, 3, 0, 13, 16, 5, 6, 1, 15, 8, 9, 11]
for i in img_index:
# if i>5:
# continue
imgY = original_ycbcr[i][0]
gtY = gt_y[i] if gt_y else 0
#showImg(rec)
#print(np.shape(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])))
#cur_psnr[cnnTime]=psnr(denormalize(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])),np.reshape(gtY, [np.shape(imgY)[1],np.shape(imgY)[2]]))
cur_psnr=[]
rec = predictor.predict(imgY)
cur_psnr.append(psnr(rec,np.reshape(gtY, np.shape(rec))))
for cc in range(2,NUM_CNN+1):
rec = predictor.predict(rec)
cur_psnr.append(psnr(rec, np.reshape(gtY, np.shape(rec))))
# print(cur_psnr)
#print(len(cur_psnr))
cur_ckpt_psnr=cur_ckpt_psnr+np.mean(cur_psnr)
# print(tplt2.format(os.path.basename(fileName_list[i]), cur_psnr,psnr(denormalize(np.reshape(imgY, np.shape(rec))),np.reshape(gtY, np.shape(rec)))))
print("%30s"%os.path.basename(fileName_list[i]),end="")
for cc in cur_psnr:
print(" %.5f"%cc,end="")
print(" %.5f" % np.mean(cur_psnr), end="")
print(" %.5f"%psnr(denormalize(np.reshape(imgY, np.shape(rec))),np.reshape(gtY, np.shape(rec))))
if(cur_ckpt_psnr/total_imgs>max_ckpt_psnr):
max_ckpt_psnr=cur_ckpt_psnr/total_imgs
max_epoch=epoch
print("______________________________________________________________")
print(epoch,cur_ckpt_psnr/total_imgs,max_epoch,max_ckpt_psnr)
def build_trainset():
global mModel
reconDir = r"train_set\hevc_div2k_train_noFilter_yuv"
saveDir=r"train_set\hevc_div2k_train_noFilter_yuv_cnn" #cnn1 cnn2 cnn3
NUM_CNN = 3 # number of CNN filtering passes
for i in range(1,NUM_CNN+1):
if os.path.exists(saveDir+str(i))==0:
os.mkdir(saveDir+str(i))
mModel = Predict(model,
r"progressive_cnn_filter\models\firstCNN\HEVC\QP37\CNN2\CNN2_I_QP37.ckpt")
for dir in [y for y in os.listdir(reconDir) if os.path.isdir(os.path.join(reconDir, y)) ]:#
# print(dir)
if 36<int(dir.split("qp")[1])<47: # adjust to the QP range matching this model
for i in range(1,NUM_CNN+1):
sub_save_dir=os.path.join(saveDir+str(i),dir)
# print(sub_save_dir.replace("cnn1","cnn2"))
if os.path.exists(sub_save_dir)==0:
os.mkdir(sub_save_dir)
file_list=load_file_list(os.path.join(reconDir,dir))
# shuffle(file_list)
for file in file_list:
# print(file)
# exit(0)
# qp=int(dir[-2:])
# print(os.path.join(sub_save_dir ,os.path.basename(file[0])),qp)
# input_img=c_getYdata(file[0])
# qp_img=np.ones(np.shape(input_img),dtype="int8")*qp*4
# input_img=np.dstack((input_img,qp_img))
rec= mModel.predict(c_getYdata(file))
saveImg(os.path.join(saveDir+"1" ,dir, os.path.basename(file)), rec)
for i in range(2,NUM_CNN+1):
rec = mModel.predict(rec)
saveImg(os.path.join(saveDir+str(i),dir, os.path.basename(file)), rec)
if __name__ == '__main__':
# low_img = r"D:\konglingyi\test_result\E19091301\BasketballPass_416x240_50_l.yuv"
# heigh_img=r"D:\konglingyi\test_result\E19091301\BasketballPass_416x240_50_h.yuv"
# init(0,37)
# original_ycbcr, gt_y, fileName_list=prepare_test_data(heigh_img)
# rec1 = predict(low_img, 160,0)
#showImg(rec1)
#rec2 = predict(file, 212,2)
#print(psnr(denormalize(np.reshape(original_ycbcr[0][0],np.shape(rec1))), rec1))
# test_all_ckpt(r"progressive_cnn_filter\models\firstCNN\HEVC\QP37\CNN2")
build_trainset()
# for i in range(1,1):
# print(i) | dict(model,model_set["CNN_I_QP7"])
cnn17=Predict(model,model_set["CNN_I_QP17"])
cnn27=Predict(model,model_set["CNN_I_QP27"])
cnn37=Predict(model,model_set["CNN2_I_QP37"])
cnn47=Predict(model,model_set["CNN_I_QP47"])
cnn57 = Predict(model, model_set["CNN_I_QP57"])
def predict(file, QP, frame_type):
global cn | identifier_body |
TEST.py | import sys
if not hasattr(sys, 'argv'):
sys.argv = ['']
from CNN import model as model
from UTILS import *
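# NOTE: the star import above is assumed to provide tf (TensorFlow), np (NumPy), os, Image (PIL)
# and the helpers used below (c_getYdata, getWH, normalize, denormalize, psnr, load_file_list,
# get_train_list, saveImg); nothing else in this script imports them.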
# from cnn_n import saveImg
from shutil import copyfile
tplt1 = "{0:^30}\t{1:^10}\t{2:^10}\t{3:^10}\t{4:^10}" #\t{4:^10}\t{5:^10}
tplt2 = "{0:^30}\t{1:^10}\t{2:^10}"
model_set = {
# models in use (checkpoint paths)
"CNN2_I_QP37":r"progressive_cnn_filter\models\firstCNN\HEVC\QP37\CNN2\CNN2_I_QP37.ckpt",}
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction =1
config.gpu_options.allow_growth = True
global cnn17, cnn27, cnn37, cnn47, cnn7, cnn57
def prepare_test_data(fileOrDir):
original_ycbcr = []
imgCbCr = []
gt_y = []
fileName_list = []
# The input is a single file.
if type(fileOrDir) is str:
fileName_list.append(fileOrDir)
# w, h = getWH(fileOrDir)
# imgY = getYdata(fileOrDir, [w, h])
imgY = c_getYdata(fileOrDir)
imgY = normalize(imgY)
imgY = np.resize(imgY, (1, imgY.shape[0], imgY.shape[1], 1))
original_ycbcr.append([imgY, imgCbCr])
##The input is one directory of test images.
elif len(fileOrDir) == 1:
fileName_list = load_file_list(fileOrDir)
for path in fileName_list:
# w, h = getWH(path)
# imgY = getYdata(path, [w, h])
imgY = c_getYdata(path)
imgY = normalize(imgY)
imgY = np.resize(imgY, (1, imgY.shape[0], imgY.shape[1], 1))
original_ycbcr.append([imgY, imgCbCr])
##The input is two directories, including ground truth.
elif len(fileOrDir) == 2:
fileName_list = load_file_list(fileOrDir[0])
test_list = get_train_list(load_file_list(fileOrDir[0]), load_file_list(fileOrDir[1]))
for pair in test_list:
filesize = os.path.getsize(pair[0])
picsize = getWH(pair[0])[0]*getWH(pair[0])[0] * 3 // 2
numFrames = filesize // picsize
# if numFrames ==1:
or_imgY = c_getYdata(pair[0])
gt_imgY = c_getYdata(pair[1])
# normalize
or_imgY = normalize(or_imgY)
or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
## act as a placeholder
or_imgCbCr = 0
original_ycbcr.append([or_imgY, or_imgCbCr])
gt_y.append(gt_imgY)
# else:
# while numFrames>0:
# or_imgY =getOneFrameY(pair[0])
# gt_imgY =getOneFrameY(pair[1])
# # normalize
# or_imgY = normalize(or_imgY)
#
# or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
# gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
#
# ## act as a placeholder
# or_imgCbCr = 0
# original_ycbcr.append([or_imgY, or_imgCbCr])
# gt_y.append(gt_imgY)
else:
print("Invalid Inputs.")
exit(0)
return original_ycbcr, gt_y, fileName_list
class Predict:
input_tensor = None
output_tensor = None | self.graph = tf.Graph() # 为每个类(实例)单独创建一个graph
self.model = model
with self.graph.as_default():
self.input_tensor = tf.placeholder(tf.float32, shape=(1, None, None, 1))
#self.output_tensor = tf.make_template('input_scope', self.model)(self.input_tensor)
self.output_tensor = model(self.input_tensor)
self.output_tensor = tf.clip_by_value(self.output_tensor, 0., 1.)
self.output_tensor = tf.multiply(self.output_tensor, 255)
self.saver = tf.train.Saver()
self.sess = tf.Session(graph=self.graph,config=config) # create a new session for this graph
with self.sess.as_default():
with self.graph.as_default():
self.sess.run(tf.global_variables_initializer())
self.saver.restore(self.sess, modelpath) # restore parameters from the checkpoint
print(modelpath)
def predict(self, fileOrDir):
#print("------------")
if (isinstance(fileOrDir, str)):
original_ycbcr, gt_y, fileName_list = prepare_test_data(fileOrDir)
imgY = original_ycbcr[0][0]
elif type(fileOrDir) is np.ndarray:
imgY = fileOrDir
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
elif (isinstance(fileOrDir, list)):
fileOrDir = np.asarray(fileOrDir, dtype='float32')
# fileOrDir = fileOrDir / 255
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
else:
imgY=None
with self.sess.as_default():
with self.sess.graph.as_default():
out = self.sess.run(self.output_tensor, feed_dict={self.input_tensor: imgY})
out = np.reshape(out, (out.shape[1], out.shape[2]))
out = np.around(out)
out = out.astype('int')
out = out.tolist()
return out
def init(sliceType, QP):
# print("init !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
global cnn17,cnn27,cnn37,cnn47,cnn7,cnn57
cnn7 =Predict(model,model_set["CNN_I_QP7"])
cnn17=Predict(model,model_set["CNN_I_QP17"])
cnn27=Predict(model,model_set["CNN_I_QP27"])
cnn37=Predict(model,model_set["CNN2_I_QP37"])
cnn47=Predict(model,model_set["CNN_I_QP47"])
cnn57 = Predict(model, model_set["CNN_I_QP57"])
def predict(file, QP, frame_type):
global cnn17, cnn27, cnn37, cnn47, cnn7, cnn57
if QP < 17:
R = cnn7.predict(file)
elif 17 <= QP < 27:
R = cnn17.predict(file)
elif 27 <= QP < 37:
R = cnn27.predict(file)
elif 37 <= QP < 47:
R = cnn37.predict(file)
elif 47 <= QP < 57:
R = cnn47.predict(file)
else:
R = cnn57.predict(file)
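return R  # assumed intent: hand the filtered frame back to the caller (the original never returned it)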
def showImg(inp):
h, w = inp[0], inp[1]
tem = np.asarray(inp, dtype='uint8')
#np.save(r"H:\KONG\cnn_2K%f" % time.time(),tem)
tem = Image.fromarray(tem, 'L')
tem.show()
#tem.save("D:/rec/FromPython%f.jpg" % time.time())
def test_all_ckpt(modelPath):
low_img = r"test_set\qp37"
# low_img = r"H:\KONG\FRAME_!\AV!_deblock_nocdefLr\QP53"
heigh_img = r"test_set\ori"
NUM_CNN=3 # number of CNN filtering passes
original_ycbcr, gt_y, fileName_list = prepare_test_data([low_img,heigh_img])
total_imgs = len(fileName_list)
tem = [f for f in os.listdir(modelPath) if 'data' in f]
ckptFiles = sorted([r.split('.data')[0] for r in tem])
max_psnr=0
max_epoch=0
max_ckpt_psnr = 0
for ckpt in ckptFiles:
cur_ckpt_psnr=0
#epoch = int(ckpt.split('_')[3])
# print(ckpt.split('.')[0].split('_'))
epoch = int(ckpt.split('.')[0].split('_')[-2])
# loss =int(ckpt.split('.')[0].split('_')[-1])
#
# if epoch <1000:
# continue
#print(epoch)
print(os.path.join(modelPath, ckpt))
predictor = Predict(model, os.path.join(modelPath, ckpt))
img_index = [14, 17, 4, 2, 7, 10, 12, 3, 0, 13, 16, 5, 6, 1, 15, 8, 9, 11]
for i in img_index:
# if i>5:
# continue
imgY = original_ycbcr[i][0]
gtY = gt_y[i] if gt_y else 0
#showImg(rec)
#print(np.shape(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])))
#cur_psnr[cnnTime]=psnr(denormalize(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])),np.reshape(gtY, [np.shape(imgY)[1],np.shape(imgY)[2]]))
cur_psnr=[]
rec = predictor.predict(imgY)
cur_psnr.append(psnr(rec,np.reshape(gtY, np.shape(rec))))
for cc in range(2,NUM_CNN+1):
rec = predictor.predict(rec)
cur_psnr.append(psnr(rec, np.reshape(gtY, np.shape(rec))))
# print(cur_psnr)
#print(len(cur_psnr))
cur_ckpt_psnr=cur_ckpt_psnr+np.mean(cur_psnr)
# print(tplt2.format(os.path.basename(fileName_list[i]), cur_psnr,psnr(denormalize(np.reshape(imgY, np.shape(rec))),np.reshape(gtY, np.shape(rec)))))
print("%30s"%os.path.basename(fileName_list[i]),end="")
for cc in cur_psnr:
print(" %.5f"%cc,end="")
print(" %.5f" % np.mean(cur_psnr), end="")
print(" %.5f"%psnr(denormalize(np.reshape(imgY, np.shape(rec))),np.reshape(gtY, np.shape(rec))))
if(cur_ckpt_psnr/total_imgs>max_ckpt_psnr):
max_ckpt_psnr=cur_ckpt_psnr/total_imgs
max_epoch=epoch
print("______________________________________________________________")
print(epoch,cur_ckpt_psnr/total_imgs,max_epoch,max_ckpt_psnr)
def build_trainset():
global mModel
reconDir = r"train_set\hevc_div2k_train_noFilter_yuv"
saveDir=r"train_set\hevc_div2k_train_noFilter_yuv_cnn" #cnn1 cnn2 cnn3
NUM_CNN = 3 # number of CNN filtering passes
for i in range(1,NUM_CNN+1):
if os.path.exists(saveDir+str(i))==0:
os.mkdir(saveDir+str(i))
mModel = Predict(model,
r"progressive_cnn_filter\models\firstCNN\HEVC\QP37\CNN2\CNN2_I_QP37.ckpt")
for dir in [y for y in os.listdir(reconDir) if os.path.isdir(os.path.join(reconDir, y)) ]:#
# print(dir)
if 36<int(dir.split("qp")[1])<47: # adjust to the QP range matching this model
for i in range(1,NUM_CNN+1):
sub_save_dir=os.path.join(saveDir+str(i),dir)
# print(sub_save_dir.replace("cnn1","cnn2"))
if os.path.exists(sub_save_dir)==0:
os.mkdir(sub_save_dir)
file_list=load_file_list(os.path.join(reconDir,dir))
# shuffle(file_list)
for file in file_list:
# print(file)
# exit(0)
# qp=int(dir[-2:])
# print(os.path.join(sub_save_dir ,os.path.basename(file[0])),qp)
# input_img=c_getYdata(file[0])
# qp_img=np.ones(np.shape(input_img),dtype="int8")*qp*4
# input_img=np.dstack((input_img,qp_img))
rec= mModel.predict(c_getYdata(file))
saveImg(os.path.join(saveDir+"1" ,dir, os.path.basename(file)), rec)
for i in range(2,NUM_CNN+1):
rec = mModel.predict(rec)
saveImg(os.path.join(saveDir+str(i),dir, os.path.basename(file)), rec)
if __name__ == '__main__':
# low_img = r"D:\konglingyi\test_result\E19091301\BasketballPass_416x240_50_l.yuv"
# heigh_img=r"D:\konglingyi\test_result\E19091301\BasketballPass_416x240_50_h.yuv"
# init(0,37)
# original_ycbcr, gt_y, fileName_list=prepare_test_data(heigh_img)
# rec1 = predict(low_img, 160,0)
#showImg(rec1)
#rec2 = predict(file, 212,2)
#print(psnr(denormalize(np.reshape(original_ycbcr[0][0],np.shape(rec1))), rec1))
# test_all_ckpt(r"progressive_cnn_filter\models\firstCNN\HEVC\QP37\CNN2")
build_trainset()
# for i in range(1,1):
# print(i) | model = None
def __init__(self, model, modelpath): | random_line_split |
TEST.py | import sys
if not hasattr(sys, 'argv'):
sys.argv = ['']
from CNN import model as model
from UTILS import *
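# NOTE: the star import above is assumed to provide tf (TensorFlow), np (NumPy), os, Image (PIL)
# and the helpers used below (c_getYdata, getWH, normalize, denormalize, psnr, load_file_list,
# get_train_list, saveImg); nothing else in this script imports them.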
# from cnn_n import saveImg
from shutil import copyfile
tplt1 = "{0:^30}\t{1:^10}\t{2:^10}\t{3:^10}\t{4:^10}" #\t{4:^10}\t{5:^10}
tplt2 = "{0:^30}\t{1:^10}\t{2:^10}"
model_set = {
# models in use (checkpoint paths)
"CNN2_I_QP37":r"progressive_cnn_filter\models\firstCNN\HEVC\QP37\CNN2\CNN2_I_QP37.ckpt",}
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction =1
config.gpu_options.allow_growth = True
global cnn17, cnn27, cnn37, cnn47, cnn7, cnn57
def prepare_test_data(fileOrDir):
original_ycbcr = []
imgCbCr = []
gt_y = []
fileName_list = []
# The input is a single file.
if type(fileOrDir) is str:
fileName_list.append(fileOrDir)
# w, h = getWH(fileOrDir)
# imgY = getYdata(fileOrDir, [w, h])
imgY = c_getYdata(fileOrDir)
imgY = normalize(imgY)
imgY = np.resize(imgY, (1, imgY.shape[0], imgY.shape[1], 1))
original_ycbcr.append([imgY, imgCbCr])
##The input is one directory of test images.
elif len(fileOrDir) == 1:
fileName_list = load_file_list(fileOrDir)
for path in fileName_list:
# w, h = getWH(path)
# imgY = getYdata(path, [w, h])
imgY = c_getYdata(path)
imgY = normalize(imgY)
imgY = np.resize(imgY, (1, imgY.shape[0], imgY.shape[1], 1))
original_ycbcr.append([imgY, imgCbCr])
##The input is two directories, including ground truth.
elif len(fileOrDir) == 2:
fileName_list = load_file_list(fileOrDir[0])
test_list = get_train_list(load_file_list(fileOrDir[0]), load_file_list(fileOrDir[1]))
for pair in test_list:
filesize = os.path.getsize(pair[0])
picsize = getWH(pair[0])[0]*getWH(pair[0])[0] * 3 // 2
numFrames = filesize // picsize
# if numFrames ==1:
or_imgY = c_getYdata(pair[0])
gt_imgY = c_getYdata(pair[1])
# normalize
or_imgY = normalize(or_imgY)
or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
## act as a placeholder
or_imgCbCr = 0
original_ycbcr.append([or_imgY, or_imgCbCr])
gt_y.append(gt_imgY)
# else:
# while numFrames>0:
# or_imgY =getOneFrameY(pair[0])
# gt_imgY =getOneFrameY(pair[1])
# # normalize
# or_imgY = normalize(or_imgY)
#
# or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
# gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
#
# ## act as a placeholder
# or_imgCbCr = 0
# original_ycbcr.append([or_imgY, or_imgCbCr])
# gt_y.append(gt_imgY)
else:
print("Invalid Inputs.")
exit(0)
return original_ycbcr, gt_y, fileName_list
class Predict:
input_tensor = None
output_tensor = None
model = None
def __init__(self, model, modelpath):
self.graph = tf.Graph() # create a separate graph for each Predict instance
self.model = model
with self.graph.as_default():
self.input_tensor = tf.placeholder(tf.float32, shape=(1, None, None, 1))
#self.output_tensor = tf.make_template('input_scope', self.model)(self.input_tensor)
self.output_tensor = model(self.input_tensor)
self.output_tensor = tf.clip_by_value(self.output_tensor, 0., 1.)
self.output_tensor = tf.multiply(self.output_tensor, 255)
self.saver = tf.train.Saver()
self.sess = tf.Session(graph=self.graph,config=config) # create a new session for this graph
with self.sess.as_default():
with self.graph.as_default():
self.sess.run(tf.global_variables_initializer())
self.saver.restore(self.sess, modelpath) # restore parameters from the checkpoint
print(modelpath)
def predict(self, fileOrDir):
#print("------------") | if (isinstance(fileOrDir, str)):
original_ycbcr, gt_y, fileName_list = prepare_test_data(fileOrDir)
imgY = original_ycbcr[0][0]
elif type(fileOrDir) is np.ndarray:
imgY = fileOrDir
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
elif (isinstance(fileOrDir, list)):
fileOrDir = np.asarray(fileOrDir, dtype='float32')
# fileOrDir = fileOrDir / 255
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
else:
imgY=None
with self.sess.as_default():
with self.sess.graph.as_default():
out = self.sess.run(self.output_tensor, feed_dict={self.input_tensor: imgY})
out = np.reshape(out, (out.shape[1], out.shape[2]))
out = np.around(out)
out = out.astype('int')
out = out.tolist()
return out
def init(sliceType, QP):
# print("init !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
global cnn17,cnn27,cnn37,cnn47,cnn7,cnn57
cnn7 =Predict(model,model_set["CNN_I_QP7"])
cnn17=Predict(model,model_set["CNN_I_QP17"])
cnn27=Predict(model,model_set["CNN_I_QP27"])
cnn37=Predict(model,model_set["CNN2_I_QP37"])
cnn47=Predict(model,model_set["CNN_I_QP47"])
cnn57 = Predict(model, model_set["CNN_I_QP57"])
def predict(file, QP, frame_type):
global cnn17, cnn27, cnn37, cnn47, cnn7, cnn57
if QP < 17:
R = cnn7.predict(file)
elif 17 <= QP < 27:
R = cnn17.predict(file)
elif 27 <= QP < 37:
R = cnn27.predict(file)
elif 37 <= QP < 47:
R = cnn37.predict(file)
elif 47 <= QP < 57:
R = cnn47.predict(file)
else:
R = cnn57.predict(file)
def showImg(inp):
h, w = inp[0], inp[1]
tem = np.asarray(inp, dtype='uint8')
#np.save(r"H:\KONG\cnn_2K%f" % time.time(),tem)
tem = Image.fromarray(tem, 'L')
tem.show()
#tem.save("D:/rec/FromPython%f.jpg" % time.time())
def test_all_ckpt(modelPath):
low_img = r"test_set\qp37"
# low_img = r"H:\KONG\FRAME_!\AV!_deblock_nocdefLr\QP53"
heigh_img = r"test_set\ori"
NUM_CNN=3 #cnn 次数
original_ycbcr, gt_y, fileName_list = prepare_test_data([low_img,heigh_img])
total_imgs = len(fileName_list)
tem = [f for f in os.listdir(modelPath) if 'data' in f]
ckptFiles = sorted([r.split('.data')[0] for r in tem])
max_psnr=0
max_epoch=0
max_ckpt_psnr = 0
for ckpt in ckptFiles:
cur_ckpt_psnr=0
#epoch = int(ckpt.split('_')[3])
# print(ckpt.split('.')[0].split('_'))
epoch = int(ckpt.split('.')[0].split('_')[-2])
# loss =int(ckpt.split('.')[0].split('_')[-1])
#
# if epoch <1000:
# continue
#print(epoch)
print(os.path.join(modelPath, ckpt))
predictor = Predict(model, os.path.join(modelPath, ckpt))
img_index = [14, 17, 4, 2, 7, 10, 12, 3, 0, 13, 16, 5, 6, 1, 15, 8, 9, 11]
for i in img_index:
# if i>5:
# continue
imgY = original_ycbcr[i][0]
gtY = gt_y[i] if gt_y else 0
#showImg(rec)
#print(np.shape(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])))
#cur_psnr[cnnTime]=psnr(denormalize(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])),np.reshape(gtY, [np.shape(imgY)[1],np.shape(imgY)[2]]))
cur_psnr=[]
rec = predictor.predict(imgY)
cur_psnr.append(psnr(rec,np.reshape(gtY, np.shape(rec))))
for cc in range(2,NUM_CNN+1):
rec = predictor.predict(rec)
cur_psnr.append(psnr(rec, np.reshape(gtY, np.shape(rec))))
# print(cur_psnr)
#print(len(cur_psnr))
cur_ckpt_psnr=cur_ckpt_psnr+np.mean(cur_psnr)
# print(tplt2.format(os.path.basename(fileName_list[i]), cur_psnr,psnr(denormalize(np.reshape(imgY, np.shape(rec))),np.reshape(gtY, np.shape(rec)))))
print("%30s"%os.path.basename(fileName_list[i]),end="")
for cc in cur_psnr:
print(" %.5f"%cc,end="")
print(" %.5f" % np.mean(cur_psnr), end="")
print(" %.5f"%psnr(denormalize(np.reshape(imgY, np.shape(rec))),np.reshape(gtY, np.shape(rec))))
if(cur_ckpt_psnr/total_imgs>max_ckpt_psnr):
max_ckpt_psnr=cur_ckpt_psnr/total_imgs
max_epoch=epoch
print("______________________________________________________________")
print(epoch,cur_ckpt_psnr/total_imgs,max_epoch,max_ckpt_psnr)
def build_trainset():
global mModel
reconDir = r"train_set\hevc_div2k_train_noFilter_yuv"
saveDir=r"train_set\hevc_div2k_train_noFilter_yuv_cnn" #cnn1 cnn2 cnn3
NUM_CNN = 3 #cnn次数
for i in range(1,NUM_CNN+1):
if os.path.exists(saveDir+str(i))==0:
os.mkdir(saveDir+str(i))
mModel = Predict(model,
r"progressive_cnn_filter\models\firstCNN\HEVC\QP37\CNN2\CNN2_I_QP37.ckpt")
for dir in [y for y in os.listdir(reconDir) if os.path.isdir(os.path.join(reconDir, y)) ]:#
# print(dir)
if 36<int(dir.split("qp")[1])<47: #改成模型对应的QP范围
for i in range(1,NUM_CNN+1):
sub_save_dir=os.path.join(saveDir+str(i),dir)
# print(sub_save_dir.replace("cnn1","cnn2"))
if os.path.exists(sub_save_dir)==0:
os.mkdir(sub_save_dir)
file_list=load_file_list(os.path.join(reconDir,dir))
# shuffle(file_list)
for file in file_list:
# print(file)
# exit(0)
# qp=int(dir[-2:])
# print(os.path.join(sub_save_dir ,os.path.basename(file[0])),qp)
# input_img=c_getYdata(file[0])
# qp_img=np.ones(np.shape(input_img),dtype="int8")*qp*4
# input_img=np.dstack((input_img,qp_img))
rec= mModel.predict(c_getYdata(file))
saveImg(os.path.join(saveDir+"1" ,dir, os.path.basename(file)), rec)
for i in range(2,NUM_CNN+1):
rec = mModel.predict(rec)
saveImg(os.path.join(saveDir+str(i),dir, os.path.basename(file)), rec)
if __name__ == '__main__':
# low_img = r"D:\konglingyi\test_result\E19091301\BasketballPass_416x240_50_l.yuv"
# heigh_img=r"D:\konglingyi\test_result\E19091301\BasketballPass_416x240_50_h.yuv"
# init(0,37)
# original_ycbcr, gt_y, fileName_list=prepare_test_data(heigh_img)
# rec1 = predict(low_img, 160,0)
#showImg(rec1)
#rec2 = predict(file, 212,2)
#print(psnr(denormalize(np.reshape(original_ycbcr[0][0],np.shape(rec1))), rec1))
# test_all_ckpt(r"progressive_cnn_filter\models\firstCNN\HEVC\QP37\CNN2")
build_trainset()
# for i in range(1,1):
# print(i) | identifier_name |
|
lib.rs | use byteorder::{ByteOrder, LittleEndian};
use num_derive::FromPrimitive;
use solana_program::{
account_info::next_account_info,
account_info::AccountInfo,
decode_error::DecodeError,
entrypoint,
entrypoint::ProgramResult,
msg,
program_error::ProgramError,
program_pack::{Pack, Sealed},
pubkey::Pubkey,
rent::Rent,
sysvar::{self, Sysvar},
};
use thiserror::Error;
#[derive(Clone, Debug, Eq, Error, FromPrimitive, PartialEq)]
pub enum DiceErr {
#[error("Unexpected Roll Mode")]
UnexpectedRollMode,
#[error("Incrrect Threshold")]
IncorrectThreshold,
#[error("Incorrect Owner")]
IncorrectOwner,
#[error("Account Not Rent Exempt")]
AccountNotRentExempt,
#[error("Account Not Balance Account")]
AccountNotBalanceAccount,
#[error("Not Enough Balance")]
NotEnoughBalance,
#[error("Invalid Bet")]
InvalidBet,
}
impl From<DiceErr> for ProgramError {
fn from(e: DiceErr) -> Self {
ProgramError::Custom(e as u32)
}
}
impl<T> DecodeError<T> for DiceErr {
fn type_of() -> &'static str {
"Dice Error"
}
}
// Instruction data
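// Layout (6 bytes): byte 0 = roll_type (1 = roll under, 2 = roll over),
// byte 1 = threshold (2..=98), bytes 2..=5 = bet_amount as a little-endian u32.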
pub struct Dice {
pub roll_type: u8,
pub threshold: u8,
pub bet_amount: u32,
}
impl Sealed for Dice {}
impl Pack for Dice {
const LEN: usize = 6;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
let roll_type = src[0];
//println!("Roll Type: {}", roll_type);
if roll_type != 1 && roll_type != 2 {
msg!("You should roll under (1) or Roll Over (2)");
return Err(DiceErr::UnexpectedRollMode.into());
}
let threshold = src[1];
//println!("Threshold: {}", threshold);
if threshold < 2 || threshold > 98 {
msg!("Your guess has to in between 2 and 98");
return Err(DiceErr::IncorrectThreshold.into());
}
let bet_amount = LittleEndian::read_u32(&src[2..6]);
//println!("Bet: {}", bet_amount);
Ok(Dice { roll_type, threshold, bet_amount})
}
fn pack_into_slice(&self, _dst: &mut [u8]) {}
}
// Player's Balance structure, which is one 4 byte u32 number
pub struct PlayerBalance {
pub balance: u32,
}
impl Sealed for PlayerBalance {}
impl Pack for PlayerBalance {
const LEN: usize = 4;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
Ok(PlayerBalance {
balance: LittleEndian::read_u32(&src[0..4]),
})
}
fn pack_into_slice(&self, dst: &mut [u8]) {
LittleEndian::write_u32(&mut dst[0..4], self.balance);
}
}
// Prize Pool structure, which is a 4 byte u32 number
pub struct PrizePool {
pub pool_amount: u32,
}
impl Sealed for PrizePool {}
impl Pack for PrizePool {
const LEN: usize = 4;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
Ok(PrizePool {
pool_amount: LittleEndian::read_u32(&src[0..4]),
})
}
fn pack_into_slice(&self, dst: &mut [u8]) {
LittleEndian::write_u32(&mut dst[0..4], self.pool_amount);
}
}
// Declare and export the program's entrypoint
entrypoint!(process_instruction);
// Program entrypoint's implementation
fn process_instruction(
program_id: &Pubkey, // Public key of program account
accounts: &[AccountInfo], // data accounts
instruction_data: &[u8], // First Element: Roll type, Second Element: Threshold, [2..6] Bet Amount
) -> ProgramResult {
msg!("Rust program entrypoint");
// get Dice information
let roll_type = Dice::unpack_unchecked(&instruction_data)?.roll_type;
msg!("Roll Type: {}", roll_type);
let threshold = Dice::unpack_unchecked(&instruction_data)?.threshold;
msg!("Threshold: {}", threshold);
let bet_amount = Dice::unpack_unchecked(&instruction_data)?.bet_amount;
msg!("Bet: {}", bet_amount);
// Iterating accounts is safer than indexing
let accounts_iter = &mut accounts.iter();
// Get the account that holds the Prize Pool
let prize_pool_account = next_account_info(accounts_iter)?;
// The account must be owned by the program in order to modify its data
if prize_pool_account.owner != program_id {
msg!(
"Prize Pool account ({}) not owned by program, actual: {}, expected: {}",
prize_pool_account.key,
prize_pool_account.owner,
program_id
);
return Err(DiceErr::IncorrectOwner.into());
}
// Get the account that holds the balance for the players
let player_balance_account = next_account_info(accounts_iter)?;
// The check account must be owned by the program in order to modify its data
if player_balance_account.owner != program_id {
msg!("Check account not owned by program");
return Err(DiceErr::IncorrectOwner.into());
}
// The account must be rent exempt, i.e. live forever
let sysvar_account = next_account_info(accounts_iter)?;
let rent = &Rent::from_account_info(sysvar_account)?;
if !sysvar::rent::check_id(sysvar_account.key) {
msg!("Rent system account is not rent system account");
return Err(ProgramError::InvalidAccountData);
}
if !rent.is_exempt(player_balance_account.lamports(), player_balance_account.data_len()) {
msg!("Balance account is not rent exempt");
return Err(DiceErr::AccountNotRentExempt.into());
}
// the player
let player_account = next_account_info(accounts_iter)?;
if !player_account.is_signer {
msg!("Player account is not signer");
return Err(ProgramError::MissingRequiredSignature);
}
let expected_check_account_pubkey =
Pubkey::create_with_seed(player_account.key, "checkvote", program_id)?;
if expected_check_account_pubkey != *player_balance_account.key {
msg!("Voter fraud! not the correct balance_account");
return Err(DiceErr::AccountNotBalanceAccount.into());
}
let mut balance_data = player_balance_account.try_borrow_mut_data()?;
// this unpack reads and deserialises the account data and also checks the data is the correct length
let mut player_balance =
PlayerBalance::unpack_unchecked(&balance_data).expect("Failed to read PlayerBalance");
// Handle the bet_amount and the balance
/*if vote_check.voted_for != 0 {
msg!("Voter fraud! You already voted");
return Err(VoteError::AlreadyVoted.into());
}*/
let mut prize_pool_data = prize_pool_account.try_borrow_mut_data()?;
let mut prize_pool =
PrizePool::unpack_unchecked(&prize_pool_data).expect("Failed to read PrizePool");
///////////////////////
// Just for debugging
if player_balance.balance == 0 {
msg!{"Airdrop some money!!!"};
player_balance.balance = 50;
}
if prize_pool.pool_amount == 0 {
msg!{"Airdrop some money!!!"};
prize_pool.pool_amount = 1000;
}
// Check the validity of the bet amount
if bet_amount > player_balance.balance |
if bet_amount == 0 {
msg!("Inavalid Bet");
return Err(DiceErr::InvalidBet.into());
}
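// NOTE: the roll is hardcoded to 20 here, so the outcome is deterministic; a real deployment
// would need a verifiable randomness source. println! output is not part of on-chain program
// logs — msg! is the Solana logging macro.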
let lucky_number:u8 = 20;
println!("Result {}", lucky_number);
let mut win_amount:u32 = 0;
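// Payout rule: a winning roll in the outer ranges (<= 25 or >= 75) pays 2x the bet,
// any other winning roll pays 1x; losses move the bet into the prize pool below.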
if (1 == roll_type && lucky_number <= threshold) || (2 == roll_type && lucky_number >= threshold) {
if lucky_number <= 25 || lucky_number >= 75 {
win_amount = bet_amount as u32 * 2;
msg!("Win: {}", win_amount);
}else{
win_amount = bet_amount as u32;
msg!("Win: {}", win_amount);
}
}
if win_amount == 0 {
prize_pool.pool_amount += bet_amount;
player_balance.balance -= bet_amount;
msg!("You Lose!");
}else{
prize_pool.pool_amount -= win_amount;
player_balance.balance += win_amount;
msg!("You Win!");
}
PrizePool::pack(prize_pool, &mut prize_pool_data).expect("Failed to write Prize Pool");
PlayerBalance::pack(player_balance, &mut balance_data).expect("Failed to write Player Balance");
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use solana_program::instruction::InstructionError::Custom;
use solana_program::{
instruction::{AccountMeta, Instruction},
pubkey::Pubkey,
sysvar,
};
use solana_program_test::*;
use solana_sdk::transaction::TransactionError;
use solana_sdk::{
account::Account,
signature::{Keypair, Signer},
transaction::Transaction,
};
use std::mem;
use self::tokio;
impl From<DiceErr> for TransactionError {
fn from(e: DiceErr) -> Self {
TransactionError::InstructionError(0, Custom(e as u32))
}
}
#[tokio::test]
async fn test_sanity1() {
//++++++++++++++++++++++++++++++++++++
// TEST: place a simple bet
//++++++++++++++++++++++++++++++++++++
let program_id = Pubkey::new_unique();
let mut program_test =
ProgramTest::new("dice", program_id, processor!(process_instruction));
// mock contract data account
let game_key = Pubkey::new_unique();
let mut data: Vec<u8> = vec![0; 4 * mem::size_of::<u8>()];
LittleEndian::write_u32(&mut data[0..4], 1000); // set prize pool to 1000
println!("Prize Pool {:?}", data);
program_test.add_account(
game_key,
Account {
lamports: 60000,
data,
owner: program_id,
executable: false,
rent_epoch: 0,
},
);
// player account
let player_keypair = Keypair::new();
let player_key = player_keypair.pubkey();
// mock player balance_account_data
let balance_key = Pubkey::create_with_seed(&player_key, "checkvote", &program_id).unwrap(); // derived (correctly)
let mut data = vec![0; mem::size_of::<u32>()];
LittleEndian::write_u32(&mut data[0..4], 50); // set storage to 50
program_test.add_account(
balance_key,
Account {
lamports: 1000000,
data,
owner: program_id,
executable: false,
rent_epoch: 0,
},
);
let (mut banks_client, payer, recent_blockhash) = program_test.start().await;
let game_account = banks_client.get_account(game_key).await.unwrap().unwrap();
let prize_bool_amount =
PrizePool::unpack_unchecked(&game_account.data).expect("Failed to read Prize Pool");
assert_eq!(prize_bool_amount.pool_amount, 1000);
// Roll Under
let accounts = vec![
AccountMeta::new(game_key, false),
AccountMeta::new(balance_key, false),
AccountMeta::new_readonly(sysvar::rent::id(), false),
AccountMeta::new(player_key, true),
];
let mut bet = vec![0; 6*mem::size_of::<u8>()];
bet[0] = 1; // Role Under
bet[1] = 30; // Threshold 30
LittleEndian::write_u32(&mut bet[2..6], 10); // Bet 10
println!("Instruction Data {:?}", bet);
let mut transaction = Transaction::new_with_payer(
&[Instruction {
program_id,
accounts,
data: bet,
}],
Some(&payer.pubkey()),
);
transaction.sign(&[&payer, &player_keypair], recent_blockhash);
let a = banks_client.process_transaction(transaction).await;
println!("Test Log {:?}", a);
let game_account = banks_client.get_account(game_key).await.unwrap().unwrap();
let prize_pool_check =
PrizePool::unpack_unchecked(&game_account.data).expect("Failed to read Prize Pool");
assert_eq!(prize_pool_check.pool_amount, 980);
let player = banks_client.get_account(balance_key).await.unwrap().unwrap();
let bal_check =
PlayerBalance::unpack_unchecked(&player.data).expect("Failed to read Balance");
assert_eq!(bal_check.balance, 70);
}
}
| {
msg!("Not Enough Balance");
return Err(DiceErr::NotEnoughBalance.into());
} | conditional_block |
lib.rs | use byteorder::{ByteOrder, LittleEndian};
use num_derive::FromPrimitive;
use solana_program::{
account_info::next_account_info,
account_info::AccountInfo,
decode_error::DecodeError,
entrypoint,
entrypoint::ProgramResult,
msg,
program_error::ProgramError,
program_pack::{Pack, Sealed},
pubkey::Pubkey,
rent::Rent,
sysvar::{self, Sysvar},
};
use thiserror::Error;
#[derive(Clone, Debug, Eq, Error, FromPrimitive, PartialEq)]
pub enum DiceErr {
#[error("Unexpected Roll Mode")]
UnexpectedRollMode,
#[error("Incrrect Threshold")]
IncorrectThreshold,
#[error("Incorrect Owner")]
IncorrectOwner,
#[error("Account Not Rent Exempt")]
AccountNotRentExempt,
#[error("Account Not Balance Account")]
AccountNotBalanceAccount,
#[error("Not Enough Balance")]
NotEnoughBalance,
#[error("Invalid Bet")]
InvalidBet,
}
impl From<DiceErr> for ProgramError {
fn from(e: DiceErr) -> Self {
ProgramError::Custom(e as u32)
}
}
impl<T> DecodeError<T> for DiceErr {
fn type_of() -> &'static str {
"Dice Error"
}
}
// Instruction data
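// Layout (6 bytes): byte 0 = roll_type (1 = roll under, 2 = roll over),
// byte 1 = threshold (2..=98), bytes 2..=5 = bet_amount as a little-endian u32.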
pub struct Dice {
pub roll_type: u8,
pub threshold: u8,
pub bet_amount: u32,
}
impl Sealed for Dice {}
impl Pack for Dice {
const LEN: usize = 6;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
let roll_type = src[0];
//println!("Roll Type: {}", roll_type);
if roll_type != 1 && roll_type != 2 {
msg!("You should roll under (1) or Roll Over (2)");
return Err(DiceErr::UnexpectedRollMode.into());
}
let threshold = src[1];
//println!("Threshold: {}", threshold);
if threshold < 2 || threshold > 98 {
msg!("Your guess has to in between 2 and 98");
return Err(DiceErr::IncorrectThreshold.into());
}
let bet_amount = LittleEndian::read_u32(&src[2..6]);
//println!("Bet: {}", bet_amount);
Ok(Dice { roll_type, threshold, bet_amount})
}
fn pack_into_slice(&self, _dst: &mut [u8]) {}
}
// Player's Balance structure, which is one 4 byte u32 number
pub struct PlayerBalance {
pub balance: u32,
}
impl Sealed for PlayerBalance {}
impl Pack for PlayerBalance {
const LEN: usize = 4;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
Ok(PlayerBalance {
balance: LittleEndian::read_u32(&src[0..4]),
})
}
fn pack_into_slice(&self, dst: &mut [u8]) {
LittleEndian::write_u32(&mut dst[0..4], self.balance);
}
}
// Prize Pool structure, which is a 4 byte u32 number
pub struct PrizePool {
pub pool_amount: u32,
}
impl Sealed for PrizePool {}
impl Pack for PrizePool {
const LEN: usize = 4;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
Ok(PrizePool {
pool_amount: LittleEndian::read_u32(&src[0..4]),
})
}
fn pack_into_slice(&self, dst: &mut [u8]) {
LittleEndian::write_u32(&mut dst[0..4], self.pool_amount);
}
}
// Declare and export the program's entrypoint
entrypoint!(process_instruction);
// Program entrypoint's implementation
fn process_instruction(
program_id: &Pubkey, // Public key of program account
accounts: &[AccountInfo], // data accounts
instruction_data: &[u8], // First Element: Roll type, Second Element: Threshold, [2..6] Bet Amount
) -> ProgramResult {
msg!("Rust program entrypoint");
// get Dice information
let roll_type = Dice::unpack_unchecked(&instruction_data)?.roll_type;
msg!("Roll Type: {}", roll_type);
let threshold = Dice::unpack_unchecked(&instruction_data)?.threshold;
msg!("Threshold: {}", threshold);
let bet_amount = Dice::unpack_unchecked(&instruction_data)?.bet_amount;
msg!("Bet: {}", bet_amount);
// Iterating accounts is safer than indexing
let accounts_iter = &mut accounts.iter();
// Get the account that holds the Prize Pool
let prize_pool_account = next_account_info(accounts_iter)?;
// The account must be owned by the program in order to modify its data
if prize_pool_account.owner != program_id {
msg!(
"Prize Pool account ({}) not owned by program, actual: {}, expected: {}",
prize_pool_account.key,
prize_pool_account.owner,
program_id
);
return Err(DiceErr::IncorrectOwner.into());
}
// Get the account that holds the balance for the players
let player_balance_account = next_account_info(accounts_iter)?;
// The check account must be owned by the program in order to modify its data
if player_balance_account.owner != program_id {
msg!("Check account not owned by program");
return Err(DiceErr::IncorrectOwner.into()); | let sysvar_account = next_account_info(accounts_iter)?;
let rent = &Rent::from_account_info(sysvar_account)?;
if !sysvar::rent::check_id(sysvar_account.key) {
msg!("Rent system account is not rent system account");
return Err(ProgramError::InvalidAccountData);
}
if !rent.is_exempt(player_balance_account.lamports(), player_balance_account.data_len()) {
msg!("Balance account is not rent exempt");
return Err(DiceErr::AccountNotRentExempt.into());
}
// the player
let player_account = next_account_info(accounts_iter)?;
if !player_account.is_signer {
msg!("Player account is not signer");
return Err(ProgramError::MissingRequiredSignature);
}
let expected_check_account_pubkey =
Pubkey::create_with_seed(player_account.key, "checkvote", program_id)?;
if expected_check_account_pubkey != *player_balance_account.key {
msg!("Voter fraud! not the correct balance_account");
return Err(DiceErr::AccountNotBalanceAccount.into());
}
let mut balance_data = player_balance_account.try_borrow_mut_data()?;
// this unpack reads and deserialises the account data and also checks the data is the correct length
let mut player_balance =
PlayerBalance::unpack_unchecked(&balance_data).expect("Failed to read PlayerBalance");
// Handle the bet_amount and the balance
/*if vote_check.voted_for != 0 {
msg!("Voter fraud! You already voted");
return Err(VoteError::AlreadyVoted.into());
}*/
let mut prize_pool_data = prize_pool_account.try_borrow_mut_data()?;
let mut prize_pool =
PrizePool::unpack_unchecked(&prize_pool_data).expect("Failed to read PrizePool");
///////////////////////
// Just for debugging
if player_balance.balance == 0 {
msg!{"Airdrop some money!!!"};
player_balance.balance = 50;
}
if prize_pool.pool_amount == 0 {
msg!{"Airdrop some money!!!"};
prize_pool.pool_amount = 1000;
}
// Check the validity of the bet amount
if bet_amount > player_balance.balance {
msg!("Not Enough Balance");
return Err(DiceErr::NotEnoughBalance.into());
}
if bet_amount == 0 {
msg!("Inavalid Bet");
return Err(DiceErr::InvalidBet.into());
}
let lucky_number:u8 = 20;
println!("Result {}", lucky_number);
let mut win_amount:u32 = 0;
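// Payout rule: a winning roll in the outer ranges (<= 25 or >= 75) pays 2x the bet,
// any other winning roll pays 1x; losses move the bet into the prize pool below.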
if (1 == roll_type && lucky_number <= threshold) || (2 == roll_type && lucky_number >= threshold) {
if lucky_number <= 25 || lucky_number >= 75 {
win_amount = bet_amount as u32 * 2;
msg!("Win: {}", win_amount);
}else{
win_amount = bet_amount as u32;
msg!("Win: {}", win_amount);
}
}
if win_amount == 0 {
prize_pool.pool_amount += bet_amount;
player_balance.balance -= bet_amount;
msg!("You Lose!");
}else{
prize_pool.pool_amount -= win_amount;
player_balance.balance += win_amount;
msg!("You Win!");
}
PrizePool::pack(prize_pool, &mut prize_pool_data).expect("Failed to write Prize Pool");
PlayerBalance::pack(player_balance, &mut balance_data).expect("Failed to write Player Balance");
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use solana_program::instruction::InstructionError::Custom;
use solana_program::{
instruction::{AccountMeta, Instruction},
pubkey::Pubkey,
sysvar,
};
use solana_program_test::*;
use solana_sdk::transaction::TransactionError;
use solana_sdk::{
account::Account,
signature::{Keypair, Signer},
transaction::Transaction,
};
use std::mem;
use self::tokio;
impl From<DiceErr> for TransactionError {
fn from(e: DiceErr) -> Self {
TransactionError::InstructionError(0, Custom(e as u32))
}
}
#[tokio::test]
async fn test_sanity1() {
//++++++++++++++++++++++++++++++++++++
// TEST: place a simple bet
//++++++++++++++++++++++++++++++++++++
let program_id = Pubkey::new_unique();
let mut program_test =
ProgramTest::new("dice", program_id, processor!(process_instruction));
// mock contract data account
let game_key = Pubkey::new_unique();
let mut data: Vec<u8> = vec![0; 4 * mem::size_of::<u8>()];
LittleEndian::write_u32(&mut data[0..4], 1000); // set prize pool to 1000
println!("Prize Pool {:?}", data);
program_test.add_account(
game_key,
Account {
lamports: 60000,
data,
owner: program_id,
executable: false,
rent_epoch: 0,
},
);
// player account
let player_keypair = Keypair::new();
let player_key = player_keypair.pubkey();
// mock player balance_account_data
let balance_key = Pubkey::create_with_seed(&player_key, "checkvote", &program_id).unwrap(); // derived (correctly)
let mut data = vec![0; mem::size_of::<u32>()];
LittleEndian::write_u32(&mut data[0..4], 50); // set storage to 50
program_test.add_account(
balance_key,
Account {
lamports: 1000000,
data,
owner: program_id,
executable: false,
rent_epoch: 0,
},
);
let (mut banks_client, payer, recent_blockhash) = program_test.start().await;
let game_account = banks_client.get_account(game_key).await.unwrap().unwrap();
let prize_bool_amount =
PrizePool::unpack_unchecked(&game_account.data).expect("Failed to read Prize Pool");
assert_eq!(prize_bool_amount.pool_amount, 1000);
// Roll Under
let accounts = vec![
AccountMeta::new(game_key, false),
AccountMeta::new(balance_key, false),
AccountMeta::new_readonly(sysvar::rent::id(), false),
AccountMeta::new(player_key, true),
];
let mut bet = vec![0; 6*mem::size_of::<u8>()];
bet[0] = 1; // Role Under
bet[1] = 30; // Threshold 30
LittleEndian::write_u32(&mut bet[2..6], 10); // Bet 10
println!("Instruction Data {:?}", bet);
let mut transaction = Transaction::new_with_payer(
&[Instruction {
program_id,
accounts,
data: bet,
}],
Some(&payer.pubkey()),
);
transaction.sign(&[&payer, &player_keypair], recent_blockhash);
let a = banks_client.process_transaction(transaction).await;
println!("Test Log {:?}", a);
let game_account = banks_client.get_account(game_key).await.unwrap().unwrap();
let prize_pool_check =
PrizePool::unpack_unchecked(&game_account.data).expect("Failed to read Prize Pool");
assert_eq!(prize_pool_check.pool_amount, 980);
let player = banks_client.get_account(balance_key).await.unwrap().unwrap();
let bal_check =
PlayerBalance::unpack_unchecked(&player.data).expect("Failed to read Balance");
assert_eq!(bal_check.balance, 70);
}
} | }
// The account must be rent exempt, i.e. live forever | random_line_split |
lib.rs | use byteorder::{ByteOrder, LittleEndian};
use num_derive::FromPrimitive;
use solana_program::{
account_info::next_account_info,
account_info::AccountInfo,
decode_error::DecodeError,
entrypoint,
entrypoint::ProgramResult,
msg,
program_error::ProgramError,
program_pack::{Pack, Sealed},
pubkey::Pubkey,
rent::Rent,
sysvar::{self, Sysvar},
};
use thiserror::Error;
#[derive(Clone, Debug, Eq, Error, FromPrimitive, PartialEq)]
pub enum | {
#[error("Unexpected Roll Mode")]
UnexpectedRollMode,
#[error("Incrrect Threshold")]
IncorrectThreshold,
#[error("Incorrect Owner")]
IncorrectOwner,
#[error("Account Not Rent Exempt")]
AccountNotRentExempt,
#[error("Account Not Balance Account")]
AccountNotBalanceAccount,
#[error("Not Enough Balance")]
NotEnoughBalance,
#[error("Invalid Bet")]
InvalidBet,
}
impl From<DiceErr> for ProgramError {
fn from(e: DiceErr) -> Self {
ProgramError::Custom(e as u32)
}
}
impl<T> DecodeError<T> for DiceErr {
fn type_of() -> &'static str {
"Dice Error"
}
}
// Instruction data
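// Layout (6 bytes): byte 0 = roll_type (1 = roll under, 2 = roll over),
// byte 1 = threshold (2..=98), bytes 2..=5 = bet_amount as a little-endian u32.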
pub struct Dice {
pub roll_type: u8,
pub threshold: u8,
pub bet_amount: u32,
}
impl Sealed for Dice {}
impl Pack for Dice {
const LEN: usize = 6;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
let roll_type = src[0];
//println!("Roll Type: {}", roll_type);
if roll_type != 1 && roll_type != 2 {
msg!("You should roll under (1) or Roll Over (2)");
return Err(DiceErr::UnexpectedRollMode.into());
}
let threshold = src[1];
//println!("Threshold: {}", threshold);
if threshold < 2 || threshold > 98 {
msg!("Your guess has to in between 2 and 98");
return Err(DiceErr::IncorrectThreshold.into());
}
let bet_amount = LittleEndian::read_u32(&src[2..6]);
//println!("Bet: {}", bet_amount);
Ok(Dice { roll_type, threshold, bet_amount})
}
fn pack_into_slice(&self, _dst: &mut [u8]) {}
}
// Player's Balance structure, which is one 4 byte u32 number
pub struct PlayerBalance {
pub balance: u32,
}
impl Sealed for PlayerBalance {}
impl Pack for PlayerBalance {
const LEN: usize = 4;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
Ok(PlayerBalance {
balance: LittleEndian::read_u32(&src[0..4]),
})
}
fn pack_into_slice(&self, dst: &mut [u8]) {
LittleEndian::write_u32(&mut dst[0..4], self.balance);
}
}
// Prize Pool structure, which is a 4 byte u32 number
pub struct PrizePool {
pub pool_amount: u32,
}
impl Sealed for PrizePool {}
impl Pack for PrizePool {
const LEN: usize = 4;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
Ok(PrizePool {
pool_amount: LittleEndian::read_u32(&src[0..4]),
})
}
fn pack_into_slice(&self, dst: &mut [u8]) {
LittleEndian::write_u32(&mut dst[0..4], self.pool_amount);
}
}
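// Editor's sketch (not part of the original program): a helper showing how a
// client could assemble the 6-byte instruction data that Dice::unpack_from_slice
// expects -- byte 0: roll_type (1 = roll under, 2 = roll over), byte 1: threshold
// (2..=98), bytes 2..6: bet_amount as a little-endian u32. The helper name
// `encode_dice_instruction_data` is hypothetical.
#[allow(dead_code)]
fn encode_dice_instruction_data(roll_type: u8, threshold: u8, bet_amount: u32) -> Vec<u8> {
    let mut data = vec![0u8; Dice::LEN];
    data[0] = roll_type;
    data[1] = threshold;
    LittleEndian::write_u32(&mut data[2..6], bet_amount);
    data
}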
// Declare and export the program's entrypoint
entrypoint!(process_instruction);
// Program entrypoint's implementation
fn process_instruction(
program_id: &Pubkey, // Public key of program account
accounts: &[AccountInfo], // data accounts
instruction_data: &[u8], // First Element: Roll type, Second Element: Threshold, [2..6] Bet Amount
) -> ProgramResult {
msg!("Rust program entrypoint");
// get Dice information
// Unpack the instruction data once and reuse the fields
let Dice { roll_type, threshold, bet_amount } = Dice::unpack_unchecked(&instruction_data)?;
msg!("Roll Type: {}", roll_type);
msg!("Threshold: {}", threshold);
msg!("Bet: {}", bet_amount);
// Iterating accounts is safer than indexing
let accounts_iter = &mut accounts.iter();
// Get the account that holds the Prize Pool
let prize_pool_account = next_account_info(accounts_iter)?;
// The account must be owned by the program in order to modify its data
if prize_pool_account.owner != program_id {
msg!(
"Prize Pool account ({}) not owned by program, actual: {}, expected: {}",
prize_pool_account.key,
prize_pool_account.owner,
program_id
);
return Err(DiceErr::IncorrectOwner.into());
}
// Get the account that holds the balance for the players
let player_balance_account = next_account_info(accounts_iter)?;
// The balance account must also be owned by the program in order to modify its data
if player_balance_account.owner != program_id {
msg!("Balance account not owned by program");
return Err(DiceErr::IncorrectOwner.into());
}
// The account must be rent exempt, i.e. live forever
let sysvar_account = next_account_info(accounts_iter)?;
let rent = &Rent::from_account_info(sysvar_account)?;
if !sysvar::rent::check_id(sysvar_account.key) {
msg!("Provided rent account is not the rent sysvar account");
return Err(ProgramError::InvalidAccountData);
}
if !rent.is_exempt(player_balance_account.lamports(), player_balance_account.data_len()) {
msg!("Balance account is not rent exempt");
return Err(DiceErr::AccountNotRentExempt.into());
}
// the player
let player_account = next_account_info(accounts_iter)?;
if !player_account.is_signer {
msg!("Player account is not signer");
return Err(ProgramError::MissingRequiredSignature);
}
let expected_check_account_pubkey =
Pubkey::create_with_seed(player_account.key, "checkvote", program_id)?;
if expected_check_account_pubkey != *player_balance_account.key {
msg!("Derived balance account does not match the provided account");
return Err(DiceErr::AccountNotBalanceAccount.into());
}
let mut balance_data = player_balance_account.try_borrow_mut_data()?;
// this unpack reads and deserialises the account data and also checks the data is the correct length
let mut player_balance =
PlayerBalance::unpack_unchecked(&balance_data).expect("Failed to read PlayerBalance");
// Handle the bet_amount and the balance
/*if vote_check.voted_for != 0 {
msg!("Voter fraud! You already voted");
return Err(VoteError::AlreadyVoted.into());
}*/
let mut prize_pool_data = prize_pool_account.try_borrow_mut_data()?;
let mut prize_pool =
PrizePool::unpack_unchecked(&prize_pool_data).expect("Failed to read PrizePool");
///////////////////////
// Just for debug: seed the accounts with funds if they start empty
if player_balance.balance == 0 {
msg!{"Airdrop some money!!!"};
player_balance.balance = 50;
}
if prize_pool.pool_amount == 0 {
msg!{"Airdrop some money!!!"};
prize_pool.pool_amount = 1000;
}
// Check the valid of the bet amount
if bet_amount > player_balance.balance {
msg!("Not Enough Balance");
return Err(DiceErr::NotEnoughBalance.into());
}
if bet_amount == 0 {
msg!("Invalid Bet");
return Err(DiceErr::InvalidBet.into());
}
// The winning number is hardcoded here, which keeps the tests deterministic;
// a real program would need an on-chain source of randomness.
let lucky_number: u8 = 20;
msg!("Result {}", lucky_number);
let mut win_amount:u32 = 0;
if (1 == roll_type && lucky_number <= threshold) || (2 == roll_type && lucky_number >= threshold) {
if lucky_number <= 25 || lucky_number >= 75 {
win_amount = bet_amount as u32 * 2;
msg!("Win: {}", win_amount);
}else{
win_amount = bet_amount as u32;
msg!("Win: {}", win_amount);
}
}
if win_amount == 0 {
prize_pool.pool_amount += bet_amount;
player_balance.balance -= bet_amount;
msg!("You Lose!");
}else{
prize_pool.pool_amount -= win_amount;
player_balance.balance += win_amount;
msg!("You Win!");
}
PrizePool::pack(prize_pool, &mut prize_pool_data).expect("Failed to write Prize Pool");
PlayerBalance::pack(player_balance, &mut balance_data).expect("Failed to write Player Balance");
Ok(())
}
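// Editor's sketch (not part of the original program): how a client derives the
// player's balance account address so it matches the create_with_seed check in
// process_instruction. The seed string must be exactly "checkvote"; the helper
// name `derive_balance_account` is hypothetical.
#[allow(dead_code)]
fn derive_balance_account(player: &Pubkey, program_id: &Pubkey) -> Result<Pubkey, ProgramError> {
    // Same derivation the program and the test use for the balance account.
    let balance_key = Pubkey::create_with_seed(player, "checkvote", program_id)?;
    Ok(balance_key)
}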
#[cfg(test)]
mod test {
use super::*;
use solana_program::instruction::InstructionError::Custom;
use solana_program::{
instruction::{AccountMeta, Instruction},
pubkey::Pubkey,
sysvar,
};
use solana_program_test::*;
use solana_sdk::transaction::TransactionError;
use solana_sdk::{
account::Account,
signature::{Keypair, Signer},
transaction::Transaction,
};
use std::mem;
use self::tokio;
impl From<DiceErr> for TransactionError {
fn from(e: DiceErr) -> Self {
TransactionError::InstructionError(0, Custom(e as u32))
}
}
#[tokio::test]
async fn test_sanity1() {
//++++++++++++++++++++++++++++++++++++
// TEST: Simply place a bet
//++++++++++++++++++++++++++++++++++++
let program_id = Pubkey::new_unique();
let mut program_test =
ProgramTest::new("dice", program_id, processor!(process_instruction));
// mock contract data account
let game_key = Pubkey::new_unique();
let mut data: Vec<u8> = vec![0; 4 * mem::size_of::<u8>()];
LittleEndian::write_u32(&mut data[0..4], 1000); // set prize pool to 1000
println!("Prize Pool {:?}", data);
program_test.add_account(
game_key,
Account {
lamports: 60000,
data,
owner: program_id,
executable: false,
rent_epoch: 0,
},
);
// player account
let player_keypair = Keypair::new();
let player_key = player_keypair.pubkey();
// mock player balance_account_data
let balance_key = Pubkey::create_with_seed(&player_key, "checkvote", &program_id).unwrap(); // derived (correctly)
let mut data = vec![0; mem::size_of::<u32>()];
LittleEndian::write_u32(&mut data[0..4], 50); // set storage to 50
program_test.add_account(
balance_key,
Account {
lamports: 1000000,
data,
owner: program_id,
executable: false,
rent_epoch: 0,
},
);
let (mut banks_client, payer, recent_blockhash) = program_test.start().await;
let game_account = banks_client.get_account(game_key).await.unwrap().unwrap();
let prize_pool_amount =
PrizePool::unpack_unchecked(&game_account.data).expect("Failed to read Prize Pool");
assert_eq!(prize_pool_amount.pool_amount, 1000);
// Roll Under
let accounts = vec![
AccountMeta::new(game_key, false),
AccountMeta::new(balance_key, false),
AccountMeta::new_readonly(sysvar::rent::id(), false),
AccountMeta::new(player_key, true),
];
let mut bet = vec![0; 6*mem::size_of::<u8>()];
bet[0] = 1; // Roll Under
bet[1] = 30; // Threshold 30
LittleEndian::write_u32(&mut bet[2..6], 10); // Bet 10
println!("Instruction Data {:?}", bet);
let mut transaction = Transaction::new_with_payer(
&[Instruction {
program_id,
accounts,
data: bet,
}],
Some(&payer.pubkey()),
);
transaction.sign(&[&payer, &player_keypair], recent_blockhash);
let a = banks_client.process_transaction(transaction).await;
println!("Test Log {:?}", a);
let game_account = banks_client.get_account(game_key).await.unwrap().unwrap();
let prize_pool_check =
PrizePool::unpack_unchecked(&game_account.data).expect("Failed to read Prize Pool");
assert_eq!(prize_pool_check.pool_amount, 980);
let player = banks_client.get_account(balance_key).await.unwrap().unwrap();
let bal_check =
PlayerBalance::unpack_unchecked(&player.data).expect("Failed to read Balance");
assert_eq!(bal_check.balance, 70);
}
}