file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
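The rows that follow are fill-in-the-middle (FIM) examples: each source file is split into a prefix, a hidden middle, and a suffix, and fim_type (identifier_name, identifier_body, random_line_split, or conditional_block in this sample) records how the hole was chosen. Below is a minimal sketch of that relationship, assuming each row is exposed as a dict keyed by the column names above; the `reassemble` helper and the abridged example values are ours, not part of the dataset.

```python
# Minimal sketch (assumption: a row is a dict keyed by the column names above).
# The original file is recovered by concatenating prefix + middle + suffix;
# fim_type only records how the hidden middle was chosen.
def reassemble(row: dict) -> str:
    """Recover the original source text from one FIM row."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Abridged, illustrative values taken from the first row below (decl.go).
example_row = {
    "file_name": "decl.go",
    "prefix": "func ",
    "middle": "pkgNameOf",
    "suffix": "(info *types2.Info, decl *syntax.ImportDecl) *types2.PkgName { ... }",
    "fim_type": "identifier_name",
}
print(reassemble(example_row))  # -> func pkgNameOf(info *types2.Info, ...
```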
decl.go | // Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package noder
import (
"go/constant"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/compile/internal/types2"
)
// TODO(mdempsky): Skip blank declarations? Probably only safe
// for declarations without pragmas.
func (g *irgen) decls(res *ir.Nodes, decls []syntax.Decl) {
for _, decl := range decls {
switch decl := decl.(type) {
case *syntax.ConstDecl:
g.constDecl(res, decl)
case *syntax.FuncDecl:
g.funcDecl(res, decl)
case *syntax.TypeDecl:
if ir.CurFunc == nil {
continue // already handled in irgen.generate
}
g.typeDecl(res, decl)
case *syntax.VarDecl:
g.varDecl(res, decl)
default:
g.unhandled("declaration", decl)
}
}
}
func (g *irgen) importDecl(p *noder, decl *syntax.ImportDecl) {
g.pragmaFlags(decl.Pragma, 0)
// Get the imported package's path, as resolved already by types2
// and gcimporter. This is the same path as would be computed by
// parseImportPath.
switch pkgNameOf(g.info, decl).Imported().Path() {
case "unsafe":
p.importedUnsafe = true
case "embed":
p.importedEmbed = true
}
}
// pkgNameOf returns the PkgName associated with the given ImportDecl.
func | (info *types2.Info, decl *syntax.ImportDecl) *types2.PkgName {
if name := decl.LocalPkgName; name != nil {
return info.Defs[name].(*types2.PkgName)
}
return info.Implicits[decl].(*types2.PkgName)
}
func (g *irgen) constDecl(out *ir.Nodes, decl *syntax.ConstDecl) {
g.pragmaFlags(decl.Pragma, 0)
for _, name := range decl.NameList {
name, obj := g.def(name)
// For untyped numeric constants, make sure the value
// representation matches what the rest of the
// compiler (really just iexport) expects.
// TODO(mdempsky): Revisit after #43891 is resolved.
val := obj.(*types2.Const).Val()
switch name.Type() {
case types.UntypedInt, types.UntypedRune:
val = constant.ToInt(val)
case types.UntypedFloat:
val = constant.ToFloat(val)
case types.UntypedComplex:
val = constant.ToComplex(val)
}
name.SetVal(val)
out.Append(ir.NewDecl(g.pos(decl), ir.ODCLCONST, name))
}
}
func (g *irgen) funcDecl(out *ir.Nodes, decl *syntax.FuncDecl) {
assert(g.curDecl == "")
// Set g.curDecl to the function name, as context for the type params declared
// during types2-to-types1 translation if this is a generic function.
g.curDecl = decl.Name.Value
obj2 := g.info.Defs[decl.Name]
recv := types2.AsSignature(obj2.Type()).Recv()
if recv != nil {
t2 := deref2(recv.Type())
// This is a method, so set g.curDecl to recvTypeName.methName instead.
g.curDecl = t2.(*types2.Named).Obj().Name() + "." + g.curDecl
}
fn := ir.NewFunc(g.pos(decl))
fn.Nname, _ = g.def(decl.Name)
fn.Nname.Func = fn
fn.Nname.Defn = fn
fn.Pragma = g.pragmaFlags(decl.Pragma, funcPragmas)
if fn.Pragma&ir.Systemstack != 0 && fn.Pragma&ir.Nosplit != 0 {
base.ErrorfAt(fn.Pos(), "go:nosplit and go:systemstack cannot be combined")
}
if fn.Pragma&ir.Nointerface != 0 {
// Propagate //go:nointerface from Func.Pragma to Field.Nointerface.
// This is a bit roundabout, but this is the earliest point where we've
// processed the function's pragma flags, and we've also already created
// the Fields to represent the receiver's method set.
if recv := fn.Type().Recv(); recv != nil {
typ := types.ReceiverBaseType(recv.Type)
if orig := typ.OrigType(); orig != nil {
// For a generic method, we mark the methods on the
// base generic type, since those are the methods
// that will be stenciled.
typ = orig
}
meth := typecheck.Lookdot1(fn, typecheck.Lookup(decl.Name.Value), typ, typ.Methods(), 0)
meth.SetNointerface(true)
}
}
if decl.Body != nil && fn.Pragma&ir.Noescape != 0 {
base.ErrorfAt(fn.Pos(), "can only use //go:noescape with external func implementations")
}
if decl.Name.Value == "init" && decl.Recv == nil {
g.target.Inits = append(g.target.Inits, fn)
}
saveHaveEmbed := g.haveEmbed
saveCurDecl := g.curDecl
g.curDecl = ""
g.later(func() {
defer func(b bool, s string) {
// Revert haveEmbed and curDecl back to what they were before
// the "later" function.
g.haveEmbed = b
g.curDecl = s
}(g.haveEmbed, g.curDecl)
// Set haveEmbed and curDecl to what they were for this funcDecl.
g.haveEmbed = saveHaveEmbed
g.curDecl = saveCurDecl
if fn.Type().HasTParam() {
g.topFuncIsGeneric = true
}
g.funcBody(fn, decl.Recv, decl.Type, decl.Body)
g.topFuncIsGeneric = false
if fn.Type().HasTParam() && fn.Body != nil {
// Set pointers to the dcls/body of a generic function/method in
// the Inl struct, so it is marked for export, is available for
// stenciling, and works with Inline_Flood().
fn.Inl = &ir.Inline{
Cost: 1,
Dcl: fn.Dcl,
Body: fn.Body,
}
}
out.Append(fn)
})
}
func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
// Set the position for any error messages we might print (e.g. too large types).
base.Pos = g.pos(decl)
assert(ir.CurFunc != nil || g.curDecl == "")
// Set g.curDecl to the type name, as context for the type params declared
// during types2-to-types1 translation if this is a generic type.
saveCurDecl := g.curDecl
g.curDecl = decl.Name.Value
if decl.Alias {
name, _ := g.def(decl.Name)
g.pragmaFlags(decl.Pragma, 0)
assert(name.Alias()) // should be set by irgen.obj
out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name))
g.curDecl = ""
return
}
// Prevent size calculations until we set the underlying type.
types.DeferCheckSize()
name, obj := g.def(decl.Name)
ntyp, otyp := name.Type(), obj.Type()
if ir.CurFunc != nil {
ntyp.SetVargen()
}
pragmas := g.pragmaFlags(decl.Pragma, typePragmas)
name.SetPragma(pragmas) // TODO(mdempsky): Is this still needed?
if pragmas&ir.NotInHeap != 0 {
ntyp.SetNotInHeap(true)
}
// We need to use g.typeExpr(decl.Type) here to ensure that for
// chained, defined-type declarations like:
//
// type T U
//
// //go:notinheap
// type U struct { … }
//
// we mark both T and U as NotInHeap. If we instead used just
// g.typ(otyp.Underlying()), then we'd instead set T's underlying
// type directly to the struct type (which is not marked NotInHeap)
// and fail to mark T as NotInHeap.
//
// Also, we rely here on Type.SetUnderlying allowing passing a
// defined type and handling forward references like from T to U
// above. Contrast with go/types's Named.SetUnderlying, which
// disallows this.
//
// [mdempsky: Subtleties like these are why I always vehemently
// object to new type pragmas.]
ntyp.SetUnderlying(g.typeExpr(decl.Type))
tparams := otyp.(*types2.Named).TypeParams()
if n := tparams.Len(); n > 0 {
rparams := make([]*types.Type, n)
for i := range rparams {
rparams[i] = g.typ(tparams.At(i))
}
// This will set hasTParam flag if any rparams are not concrete types.
ntyp.SetRParams(rparams)
}
types.ResumeCheckSize()
g.curDecl = saveCurDecl
if otyp, ok := otyp.(*types2.Named); ok && otyp.NumMethods() != 0 {
methods := make([]*types.Field, otyp.NumMethods())
for i := range methods {
m := otyp.Method(i)
// Set g.curDecl to recvTypeName.methName, as context for the
// method-specific type params in the receiver.
g.curDecl = decl.Name.Value + "." + m.Name()
meth := g.obj(m)
methods[i] = types.NewField(meth.Pos(), g.selector(m), meth.Type())
methods[i].Nname = meth
g.curDecl = ""
}
ntyp.Methods().Set(methods)
}
out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name))
}
func (g *irgen) varDecl(out *ir.Nodes, decl *syntax.VarDecl) {
pos := g.pos(decl)
// Set the position for any error messages we might print (e.g. too large types).
base.Pos = pos
names := make([]*ir.Name, len(decl.NameList))
for i, name := range decl.NameList {
names[i], _ = g.def(name)
}
if decl.Pragma != nil {
pragma := decl.Pragma.(*pragmas)
varEmbed(g.makeXPos, names[0], decl, pragma, g.haveEmbed)
g.reportUnused(pragma)
}
haveEmbed := g.haveEmbed
do := func() {
defer func(b bool) { g.haveEmbed = b }(g.haveEmbed)
g.haveEmbed = haveEmbed
values := g.exprList(decl.Values)
var as2 *ir.AssignListStmt
if len(values) != 0 && len(names) != len(values) {
as2 = ir.NewAssignListStmt(pos, ir.OAS2, make([]ir.Node, len(names)), values)
}
for i, name := range names {
if ir.CurFunc != nil {
out.Append(ir.NewDecl(pos, ir.ODCL, name))
}
if as2 != nil {
as2.Lhs[i] = name
name.Defn = as2
} else {
as := ir.NewAssignStmt(pos, name, nil)
if len(values) != 0 {
as.Y = values[i]
name.Defn = as
} else if ir.CurFunc == nil {
name.Defn = as
}
if !g.delayTransform() {
lhs := []ir.Node{as.X}
rhs := []ir.Node{}
if as.Y != nil {
rhs = []ir.Node{as.Y}
}
transformAssign(as, lhs, rhs)
as.X = lhs[0]
if as.Y != nil {
as.Y = rhs[0]
}
}
as.SetTypecheck(1)
out.Append(as)
}
}
if as2 != nil {
if !g.delayTransform() {
transformAssign(as2, as2.Lhs, as2.Rhs)
}
as2.SetTypecheck(1)
out.Append(as2)
}
}
// If we're within a function, we need to process the assignment
// part of the variable declaration right away. Otherwise, we leave
// it to be handled after all top-level declarations are processed.
if ir.CurFunc != nil {
do()
} else {
g.later(do)
}
}
// pragmaFlags returns any specified pragma flags included in allowed,
// and reports errors about any other, unexpected pragmas.
func (g *irgen) pragmaFlags(pragma syntax.Pragma, allowed ir.PragmaFlag) ir.PragmaFlag {
if pragma == nil {
return 0
}
p := pragma.(*pragmas)
present := p.Flag & allowed
p.Flag &^= allowed
g.reportUnused(p)
return present
}
// reportUnused reports errors about any unused pragmas.
func (g *irgen) reportUnused(pragma *pragmas) {
for _, pos := range pragma.Pos {
if pos.Flag&pragma.Flag != 0 {
base.ErrorfAt(g.makeXPos(pos.Pos), "misplaced compiler directive")
}
}
if len(pragma.Embeds) > 0 {
for _, e := range pragma.Embeds {
base.ErrorfAt(g.makeXPos(e.Pos), "misplaced go:embed directive")
}
}
}
| pkgNameOf | identifier_name |
decl.go | // Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package noder
import (
"go/constant"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/compile/internal/types2"
)
// TODO(mdempsky): Skip blank declarations? Probably only safe
// for declarations without pragmas.
func (g *irgen) decls(res *ir.Nodes, decls []syntax.Decl) {
for _, decl := range decls {
switch decl := decl.(type) {
case *syntax.ConstDecl:
g.constDecl(res, decl)
case *syntax.FuncDecl:
g.funcDecl(res, decl)
case *syntax.TypeDecl:
if ir.CurFunc == nil {
continue // already handled in irgen.generate
}
g.typeDecl(res, decl)
case *syntax.VarDecl:
g.varDecl(res, decl)
default:
g.unhandled("declaration", decl)
}
}
}
func (g *irgen) importDecl(p *noder, decl *syntax.ImportDecl) {
g.pragmaFlags(decl.Pragma, 0)
// Get the imported package's path, as resolved already by types2
// and gcimporter. This is the same path as would be computed by
// parseImportPath.
switch pkgNameOf(g.info, decl).Imported().Path() {
case "unsafe":
p.importedUnsafe = true
case "embed":
p.importedEmbed = true
}
}
// pkgNameOf returns the PkgName associated with the given ImportDecl.
func pkgNameOf(info *types2.Info, decl *syntax.ImportDecl) *types2.PkgName {
if name := decl.LocalPkgName; name != nil {
return info.Defs[name].(*types2.PkgName)
}
return info.Implicits[decl].(*types2.PkgName)
}
func (g *irgen) constDecl(out *ir.Nodes, decl *syntax.ConstDecl) {
g.pragmaFlags(decl.Pragma, 0)
for _, name := range decl.NameList {
name, obj := g.def(name)
// For untyped numeric constants, make sure the value
// representation matches what the rest of the
// compiler (really just iexport) expects.
// TODO(mdempsky): Revisit after #43891 is resolved.
val := obj.(*types2.Const).Val()
switch name.Type() {
case types.UntypedInt, types.UntypedRune:
val = constant.ToInt(val)
case types.UntypedFloat:
val = constant.ToFloat(val)
case types.UntypedComplex:
val = constant.ToComplex(val)
}
name.SetVal(val)
out.Append(ir.NewDecl(g.pos(decl), ir.ODCLCONST, name))
}
}
func (g *irgen) funcDecl(out *ir.Nodes, decl *syntax.FuncDecl) {
assert(g.curDecl == "")
// Set g.curDecl to the function name, as context for the type params declared
// during types2-to-types1 translation if this is a generic function.
g.curDecl = decl.Name.Value
obj2 := g.info.Defs[decl.Name]
recv := types2.AsSignature(obj2.Type()).Recv()
if recv != nil {
t2 := deref2(recv.Type())
// This is a method, so set g.curDecl to recvTypeName.methName instead.
g.curDecl = t2.(*types2.Named).Obj().Name() + "." + g.curDecl
}
fn := ir.NewFunc(g.pos(decl))
fn.Nname, _ = g.def(decl.Name)
fn.Nname.Func = fn
fn.Nname.Defn = fn
fn.Pragma = g.pragmaFlags(decl.Pragma, funcPragmas)
if fn.Pragma&ir.Systemstack != 0 && fn.Pragma&ir.Nosplit != 0 {
base.ErrorfAt(fn.Pos(), "go:nosplit and go:systemstack cannot be combined")
}
if fn.Pragma&ir.Nointerface != 0 {
// Propagate //go:nointerface from Func.Pragma to Field.Nointerface.
// This is a bit roundabout, but this is the earliest point where we've
// processed the function's pragma flags, and we've also already created
// the Fields to represent the receiver's method set.
if recv := fn.Type().Recv(); recv != nil {
typ := types.ReceiverBaseType(recv.Type)
if orig := typ.OrigType(); orig != nil {
// For a generic method, we mark the methods on the
// base generic type, since those are the methods
// that will be stenciled.
typ = orig
}
meth := typecheck.Lookdot1(fn, typecheck.Lookup(decl.Name.Value), typ, typ.Methods(), 0)
meth.SetNointerface(true)
}
}
if decl.Body != nil && fn.Pragma&ir.Noescape != 0 {
base.ErrorfAt(fn.Pos(), "can only use //go:noescape with external func implementations")
}
if decl.Name.Value == "init" && decl.Recv == nil {
g.target.Inits = append(g.target.Inits, fn)
}
saveHaveEmbed := g.haveEmbed
saveCurDecl := g.curDecl
g.curDecl = ""
g.later(func() {
defer func(b bool, s string) {
// Revert haveEmbed and curDecl back to what they were before
// the "later" function.
g.haveEmbed = b
g.curDecl = s
}(g.haveEmbed, g.curDecl)
// Set haveEmbed and curDecl to what they were for this funcDecl.
g.haveEmbed = saveHaveEmbed
g.curDecl = saveCurDecl
if fn.Type().HasTParam() {
g.topFuncIsGeneric = true
}
g.funcBody(fn, decl.Recv, decl.Type, decl.Body)
g.topFuncIsGeneric = false
if fn.Type().HasTParam() && fn.Body != nil {
// Set pointers to the dcls/body of a generic function/method in
// the Inl struct, so it is marked for export, is available for
// stenciling, and works with Inline_Flood().
fn.Inl = &ir.Inline{
Cost: 1,
Dcl: fn.Dcl,
Body: fn.Body,
}
}
out.Append(fn)
})
}
func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
// Set the position for any error messages we might print (e.g. too large types).
base.Pos = g.pos(decl)
assert(ir.CurFunc != nil || g.curDecl == "")
// Set g.curDecl to the type name, as context for the type params declared
// during types2-to-types1 translation if this is a generic type.
saveCurDecl := g.curDecl
g.curDecl = decl.Name.Value
if decl.Alias {
name, _ := g.def(decl.Name)
g.pragmaFlags(decl.Pragma, 0)
assert(name.Alias()) // should be set by irgen.obj
out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name))
g.curDecl = ""
return
}
// Prevent size calculations until we set the underlying type.
types.DeferCheckSize()
name, obj := g.def(decl.Name)
ntyp, otyp := name.Type(), obj.Type()
if ir.CurFunc != nil {
ntyp.SetVargen()
}
pragmas := g.pragmaFlags(decl.Pragma, typePragmas)
name.SetPragma(pragmas) // TODO(mdempsky): Is this still needed?
if pragmas&ir.NotInHeap != 0 { | //
// type T U
//
// //go:notinheap
// type U struct { … }
//
// we mark both T and U as NotInHeap. If we instead used just
// g.typ(otyp.Underlying()), then we'd instead set T's underlying
// type directly to the struct type (which is not marked NotInHeap)
// and fail to mark T as NotInHeap.
//
// Also, we rely here on Type.SetUnderlying allowing passing a
// defined type and handling forward references like from T to U
// above. Contrast with go/types's Named.SetUnderlying, which
// disallows this.
//
// [mdempsky: Subtleties like these are why I always vehemently
// object to new type pragmas.]
ntyp.SetUnderlying(g.typeExpr(decl.Type))
tparams := otyp.(*types2.Named).TypeParams()
if n := tparams.Len(); n > 0 {
rparams := make([]*types.Type, n)
for i := range rparams {
rparams[i] = g.typ(tparams.At(i))
}
// This will set hasTParam flag if any rparams are not concrete types.
ntyp.SetRParams(rparams)
}
types.ResumeCheckSize()
g.curDecl = saveCurDecl
if otyp, ok := otyp.(*types2.Named); ok && otyp.NumMethods() != 0 {
methods := make([]*types.Field, otyp.NumMethods())
for i := range methods {
m := otyp.Method(i)
// Set g.curDecl to recvTypeName.methName, as context for the
// method-specific type params in the receiver.
g.curDecl = decl.Name.Value + "." + m.Name()
meth := g.obj(m)
methods[i] = types.NewField(meth.Pos(), g.selector(m), meth.Type())
methods[i].Nname = meth
g.curDecl = ""
}
ntyp.Methods().Set(methods)
}
out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name))
}
func (g *irgen) varDecl(out *ir.Nodes, decl *syntax.VarDecl) {
pos := g.pos(decl)
// Set the position for any error messages we might print (e.g. too large types).
base.Pos = pos
names := make([]*ir.Name, len(decl.NameList))
for i, name := range decl.NameList {
names[i], _ = g.def(name)
}
if decl.Pragma != nil {
pragma := decl.Pragma.(*pragmas)
varEmbed(g.makeXPos, names[0], decl, pragma, g.haveEmbed)
g.reportUnused(pragma)
}
haveEmbed := g.haveEmbed
do := func() {
defer func(b bool) { g.haveEmbed = b }(g.haveEmbed)
g.haveEmbed = haveEmbed
values := g.exprList(decl.Values)
var as2 *ir.AssignListStmt
if len(values) != 0 && len(names) != len(values) {
as2 = ir.NewAssignListStmt(pos, ir.OAS2, make([]ir.Node, len(names)), values)
}
for i, name := range names {
if ir.CurFunc != nil {
out.Append(ir.NewDecl(pos, ir.ODCL, name))
}
if as2 != nil {
as2.Lhs[i] = name
name.Defn = as2
} else {
as := ir.NewAssignStmt(pos, name, nil)
if len(values) != 0 {
as.Y = values[i]
name.Defn = as
} else if ir.CurFunc == nil {
name.Defn = as
}
if !g.delayTransform() {
lhs := []ir.Node{as.X}
rhs := []ir.Node{}
if as.Y != nil {
rhs = []ir.Node{as.Y}
}
transformAssign(as, lhs, rhs)
as.X = lhs[0]
if as.Y != nil {
as.Y = rhs[0]
}
}
as.SetTypecheck(1)
out.Append(as)
}
}
if as2 != nil {
if !g.delayTransform() {
transformAssign(as2, as2.Lhs, as2.Rhs)
}
as2.SetTypecheck(1)
out.Append(as2)
}
}
// If we're within a function, we need to process the assignment
// part of the variable declaration right away. Otherwise, we leave
// it to be handled after all top-level declarations are processed.
if ir.CurFunc != nil {
do()
} else {
g.later(do)
}
}
// pragmaFlags returns any specified pragma flags included in allowed,
// and reports errors about any other, unexpected pragmas.
func (g *irgen) pragmaFlags(pragma syntax.Pragma, allowed ir.PragmaFlag) ir.PragmaFlag {
if pragma == nil {
return 0
}
p := pragma.(*pragmas)
present := p.Flag & allowed
p.Flag &^= allowed
g.reportUnused(p)
return present
}
// reportUnused reports errors about any unused pragmas.
func (g *irgen) reportUnused(pragma *pragmas) {
for _, pos := range pragma.Pos {
if pos.Flag&pragma.Flag != 0 {
base.ErrorfAt(g.makeXPos(pos.Pos), "misplaced compiler directive")
}
}
if len(pragma.Embeds) > 0 {
for _, e := range pragma.Embeds {
base.ErrorfAt(g.makeXPos(e.Pos), "misplaced go:embed directive")
}
}
} | ntyp.SetNotInHeap(true)
}
// We need to use g.typeExpr(decl.Type) here to ensure that for
// chained, defined-type declarations like: | random_line_split |
decl.go | // Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package noder
import (
"go/constant"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/compile/internal/types2"
)
// TODO(mdempsky): Skip blank declarations? Probably only safe
// for declarations without pragmas.
func (g *irgen) decls(res *ir.Nodes, decls []syntax.Decl) {
for _, decl := range decls {
switch decl := decl.(type) {
case *syntax.ConstDecl:
g.constDecl(res, decl)
case *syntax.FuncDecl:
g.funcDecl(res, decl)
case *syntax.TypeDecl:
if ir.CurFunc == nil {
continue // already handled in irgen.generate
}
g.typeDecl(res, decl)
case *syntax.VarDecl:
g.varDecl(res, decl)
default:
g.unhandled("declaration", decl)
}
}
}
func (g *irgen) importDecl(p *noder, decl *syntax.ImportDecl) {
g.pragmaFlags(decl.Pragma, 0)
// Get the imported package's path, as resolved already by types2
// and gcimporter. This is the same path as would be computed by
// parseImportPath.
switch pkgNameOf(g.info, decl).Imported().Path() {
case "unsafe":
p.importedUnsafe = true
case "embed":
p.importedEmbed = true
}
}
// pkgNameOf returns the PkgName associated with the given ImportDecl.
func pkgNameOf(info *types2.Info, decl *syntax.ImportDecl) *types2.PkgName {
if name := decl.LocalPkgName; name != nil {
return info.Defs[name].(*types2.PkgName)
}
return info.Implicits[decl].(*types2.PkgName)
}
func (g *irgen) constDecl(out *ir.Nodes, decl *syntax.ConstDecl) {
g.pragmaFlags(decl.Pragma, 0)
for _, name := range decl.NameList {
name, obj := g.def(name)
// For untyped numeric constants, make sure the value
// representation matches what the rest of the
// compiler (really just iexport) expects.
// TODO(mdempsky): Revisit after #43891 is resolved.
val := obj.(*types2.Const).Val()
switch name.Type() {
case types.UntypedInt, types.UntypedRune:
val = constant.ToInt(val)
case types.UntypedFloat:
val = constant.ToFloat(val)
case types.UntypedComplex:
val = constant.ToComplex(val)
}
name.SetVal(val)
out.Append(ir.NewDecl(g.pos(decl), ir.ODCLCONST, name))
}
}
func (g *irgen) funcDecl(out *ir.Nodes, decl *syntax.FuncDecl) {
assert(g.curDecl == "")
// Set g.curDecl to the function name, as context for the type params declared
// during types2-to-types1 translation if this is a generic function.
g.curDecl = decl.Name.Value
obj2 := g.info.Defs[decl.Name]
recv := types2.AsSignature(obj2.Type()).Recv()
if recv != nil {
t2 := deref2(recv.Type())
// This is a method, so set g.curDecl to recvTypeName.methName instead.
g.curDecl = t2.(*types2.Named).Obj().Name() + "." + g.curDecl
}
fn := ir.NewFunc(g.pos(decl))
fn.Nname, _ = g.def(decl.Name)
fn.Nname.Func = fn
fn.Nname.Defn = fn
fn.Pragma = g.pragmaFlags(decl.Pragma, funcPragmas)
if fn.Pragma&ir.Systemstack != 0 && fn.Pragma&ir.Nosplit != 0 {
base.ErrorfAt(fn.Pos(), "go:nosplit and go:systemstack cannot be combined")
}
if fn.Pragma&ir.Nointerface != 0 {
// Propagate //go:nointerface from Func.Pragma to Field.Nointerface.
// This is a bit roundabout, but this is the earliest point where we've
// processed the function's pragma flags, and we've also already created
// the Fields to represent the receiver's method set.
if recv := fn.Type().Recv(); recv != nil {
typ := types.ReceiverBaseType(recv.Type)
if orig := typ.OrigType(); orig != nil {
// For a generic method, we mark the methods on the
// base generic type, since those are the methods
// that will be stenciled.
typ = orig
}
meth := typecheck.Lookdot1(fn, typecheck.Lookup(decl.Name.Value), typ, typ.Methods(), 0)
meth.SetNointerface(true)
}
}
if decl.Body != nil && fn.Pragma&ir.Noescape != 0 {
base.ErrorfAt(fn.Pos(), "can only use //go:noescape with external func implementations")
}
if decl.Name.Value == "init" && decl.Recv == nil {
g.target.Inits = append(g.target.Inits, fn)
}
saveHaveEmbed := g.haveEmbed
saveCurDecl := g.curDecl
g.curDecl = ""
g.later(func() {
defer func(b bool, s string) {
// Revert haveEmbed and curDecl back to what they were before
// the "later" function.
g.haveEmbed = b
g.curDecl = s
}(g.haveEmbed, g.curDecl)
// Set haveEmbed and curDecl to what they were for this funcDecl.
g.haveEmbed = saveHaveEmbed
g.curDecl = saveCurDecl
if fn.Type().HasTParam() {
g.topFuncIsGeneric = true
}
g.funcBody(fn, decl.Recv, decl.Type, decl.Body)
g.topFuncIsGeneric = false
if fn.Type().HasTParam() && fn.Body != nil {
// Set pointers to the dcls/body of a generic function/method in
// the Inl struct, so it is marked for export, is available for
// stenciling, and works with Inline_Flood().
fn.Inl = &ir.Inline{
Cost: 1,
Dcl: fn.Dcl,
Body: fn.Body,
}
}
out.Append(fn)
})
}
func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
// Set the position for any error messages we might print (e.g. too large types).
base.Pos = g.pos(decl)
assert(ir.CurFunc != nil || g.curDecl == "")
// Set g.curDecl to the type name, as context for the type params declared
// during types2-to-types1 translation if this is a generic type.
saveCurDecl := g.curDecl
g.curDecl = decl.Name.Value
if decl.Alias {
name, _ := g.def(decl.Name)
g.pragmaFlags(decl.Pragma, 0)
assert(name.Alias()) // should be set by irgen.obj
out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name))
g.curDecl = ""
return
}
// Prevent size calculations until we set the underlying type.
types.DeferCheckSize()
name, obj := g.def(decl.Name)
ntyp, otyp := name.Type(), obj.Type()
if ir.CurFunc != nil {
ntyp.SetVargen()
}
pragmas := g.pragmaFlags(decl.Pragma, typePragmas)
name.SetPragma(pragmas) // TODO(mdempsky): Is this still needed?
if pragmas&ir.NotInHeap != 0 {
ntyp.SetNotInHeap(true)
}
// We need to use g.typeExpr(decl.Type) here to ensure that for
// chained, defined-type declarations like:
//
// type T U
//
// //go:notinheap
// type U struct { … }
//
// we mark both T and U as NotInHeap. If we instead used just
// g.typ(otyp.Underlying()), then we'd instead set T's underlying
// type directly to the struct type (which is not marked NotInHeap)
// and fail to mark T as NotInHeap.
//
// Also, we rely here on Type.SetUnderlying allowing passing a
// defined type and handling forward references like from T to U
// above. Contrast with go/types's Named.SetUnderlying, which
// disallows this.
//
// [mdempsky: Subtleties like these are why I always vehemently
// object to new type pragmas.]
ntyp.SetUnderlying(g.typeExpr(decl.Type))
tparams := otyp.(*types2.Named).TypeParams()
if n := tparams.Len(); n > 0 {
rparams := make([]*types.Type, n)
for i := range rparams {
rparams[i] = g.typ(tparams.At(i))
}
// This will set hasTParam flag if any rparams are not concrete types.
ntyp.SetRParams(rparams)
}
types.ResumeCheckSize()
g.curDecl = saveCurDecl
if otyp, ok := otyp.(*types2.Named); ok && otyp.NumMethods() != 0 {
methods := make([]*types.Field, otyp.NumMethods())
for i := range methods {
m := otyp.Method(i)
// Set g.curDecl to recvTypeName.methName, as context for the
// method-specific type params in the receiver.
g.curDecl = decl.Name.Value + "." + m.Name()
meth := g.obj(m)
methods[i] = types.NewField(meth.Pos(), g.selector(m), meth.Type())
methods[i].Nname = meth
g.curDecl = ""
}
ntyp.Methods().Set(methods)
}
out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name))
}
func (g *irgen) varDecl(out *ir.Nodes, decl *syntax.VarDecl) {
pos := g.pos(decl)
// Set the position for any error messages we might print (e.g. too large types).
base.Pos = pos
names := make([]*ir.Name, len(decl.NameList))
for i, name := range decl.NameList {
names[i], _ = g.def(name)
}
if decl.Pragma != nil {
pragma := decl.Pragma.(*pragmas)
varEmbed(g.makeXPos, names[0], decl, pragma, g.haveEmbed)
g.reportUnused(pragma)
}
haveEmbed := g.haveEmbed
do := func() {
defer func(b bool) { g.haveEmbed = b }(g.haveEmbed)
g.haveEmbed = haveEmbed
values := g.exprList(decl.Values)
var as2 *ir.AssignListStmt
if len(values) != 0 && len(names) != len(values) {
as2 = ir.NewAssignListStmt(pos, ir.OAS2, make([]ir.Node, len(names)), values)
}
for i, name := range names {
if ir.CurFunc != nil {
out.Append(ir.NewDecl(pos, ir.ODCL, name))
}
if as2 != nil {
as2.Lhs[i] = name
name.Defn = as2
} else {
as := ir.NewAssignStmt(pos, name, nil)
if len(values) != 0 {
as.Y = values[i]
name.Defn = as
} else if ir.CurFunc == nil {
name.Defn = as
}
if !g.delayTransform() {
lhs := []ir.Node{as.X}
rhs := []ir.Node{}
if as.Y != nil {
rhs = []ir.Node{as.Y}
}
transformAssign(as, lhs, rhs)
as.X = lhs[0]
if as.Y != nil {
as.Y = rhs[0]
}
}
as.SetTypecheck(1)
out.Append(as)
}
}
if as2 != nil {
if !g.delayTransform() {
transformAssign(as2, as2.Lhs, as2.Rhs)
}
as2.SetTypecheck(1)
out.Append(as2)
}
}
// If we're within a function, we need to process the assignment
// part of the variable declaration right away. Otherwise, we leave
// it to be handled after all top-level declarations are processed.
if ir.CurFunc != nil {
do()
} else {
g.later(do)
}
}
// pragmaFlags returns any specified pragma flags included in allowed,
// and reports errors about any other, unexpected pragmas.
func (g *irgen) pragmaFlags(pragma syntax.Pragma, allowed ir.PragmaFlag) ir.PragmaFlag {
if pragma == nil {
return 0
}
p := pragma.(*pragmas)
present := p.Flag & allowed
p.Flag &^= allowed
g.reportUnused(p)
return present
}
// reportUnused reports errors about any unused pragmas.
func (g *irgen) reportUnused(pragma *pragmas) {
| for _, pos := range pragma.Pos {
if pos.Flag&pragma.Flag != 0 {
base.ErrorfAt(g.makeXPos(pos.Pos), "misplaced compiler directive")
}
}
if len(pragma.Embeds) > 0 {
for _, e := range pragma.Embeds {
base.ErrorfAt(g.makeXPos(e.Pos), "misplaced go:embed directive")
}
}
}
| identifier_body |
|
test_automl.py | # pylint: disable=C0321,C0103,E1221,C0301,E1305,E1121,C0302,C0330
# -*- coding: utf-8 -*-
"""
https://github.com/mljar/mljar-supervised
python test_automl.py train > zlog/log_titanic_train.txt 2>&1
python test_automl.py predict > zlog/log_titanic_predict.txt 2>&1
conda install -c conda-forge fastparquet
dtreeviz==1.0, which is not installed.
fastparquet==0.4.1, which is not installed.
wordcloud==1.7.0, which is not installed.
catboost==0.24.1, but you'll have catboost 0.22 which is incompatible.
category-encoders==2.2.2, but you'll have category-encoders 2.1.0 which is incompatible.
lightgbm==3.0.0, but you'll have lightgbm 2.3.0 which is incompatible.
numpy>=1.18.5, but you'll have numpy 1.18.1 which is incompatible.
pandas==1.1.2, but you'll have pandas 0.25.3 which is incompatible.
pyarrow==0.17.0, but you'll have pyarrow 2.0.0 which is incompatible.
scipy==1.4.1, but you'll have scipy 1.3.1 which is incompatible.
seaborn==0.10.1, but you'll have seaborn 0.10.0 which is incompatible.
shap==0.36.0, but you'll have shap 0.35.0 which is incompatible.
tabulate==0.8.7, but you'll have tabulate 0.8.6 which is incompatible.
xgboost==1.2.0, but you'll have xgboost 1.3.3 which is incompatible.
"""
import warnings, copy, os, sys
warnings.filterwarnings('ignore')
####################################################################################
###### Path ########################################################################
root_repo = os.path.abspath(os.getcwd()).replace("\\", "/") + "/" ; print(root_repo)
THIS_FILEPATH = os.path.abspath(__file__)
sys.path.append(root_repo)
from source.util_feature import save,os_get_function_name
def global_pars_update(model_dict, data_name, config_name):
print("config_name", config_name)
dir_data = root_repo + "/data/" ; print("dir_data", dir_data)
m = {}
m['config_path'] = THIS_FILEPATH
m['config_name'] = config_name
#### preprocess input path
m['path_data_preprocess'] = dir_data + f'/input/{data_name}/train/'
#### train input path
dir_data_url = "https://github.com/arita37/dsa2_data/tree/main/" #### Remote Data directory
m['path_data_train'] = dir_data_url + f'/input/{data_name}/train/'
m['path_data_test'] = dir_data_url + f'/input/{data_name}/test/'
#m['path_data_val'] = dir_data + f'/input/{data_name}/test/'
#### train output path
m['path_train_output'] = dir_data + f'/output/{data_name}/{config_name}/'
m['path_train_model'] = dir_data + f'/output/{data_name}/{config_name}/model/'
m['path_features_store'] = dir_data + f'/output/{data_name}/{config_name}/features_store/'
m['path_pipeline'] = dir_data + f'/output/{data_name}/{config_name}/pipeline/'
#### predict input path
m['path_pred_data'] = dir_data + f'/input/{data_name}/test/'
m['path_pred_pipeline'] = dir_data + f'/output/{data_name}/{config_name}/pipeline/'
m['path_pred_model'] = dir_data + f'/output/{data_name}/{config_name}/model/'
#### predict output path
m['path_pred_output'] = dir_data + f'/output/{data_name}/pred_{config_name}/'
##### Generic
m['n_sample'] = model_dict['data_pars'].get('n_sample', 5000)
model_dict[ 'global_pars'] = m
return model_dict
####################################################################################
##### Params########################################################################
config_default = 'config1' ### name of function which contains data configuration
########
cols_input_type_1 = {
"coly" : "Survived"
,"colid" : "PassengerId"
,"colcat" : ["Sex", "Embarked" ]
,"colnum" : ["Pclass", "Age","SibSp", "Parch","Fare"]
,"coltext" : []
,"coldate" : []
,"colcross" : [ "Name", "Sex", "Ticket","Embarked","Pclass", "Age", "SibSp", ]
}
####################################################################################
def config1() :
"""
ONE SINGLE DICT that contains all information needed
for the titanic classification task
"""
data_name = "titanic" ### in data/input/
model_class = 'AutoML' ### ACTUAL Class name for model_sklearn.py
n_sample = 1000
def post_process_fun(y): ### After prediction is done
return int(y)
def pre_process_fun(y): ### Before the prediction is done
return int(y)
model_dict = {'model_pars': {
### LightGBM API model #######################################
'model_class': model_class
,'model_pars' : {
'total_time_limit' : 20,
'algorithms' : 'auto',
'results_path' : root_repo + f'/data/output/{data_name}/{os_get_function_name()}/automl_1',
'eval_metric' : 'auto'
# mode='Explain',
# ml_task='auto', model_time_limit=None, algorithms='auto', train_ensemble=True,
# stack_models='auto', eval_metric='auto', validation_strategy='auto', explain_level='auto',
# golden_features='auto', features_selection='auto', start_random_models='auto',
# hill_climbing_steps='auto', top_models_to_improve='auto', verbose=1, random_state=1234)
}
, 'post_process_fun' : post_process_fun ### After prediction ##########################################
, 'pre_process_pars' : {'y_norm_fun' : pre_process_fun , ### Before training ##########################
### Pipeline for data processing ##############################
'pipe_list': [
#### coly target processing
{'uri': 'source/prepro.py::pd_coly', 'pars': {}, 'cols_family': 'coly', 'cols_out': 'coly', 'type': 'coly' },
{'uri': 'source/prepro.py::pd_colnum_bin', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'colnum_bin', 'type': '' },
{'uri': 'source/prepro.py::pd_colnum_binto_onehot', 'pars': {}, 'cols_family': 'colnum_bin', 'cols_out': 'colnum_onehot', 'type': '' },
#### catcol INTO integer, colcat into OneHot
{'uri': 'source/prepro.py::pd_colcat_bin', 'pars': {}, 'cols_family': 'colcat', 'cols_out': 'colcat_bin', 'type': '' },
# {'uri': 'source/prepro.py::pd_colcat_to_onehot', 'pars': {}, 'cols_family': 'colcat_bin', 'cols_out': 'colcat_onehot', 'type': '' },
### Cross_feat = feat1 X feat2
# {'uri': 'source/prepro.py::pd_colcross', 'pars': {}, 'cols_family': 'colcross', 'cols_out': 'colcross_pair', 'type': 'cross'},
#### Example of Custom processor
#{'uri': THIS_FILEPATH + '::pd_col_myfun', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'col_myfun', 'type': '' },
],
}
},
'compute_pars': { 'metric_list': ['accuracy_score','average_precision_score']
,'mlflow_pars' : None # {} ### Not empty --> use mlflow
},
'data_pars': { 'n_sample' : n_sample,
'download_pars' : None,
'cols_input_type' : cols_input_type_1,
### family of columns for MODEL #########################################################
# "colnum", "colnum_bin", "colnum_onehot", "colnum_binmap", #### Colnum columns
# "colcat", "colcat_bin", "colcat_onehot", "colcat_bin_map", #### colcat columns
# 'colcross_single_onehot_select', "colcross_pair_onehot", 'colcross_pair', #### colcross columns 'coldate', 'coltext',
'cols_model_group': [ 'colnum_bin',
'colcat_bin',
# 'coltext',
# 'coldate',
#'colcross_pair',
### example of custom
# 'col_myfun'
]
### Filter data rows ##################################################################
,'filter_pars': { 'ymax' : 2 ,'ymin' : -1 }
}
}
##### Filling Global parameters ############################################################
model_dict = global_pars_update(model_dict, data_name, config_name=os_get_function_name() )
return model_dict
def | (df=None, col=None, pars={}):
"""
Example of custom Processor
"""
from source.util_feature import save, load
prefix = 'col_myfun'
if 'path_pipeline' in pars : #### Inference time LOAD previous pars
prepro = load(pars['path_pipeline'] + f"/{prefix}_model.pkl" )
pars = load(pars['path_pipeline'] + f"/{prefix}_pars.pkl" )
pars = {} if pars is None else pars
#### Do something #################################################################
df_new = df[col] ### Do nothing, just select the columns
df_new.columns = [ col + "_myfun" for col in df.columns ]
cols_new = list(df_new.columns)
prepro = None
pars_new = None
###################################################################################
if 'path_features_store' in pars and 'path_pipeline_export' in pars:
save(prepro, pars['path_pipeline_export'] + f"/{prefix}_model.pkl" )
save(cols_new, pars['path_pipeline_export'] + f"/{prefix}.pkl" )
save(pars_new, pars['path_pipeline_export'] + f"/{prefix}_pars.pkl" )
col_pars = {'prefix' : prefix , 'path' : pars.get('path_pipeline_export', pars.get('path_pipeline', None)) }
col_pars['cols_new'] = {
'col_myfun' : cols_new ### list
}
return df_new, col_pars
#####################################################################################
########## Profile data #############################################################
from core_run import data_profile
# def data_profile(path_data="", path_output="", n_sample= 5000):
"""
def data_profile(path_data="", path_output="", n_sample= 5000):
from source.run_feature_profile import run_profile
run_profile(path_data = path_data,
path_output = path_output + "/profile/",
n_sample = n_sample,
)
"""
###################################################################################
########## Preprocess #############################################################
### def preprocess(config='', nsample=1000):
from core_run import preprocess
"""
def preprocess(config=None, nsample=None):
config_name = config if config is not None else config_default
mdict = globals()[config_name]()
m = mdict['global_pars']
print(mdict)
from source import run_preprocess
run_preprocess.run_preprocess(config_name = config_name,
config_path = m['config_path'],
n_sample = nsample if nsample is not None else m['n_sample'],
### Optional
mode = 'run_preprocess')
"""
##################################################################################
########## Train #################################################################
from core_run import train
"""
def train(config=None, nsample=None):
config_name = config if config is not None else config_default
mdict = globals()[config_name]()
m = mdict['global_pars']
print(mdict)
from source import run_train
run_train.run_train(config_name = config_name,
config_path = m['config_path'],
n_sample = nsample if nsample is not None else m['n_sample']
)
"""
###################################################################################
######### Check data ##############################################################
def check():
pass
####################################################################################
####### Inference ##################################################################
# predict(config='', nsample=10000)
from core_run import predict
"""
def predict(config=None, nsample=None):
config_name = config if config is not None else config_default
mdict = globals()[config_name]()
m = mdict['global_pars']
from source import run_inference
run_inference.run_predict(config_name = config_name,
config_path = m['config_path'],
n_sample = nsample if nsample is not None else m['n_sample'],
#### Optional
path_data = m['path_pred_data'],
path_output = m['path_pred_output'],
model_dict = None
)
"""
###########################################################################################################
###########################################################################################################
"""
python test_automl.py data_profile
python test_automl.py preprocess --nsample 100
python test_automl.py train --nsample 200
python test_automl.py check
python test_automl.py predict
"""
if __name__ == "__main__":
d = { 'data_profile': data_profile, 'train' : train, 'predict' : predict, 'config' : config_default }
import fire
fire.Fire(d)
| pd_col_myfun | identifier_name |
test_automl.py | # pylint: disable=C0321,C0103,E1221,C0301,E1305,E1121,C0302,C0330
# -*- coding: utf-8 -*-
"""
https://github.com/mljar/mljar-supervised
python test_automl.py train > zlog/log_titanic_train.txt 2>&1
python test_automl.py predict > zlog/log_titanic_predict.txt 2>&1
conda install -c conda-forge fastparquet
dtreeviz==1.0, which is not installed.
fastparquet==0.4.1, which is not installed.
wordcloud==1.7.0, which is not installed.
catboost==0.24.1, but you'll have catboost 0.22 which is incompatible.
category-encoders==2.2.2, but you'll have category-encoders 2.1.0 which is incompatible.
lightgbm==3.0.0, but you'll have lightgbm 2.3.0 which is incompatible.
numpy>=1.18.5, but you'll have numpy 1.18.1 which is incompatible.
pandas==1.1.2, but you'll have pandas 0.25.3 which is incompatible.
pyarrow==0.17.0, but you'll have pyarrow 2.0.0 which is incompatible.
scipy==1.4.1, but you'll have scipy 1.3.1 which is incompatible.
seaborn==0.10.1, but you'll have seaborn 0.10.0 which is incompatible.
shap==0.36.0, but you'll have shap 0.35.0 which is incompatible.
tabulate==0.8.7, but you'll have tabulate 0.8.6 which is incompatible.
xgboost==1.2.0, but you'll have xgboost 1.3.3 which is incompatible.
"""
import warnings, copy, os, sys
warnings.filterwarnings('ignore')
####################################################################################
###### Path ########################################################################
root_repo = os.path.abspath(os.getcwd()).replace("\\", "/") + "/" ; print(root_repo)
THIS_FILEPATH = os.path.abspath(__file__)
sys.path.append(root_repo)
from source.util_feature import save,os_get_function_name
def global_pars_update(model_dict, data_name, config_name):
print("config_name", config_name)
dir_data = root_repo + "/data/" ; print("dir_data", dir_data)
m = {}
m['config_path'] = THIS_FILEPATH
m['config_name'] = config_name
#### preprocess input path
m['path_data_preprocess'] = dir_data + f'/input/{data_name}/train/'
#### train input path
dir_data_url = "https://github.com/arita37/dsa2_data/tree/main/" #### Remote Data directory
m['path_data_train'] = dir_data_url + f'/input/{data_name}/train/'
m['path_data_test'] = dir_data_url + f'/input/{data_name}/test/'
#m['path_data_val'] = dir_data + f'/input/{data_name}/test/'
#### train output path
m['path_train_output'] = dir_data + f'/output/{data_name}/{config_name}/'
m['path_train_model'] = dir_data + f'/output/{data_name}/{config_name}/model/'
m['path_features_store'] = dir_data + f'/output/{data_name}/{config_name}/features_store/'
m['path_pipeline'] = dir_data + f'/output/{data_name}/{config_name}/pipeline/'
#### predict input path
m['path_pred_data'] = dir_data + f'/input/{data_name}/test/'
m['path_pred_pipeline'] = dir_data + f'/output/{data_name}/{config_name}/pipeline/'
m['path_pred_model'] = dir_data + f'/output/{data_name}/{config_name}/model/'
#### predict output path
m['path_pred_output'] = dir_data + f'/output/{data_name}/pred_{config_name}/'
##### Generic
m['n_sample'] = model_dict['data_pars'].get('n_sample', 5000)
model_dict[ 'global_pars'] = m
return model_dict
####################################################################################
##### Params########################################################################
config_default = 'config1' ### name of function which contains data configuration
########
cols_input_type_1 = {
"coly" : "Survived"
,"colid" : "PassengerId"
,"colcat" : ["Sex", "Embarked" ]
,"colnum" : ["Pclass", "Age","SibSp", "Parch","Fare"]
,"coltext" : []
,"coldate" : []
,"colcross" : [ "Name", "Sex", "Ticket","Embarked","Pclass", "Age", "SibSp", ]
}
####################################################################################
def config1() :
"""
ONE SINGLE DICT that contains all information needed
for the titanic classification task
"""
data_name = "titanic" ### in data/input/
model_class = 'AutoML' ### ACTUAL Class name for model_sklearn.py
n_sample = 1000
def post_process_fun(y): ### After prediction is done
return int(y)
def pre_process_fun(y): ### Before the prediction is done
return int(y)
model_dict = {'model_pars': {
### LightGBM API model #######################################
'model_class': model_class
,'model_pars' : {
'total_time_limit' : 20,
'algorithms' : 'auto',
'results_path' : root_repo + f'/data/output/{data_name}/{os_get_function_name()}/automl_1',
'eval_metric' : 'auto'
# mode='Explain',
# ml_task='auto', model_time_limit=None, algorithms='auto', train_ensemble=True,
# stack_models='auto', eval_metric='auto', validation_strategy='auto', explain_level='auto',
# golden_features='auto', features_selection='auto', start_random_models='auto',
# hill_climbing_steps='auto', top_models_to_improve='auto', verbose=1, random_state=1234)
}
, 'post_process_fun' : post_process_fun ### After prediction ##########################################
, 'pre_process_pars' : {'y_norm_fun' : pre_process_fun , ### Before training ##########################
### Pipeline for data processing ##############################
'pipe_list': [
#### coly target processing
{'uri': 'source/prepro.py::pd_coly', 'pars': {}, 'cols_family': 'coly', 'cols_out': 'coly', 'type': 'coly' },
{'uri': 'source/prepro.py::pd_colnum_bin', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'colnum_bin', 'type': '' },
{'uri': 'source/prepro.py::pd_colnum_binto_onehot', 'pars': {}, 'cols_family': 'colnum_bin', 'cols_out': 'colnum_onehot', 'type': '' },
#### catcol INTO integer, colcat into OneHot
{'uri': 'source/prepro.py::pd_colcat_bin', 'pars': {}, 'cols_family': 'colcat', 'cols_out': 'colcat_bin', 'type': '' },
# {'uri': 'source/prepro.py::pd_colcat_to_onehot', 'pars': {}, 'cols_family': 'colcat_bin', 'cols_out': 'colcat_onehot', 'type': '' },
### Cross_feat = feat1 X feat2
# {'uri': 'source/prepro.py::pd_colcross', 'pars': {}, 'cols_family': 'colcross', 'cols_out': 'colcross_pair', 'type': 'cross'},
#### Example of Custom processor
#{'uri': THIS_FILEPATH + '::pd_col_myfun', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'col_myfun', 'type': '' },
],
}
},
'compute_pars': { 'metric_list': ['accuracy_score','average_precision_score']
,'mlflow_pars' : None # {} ### Not empty --> use mlflow
},
'data_pars': { 'n_sample' : n_sample,
'download_pars' : None,
'cols_input_type' : cols_input_type_1,
### family of columns for MODEL #########################################################
# "colnum", "colnum_bin", "colnum_onehot", "colnum_binmap", #### Colnum columns
# "colcat", "colcat_bin", "colcat_onehot", "colcat_bin_map", #### colcat columns
# 'colcross_single_onehot_select', "colcross_pair_onehot", 'colcross_pair', #### colcross columns 'coldate', 'coltext',
'cols_model_group': [ 'colnum_bin',
'colcat_bin',
# 'coltext',
# 'coldate',
#'colcross_pair',
### example of custom
# 'col_myfun'
]
### Filter data rows ##################################################################
,'filter_pars': { 'ymax' : 2 ,'ymin' : -1 }
}
}
##### Filling Global parameters ############################################################
model_dict = global_pars_update(model_dict, data_name, config_name=os_get_function_name() )
return model_dict
def pd_col_myfun(df=None, col=None, pars={}):
"""
Example of custom Processor
"""
from source.util_feature import save, load
prefix = 'col_myfun'
if 'path_pipeline' in pars : #### Inference time LOAD previous pars
prepro = load(pars['path_pipeline'] + f"/{prefix}_model.pkl" )
pars = load(pars['path_pipeline'] + f"/{prefix}_pars.pkl" )
pars = {} if pars is None else pars
#### Do something #################################################################
df_new = df[col] ### Do nothing, just select the columns
df_new.columns = [ col + "_myfun" for col in df.columns ]
cols_new = list(df_new.columns)
prepro = None
pars_new = None
###################################################################################
if 'path_features_store' in pars and 'path_pipeline_export' in pars:
|
col_pars = {'prefix' : prefix , 'path' : pars.get('path_pipeline_export', pars.get('path_pipeline', None)) }
col_pars['cols_new'] = {
'col_myfun' : cols_new ### list
}
return df_new, col_pars
#####################################################################################
########## Profile data #############################################################
from core_run import data_profile
# def data_profile(path_data="", path_output="", n_sample= 5000):
"""
def data_profile(path_data="", path_output="", n_sample= 5000):
from source.run_feature_profile import run_profile
run_profile(path_data = path_data,
path_output = path_output + "/profile/",
n_sample = n_sample,
)
"""
###################################################################################
########## Preprocess #############################################################
### def preprocess(config='', nsample=1000):
from core_run import preprocess
"""
def preprocess(config=None, nsample=None):
config_name = config if config is not None else config_default
mdict = globals()[config_name]()
m = mdict['global_pars']
print(mdict)
from source import run_preprocess
run_preprocess.run_preprocess(config_name = config_name,
config_path = m['config_path'],
n_sample = nsample if nsample is not None else m['n_sample'],
### Optional
mode = 'run_preprocess')
"""
##################################################################################
########## Train #################################################################
from core_run import train
"""
def train(config=None, nsample=None):
config_name = config if config is not None else config_default
mdict = globals()[config_name]()
m = mdict['global_pars']
print(mdict)
from source import run_train
run_train.run_train(config_name = config_name,
config_path = m['config_path'],
n_sample = nsample if nsample is not None else m['n_sample']
)
"""
###################################################################################
######### Check data ##############################################################
def check():
pass
####################################################################################
####### Inference ##################################################################
# predict(config='', nsample=10000)
from core_run import predict
"""
def predict(config=None, nsample=None):
config_name = config if config is not None else config_default
mdict = globals()[config_name]()
m = mdict['global_pars']
from source import run_inference
run_inference.run_predict(config_name = config_name,
config_path = m['config_path'],
n_sample = nsample if nsample is not None else m['n_sample'],
#### Optional
path_data = m['path_pred_data'],
path_output = m['path_pred_output'],
model_dict = None
)
"""
###########################################################################################################
###########################################################################################################
"""
python test_automl.py data_profile
python test_automl.py preprocess --nsample 100
python test_automl.py train --nsample 200
python test_automl.py check
python test_automl.py predict
"""
if __name__ == "__main__":
d = { 'data_profile': data_profile, 'train' : train, 'predict' : predict, 'config' : config_default }
import fire
fire.Fire(d)
| save(prepro, pars['path_pipeline_export'] + f"/{prefix}_model.pkl" )
save(cols_new, pars['path_pipeline_export'] + f"/{prefix}.pkl" )
save(pars_new, pars['path_pipeline_export'] + f"/{prefix}_pars.pkl" ) | conditional_block |
test_automl.py | # pylint: disable=C0321,C0103,E1221,C0301,E1305,E1121,C0302,C0330
# -*- coding: utf-8 -*-
"""
https://github.com/mljar/mljar-supervised
python test_automl.py train > zlog/log_titanic_train.txt 2>&1
python test_automl.py predict > zlog/log_titanic_predict.txt 2>&1
conda install -c conda-forge fastparquet
dtreeviz==1.0, which is not installed.
fastparquet==0.4.1, which is not installed.
wordcloud==1.7.0, which is not installed.
catboost==0.24.1, but you'll have catboost 0.22 which is incompatible.
category-encoders==2.2.2, but you'll have category-encoders 2.1.0 which is incompatible.
lightgbm==3.0.0, but you'll have lightgbm 2.3.0 which is incompatible.
numpy>=1.18.5, but you'll have numpy 1.18.1 which is incompatible.
pandas==1.1.2, but you'll have pandas 0.25.3 which is incompatible.
pyarrow==0.17.0, but you'll have pyarrow 2.0.0 which is incompatible.
scipy==1.4.1, but you'll have scipy 1.3.1 which is incompatible.
seaborn==0.10.1, but you'll have seaborn 0.10.0 which is incompatible.
shap==0.36.0, but you'll have shap 0.35.0 which is incompatible.
tabulate==0.8.7, but you'll have tabulate 0.8.6 which is incompatible.
xgboost==1.2.0, but you'll have xgboost 1.3.3 which is incompatible.
"""
import warnings, copy, os, sys
warnings.filterwarnings('ignore')
####################################################################################
###### Path ########################################################################
root_repo = os.path.abspath(os.getcwd()).replace("\\", "/") + "/" ; print(root_repo)
THIS_FILEPATH = os.path.abspath(__file__)
sys.path.append(root_repo)
from source.util_feature import save,os_get_function_name
def global_pars_update(model_dict, data_name, config_name):
print("config_name", config_name)
dir_data = root_repo + "/data/" ; print("dir_data", dir_data)
m = {}
m['config_path'] = THIS_FILEPATH
m['config_name'] = config_name
#### preprocess input path
m['path_data_preprocess'] = dir_data + f'/input/{data_name}/train/'
#### train input path
dir_data_url = "https://github.com/arita37/dsa2_data/tree/main/" #### Remote Data directory
m['path_data_train'] = dir_data_url + f'/input/{data_name}/train/'
m['path_data_test'] = dir_data_url + f'/input/{data_name}/test/'
#m['path_data_val'] = dir_data + f'/input/{data_name}/test/'
#### train output path
m['path_train_output'] = dir_data + f'/output/{data_name}/{config_name}/'
m['path_train_model'] = dir_data + f'/output/{data_name}/{config_name}/model/'
m['path_features_store'] = dir_data + f'/output/{data_name}/{config_name}/features_store/'
m['path_pipeline'] = dir_data + f'/output/{data_name}/{config_name}/pipeline/'
#### predict input path
m['path_pred_data'] = dir_data + f'/input/{data_name}/test/'
m['path_pred_pipeline'] = dir_data + f'/output/{data_name}/{config_name}/pipeline/'
m['path_pred_model'] = dir_data + f'/output/{data_name}/{config_name}/model/'
#### predict output path
m['path_pred_output'] = dir_data + f'/output/{data_name}/pred_{config_name}/'
##### Generic
m['n_sample'] = model_dict['data_pars'].get('n_sample', 5000)
model_dict[ 'global_pars'] = m
return model_dict
####################################################################################
##### Params########################################################################
config_default = 'config1' ### name of function which contains data configuration
########
cols_input_type_1 = {
"coly" : "Survived"
,"colid" : "PassengerId"
,"colcat" : ["Sex", "Embarked" ]
,"colnum" : ["Pclass", "Age","SibSp", "Parch","Fare"]
,"coltext" : []
,"coldate" : []
,"colcross" : [ "Name", "Sex", "Ticket","Embarked","Pclass", "Age", "SibSp", ]
}
####################################################################################
def config1() :
"""
ONE SINGLE DICT that contains all information needed
for the titanic classification task
"""
data_name = "titanic" ### in data/input/
model_class = 'AutoML' ### ACTUAL Class name for model_sklearn.py
n_sample = 1000
def post_process_fun(y): ### After prediction is done
|
def pre_process_fun(y): ### Before the prediction is done
return int(y)
model_dict = {'model_pars': {
### LightGBM API model #######################################
'model_class': model_class
,'model_pars' : {
'total_time_limit' : 20,
'algorithms' : 'auto',
'results_path' : root_repo + f'/data/output/{data_name}/{os_get_function_name()}/automl_1',
'eval_metric' : 'auto'
# mode='Explain',
# ml_task='auto', model_time_limit=None, algorithms='auto', train_ensemble=True,
# stack_models='auto', eval_metric='auto', validation_strategy='auto', explain_level='auto',
# golden_features='auto', features_selection='auto', start_random_models='auto',
# hill_climbing_steps='auto', top_models_to_improve='auto', verbose=1, random_state=1234)
}
, 'post_process_fun' : post_process_fun ### After prediction ##########################################
, 'pre_process_pars' : {'y_norm_fun' : pre_process_fun , ### Before training ##########################
### Pipeline for data processing ##############################
'pipe_list': [
#### coly target processing
{'uri': 'source/prepro.py::pd_coly', 'pars': {}, 'cols_family': 'coly', 'cols_out': 'coly', 'type': 'coly' },
{'uri': 'source/prepro.py::pd_colnum_bin', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'colnum_bin', 'type': '' },
{'uri': 'source/prepro.py::pd_colnum_binto_onehot', 'pars': {}, 'cols_family': 'colnum_bin', 'cols_out': 'colnum_onehot', 'type': '' },
#### colcat INTO integer, colcat_bin into OneHot
{'uri': 'source/prepro.py::pd_colcat_bin', 'pars': {}, 'cols_family': 'colcat', 'cols_out': 'colcat_bin', 'type': '' },
# {'uri': 'source/prepro.py::pd_colcat_to_onehot', 'pars': {}, 'cols_family': 'colcat_bin', 'cols_out': 'colcat_onehot', 'type': '' },
### Cross_feat = feat1 X feat2
# {'uri': 'source/prepro.py::pd_colcross', 'pars': {}, 'cols_family': 'colcross', 'cols_out': 'colcross_pair', 'type': 'cross'},
#### Example of Custom processor
#{'uri': THIS_FILEPATH + '::pd_col_myfun', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'col_myfun', 'type': '' },
],
}
},
'compute_pars': { 'metric_list': ['accuracy_score','average_precision_score']
,'mlflow_pars' : None # {} ### Not empty --> use mlflow
},
'data_pars': { 'n_sample' : n_sample,
'download_pars' : None,
'cols_input_type' : cols_input_type_1,
### family of columns for MODEL #########################################################
# "colnum", "colnum_bin", "colnum_onehot", "colnum_binmap", #### Colnum columns
# "colcat", "colcat_bin", "colcat_onehot", "colcat_bin_map", #### colcat columns
# 'colcross_single_onehot_select', "colcross_pair_onehot", 'colcross_pair', #### colcross columns 'coldate', 'coltext',
'cols_model_group': [ 'colnum_bin',
'colcat_bin',
# 'coltext',
# 'coldate',
#'colcross_pair',
### example of custom
# 'col_myfun'
]
### Filter data rows ##################################################################
,'filter_pars': { 'ymax' : 2 ,'ymin' : -1 }
}
}
##### Filling Global parameters ############################################################
model_dict = global_pars_update(model_dict, data_name, config_name=os_get_function_name() )
return model_dict
def pd_col_myfun(df=None, col=None, pars={}):
"""
Example of custom Processor
"""
from source.util_feature import save, load
prefix = 'col_myfun'
if 'path_pipeline' in pars : #### Inference time LOAD previous pars
prepro = load(pars['path_pipeline'] + f"/{prefix}_model.pkl" )
pars = load(pars['path_pipeline'] + f"/{prefix}_pars.pkl" )
pars = {} if pars is None else pars
#### Do something #################################################################
df_new = df[col] ### Do nothing, just pass the selected columns through
df_new.columns = [ c + "_myfun" for c in df_new.columns ]
cols_new = list(df_new.columns)
prepro = None
pars_new = None
###################################################################################
if 'path_features_store' in pars and 'path_pipeline_export' in pars:
save(prepro, pars['path_pipeline_export'] + f"/{prefix}_model.pkl" )
save(cols_new, pars['path_pipeline_export'] + f"/{prefix}.pkl" )
save(pars_new, pars['path_pipeline_export'] + f"/{prefix}_pars.pkl" )
col_pars = {'prefix' : prefix , 'path' : pars.get('path_pipeline_export', pars.get('path_pipeline', None)) }
col_pars['cols_new'] = {
'col_myfun' : cols_new ### list
}
return df_new, col_pars
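# To wire this example processor into config1, an entry like the commented one in
# 'pipe_list' above can be used (sketch only; the 'col_myfun' label is arbitrary):
# {'uri': THIS_FILEPATH + '::pd_col_myfun', 'pars': {}, 'cols_family': 'colnum',
# 'cols_out': 'col_myfun', 'type': '' },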
#####################################################################################
########## Profile data #############################################################
from core_run import data_profile
# def data_profile(path_data="", path_output="", n_sample= 5000):
"""
def data_profile(path_data="", path_output="", n_sample= 5000):
from source.run_feature_profile import run_profile
run_profile(path_data = path_data,
path_output = path_output + "/profile/",
n_sample = n_sample,
)
"""
###################################################################################
########## Preprocess #############################################################
### def preprocess(config='', nsample=1000):
from core_run import preprocess
"""
def preprocess(config=None, nsample=None):
config_name = config if config is not None else config_default
mdict = globals()[config_name]()
m = mdict['global_pars']
print(mdict)
from source import run_preprocess
run_preprocess.run_preprocess(config_name = config_name,
config_path = m['config_path'],
n_sample = nsample if nsample is not None else m['n_sample'],
### Optional
mode = 'run_preprocess')
"""
##################################################################################
########## Train #################################################################
from core_run import train
"""
def train(config=None, nsample=None):
config_name = config if config is not None else config_default
mdict = globals()[config_name]()
m = mdict['global_pars']
print(mdict)
from source import run_train
run_train.run_train(config_name = config_name,
config_path = m['config_path'],
n_sample = nsample if nsample is not None else m['n_sample']
)
"""
###################################################################################
######### Check data ##############################################################
def check():
pass
####################################################################################
####### Inference ##################################################################
# predict(config='', nsample=10000)
from core_run import predict
"""
def predict(config=None, nsample=None):
config_name = config if config is not None else config_default
mdict = globals()[config_name]()
m = mdict['global_pars']
from source import run_inference
run_inference.run_predict(config_name = config_name,
config_path = m['config_path'],
n_sample = nsample if nsample is not None else m['n_sample'],
#### Optional
path_data = m['path_pred_data'],
path_output = m['path_pred_output'],
model_dict = None
)
"""
###########################################################################################################
###########################################################################################################
"""
python test_automl.py data_profile
python test_automl.py preprocess --nsample 100
python test_automl.py train --nsample 200
python test_automl.py check
python test_automl.py predict
"""
if __name__ == "__main__":
d = { 'data_profile': data_profile, 'train' : train, 'predict' : predict, 'config' : config_default }
import fire
fire.Fire(d)
| return int(y) | identifier_body |
test_automl.py | # pylint: disable=C0321,C0103,E1221,C0301,E1305,E1121,C0302,C0330
# -*- coding: utf-8 -*-
"""
https://github.com/mljar/mljar-supervised
python test_automl.py train > zlog/log_titanic_train.txt 2>&1
python test_automl.py predict > zlog/log_titanic_predict.txt 2>&1
conda install -c conda-forge fastparquet
dtreeviz==1.0, which is not installed.
fastparquet==0.4.1, which is not installed.
wordcloud==1.7.0, which is not installed.
catboost==0.24.1, but you'll have catboost 0.22 which is incompatible.
category-encoders==2.2.2, but you'll have category-encoders 2.1.0 which is incompatible.
lightgbm==3.0.0, but you'll have lightgbm 2.3.0 which is incompatible.
numpy>=1.18.5, but you'll have numpy 1.18.1 which is incompatible.
pandas==1.1.2, but you'll have pandas 0.25.3 which is incompatible.
pyarrow==0.17.0, but you'll have pyarrow 2.0.0 which is incompatible.
scipy==1.4.1, but you'll have scipy 1.3.1 which is incompatible.
seaborn==0.10.1, but you'll have seaborn 0.10.0 which is incompatible.
shap==0.36.0, but you'll have shap 0.35.0 which is incompatible.
tabulate==0.8.7, but you'll have tabulate 0.8.6 which is incompatible.
xgboost==1.2.0, but you'll have xgboost 1.3.3 which is incompatible.
"""
import warnings, copy, os, sys
warnings.filterwarnings('ignore')
####################################################################################
###### Path ########################################################################
root_repo = os.path.abspath(os.getcwd()).replace("\\", "/") + "/" ; print(root_repo)
THIS_FILEPATH = os.path.abspath(__file__)
sys.path.append(root_repo)
from source.util_feature import save,os_get_function_name
def global_pars_update(model_dict, data_name, config_name):
print("config_name", config_name)
dir_data = root_repo + "/data/" ; print("dir_data", dir_data)
m = {}
m['config_path'] = THIS_FILEPATH
m['config_name'] = config_name
#### preprocess input path
m['path_data_preprocess'] = dir_data + f'/input/{data_name}/train/'
#### train input path
dir_data_url = "https://github.com/arita37/dsa2_data/tree/main/" #### Remote Data directory
m['path_data_train'] = dir_data_url + f'/input/{data_name}/train/'
m['path_data_test'] = dir_data_url + f'/input/{data_name}/test/'
#m['path_data_val'] = dir_data + f'/input/{data_name}/test/'
#### train output path
m['path_train_output'] = dir_data + f'/output/{data_name}/{config_name}/'
m['path_train_model'] = dir_data + f'/output/{data_name}/{config_name}/model/'
m['path_features_store'] = dir_data + f'/output/{data_name}/{config_name}/features_store/'
m['path_pipeline'] = dir_data + f'/output/{data_name}/{config_name}/pipeline/'
#### predict input path
m['path_pred_data'] = dir_data + f'/input/{data_name}/test/'
m['path_pred_pipeline'] = dir_data + f'/output/{data_name}/{config_name}/pipeline/'
m['path_pred_model'] = dir_data + f'/output/{data_name}/{config_name}/model/'
#### predict output path
m['path_pred_output'] = dir_data + f'/output/{data_name}/pred_{config_name}/'
##### Generic
m['n_sample'] = model_dict['data_pars'].get('n_sample', 5000)
model_dict[ 'global_pars'] = m
return model_dict
####################################################################################
##### Params########################################################################
config_default = 'config1' ### name of function which contains data configuration
########
cols_input_type_1 = {
"coly" : "Survived"
,"colid" : "PassengerId"
,"colcat" : ["Sex", "Embarked" ]
,"colnum" : ["Pclass", "Age","SibSp", "Parch","Fare"]
,"coltext" : []
,"coldate" : []
,"colcross" : [ "Name", "Sex", "Ticket","Embarked","Pclass", "Age", "SibSp", ]
}
####################################################################################
def config1() :
"""
ONE SINGLE DICT containing all the information needed
for the titanic classification task
"""
data_name = "titanic" ### in data/input/
model_class = 'AutoML' ### ACTUAL Class name for model_sklearn.py
n_sample = 1000
def post_process_fun(y): ### After prediction is done
return int(y)
def pre_process_fun(y): ### Before the prediction is done
return int(y)
model_dict = {'model_pars': {
### LightGBM API model #######################################
'model_class': model_class
,'model_pars' : {
'total_time_limit' : 20,
'algorithms' : 'auto',
'results_path' : root_repo + f'/data/output/{data_name}/{os_get_function_name()}/automl_1',
'eval_metric' : 'auto'
# mode='Explain',
# ml_task='auto', model_time_limit=None, algorithms='auto', train_ensemble=True,
# stack_models='auto', eval_metric='auto', validation_strategy='auto', explain_level='auto',
# golden_features='auto', features_selection='auto', start_random_models='auto',
# hill_climbing_steps='auto', top_models_to_improve='auto', verbose=1, random_state=1234)
}
, 'post_process_fun' : post_process_fun ### After prediction ##########################################
, 'pre_process_pars' : {'y_norm_fun' : pre_process_fun , ### Before training ##########################
### Pipeline for data processing ##############################
'pipe_list': [
#### coly target processing
{'uri': 'source/prepro.py::pd_coly', 'pars': {}, 'cols_family': 'coly', 'cols_out': 'coly', 'type': 'coly' },
{'uri': 'source/prepro.py::pd_colnum_bin', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'colnum_bin', 'type': '' },
{'uri': 'source/prepro.py::pd_colnum_binto_onehot', 'pars': {}, 'cols_family': 'colnum_bin', 'cols_out': 'colnum_onehot', 'type': '' },
#### colcat INTO integer, colcat_bin into OneHot
{'uri': 'source/prepro.py::pd_colcat_bin', 'pars': {}, 'cols_family': 'colcat', 'cols_out': 'colcat_bin', 'type': '' },
# {'uri': 'source/prepro.py::pd_colcat_to_onehot', 'pars': {}, 'cols_family': 'colcat_bin', 'cols_out': 'colcat_onehot', 'type': '' },
### Cross_feat = feat1 X feat2
# {'uri': 'source/prepro.py::pd_colcross', 'pars': {}, 'cols_family': 'colcross', 'cols_out': 'colcross_pair', 'type': 'cross'},
#### Example of Custom processor
#{'uri': THIS_FILEPATH + '::pd_col_myfun', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'col_myfun', 'type': '' },
],
}
},
'compute_pars': { 'metric_list': ['accuracy_score','average_precision_score']
,'mlflow_pars' : None # {} ### Not empty --> use mlflow
},
'data_pars': { 'n_sample' : n_sample,
'download_pars' : None,
'cols_input_type' : cols_input_type_1,
### family of columns for MODEL #########################################################
# "colnum", "colnum_bin", "colnum_onehot", "colnum_binmap", #### Colnum columns
# "colcat", "colcat_bin", "colcat_onehot", "colcat_bin_map", #### colcat columns
# 'colcross_single_onehot_select', "colcross_pair_onehot", 'colcross_pair', #### colcross columns 'coldate', 'coltext',
'cols_model_group': [ 'colnum_bin',
'colcat_bin',
# 'coltext',
# 'coldate',
#'colcross_pair',
### example of custom
# 'col_myfun'
]
### Filter data rows ##################################################################
,'filter_pars': { 'ymax' : 2 ,'ymin' : -1 }
}
}
##### Filling Global parameters ############################################################
model_dict = global_pars_update(model_dict, data_name, config_name=os_get_function_name() )
return model_dict
def pd_col_myfun(df=None, col=None, pars={}):
"""
Example of custom Processor
"""
from source.util_feature import save, load
prefix = 'col_myfun'
if 'path_pipeline' in pars : #### Inference time LOAD previous pars
prepro = load(pars['path_pipeline'] + f"/{prefix}_model.pkl" )
pars = load(pars['path_pipeline'] + f"/{prefix}_pars.pkl" )
pars = {} if pars is None else pars
#### Do something #################################################################
df_new = df[col] ### Do nothing, just pass the selected columns through
df_new.columns = [ c + "_myfun" for c in df_new.columns ]
cols_new = list(df_new.columns)
prepro = None
pars_new = None
###################################################################################
if 'path_features_store' in pars and 'path_pipeline_export' in pars:
save(prepro, pars['path_pipeline_export'] + f"/{prefix}_model.pkl" )
save(cols_new, pars['path_pipeline_export'] + f"/{prefix}.pkl" )
save(pars_new, pars['path_pipeline_export'] + f"/{prefix}_pars.pkl" )
col_pars = {'prefix' : prefix , 'path' : pars.get('path_pipeline_export', pars.get('path_pipeline', None)) }
col_pars['cols_new'] = {
'col_myfun' : cols_new ### list
}
return df_new, col_pars
#####################################################################################
########## Profile data #############################################################
from core_run import data_profile
# def data_profile(path_data="", path_output="", n_sample= 5000):
"""
def data_profile(path_data="", path_output="", n_sample= 5000):
from source.run_feature_profile import run_profile
run_profile(path_data = path_data,
path_output = path_output + "/profile/",
n_sample = n_sample,
)
"""
###################################################################################
########## Preprocess #############################################################
### def preprocess(config='', nsample=1000):
from core_run import preprocess
"""
def preprocess(config=None, nsample=None):
config_name = config if config is not None else config_default
mdict = globals()[config_name]()
m = mdict['global_pars'] | run_preprocess.run_preprocess(config_name = config_name,
config_path = m['config_path'],
n_sample = nsample if nsample is not None else m['n_sample'],
### Optional
mode = 'run_preprocess')
"""
##################################################################################
########## Train #################################################################
from core_run import train
"""
def train(config=None, nsample=None):
config_name = config if config is not None else config_default
mdict = globals()[config_name]()
m = mdict['global_pars']
print(mdict)
from source import run_train
run_train.run_train(config_name = config_name,
config_path = m['config_path'],
n_sample = nsample if nsample is not None else m['n_sample']
)
"""
###################################################################################
######### Check data ##############################################################
def check():
pass
####################################################################################
####### Inference ##################################################################
# predict(config='', nsample=10000)
from core_run import predict
"""
def predict(config=None, nsample=None):
config_name = config if config is not None else config_default
mdict = globals()[config_name]()
m = mdict['global_pars']
from source import run_inference
run_inference.run_predict(config_name = config_name,
config_path = m['config_path'],
n_sample = nsample if nsample is not None else m['n_sample'],
#### Optional
path_data = m['path_pred_data'],
path_output = m['path_pred_output'],
model_dict = None
)
"""
###########################################################################################################
###########################################################################################################
"""
python test_automl.py data_profile
python test_automl.py preprocess --nsample 100
python test_automl.py train --nsample 200
python test_automl.py check
python test_automl.py predict
"""
if __name__ == "__main__":
d = { 'data_profile': data_profile, 'train' : train, 'predict' : predict, 'config' : config_default }
import fire
fire.Fire(d) | print(mdict)
from source import run_preprocess | random_line_split |
mod.rs | use std::{
cell::{Ref, RefCell},
ops::RangeInclusive,
rc::Rc,
};
pub type Time = f32;
use crate::Pose;
/// Aggregate of [`ColumnData`] that can only be added and not deleted.
/// Length of all [`ColumnData`] are equal, making this effectively a 2D table.
pub type DataSet<T = f32> = Vec<ColumnData<T>>;
/// Easily accessible wrapper around [`TimeTable`] to be shared by multiple owners
pub struct DataStore(pub(crate) Rc<RefCell<TimeTable<Pose>>>);
impl Default for DataStore {
fn default() -> Self {
// Temporary, create dummy data for showing
use crate::math::{cos, sin};
use std::f32::consts::PI;
let n = 10000;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
Self(Rc::new(RefCell::new(TimeTable::new(t, pose))))
}
}
impl DataStore {
pub fn new() -> Self {
Default::default()
}
pub fn clone(&self) -> Self {
DataStore(Rc::clone(&self.0))
}
pub fn borrow(&self) -> Ref<TimeTable<Pose>> {
self.0.borrow()
}
}
/// Single column of data
#[derive(Debug, Default, Clone)]
pub struct ColumnData<T> {
data: Vec<T>,
name: Option<String>,
}
impl<T> ColumnData<T> {
pub fn from_vec(data: impl Into<Vec<T>>) -> Self {
Self {
data: data.into(),
name: None,
}
}
pub fn len(&self) -> usize {
self.data.len()
}
pub fn add(&mut self, element: T) {
self.data.push(element);
}
pub fn get(&self, index: usize) -> Option<&T> {
self.data.get(index)
}
pub fn get_between(&self, range: RangeInclusive<usize>) -> &[T] {
&self.data[range]
}
}
/// Basic time vector for finding indices to look up within [`TimeSeries`] and [`TimeTable`]
#[derive(Debug, Default)]
struct Timeline {
/// Cache stores previously found index to avoid unnecessary iteration when finding time index
cache: Option<(Time, usize)>,
/// Actual vector
vec: Vec<Time>,
}
impl Into<Timeline> for Vec<Time> {
fn into(self) -> Timeline {
Timeline {
vec: self,
..Default::default()
}
}
}
impl Timeline {
/// Tolerance to compare two input time for their equality
const EPSILON: f32 = 0.0005;
pub fn new(time_vec: impl Into<Vec<Time>>) -> Self {
Self {
vec: time_vec.into(),
..Default::default()
}
}
/// Adds a time element to the end
pub fn add(&mut self, time: Time) {
self.vec.push(time);
}
/// Checks if time input has changed from last index search
/// If time input is sufficiently close, assume same index can be used without calling [`get_index`]
///
/// [`get_index`]: Self::get_index
fn time_changed(&self, time: Time) -> bool {
self.cache
.map_or(true, |(prev, _)| (time - prev).abs() > Self::EPSILON)
}
/// Find the index that corresponds to the given time in seconds.
///
/// Returns index of first time that is greater or equal to the specified time.
fn get_index(&self, time: Time) -> Option<usize> {
if self.time_changed(time) {
self.vec.iter().position(|&t| t >= time).map(|index| {
// self.cache = Some((time, index));
index
})
} else {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
}
}
/// Similar to [`get_index`], but only returns time index that is smaller than the input time.
/// This is useful when making sure the returned time index never exceeds the given time, as
/// in [`get_range`]
///
/// [`get_index`]: Self::get_index
/// [`get_range`]: Self::get_range
fn get_index_under(&self, time: Time) -> Option<usize> {
if self.time_changed(time) {
self.vec
.iter()
.position(|&t| t > time)
.map(|idx| idx.saturating_sub(1)) // clamp at 0 without underflowing usize
.map(|index| {
// self.cache = Some((time, index));
index
})
} else {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
}
}
/// Returns range indices that are within the time range specified
pub fn get_range(&self, start: Time, end: Time) -> Option<RangeInclusive<usize>> {
if start < end {
if let Some(start) = self.get_index(start) {
if let Some(end) = self.get_index_under(end) {
return Some(start..=end);
}
}
}
None
}
pub fn get_range_raw(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.get_range(start, end)
.map(|range| self.vec[range].to_vec())
}
/// Length of the time vector
pub fn len(&self) -> usize {
self.vec.len()
}
}
#[derive(Debug, Default)]
pub struct TimeTable<T> {
time: Timeline,
data: DataSet<T>,
}
impl<T> Into<TimeTable<T>> for TimeSeries<T> {
fn into(self) -> TimeTable<T> {
TimeTable {
time: self.time,
data: vec![self.data],
}
}
}
impl<T: Clone> TimeTable<T> {
#[allow(dead_code)]
pub fn | (timeseries: TimeSeries<T>) -> Self {
Self {
time: timeseries.time,
data: vec![timeseries.data],
}
}
}
impl<T: Clone> TimeTable<T> {
pub fn new(time: Vec<Time>, data: Vec<T>) -> Self {
TimeSeries::new(time, data).into()
}
#[allow(dead_code)]
pub fn get_column(&self, column: usize) -> Option<ColumnData<T>> {
self.data.get(column).map(|val| val.clone())
}
pub fn get_at_time(&self, column: usize, time: Time) -> Option<T> {
if let Some(idx) = self.time.get_index(time) {
self.data
.get(column)
.and_then(|vec| vec.get(idx).clone())
.map(|el| el.to_owned())
} else {
None
}
}
pub fn get_time_range(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.time.get_range_raw(start, end)
}
/// Returns slice of data that is within the time range specified
pub fn get_range(&self, column: usize, start: Time, end: Time) -> Option<Vec<T>> {
if let Some(range) = self.time.get_range(start, end) {
self.data
.get(column)
.map(|vec| vec.get_between(range).to_owned())
} else {
None
}
}
}
#[derive(Debug, Default)]
pub struct TimeSeries<T> {
time: Timeline,
data: ColumnData<T>,
sample_time: Time,
}
impl<T: Clone> TimeSeries<T> {
/// Create a new [`TimeSeries`] from given array of `time` and `data`.
pub fn new(time: impl Into<Vec<Time>>, data: impl Into<Vec<T>>) -> Self {
let time = Timeline::new(time.into());
let data = ColumnData::from_vec(data);
let sample_time = 0.0;
if time.len() != data.len() {
panic!("Size of time and data are different!");
}
Self {
time,
data,
sample_time,
}
}
/// Create an empty [`TimeSeries`]
pub fn empty() -> Self {
Self {
time: Timeline::default(),
data: ColumnData {
data: Vec::new(),
name: None,
},
sample_time: 0.0,
}
}
/// Sets the `sample_time` of the [`TimeSeries`]. If `add` is called with timestamp
/// that is smaller than the sum of the last timestamp and `sample_time`, `add` will
/// not push the data into the [`TimeSeries`]. When `sample_time` is set to zero (by default),
/// `add` will only discard data points whose timestamp is identical to the last timestamp,
/// i.e. it only guarantees monotonically increasing timestamps.
pub fn with_sample_time(mut self, sample_time: Time) -> Self {
self.sample_time = sample_time;
self
}
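// Minimal usage sketch for sample_time (mirrors the check_sample_time test
// below; the 0.1 s value is only an example): samples arriving faster than
// the sample time are dropped.
//
// let mut ts = TimeSeries::<f32>::empty().with_sample_time(0.1);
// ts.add(0.00, 1.0); // kept
// ts.add(0.05, 2.0); // dropped: 0.00 + 0.1 > 0.05
// ts.add(0.10, 3.0); // kept: 0.00 + 0.1 <= 0.10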
pub fn add(&mut self, time: Time, element: T) {
// if let Some(last) = self.time.vec.last() {
// if last + self.sample_time < time {
// self.time.add(time);
// self.data.add(element);
// }
// } else {
// self.time.add(time);
// self.data.add(element);
// }
if self.time.vec.is_empty() {
self.time.add(time);
self.data.add(element);
} else {
if self.sample_time > 0.0 {
if self.time.vec.last().unwrap() + self.sample_time <= time {
self.time.add(time);
self.data.add(element);
}
} else {
if self.time.vec.last().unwrap() < &time {
self.time.add(time);
self.data.add(element);
}
}
}
}
/// Get data element for a given time
pub fn get_at_time(&self, time: Time) -> Option<T> {
self.time
.get_index(time)
.and_then(|idx| self.data.get(idx))
.map(|val| val.to_owned())
}
/// Returns slice of data that is within the time range specified
#[allow(dead_code)]
pub fn get_range(&self, start: Time, end: Time) -> Option<&[T]> {
self.time
.get_range(start, end)
.map(|range| self.data.get_between(range))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::math::{cos, sin};
use crate::Pose;
use std::f32::consts::PI;
fn dummy_pose() -> TimeSeries<Pose> {
let n = 5;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
TimeSeries::new(t, pose)
}
fn dummy_f32() -> TimeSeries<f32> {
let n = 5;
let dt = 1.0;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let data: Vec<f32> = t.iter().map(|&t| t * 3.0).collect();
TimeSeries::new(t, data)
}
#[test]
fn add_timeseries() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.5, 5.0);
ts.add(0.8, 15.0);
dbg!(&ts);
}
#[test]
fn check_index() {
// dbg!(&ts);
let ts = dummy_pose();
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // finding exactly matching time
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // running again should give same result
assert_eq!(2, ts.time.get_index(0.015).unwrap()); // finding next closest time stamp
}
#[test]
fn check_range() {
let ts = dummy_f32();
assert_eq!(1, ts.time.get_index(1.0).unwrap());
assert_eq!(3, ts.time.get_index(2.1).unwrap());
assert_eq!(3, ts.time.get_index(2.9).unwrap());
assert_eq!(3, ts.time.get_index(3.0).unwrap());
assert_eq!(&[3.0, 6.0], ts.get_range(1.0, 2.9).unwrap());
assert_eq!(&[3.0, 6.0, 9.0], ts.get_range(1.0, 3.0).unwrap());
}
#[test]
fn series_to_table() {
let ts = dummy_f32();
let _table: TimeTable<f32> = ts.into();
}
#[test]
fn check_sample_time() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.0, 1.0);
ts.add(0.0, 2.0); // This shouldn't be added
ts.add(0.5, 3.0);
ts.add(0.5, 4.0); // This shouldn't be added
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.5).unwrap());
let mut ts = TimeSeries::<f32>::empty().with_sample_time(0.1);
ts.add(0.0, 1.0);
ts.add(0.05, 2.0); // This shouldn't be added
ts.add(0.1, 3.0);
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.1).unwrap());
}
}
// TimeTable data format options:
// 1. Generational-arena -> Arena<TimeSeries>
// pros: easy to push and manage TimeSeries
// cons: Dependency, TimeSeries cannot contain different data types
// 2. Vec<Box<dyn TimeSeries>>
// pros: No dependency
// cons: Use of trait object
// 3. ndarray?
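// Rough sketch of option 2, assuming a hypothetical type-erasing trait (not part
// of this module) so that columns with different element types can share one Vec:
//
// trait AnySeries { fn len(&self) -> usize; }
// impl<T: Clone> AnySeries for TimeSeries<T> {
// fn len(&self) -> usize { self.data.len() }
// }
// type MixedDataSet = Vec<Box<dyn AnySeries>>;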
| from_timeseries | identifier_name |
mod.rs | use std::{
cell::{Ref, RefCell},
ops::RangeInclusive,
rc::Rc,
};
pub type Time = f32;
use crate::Pose;
/// Aggregate of [`ColumnData`] that can only be added and not deleted.
/// Length of all [`ColumnData`] are equal, making this effectively a 2D table.
pub type DataSet<T = f32> = Vec<ColumnData<T>>;
/// Easily accessible wrapper around [`TimeTable`] to be shared by multiple owners
pub struct DataStore(pub(crate) Rc<RefCell<TimeTable<Pose>>>);
impl Default for DataStore {
fn default() -> Self {
// Temporary, create dummy data for showing
use crate::math::{cos, sin};
use std::f32::consts::PI;
let n = 10000;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
Self(Rc::new(RefCell::new(TimeTable::new(t, pose))))
}
}
impl DataStore {
pub fn new() -> Self {
Default::default()
}
pub fn clone(&self) -> Self {
DataStore(Rc::clone(&self.0))
}
pub fn borrow(&self) -> Ref<TimeTable<Pose>> {
self.0.borrow()
}
}
/// Single column of data
#[derive(Debug, Default, Clone)]
pub struct ColumnData<T> {
data: Vec<T>,
name: Option<String>,
}
impl<T> ColumnData<T> {
pub fn from_vec(data: impl Into<Vec<T>>) -> Self {
Self {
data: data.into(),
name: None,
}
}
pub fn len(&self) -> usize {
self.data.len()
}
pub fn add(&mut self, element: T) {
self.data.push(element);
}
pub fn get(&self, index: usize) -> Option<&T> {
self.data.get(index)
}
pub fn get_between(&self, range: RangeInclusive<usize>) -> &[T] {
&self.data[range]
}
}
/// Basic time vector for finding indices to look up within [`TimeSeries`] and [`TimeTable`]
#[derive(Debug, Default)]
struct Timeline {
/// Cache stores previously found index to avoid unnecessary iteration when finding time index
cache: Option<(Time, usize)>,
/// Actual vector
vec: Vec<Time>,
}
impl Into<Timeline> for Vec<Time> {
fn into(self) -> Timeline {
Timeline {
vec: self,
..Default::default()
}
}
}
impl Timeline {
/// Tolerance to compare two input time for their equality
const EPSILON: f32 = 0.0005;
pub fn new(time_vec: impl Into<Vec<Time>>) -> Self {
Self {
vec: time_vec.into(),
..Default::default()
}
}
/// Adds a time element to the end
pub fn add(&mut self, time: Time) {
self.vec.push(time);
}
/// Checks if time input has changed from last index search
/// If time input is sufficiently close, assume same index can be used without calling [`get_index`]
///
/// [`get_index`]: Self::get_index
fn time_changed(&self, time: Time) -> bool {
self.cache
.map_or(true, |(prev, _)| (time - prev).abs() > Self::EPSILON)
}
/// Find the index that corresponds to the given time in seconds.
///
/// Returns index of first time that is greater or equal to the specified time.
fn get_index(&self, time: Time) -> Option<usize> {
if self.time_changed(time) {
self.vec.iter().position(|&t| t >= time).map(|index| {
// self.cache = Some((time, index));
index
})
} else {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
}
}
/// Similar to [`get_index`], but only returns time index that is smaller than the input time.
/// This is useful when making sure the returned time index never exceeds the given time, as
/// in [`get_range`]
///
/// [`get_index`]: Self::get_index
/// [`get_range`]: Self::get_range
fn get_index_under(&self, time: Time) -> Option<usize> {
if self.time_changed(time) {
self.vec
.iter()
.position(|&t| t > time)
.map(|idx| idx.saturating_sub(1)) // clamp at 0 without underflowing usize
.map(|index| {
// self.cache = Some((time, index));
index
})
} else {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
}
}
/// Returns range indices that are within the time range specified
pub fn get_range(&self, start: Time, end: Time) -> Option<RangeInclusive<usize>> |
pub fn get_range_raw(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.get_range(start, end)
.map(|range| self.vec[range].to_vec())
}
/// Length of the time vector
pub fn len(&self) -> usize {
self.vec.len()
}
}
#[derive(Debug, Default)]
pub struct TimeTable<T> {
time: Timeline,
data: DataSet<T>,
}
impl<T> Into<TimeTable<T>> for TimeSeries<T> {
fn into(self) -> TimeTable<T> {
TimeTable {
time: self.time,
data: vec![self.data],
}
}
}
impl<T: Clone> TimeTable<T> {
#[allow(dead_code)]
pub fn from_timeseries(timeseries: TimeSeries<T>) -> Self {
Self {
time: timeseries.time,
data: vec![timeseries.data],
}
}
}
impl<T: Clone> TimeTable<T> {
pub fn new(time: Vec<Time>, data: Vec<T>) -> Self {
TimeSeries::new(time, data).into()
}
#[allow(dead_code)]
pub fn get_column(&self, column: usize) -> Option<ColumnData<T>> {
self.data.get(column).map(|val| val.clone())
}
pub fn get_at_time(&self, column: usize, time: Time) -> Option<T> {
if let Some(idx) = self.time.get_index(time) {
self.data
.get(column)
.and_then(|vec| vec.get(idx).clone())
.map(|el| el.to_owned())
} else {
None
}
}
pub fn get_time_range(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.time.get_range_raw(start, end)
}
/// Returns slice of data that is within the time range specified
pub fn get_range(&self, column: usize, start: Time, end: Time) -> Option<Vec<T>> {
if let Some(range) = self.time.get_range(start, end) {
self.data
.get(column)
.map(|vec| vec.get_between(range).to_owned())
} else {
None
}
}
}
#[derive(Debug, Default)]
pub struct TimeSeries<T> {
time: Timeline,
data: ColumnData<T>,
sample_time: Time,
}
impl<T: Clone> TimeSeries<T> {
/// Create a new [`TimeSeries`] from given array of `time` and `data`.
pub fn new(time: impl Into<Vec<Time>>, data: impl Into<Vec<T>>) -> Self {
let time = Timeline::new(time.into());
let data = ColumnData::from_vec(data);
let sample_time = 0.0;
if time.len() != data.len() {
panic!("Size of time and data are different!");
}
Self {
time,
data,
sample_time,
}
}
/// Create an empty [`TimeSeries`]
pub fn empty() -> Self {
Self {
time: Timeline::default(),
data: ColumnData {
data: Vec::new(),
name: None,
},
sample_time: 0.0,
}
}
/// Sets the `sample_time` of the [`TimeSeries`]. If `add` is called with timestamp
/// that is smaller than the sum of the last timestamp and `sample_time`, `add` will
/// not push the data into the [`TimeSeries`]. When `sample_time` is set to zero (by default),
/// `add` will only discard data points whose timestamp is identical to the last timestamp,
/// i.e. it only guarantees monotonically increasing timestamps.
pub fn with_sample_time(mut self, sample_time: Time) -> Self {
self.sample_time = sample_time;
self
}
pub fn add(&mut self, time: Time, element: T) {
// if let Some(last) = self.time.vec.last() {
// if last + self.sample_time < time {
// self.time.add(time);
// self.data.add(element);
// }
// } else {
// self.time.add(time);
// self.data.add(element);
// }
if self.time.vec.is_empty() {
self.time.add(time);
self.data.add(element);
} else {
if self.sample_time > 0.0 {
if self.time.vec.last().unwrap() + self.sample_time <= time {
self.time.add(time);
self.data.add(element);
}
} else {
if self.time.vec.last().unwrap() < &time {
self.time.add(time);
self.data.add(element);
}
}
}
}
/// Get data element for a given time
pub fn get_at_time(&self, time: Time) -> Option<T> {
self.time
.get_index(time)
.and_then(|idx| self.data.get(idx))
.map(|val| val.to_owned())
}
/// Returns slice of data that is within the time range specified
#[allow(dead_code)]
pub fn get_range(&self, start: Time, end: Time) -> Option<&[T]> {
self.time
.get_range(start, end)
.map(|range| self.data.get_between(range))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::math::{cos, sin};
use crate::Pose;
use std::f32::consts::PI;
fn dummy_pose() -> TimeSeries<Pose> {
let n = 5;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
TimeSeries::new(t, pose)
}
fn dummy_f32() -> TimeSeries<f32> {
let n = 5;
let dt = 1.0;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let data: Vec<f32> = t.iter().map(|&t| t * 3.0).collect();
TimeSeries::new(t, data)
}
#[test]
fn add_timeseries() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.5, 5.0);
ts.add(0.8, 15.0);
dbg!(&ts);
}
#[test]
fn check_index() {
// dbg!(&ts);
let ts = dummy_pose();
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // finding exactly matching time
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // running again should give same result
assert_eq!(2, ts.time.get_index(0.015).unwrap()); // finding next closest time stamp
}
#[test]
fn check_range() {
let ts = dummy_f32();
assert_eq!(1, ts.time.get_index(1.0).unwrap());
assert_eq!(3, ts.time.get_index(2.1).unwrap());
assert_eq!(3, ts.time.get_index(2.9).unwrap());
assert_eq!(3, ts.time.get_index(3.0).unwrap());
assert_eq!(&[3.0, 6.0], ts.get_range(1.0, 2.9).unwrap());
assert_eq!(&[3.0, 6.0, 9.0], ts.get_range(1.0, 3.0).unwrap());
}
#[test]
fn series_to_table() {
let ts = dummy_f32();
let _table: TimeTable<f32> = ts.into();
}
#[test]
fn check_sample_time() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.0, 1.0);
ts.add(0.0, 2.0); // This shouldn't be added
ts.add(0.5, 3.0);
ts.add(0.5, 4.0); // This shouldn't be added
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.5).unwrap());
let mut ts = TimeSeries::<f32>::empty().with_sample_time(0.1);
ts.add(0.0, 1.0);
ts.add(0.05, 2.0); // This shouldn't be added
ts.add(0.1, 3.0);
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.1).unwrap());
}
}
// TimeTable data format options:
// 1. Generational-arena -> Arena<TimeSeries>
// pros: easy to push and manage TimeSeries
// cons: Dependency, TimeSeries cannot contain different data types
// 2. Vec<Box<dyn TimeSeries>>
// pros: No dependency
// cons: Use of trait object
// 3. ndarray?
| {
if start < end {
if let Some(start) = self.get_index(start) {
if let Some(end) = self.get_index_under(end) {
return Some(start..=end);
}
}
}
None
} | identifier_body |
mod.rs | use std::{
cell::{Ref, RefCell},
ops::RangeInclusive,
rc::Rc,
};
pub type Time = f32;
use crate::Pose;
/// Aggregate of [`ColumnData`] that can only be added and not deleted.
/// Length of all [`ColumnData`] are equal, making this effectively a 2D table.
pub type DataSet<T = f32> = Vec<ColumnData<T>>;
/// Easily accessible wrapper around [`TimeTable`] to be shared by multiple owners
pub struct DataStore(pub(crate) Rc<RefCell<TimeTable<Pose>>>);
impl Default for DataStore {
fn default() -> Self {
// Temporary, create dummy data for showing
use crate::math::{cos, sin};
use std::f32::consts::PI;
let n = 10000;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
Self(Rc::new(RefCell::new(TimeTable::new(t, pose))))
}
}
impl DataStore {
pub fn new() -> Self {
Default::default()
}
pub fn clone(&self) -> Self {
DataStore(Rc::clone(&self.0))
}
pub fn borrow(&self) -> Ref<TimeTable<Pose>> {
self.0.borrow()
}
}
/// Single column of data
#[derive(Debug, Default, Clone)]
pub struct ColumnData<T> {
data: Vec<T>,
name: Option<String>,
}
impl<T> ColumnData<T> {
pub fn from_vec(data: impl Into<Vec<T>>) -> Self {
Self {
data: data.into(),
name: None,
}
}
pub fn len(&self) -> usize {
self.data.len()
}
pub fn add(&mut self, element: T) {
self.data.push(element);
}
pub fn get(&self, index: usize) -> Option<&T> {
self.data.get(index)
}
pub fn get_between(&self, range: RangeInclusive<usize>) -> &[T] {
&self.data[range]
}
}
/// Basic time vector for finding indices to look up within [`TimeSeries`] and [`TimeTable`]
#[derive(Debug, Default)]
struct Timeline {
/// Cache stores previously found index to avoid unnecessary iteration when finding time index
cache: Option<(Time, usize)>,
/// Actual vector
vec: Vec<Time>,
}
impl Into<Timeline> for Vec<Time> {
fn into(self) -> Timeline {
Timeline {
vec: self,
..Default::default()
}
}
}
impl Timeline {
/// Tolerance to compare two input time for their equality
const EPSILON: f32 = 0.0005;
pub fn new(time_vec: impl Into<Vec<Time>>) -> Self {
Self {
vec: time_vec.into(),
..Default::default()
}
}
/// Adds a time element to the end
pub fn add(&mut self, time: Time) {
self.vec.push(time);
}
/// Checks if time input has changed from last index search
/// If time input is sufficiently close, assume same index can be used without calling [`get_index`]
///
/// [`get_index`]: Self::get_index
fn time_changed(&self, time: Time) -> bool {
self.cache
.map_or(true, |(prev, _)| (time - prev).abs() > Self::EPSILON)
}
/// Find the index that corresponds to the given time in seconds.
///
/// Returns index of first time that is greater or equal to the specified time.
fn get_index(&self, time: Time) -> Option<usize> {
if self.time_changed(time) {
self.vec.iter().position(|&t| t >= time).map(|index| {
// self.cache = Some((time, index));
index
})
} else |
}
/// Similar to [`get_index`], but only returns time index that is smaller than the input time.
/// This is useful when making sure the returned time index never exceeds the given time, as
/// in [`get_range`]
///
/// [`get_index`]: Self::get_index
/// [`get_range`]: Self::get_range
fn get_index_under(&self, time: Time) -> Option<usize> {
if self.time_changed(time) {
self.vec
.iter()
.position(|&t| t > time)
.map(|idx| idx.saturating_sub(1)) // clamp at 0 without underflowing usize
.map(|index| {
// self.cache = Some((time, index));
index
})
} else {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
}
}
/// Returns range indices that are within the time range specified
pub fn get_range(&self, start: Time, end: Time) -> Option<RangeInclusive<usize>> {
if start < end {
if let Some(start) = self.get_index(start) {
if let Some(end) = self.get_index_under(end) {
return Some(start..=end);
}
}
}
None
}
pub fn get_range_raw(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.get_range(start, end)
.map(|range| self.vec[range].to_vec())
}
/// Length of the time vector
pub fn len(&self) -> usize {
self.vec.len()
}
}
#[derive(Debug, Default)]
pub struct TimeTable<T> {
time: Timeline,
data: DataSet<T>,
}
impl<T> Into<TimeTable<T>> for TimeSeries<T> {
fn into(self) -> TimeTable<T> {
TimeTable {
time: self.time,
data: vec![self.data],
}
}
}
impl<T: Clone> TimeTable<T> {
#[allow(dead_code)]
pub fn from_timeseries(timeseries: TimeSeries<T>) -> Self {
Self {
time: timeseries.time,
data: vec![timeseries.data],
}
}
}
impl<T: Clone> TimeTable<T> {
pub fn new(time: Vec<Time>, data: Vec<T>) -> Self {
TimeSeries::new(time, data).into()
}
#[allow(dead_code)]
pub fn get_column(&self, column: usize) -> Option<ColumnData<T>> {
self.data.get(column).map(|val| val.clone())
}
pub fn get_at_time(&self, column: usize, time: Time) -> Option<T> {
if let Some(idx) = self.time.get_index(time) {
self.data
.get(column)
.and_then(|vec| vec.get(idx).clone())
.map(|el| el.to_owned())
} else {
None
}
}
pub fn get_time_range(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.time.get_range_raw(start, end)
}
/// Returns slice of data that is within the time range specified
pub fn get_range(&self, column: usize, start: Time, end: Time) -> Option<Vec<T>> {
if let Some(range) = self.time.get_range(start, end) {
self.data
.get(column)
.map(|vec| vec.get_between(range).to_owned())
} else {
None
}
}
}
#[derive(Debug, Default)]
pub struct TimeSeries<T> {
time: Timeline,
data: ColumnData<T>,
sample_time: Time,
}
impl<T: Clone> TimeSeries<T> {
/// Create a new [`TimeSeries`] from given array of `time` and `data`.
pub fn new(time: impl Into<Vec<Time>>, data: impl Into<Vec<T>>) -> Self {
let time = Timeline::new(time.into());
let data = ColumnData::from_vec(data);
let sample_time = 0.0;
if time.len() != data.len() {
panic!("Size of time and data are different!");
}
Self {
time,
data,
sample_time,
}
}
/// Create an empty [`TimeSeries`]
pub fn empty() -> Self {
Self {
time: Timeline::default(),
data: ColumnData {
data: Vec::new(),
name: None,
},
sample_time: 0.0,
}
}
/// Sets the `sample_time` of the [`TimeSeries`]. If `add` is called with timestamp
/// that is smaller than the sum of the last timestamp and `sample_time`, `add` will
/// not push the data into the [`TimeSeries`]. When `sample_time` is set to zero (by default),
/// `add` will only discard data points whose timestamp is identical to the last timestamp,
/// i.e. it only guarantees monotonically increasing timestamps.
pub fn with_sample_time(mut self, sample_time: Time) -> Self {
self.sample_time = sample_time;
self
}
pub fn add(&mut self, time: Time, element: T) {
// if let Some(last) = self.time.vec.last() {
// if last + self.sample_time < time {
// self.time.add(time);
// self.data.add(element);
// }
// } else {
// self.time.add(time);
// self.data.add(element);
// }
if self.time.vec.is_empty() {
self.time.add(time);
self.data.add(element);
} else {
if self.sample_time > 0.0 {
if self.time.vec.last().unwrap() + self.sample_time <= time {
self.time.add(time);
self.data.add(element);
}
} else {
if self.time.vec.last().unwrap() < &time {
self.time.add(time);
self.data.add(element);
}
}
}
}
/// Get data element for a given time
pub fn get_at_time(&self, time: Time) -> Option<T> {
self.time
.get_index(time)
.and_then(|idx| self.data.get(idx))
.map(|val| val.to_owned())
}
/// Returns slice of data that is within the time range specified
#[allow(dead_code)]
pub fn get_range(&self, start: Time, end: Time) -> Option<&[T]> {
self.time
.get_range(start, end)
.map(|range| self.data.get_between(range))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::math::{cos, sin};
use crate::Pose;
use std::f32::consts::PI;
fn dummy_pose() -> TimeSeries<Pose> {
let n = 5;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
TimeSeries::new(t, pose)
}
fn dummy_f32() -> TimeSeries<f32> {
let n = 5;
let dt = 1.0;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let data: Vec<f32> = t.iter().map(|&t| t * 3.0).collect();
TimeSeries::new(t, data)
}
#[test]
fn add_timeseries() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.5, 5.0);
ts.add(0.8, 15.0);
dbg!(&ts);
}
#[test]
fn check_index() {
// dbg!(&ts);
let ts = dummy_pose();
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // finding exactly matching time
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // running again should give same result
assert_eq!(2, ts.time.get_index(0.015).unwrap()); // finding next closest time stamp
}
#[test]
fn check_range() {
let ts = dummy_f32();
assert_eq!(1, ts.time.get_index(1.0).unwrap());
assert_eq!(3, ts.time.get_index(2.1).unwrap());
assert_eq!(3, ts.time.get_index(2.9).unwrap());
assert_eq!(3, ts.time.get_index(3.0).unwrap());
assert_eq!(&[3.0, 6.0], ts.get_range(1.0, 2.9).unwrap());
assert_eq!(&[3.0, 6.0, 9.0], ts.get_range(1.0, 3.0).unwrap());
}
#[test]
fn series_to_table() {
let ts = dummy_f32();
let _table: TimeTable<f32> = ts.into();
}
#[test]
fn check_sample_time() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.0, 1.0);
ts.add(0.0, 2.0); // This shouldn't be added
ts.add(0.5, 3.0);
ts.add(0.5, 4.0); // This shouldn't be added
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.5).unwrap());
let mut ts = TimeSeries::<f32>::empty().with_sample_time(0.1);
ts.add(0.0, 1.0);
ts.add(0.05, 2.0); // This shouldn't be added
ts.add(0.1, 3.0);
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.1).unwrap());
}
}
// TimeTable data format options:
// 1. Generational-arena -> Arena<TimeSeries>
// pros: easy to push and manage TimeSeries
// cons: Dependency, TimeSeries cannot contain different data types
// 2. Vec<Box<dyn TimeSeries>>
// pros: No dependency
// cons: Use of trait object
// 3. ndarray?
| {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
} | conditional_block |
mod.rs | use std::{
cell::{Ref, RefCell},
ops::RangeInclusive,
rc::Rc,
};
pub type Time = f32;
use crate::Pose;
/// Aggregate of [`ColumnData`] that can only be added and not deleted.
/// Length of all [`ColumnData`] are equal, making this effectively a 2D table.
pub type DataSet<T = f32> = Vec<ColumnData<T>>;
/// Easily accessible wrapper around [`TimeTable`] to be shared by multiple owners
pub struct DataStore(pub(crate) Rc<RefCell<TimeTable<Pose>>>);
impl Default for DataStore {
fn default() -> Self {
// Temporary, create dummy data for showing
use crate::math::{cos, sin};
use std::f32::consts::PI;
let n = 10000;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
Self(Rc::new(RefCell::new(TimeTable::new(t, pose))))
}
}
impl DataStore {
pub fn new() -> Self {
Default::default()
}
pub fn clone(&self) -> Self {
DataStore(Rc::clone(&self.0))
}
pub fn borrow(&self) -> Ref<TimeTable<Pose>> {
self.0.borrow()
}
}
/// Single column of data
#[derive(Debug, Default, Clone)]
pub struct ColumnData<T> {
data: Vec<T>,
name: Option<String>,
}
impl<T> ColumnData<T> {
pub fn from_vec(data: impl Into<Vec<T>>) -> Self {
Self {
data: data.into(),
name: None,
}
}
pub fn len(&self) -> usize {
self.data.len()
}
pub fn add(&mut self, element: T) {
self.data.push(element);
}
pub fn get(&self, index: usize) -> Option<&T> {
self.data.get(index)
}
pub fn get_between(&self, range: RangeInclusive<usize>) -> &[T] {
&self.data[range]
}
}
/// Basic time vector for finding indices to look up within [`TimeSeries`] and [`TimeTable`]
#[derive(Debug, Default)]
struct Timeline {
/// Cache stores previously found index to avoid unnecessary iteration when finding time index
cache: Option<(Time, usize)>,
/// Actual vector
vec: Vec<Time>,
}
impl Into<Timeline> for Vec<Time> {
fn into(self) -> Timeline {
Timeline {
vec: self,
..Default::default()
}
}
}
impl Timeline {
/// Tolerance to compare two input time for their equality
const EPSILON: f32 = 0.0005;
pub fn new(time_vec: impl Into<Vec<Time>>) -> Self {
Self {
vec: time_vec.into(),
..Default::default()
}
}
/// Adds a time element to the end
pub fn add(&mut self, time: Time) {
self.vec.push(time);
}
/// Checks if time input has changed from last index search
/// If time input is sufficiently close, assume same index can be used without calling [`get_index`]
///
/// [`get_index`]: Self::get_index
fn time_changed(&self, time: Time) -> bool {
self.cache
.map_or(true, |(prev, _)| (time - prev).abs() > Self::EPSILON)
}
/// Find the index that corresponds to the given time in seconds.
///
/// Returns index of first time that is greater or equal to the specified time.
fn get_index(&self, time: Time) -> Option<usize> { | // self.cache = Some((time, index));
index
})
} else {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
}
}
/// Similar to [`get_index`], but only returns time index that is smaller than the input time.
/// This is useful when making sure the returned time index never exceeds the given time, as
/// in [`get_range`]
///
/// [`get_index`]: Self::get_index
/// [`get_range`]: Self::get_range
fn get_index_under(&self, time: Time) -> Option<usize> {
if self.time_changed(time) {
self.vec
.iter()
.position(|&t| t > time)
.map(|idx| idx.saturating_sub(1)) // clamp at 0 without underflowing usize
.map(|index| {
// self.cache = Some((time, index));
index
})
} else {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
}
}
/// Returns range indices that are within the time range specified
pub fn get_range(&self, start: Time, end: Time) -> Option<RangeInclusive<usize>> {
if start < end {
if let Some(start) = self.get_index(start) {
if let Some(end) = self.get_index_under(end) {
return Some(start..=end);
}
}
}
None
}
pub fn get_range_raw(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.get_range(start, end)
.map(|range| self.vec[range].to_vec())
}
/// Length of the time vector
pub fn len(&self) -> usize {
self.vec.len()
}
}
#[derive(Debug, Default)]
pub struct TimeTable<T> {
time: Timeline,
data: DataSet<T>,
}
impl<T> Into<TimeTable<T>> for TimeSeries<T> {
fn into(self) -> TimeTable<T> {
TimeTable {
time: self.time,
data: vec![self.data],
}
}
}
impl<T: Clone> TimeTable<T> {
#[allow(dead_code)]
pub fn from_timeseries(timeseries: TimeSeries<T>) -> Self {
Self {
time: timeseries.time,
data: vec![timeseries.data],
}
}
}
impl<T: Clone> TimeTable<T> {
pub fn new(time: Vec<Time>, data: Vec<T>) -> Self {
TimeSeries::new(time, data).into()
}
#[allow(dead_code)]
pub fn get_column(&self, column: usize) -> Option<ColumnData<T>> {
self.data.get(column).map(|val| val.clone())
}
pub fn get_at_time(&self, column: usize, time: Time) -> Option<T> {
if let Some(idx) = self.time.get_index(time) {
self.data
.get(column)
.and_then(|vec| vec.get(idx).clone())
.map(|el| el.to_owned())
} else {
None
}
}
pub fn get_time_range(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.time.get_range_raw(start, end)
}
/// Returns slice of data that is within the time range specified
pub fn get_range(&self, column: usize, start: Time, end: Time) -> Option<Vec<T>> {
if let Some(range) = self.time.get_range(start, end) {
self.data
.get(column)
.map(|vec| vec.get_between(range).to_owned())
} else {
None
}
}
}
#[derive(Debug, Default)]
pub struct TimeSeries<T> {
time: Timeline,
data: ColumnData<T>,
sample_time: Time,
}
impl<T: Clone> TimeSeries<T> {
/// Create a new [`TimeSeries`] from given array of `time` and `data`.
pub fn new(time: impl Into<Vec<Time>>, data: impl Into<Vec<T>>) -> Self {
let time = Timeline::new(time.into());
let data = ColumnData::from_vec(data);
let sample_time = 0.0;
if time.len() != data.len() {
panic!("Size of time and data are different!");
}
Self {
time,
data,
sample_time,
}
}
/// Create an empty [`TimeSeries`]
pub fn empty() -> Self {
Self {
time: Timeline::default(),
data: ColumnData {
data: Vec::new(),
name: None,
},
sample_time: 0.0,
}
}
/// Sets the `sample_time` of the [`TimeSeries`]. If `add` is called with timestamp
/// that is smaller than the sum of the last timestamp and `sample_time`, `add` will
/// not push the data into the [`TimeSeries`]. When `sample_time` is set to zero (by default),
/// `add` will only discard data points whose timestamp is identical to the last timestamp,
/// i.e. it only guarantees monotonically increasing timestamps.
pub fn with_sample_time(mut self, sample_time: Time) -> Self {
self.sample_time = sample_time;
self
}
pub fn add(&mut self, time: Time, element: T) {
// if let Some(last) = self.time.vec.last() {
// if last + self.sample_time < time {
// self.time.add(time);
// self.data.add(element);
// }
// } else {
// self.time.add(time);
// self.data.add(element);
// }
if self.time.vec.is_empty() {
self.time.add(time);
self.data.add(element);
} else {
if self.sample_time > 0.0 {
if self.time.vec.last().unwrap() + self.sample_time <= time {
self.time.add(time);
self.data.add(element);
}
} else {
if self.time.vec.last().unwrap() < &time {
self.time.add(time);
self.data.add(element);
}
}
}
}
/// Get data element for a given time
pub fn get_at_time(&self, time: Time) -> Option<T> {
self.time
.get_index(time)
.and_then(|idx| self.data.get(idx))
.map(|val| val.to_owned())
}
/// Returns slice of data that is within the time range specified
#[allow(dead_code)]
pub fn get_range(&self, start: Time, end: Time) -> Option<&[T]> {
self.time
.get_range(start, end)
.map(|range| self.data.get_between(range))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::math::{cos, sin};
use crate::Pose;
use std::f32::consts::PI;
fn dummy_pose() -> TimeSeries<Pose> {
let n = 5;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
TimeSeries::new(t, pose)
}
fn dummy_f32() -> TimeSeries<f32> {
let n = 5;
let dt = 1.0;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let data: Vec<f32> = t.iter().map(|&t| t * 3.0).collect();
TimeSeries::new(t, data)
}
#[test]
fn add_timeseries() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.5, 5.0);
ts.add(0.8, 15.0);
dbg!(&ts);
}
#[test]
fn check_index() {
// dbg!(&ts);
let ts = dummy_pose();
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // finding exactly matching time
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // running again should give same result
assert_eq!(2, ts.time.get_index(0.015).unwrap()); // finding next closest time stamp
}
#[test]
fn check_range() {
let ts = dummy_f32();
assert_eq!(1, ts.time.get_index(1.0).unwrap());
assert_eq!(3, ts.time.get_index(2.1).unwrap());
assert_eq!(3, ts.time.get_index(2.9).unwrap());
assert_eq!(3, ts.time.get_index(3.0).unwrap());
assert_eq!(&[3.0, 6.0], ts.get_range(1.0, 2.9).unwrap());
assert_eq!(&[3.0, 6.0, 9.0], ts.get_range(1.0, 3.0).unwrap());
}
#[test]
fn series_to_table() {
let ts = dummy_f32();
let _table: TimeTable<f32> = ts.into();
}
#[test]
fn check_sample_time() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.0, 1.0);
ts.add(0.0, 2.0); // This shouldn't be added
ts.add(0.5, 3.0);
ts.add(0.5, 4.0); // This shouldn't be added
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.5).unwrap());
let mut ts = TimeSeries::<f32>::empty().with_sample_time(0.1);
ts.add(0.0, 1.0);
ts.add(0.05, 2.0); // This shouldn't be added
ts.add(0.1, 3.0);
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.1).unwrap());
}
}
// TimeTable data format options:
// 1. Generational-arena -> Arena<TimeSeries>
// pros: easy to push and manage TimeSeries
// cons: Dependency, TimeSeries cannot contain different data types
// 2. Vec<Box<dyn TimeSeries>>
// pros: No dependency
// cons: Use of trait object
// 3. ndarray? | if self.time_changed(time) {
self.vec.iter().position(|&t| t >= time).map(|index| { | random_line_split |
main.rs | // Copyright 2021 The Simlin Authors. All rights reserved.
// Use of this source code is governed by the Apache License,
// Version 2.0, that can be found in the LICENSE file.
use std::fs::File;
use std::io::{BufReader, Write};
use std::rc::Rc;
use pico_args::Arguments;
use simlin_compat::engine::builtins::Loc;
use simlin_compat::engine::common::UnitError;
use simlin_compat::engine::datamodel::{Equation, Project as DatamodelProject};
use simlin_compat::engine::{
eprintln, serde, ErrorCode, Project, Results, Simulation, Variable, Vm,
};
use simlin_compat::prost::Message;
use simlin_compat::{load_csv, open_vensim, open_xmile, to_xmile};
const VERSION: &str = "1.0";
const EXIT_FAILURE: i32 = 1;
#[macro_export]
macro_rules! die(
($($arg:tt)*) => { {
use std;
eprintln!($($arg)*);
std::process::exit(EXIT_FAILURE)
} }
);
fn usage() -> ! {
let argv0 = std::env::args()
.next()
.unwrap_or_else(|| "<mdl>".to_string());
die!(
concat!(
"mdl {}: Simulate system dynamics models.\n\
\n\
USAGE:\n",
" {} [SUBCOMMAND] [OPTION...] PATH\n",
"\n\
OPTIONS:\n",
" -h, --help show this message\n",
" --vensim model is a Vensim .mdl file\n",
" --to-xmile output should be XMILE not protobuf\n",
" --model-only for conversion, only output model instead of project\n",
" --output FILE path to write output file\n",
" --reference FILE reference TSV for debug subcommand\n",
" --no-output don't print the output (for benchmarking)\n",
"\n\
SUBCOMMANDS:\n",
" simulate Simulate a model and display output\n",
" convert Convert an XMILE or Vensim model to protobuf\n",
" equations Print the equations out\n",
" debug Output model equations interleaved with a reference run\n",
),
VERSION,
argv0
);
}
#[derive(Clone, Default, Debug)]
struct Args {
path: Option<String>,
output: Option<String>,
reference: Option<String>,
is_vensim: bool,
is_to_xmile: bool,
is_convert: bool,
is_model_only: bool,
is_no_output: bool,
is_equations: bool,
is_debug: bool,
}
fn parse_args() -> Result<Args, Box<dyn std::error::Error>> {
let mut parsed = Arguments::from_env();
if parsed.contains(["-h", "--help"]) {
usage();
}
let subcommand = parsed.subcommand()?;
if subcommand.is_none() {
eprintln!("error: subcommand required");
usage();
}
let mut args: Args = Default::default();
let subcommand = subcommand.unwrap();
if subcommand == "convert" {
args.is_convert = true;
} else if subcommand == "simulate" {
} else if subcommand == "equations" {
args.is_equations = true;
} else if subcommand == "debug" {
args.is_debug = true;
} else {
eprintln!("error: unknown subcommand {}", subcommand);
usage();
}
args.output = parsed.value_from_str("--output").ok();
args.reference = parsed.value_from_str("--reference").ok();
args.is_no_output = parsed.contains("--no-output");
args.is_model_only = parsed.contains("--model-only");
args.is_to_xmile = parsed.contains("--to-xmile");
args.is_vensim = parsed.contains("--vensim");
let free_arguments = parsed.finish();
if free_arguments.is_empty() {
eprintln!("error: input path required");
usage();
}
args.path = free_arguments[0].to_str().map(|s| s.to_owned());
Ok(args)
}
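/// Converts the datamodel project into an engine `Project`, reports any
/// project, variable, or unit errors, then compiles and runs the "main"
/// model to completion, returning the simulation results.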
fn simulate(project: &DatamodelProject) -> Results {
let project_datamodel = project.clone();
let project = Rc::new(Project::from(project.clone()));
if !project.errors.is_empty() {
for err in project.errors.iter() {
eprintln!("project error: {}", err);
}
}
let mut found_model_error = false;
for (model_name, model) in project.models.iter() {
let model_datamodel = project_datamodel.get_model(model_name);
if model_datamodel.is_none() {
continue;
}
let model_datamodel = model_datamodel.unwrap();
let mut found_var_error = false;
for (ident, errors) in model.get_variable_errors() {
assert!(!errors.is_empty());
let var = model_datamodel.get_variable(&ident).unwrap();
found_var_error = true;
for error in errors {
eprintln!();
if let Some(Equation::Scalar(eqn)) = var.get_equation() {
eprintln!(" {}", eqn);
let space = " ".repeat(error.start as usize);
let underline = "~".repeat((error.end - error.start) as usize);
eprintln!(" {}{}", space, underline);
}
eprintln!(
"error in model '{}' variable '{}': {}",
model_name, ident, error.code
);
}
}
for (ident, errors) in model.get_unit_errors() {
assert!(!errors.is_empty());
let var = model_datamodel.get_variable(&ident).unwrap();
for error in errors {
eprintln!();
let (eqn, loc, details) = match error {
UnitError::DefinitionError(error, details) => {
let details = if let Some(details) = details {
format!("{} -- {}", error.code, details)
} else {
format!("{}", error.code)
};
(
var.get_units(),
Loc::new(error.start.into(), error.end.into()),
details,
)
}
UnitError::ConsistencyError(code, loc, details) => {
let (eqn, loc, code) =
if let Some(Equation::Scalar(eqn)) = var.get_equation() {
(Some(eqn), loc, code)
} else {
(None, loc, code)
};
let details = match details {
Some(details) => format!("{} -- {}", code, details),
None => format!("{}", code),
};
(eqn, loc, details)
}
};
if let Some(eqn) = eqn {
eprintln!(" {}", eqn);
let space = " ".repeat(loc.start as usize);
let underline = "~".repeat((loc.end - loc.start) as usize);
eprintln!(" {}{}", space, underline);
}
eprintln!(
"units error in model '{}' variable '{}': {}",
model_name, ident, details
);
}
}
if let Some(errors) = &model.errors {
for error in errors.iter() {
if error.code == ErrorCode::VariablesHaveErrors && found_var_error {
continue;
}
eprintln!("error in model {}: {}", model_name, error);
found_model_error = true;
}
}
}
let sim = match Simulation::new(&project, "main") {
Ok(sim) => sim,
Err(err) => {
if !(err.code == ErrorCode::NotSimulatable && found_model_error) {
eprintln!("error: {}", err);
}
std::process::exit(1);
}
};
let compiled = sim.compile().unwrap();
let mut vm = Vm::new(compiled).unwrap();
vm.run_to_end().unwrap();
vm.into_results()
}
fn main() {
let args = match parse_args() {
Ok(args) => args,
Err(err) => {
eprintln!("error: {}", err);
usage();
}
};
let file_path = args.path.unwrap_or_else(|| "/dev/stdin".to_string());
let file = File::open(&file_path).unwrap();
let mut reader = BufReader::new(file);
let project = if args.is_vensim {
open_vensim(&mut reader)
} else {
open_xmile(&mut reader)
};
if project.is_err() {
eprintln!("model '{}' error: {}", &file_path, project.err().unwrap());
return;
};
let project = project.unwrap();
if args.is_equations {
let mut output_file =
File::create(&args.output.unwrap_or_else(|| "/dev/stdout".to_string())).unwrap();
let project = Rc::new(Project::from(project));
for (model_name, model) in project.models.iter().filter(|(_, model)| !model.implicit) {
output_file
.write_fmt(format_args!("% {}\n", model_name))
.unwrap();
output_file
.write_fmt(format_args!("\\begin{{align*}}\n"))
.unwrap();
let var_count = model.variables.len();
for (i, (var_name, var)) in model.variables.iter().enumerate() {
let subscript = if var.is_stock() { "(t_0)" } else { "" };
let var_name = str::replace(var_name, "_", "\\_");
let continuation = if !var.is_stock() && i == var_count - 1 {
""
} else {
" \\\\"
};
let eqn = var
.ast()
.map(|ast| ast.to_latex())
.unwrap_or_else(|| "\\varnothing".to_owned());
output_file
.write_fmt(format_args!(
"\\mathrm{{{}}}{} & = {}{}\n",
var_name, subscript, eqn, continuation
))
.unwrap();
if var.is_stock() {
if let Variable::Stock {
inflows, outflows, ..
} = var
{
let continuation = if i == var_count - 1 { "" } else { " \\\\" };
let use_parens = inflows.len() + outflows.len() > 1;
let mut eqn = inflows
.iter()
.map(|inflow| {
format!("\\mathrm{{{}}}", str::replace(inflow, "_", "\\_"))
})
.collect::<Vec<_>>()
.join(" + ");
if !outflows.is_empty() {
eqn = format!(
"{}-{}",
eqn,
outflows
.iter()
.map(|inflow| format!(
"\\mathrm{{{}}}",
str::replace(inflow, "_", "\\_")
))
.collect::<Vec<_>>()
.join(" - ")
);
}
if use_parens {
eqn = format!("({}) ", eqn);
} else {
eqn = format!("{} \\cdot ", eqn);
}
output_file
.write_fmt(format_args!(
"\\mathrm{{{}}}(t) & = \\mathrm{{{}}}(t - dt) + {} dt{}\n",
var_name, var_name, eqn, continuation
))
.unwrap();
}
}
}
output_file
.write_fmt(format_args!("\\end{{align*}}\n"))
.unwrap();
}
} else if args.is_convert {
let pb_project = serde::serialize(&project);
let mut buf: Vec<u8> = if args.is_model_only {
if pb_project.models.len() != 1 {
die!("--model-only specified, but more than 1 model in this project");
}
let mut buf = Vec::with_capacity(pb_project.models[0].encoded_len());
pb_project.models[0].encode(&mut buf).unwrap();
buf
} else {
let mut buf = Vec::with_capacity(pb_project.encoded_len());
pb_project.encode(&mut buf).unwrap();
buf
};
if args.is_to_xmile {
match to_xmile(&project) {
Ok(s) => {
buf = s.into_bytes();
buf.push(b'\n');
}
Err(err) => {
die!("error converting to XMILE: {}", err);
}
}
}
let mut output_file =
File::create(&args.output.unwrap_or_else(|| "/dev/stdout".to_string())).unwrap();
output_file.write_all(&buf).unwrap();
} else if args.is_debug | else {
let results = simulate(&project);
if !args.is_no_output {
results.print_tsv();
}
}
}
| {
if args.reference.is_none() {
eprintln!("missing required argument --reference FILE");
std::process::exit(1);
}
let ref_path = args.reference.unwrap();
let reference = load_csv(&ref_path, b'\t').unwrap();
let results = simulate(&project);
results.print_tsv_comparison(Some(&reference));
} | conditional_block |
main.rs | // Copyright 2021 The Simlin Authors. All rights reserved.
// Use of this source code is governed by the Apache License,
// Version 2.0, that can be found in the LICENSE file.
use std::fs::File;
use std::io::{BufReader, Write};
use std::rc::Rc;
use pico_args::Arguments;
use simlin_compat::engine::builtins::Loc;
use simlin_compat::engine::common::UnitError;
use simlin_compat::engine::datamodel::{Equation, Project as DatamodelProject};
use simlin_compat::engine::{
eprintln, serde, ErrorCode, Project, Results, Simulation, Variable, Vm,
};
use simlin_compat::prost::Message;
use simlin_compat::{load_csv, open_vensim, open_xmile, to_xmile};
const VERSION: &str = "1.0";
const EXIT_FAILURE: i32 = 1;
#[macro_export]
macro_rules! die(
($($arg:tt)*) => { {
use std;
eprintln!($($arg)*);
std::process::exit(EXIT_FAILURE)
} }
);
fn usage() -> ! {
let argv0 = std::env::args()
.next()
.unwrap_or_else(|| "<mdl>".to_string());
die!(
concat!(
"mdl {}: Simulate system dynamics models.\n\
\n\
USAGE:\n",
" {} [SUBCOMMAND] [OPTION...] PATH\n",
"\n\
OPTIONS:\n",
" -h, --help show this message\n",
" --vensim model is a Vensim .mdl file\n",
" --to-xmile output should be XMILE not protobuf\n",
" --model-only for conversion, only output model instead of project\n",
" --output FILE path to write output file\n",
" --reference FILE reference TSV for debug subcommand\n",
" --no-output don't print the output (for benchmarking)\n",
"\n\
SUBCOMMANDS:\n",
" simulate Simulate a model and display output\n",
" convert Convert an XMILE or Vensim model to protobuf\n",
" equations Print the equations out\n",
" debug Output model equations interleaved with a reference run\n",
),
VERSION,
argv0
);
}
#[derive(Clone, Default, Debug)]
struct Args {
path: Option<String>,
output: Option<String>,
reference: Option<String>,
is_vensim: bool,
is_to_xmile: bool,
is_convert: bool,
is_model_only: bool,
is_no_output: bool,
is_equations: bool,
is_debug: bool,
}
fn parse_args() -> Result<Args, Box<dyn std::error::Error>> {
let mut parsed = Arguments::from_env();
if parsed.contains(["-h", "--help"]) {
usage();
}
let subcommand = parsed.subcommand()?;
if subcommand.is_none() {
eprintln!("error: subcommand required");
usage();
}
let mut args: Args = Default::default();
let subcommand = subcommand.unwrap();
if subcommand == "convert" {
args.is_convert = true;
} else if subcommand == "simulate" {
} else if subcommand == "equations" {
args.is_equations = true;
} else if subcommand == "debug" {
args.is_debug = true;
} else {
eprintln!("error: unknown subcommand {}", subcommand);
usage();
}
args.output = parsed.value_from_str("--output").ok();
args.reference = parsed.value_from_str("--reference").ok();
args.is_no_output = parsed.contains("--no-output");
args.is_model_only = parsed.contains("--model-only");
args.is_to_xmile = parsed.contains("--to-xmile");
args.is_vensim = parsed.contains("--vensim");
let free_arguments = parsed.finish();
if free_arguments.is_empty() {
eprintln!("error: input path required");
usage();
}
args.path = free_arguments[0].to_str().map(|s| s.to_owned());
Ok(args)
}
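/// Converts the datamodel project into an engine `Project`, reports any
/// project, variable, or unit errors, then compiles and runs the "main"
/// model to completion, returning the simulation results.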
fn simulate(project: &DatamodelProject) -> Results {
let project_datamodel = project.clone();
let project = Rc::new(Project::from(project.clone()));
if !project.errors.is_empty() {
for err in project.errors.iter() {
eprintln!("project error: {}", err);
}
}
let mut found_model_error = false;
for (model_name, model) in project.models.iter() {
let model_datamodel = project_datamodel.get_model(model_name);
if model_datamodel.is_none() {
continue;
}
let model_datamodel = model_datamodel.unwrap();
let mut found_var_error = false;
for (ident, errors) in model.get_variable_errors() {
assert!(!errors.is_empty());
let var = model_datamodel.get_variable(&ident).unwrap();
found_var_error = true;
for error in errors {
eprintln!();
if let Some(Equation::Scalar(eqn)) = var.get_equation() {
eprintln!(" {}", eqn);
let space = " ".repeat(error.start as usize);
let underline = "~".repeat((error.end - error.start) as usize);
eprintln!(" {}{}", space, underline);
}
eprintln!(
"error in model '{}' variable '{}': {}",
model_name, ident, error.code
);
}
}
for (ident, errors) in model.get_unit_errors() {
assert!(!errors.is_empty());
let var = model_datamodel.get_variable(&ident).unwrap();
for error in errors {
eprintln!();
let (eqn, loc, details) = match error {
UnitError::DefinitionError(error, details) => {
let details = if let Some(details) = details {
format!("{} -- {}", error.code, details)
} else {
format!("{}", error.code)
};
(
var.get_units(),
Loc::new(error.start.into(), error.end.into()),
details,
)
}
UnitError::ConsistencyError(code, loc, details) => {
let (eqn, loc, code) =
if let Some(Equation::Scalar(eqn)) = var.get_equation() {
(Some(eqn), loc, code)
} else {
(None, loc, code)
};
let details = match details {
Some(details) => format!("{} -- {}", code, details),
None => format!("{}", code),
};
(eqn, loc, details)
}
};
if let Some(eqn) = eqn {
eprintln!(" {}", eqn);
let space = " ".repeat(loc.start as usize);
let underline = "~".repeat((loc.end - loc.start) as usize);
eprintln!(" {}{}", space, underline);
}
eprintln!(
"units error in model '{}' variable '{}': {}",
model_name, ident, details
);
}
}
if let Some(errors) = &model.errors {
for error in errors.iter() {
if error.code == ErrorCode::VariablesHaveErrors && found_var_error {
continue;
}
eprintln!("error in model {}: {}", model_name, error);
found_model_error = true;
}
}
}
let sim = match Simulation::new(&project, "main") {
Ok(sim) => sim,
Err(err) => {
if !(err.code == ErrorCode::NotSimulatable && found_model_error) {
eprintln!("error: {}", err);
}
std::process::exit(1);
}
};
let compiled = sim.compile().unwrap();
let mut vm = Vm::new(compiled).unwrap();
vm.run_to_end().unwrap(); |
fn main() {
let args = match parse_args() {
Ok(args) => args,
Err(err) => {
eprintln!("error: {}", err);
usage();
}
};
let file_path = args.path.unwrap_or_else(|| "/dev/stdin".to_string());
let file = File::open(&file_path).unwrap();
let mut reader = BufReader::new(file);
let project = if args.is_vensim {
open_vensim(&mut reader)
} else {
open_xmile(&mut reader)
};
if project.is_err() {
eprintln!("model '{}' error: {}", &file_path, project.err().unwrap());
return;
};
let project = project.unwrap();
if args.is_equations {
let mut output_file =
File::create(&args.output.unwrap_or_else(|| "/dev/stdout".to_string())).unwrap();
let project = Rc::new(Project::from(project));
for (model_name, model) in project.models.iter().filter(|(_, model)| !model.implicit) {
output_file
.write_fmt(format_args!("% {}\n", model_name))
.unwrap();
output_file
.write_fmt(format_args!("\\begin{{align*}}\n"))
.unwrap();
let var_count = model.variables.len();
for (i, (var_name, var)) in model.variables.iter().enumerate() {
let subscript = if var.is_stock() { "(t_0)" } else { "" };
let var_name = str::replace(var_name, "_", "\\_");
let continuation = if !var.is_stock() && i == var_count - 1 {
""
} else {
" \\\\"
};
let eqn = var
.ast()
.map(|ast| ast.to_latex())
.unwrap_or_else(|| "\\varnothing".to_owned());
output_file
.write_fmt(format_args!(
"\\mathrm{{{}}}{} & = {}{}\n",
var_name, subscript, eqn, continuation
))
.unwrap();
if var.is_stock() {
if let Variable::Stock {
inflows, outflows, ..
} = var
{
let continuation = if i == var_count - 1 { "" } else { " \\\\" };
let use_parens = inflows.len() + outflows.len() > 1;
let mut eqn = inflows
.iter()
.map(|inflow| {
format!("\\mathrm{{{}}}", str::replace(inflow, "_", "\\_"))
})
.collect::<Vec<_>>()
.join(" + ");
if !outflows.is_empty() {
eqn = format!(
"{}-{}",
eqn,
outflows
.iter()
.map(|inflow| format!(
"\\mathrm{{{}}}",
str::replace(inflow, "_", "\\_")
))
.collect::<Vec<_>>()
.join(" - ")
);
}
if use_parens {
eqn = format!("({}) ", eqn);
} else {
eqn = format!("{} \\cdot ", eqn);
}
output_file
.write_fmt(format_args!(
"\\mathrm{{{}}}(t) & = \\mathrm{{{}}}(t - dt) + {} dt{}\n",
var_name, var_name, eqn, continuation
))
.unwrap();
}
}
}
output_file
.write_fmt(format_args!("\\end{{align*}}\n"))
.unwrap();
}
} else if args.is_convert {
let pb_project = serde::serialize(&project);
let mut buf: Vec<u8> = if args.is_model_only {
if pb_project.models.len() != 1 {
die!("--model-only specified, but more than 1 model in this project");
}
let mut buf = Vec::with_capacity(pb_project.models[0].encoded_len());
pb_project.models[0].encode(&mut buf).unwrap();
buf
} else {
let mut buf = Vec::with_capacity(pb_project.encoded_len());
pb_project.encode(&mut buf).unwrap();
buf
};
if args.is_to_xmile {
match to_xmile(&project) {
Ok(s) => {
buf = s.into_bytes();
buf.push(b'\n');
}
Err(err) => {
die!("error converting to XMILE: {}", err);
}
}
}
let mut output_file =
File::create(&args.output.unwrap_or_else(|| "/dev/stdout".to_string())).unwrap();
output_file.write_all(&buf).unwrap();
} else if args.is_debug {
if args.reference.is_none() {
eprintln!("missing required argument --reference FILE");
std::process::exit(1);
}
let ref_path = args.reference.unwrap();
let reference = load_csv(&ref_path, b'\t').unwrap();
let results = simulate(&project);
results.print_tsv_comparison(Some(&reference));
} else {
let results = simulate(&project);
if !args.is_no_output {
results.print_tsv();
}
}
} | vm.into_results()
} | random_line_split |
main.rs | // Copyright 2021 The Simlin Authors. All rights reserved.
// Use of this source code is governed by the Apache License,
// Version 2.0, that can be found in the LICENSE file.
use std::fs::File;
use std::io::{BufReader, Write};
use std::rc::Rc;
use pico_args::Arguments;
use simlin_compat::engine::builtins::Loc;
use simlin_compat::engine::common::UnitError;
use simlin_compat::engine::datamodel::{Equation, Project as DatamodelProject};
use simlin_compat::engine::{
eprintln, serde, ErrorCode, Project, Results, Simulation, Variable, Vm,
};
use simlin_compat::prost::Message;
use simlin_compat::{load_csv, open_vensim, open_xmile, to_xmile};
const VERSION: &str = "1.0";
const EXIT_FAILURE: i32 = 1;
#[macro_export]
macro_rules! die(
($($arg:tt)*) => { {
use std;
eprintln!($($arg)*);
std::process::exit(EXIT_FAILURE)
} }
);
fn | () -> ! {
let argv0 = std::env::args()
.next()
.unwrap_or_else(|| "<mdl>".to_string());
die!(
concat!(
"mdl {}: Simulate system dynamics models.\n\
\n\
USAGE:\n",
" {} [SUBCOMMAND] [OPTION...] PATH\n",
"\n\
OPTIONS:\n",
" -h, --help show this message\n",
" --vensim model is a Vensim .mdl file\n",
" --to-xmile output should be XMILE not protobuf\n",
" --model-only for conversion, only output model instead of project\n",
" --output FILE path to write output file\n",
" --reference FILE reference TSV for debug subcommand\n",
" --no-output don't print the output (for benchmarking)\n",
"\n\
SUBCOMMANDS:\n",
" simulate Simulate a model and display output\n",
" convert Convert an XMILE or Vensim model to protobuf\n",
" equations Print the equations out\n",
" debug Output model equations interleaved with a reference run\n",
),
VERSION,
argv0
);
}
#[derive(Clone, Default, Debug)]
struct Args {
path: Option<String>,
output: Option<String>,
reference: Option<String>,
is_vensim: bool,
is_to_xmile: bool,
is_convert: bool,
is_model_only: bool,
is_no_output: bool,
is_equations: bool,
is_debug: bool,
}
fn parse_args() -> Result<Args, Box<dyn std::error::Error>> {
let mut parsed = Arguments::from_env();
if parsed.contains(["-h", "--help"]) {
usage();
}
let subcommand = parsed.subcommand()?;
if subcommand.is_none() {
eprintln!("error: subcommand required");
usage();
}
let mut args: Args = Default::default();
let subcommand = subcommand.unwrap();
if subcommand == "convert" {
args.is_convert = true;
} else if subcommand == "simulate" {
} else if subcommand == "equations" {
args.is_equations = true;
} else if subcommand == "debug" {
args.is_debug = true;
} else {
eprintln!("error: unknown subcommand {}", subcommand);
usage();
}
args.output = parsed.value_from_str("--output").ok();
args.reference = parsed.value_from_str("--reference").ok();
args.is_no_output = parsed.contains("--no-output");
args.is_model_only = parsed.contains("--model-only");
args.is_to_xmile = parsed.contains("--to-xmile");
args.is_vensim = parsed.contains("--vensim");
let free_arguments = parsed.finish();
if free_arguments.is_empty() {
eprintln!("error: input path required");
usage();
}
args.path = free_arguments[0].to_str().map(|s| s.to_owned());
Ok(args)
}
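/// Converts the datamodel project into an engine `Project`, reports any
/// project, variable, or unit errors, then compiles and runs the "main"
/// model to completion, returning the simulation results.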
fn simulate(project: &DatamodelProject) -> Results {
let project_datamodel = project.clone();
let project = Rc::new(Project::from(project.clone()));
if !project.errors.is_empty() {
for err in project.errors.iter() {
eprintln!("project error: {}", err);
}
}
let mut found_model_error = false;
for (model_name, model) in project.models.iter() {
let model_datamodel = project_datamodel.get_model(model_name);
if model_datamodel.is_none() {
continue;
}
let model_datamodel = model_datamodel.unwrap();
let mut found_var_error = false;
for (ident, errors) in model.get_variable_errors() {
assert!(!errors.is_empty());
let var = model_datamodel.get_variable(&ident).unwrap();
found_var_error = true;
for error in errors {
eprintln!();
if let Some(Equation::Scalar(eqn)) = var.get_equation() {
eprintln!(" {}", eqn);
let space = " ".repeat(error.start as usize);
let underline = "~".repeat((error.end - error.start) as usize);
eprintln!(" {}{}", space, underline);
}
eprintln!(
"error in model '{}' variable '{}': {}",
model_name, ident, error.code
);
}
}
for (ident, errors) in model.get_unit_errors() {
assert!(!errors.is_empty());
let var = model_datamodel.get_variable(&ident).unwrap();
for error in errors {
eprintln!();
let (eqn, loc, details) = match error {
UnitError::DefinitionError(error, details) => {
let details = if let Some(details) = details {
format!("{} -- {}", error.code, details)
} else {
format!("{}", error.code)
};
(
var.get_units(),
Loc::new(error.start.into(), error.end.into()),
details,
)
}
UnitError::ConsistencyError(code, loc, details) => {
let (eqn, loc, code) =
if let Some(Equation::Scalar(eqn)) = var.get_equation() {
(Some(eqn), loc, code)
} else {
(None, loc, code)
};
let details = match details {
Some(details) => format!("{} -- {}", code, details),
None => format!("{}", code),
};
(eqn, loc, details)
}
};
if let Some(eqn) = eqn {
eprintln!(" {}", eqn);
let space = " ".repeat(loc.start as usize);
let underline = "~".repeat((loc.end - loc.start) as usize);
eprintln!(" {}{}", space, underline);
}
eprintln!(
"units error in model '{}' variable '{}': {}",
model_name, ident, details
);
}
}
if let Some(errors) = &model.errors {
for error in errors.iter() {
if error.code == ErrorCode::VariablesHaveErrors && found_var_error {
continue;
}
eprintln!("error in model {}: {}", model_name, error);
found_model_error = true;
}
}
}
let sim = match Simulation::new(&project, "main") {
Ok(sim) => sim,
Err(err) => {
if !(err.code == ErrorCode::NotSimulatable && found_model_error) {
eprintln!("error: {}", err);
}
std::process::exit(1);
}
};
let compiled = sim.compile().unwrap();
let mut vm = Vm::new(compiled).unwrap();
vm.run_to_end().unwrap();
vm.into_results()
}
fn main() {
let args = match parse_args() {
Ok(args) => args,
Err(err) => {
eprintln!("error: {}", err);
usage();
}
};
let file_path = args.path.unwrap_or_else(|| "/dev/stdin".to_string());
let file = File::open(&file_path).unwrap();
let mut reader = BufReader::new(file);
let project = if args.is_vensim {
open_vensim(&mut reader)
} else {
open_xmile(&mut reader)
};
if project.is_err() {
eprintln!("model '{}' error: {}", &file_path, project.err().unwrap());
return;
};
let project = project.unwrap();
if args.is_equations {
let mut output_file =
File::create(&args.output.unwrap_or_else(|| "/dev/stdout".to_string())).unwrap();
let project = Rc::new(Project::from(project));
for (model_name, model) in project.models.iter().filter(|(_, model)| !model.implicit) {
output_file
.write_fmt(format_args!("% {}\n", model_name))
.unwrap();
output_file
.write_fmt(format_args!("\\begin{{align*}}\n"))
.unwrap();
let var_count = model.variables.len();
for (i, (var_name, var)) in model.variables.iter().enumerate() {
let subscript = if var.is_stock() { "(t_0)" } else { "" };
let var_name = str::replace(var_name, "_", "\\_");
let continuation = if !var.is_stock() && i == var_count - 1 {
""
} else {
" \\\\"
};
let eqn = var
.ast()
.map(|ast| ast.to_latex())
.unwrap_or_else(|| "\\varnothing".to_owned());
output_file
.write_fmt(format_args!(
"\\mathrm{{{}}}{} & = {}{}\n",
var_name, subscript, eqn, continuation
))
.unwrap();
if var.is_stock() {
if let Variable::Stock {
inflows, outflows, ..
} = var
{
let continuation = if i == var_count - 1 { "" } else { " \\\\" };
let use_parens = inflows.len() + outflows.len() > 1;
let mut eqn = inflows
.iter()
.map(|inflow| {
format!("\\mathrm{{{}}}", str::replace(inflow, "_", "\\_"))
})
.collect::<Vec<_>>()
.join(" + ");
if !outflows.is_empty() {
eqn = format!(
"{}-{}",
eqn,
outflows
.iter()
.map(|inflow| format!(
"\\mathrm{{{}}}",
str::replace(inflow, "_", "\\_")
))
.collect::<Vec<_>>()
.join(" - ")
);
}
if use_parens {
eqn = format!("({}) ", eqn);
} else {
eqn = format!("{} \\cdot ", eqn);
}
output_file
.write_fmt(format_args!(
"\\mathrm{{{}}}(t) & = \\mathrm{{{}}}(t - dt) + {} dt{}\n",
var_name, var_name, eqn, continuation
))
.unwrap();
}
}
}
output_file
.write_fmt(format_args!("\\end{{align*}}\n"))
.unwrap();
}
} else if args.is_convert {
let pb_project = serde::serialize(&project);
let mut buf: Vec<u8> = if args.is_model_only {
if pb_project.models.len() != 1 {
die!("--model-only specified, but more than 1 model in this project");
}
let mut buf = Vec::with_capacity(pb_project.models[0].encoded_len());
pb_project.models[0].encode(&mut buf).unwrap();
buf
} else {
let mut buf = Vec::with_capacity(pb_project.encoded_len());
pb_project.encode(&mut buf).unwrap();
buf
};
if args.is_to_xmile {
match to_xmile(&project) {
Ok(s) => {
buf = s.into_bytes();
buf.push(b'\n');
}
Err(err) => {
die!("error converting to XMILE: {}", err);
}
}
}
let mut output_file =
File::create(&args.output.unwrap_or_else(|| "/dev/stdout".to_string())).unwrap();
output_file.write_all(&buf).unwrap();
} else if args.is_debug {
if args.reference.is_none() {
eprintln!("missing required argument --reference FILE");
std::process::exit(1);
}
let ref_path = args.reference.unwrap();
let reference = load_csv(&ref_path, b'\t').unwrap();
let results = simulate(&project);
results.print_tsv_comparison(Some(&reference));
} else {
let results = simulate(&project);
if !args.is_no_output {
results.print_tsv();
}
}
}
| usage | identifier_name |
snippet-bot.ts | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* eslint-disable @typescript-eslint/no-var-requires */
/* eslint-disable node/no-extraneous-import */
import {Probot, Context} from 'probot';
import {PullRequest} from '@octokit/webhooks-definitions/schema';
import {Configuration, ConfigurationOptions} from './configuration';
import {DEFAULT_CONFIGURATION, CONFIGURATION_FILE_PATH} from './configuration';
import {REFRESH_LABEL, NO_PREFIX_REQ_LABEL, SNIPPET_BOT_LABELS} from './labels';
import {
parseRegionTags,
parseRegionTagsInPullRequest,
ParseResult,
} from './region-tag-parser';
import {
Conclusion,
CheckAggregator,
formatBody,
formatExpandable,
formatRegionTag,
formatViolations,
formatMatchingViolation,
isFile,
} from './utils';
import {invalidateCache} from './snippets';
import {
Violation,
checkProductPrefixViolations,
checkRemovingUsedTagViolations,
} from './violations';
import schema from './config-schema.json';
import {ConfigChecker, getConfig} from '@google-automations/bot-config-utils';
import {syncLabels} from '@google-automations/label-utils';
import {logger, addOrUpdateIssueComment} from 'gcf-utils';
import fetch from 'node-fetch';
import tmp from 'tmp-promise';
import tar from 'tar';
import util from 'util';
import fs from 'fs';
import {promises as pfs} from 'fs';
import path from 'path';
const streamPipeline = util.promisify(require('stream').pipeline);
// Solely to avoid using the `any` type.
interface Label {
name: string;
}
const FULL_SCAN_ISSUE_TITLE = 'snippet-bot full scan';
const REFRESH_UI = '- [ ] Refresh this comment';
const REFRESH_STRING = '- [x] Refresh this comment';
// The GitHub issue comment API has a limit of 65536 characters.
const MAX_CHARS_IN_COMMENT = 64000;
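// Downloads `url` and streams the response body into `file` on disk,
// throwing on a non-OK HTTP response.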
async function downloadFile(url: string, file: string) {
const response = await fetch(url);
if (response.ok) {
return streamPipeline(response.body, fs.createWriteStream(file));
}
throw new Error(`unexpected response ${response.statusText}`);
}
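// Recursively collects every file path under `dir` into `allFiles` and
// returns the accumulated list.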
async function getFiles(dir: string, allFiles: string[]) {
const files = (await pfs.readdir(dir)).map(f => path.join(dir, f));
for (const f of files) {
if (!(await pfs.stat(f)).isDirectory()) {
allFiles.push(f);
}
}
await Promise.all(
files.map(
async f => (await pfs.stat(f)).isDirectory() && getFiles(f, allFiles)
)
);
return allFiles;
}
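// Handles a "snippet-bot full scan" issue: downloads the default branch
// tarball, checks region tags in every non-ignored file, and writes the
// result back into the issue body.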
async function fullScan(
context: Context<'issues'>,
configuration: Configuration
) {
const installationId = context.payload.installation?.id;
const commentMark = `<!-- probot comment [${installationId}]-->`;
const owner = context.payload.repository.owner.login;
const repo = context.payload.repository.name;
const defaultBranch = context.payload.repository.default_branch;
if (!context.payload.issue?.title.includes(FULL_SCAN_ISSUE_TITLE)) {
return;
}
// full scan start
const issueNumber = context.payload.issue.number;
const url = `https://github.com/${owner}/${repo}/tarball/${defaultBranch}`;
const tmpDir = tmp.dirSync();
logger.info(`working directory: ${tmpDir.name}`);
const file = `${tmpDir.name}/${repo}.tar.gz`;
// Download the default branch tarball and run full scan.
try {
await downloadFile(url, file);
logger.info(`Downloaded to ${file}`);
tar.x({
file: file,
cwd: tmpDir.name,
sync: true,
});
let archiveDir!: string;
for (const f of await pfs.readdir(tmpDir.name)) {
const cur = tmpDir.name + '/' + f;
const stat = await pfs.lstat(cur);
if (stat.isDirectory()) {
archiveDir = cur;
}
}
if (archiveDir === undefined) {
throw new Error('Failed to extract the archive');
}
// Determine the short commit hash from the directory name.
// We'll use the hash for creating permalink.
let commitHash = defaultBranch; // Defaulting to the default branch.
const lastDashIndex = archiveDir.lastIndexOf('-');
if (lastDashIndex !== -1) {
commitHash = archiveDir.substr(lastDashIndex + 1);
}
logger.info(`Using commit hash "${commitHash}"`);
const files = await getFiles(archiveDir, []);
let mismatchedTags = false;
const failureMessages: string[] = [];
for (const file of files) {
if (configuration.ignoredFile(file)) {
logger.info('ignoring file from configuration: ' + file);
continue;
}
try {
const fileContents = await pfs.readFile(file, 'utf-8');
const parseResult = parseRegionTags(
fileContents,
file.replace(archiveDir + '/', ''),
owner,
repo,
commitHash
);
if (!parseResult.result) {
mismatchedTags = true;
for (const violation of parseResult.violations) {
const formatted = formatMatchingViolation(violation);
failureMessages.push(`- [ ] ${formatted}`);
}
}
} catch (err) {
err.message = `Failed to read the file: ${err.message}`;
logger.error(err);
continue;
}
}
    let bodyDetail = 'Great job! No unmatched region tags found!';
if (mismatchedTags) {
bodyDetail = failureMessages.join('\n');
}
await context.octokit.issues.update({
owner: owner,
repo: repo,
issue_number: issueNumber,
body: formatBody(
context.payload.issue.body as string,
commentMark,
`## snippet-bot scan result
Life is too short to manually check unmatched region tags.
Here is the result:
${bodyDetail}`
),
});
} catch (err) {
err.message = `Failed to scan files: ${err.message}`;
logger.error(err);
await context.octokit.issues.update({
owner: owner,
repo: repo,
issue_number: issueNumber,
body: formatBody(
context.payload.issue.body as string,
commentMark,
`## snippet-bot scan result\nFailed running the full scan: ${err}.`
),
});
} finally {
// Clean up the directory.
await pfs.rmdir(tmpDir.name, {recursive: true});
}
}
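// Parses the PR diff for region tag changes, validates the touched files,
// and reports violations through status checks and a refreshable PR comment.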
async function scanPullRequest(
context: Context<'pull_request'> | Context<'issue_comment'>,
pull_request: PullRequest,
configuration: Configuration,
refreshing = false
) {
const installationId = context.payload.installation?.id;
const owner = context.payload.repository.owner.login;
const repo = context.payload.repository.name;
const aggregator = new CheckAggregator(
context.octokit,
'snippet-bot check',
configuration.aggregateChecks()
);
// Parse the PR diff and recognize added/deleted region tags.
const result = await parseRegionTagsInPullRequest(
context.octokit,
pull_request.diff_url,
pull_request.base.repo.owner.login,
pull_request.base.repo.name,
pull_request.base.sha,
pull_request.head.repo.owner.login,
pull_request.head.repo.name,
pull_request.head.sha
);
let mismatchedTags = false;
let tagsFound = false;
const failureMessages: string[] = [];
// Whether to ignore prefix requirement.
const noPrefixReq = pull_request.labels.some((label: Label) => {
return label.name === NO_PREFIX_REQ_LABEL;
});
// Keep track of start tags in all the files.
const parseResults = new Map<string, ParseResult>();
// If we found any new files, verify they all have matching region tags.
for (const file of result.files) {
if (configuration.ignoredFile(file)) {
logger.info('ignoring file from configuration: ' + file);
continue;
}
try {
const blob = await context.octokit.repos.getContent({
owner: pull_request.head.repo.owner.login,
repo: pull_request.head.repo.name,
path: file,
ref: pull_request.head.sha,
});
if (!isFile(blob.data)) {
continue;
}
const fileContents = Buffer.from(blob.data.content, 'base64').toString(
'utf8'
);
const parseResult = parseRegionTags(
fileContents,
file,
owner,
repo,
pull_request.head.sha
);
parseResults.set(file, parseResult);
if (!parseResult.result) {
mismatchedTags = true;
for (const violation of parseResult.violations) {
failureMessages.push(formatMatchingViolation(violation));
}
}
if (parseResult.tagsFound) {
tagsFound = true;
}
} catch (err) {
// Ignoring 403/404 errors.
if (err.status === 403 || err.status === 404) {
logger.info(
`ignoring 403/404 errors upon fetching ${file}: ${err.message}`
);
} else {
throw err;
}
}
}
const checkParams = context.repo({
name: 'Mismatched region tag',
conclusion: 'success' as Conclusion,
head_sha: pull_request.head.sha,
output: {
title: 'Region tag check',
summary: 'Region tag successful',
text: 'Region tag successful',
},
});
if (mismatchedTags) {
checkParams.conclusion = 'failure';
checkParams.output = {
title: 'Mismatched region tag detected.',
summary: 'Some new files have mismatched region tag',
text: failureMessages.join('\n'),
};
}
  // Post the status of the region tag check to the PR, using:
  // https://developer.github.com/v3/checks/
if (
configuration.alwaysCreateStatusCheck() ||
configuration.aggregateChecks() ||
tagsFound
) {
await aggregator.add(checkParams);
}
let commentBody = '';
if (result.changes.length === 0) {
    // If this run is initiated by a user with the force-run label
    // or the refresh checkbox, we don't exit.
    //
    // Also, when the config `alwaysCreateStatusCheck` is true, we need
    // to create successful status checks, so we don't exit.
if (
!refreshing &&
!configuration.alwaysCreateStatusCheck() &&
!configuration.aggregateChecks()
) {
return;
}
commentBody += 'No region tags are edited in this PR.\n';
}
// Add or update a comment on the PR.
const prNumber = pull_request.number;
// First check product prefix for added region tags.
let productPrefixViolations: Array<Violation> = [];
if (!noPrefixReq) {
productPrefixViolations = await checkProductPrefixViolations(
result,
configuration
);
}
const removingUsedTagsViolations = await checkRemovingUsedTagViolations(
result,
configuration,
parseResults,
pull_request.base.repo.full_name,
pull_request.base.ref
);
const removeUsedTagViolations = [
...(removingUsedTagsViolations.get('REMOVE_USED_TAG') as Violation[]),
...(removingUsedTagsViolations.get(
'REMOVE_CONFLICTING_TAG'
) as Violation[]),
];
const removeSampleBrowserViolations = removingUsedTagsViolations.get(
'REMOVE_SAMPLE_BROWSER_PAGE'
) as Violation[];
const removeFrozenRegionTagViolations = removingUsedTagsViolations.get(
'REMOVE_FROZEN_REGION_TAG'
) as Violation[];
// status check for productPrefixViolations
const prefixCheckParams = context.repo({
name: 'Region tag product prefix',
conclusion: 'success' as Conclusion,
head_sha: pull_request.head.sha,
output: {
title: 'No violations',
summary: 'No violations found',
text: 'All the tags have appropriate product prefix',
},
});
// status check for removeUsedTagViolations
const removeUsedTagCheckParams = context.repo({
name: 'Disruptive region tag removal',
conclusion: 'success' as Conclusion,
head_sha: pull_request.head.sha,
output: {
title: 'No violations',
summary: 'No violations found',
text: 'No disruptive region tag removal',
},
});
if (
productPrefixViolations.length > 0 ||
removeUsedTagViolations.length > 0
) {
commentBody += 'Here is the summary of possible violations 😱';
// Rendering prefix violations
if (productPrefixViolations.length > 0) {
let summary = '';
if (productPrefixViolations.length === 1) {
summary =
'There is a possible violation for not having product prefix.';
} else {
summary = `There are ${productPrefixViolations.length} possible violations for not having product prefix.`;
}
const productPrefixViolationsDetail = formatViolations(
productPrefixViolations,
summary
);
commentBody += productPrefixViolationsDetail;
prefixCheckParams.conclusion = 'failure';
prefixCheckParams.output = {
title: 'Missing region tag prefix',
summary: 'Some region tags do not have appropriate prefix',
text: productPrefixViolationsDetail,
};
}
// Rendering used tag violations
if (removeUsedTagViolations.length > 0) {
let summary = '';
if (removeUsedTagViolations.length === 1) {
summary =
'There is a possible violation for removing region tag in use.';
} else {
summary = `There are ${removeUsedTagViolations.length} possible violations for removing region tag in use.`;
}
const removeUsedTagViolationsDetail = formatViolations(
removeUsedTagViolations,
summary
);
commentBody += removeUsedTagViolationsDetail;
removeUsedTagCheckParams.conclusion = 'failure';
removeUsedTagCheckParams.output = {
title: 'Removal of region tags in use',
summary: '',
text: removeUsedTagViolationsDetail,
};
}
commentBody +=
      '**The end of the violation section. All the stuff below is for FYI purposes only.**\n\n';
commentBody += '---\n';
}
if (removeSampleBrowserViolations.length > 0) {
let summary = 'You are about to delete the following sample browser page';
if (removeSampleBrowserViolations.length > 1) {
summary += 's';
}
summary += '.';
commentBody += formatViolations(removeSampleBrowserViolations, summary);
commentBody += '---\n';
}
if (removeFrozenRegionTagViolations.length > 0) {
let summary = 'You are about to delete the following frozen region tag';
if (removeFrozenRegionTagViolations.length > 1) {
summary += 's';
}
summary += '.';
commentBody += formatViolations(removeFrozenRegionTagViolations, summary);
commentBody += '---\n';
}
if (result.added > 0 || result.deleted > 0) {
commentBody += 'Here is the summary of changes.\n';
}
if (result.added > 0) {
const plural = result.added === 1 ? '' : 's';
const summary = `You are about to add ${result.added} region tag${plural}.`;
let detail = '';
for (const change of result.changes) {
if (change.type === 'add') {
detail += `- ${formatRegionTag(change)}\n`;
}
}
commentBody += formatExpandable(summary, detail);
}
if (result.deleted > 0) {
const plural = result.deleted === 1 ? '' : 's';
const summary = `You are about to delete ${result.deleted} region tag${plural}.\n`;
let detail = '';
for (const change of result.changes) {
if (change.type === 'del') {
detail += `- ${formatRegionTag(change)}\n`;
}
}
commentBody += formatExpandable(summary, detail);
}
// Trim the commentBody when it's too long.
if (commentBody.length > MAX_CHARS_IN_COMMENT) {
commentBody = commentBody.substring(0, MAX_CHARS_IN_COMMENT);
    // Also cut the string back to the last newline to prevent broken
    // UI rendering.
const newLineIndex = commentBody.lastIndexOf('\n');
if (newLineIndex !== -1) {
commentBody = commentBody.substring(0, newLineIndex);
}
commentBody += '\n...(The comment is too long, omitted)\n';
}
commentBody += `---
This comment is generated by [snippet-bot](https://github.com/apps/snippet-bot).
If you find problems with this result, please file an issue at:
https://github.com/googleapis/repo-automation-bots/issues.
To update this comment, add \`${REFRESH_LABEL}\` label or use the checkbox below:
${REFRESH_UI}
`;
  // The bot should not add a new comment when there are no region tag
  // changes, so we pass the `onlyUpdate` flag.
const onlyUpdate = result.changes.length === 0;
await addOrUpdateIssueComment(
context.octokit,
owner,
repo,
prNumber,
installationId as number,
commentBody,
onlyUpdate
);
// Status checks for missing region tag prefix
if (
configuration.alwaysCreateStatusCheck() ||
configuration.aggregateChecks() ||
productPrefixViolations.length > 0
) {
await aggregator.add(prefixCheckParams);
}
// Status checks for disruptive region tag removal
if (
configuration.alwaysCreateStatusCheck() ||
configuration.aggregateChecks() ||
removeUsedTagViolations.length > 0
) {
await aggregator.add(removeUsedTagCheckParams);
}
await aggregator.submit();
// emit metrics
logger.metric('snippet-bot-violations', {
target: pull_request.url,
violation_type: 'UNMATCHED_REGION_TAG',
count: failureMessages.length,
});
logger.metric('snippet-bot-violations', {
target: pull_request.url,
violation_type: 'MISSING_PRODUCT_PREFIX',
count: productPrefixViolations.length,
});
logger.metric('snippet-bot-violations', {
target: pull_request.url,
violation_type: 'REMOVING_USED_TAG',
count: removeUsedTagViolations.length,
});
}
/**
 * Creates a comment mark used for addOrUpdateIssueComment.
* I'll move this function to gcf-utils later.
*/
function getCommentMark(installationId: number | undefined): string {
return `<!-- probot comment [${installationId}]-->`;
}
export = (app: Probot) => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
app.on('schedule.repository' as any, async context => {
const owner = context.payload.organization.login;
const repo = context.payload.repository.name;
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${owner}/${repo}.`);
return;
}
await syncLabels(context.octokit, owner, repo, SNIPPET_BOT_LABELS);
});
app.on('issue_comment.edited', async context => {
const commentMark = getCommentMark(context.payload.installation?.id);
    // Only proceed if the comment was made by the bot (it contains our
    // comment mark) and the refresh checkbox is checked.
if (
!context.payload.comment.body.includes(commentMark) ||
!context.payload.comment.body.includes(REFRESH_STRING)
) {
return;
}
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
const prNumber = context.payload.issue.number;
const prResponse = await context.octokit.pulls.get({
owner: owner,
repo: repo,
pull_number: prNumber,
});
// Invalidate the cache for Snippets.
invalidateCache();
// Examine the pull request.
await scanPullRequest(
context,
prResponse.data as PullRequest,
configuration,
true
);
});
app.on(['issues.opened', 'issues.reopened'], async context => {
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
await fullScan(context, configuration);
});
app.on('pull_request.labeled', async context => {
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
// Only proceeds if `snippet-bot:force-run` label is added.
if (context.payload.pull_request.labels === undefined) {
return;
}
// Exits when there's no REFRESH_LABEL
const labelFound = context.payload.pull_request.labels.some(
(label: Label) => {
return label.name === REFRESH_LABEL;
}
);
if (!labelFound) {
return;
}
// Remove the label and proceed.
try {
await context.octokit.issues.removeLabel(
context.issue({name: REFRESH_LABEL})
);
} catch (err) {
// Ignoring 404 errors.
if (err.status !== 404) {
throw err;
}
}
// Also invalidate the cache for Snippets.
invalidateCache();
// Examine the pull request.
await scanPullRequest(
context,
context.payload.pull_request as PullRequest,
configuration,
true
);
});
app.on(
[
'pull_request.opened',
'pull_request.reopened',
'pull_request.edited',
'pull_request.synchronize',
],
async context => {
// Exit if the PR is closed.
if (context.payload.pull_request.state === 'closed') {
| // If the head repo is null, we can not proceed.
if (
context.payload.pull_request.head.repo === undefined ||
context.payload.pull_request.head.repo === null
) {
logger.info(
`The head repo is undefined for ${context.payload.pull_request.url}, exiting.`
);
return;
}
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
// We should first check the config schema. Otherwise, we'll miss
// the opportunity for checking the schema when adding the config
// file for the first time.
const configChecker = new ConfigChecker<ConfigurationOptions>(
schema,
CONFIGURATION_FILE_PATH
);
await configChecker.validateConfigChanges(
context.octokit,
owner,
repo,
context.payload.pull_request.head.sha,
context.payload.pull_request.number
);
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
await scanPullRequest(
context,
context.payload.pull_request as PullRequest,
configuration
);
}
);
};
| logger.info(
`The pull request ${context.payload.pull_request.url} is closed, exiting.`
);
return;
}
| conditional_block |
snippet-bot.ts | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* eslint-disable @typescript-eslint/no-var-requires */
/* eslint-disable node/no-extraneous-import */
import {Probot, Context} from 'probot';
import {PullRequest} from '@octokit/webhooks-definitions/schema';
import {Configuration, ConfigurationOptions} from './configuration';
import {DEFAULT_CONFIGURATION, CONFIGURATION_FILE_PATH} from './configuration';
import {REFRESH_LABEL, NO_PREFIX_REQ_LABEL, SNIPPET_BOT_LABELS} from './labels';
import {
parseRegionTags,
parseRegionTagsInPullRequest,
ParseResult,
} from './region-tag-parser';
import {
Conclusion,
CheckAggregator,
formatBody,
formatExpandable,
formatRegionTag,
formatViolations,
formatMatchingViolation,
isFile,
} from './utils';
import {invalidateCache} from './snippets';
import {
Violation,
checkProductPrefixViolations,
checkRemovingUsedTagViolations,
} from './violations';
import schema from './config-schema.json';
import {ConfigChecker, getConfig} from '@google-automations/bot-config-utils';
import {syncLabels} from '@google-automations/label-utils';
import {logger, addOrUpdateIssueComment} from 'gcf-utils';
import fetch from 'node-fetch';
import tmp from 'tmp-promise';
import tar from 'tar';
import util from 'util';
import fs from 'fs';
import {promises as pfs} from 'fs';
import path from 'path';
const streamPipeline = util.promisify(require('stream').pipeline);
// Solely to avoid using the `any` type.
interface Label {
name: string;
}
const FULL_SCAN_ISSUE_TITLE = 'snippet-bot full scan';
const REFRESH_UI = '- [ ] Refresh this comment';
const REFRESH_STRING = '- [x] Refresh this comment';
// The GitHub issue comment API has a limit of 65536 characters.
const MAX_CHARS_IN_COMMENT = 64000;
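// Downloads `url` and streams the response body into `file` on disk,
// throwing on a non-OK HTTP response.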
async function downloadFile(url: string, file: string) {
const response = await fetch(url);
if (response.ok) {
return streamPipeline(response.body, fs.createWriteStream(file));
}
throw new Error(`unexpected response ${response.statusText}`);
}
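// Recursively collects every file path under `dir` into `allFiles` and
// returns the accumulated list.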
async function | (dir: string, allFiles: string[]) {
const files = (await pfs.readdir(dir)).map(f => path.join(dir, f));
for (const f of files) {
if (!(await pfs.stat(f)).isDirectory()) {
allFiles.push(f);
}
}
await Promise.all(
files.map(
async f => (await pfs.stat(f)).isDirectory() && getFiles(f, allFiles)
)
);
return allFiles;
}
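// Handles a "snippet-bot full scan" issue: downloads the default branch
// tarball, checks region tags in every non-ignored file, and writes the
// result back into the issue body.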
async function fullScan(
context: Context<'issues'>,
configuration: Configuration
) {
const installationId = context.payload.installation?.id;
const commentMark = `<!-- probot comment [${installationId}]-->`;
const owner = context.payload.repository.owner.login;
const repo = context.payload.repository.name;
const defaultBranch = context.payload.repository.default_branch;
if (!context.payload.issue?.title.includes(FULL_SCAN_ISSUE_TITLE)) {
return;
}
// full scan start
const issueNumber = context.payload.issue.number;
const url = `https://github.com/${owner}/${repo}/tarball/${defaultBranch}`;
const tmpDir = tmp.dirSync();
logger.info(`working directory: ${tmpDir.name}`);
const file = `${tmpDir.name}/${repo}.tar.gz`;
// Download the default branch tarball and run full scan.
try {
await downloadFile(url, file);
logger.info(`Downloaded to ${file}`);
tar.x({
file: file,
cwd: tmpDir.name,
sync: true,
});
let archiveDir!: string;
for (const f of await pfs.readdir(tmpDir.name)) {
const cur = tmpDir.name + '/' + f;
const stat = await pfs.lstat(cur);
if (stat.isDirectory()) {
archiveDir = cur;
}
}
if (archiveDir === undefined) {
throw new Error('Failed to extract the archive');
}
// Determine the short commit hash from the directory name.
// We'll use the hash for creating permalink.
let commitHash = defaultBranch; // Defaulting to the default branch.
const lastDashIndex = archiveDir.lastIndexOf('-');
if (lastDashIndex !== -1) {
commitHash = archiveDir.substr(lastDashIndex + 1);
}
logger.info(`Using commit hash "${commitHash}"`);
const files = await getFiles(archiveDir, []);
let mismatchedTags = false;
const failureMessages: string[] = [];
for (const file of files) {
if (configuration.ignoredFile(file)) {
logger.info('ignoring file from configuration: ' + file);
continue;
}
try {
const fileContents = await pfs.readFile(file, 'utf-8');
const parseResult = parseRegionTags(
fileContents,
file.replace(archiveDir + '/', ''),
owner,
repo,
commitHash
);
if (!parseResult.result) {
mismatchedTags = true;
for (const violation of parseResult.violations) {
const formatted = formatMatchingViolation(violation);
failureMessages.push(`- [ ] ${formatted}`);
}
}
} catch (err) {
err.message = `Failed to read the file: ${err.message}`;
logger.error(err);
continue;
}
}
    let bodyDetail = 'Great job! No unmatched region tags found!';
if (mismatchedTags) {
bodyDetail = failureMessages.join('\n');
}
await context.octokit.issues.update({
owner: owner,
repo: repo,
issue_number: issueNumber,
body: formatBody(
context.payload.issue.body as string,
commentMark,
`## snippet-bot scan result
Life is too short to manually check unmatched region tags.
Here is the result:
${bodyDetail}`
),
});
} catch (err) {
err.message = `Failed to scan files: ${err.message}`;
logger.error(err);
await context.octokit.issues.update({
owner: owner,
repo: repo,
issue_number: issueNumber,
body: formatBody(
context.payload.issue.body as string,
commentMark,
`## snippet-bot scan result\nFailed running the full scan: ${err}.`
),
});
} finally {
// Clean up the directory.
await pfs.rmdir(tmpDir.name, {recursive: true});
}
}
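// Parses the PR diff for region tag changes, validates the touched files,
// and reports violations through status checks and a refreshable PR comment.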
async function scanPullRequest(
context: Context<'pull_request'> | Context<'issue_comment'>,
pull_request: PullRequest,
configuration: Configuration,
refreshing = false
) {
const installationId = context.payload.installation?.id;
const owner = context.payload.repository.owner.login;
const repo = context.payload.repository.name;
const aggregator = new CheckAggregator(
context.octokit,
'snippet-bot check',
configuration.aggregateChecks()
);
// Parse the PR diff and recognize added/deleted region tags.
const result = await parseRegionTagsInPullRequest(
context.octokit,
pull_request.diff_url,
pull_request.base.repo.owner.login,
pull_request.base.repo.name,
pull_request.base.sha,
pull_request.head.repo.owner.login,
pull_request.head.repo.name,
pull_request.head.sha
);
let mismatchedTags = false;
let tagsFound = false;
const failureMessages: string[] = [];
// Whether to ignore prefix requirement.
const noPrefixReq = pull_request.labels.some((label: Label) => {
return label.name === NO_PREFIX_REQ_LABEL;
});
// Keep track of start tags in all the files.
const parseResults = new Map<string, ParseResult>();
// If we found any new files, verify they all have matching region tags.
for (const file of result.files) {
if (configuration.ignoredFile(file)) {
logger.info('ignoring file from configuration: ' + file);
continue;
}
try {
const blob = await context.octokit.repos.getContent({
owner: pull_request.head.repo.owner.login,
repo: pull_request.head.repo.name,
path: file,
ref: pull_request.head.sha,
});
if (!isFile(blob.data)) {
continue;
}
const fileContents = Buffer.from(blob.data.content, 'base64').toString(
'utf8'
);
const parseResult = parseRegionTags(
fileContents,
file,
owner,
repo,
pull_request.head.sha
);
parseResults.set(file, parseResult);
if (!parseResult.result) {
mismatchedTags = true;
for (const violation of parseResult.violations) {
failureMessages.push(formatMatchingViolation(violation));
}
}
if (parseResult.tagsFound) {
tagsFound = true;
}
} catch (err) {
// Ignoring 403/404 errors.
if (err.status === 403 || err.status === 404) {
logger.info(
`ignoring 403/404 errors upon fetching ${file}: ${err.message}`
);
} else {
throw err;
}
}
}
const checkParams = context.repo({
name: 'Mismatched region tag',
conclusion: 'success' as Conclusion,
head_sha: pull_request.head.sha,
output: {
title: 'Region tag check',
summary: 'Region tag successful',
text: 'Region tag successful',
},
});
if (mismatchedTags) {
checkParams.conclusion = 'failure';
checkParams.output = {
title: 'Mismatched region tag detected.',
summary: 'Some new files have mismatched region tag',
text: failureMessages.join('\n'),
};
}
// post the status of the region tag check to the PR, using:
// https://developer.github.com/v3/checks/
if (
configuration.alwaysCreateStatusCheck() ||
configuration.aggregateChecks() ||
tagsFound
) {
await aggregator.add(checkParams);
}
let commentBody = '';
if (result.changes.length === 0) {
// If this run is initiated by a user with the force-run label
// or refresh checkbox, we don't exit.
//
// Also, when the config `alwaysCreateStatusCheck` is true, we need
// to create successful status checks, so we don't exit.
if (
!refreshing &&
!configuration.alwaysCreateStatusCheck() &&
!configuration.aggregateChecks()
) {
return;
}
commentBody += 'No region tags are edited in this PR.\n';
}
// Add or update a comment on the PR.
const prNumber = pull_request.number;
// First check product prefix for added region tags.
let productPrefixViolations: Array<Violation> = [];
if (!noPrefixReq) {
productPrefixViolations = await checkProductPrefixViolations(
result,
configuration
);
}
const removingUsedTagsViolations = await checkRemovingUsedTagViolations(
result,
configuration,
parseResults,
pull_request.base.repo.full_name,
pull_request.base.ref
);
const removeUsedTagViolations = [
...(removingUsedTagsViolations.get('REMOVE_USED_TAG') as Violation[]),
...(removingUsedTagsViolations.get(
'REMOVE_CONFLICTING_TAG'
) as Violation[]),
];
const removeSampleBrowserViolations = removingUsedTagsViolations.get(
'REMOVE_SAMPLE_BROWSER_PAGE'
) as Violation[];
const removeFrozenRegionTagViolations = removingUsedTagsViolations.get(
'REMOVE_FROZEN_REGION_TAG'
) as Violation[];
// status check for productPrefixViolations
const prefixCheckParams = context.repo({
name: 'Region tag product prefix',
conclusion: 'success' as Conclusion,
head_sha: pull_request.head.sha,
output: {
title: 'No violations',
summary: 'No violations found',
text: 'All the tags have appropriate product prefix',
},
});
// status check for removeUsedTagViolations
const removeUsedTagCheckParams = context.repo({
name: 'Disruptive region tag removal',
conclusion: 'success' as Conclusion,
head_sha: pull_request.head.sha,
output: {
title: 'No violations',
summary: 'No violations found',
text: 'No disruptive region tag removal',
},
});
if (
productPrefixViolations.length > 0 ||
removeUsedTagViolations.length > 0
) {
commentBody += 'Here is the summary of possible violations 😱';
// Rendering prefix violations
if (productPrefixViolations.length > 0) {
let summary = '';
if (productPrefixViolations.length === 1) {
summary =
'There is a possible violation for not having product prefix.';
} else {
summary = `There are ${productPrefixViolations.length} possible violations for not having product prefix.`;
}
const productPrefixViolationsDetail = formatViolations(
productPrefixViolations,
summary
);
commentBody += productPrefixViolationsDetail;
prefixCheckParams.conclusion = 'failure';
prefixCheckParams.output = {
title: 'Missing region tag prefix',
summary: 'Some region tags do not have appropriate prefix',
text: productPrefixViolationsDetail,
};
}
// Rendering used tag violations
if (removeUsedTagViolations.length > 0) {
let summary = '';
if (removeUsedTagViolations.length === 1) {
summary =
'There is a possible violation for removing region tag in use.';
} else {
summary = `There are ${removeUsedTagViolations.length} possible violations for removing region tag in use.`;
}
const removeUsedTagViolationsDetail = formatViolations(
removeUsedTagViolations,
summary
);
commentBody += removeUsedTagViolationsDetail;
removeUsedTagCheckParams.conclusion = 'failure';
removeUsedTagCheckParams.output = {
title: 'Removal of region tags in use',
summary: '',
text: removeUsedTagViolationsDetail,
};
}
commentBody +=
'**The end of the violation section. All the stuff below is for FYI purposes only.**\n\n';
commentBody += '---\n';
}
if (removeSampleBrowserViolations.length > 0) {
let summary = 'You are about to delete the following sample browser page';
if (removeSampleBrowserViolations.length > 1) {
summary += 's';
}
summary += '.';
commentBody += formatViolations(removeSampleBrowserViolations, summary);
commentBody += '---\n';
}
if (removeFrozenRegionTagViolations.length > 0) {
let summary = 'You are about to delete the following frozen region tag';
if (removeFrozenRegionTagViolations.length > 1) {
summary += 's';
}
summary += '.';
commentBody += formatViolations(removeFrozenRegionTagViolations, summary);
commentBody += '---\n';
}
if (result.added > 0 || result.deleted > 0) {
commentBody += 'Here is the summary of changes.\n';
}
if (result.added > 0) {
const plural = result.added === 1 ? '' : 's';
const summary = `You are about to add ${result.added} region tag${plural}.`;
let detail = '';
for (const change of result.changes) {
if (change.type === 'add') {
detail += `- ${formatRegionTag(change)}\n`;
}
}
commentBody += formatExpandable(summary, detail);
}
if (result.deleted > 0) {
const plural = result.deleted === 1 ? '' : 's';
const summary = `You are about to delete ${result.deleted} region tag${plural}.\n`;
let detail = '';
for (const change of result.changes) {
if (change.type === 'del') {
detail += `- ${formatRegionTag(change)}\n`;
}
}
commentBody += formatExpandable(summary, detail);
}
// Trim the commentBody when it's too long.
if (commentBody.length > MAX_CHARS_IN_COMMENT) {
commentBody = commentBody.substring(0, MAX_CHARS_IN_COMMENT);
// Also trim the string after the last newline to prevent a broken
// UI rendering.
const newLineIndex = commentBody.lastIndexOf('\n');
if (newLineIndex !== -1) {
commentBody = commentBody.substring(0, newLineIndex);
}
commentBody += '\n...(The comment is too long, omitted)\n';
}
commentBody += `---
This comment is generated by [snippet-bot](https://github.com/apps/snippet-bot).
If you find problems with this result, please file an issue at:
https://github.com/googleapis/repo-automation-bots/issues.
To update this comment, add \`${REFRESH_LABEL}\` label or use the checkbox below:
${REFRESH_UI}
`;
// The bot should not add a new comment when there's no region tag
// changes, so we pass `onlyUpdate` flag.
const onlyUpdate = result.changes.length === 0;
await addOrUpdateIssueComment(
context.octokit,
owner,
repo,
prNumber,
installationId as number,
commentBody,
onlyUpdate
);
// Status checks for missing region tag prefix
if (
configuration.alwaysCreateStatusCheck() ||
configuration.aggregateChecks() ||
productPrefixViolations.length > 0
) {
await aggregator.add(prefixCheckParams);
}
// Status checks for disruptive region tag removal
if (
configuration.alwaysCreateStatusCheck() ||
configuration.aggregateChecks() ||
removeUsedTagViolations.length > 0
) {
await aggregator.add(removeUsedTagCheckParams);
}
await aggregator.submit();
// emit metrics
logger.metric('snippet-bot-violations', {
target: pull_request.url,
violation_type: 'UNMATCHED_REGION_TAG',
count: failureMessages.length,
});
logger.metric('snippet-bot-violations', {
target: pull_request.url,
violation_type: 'MISSING_PRODUCT_PREFIX',
count: productPrefixViolations.length,
});
logger.metric('snippet-bot-violations', {
target: pull_request.url,
violation_type: 'REMOVING_USED_TAG',
count: removeUsedTagViolations.length,
});
}
/**
* Creates a comment mark used for addOrUpdateIssueComment.
* I'll move this function to gcf-utils later.
*/
function getCommentMark(installationId: number | undefined): string {
return `<!-- probot comment [${installationId}]-->`;
}
export = (app: Probot) => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
app.on('schedule.repository' as any, async context => {
const owner = context.payload.organization.login;
const repo = context.payload.repository.name;
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${owner}/${repo}.`);
return;
}
await syncLabels(context.octokit, owner, repo, SNIPPET_BOT_LABELS);
});
app.on('issue_comment.edited', async context => {
const commentMark = getCommentMark(context.payload.installation?.id);
// We only proceed if the comment was made by this bot and has the refresh
// checkbox checked.
if (
!context.payload.comment.body.includes(commentMark) ||
!context.payload.comment.body.includes(REFRESH_STRING)
) {
return;
}
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
const prNumber = context.payload.issue.number;
const prResponse = await context.octokit.pulls.get({
owner: owner,
repo: repo,
pull_number: prNumber,
});
// Invalidate the cache for Snippets.
invalidateCache();
// Examine the pull request.
await scanPullRequest(
context,
prResponse.data as PullRequest,
configuration,
true
);
});
app.on(['issues.opened', 'issues.reopened'], async context => {
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
await fullScan(context, configuration);
});
app.on('pull_request.labeled', async context => {
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
// Only proceed if the `snippet-bot:force-run` label is added.
if (context.payload.pull_request.labels === undefined) {
return;
}
// Exits when there's no REFRESH_LABEL
const labelFound = context.payload.pull_request.labels.some(
(label: Label) => {
return label.name === REFRESH_LABEL;
}
);
if (!labelFound) {
return;
}
// Remove the label and proceed.
try {
await context.octokit.issues.removeLabel(
context.issue({name: REFRESH_LABEL})
);
} catch (err) {
// Ignoring 404 errors.
if (err.status !== 404) {
throw err;
}
}
// Also invalidate the cache for Snippets.
invalidateCache();
// Examine the pull request.
await scanPullRequest(
context,
context.payload.pull_request as PullRequest,
configuration,
true
);
});
app.on(
[
'pull_request.opened',
'pull_request.reopened',
'pull_request.edited',
'pull_request.synchronize',
],
async context => {
// Exit if the PR is closed.
if (context.payload.pull_request.state === 'closed') {
logger.info(
`The pull request ${context.payload.pull_request.url} is closed, exiting.`
);
return;
}
// If the head repo is null, we cannot proceed.
if (
context.payload.pull_request.head.repo === undefined ||
context.payload.pull_request.head.repo === null
) {
logger.info(
`The head repo is undefined for ${context.payload.pull_request.url}, exiting.`
);
return;
}
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
// We should first check the config schema. Otherwise, we'll miss
// the opportunity for checking the schema when adding the config
// file for the first time.
const configChecker = new ConfigChecker<ConfigurationOptions>(
schema,
CONFIGURATION_FILE_PATH
);
await configChecker.validateConfigChanges(
context.octokit,
owner,
repo,
context.payload.pull_request.head.sha,
context.payload.pull_request.number
);
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
await scanPullRequest(
context,
context.payload.pull_request as PullRequest,
configuration
);
}
);
};
| getFiles | identifier_name |
snippet-bot.ts | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* eslint-disable @typescript-eslint/no-var-requires */
/* eslint-disable node/no-extraneous-import */
import {Probot, Context} from 'probot';
import {PullRequest} from '@octokit/webhooks-definitions/schema';
import {Configuration, ConfigurationOptions} from './configuration';
import {DEFAULT_CONFIGURATION, CONFIGURATION_FILE_PATH} from './configuration';
import {REFRESH_LABEL, NO_PREFIX_REQ_LABEL, SNIPPET_BOT_LABELS} from './labels';
import {
parseRegionTags,
parseRegionTagsInPullRequest,
ParseResult,
} from './region-tag-parser';
import {
Conclusion,
CheckAggregator,
formatBody,
formatExpandable,
formatRegionTag,
formatViolations,
formatMatchingViolation,
isFile,
} from './utils';
import {invalidateCache} from './snippets';
import {
Violation,
checkProductPrefixViolations,
checkRemovingUsedTagViolations,
} from './violations';
import schema from './config-schema.json';
import {ConfigChecker, getConfig} from '@google-automations/bot-config-utils';
import {syncLabels} from '@google-automations/label-utils';
import {logger, addOrUpdateIssueComment} from 'gcf-utils';
import fetch from 'node-fetch';
import tmp from 'tmp-promise';
import tar from 'tar';
import util from 'util';
import fs from 'fs';
import {promises as pfs} from 'fs';
import path from 'path';
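// Promisified stream.pipeline, used to await the tarball download stream.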
const streamPipeline = util.promisify(require('stream').pipeline);
// Solely to avoid using the `any` type.
interface Label {
name: string;
}
const FULL_SCAN_ISSUE_TITLE = 'snippet-bot full scan';
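// REFRESH_UI is rendered (unchecked) in the bot comment; once a user checks the
// box, the edited comment contains REFRESH_STRING, which the
// issue_comment.edited handler below treats as a request to re-scan.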
const REFRESH_UI = '- [ ] Refresh this comment';
const REFRESH_STRING = '- [x] Refresh this comment';
// The GitHub issue comment API has a limit of 65536 characters.
const MAX_CHARS_IN_COMMENT = 64000;
async function downloadFile(url: string, file: string) {
const response = await fetch(url);
if (response.ok) {
return streamPipeline(response.body, fs.createWriteStream(file));
}
throw new Error(`unexpected response ${response.statusText}`);
}
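// Recursively collects every file path under `dir` into `allFiles`,
// descending into subdirectories in parallel.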
async function getFiles(dir: string, allFiles: string[]) {
const files = (await pfs.readdir(dir)).map(f => path.join(dir, f));
for (const f of files) {
if (!(await pfs.stat(f)).isDirectory()) {
allFiles.push(f);
}
}
await Promise.all(
files.map(
async f => (await pfs.stat(f)).isDirectory() && getFiles(f, allFiles)
)
);
return allFiles;
}
async function fullScan(
context: Context<'issues'>,
configuration: Configuration
) {
const installationId = context.payload.installation?.id;
const commentMark = `<!-- probot comment [${installationId}]-->`;
const owner = context.payload.repository.owner.login;
const repo = context.payload.repository.name;
const defaultBranch = context.payload.repository.default_branch;
if (!context.payload.issue?.title.includes(FULL_SCAN_ISSUE_TITLE)) {
return;
}
// full scan start
const issueNumber = context.payload.issue.number;
const url = `https://github.com/${owner}/${repo}/tarball/${defaultBranch}`;
const tmpDir = tmp.dirSync();
logger.info(`working directory: ${tmpDir.name}`);
const file = `${tmpDir.name}/${repo}.tar.gz`;
// Download the default branch tarball and run full scan.
try {
await downloadFile(url, file);
logger.info(`Downloaded to ${file}`);
tar.x({
file: file,
cwd: tmpDir.name,
sync: true,
});
let archiveDir!: string;
for (const f of await pfs.readdir(tmpDir.name)) {
const cur = tmpDir.name + '/' + f;
const stat = await pfs.lstat(cur);
if (stat.isDirectory()) {
archiveDir = cur;
}
}
if (archiveDir === undefined) {
throw new Error('Failed to extract the archive');
}
// Determine the short commit hash from the directory name.
// We'll use the hash for creating permalink.
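// GitHub tarballs typically extract into a single `<owner>-<repo>-<short SHA>`
// directory, so the text after the last dash is the short commit hash.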
let commitHash = defaultBranch; // Defaulting to the default branch.
const lastDashIndex = archiveDir.lastIndexOf('-');
if (lastDashIndex !== -1) {
commitHash = archiveDir.substring(lastDashIndex + 1);
}
logger.info(`Using commit hash "${commitHash}"`);
const files = await getFiles(archiveDir, []);
let mismatchedTags = false;
const failureMessages: string[] = [];
for (const file of files) {
if (configuration.ignoredFile(file)) {
logger.info('ignoring file from configuration: ' + file);
continue;
}
try {
const fileContents = await pfs.readFile(file, 'utf-8');
const parseResult = parseRegionTags(
fileContents,
file.replace(archiveDir + '/', ''),
owner,
repo,
commitHash
);
if (!parseResult.result) {
mismatchedTags = true;
for (const violation of parseResult.violations) {
const formatted = formatMatchingViolation(violation);
failureMessages.push(`- [ ] ${formatted}`);
}
}
} catch (err) {
err.message = `Failed to read the file: ${err.message}`;
logger.error(err);
continue;
}
}
let bodyDetail = 'Great job! No unmatched region tags found!';
if (mismatchedTags) {
bodyDetail = failureMessages.join('\n');
}
await context.octokit.issues.update({
owner: owner,
repo: repo,
issue_number: issueNumber,
body: formatBody(
context.payload.issue.body as string,
commentMark,
`## snippet-bot scan result
Life is too short to manually check unmatched region tags.
Here is the result:
${bodyDetail}`
),
});
} catch (err) {
err.message = `Failed to scan files: ${err.message}`;
logger.error(err);
await context.octokit.issues.update({
owner: owner,
repo: repo,
issue_number: issueNumber,
body: formatBody(
context.payload.issue.body as string,
commentMark,
`## snippet-bot scan result\nFailed running the full scan: ${err}.`
),
});
} finally {
// Clean up the directory.
await pfs.rmdir(tmpDir.name, {recursive: true});
}
}
async function scanPullRequest(
context: Context<'pull_request'> | Context<'issue_comment'>,
pull_request: PullRequest,
configuration: Configuration,
refreshing = false
) {
const installationId = context.payload.installation?.id;
const owner = context.payload.repository.owner.login;
const repo = context.payload.repository.name;
const aggregator = new CheckAggregator(
context.octokit,
'snippet-bot check',
configuration.aggregateChecks()
);
// Parse the PR diff and recognize added/deleted region tags.
const result = await parseRegionTagsInPullRequest(
context.octokit,
pull_request.diff_url,
pull_request.base.repo.owner.login,
pull_request.base.repo.name,
pull_request.base.sha,
pull_request.head.repo.owner.login,
pull_request.head.repo.name,
pull_request.head.sha
);
let mismatchedTags = false;
let tagsFound = false;
const failureMessages: string[] = [];
// Whether to ignore prefix requirement.
const noPrefixReq = pull_request.labels.some((label: Label) => {
return label.name === NO_PREFIX_REQ_LABEL;
});
// Keep track of start tags in all the files.
const parseResults = new Map<string, ParseResult>();
// If we found any new files, verify they all have matching region tags.
for (const file of result.files) {
if (configuration.ignoredFile(file)) {
logger.info('ignoring file from configuration: ' + file);
continue;
}
try {
const blob = await context.octokit.repos.getContent({
owner: pull_request.head.repo.owner.login,
repo: pull_request.head.repo.name,
path: file,
ref: pull_request.head.sha,
});
if (!isFile(blob.data)) {
continue;
}
const fileContents = Buffer.from(blob.data.content, 'base64').toString(
'utf8'
);
const parseResult = parseRegionTags(
fileContents,
file,
owner,
repo,
pull_request.head.sha
);
parseResults.set(file, parseResult);
if (!parseResult.result) {
mismatchedTags = true;
for (const violation of parseResult.violations) {
failureMessages.push(formatMatchingViolation(violation));
}
}
if (parseResult.tagsFound) {
tagsFound = true;
}
} catch (err) {
// Ignoring 403/404 errors.
if (err.status === 403 || err.status === 404) {
logger.info(
`ignoring 403/404 errors upon fetching ${file}: ${err.message}`
);
} else {
throw err;
}
}
}
const checkParams = context.repo({
name: 'Mismatched region tag',
conclusion: 'success' as Conclusion,
head_sha: pull_request.head.sha,
output: {
title: 'Region tag check',
summary: 'Region tag successful',
text: 'Region tag successful',
},
});
if (mismatchedTags) {
checkParams.conclusion = 'failure';
checkParams.output = {
title: 'Mismatched region tag detected.',
summary: 'Some new files have mismatched region tag',
text: failureMessages.join('\n'),
};
}
// post the status of the region tag check to the PR, using:
// https://developer.github.com/v3/checks/
if (
configuration.alwaysCreateStatusCheck() ||
configuration.aggregateChecks() ||
tagsFound
) {
await aggregator.add(checkParams);
}
let commentBody = '';
if (result.changes.length === 0) {
// If this run is initiated by a user with the force-run label
// or refresh checkbox, we don't exit.
//
// Also, when the config `alwaysCreateStatusCheck` is true, we need
// to create successful status checks, so we don't exit.
if (
!refreshing &&
!configuration.alwaysCreateStatusCheck() &&
!configuration.aggregateChecks()
) {
return;
}
commentBody += 'No region tags are edited in this PR.\n';
}
// Add or update a comment on the PR.
const prNumber = pull_request.number;
// First check product prefix for added region tags.
let productPrefixViolations: Array<Violation> = [];
if (!noPrefixReq) {
productPrefixViolations = await checkProductPrefixViolations(
result,
configuration
);
}
const removingUsedTagsViolations = await checkRemovingUsedTagViolations(
result,
configuration,
parseResults,
pull_request.base.repo.full_name,
pull_request.base.ref
);
const removeUsedTagViolations = [
...(removingUsedTagsViolations.get('REMOVE_USED_TAG') as Violation[]),
...(removingUsedTagsViolations.get(
'REMOVE_CONFLICTING_TAG'
) as Violation[]),
];
const removeSampleBrowserViolations = removingUsedTagsViolations.get(
'REMOVE_SAMPLE_BROWSER_PAGE'
) as Violation[];
const removeFrozenRegionTagViolations = removingUsedTagsViolations.get(
'REMOVE_FROZEN_REGION_TAG'
) as Violation[];
// status check for productPrefixViolations
const prefixCheckParams = context.repo({
name: 'Region tag product prefix',
conclusion: 'success' as Conclusion,
head_sha: pull_request.head.sha,
output: {
title: 'No violations',
summary: 'No violations found',
text: 'All the tags have appropriate product prefix',
},
});
// status check for removeUsedTagViolations
const removeUsedTagCheckParams = context.repo({
name: 'Disruptive region tag removal',
conclusion: 'success' as Conclusion,
head_sha: pull_request.head.sha,
output: {
title: 'No violations',
summary: 'No violations found',
text: 'No disruptive region tag removal',
},
});
if (
productPrefixViolations.length > 0 ||
removeUsedTagViolations.length > 0
) {
commentBody += 'Here is the summary of possible violations 😱';
// Rendering prefix violations
if (productPrefixViolations.length > 0) {
let summary = '';
if (productPrefixViolations.length === 1) {
summary =
'There is a possible violation for not having product prefix.';
} else {
summary = `There are ${productPrefixViolations.length} possible violations for not having product prefix.`;
}
const productPrefixViolationsDetail = formatViolations(
productPrefixViolations,
summary
);
commentBody += productPrefixViolationsDetail;
prefixCheckParams.conclusion = 'failure';
prefixCheckParams.output = {
title: 'Missing region tag prefix',
summary: 'Some region tags do not have appropriate prefix',
text: productPrefixViolationsDetail,
};
}
// Rendering used tag violations
if (removeUsedTagViolations.length > 0) {
let summary = '';
if (removeUsedTagViolations.length === 1) {
summary =
'There is a possible violation for removing region tag in use.';
} else {
summary = `There are ${removeUsedTagViolations.length} possible violations for removing region tag in use.`;
}
const removeUsedTagViolationsDetail = formatViolations(
removeUsedTagViolations,
summary
);
commentBody += removeUsedTagViolationsDetail;
removeUsedTagCheckParams.conclusion = 'failure';
removeUsedTagCheckParams.output = {
title: 'Removal of region tags in use',
summary: '',
text: removeUsedTagViolationsDetail,
};
}
commentBody +=
'**The end of the violation section. All the stuff below is for FYI purposes only.**\n\n';
commentBody += '---\n';
}
if (removeSampleBrowserViolations.length > 0) {
let summary = 'You are about to delete the following sample browser page';
if (removeSampleBrowserViolations.length > 1) {
summary += 's';
}
summary += '.';
commentBody += formatViolations(removeSampleBrowserViolations, summary);
commentBody += '---\n';
}
if (removeFrozenRegionTagViolations.length > 0) {
let summary = 'You are about to delete the following frozen region tag';
if (removeFrozenRegionTagViolations.length > 1) {
summary += 's';
}
summary += '.';
commentBody += formatViolations(removeFrozenRegionTagViolations, summary);
commentBody += '---\n';
}
if (result.added > 0 || result.deleted > 0) {
commentBody += 'Here is the summary of changes.\n';
}
if (result.added > 0) {
const plural = result.added === 1 ? '' : 's';
const summary = `You are about to add ${result.added} region tag${plural}.`;
let detail = '';
for (const change of result.changes) {
if (change.type === 'add') {
detail += `- ${formatRegionTag(change)}\n`;
}
}
commentBody += formatExpandable(summary, detail);
}
if (result.deleted > 0) {
const plural = result.deleted === 1 ? '' : 's';
const summary = `You are about to delete ${result.deleted} region tag${plural}.\n`;
let detail = '';
for (const change of result.changes) {
if (change.type === 'del') {
detail += `- ${formatRegionTag(change)}\n`;
}
}
commentBody += formatExpandable(summary, detail);
}
// Trim the commentBody when it's too long.
if (commentBody.length > MAX_CHARS_IN_COMMENT) {
commentBody = commentBody.substring(0, MAX_CHARS_IN_COMMENT);
// Also trim the string after the last newline to prevent a broken
// UI rendering.
const newLineIndex = commentBody.lastIndexOf('\n');
if (newLineIndex !== -1) {
commentBody = commentBody.substring(0, newLineIndex);
}
commentBody += '\n...(The comment is too long, omitted)\n';
}
commentBody += `---
This comment is generated by [snippet-bot](https://github.com/apps/snippet-bot).
If you find problems with this result, please file an issue at:
https://github.com/googleapis/repo-automation-bots/issues.
To update this comment, add \`${REFRESH_LABEL}\` label or use the checkbox below:
${REFRESH_UI}
`;
// The bot should not add a new comment when there's no region tag
// changes, so we pass `onlyUpdate` flag.
const onlyUpdate = result.changes.length === 0;
await addOrUpdateIssueComment(
context.octokit,
owner,
repo,
prNumber,
installationId as number,
commentBody,
onlyUpdate
);
// Status checks for missing region tag prefix
if (
configuration.alwaysCreateStatusCheck() ||
configuration.aggregateChecks() ||
productPrefixViolations.length > 0
) {
await aggregator.add(prefixCheckParams);
}
// Status checks for disruptive region tag removal
if (
configuration.alwaysCreateStatusCheck() ||
configuration.aggregateChecks() ||
removeUsedTagViolations.length > 0
) {
await aggregator.add(removeUsedTagCheckParams);
}
await aggregator.submit();
// emit metrics
logger.metric('snippet-bot-violations', {
target: pull_request.url,
violation_type: 'UNMATCHED_REGION_TAG',
count: failureMessages.length,
});
logger.metric('snippet-bot-violations', {
target: pull_request.url,
violation_type: 'MISSING_PRODUCT_PREFIX',
count: productPrefixViolations.length,
});
logger.metric('snippet-bot-violations', {
target: pull_request.url,
violation_type: 'REMOVING_USED_TAG',
count: removeUsedTagViolations.length,
});
}
/**
* Creates a comment mark used for addOrUpdateIssueComment.
* I'll move this function to gcf-utils later.
*/
function getCommentMark(installationId: number | undefined): string {
| xport = (app: Probot) => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
app.on('schedule.repository' as any, async context => {
const owner = context.payload.organization.login;
const repo = context.payload.repository.name;
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${owner}/${repo}.`);
return;
}
await syncLabels(context.octokit, owner, repo, SNIPPET_BOT_LABELS);
});
app.on('issue_comment.edited', async context => {
const commentMark = getCommentMark(context.payload.installation?.id);
// We only proceed if the comment was made by this bot and has the refresh
// checkbox checked.
if (
!context.payload.comment.body.includes(commentMark) ||
!context.payload.comment.body.includes(REFRESH_STRING)
) {
return;
}
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
const prNumber = context.payload.issue.number;
const prResponse = await context.octokit.pulls.get({
owner: owner,
repo: repo,
pull_number: prNumber,
});
// Invalidate the cache for Snippets.
invalidateCache();
// Examine the pull request.
await scanPullRequest(
context,
prResponse.data as PullRequest,
configuration,
true
);
});
app.on(['issues.opened', 'issues.reopened'], async context => {
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
await fullScan(context, configuration);
});
app.on('pull_request.labeled', async context => {
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
// Only proceed if the `snippet-bot:force-run` label is added.
if (context.payload.pull_request.labels === undefined) {
return;
}
// Exits when there's no REFRESH_LABEL
const labelFound = context.payload.pull_request.labels.some(
(label: Label) => {
return label.name === REFRESH_LABEL;
}
);
if (!labelFound) {
return;
}
// Remove the label and proceed.
try {
await context.octokit.issues.removeLabel(
context.issue({name: REFRESH_LABEL})
);
} catch (err) {
// Ignoring 404 errors.
if (err.status !== 404) {
throw err;
}
}
// Also invalidate the cache for Snippets.
invalidateCache();
// Examine the pull request.
await scanPullRequest(
context,
context.payload.pull_request as PullRequest,
configuration,
true
);
});
app.on(
[
'pull_request.opened',
'pull_request.reopened',
'pull_request.edited',
'pull_request.synchronize',
],
async context => {
// Exit if the PR is closed.
if (context.payload.pull_request.state === 'closed') {
logger.info(
`The pull request ${context.payload.pull_request.url} is closed, exiting.`
);
return;
}
// If the head repo is null, we cannot proceed.
if (
context.payload.pull_request.head.repo === undefined ||
context.payload.pull_request.head.repo === null
) {
logger.info(
`The head repo is undefined for ${context.payload.pull_request.url}, exiting.`
);
return;
}
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
// We should first check the config schema. Otherwise, we'll miss
// the opportunity for checking the schema when adding the config
// file for the first time.
const configChecker = new ConfigChecker<ConfigurationOptions>(
schema,
CONFIGURATION_FILE_PATH
);
await configChecker.validateConfigChanges(
context.octokit,
owner,
repo,
context.payload.pull_request.head.sha,
context.payload.pull_request.number
);
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
await scanPullRequest(
context,
context.payload.pull_request as PullRequest,
configuration
);
}
);
};
| return `<!-- probot comment [${installationId}]-->`;
}
e | identifier_body |
snippet-bot.ts | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* eslint-disable @typescript-eslint/no-var-requires */
/* eslint-disable node/no-extraneous-import */
import {Probot, Context} from 'probot';
import {PullRequest} from '@octokit/webhooks-definitions/schema';
import {Configuration, ConfigurationOptions} from './configuration';
import {DEFAULT_CONFIGURATION, CONFIGURATION_FILE_PATH} from './configuration';
import {REFRESH_LABEL, NO_PREFIX_REQ_LABEL, SNIPPET_BOT_LABELS} from './labels';
import {
parseRegionTags,
parseRegionTagsInPullRequest,
ParseResult,
} from './region-tag-parser';
import {
Conclusion,
CheckAggregator,
formatBody,
formatExpandable,
formatRegionTag,
formatViolations,
formatMatchingViolation,
isFile,
} from './utils';
import {invalidateCache} from './snippets';
import {
Violation,
checkProductPrefixViolations,
checkRemovingUsedTagViolations,
} from './violations';
import schema from './config-schema.json';
import {ConfigChecker, getConfig} from '@google-automations/bot-config-utils';
import {syncLabels} from '@google-automations/label-utils';
import {logger, addOrUpdateIssueComment} from 'gcf-utils';
import fetch from 'node-fetch';
import tmp from 'tmp-promise';
import tar from 'tar';
import util from 'util';
import fs from 'fs';
import {promises as pfs} from 'fs';
import path from 'path';
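// Promisified stream.pipeline, used to await the tarball download stream.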
const streamPipeline = util.promisify(require('stream').pipeline);
// Solely to avoid using the `any` type.
interface Label {
name: string;
}
const FULL_SCAN_ISSUE_TITLE = 'snippet-bot full scan';
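// REFRESH_UI is rendered (unchecked) in the bot comment; once a user checks the
// box, the edited comment contains REFRESH_STRING, which the
// issue_comment.edited handler below treats as a request to re-scan.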
const REFRESH_UI = '- [ ] Refresh this comment';
const REFRESH_STRING = '- [x] Refresh this comment';
// The GitHub issue comment API has a limit of 65536 characters.
const MAX_CHARS_IN_COMMENT = 64000;
async function downloadFile(url: string, file: string) {
const response = await fetch(url);
if (response.ok) {
return streamPipeline(response.body, fs.createWriteStream(file));
}
throw new Error(`unexpected response ${response.statusText}`);
}
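// Recursively collects every file path under `dir` into `allFiles`,
// descending into subdirectories in parallel.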
async function getFiles(dir: string, allFiles: string[]) {
const files = (await pfs.readdir(dir)).map(f => path.join(dir, f));
for (const f of files) {
if (!(await pfs.stat(f)).isDirectory()) {
allFiles.push(f);
}
}
await Promise.all(
files.map(
async f => (await pfs.stat(f)).isDirectory() && getFiles(f, allFiles)
)
);
return allFiles;
}
async function fullScan(
context: Context<'issues'>,
configuration: Configuration
) {
const installationId = context.payload.installation?.id;
const commentMark = `<!-- probot comment [${installationId}]-->`;
const owner = context.payload.repository.owner.login;
const repo = context.payload.repository.name;
const defaultBranch = context.payload.repository.default_branch;
if (!context.payload.issue?.title.includes(FULL_SCAN_ISSUE_TITLE)) {
return;
}
// full scan start
const issueNumber = context.payload.issue.number;
const url = `https://github.com/${owner}/${repo}/tarball/${defaultBranch}`;
const tmpDir = tmp.dirSync();
logger.info(`working directory: ${tmpDir.name}`);
const file = `${tmpDir.name}/${repo}.tar.gz`;
// Download the default branch tarball and run full scan.
try {
await downloadFile(url, file);
logger.info(`Downloaded to ${file}`);
tar.x({
file: file,
cwd: tmpDir.name,
sync: true,
});
let archiveDir!: string;
for (const f of await pfs.readdir(tmpDir.name)) {
const cur = tmpDir.name + '/' + f;
const stat = await pfs.lstat(cur);
if (stat.isDirectory()) {
archiveDir = cur;
}
}
if (archiveDir === undefined) {
throw new Error('Failed to extract the archive');
}
// Determine the short commit hash from the directory name.
// We'll use the hash for creating permalink.
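// GitHub tarballs typically extract into a single `<owner>-<repo>-<short SHA>`
// directory, so the text after the last dash is the short commit hash.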
let commitHash = defaultBranch; // Defaulting to the default branch.
const lastDashIndex = archiveDir.lastIndexOf('-');
if (lastDashIndex !== -1) {
commitHash = archiveDir.substring(lastDashIndex + 1);
}
logger.info(`Using commit hash "${commitHash}"`);
const files = await getFiles(archiveDir, []);
let mismatchedTags = false;
const failureMessages: string[] = [];
for (const file of files) {
if (configuration.ignoredFile(file)) {
logger.info('ignoring file from configuration: ' + file);
continue;
}
try {
const fileContents = await pfs.readFile(file, 'utf-8');
const parseResult = parseRegionTags(
fileContents,
file.replace(archiveDir + '/', ''),
owner,
repo,
commitHash
);
if (!parseResult.result) {
mismatchedTags = true;
for (const violation of parseResult.violations) {
const formatted = formatMatchingViolation(violation);
failureMessages.push(`- [ ] ${formatted}`);
}
}
} catch (err) {
err.message = `Failed to read the file: ${err.message}`;
logger.error(err);
continue;
}
}
let bodyDetail = 'Great job! No unmatched region tags found!';
if (mismatchedTags) {
bodyDetail = failureMessages.join('\n');
}
await context.octokit.issues.update({
owner: owner,
repo: repo,
issue_number: issueNumber,
body: formatBody(
context.payload.issue.body as string,
commentMark,
`## snippet-bot scan result
Life is too short to manually check unmatched region tags.
Here is the result:
${bodyDetail}`
),
});
} catch (err) {
err.message = `Failed to scan files: ${err.message}`;
logger.error(err);
await context.octokit.issues.update({
owner: owner,
repo: repo,
issue_number: issueNumber,
body: formatBody(
context.payload.issue.body as string,
commentMark,
`## snippet-bot scan result\nFailed running the full scan: ${err}.`
),
});
} finally {
// Clean up the directory.
await pfs.rmdir(tmpDir.name, {recursive: true});
}
}
async function scanPullRequest(
context: Context<'pull_request'> | Context<'issue_comment'>,
pull_request: PullRequest,
configuration: Configuration,
refreshing = false
) {
const installationId = context.payload.installation?.id;
const owner = context.payload.repository.owner.login;
const repo = context.payload.repository.name;
const aggregator = new CheckAggregator(
context.octokit,
'snippet-bot check',
configuration.aggregateChecks()
);
// Parse the PR diff and recognize added/deleted region tags.
const result = await parseRegionTagsInPullRequest(
context.octokit,
pull_request.diff_url,
pull_request.base.repo.owner.login,
pull_request.base.repo.name,
pull_request.base.sha,
pull_request.head.repo.owner.login,
pull_request.head.repo.name,
pull_request.head.sha
);
let mismatchedTags = false;
let tagsFound = false;
const failureMessages: string[] = [];
// Whether to ignore prefix requirement.
const noPrefixReq = pull_request.labels.some((label: Label) => {
return label.name === NO_PREFIX_REQ_LABEL;
});
// Keep track of start tags in all the files.
const parseResults = new Map<string, ParseResult>();
// If we found any new files, verify they all have matching region tags.
for (const file of result.files) {
if (configuration.ignoredFile(file)) {
logger.info('ignoring file from configuration: ' + file);
continue;
}
try {
const blob = await context.octokit.repos.getContent({
owner: pull_request.head.repo.owner.login,
repo: pull_request.head.repo.name,
path: file,
ref: pull_request.head.sha,
});
if (!isFile(blob.data)) {
continue;
}
const fileContents = Buffer.from(blob.data.content, 'base64').toString(
'utf8'
);
const parseResult = parseRegionTags(
fileContents,
file,
owner,
repo,
pull_request.head.sha
);
parseResults.set(file, parseResult);
if (!parseResult.result) {
mismatchedTags = true;
for (const violation of parseResult.violations) {
failureMessages.push(formatMatchingViolation(violation));
}
}
if (parseResult.tagsFound) {
tagsFound = true;
}
} catch (err) {
// Ignoring 403/404 errors.
if (err.status === 403 || err.status === 404) {
logger.info(
`ignoring 403/404 errors upon fetching ${file}: ${err.message}`
);
} else {
throw err;
}
}
}
const checkParams = context.repo({
name: 'Mismatched region tag',
conclusion: 'success' as Conclusion,
head_sha: pull_request.head.sha,
output: {
title: 'Region tag check',
summary: 'Region tag successful',
text: 'Region tag successful',
},
});
if (mismatchedTags) {
checkParams.conclusion = 'failure';
checkParams.output = {
title: 'Mismatched region tag detected.',
summary: 'Some new files have mismatched region tag',
text: failureMessages.join('\n'),
};
}
// post the status of the region tag check to the PR, using:
// https://developer.github.com/v3/checks/
if (
configuration.alwaysCreateStatusCheck() ||
configuration.aggregateChecks() ||
tagsFound
) {
await aggregator.add(checkParams);
}
let commentBody = '';
if (result.changes.length === 0) {
// If this run is initiated by a user with the force-run label
// or refresh checkbox, we don't exit.
//
// Also, when the config `alwaysCreateStatusCheck` is true, we need
// to create successful status checks, so we don't exit.
if (
!refreshing &&
!configuration.alwaysCreateStatusCheck() &&
!configuration.aggregateChecks()
) {
return;
}
commentBody += 'No region tags are edited in this PR.\n';
}
// Add or update a comment on the PR.
const prNumber = pull_request.number;
// First check product prefix for added region tags.
let productPrefixViolations: Array<Violation> = [];
if (!noPrefixReq) {
productPrefixViolations = await checkProductPrefixViolations(
result,
configuration
);
}
const removingUsedTagsViolations = await checkRemovingUsedTagViolations( | configuration,
parseResults,
pull_request.base.repo.full_name,
pull_request.base.ref
);
const removeUsedTagViolations = [
...(removingUsedTagsViolations.get('REMOVE_USED_TAG') as Violation[]),
...(removingUsedTagsViolations.get(
'REMOVE_CONFLICTING_TAG'
) as Violation[]),
];
const removeSampleBrowserViolations = removingUsedTagsViolations.get(
'REMOVE_SAMPLE_BROWSER_PAGE'
) as Violation[];
const removeFrozenRegionTagViolations = removingUsedTagsViolations.get(
'REMOVE_FROZEN_REGION_TAG'
) as Violation[];
// status check for productPrefixViolations
const prefixCheckParams = context.repo({
name: 'Region tag product prefix',
conclusion: 'success' as Conclusion,
head_sha: pull_request.head.sha,
output: {
title: 'No violations',
summary: 'No violations found',
text: 'All the tags have appropriate product prefix',
},
});
// status check for removeUsedTagViolations
const removeUsedTagCheckParams = context.repo({
name: 'Disruptive region tag removal',
conclusion: 'success' as Conclusion,
head_sha: pull_request.head.sha,
output: {
title: 'No violations',
summary: 'No violations found',
text: 'No disruptive region tag removal',
},
});
if (
productPrefixViolations.length > 0 ||
removeUsedTagViolations.length > 0
) {
commentBody += 'Here is the summary of possible violations 😱';
// Rendering prefix violations
if (productPrefixViolations.length > 0) {
let summary = '';
if (productPrefixViolations.length === 1) {
summary =
'There is a possible violation for not having product prefix.';
} else {
summary = `There are ${productPrefixViolations.length} possible violations for not having product prefix.`;
}
const productPrefixViolationsDetail = formatViolations(
productPrefixViolations,
summary
);
commentBody += productPrefixViolationsDetail;
prefixCheckParams.conclusion = 'failure';
prefixCheckParams.output = {
title: 'Missing region tag prefix',
summary: 'Some region tags do not have appropriate prefix',
text: productPrefixViolationsDetail,
};
}
// Rendering used tag violations
if (removeUsedTagViolations.length > 0) {
let summary = '';
if (removeUsedTagViolations.length === 1) {
summary =
'There is a possible violation for removing region tag in use.';
} else {
summary = `There are ${removeUsedTagViolations.length} possible violations for removing region tag in use.`;
}
const removeUsedTagViolationsDetail = formatViolations(
removeUsedTagViolations,
summary
);
commentBody += removeUsedTagViolationsDetail;
removeUsedTagCheckParams.conclusion = 'failure';
removeUsedTagCheckParams.output = {
title: 'Removal of region tags in use',
summary: '',
text: removeUsedTagViolationsDetail,
};
}
commentBody +=
'**The end of the violation section. All the stuff below is for FYI purposes only.**\n\n';
commentBody += '---\n';
}
if (removeSampleBrowserViolations.length > 0) {
let summary = 'You are about to delete the following sample browser page';
if (removeSampleBrowserViolations.length > 1) {
summary += 's';
}
summary += '.';
commentBody += formatViolations(removeSampleBrowserViolations, summary);
commentBody += '---\n';
}
if (removeFrozenRegionTagViolations.length > 0) {
let summary = 'You are about to delete the following frozen region tag';
if (removeFrozenRegionTagViolations.length > 1) {
summary += 's';
}
summary += '.';
commentBody += formatViolations(removeFrozenRegionTagViolations, summary);
commentBody += '---\n';
}
if (result.added > 0 || result.deleted > 0) {
commentBody += 'Here is the summary of changes.\n';
}
if (result.added > 0) {
const plural = result.added === 1 ? '' : 's';
const summary = `You are about to add ${result.added} region tag${plural}.`;
let detail = '';
for (const change of result.changes) {
if (change.type === 'add') {
detail += `- ${formatRegionTag(change)}\n`;
}
}
commentBody += formatExpandable(summary, detail);
}
if (result.deleted > 0) {
const plural = result.deleted === 1 ? '' : 's';
const summary = `You are about to delete ${result.deleted} region tag${plural}.\n`;
let detail = '';
for (const change of result.changes) {
if (change.type === 'del') {
detail += `- ${formatRegionTag(change)}\n`;
}
}
commentBody += formatExpandable(summary, detail);
}
// Trim the commentBody when it's too long.
if (commentBody.length > MAX_CHARS_IN_COMMENT) {
commentBody = commentBody.substring(0, MAX_CHARS_IN_COMMENT);
// Also trim the string after the last newline to prevent a broken
// UI rendering.
const newLineIndex = commentBody.lastIndexOf('\n');
if (newLineIndex !== -1) {
commentBody = commentBody.substring(0, newLineIndex);
}
commentBody += '\n...(The comment is too long, omitted)\n';
}
commentBody += `---
This comment is generated by [snippet-bot](https://github.com/apps/snippet-bot).
If you find problems with this result, please file an issue at:
https://github.com/googleapis/repo-automation-bots/issues.
To update this comment, add \`${REFRESH_LABEL}\` label or use the checkbox below:
${REFRESH_UI}
`;
// The bot should not add a new comment when there's no region tag
// changes, so we pass `onlyUpdate` flag.
const onlyUpdate = result.changes.length === 0;
await addOrUpdateIssueComment(
context.octokit,
owner,
repo,
prNumber,
installationId as number,
commentBody,
onlyUpdate
);
// Status checks for missing region tag prefix
if (
configuration.alwaysCreateStatusCheck() ||
configuration.aggregateChecks() ||
productPrefixViolations.length > 0
) {
await aggregator.add(prefixCheckParams);
}
// Status checks for disruptive region tag removal
if (
configuration.alwaysCreateStatusCheck() ||
configuration.aggregateChecks() ||
removeUsedTagViolations.length > 0
) {
await aggregator.add(removeUsedTagCheckParams);
}
await aggregator.submit();
// emit metrics
logger.metric('snippet-bot-violations', {
target: pull_request.url,
violation_type: 'UNMATCHED_REGION_TAG',
count: failureMessages.length,
});
logger.metric('snippet-bot-violations', {
target: pull_request.url,
violation_type: 'MISSING_PRODUCT_PREFIX',
count: productPrefixViolations.length,
});
logger.metric('snippet-bot-violations', {
target: pull_request.url,
violation_type: 'REMOVING_USED_TAG',
count: removeUsedTagViolations.length,
});
}
/**
* Creates a comment mark used for addOrUpdateIssueComment.
* I'll move this function to gcf-utils later.
*/
function getCommentMark(installationId: number | undefined): string {
return `<!-- probot comment [${installationId}]-->`;
}
export = (app: Probot) => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
app.on('schedule.repository' as any, async context => {
const owner = context.payload.organization.login;
const repo = context.payload.repository.name;
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${owner}/${repo}.`);
return;
}
await syncLabels(context.octokit, owner, repo, SNIPPET_BOT_LABELS);
});
app.on('issue_comment.edited', async context => {
const commentMark = getCommentMark(context.payload.installation?.id);
// We only proceed if the comment was made by this bot and has the refresh
// checkbox checked.
if (
!context.payload.comment.body.includes(commentMark) ||
!context.payload.comment.body.includes(REFRESH_STRING)
) {
return;
}
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
const prNumber = context.payload.issue.number;
const prResponse = await context.octokit.pulls.get({
owner: owner,
repo: repo,
pull_number: prNumber,
});
// Invalidate the cache for Snippets.
invalidateCache();
// Examine the pull request.
await scanPullRequest(
context,
prResponse.data as PullRequest,
configuration,
true
);
});
app.on(['issues.opened', 'issues.reopened'], async context => {
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
await fullScan(context, configuration);
});
app.on('pull_request.labeled', async context => {
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
// Only proceed if the `snippet-bot:force-run` label is added.
if (context.payload.pull_request.labels === undefined) {
return;
}
// Exits when there's no REFRESH_LABEL
const labelFound = context.payload.pull_request.labels.some(
(label: Label) => {
return label.name === REFRESH_LABEL;
}
);
if (!labelFound) {
return;
}
// Remove the label and proceed.
try {
await context.octokit.issues.removeLabel(
context.issue({name: REFRESH_LABEL})
);
} catch (err) {
// Ignoring 404 errors.
if (err.status !== 404) {
throw err;
}
}
// Also invalidate the cache for Snippets.
invalidateCache();
// Examine the pull request.
await scanPullRequest(
context,
context.payload.pull_request as PullRequest,
configuration,
true
);
});
app.on(
[
'pull_request.opened',
'pull_request.reopened',
'pull_request.edited',
'pull_request.synchronize',
],
async context => {
// Exit if the PR is closed.
if (context.payload.pull_request.state === 'closed') {
logger.info(
`The pull request ${context.payload.pull_request.url} is closed, exiting.`
);
return;
}
// If the head repo is null, we cannot proceed.
if (
context.payload.pull_request.head.repo === undefined ||
context.payload.pull_request.head.repo === null
) {
logger.info(
`The head repo is undefined for ${context.payload.pull_request.url}, exiting.`
);
return;
}
const repoUrl = context.payload.repository.full_name;
const {owner, repo} = context.repo();
// We should first check the config schema. Otherwise, we'll miss
// the opportunity for checking the schema when adding the config
// file for the first time.
const configChecker = new ConfigChecker<ConfigurationOptions>(
schema,
CONFIGURATION_FILE_PATH
);
await configChecker.validateConfigChanges(
context.octokit,
owner,
repo,
context.payload.pull_request.head.sha,
context.payload.pull_request.number
);
const configOptions = await getConfig<ConfigurationOptions>(
context.octokit,
owner,
repo,
CONFIGURATION_FILE_PATH,
{schema: schema}
);
if (configOptions === null) {
logger.info(`snippet-bot is not configured for ${repoUrl}.`);
return;
}
const configuration = new Configuration({
...DEFAULT_CONFIGURATION,
...configOptions,
});
logger.info({config: configuration});
await scanPullRequest(
context,
context.payload.pull_request as PullRequest,
configuration
);
}
);
}; | result, | random_line_split |
class_system_1_1_class_type.js | var class_system_1_1_class_type =
[
[ "ClassType", "class_system_1_1_class_type.html#a6b4c4fc5b96b487e7a01ffd474996be0", null ],
[ "~ClassType", "class_system_1_1_class_type.html#a9bd32e140623554f512e1606a11f3e28", null ],
[ "Clone", "class_system_1_1_class_type.html#ae07dbc768b7946e4791f76ae093d983b", null ],
[ "CreateFromArchive", "class_system_1_1_class_type.html#a69bd42b33425604e73f2709588b321d9", null ],
[ "Equals", "class_system_1_1_class_type.html#a12db75b7d44b232a045ad4ccd8dce7fa", null ],
[ "FindCopyConstructor", "class_system_1_1_class_type.html#a3c20bc8355c0253890b055ac30d12f72", null ],
[ "FindCopyOperator", "class_system_1_1_class_type.html#a149b62d0962453f0c4a436c5ea4f1b31", null ],
[ "FindEmptyConstructor", "class_system_1_1_class_type.html#afc163e8406877c0499e6a3969d4fbd95", null ],
[ "FindMoveConstructor", "class_system_1_1_class_type.html#a543b54976ebcba2b8755f5cbd7329a37", null ],
[ "FindMoveOperator", "class_system_1_1_class_type.html#a83bc2647157525fc4a0400874eae7759", null ],
[ "get_BaseClasses", "class_system_1_1_class_type.html#ab666b0348efd7aaf5ada19ce328e3e8e", null ],
[ "get_DerivedClasses", "class_system_1_1_class_type.html#a056addefb2f7201e001731b2af1235b9", null ],
[ "get_Instances", "class_system_1_1_class_type.html#a366794cd333ce4da06e7ca9836bc64a5", null ],
[ "get_InstantiatedFromClass", "class_system_1_1_class_type.html#a8fd46f4defe506376abf453412e952e6", null ],
[ "get_InstantiatedFromParams", "class_system_1_1_class_type.html#a516fa3cb835cff1f3e08305f1360b774", null ],
[ "get_Kind", "class_system_1_1_class_type.html#aa812da19c7579c9e2cb690d3ec082575", null ],
[ "get_sizeof", "class_system_1_1_class_type.html#afc7c1879430b61b4a234f7d0381cb395", null ],
[ "GetBracketOperator", "class_system_1_1_class_type.html#a32ede758f3fe46dc715f4072b4b09186", null ],
[ "GetBracketOperatorConst", "class_system_1_1_class_type.html#afe79bc2c5f4e5a9e06100f2db6d4b9f7", null ],
[ "GetClass", "class_system_1_1_class_type.html#a9a75508ac378baf3869f6167499c53e6", null ],
[ "GetClassKey", "class_system_1_1_class_type.html#a7df2ad7b390ede2ea67f22a9b4b0d4f0", null ],
[ "GetComplementOperator", "class_system_1_1_class_type.html#a159c0e21c8f4af19651627991eb84a0d", null ],
[ "GetComplementOperatorConst", "class_system_1_1_class_type.html#a14d36c7c4584d2d4e4ab04f01571349c", null ],
[ "GetConversionOperator", "class_system_1_1_class_type.html#a87381fd98a57c4a6114af66774e9312b", null ],
[ "GetConversionOperatorConst", "class_system_1_1_class_type.html#ad374c2f5dd9fbe1855437850ae4fcd4b", null ],
[ "GetCopyConstructor", "class_system_1_1_class_type.html#a7ebbe1242752a9db3da5aa2470683cb8", null ],
[ "GetCopyConstructor", "class_system_1_1_class_type.html#ad542c9f039da1fe3747c6af89451cee1", null ],
[ "GetCopyOperator", "class_system_1_1_class_type.html#a2b0384b6d900d80e2da04cc768e9247d", null ],
[ "GetCopyOperator", "class_system_1_1_class_type.html#ab773bc68943f819b7ece80511bca2c74", null ],
[ "GetDeclarator", "class_system_1_1_class_type.html#a010e108e5f43b7295a58d38900ff9b01", null ],
[ "GetDecrementOperator", "class_system_1_1_class_type.html#a43afdac1cb4865f7ee9c9f3d3a9a63fd", null ],
[ "GetDecrementOperatorConst", "class_system_1_1_class_type.html#aa8a2c74edae7d00983a803026ef54b09", null ],
[ "GetDestructor", "class_system_1_1_class_type.html#abd9a681a73daa4bb4f1e5094cab43786", null ],
[ "GetEmptyConstructor", "class_system_1_1_class_type.html#ab4cf4239660a700ceeedac624ae0714d", null ],
[ "GetIncrementOperator", "class_system_1_1_class_type.html#a9af341e8b911050a7cfe23e3bf241275", null ],
[ "GetIncrementOperatorConst", "class_system_1_1_class_type.html#a3ad95255b731fb71da81ab98c3525d2d", null ],
[ "GetMemberByOffset", "class_system_1_1_class_type.html#ad4528e2978781114b85fbe82810a0e91", null ],
[ "GetMoveConstructor", "class_system_1_1_class_type.html#ac96808101d62fe461b3114323cab0bd2", null ],
[ "GetMoveConstructor", "class_system_1_1_class_type.html#a96c9c49ea4ddf5e8e3b818af57dce98a", null ],
[ "GetMoveOperator", "class_system_1_1_class_type.html#a800e4cd216b421e419c5b2409c2ac6de", null ],
[ "GetMoveOperator", "class_system_1_1_class_type.html#a02b1c31a8b9bb276a1f70a65e895d6b2", null ],
[ "GetNotOperator", "class_system_1_1_class_type.html#a7a69e2ffbf1c3dd1678804386ee09432", null ],
[ "GetNotOperatorConst", "class_system_1_1_class_type.html#a3dcc7707287eb3f66c34707c9cf9523a", null ],
[ "GetOffset", "class_system_1_1_class_type.html#a999b2fc5a8ba1a297cc0afb9306a8291", null ],
[ "GetOperatorDelete", "class_system_1_1_class_type.html#ad340ef7c6fdc3a8ef6fd50406c005cbc", null ],
[ "GetOperatorNew", "class_system_1_1_class_type.html#a635e114a8909d443c102e40bffc8955a", null ],
[ "GetSerializableType", "class_system_1_1_class_type.html#aba23e878d7b587930ca8d73fc987d8c7", null ],
[ "GetUserData", "class_system_1_1_class_type.html#a3da7eaf1b44f0d4bf77ee3ff71942708", null ],
[ "HasVirtualTable", "class_system_1_1_class_type.html#abb074f2fec0d94116460a9e55ac96974", null ],
[ "InstantiateTemplate", "class_system_1_1_class_type.html#ac007e6490cab5760aef27d7ed337075c", null ],
[ "IsDerivedFrom", "class_system_1_1_class_type.html#ad3fbd42c7e19e5c4380b3265b1adc604", null ],
[ "IsEmpty", "class_system_1_1_class_type.html#afa2fad6d43c046624c585cbccd4070f5", null ],
[ "IsLeftMostDerivedFrom", "class_system_1_1_class_type.html#af3944ac707aaadafec365e1981e445cd", null ],
[ "IsOfType", "class_system_1_1_class_type.html#a6971579bccd587c2d6661df6fddb34c8", null ],
[ "IsPOD", "class_system_1_1_class_type.html#a84ba8a3697787dcdff8e210dc1153417", null ],
[ "IsPolymorphic", "class_system_1_1_class_type.html#ad9cea313b434610a7c0599332dca3220", null ],
[ "Load", "class_system_1_1_class_type.html#a053ce96c22a1c1b4a968826fbd74f532", null ],
[ "LookupType", "class_system_1_1_class_type.html#ae4a9db889f9aa7b5247813287838a163", null ],
[ "SetUserData", "class_system_1_1_class_type.html#ab96bddfb470d9531e85d458cecde8ffc", null ],
[ "Store", "class_system_1_1_class_type.html#aa962a7071af72240bb5749c1d86b157a", null ],
[ "ToString", "class_system_1_1_class_type.html#ab999ee80846bd38247e9d50440aa2f34", null ],
[ "Write", "class_system_1_1_class_type.html#a114d7b89d6c440c9f2fbec56cbe9bb8f", null ],
[ "operator>>", "class_system_1_1_class_type.html#a2d6e1668949d2067c0c42c12844be414", null ],
[ "m_alignment", "class_system_1_1_class_type.html#ace13153c8717c90adc8abf7ec748c70d", null ],
[ "m_attributeDefs", "class_system_1_1_class_type.html#a9152c51271d4473f6f811b3819e41895", null ],
[ "m_attributes", "class_system_1_1_class_type.html#aa3a1c11df75c0090e4b5a1489af0be14", null ],
[ "m_bases", "class_system_1_1_class_type.html#ab129cafbc0494b750fbc3467b5c40f2d", null ],
[ "m_bVT", "class_system_1_1_class_type.html#abdba7509a95afdc3b4254f63a10ab6f8", null ], | [ "m_copyOperator", "class_system_1_1_class_type.html#a908aaf2b4a33416ecf6fe4bda27ecd9a", null ],
[ "m_decrementOperator", "class_system_1_1_class_type.html#a86fde0d4bc7adb2916588b4c424685d2", null ],
[ "m_decrementOperatorConst", "class_system_1_1_class_type.html#a44606633d535d3cc9d1d63d061d62519", null ],
[ "m_def", "class_system_1_1_class_type.html#ae951c7ed528dbddf8b6f1b3985aad169", null ],
[ "m_derived", "class_system_1_1_class_type.html#a33772232557393afbe1fd5e76fd071c0", null ],
[ "m_emptyConstructor", "class_system_1_1_class_type.html#ab083a1ee2a87e1c1f7f729dbae4cc474", null ],
[ "m_extraInterfaces", "class_system_1_1_class_type.html#ab154c1b63bad7b5d4c75bd9ee30f99c9", null ],
[ "m_force_alignment", "class_system_1_1_class_type.html#ad256851b7e667df6ac166ca00e53aa0c", null ],
[ "m_forwardDeclarations", "class_system_1_1_class_type.html#a7d2eb0cf9a05946476c5e5a78c805ce1", null ],
[ "m_gcArrayMembers", "class_system_1_1_class_type.html#add0b7f1c4c7d64c63e1f3e2b091e16d3", null ],
[ "m_gcInnerMembers", "class_system_1_1_class_type.html#a3598954612aca23fb67ea94fdf1c3885", null ],
[ "m_gcMembers", "class_system_1_1_class_type.html#ad5687f97cd1bddeae191d5d78bef7365", null ],
[ "m_HasTemplateArgs", "class_system_1_1_class_type.html#a8ca6f63388989d8cedc8bbaa890308a4", null ],
[ "m_incrementOperator", "class_system_1_1_class_type.html#ad4e37f5d7f7989467b1eeb2180b52615", null ],
[ "m_incrementOperatorConst", "class_system_1_1_class_type.html#adba323683f3867eb7e1528ba21a0efd7", null ],
[ "m_instances", "class_system_1_1_class_type.html#ab84c8c07f93ac60daee1540dc531bc40", null ],
[ "m_instantiatedClasses", "class_system_1_1_class_type.html#a12fa9b9331c0862272e6e1c06bb89213", null ],
[ "m_isEmpty", "class_system_1_1_class_type.html#a7ed59180fc1b01e309499d7f7348dfd3", null ],
[ "m_isPOD", "class_system_1_1_class_type.html#a09369b35f5a4d2e6cb916c7ab1ad6dfc", null ],
[ "m_kwType", "class_system_1_1_class_type.html#a5ea1f77527d6cf15a2f3e015c81f1152", null ],
[ "m_moveOperator", "class_system_1_1_class_type.html#a6aee11f1bdc3dac652ae95eeb4997804", null ],
[ "m_notOperator", "class_system_1_1_class_type.html#a8b295ab89c1ab4e87b6e82664dba9961", null ],
[ "m_notOperatorConst", "class_system_1_1_class_type.html#ad1fcb9826c2aedbe6f613ff0a67f5543", null ],
[ "m_packing", "class_system_1_1_class_type.html#a0e625f21239728cba50365c3697c0330", null ],
[ "m_pInstantiatedFromArgs", "class_system_1_1_class_type.html#addbcc0e80e8d96b3ab04f61a3d8d8548", null ],
[ "m_pInstantiatedFromClass", "class_system_1_1_class_type.html#a7290ce3c042da55fbc69e4f1c2e66915", null ],
[ "m_postDecrementOperator", "class_system_1_1_class_type.html#a59d178599908ccf0597b0f405ab023a9", null ],
[ "m_postDecrementOperatorConst", "class_system_1_1_class_type.html#ada30a93076e4ff19d015b27568363c3a", null ],
[ "m_postIncrementOperator", "class_system_1_1_class_type.html#add5bcf8ec148ae522ed1aae037b3724a", null ],
[ "m_postIncrementOperatorConst", "class_system_1_1_class_type.html#a5bc2f61386ed05302f8b304eb9f97503", null ],
[ "m_pTemplateParams", "class_system_1_1_class_type.html#a64b0ef3f6fc493700ddbef7da0808b56", null ],
[ "m_rsizeof", "class_system_1_1_class_type.html#aa37c70268c995c9cbe7bafc6328e0078", null ],
[ "m_sizeof", "class_system_1_1_class_type.html#a319eb64e6feaa076153c42502145582b", null ],
[ "m_virtualSize", "class_system_1_1_class_type.html#aad3655645e6573c023e8b06e0912d2e9", null ]
]; | [ "m_classUserData", "class_system_1_1_class_type.html#a8e410f39f3a150f214a4f93b6f9e4273", null ],
[ "m_complementOperator", "class_system_1_1_class_type.html#a39bc6cb62342bd90a8681b0112ee9eaf", null ],
[ "m_complementOperatorConst", "class_system_1_1_class_type.html#aac639396e1a7aa5bf5121a4dbd0f65e6", null ],
[ "m_copyConstructor", "class_system_1_1_class_type.html#a93042c7e41337f9c283210fb3f15c809", null ], | random_line_split |
ui.rs | use std::collections::{HashSet, HashMap, VecDeque};
use std::any::{Any, TypeId};
use std::rc::Rc;
use std::cell::RefCell;
use cassowary::Constraint;
use cassowary::strength::*;
use glutin;
use window::Window;
use app::App;
use widget::{WidgetRef, WidgetBuilder};
use layout::{LimnSolver, LayoutChanged, LayoutVars, ExactFrame};
use layout::constraint::*;
use geometry::{Point, Rect, Size};
use resources::WidgetId;
use event::Target;
use render::WebRenderContext;
/// If true, the constraint that matches the root layout size to the window size
/// is required. This can be useful for debugging but can result in panics from resizing the window.
const WINDOW_CONSTRAINT_REQUIRED: bool = false;
pub struct Ui {
pub(crate) root: WidgetRef,
widget_map: HashMap<WidgetId, WidgetRef>,
pub(crate) solver: LimnSolver,
pub(crate) render: WebRenderContext,
needs_redraw: bool,
should_close: bool,
debug_draw_bounds: bool,
window: Rc<RefCell<Window>>,
window_constraints: Vec<Constraint>,
}
impl Ui {
pub(super) fn new(mut window: Window, events_loop: &glutin::EventsLoop) -> Self {
let mut root = WidgetBuilder::new("window");
root.layout().set_container(ExactFrame);
root.layout().add(top_left(Point::zero()));
if !WINDOW_CONSTRAINT_REQUIRED {
let mut root_layout = root.layout();
root_layout.edit_right().strength(REQUIRED - 1.0);
root_layout.edit_bottom().strength(REQUIRED - 1.0);
}
let render = WebRenderContext::new(&mut window, events_loop);
Ui {
widget_map: HashMap::new(),
root: root.into(),
solver: LimnSolver::new(),
render: render,
needs_redraw: true,
should_close: false,
debug_draw_bounds: false,
window: Rc::new(RefCell::new(window)),
window_constraints: Vec::new(),
}
}
pub fn get_widget(&self, widget_id: WidgetId) -> Option<WidgetRef> {
self.widget_map.get(&widget_id).map(|widget| widget.clone())
}
pub fn get_root(&self) -> WidgetRef {
self.root.clone()
}
pub fn event<T: 'static>(&self, data: T) {
self.get_root().event(data);
}
pub fn close(&mut self) {
self.should_close = true;
}
pub(super) fn should_close(&self) -> bool {
self.should_close
}
pub(super) fn resize_window_to_fit(&mut self) {
let window_dims = self.get_root_dims();
self.window.borrow_mut().resize(window_dims.width as u32, window_dims.height as u32);
}
pub fn get_root_dims(&self) -> Size {
let root = self.get_root();
let mut dims = root.bounds().size;
// use min size to prevent window size from being set to 0 (X crashes)
dims.width = f32::max(100.0, dims.width);
dims.height = f32::max(100.0, dims.height);
dims
}
pub(super) fn window_resized(&mut self, window_dims: Size) {
let window_size = self.window.borrow_mut().size_u32();
self.render.window_resized(window_size);
let mut root = self.get_root();
if WINDOW_CONSTRAINT_REQUIRED {
let window_constraints = root.layout().create_constraint(size(window_dims));
{
let window_constraints = window_constraints.clone();
root.update_layout(|layout| {
for constraint in self.window_constraints.drain(..) {
layout.remove_constraint(constraint);
}
layout.add(window_constraints);
});
}
self.window_constraints = window_constraints;
} else {
root.update_layout(|layout| {
layout.edit_right().set(window_dims.width);
layout.edit_bottom().set(window_dims.height);
});
}
self.needs_redraw = true;
}
pub fn check_layout_changes(&mut self) {
let changes = self.solver.fetch_changes();
debug!("layout has {} changes", changes.len());
if !changes.is_empty() {
self.event(LayoutChanged(changes));
}
}
pub fn redraw(&mut self) |
pub fn needs_redraw(&self) -> bool {
self.needs_redraw
}
pub(super) fn draw_if_needed(&mut self) {
if self.needs_redraw {
self.draw();
self.needs_redraw = false;
}
}
fn draw(&mut self) {
let window_size = self.window.borrow_mut().size_f32();
let (builder, resources) = {
let mut renderer = self.render.render_builder(window_size);
let crop_to = Rect::new(Point::zero(), Size::new(::std::f32::MAX, ::std::f32::MAX));
self.root.widget_mut().draw(crop_to, &mut renderer);
if self.debug_draw_bounds {
self.root.widget_mut().draw_debug(&mut renderer);
}
(renderer.builder, renderer.resources)
};
self.render.set_display_list(builder, resources, window_size);
self.render.generate_frame();
}
// Call after drawing
pub(super) fn update(&mut self) {
self.render.update(self.window.borrow_mut().size_u32());
let window = self.window.borrow_mut();
window.swap_buffers();
}
pub fn widgets_bfs(&self) -> WidgetsBfs {
WidgetsBfs::new(self.get_root())
}
pub fn widgets_under_cursor(&mut self, point: Point) -> WidgetsUnderCursor {
WidgetsUnderCursor::new(point, self.get_root())
}
/// Find the first widget under the cursor, i.e. the last one to be drawn that is under the cursor
pub fn widget_under_cursor(&mut self, point: Point) -> Option<WidgetRef> {
self.widgets_under_cursor(point).next()
}
fn handle_widget_event(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) -> bool {
let handled = widget_ref.trigger_event(self, type_id, data);
if widget_ref.has_updated() {
self.needs_redraw = true;
widget_ref.set_updated(false);
}
handled
}
pub(super) fn handle_event(&mut self, address: Target, type_id: TypeId, data: &Any) {
match address {
Target::Root => {
let root = self.get_root();
self.handle_widget_event(root, type_id, data);
}
Target::Widget(widget_ref) => {
self.handle_widget_event(widget_ref, type_id, data);
}
Target::SubTree(widget_ref) => {
self.handle_event_subtree(widget_ref, type_id, data);
}
Target::BubbleUp(widget_ref) => {
let mut maybe_widget_ref = Some(widget_ref);
while let Some(widget_ref) = maybe_widget_ref {
if self.handle_widget_event(widget_ref.clone(), type_id, data) {
break;
}
maybe_widget_ref = widget_ref.parent();
}
}
}
}
fn handle_event_subtree(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) {
self.handle_widget_event(widget_ref.clone(), type_id, data);
let children = &widget_ref.children();
for child in children {
self.handle_event_subtree(child.clone(), type_id, data);
}
}
pub fn set_debug_draw_bounds(&mut self, debug_draw_bounds: bool) {
self.debug_draw_bounds = debug_draw_bounds;
self.redraw();
}
pub fn debug_widget_positions(&self) {
println!("WIDGET POSITIONS");
for widget_ref in self.widgets_bfs() {
let bounds = widget_ref.bounds();
let name = widget_ref.name();
println!("{:?} {:?}", name, bounds);
}
}
}
#[derive(Clone)]
pub struct RegisterWidget(pub WidgetRef);
#[derive(Clone)]
pub struct RemoveWidget(pub WidgetRef);
impl App {
pub fn add_ui_handlers(&mut self) {
self.add_handler_fn(|event: &RegisterWidget, args| {
let event = event.clone();
let RegisterWidget(widget_ref) = event;
args.ui.widget_map.insert(widget_ref.id(), widget_ref.clone());
});
self.add_handler_fn(|event: &RemoveWidget, args| {
let event = event.clone();
let RemoveWidget(widget_ref) = event;
args.ui.solver.remove_layout(widget_ref.id().0);
args.ui.check_layout_changes();
args.ui.widget_map.remove(&widget_ref.id());
});
}
}
pub struct WidgetAttachedEvent;
pub struct WidgetDetachedEvent;
pub struct ChildAttachedEvent(pub WidgetId, pub LayoutVars);
pub enum ChildrenUpdatedEvent {
Added(WidgetRef),
Removed(WidgetRef),
}
pub struct WidgetsUnderCursor {
point: Point,
dfs: WidgetsDfsPostReverse,
}
impl WidgetsUnderCursor {
fn new(point: Point, root: WidgetRef) -> Self {
WidgetsUnderCursor {
point: point,
dfs: WidgetsDfsPostReverse::new(root),
}
}
}
impl Iterator for WidgetsUnderCursor {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
for widget_ref in self.dfs.by_ref() {
let widget = &widget_ref.widget();
if widget.is_under_cursor(self.point) {
return Some(widget_ref.clone());
}
}
None
}
}
// Iterates in reverse of draw order, that is, depth first post order,
// with siblings in reverse of insertion order
struct WidgetsDfsPostReverse {
stack: Vec<WidgetRef>,
discovered: HashSet<WidgetRef>,
finished: HashSet<WidgetRef>,
}
impl WidgetsDfsPostReverse {
fn new(root: WidgetRef) -> Self {
WidgetsDfsPostReverse {
stack: vec![root],
discovered: HashSet::new(),
finished: HashSet::new(),
}
}
}
impl Iterator for WidgetsDfsPostReverse {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
while let Some(widget_ref) = self.stack.last().cloned() {
if self.discovered.insert(widget_ref.clone()) {
for child in &widget_ref.children() {
self.stack.push(child.clone());
}
} else {
self.stack.pop();
if self.finished.insert(widget_ref.clone()) {
return Some(widget_ref.clone());
}
}
}
None
}
}
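// The traversal above can be hard to picture from the code alone. Below is a
// minimal, self-contained sketch (an illustration only, not part of the widget
// API) of the same idea on a plain index-based tree: depth-first post-order,
// visiting siblings in reverse of insertion order. The tree shape used in the
// example comment is an assumption made up for illustration.
#[allow(dead_code)]
fn dfs_post_reverse_sketch(children: &[Vec<usize>], root: usize) -> Vec<usize> {
    let mut order = Vec::new();
    let mut stack = vec![(root, false)];
    while let Some((node, expanded)) = stack.pop() {
        if expanded {
            // All of the node's children were processed first, so the node is emitted last.
            order.push(node);
        } else {
            stack.push((node, true));
            // Children pushed in insertion order pop in reverse order,
            // mirroring WidgetsDfsPostReverse above.
            for &child in &children[node] {
                stack.push((child, false));
            }
        }
    }
    // Example: children = [[1, 2], [], []] with root 0 yields [2, 1, 0],
    // the reverse of the draw order 0, 1, 2.
    order
}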
pub struct WidgetsBfs {
queue: VecDeque<WidgetRef>,
}
impl WidgetsBfs {
fn new(root: WidgetRef) -> Self {
let mut queue = VecDeque::new();
queue.push_front(root);
WidgetsBfs { queue: queue }
}
}
impl Iterator for WidgetsBfs {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
if let Some(widget_ref) = self.queue.pop_front() {
for child in &widget_ref.children() {
self.queue.push_back(child.clone());
}
Some(widget_ref)
} else {
None
}
}
}
| {
self.needs_redraw = true;
} | identifier_body |
ui.rs | use std::collections::{HashSet, HashMap, VecDeque};
use std::any::{Any, TypeId};
use std::rc::Rc;
use std::cell::RefCell;
use cassowary::Constraint;
use cassowary::strength::*;
use glutin;
use window::Window;
use app::App;
use widget::{WidgetRef, WidgetBuilder};
use layout::{LimnSolver, LayoutChanged, LayoutVars, ExactFrame};
use layout::constraint::*;
use geometry::{Point, Rect, Size};
use resources::WidgetId;
use event::Target;
use render::WebRenderContext;
/// If true, the constraint that matches the root layout size to the window size
/// is required. This can be useful for debugging but can result in panics from resizing the window.
const WINDOW_CONSTRAINT_REQUIRED: bool = false;
pub struct Ui {
pub(crate) root: WidgetRef,
widget_map: HashMap<WidgetId, WidgetRef>,
pub(crate) solver: LimnSolver,
pub(crate) render: WebRenderContext,
needs_redraw: bool,
should_close: bool,
debug_draw_bounds: bool,
window: Rc<RefCell<Window>>,
window_constraints: Vec<Constraint>,
}
impl Ui {
pub(super) fn new(mut window: Window, events_loop: &glutin::EventsLoop) -> Self {
let mut root = WidgetBuilder::new("window");
root.layout().set_container(ExactFrame);
root.layout().add(top_left(Point::zero()));
if !WINDOW_CONSTRAINT_REQUIRED {
let mut root_layout = root.layout();
root_layout.edit_right().strength(REQUIRED - 1.0);
root_layout.edit_bottom().strength(REQUIRED - 1.0);
}
let render = WebRenderContext::new(&mut window, events_loop);
Ui {
widget_map: HashMap::new(),
root: root.into(),
solver: LimnSolver::new(),
render: render,
needs_redraw: true,
should_close: false,
debug_draw_bounds: false,
window: Rc::new(RefCell::new(window)),
window_constraints: Vec::new(),
}
}
pub fn get_widget(&self, widget_id: WidgetId) -> Option<WidgetRef> {
self.widget_map.get(&widget_id).map(|widget| widget.clone())
}
pub fn get_root(&self) -> WidgetRef {
self.root.clone()
}
pub fn event<T: 'static>(&self, data: T) {
self.get_root().event(data);
}
pub fn close(&mut self) {
self.should_close = true;
}
pub(super) fn should_close(&self) -> bool {
self.should_close
}
pub(super) fn resize_window_to_fit(&mut self) {
let window_dims = self.get_root_dims();
self.window.borrow_mut().resize(window_dims.width as u32, window_dims.height as u32);
}
pub fn get_root_dims(&self) -> Size {
let root = self.get_root();
let mut dims = root.bounds().size;
// use min size to prevent window size from being set to 0 (X crashes)
dims.width = f32::max(100.0, dims.width);
dims.height = f32::max(100.0, dims.height);
dims
}
pub(super) fn window_resized(&mut self, window_dims: Size) {
let window_size = self.window.borrow_mut().size_u32();
self.render.window_resized(window_size);
let mut root = self.get_root();
if WINDOW_CONSTRAINT_REQUIRED {
let window_constraints = root.layout().create_constraint(size(window_dims));
{
let window_constraints = window_constraints.clone();
root.update_layout(|layout| {
for constraint in self.window_constraints.drain(..) {
layout.remove_constraint(constraint);
}
layout.add(window_constraints);
});
}
self.window_constraints = window_constraints;
} else {
root.update_layout(|layout| {
layout.edit_right().set(window_dims.width);
layout.edit_bottom().set(window_dims.height);
});
}
self.needs_redraw = true;
}
pub fn check_layout_changes(&mut self) {
let changes = self.solver.fetch_changes();
debug!("layout has {} changes", changes.len());
if !changes.is_empty() {
self.event(LayoutChanged(changes));
}
}
pub fn redraw(&mut self) {
self.needs_redraw = true;
}
pub fn needs_redraw(&self) -> bool {
self.needs_redraw
}
pub(super) fn draw_if_needed(&mut self) {
if self.needs_redraw {
self.draw();
self.needs_redraw = false;
}
}
fn draw(&mut self) {
let window_size = self.window.borrow_mut().size_f32();
let (builder, resources) = {
let mut renderer = self.render.render_builder(window_size);
let crop_to = Rect::new(Point::zero(), Size::new(::std::f32::MAX, ::std::f32::MAX));
self.root.widget_mut().draw(crop_to, &mut renderer);
if self.debug_draw_bounds {
self.root.widget_mut().draw_debug(&mut renderer);
}
(renderer.builder, renderer.resources)
};
self.render.set_display_list(builder, resources, window_size);
self.render.generate_frame();
}
// Call after drawing
pub(super) fn update(&mut self) {
self.render.update(self.window.borrow_mut().size_u32());
let window = self.window.borrow_mut();
window.swap_buffers();
}
pub fn widgets_bfs(&self) -> WidgetsBfs {
WidgetsBfs::new(self.get_root())
}
pub fn widgets_under_cursor(&mut self, point: Point) -> WidgetsUnderCursor {
WidgetsUnderCursor::new(point, self.get_root())
}
/// Find the first widget under the cursor, i.e. the last one to be drawn that is under the cursor
pub fn widget_under_cursor(&mut self, point: Point) -> Option<WidgetRef> {
self.widgets_under_cursor(point).next()
}
fn handle_widget_event(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) -> bool {
let handled = widget_ref.trigger_event(self, type_id, data);
if widget_ref.has_updated() {
self.needs_redraw = true;
widget_ref.set_updated(false);
}
handled
}
pub(super) fn handle_event(&mut self, address: Target, type_id: TypeId, data: &Any) {
match address {
Target::Root => {
let root = self.get_root();
self.handle_widget_event(root, type_id, data);
}
Target::Widget(widget_ref) => {
self.handle_widget_event(widget_ref, type_id, data);
}
Target::SubTree(widget_ref) => {
self.handle_event_subtree(widget_ref, type_id, data);
}
Target::BubbleUp(widget_ref) => {
let mut maybe_widget_ref = Some(widget_ref);
while let Some(widget_ref) = maybe_widget_ref {
if self.handle_widget_event(widget_ref.clone(), type_id, data) {
break;
}
maybe_widget_ref = widget_ref.parent();
}
}
}
}
fn handle_event_subtree(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) {
self.handle_widget_event(widget_ref.clone(), type_id, data);
let children = &widget_ref.children();
for child in children {
self.handle_event_subtree(child.clone(), type_id, data);
}
}
pub fn set_debug_draw_bounds(&mut self, debug_draw_bounds: bool) {
self.debug_draw_bounds = debug_draw_bounds;
self.redraw();
}
pub fn debug_widget_positions(&self) {
println!("WIDGET POSITIONS");
for widget_ref in self.widgets_bfs() {
let bounds = widget_ref.bounds();
let name = widget_ref.name();
println!("{:?} {:?}", name, bounds);
}
}
}
#[derive(Clone)]
pub struct RegisterWidget(pub WidgetRef);
#[derive(Clone)]
pub struct RemoveWidget(pub WidgetRef);
impl App {
pub fn add_ui_handlers(&mut self) {
self.add_handler_fn(|event: &RegisterWidget, args| {
let event = event.clone();
let RegisterWidget(widget_ref) = event;
args.ui.widget_map.insert(widget_ref.id(), widget_ref.clone());
});
self.add_handler_fn(|event: &RemoveWidget, args| {
let event = event.clone();
let RemoveWidget(widget_ref) = event;
args.ui.solver.remove_layout(widget_ref.id().0);
args.ui.check_layout_changes();
args.ui.widget_map.remove(&widget_ref.id());
});
}
}
pub struct WidgetAttachedEvent;
pub struct WidgetDetachedEvent;
pub struct ChildAttachedEvent(pub WidgetId, pub LayoutVars);
pub enum ChildrenUpdatedEvent {
Added(WidgetRef),
Removed(WidgetRef),
}
pub struct WidgetsUnderCursor {
point: Point,
dfs: WidgetsDfsPostReverse,
}
impl WidgetsUnderCursor {
fn new(point: Point, root: WidgetRef) -> Self {
WidgetsUnderCursor {
point: point,
dfs: WidgetsDfsPostReverse::new(root),
}
}
}
impl Iterator for WidgetsUnderCursor {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
for widget_ref in self.dfs.by_ref() {
let widget = &widget_ref.widget();
if widget.is_under_cursor(self.point) {
return Some(widget_ref.clone());
}
}
None
}
}
// Iterates in reverse of draw order, that is, depth first post order,
// with siblings in reverse of insertion order
struct WidgetsDfsPostReverse {
stack: Vec<WidgetRef>,
discovered: HashSet<WidgetRef>,
finished: HashSet<WidgetRef>,
}
impl WidgetsDfsPostReverse {
fn | (root: WidgetRef) -> Self {
WidgetsDfsPostReverse {
stack: vec![root],
discovered: HashSet::new(),
finished: HashSet::new(),
}
}
}
impl Iterator for WidgetsDfsPostReverse {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
while let Some(widget_ref) = self.stack.last().cloned() {
if self.discovered.insert(widget_ref.clone()) {
for child in &widget_ref.children() {
self.stack.push(child.clone());
}
} else {
self.stack.pop();
if self.finished.insert(widget_ref.clone()) {
return Some(widget_ref.clone());
}
}
}
None
}
}
pub struct WidgetsBfs {
queue: VecDeque<WidgetRef>,
}
impl WidgetsBfs {
fn new(root: WidgetRef) -> Self {
let mut queue = VecDeque::new();
queue.push_front(root);
WidgetsBfs { queue: queue }
}
}
impl Iterator for WidgetsBfs {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
if let Some(widget_ref) = self.queue.pop_front() {
for child in &widget_ref.children() {
self.queue.push_back(child.clone());
}
Some(widget_ref)
} else {
None
}
}
}
| new | identifier_name |
ui.rs | use std::collections::{HashSet, HashMap, VecDeque};
use std::any::{Any, TypeId};
use std::rc::Rc;
use std::cell::RefCell;
use cassowary::Constraint;
use cassowary::strength::*;
use glutin;
use window::Window;
use app::App;
use widget::{WidgetRef, WidgetBuilder};
use layout::{LimnSolver, LayoutChanged, LayoutVars, ExactFrame};
use layout::constraint::*;
use geometry::{Point, Rect, Size};
use resources::WidgetId;
use event::Target;
use render::WebRenderContext;
/// If true, the constraint that matches the root layout size to the window size
/// is required. This can be useful for debugging but can result in panics from resizing the window.
const WINDOW_CONSTRAINT_REQUIRED: bool = false;
pub struct Ui {
pub(crate) root: WidgetRef,
widget_map: HashMap<WidgetId, WidgetRef>,
pub(crate) solver: LimnSolver,
pub(crate) render: WebRenderContext,
needs_redraw: bool,
should_close: bool,
debug_draw_bounds: bool,
window: Rc<RefCell<Window>>,
window_constraints: Vec<Constraint>,
}
impl Ui {
pub(super) fn new(mut window: Window, events_loop: &glutin::EventsLoop) -> Self {
let mut root = WidgetBuilder::new("window");
root.layout().set_container(ExactFrame);
root.layout().add(top_left(Point::zero()));
if !WINDOW_CONSTRAINT_REQUIRED {
let mut root_layout = root.layout();
root_layout.edit_right().strength(REQUIRED - 1.0);
root_layout.edit_bottom().strength(REQUIRED - 1.0);
}
let render = WebRenderContext::new(&mut window, events_loop);
Ui {
widget_map: HashMap::new(),
root: root.into(),
solver: LimnSolver::new(),
render: render,
needs_redraw: true,
should_close: false,
debug_draw_bounds: false,
window: Rc::new(RefCell::new(window)),
window_constraints: Vec::new(),
}
}
pub fn get_widget(&self, widget_id: WidgetId) -> Option<WidgetRef> {
self.widget_map.get(&widget_id).map(|widget| widget.clone())
}
pub fn get_root(&self) -> WidgetRef {
self.root.clone()
}
pub fn event<T: 'static>(&self, data: T) {
self.get_root().event(data);
}
pub fn close(&mut self) {
self.should_close = true;
}
pub(super) fn should_close(&self) -> bool {
self.should_close
}
pub(super) fn resize_window_to_fit(&mut self) {
let window_dims = self.get_root_dims();
self.window.borrow_mut().resize(window_dims.width as u32, window_dims.height as u32);
}
pub fn get_root_dims(&self) -> Size {
let root = self.get_root();
let mut dims = root.bounds().size;
// use min size to prevent window size from being set to 0 (X crashes)
dims.width = f32::max(100.0, dims.width);
dims.height = f32::max(100.0, dims.height);
dims
}
pub(super) fn window_resized(&mut self, window_dims: Size) {
let window_size = self.window.borrow_mut().size_u32();
self.render.window_resized(window_size);
let mut root = self.get_root();
if WINDOW_CONSTRAINT_REQUIRED {
let window_constraints = root.layout().create_constraint(size(window_dims));
{
let window_constraints = window_constraints.clone();
root.update_layout(|layout| {
for constraint in self.window_constraints.drain(..) {
layout.remove_constraint(constraint);
}
layout.add(window_constraints);
});
}
self.window_constraints = window_constraints;
} else {
root.update_layout(|layout| {
layout.edit_right().set(window_dims.width);
layout.edit_bottom().set(window_dims.height);
});
}
self.needs_redraw = true;
}
pub fn check_layout_changes(&mut self) {
let changes = self.solver.fetch_changes();
debug!("layout has {} changes", changes.len());
if !changes.is_empty() {
self.event(LayoutChanged(changes));
}
}
pub fn redraw(&mut self) {
self.needs_redraw = true;
}
pub fn needs_redraw(&self) -> bool {
self.needs_redraw
}
pub(super) fn draw_if_needed(&mut self) {
if self.needs_redraw {
self.draw();
self.needs_redraw = false;
}
}
fn draw(&mut self) {
let window_size = self.window.borrow_mut().size_f32();
let (builder, resources) = {
let mut renderer = self.render.render_builder(window_size);
let crop_to = Rect::new(Point::zero(), Size::new(::std::f32::MAX, ::std::f32::MAX));
self.root.widget_mut().draw(crop_to, &mut renderer);
if self.debug_draw_bounds {
self.root.widget_mut().draw_debug(&mut renderer);
}
(renderer.builder, renderer.resources)
};
self.render.set_display_list(builder, resources, window_size);
self.render.generate_frame();
}
// Call after drawing
pub(super) fn update(&mut self) {
self.render.update(self.window.borrow_mut().size_u32());
let window = self.window.borrow_mut();
window.swap_buffers();
}
pub fn widgets_bfs(&self) -> WidgetsBfs {
WidgetsBfs::new(self.get_root())
}
pub fn widgets_under_cursor(&mut self, point: Point) -> WidgetsUnderCursor {
WidgetsUnderCursor::new(point, self.get_root())
}
/// Find the first widget under the cursor, i.e. the last one to be drawn that is under the cursor
pub fn widget_under_cursor(&mut self, point: Point) -> Option<WidgetRef> {
self.widgets_under_cursor(point).next()
}
fn handle_widget_event(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) -> bool {
let handled = widget_ref.trigger_event(self, type_id, data);
if widget_ref.has_updated() {
self.needs_redraw = true;
widget_ref.set_updated(false);
}
handled
}
pub(super) fn handle_event(&mut self, address: Target, type_id: TypeId, data: &Any) {
match address {
Target::Root => {
let root = self.get_root();
self.handle_widget_event(root, type_id, data);
}
Target::Widget(widget_ref) => {
self.handle_widget_event(widget_ref, type_id, data);
}
Target::SubTree(widget_ref) => {
self.handle_event_subtree(widget_ref, type_id, data);
}
Target::BubbleUp(widget_ref) => {
let mut maybe_widget_ref = Some(widget_ref);
while let Some(widget_ref) = maybe_widget_ref {
if self.handle_widget_event(widget_ref.clone(), type_id, data) {
break;
}
maybe_widget_ref = widget_ref.parent();
}
}
}
}
fn handle_event_subtree(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) {
self.handle_widget_event(widget_ref.clone(), type_id, data);
let children = &widget_ref.children();
for child in children {
self.handle_event_subtree(child.clone(), type_id, data);
}
}
pub fn set_debug_draw_bounds(&mut self, debug_draw_bounds: bool) {
self.debug_draw_bounds = debug_draw_bounds;
self.redraw();
}
pub fn debug_widget_positions(&self) {
println!("WIDGET POSITIONS");
for widget_ref in self.widgets_bfs() {
let bounds = widget_ref.bounds();
let name = widget_ref.name();
println!("{:?} {:?}", name, bounds);
}
}
}
#[derive(Clone)]
pub struct RegisterWidget(pub WidgetRef);
#[derive(Clone)]
pub struct RemoveWidget(pub WidgetRef);
impl App {
pub fn add_ui_handlers(&mut self) {
self.add_handler_fn(|event: &RegisterWidget, args| {
let event = event.clone();
let RegisterWidget(widget_ref) = event;
args.ui.widget_map.insert(widget_ref.id(), widget_ref.clone());
});
self.add_handler_fn(|event: &RemoveWidget, args| {
let event = event.clone();
let RemoveWidget(widget_ref) = event;
args.ui.solver.remove_layout(widget_ref.id().0);
args.ui.check_layout_changes();
args.ui.widget_map.remove(&widget_ref.id());
});
}
}
pub struct WidgetAttachedEvent;
pub struct WidgetDetachedEvent;
pub struct ChildAttachedEvent(pub WidgetId, pub LayoutVars);
pub enum ChildrenUpdatedEvent {
Added(WidgetRef),
Removed(WidgetRef),
}
pub struct WidgetsUnderCursor {
point: Point,
dfs: WidgetsDfsPostReverse,
}
impl WidgetsUnderCursor {
fn new(point: Point, root: WidgetRef) -> Self {
WidgetsUnderCursor {
point: point,
dfs: WidgetsDfsPostReverse::new(root),
}
}
}
impl Iterator for WidgetsUnderCursor {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
for widget_ref in self.dfs.by_ref() {
let widget = &widget_ref.widget();
if widget.is_under_cursor(self.point) |
}
None
}
}
// Iterates in reverse of draw order, that is, depth first post order,
// with siblings in reverse of insertion order
struct WidgetsDfsPostReverse {
stack: Vec<WidgetRef>,
discovered: HashSet<WidgetRef>,
finished: HashSet<WidgetRef>,
}
impl WidgetsDfsPostReverse {
fn new(root: WidgetRef) -> Self {
WidgetsDfsPostReverse {
stack: vec![root],
discovered: HashSet::new(),
finished: HashSet::new(),
}
}
}
impl Iterator for WidgetsDfsPostReverse {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
while let Some(widget_ref) = self.stack.last().cloned() {
if self.discovered.insert(widget_ref.clone()) {
for child in &widget_ref.children() {
self.stack.push(child.clone());
}
} else {
self.stack.pop();
if self.finished.insert(widget_ref.clone()) {
return Some(widget_ref.clone());
}
}
}
None
}
}
pub struct WidgetsBfs {
queue: VecDeque<WidgetRef>,
}
impl WidgetsBfs {
fn new(root: WidgetRef) -> Self {
let mut queue = VecDeque::new();
queue.push_front(root);
WidgetsBfs { queue: queue }
}
}
impl Iterator for WidgetsBfs {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
if let Some(widget_ref) = self.queue.pop_front() {
for child in &widget_ref.children() {
self.queue.push_back(child.clone());
}
Some(widget_ref)
} else {
None
}
}
}
| {
return Some(widget_ref.clone());
} | conditional_block |
ui.rs | use std::collections::{HashSet, HashMap, VecDeque};
use std::any::{Any, TypeId};
use std::rc::Rc;
use std::cell::RefCell;
use cassowary::Constraint;
use cassowary::strength::*;
use glutin;
use window::Window;
use app::App;
use widget::{WidgetRef, WidgetBuilder};
use layout::{LimnSolver, LayoutChanged, LayoutVars, ExactFrame};
use layout::constraint::*;
use geometry::{Point, Rect, Size};
use resources::WidgetId;
use event::Target;
use render::WebRenderContext;
/// If true, the constraint that matches the root layout size to the window size
/// is required. This can be useful for debugging but can result in panics from resizing the window.
const WINDOW_CONSTRAINT_REQUIRED: bool = false;
pub struct Ui {
pub(crate) root: WidgetRef,
widget_map: HashMap<WidgetId, WidgetRef>,
pub(crate) solver: LimnSolver,
pub(crate) render: WebRenderContext,
needs_redraw: bool,
should_close: bool,
debug_draw_bounds: bool,
window: Rc<RefCell<Window>>,
window_constraints: Vec<Constraint>,
}
impl Ui {
pub(super) fn new(mut window: Window, events_loop: &glutin::EventsLoop) -> Self {
let mut root = WidgetBuilder::new("window");
root.layout().set_container(ExactFrame);
root.layout().add(top_left(Point::zero()));
if !WINDOW_CONSTRAINT_REQUIRED {
let mut root_layout = root.layout();
root_layout.edit_right().strength(REQUIRED - 1.0);
root_layout.edit_bottom().strength(REQUIRED - 1.0);
}
let render = WebRenderContext::new(&mut window, events_loop);
Ui {
widget_map: HashMap::new(),
root: root.into(),
solver: LimnSolver::new(),
render: render,
needs_redraw: true,
should_close: false,
debug_draw_bounds: false,
window: Rc::new(RefCell::new(window)),
window_constraints: Vec::new(),
}
}
pub fn get_widget(&self, widget_id: WidgetId) -> Option<WidgetRef> {
self.widget_map.get(&widget_id).map(|widget| widget.clone())
}
pub fn get_root(&self) -> WidgetRef {
self.root.clone()
}
pub fn event<T: 'static>(&self, data: T) {
self.get_root().event(data);
}
pub fn close(&mut self) {
self.should_close = true;
}
pub(super) fn should_close(&self) -> bool {
self.should_close
}
pub(super) fn resize_window_to_fit(&mut self) {
let window_dims = self.get_root_dims();
self.window.borrow_mut().resize(window_dims.width as u32, window_dims.height as u32);
}
pub fn get_root_dims(&self) -> Size {
let root = self.get_root();
let mut dims = root.bounds().size;
// use min size to prevent window size from being set to 0 (X crashes)
dims.width = f32::max(100.0, dims.width);
dims.height = f32::max(100.0, dims.height);
dims
}
pub(super) fn window_resized(&mut self, window_dims: Size) {
let window_size = self.window.borrow_mut().size_u32();
self.render.window_resized(window_size);
let mut root = self.get_root();
if WINDOW_CONSTRAINT_REQUIRED {
let window_constraints = root.layout().create_constraint(size(window_dims));
{
let window_constraints = window_constraints.clone();
root.update_layout(|layout| {
for constraint in self.window_constraints.drain(..) {
layout.remove_constraint(constraint);
}
layout.add(window_constraints);
});
}
self.window_constraints = window_constraints;
} else {
root.update_layout(|layout| {
layout.edit_right().set(window_dims.width);
layout.edit_bottom().set(window_dims.height);
});
}
self.needs_redraw = true;
}
pub fn check_layout_changes(&mut self) {
let changes = self.solver.fetch_changes();
debug!("layout has {} changes", changes.len());
if !changes.is_empty() {
self.event(LayoutChanged(changes));
}
}
pub fn redraw(&mut self) {
self.needs_redraw = true;
}
pub fn needs_redraw(&self) -> bool {
self.needs_redraw
}
pub(super) fn draw_if_needed(&mut self) {
if self.needs_redraw {
self.draw();
self.needs_redraw = false;
}
}
fn draw(&mut self) {
let window_size = self.window.borrow_mut().size_f32();
let (builder, resources) = {
let mut renderer = self.render.render_builder(window_size);
let crop_to = Rect::new(Point::zero(), Size::new(::std::f32::MAX, ::std::f32::MAX));
self.root.widget_mut().draw(crop_to, &mut renderer);
if self.debug_draw_bounds {
self.root.widget_mut().draw_debug(&mut renderer);
}
(renderer.builder, renderer.resources)
};
self.render.set_display_list(builder, resources, window_size);
self.render.generate_frame();
}
// Call after drawing
pub(super) fn update(&mut self) {
self.render.update(self.window.borrow_mut().size_u32());
let window = self.window.borrow_mut();
window.swap_buffers();
}
pub fn widgets_bfs(&self) -> WidgetsBfs {
WidgetsBfs::new(self.get_root())
}
pub fn widgets_under_cursor(&mut self, point: Point) -> WidgetsUnderCursor {
WidgetsUnderCursor::new(point, self.get_root())
}
/// Find the first widget under the cursor, i.e. the last one to be drawn that is under the cursor
pub fn widget_under_cursor(&mut self, point: Point) -> Option<WidgetRef> {
self.widgets_under_cursor(point).next()
}
fn handle_widget_event(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) -> bool {
let handled = widget_ref.trigger_event(self, type_id, data);
if widget_ref.has_updated() {
self.needs_redraw = true;
widget_ref.set_updated(false);
}
handled
}
pub(super) fn handle_event(&mut self, address: Target, type_id: TypeId, data: &Any) {
match address {
Target::Root => {
let root = self.get_root();
self.handle_widget_event(root, type_id, data);
}
Target::Widget(widget_ref) => {
self.handle_widget_event(widget_ref, type_id, data);
}
Target::SubTree(widget_ref) => {
self.handle_event_subtree(widget_ref, type_id, data);
}
Target::BubbleUp(widget_ref) => {
let mut maybe_widget_ref = Some(widget_ref);
while let Some(widget_ref) = maybe_widget_ref {
if self.handle_widget_event(widget_ref.clone(), type_id, data) {
break;
}
maybe_widget_ref = widget_ref.parent();
}
}
}
}
fn handle_event_subtree(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) {
self.handle_widget_event(widget_ref.clone(), type_id, data);
let children = &widget_ref.children();
for child in children {
self.handle_event_subtree(child.clone(), type_id, data);
}
}
pub fn set_debug_draw_bounds(&mut self, debug_draw_bounds: bool) {
self.debug_draw_bounds = debug_draw_bounds;
self.redraw();
}
pub fn debug_widget_positions(&self) {
println!("WIDGET POSITIONS");
for widget_ref in self.widgets_bfs() {
let bounds = widget_ref.bounds();
let name = widget_ref.name();
println!("{:?} {:?}", name, bounds);
}
}
}
#[derive(Clone)]
pub struct RegisterWidget(pub WidgetRef);
#[derive(Clone)]
pub struct RemoveWidget(pub WidgetRef);
impl App {
pub fn add_ui_handlers(&mut self) {
self.add_handler_fn(|event: &RegisterWidget, args| {
let event = event.clone();
let RegisterWidget(widget_ref) = event;
args.ui.widget_map.insert(widget_ref.id(), widget_ref.clone());
});
self.add_handler_fn(|event: &RemoveWidget, args| {
let event = event.clone();
let RemoveWidget(widget_ref) = event;
args.ui.solver.remove_layout(widget_ref.id().0);
args.ui.check_layout_changes();
args.ui.widget_map.remove(&widget_ref.id());
});
}
}
pub struct WidgetAttachedEvent;
pub struct WidgetDetachedEvent;
pub struct ChildAttachedEvent(pub WidgetId, pub LayoutVars);
pub enum ChildrenUpdatedEvent {
Added(WidgetRef),
Removed(WidgetRef), | dfs: WidgetsDfsPostReverse,
}
impl WidgetsUnderCursor {
fn new(point: Point, root: WidgetRef) -> Self {
WidgetsUnderCursor {
point: point,
dfs: WidgetsDfsPostReverse::new(root),
}
}
}
impl Iterator for WidgetsUnderCursor {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
for widget_ref in self.dfs.by_ref() {
let widget = &widget_ref.widget();
if widget.is_under_cursor(self.point) {
return Some(widget_ref.clone());
}
}
None
}
}
// Iterates in reverse of draw order, that is, depth first post order,
// with siblings in reverse of insertion order
struct WidgetsDfsPostReverse {
stack: Vec<WidgetRef>,
discovered: HashSet<WidgetRef>,
finished: HashSet<WidgetRef>,
}
impl WidgetsDfsPostReverse {
fn new(root: WidgetRef) -> Self {
WidgetsDfsPostReverse {
stack: vec![root],
discovered: HashSet::new(),
finished: HashSet::new(),
}
}
}
impl Iterator for WidgetsDfsPostReverse {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
while let Some(widget_ref) = self.stack.last().cloned() {
if self.discovered.insert(widget_ref.clone()) {
for child in &widget_ref.children() {
self.stack.push(child.clone());
}
} else {
self.stack.pop();
if self.finished.insert(widget_ref.clone()) {
return Some(widget_ref.clone());
}
}
}
None
}
}
pub struct WidgetsBfs {
queue: VecDeque<WidgetRef>,
}
impl WidgetsBfs {
fn new(root: WidgetRef) -> Self {
let mut queue = VecDeque::new();
queue.push_front(root);
WidgetsBfs { queue: queue }
}
}
impl Iterator for WidgetsBfs {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
if let Some(widget_ref) = self.queue.pop_front() {
for child in &widget_ref.children() {
self.queue.push_back(child.clone());
}
Some(widget_ref)
} else {
None
}
}
} | }
pub struct WidgetsUnderCursor {
point: Point, | random_line_split |
differ_test.go | // Copyright (c) 2018 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package differ
import (
"crypto/sha512"
"fmt"
"github.com/couchbase/gomemcached"
"github.com/stretchr/testify/assert"
"io/ioutil"
"math/rand"
"os"
"sync"
"testing"
"time"
"xdcrDiffer/dcp"
fdp "xdcrDiffer/fileDescriptorPool"
)
const MaxUint64 = ^uint64(0)
const MinUint = 0
var randomOnce sync.Once
func randomString(l int) string {
bytes := make([]byte, l)
for i := 0; i < l; i++ {
bytes[i] = byte(randInt(65, 90))
}
return string(bytes)
}
func randInt(min int, max int) int {
return min + rand.Intn(max-min)
}
// serialize mutation into []byte
// format:
// keyLen - 2 bytes
// key - length specified by keyLen
// seqno - 8 bytes
// revId - 8 bytes
// cas - 8 bytes
// flags - 4 bytes
// expiry - 4 bytes
// opCode - 1 byte
// hash - 64 bytes
func genTestData(regularMutation, colFilters bool) (key string, seqno, revId, cas uint64, flags, expiry uint32, opCode gomemcached.CommandCode, hash [64]byte, ret []byte, colId uint32, filterIds []uint8) {
randomOnce.Do(func() {
rand.Seed(time.Now().UTC().UnixNano())
})
key = randomString(randInt(12, 64))
seqno = rand.Uint64()
revId = rand.Uint64()
cas = rand.Uint64()
flags = rand.Uint32()
expiry = rand.Uint32()
if regularMutation {
opCode = gomemcached.UPR_MUTATION
} else {
opCodeArray := [3]gomemcached.CommandCode{gomemcached.UPR_MUTATION, gomemcached.UPR_DELETION, gomemcached.UPR_EXPIRATION}
opCode = opCodeArray[rand.Uint32()%3]
}
// Note: we don't have the actual body hash, so derive one from the key instead
hash = sha512.Sum512([]byte(key))
if colFilters {
randomLen := uint8(rand.Int() % 8)
for i := uint8(0); i < randomLen; i++ {
filterIds = append(filterIds, i)
}
}
//dataSlice := createDataByteSlice(key, seqno, revId, cas, flags, expiry, opCode, hash, colId, filterIds)
mutationToSerialize := dcp.Mutation{
Vbno: 0,
Key: []byte(key),
Seqno: seqno,
RevId: revId,
Cas: cas,
Flags: flags,
Expiry: expiry,
OpCode: opCode,
Value: []byte(key),
Datatype: 0,
ColId: 0,
ColFiltersMatched: filterIds,
}
dataSlice := mutationToSerialize.Serialize()
return key, seqno, revId, cas, flags, expiry, opCode, hash, dataSlice, colId, filterIds
}
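// The layout comment above genTestData documents the intended record format,
// but the bytes actually written come from dcp.Mutation.Serialize, whose wire
// format is defined in the dcp package. As a hedged illustration only (it
// assumes the documented layout rather than asserting the real wire format),
// one record would occupy 99 bytes of fixed fields plus the key itself:
// 2 (keyLen) + 8 (seqno) + 8 (revId) + 8 (cas) + 4 (flags) + 4 (expiry) + 1 (opCode) + 64 (hash) = 99.
func illustrativeRecordSize(keyLen int) int {
	// Not used by the tests; kept only to make the documented layout concrete.
	return 2 + keyLen + 8 + 8 + 8 + 4 + 4 + 1 + 64
}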
func genMultipleRecords(numOfRecords int) []byte {
var retSlice []byte
for i := 0; i < numOfRecords; i++ {
_, _, _, _, _, _, _, _, record, _, _ := genTestData(true, false)
retSlice = append(retSlice, record...)
}
return retSlice
}
func genSameFiles(numOfRecords int, fileName1, fileName2 string) error {
data := genMultipleRecords(numOfRecords)
err := ioutil.WriteFile(fileName1, data, 0644)
if err != nil {
return err
}
err = ioutil.WriteFile(fileName2, data, 0644)
if err != nil {
return err
}
return nil
}
func genMismatchedFiles(numOfRecords, mismatchCnt int, fileName1, fileName2 string) ([]string, error) {
var mismatchedKeyNames []string
data := genMultipleRecords(numOfRecords - mismatchCnt)
err := ioutil.WriteFile(fileName1, data, 0644)
if err != nil {
return mismatchedKeyNames, err
}
err = ioutil.WriteFile(fileName2, data, 0644)
if err != nil {
return mismatchedKeyNames, err
}
// Now create mismatched entries
f1, err := os.OpenFile(fileName1, os.O_APPEND|os.O_WRONLY, 644)
if err != nil {
return mismatchedKeyNames, err
}
defer f1.Close()
f2, err := os.OpenFile(fileName2, os.O_APPEND|os.O_WRONLY, 644)
if err != nil {
return mismatchedKeyNames, err
}
defer f2.Close()
for i := 0; i < mismatchCnt; i++ {
key, seqno, revId, cas, flags, expiry, opCode, _, oneData, colId, _ := genTestData(true, false)
mismatchedDataMut := &dcp.Mutation{
Vbno: 0,
Key: []byte(key),
Seqno: seqno,
RevId: revId,
Cas: cas,
Flags: flags,
Expiry: expiry,
OpCode: opCode,
Value: []byte(key),
Datatype: 0,
ColId: colId,
ColFiltersMatched: nil,
}
mismatchedData := mismatchedDataMut.Serialize()
_, err = f1.Write(oneData)
if err != nil {
return mismatchedKeyNames, err
}
_, err = f2.Write(mismatchedData)
if err != nil {
return mismatchedKeyNames, err
}
mismatchedKeyNames = append(mismatchedKeyNames, key)
}
return mismatchedKeyNames, nil
}
func verifyMisMatch(mismatchKeys []string, differ *FilesDiffer) bool {
for _, key := range mismatchKeys {
found := false
for _, onePair := range differ.BothExistButMismatch {
if key == onePair[0].Key {
found = true
break
}
}
if !found {
return false
}
}
return true
}
func TestLoader(t *testing.T) {
assert := assert.New(t)
var outputFileTemp string = "/tmp/xdcrDiffer.tmp"
defer os.Remove(outputFileTemp)
key, seqno, _, _, _, _, _, _, data, _, _ := genTestData(true, false)
err := ioutil.WriteFile(outputFileTemp, data, 0644)
assert.Nil(err)
differ := NewFilesDiffer(outputFileTemp, "", nil, nil, nil)
err = differ.file1.LoadFileIntoBuffer()
assert.Nil(err)
assert.Equal(1, len(differ.file1.entries[0]))
assert.Equal(seqno, differ.file1.entries[0][key].Seqno)
assert.Equal(1, len(differ.file1.sortedEntries[0]))
assert.Equal(seqno, differ.file1.sortedEntries[0][0].Seqno)
}
func TestLoaderWithColFilters(t *testing.T) {
assert := assert.New(t)
var outputFileTemp string = "/tmp/xdcrDiffer.tmp"
defer os.Remove(outputFileTemp)
key, _, _, _, _, _, _, _, data, _, filterIds := genTestData(true, true)
err := ioutil.WriteFile(outputFileTemp, data, 0644)
assert.Nil(err)
differ := NewFilesDiffer(outputFileTemp, "", nil, nil, nil)
err = differ.file1.LoadFileIntoBuffer()
assert.Nil(err)
assert.Equal(1, len(differ.file1.entries[0]))
assert.Equal(uint8(len(filterIds)), differ.file1.entries[0][key].ColMigrFilterLen)
for i := 0; i < len(filterIds); i++ {
assert.Equal(filterIds[i], differ.file1.entries[0][key].ColFiltersMatched[i])
}
}
| fmt.Println("============== Test case start: TestLoadSameFile =================")
assert := assert.New(t)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 10000
err := genSameFiles(entries, file1, file2)
assert.Equal(nil, err)
differ := NewFilesDiffer(file1, file2, nil, nil, nil)
assert.NotNil(differ)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.True(len(srcDiffMap) == 0)
assert.True(len(tgtDiffMap) == 0)
differ.PrettyPrintResult()
fmt.Println("============== Test case end: TestLoadSameFile =================")
}
// This test used to work because it used a customized test generator
// But now that is incorrect and the test is no longer valid
func Disabled_TestLoadMismatchedFilesOnly(t *testing.T) {
fmt.Println("============== Test case start: TestLoadMismatchedFilesOnly =================")
assert := assert.New(t)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 10000
numMismatch := 5
keys, err := genMismatchedFiles(entries, numMismatch, file1, file2)
assert.Nil(err)
differ := NewFilesDiffer(file1, file2, nil, nil, nil)
assert.NotNil(differ)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.False(len(srcDiffMap) == 0)
assert.False(len(tgtDiffMap) == 0)
assert.Equal(numMismatch, len(differ.BothExistButMismatch))
assert.True(verifyMisMatch(keys, differ))
assert.Equal(0, len(differ.MissingFromFile1))
assert.Equal(0, len(differ.MissingFromFile2))
differ.PrettyPrintResult()
fmt.Println("============== Test case end: TestLoadMismatchedFilesOnly =================")
}
// This test used to work because it used a customized test generator
// But now that is incorrect and the test is no longer valid
func Disabled_TestLoadMismatchedFilesAndUneven(t *testing.T) {
fmt.Println("============== Test case start: TestLoadMismatchedFilesAndUneven =================")
assert := assert.New(t)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 1000
numMismatch := 5
extraEntries := 2
keys, err := genMismatchedFiles(entries, numMismatch, file1, file2)
assert.Nil(err)
// Add more records to one file
extraSliceOfPizza := genMultipleRecords(extraEntries)
f, err := os.OpenFile(file1, os.O_APPEND|os.O_WRONLY, 644)
assert.Nil(err)
_, err = f.Write(extraSliceOfPizza)
assert.Nil(err)
f.Close()
differ := NewFilesDiffer(file1, file2, nil, nil, nil)
assert.NotNil(differ)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.False(len(srcDiffMap) == 0)
assert.False(len(tgtDiffMap) == 0)
assert.Equal(numMismatch, len(differ.BothExistButMismatch))
assert.True(verifyMisMatch(keys, differ))
assert.Equal(0, len(differ.MissingFromFile1))
assert.Equal(extraEntries, len(differ.MissingFromFile2))
differ.PrettyPrintResult()
fmt.Println("============== Test case start: TestLoadMismatchedFilesAndUneven =================")
}
func TestLoadSameFileWPool(t *testing.T) {
fmt.Println("============== Test case start: TestLoadSameFileWPool =================")
assert := assert.New(t)
fileDescPool := fdp.NewFileDescriptorPool(50)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 10000
err := genSameFiles(entries, file1, file2)
assert.Equal(nil, err)
differ, err := NewFilesDifferWithFDPool(file1, file2, fileDescPool, nil, nil, nil)
assert.NotNil(differ)
assert.Nil(err)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.True(len(srcDiffMap) == 0)
assert.True(len(tgtDiffMap) == 0)
fmt.Println("============== Test case end: TestLoadSameFileWPool =================")
}
func TestNoFilePool(t *testing.T) {
fmt.Println("============== Test case start: TestNoFilePool =================")
assert := assert.New(t)
differDriver := NewDifferDriver("", "", "", "", 2, 2, 0, nil, nil, nil)
assert.NotNil(differDriver)
assert.Nil(differDriver.fileDescPool)
fmt.Println("============== Test case end: TestNoFilePool =================")
} | func TestLoadSameFile(t *testing.T) { | random_line_split |
differ_test.go | // Copyright (c) 2018 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package differ
import (
"crypto/sha512"
"fmt"
"github.com/couchbase/gomemcached"
"github.com/stretchr/testify/assert"
"io/ioutil"
"math/rand"
"os"
"sync"
"testing"
"time"
"xdcrDiffer/dcp"
fdp "xdcrDiffer/fileDescriptorPool"
)
const MaxUint64 = ^uint64(0)
const MinUint = 0
var randomOnce sync.Once
func randomString(l int) string {
bytes := make([]byte, l)
for i := 0; i < l; i++ {
bytes[i] = byte(randInt(65, 90))
}
return string(bytes)
}
func randInt(min int, max int) int {
return min + rand.Intn(max-min)
}
// serialize mutation into []byte
// format:
// keyLen - 2 bytes
// key - length specified by keyLen
// seqno - 8 bytes
// revId - 8 bytes
// cas - 8 bytes
// flags - 4 bytes
// expiry - 4 bytes
// opCode - 1 bytes
// hash - 64 bytes
func genTestData(regularMutation, colFilters bool) (key string, seqno, revId, cas uint64, flags, expiry uint32, opCode gomemcached.CommandCode, hash [64]byte, ret []byte, colId uint32, filterIds []uint8) {
randomOnce.Do(func() {
rand.Seed(time.Now().UTC().UnixNano())
})
key = randomString(randInt(12, 64))
seqno = rand.Uint64()
revId = rand.Uint64()
cas = rand.Uint64()
flags = rand.Uint32()
expiry = rand.Uint32()
if regularMutation {
opCode = gomemcached.UPR_MUTATION
} else {
opCodeArray := [3]gomemcached.CommandCode{gomemcached.UPR_MUTATION, gomemcached.UPR_DELETION, gomemcached.UPR_EXPIRATION}
opCode = opCodeArray[rand.Uint32()%3]
}
// Note: we don't have the real body hash here, so we just hash the key as a stand-in.
hash = sha512.Sum512([]byte(key))
if colFilters {
randomLen := uint8(rand.Int() % 8)
for i := uint8(0); i < randomLen; i++ {
filterIds = append(filterIds, i)
}
}
//dataSlice := createDataByteSlice(key, seqno, revId, cas, flags, expiry, opCode, hash, colId, filterIds)
mutationToSerialize := dcp.Mutation{
Vbno: 0,
Key: []byte(key),
Seqno: seqno,
RevId: revId,
Cas: cas,
Flags: flags,
Expiry: expiry,
OpCode: opCode,
Value: []byte(key),
Datatype: 0,
ColId: 0,
ColFiltersMatched: filterIds,
}
dataSlice := mutationToSerialize.Serialize()
return key, seqno, revId, cas, flags, expiry, opCode, hash, dataSlice, colId, filterIds
}
func genMultipleRecords(numOfRecords int) []byte {
var retSlice []byte
for i := 0; i < numOfRecords; i++ |
return retSlice
}
func genSameFiles(numOfRecords int, fileName1, fileName2 string) error {
data := genMultipleRecords(numOfRecords)
err := ioutil.WriteFile(fileName1, data, 0644)
if err != nil {
return err
}
err = ioutil.WriteFile(fileName2, data, 0644)
if err != nil {
return err
}
return nil
}
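// genMismatchedFiles writes numOfRecords-mismatchCnt identical records to both
// files and then appends mismatchCnt records that are meant to differ between
// the two files, returning the keys of those appended records.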
func genMismatchedFiles(numOfRecords, mismatchCnt int, fileName1, fileName2 string) ([]string, error) {
var mismatchedKeyNames []string
data := genMultipleRecords(numOfRecords - mismatchCnt)
err := ioutil.WriteFile(fileName1, data, 0644)
if err != nil {
return mismatchedKeyNames, err
}
err = ioutil.WriteFile(fileName2, data, 0644)
if err != nil {
return mismatchedKeyNames, err
}
// Now create mismatched entries
f1, err := os.OpenFile(fileName1, os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
return mismatchedKeyNames, err
}
defer f1.Close()
f2, err := os.OpenFile(fileName2, os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
return mismatchedKeyNames, err
}
defer f2.Close()
for i := 0; i < mismatchCnt; i++ {
key, seqno, revId, cas, flags, expiry, opCode, _, oneData, colId, _ := genTestData(true, false)
mismatchedDataMut := &dcp.Mutation{
Vbno: 0,
Key: []byte(key),
Seqno: seqno,
RevId: revId,
Cas: cas,
Flags: flags,
Expiry: expiry,
OpCode: opCode,
Value: []byte(key),
Datatype: 0,
ColId: colId,
ColFiltersMatched: nil,
}
mismatchedData := mismatchedDataMut.Serialize()
_, err = f1.Write(oneData)
if err != nil {
return mismatchedKeyNames, err
}
_, err = f2.Write(mismatchedData)
if err != nil {
return mismatchedKeyNames, err
}
mismatchedKeyNames = append(mismatchedKeyNames, key)
}
return mismatchedKeyNames, nil
}
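// verifyMisMatch reports whether every key in mismatchKeys appears in the
// differ's BothExistButMismatch results.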
func verifyMisMatch(mismatchKeys []string, differ *FilesDiffer) bool {
for _, key := range mismatchKeys {
found := false
for _, onePair := range differ.BothExistButMismatch {
if key == onePair[0].Key {
found = true
break
}
}
if !found {
return false
}
}
return true
}
func TestLoader(t *testing.T) {
assert := assert.New(t)
var outputFileTemp string = "/tmp/xdcrDiffer.tmp"
defer os.Remove(outputFileTemp)
key, seqno, _, _, _, _, _, _, data, _, _ := genTestData(true, false)
err := ioutil.WriteFile(outputFileTemp, data, 0644)
assert.Nil(err)
differ := NewFilesDiffer(outputFileTemp, "", nil, nil, nil)
err = differ.file1.LoadFileIntoBuffer()
assert.Nil(err)
assert.Equal(1, len(differ.file1.entries[0]))
assert.Equal(seqno, differ.file1.entries[0][key].Seqno)
assert.Equal(1, len(differ.file1.sortedEntries[0]))
assert.Equal(seqno, differ.file1.sortedEntries[0][0].Seqno)
}
func TestLoaderWithColFilters(t *testing.T) {
assert := assert.New(t)
var outputFileTemp string = "/tmp/xdcrDiffer.tmp"
defer os.Remove(outputFileTemp)
key, _, _, _, _, _, _, _, data, _, filterIds := genTestData(true, true)
err := ioutil.WriteFile(outputFileTemp, data, 0644)
assert.Nil(err)
differ := NewFilesDiffer(outputFileTemp, "", nil, nil, nil)
err = differ.file1.LoadFileIntoBuffer()
assert.Nil(err)
assert.Equal(1, len(differ.file1.entries[0]))
assert.Equal(uint8(len(filterIds)), differ.file1.entries[0][key].ColMigrFilterLen)
for i := 0; i < len(filterIds); i++ {
assert.Equal(filterIds[i], differ.file1.entries[0][key].ColFiltersMatched[i])
}
}
func TestLoadSameFile(t *testing.T) {
fmt.Println("============== Test case start: TestLoadSameFile =================")
assert := assert.New(t)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 10000
err := genSameFiles(entries, file1, file2)
assert.Equal(nil, err)
differ := NewFilesDiffer(file1, file2, nil, nil, nil)
assert.NotNil(differ)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.True(len(srcDiffMap) == 0)
assert.True(len(tgtDiffMap) == 0)
differ.PrettyPrintResult()
fmt.Println("============== Test case end: TestLoadSameFile =================")
}
// This test used to work because it used a customized test generator
// But now that is incorrect and the test is no longer valid
func Disabled_TestLoadMismatchedFilesOnly(t *testing.T) {
fmt.Println("============== Test case start: TestLoadMismatchedFilesOnly =================")
assert := assert.New(t)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 10000
numMismatch := 5
keys, err := genMismatchedFiles(entries, numMismatch, file1, file2)
assert.Nil(err)
differ := NewFilesDiffer(file1, file2, nil, nil, nil)
assert.NotNil(differ)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.False(len(srcDiffMap) == 0)
assert.False(len(tgtDiffMap) == 0)
assert.Equal(numMismatch, len(differ.BothExistButMismatch))
assert.True(verifyMisMatch(keys, differ))
assert.Equal(0, len(differ.MissingFromFile1))
assert.Equal(0, len(differ.MissingFromFile2))
differ.PrettyPrintResult()
fmt.Println("============== Test case end: TestLoadMismatchedFilesOnly =================")
}
// This test used to work because it used a customized test generator
// But now that is incorrect and the test is no longer valid
func Disabled_TestLoadMismatchedFilesAndUneven(t *testing.T) {
fmt.Println("============== Test case start: TestLoadMismatchedFilesAndUneven =================")
assert := assert.New(t)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 1000
numMismatch := 5
extraEntries := 2
keys, err := genMismatchedFiles(entries, numMismatch, file1, file2)
assert.Nil(err)
// Add more records to one file
extraSliceOfPizza := genMultipleRecords(extraEntries)
f, err := os.OpenFile(file1, os.O_APPEND|os.O_WRONLY, 0644)
assert.Nil(err)
_, err = f.Write(extraSliceOfPizza)
assert.Nil(err)
f.Close()
differ := NewFilesDiffer(file1, file2, nil, nil, nil)
assert.NotNil(differ)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.False(len(srcDiffMap) == 0)
assert.False(len(tgtDiffMap) == 0)
assert.Equal(numMismatch, len(differ.BothExistButMismatch))
assert.True(verifyMisMatch(keys, differ))
assert.Equal(0, len(differ.MissingFromFile1))
assert.Equal(extraEntries, len(differ.MissingFromFile2))
differ.PrettyPrintResult()
fmt.Println("============== Test case start: TestLoadMismatchedFilesAndUneven =================")
}
func TestLoadSameFileWPool(t *testing.T) {
fmt.Println("============== Test case start: TestLoadSameFileWPool =================")
assert := assert.New(t)
fileDescPool := fdp.NewFileDescriptorPool(50)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 10000
err := genSameFiles(entries, file1, file2)
assert.Equal(nil, err)
differ, err := NewFilesDifferWithFDPool(file1, file2, fileDescPool, nil, nil, nil)
assert.NotNil(differ)
assert.Nil(err)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.True(len(srcDiffMap) == 0)
assert.True(len(tgtDiffMap) == 0)
fmt.Println("============== Test case end: TestLoadSameFileWPool =================")
}
func TestNoFilePool(t *testing.T) {
fmt.Println("============== Test case start: TestNoFilePool =================")
assert := assert.New(t)
differDriver := NewDifferDriver("", "", "", "", 2, 2, 0, nil, nil, nil)
assert.NotNil(differDriver)
assert.Nil(differDriver.fileDescPool)
fmt.Println("============== Test case end: TestNoFilePool =================")
}
| {
_, _, _, _, _, _, _, _, record, _, _ := genTestData(true, false)
retSlice = append(retSlice, record...)
} | conditional_block |
differ_test.go | // Copyright (c) 2018 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package differ
import (
"crypto/sha512"
"fmt"
"github.com/couchbase/gomemcached"
"github.com/stretchr/testify/assert"
"io/ioutil"
"math/rand"
"os"
"sync"
"testing"
"time"
"xdcrDiffer/dcp"
fdp "xdcrDiffer/fileDescriptorPool"
)
const MaxUint64 = ^uint64(0)
const MinUint = 0
var randomOnce sync.Once
func randomString(l int) string {
bytes := make([]byte, l)
for i := 0; i < l; i++ {
bytes[i] = byte(randInt(65, 90))
}
return string(bytes)
}
func randInt(min int, max int) int {
return min + rand.Intn(max-min)
}
// serialize mutation into []byte
// format:
// keyLen - 2 bytes
// key - length specified by keyLen
// seqno - 8 bytes
// revId - 8 bytes
// cas - 8 bytes
// flags - 4 bytes
// expiry - 4 bytes
// opCode - 1 bytes
// hash - 64 bytes
func genTestData(regularMutation, colFilters bool) (key string, seqno, revId, cas uint64, flags, expiry uint32, opCode gomemcached.CommandCode, hash [64]byte, ret []byte, colId uint32, filterIds []uint8) {
randomOnce.Do(func() {
rand.Seed(time.Now().UTC().UnixNano())
})
key = randomString(randInt(12, 64))
seqno = rand.Uint64()
revId = rand.Uint64()
cas = rand.Uint64()
flags = rand.Uint32()
expiry = rand.Uint32()
if regularMutation {
opCode = gomemcached.UPR_MUTATION
} else {
opCodeArray := [3]gomemcached.CommandCode{gomemcached.UPR_MUTATION, gomemcached.UPR_DELETION, gomemcached.UPR_EXPIRATION}
opCode = opCodeArray[rand.Uint32()%3]
}
// Note: we don't have the real body hash here, so we just hash the key as a stand-in.
hash = sha512.Sum512([]byte(key))
if colFilters {
randomLen := uint8(rand.Int() % 8)
for i := uint8(0); i < randomLen; i++ {
filterIds = append(filterIds, i)
}
}
//dataSlice := createDataByteSlice(key, seqno, revId, cas, flags, expiry, opCode, hash, colId, filterIds)
mutationToSerialize := dcp.Mutation{
Vbno: 0,
Key: []byte(key),
Seqno: seqno,
RevId: revId,
Cas: cas,
Flags: flags,
Expiry: expiry,
OpCode: opCode,
Value: []byte(key),
Datatype: 0,
ColId: 0,
ColFiltersMatched: filterIds,
}
dataSlice := mutationToSerialize.Serialize()
return key, seqno, revId, cas, flags, expiry, opCode, hash, dataSlice, colId, filterIds
}
func genMultipleRecords(numOfRecords int) []byte {
var retSlice []byte
for i := 0; i < numOfRecords; i++ {
_, _, _, _, _, _, _, _, record, _, _ := genTestData(true, false)
retSlice = append(retSlice, record...)
}
return retSlice
}
func genSameFiles(numOfRecords int, fileName1, fileName2 string) error {
data := genMultipleRecords(numOfRecords)
err := ioutil.WriteFile(fileName1, data, 0644)
if err != nil {
return err
}
err = ioutil.WriteFile(fileName2, data, 0644)
if err != nil {
return err
}
return nil
}
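// genMismatchedFiles writes numOfRecords-mismatchCnt identical records to both
// files and then appends mismatchCnt records that are meant to differ between
// the two files, returning the keys of those appended records.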
func genMismatchedFiles(numOfRecords, mismatchCnt int, fileName1, fileName2 string) ([]string, error) {
var mismatchedKeyNames []string
data := genMultipleRecords(numOfRecords - mismatchCnt)
err := ioutil.WriteFile(fileName1, data, 0644)
if err != nil {
return mismatchedKeyNames, err
}
err = ioutil.WriteFile(fileName2, data, 0644)
if err != nil {
return mismatchedKeyNames, err
}
// Now create mismatched entries
f1, err := os.OpenFile(fileName1, os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
return mismatchedKeyNames, err
}
defer f1.Close()
f2, err := os.OpenFile(fileName2, os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
return mismatchedKeyNames, err
}
defer f2.Close()
for i := 0; i < mismatchCnt; i++ {
key, seqno, revId, cas, flags, expiry, opCode, _, oneData, colId, _ := genTestData(true, false)
mismatchedDataMut := &dcp.Mutation{
Vbno: 0,
Key: []byte(key),
Seqno: seqno,
RevId: revId,
Cas: cas,
Flags: flags,
Expiry: expiry,
OpCode: opCode,
Value: []byte(key),
Datatype: 0,
ColId: colId,
ColFiltersMatched: nil,
}
mismatchedData := mismatchedDataMut.Serialize()
_, err = f1.Write(oneData)
if err != nil {
return mismatchedKeyNames, err
}
_, err = f2.Write(mismatchedData)
if err != nil {
return mismatchedKeyNames, err
}
mismatchedKeyNames = append(mismatchedKeyNames, key)
}
return mismatchedKeyNames, nil
}
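// verifyMisMatch reports whether every key in mismatchKeys appears in the
// differ's BothExistButMismatch results.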
func verifyMisMatch(mismatchKeys []string, differ *FilesDiffer) bool {
for _, key := range mismatchKeys {
found := false
for _, onePair := range differ.BothExistButMismatch {
if key == onePair[0].Key {
found = true
break
}
}
if !found {
return false
}
}
return true
}
func | (t *testing.T) {
assert := assert.New(t)
var outputFileTemp string = "/tmp/xdcrDiffer.tmp"
defer os.Remove(outputFileTemp)
key, seqno, _, _, _, _, _, _, data, _, _ := genTestData(true, false)
err := ioutil.WriteFile(outputFileTemp, data, 0644)
assert.Nil(err)
differ := NewFilesDiffer(outputFileTemp, "", nil, nil, nil)
err = differ.file1.LoadFileIntoBuffer()
assert.Nil(err)
assert.Equal(1, len(differ.file1.entries[0]))
assert.Equal(seqno, differ.file1.entries[0][key].Seqno)
assert.Equal(1, len(differ.file1.sortedEntries[0]))
assert.Equal(seqno, differ.file1.sortedEntries[0][0].Seqno)
}
func TestLoaderWithColFilters(t *testing.T) {
assert := assert.New(t)
var outputFileTemp string = "/tmp/xdcrDiffer.tmp"
defer os.Remove(outputFileTemp)
key, _, _, _, _, _, _, _, data, _, filterIds := genTestData(true, true)
err := ioutil.WriteFile(outputFileTemp, data, 0644)
assert.Nil(err)
differ := NewFilesDiffer(outputFileTemp, "", nil, nil, nil)
err = differ.file1.LoadFileIntoBuffer()
assert.Nil(err)
assert.Equal(1, len(differ.file1.entries[0]))
assert.Equal(uint8(len(filterIds)), differ.file1.entries[0][key].ColMigrFilterLen)
for i := 0; i < len(filterIds); i++ {
assert.Equal(filterIds[i], differ.file1.entries[0][key].ColFiltersMatched[i])
}
}
func TestLoadSameFile(t *testing.T) {
fmt.Println("============== Test case start: TestLoadSameFile =================")
assert := assert.New(t)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 10000
err := genSameFiles(entries, file1, file2)
assert.Equal(nil, err)
differ := NewFilesDiffer(file1, file2, nil, nil, nil)
assert.NotNil(differ)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.True(len(srcDiffMap) == 0)
assert.True(len(tgtDiffMap) == 0)
differ.PrettyPrintResult()
fmt.Println("============== Test case end: TestLoadSameFile =================")
}
// This test used to work because it used a customized test generator
// But now that is incorrect and the test is no longer valid
func Disabled_TestLoadMismatchedFilesOnly(t *testing.T) {
fmt.Println("============== Test case start: TestLoadMismatchedFilesOnly =================")
assert := assert.New(t)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 10000
numMismatch := 5
keys, err := genMismatchedFiles(entries, numMismatch, file1, file2)
assert.Nil(err)
differ := NewFilesDiffer(file1, file2, nil, nil, nil)
assert.NotNil(differ)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.False(len(srcDiffMap) == 0)
assert.False(len(tgtDiffMap) == 0)
assert.Equal(numMismatch, len(differ.BothExistButMismatch))
assert.True(verifyMisMatch(keys, differ))
assert.Equal(0, len(differ.MissingFromFile1))
assert.Equal(0, len(differ.MissingFromFile2))
differ.PrettyPrintResult()
fmt.Println("============== Test case end: TestLoadMismatchedFilesOnly =================")
}
// This test used to work because it used a customized test generator
// But now that is incorrect and the test is no longer valid
func Disabled_TestLoadMismatchedFilesAndUneven(t *testing.T) {
fmt.Println("============== Test case start: TestLoadMismatchedFilesAndUneven =================")
assert := assert.New(t)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 1000
numMismatch := 5
extraEntries := 2
keys, err := genMismatchedFiles(entries, numMismatch, file1, file2)
assert.Nil(err)
// Add more records to one file
extraSliceOfPizza := genMultipleRecords(extraEntries)
f, err := os.OpenFile(file1, os.O_APPEND|os.O_WRONLY, 0644)
assert.Nil(err)
_, err = f.Write(extraSliceOfPizza)
assert.Nil(err)
f.Close()
differ := NewFilesDiffer(file1, file2, nil, nil, nil)
assert.NotNil(differ)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.False(len(srcDiffMap) == 0)
assert.False(len(tgtDiffMap) == 0)
assert.Equal(numMismatch, len(differ.BothExistButMismatch))
assert.True(verifyMisMatch(keys, differ))
assert.Equal(0, len(differ.MissingFromFile1))
assert.Equal(extraEntries, len(differ.MissingFromFile2))
differ.PrettyPrintResult()
fmt.Println("============== Test case start: TestLoadMismatchedFilesAndUneven =================")
}
func TestLoadSameFileWPool(t *testing.T) {
fmt.Println("============== Test case start: TestLoadSameFileWPool =================")
assert := assert.New(t)
fileDescPool := fdp.NewFileDescriptorPool(50)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 10000
err := genSameFiles(entries, file1, file2)
assert.Equal(nil, err)
differ, err := NewFilesDifferWithFDPool(file1, file2, fileDescPool, nil, nil, nil)
assert.NotNil(differ)
assert.Nil(err)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.True(len(srcDiffMap) == 0)
assert.True(len(tgtDiffMap) == 0)
fmt.Println("============== Test case end: TestLoadSameFileWPool =================")
}
func TestNoFilePool(t *testing.T) {
fmt.Println("============== Test case start: TestNoFilePool =================")
assert := assert.New(t)
differDriver := NewDifferDriver("", "", "", "", 2, 2, 0, nil, nil, nil)
assert.NotNil(differDriver)
assert.Nil(differDriver.fileDescPool)
fmt.Println("============== Test case end: TestNoFilePool =================")
}
| TestLoader | identifier_name |
differ_test.go | // Copyright (c) 2018 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package differ
import (
"crypto/sha512"
"fmt"
"github.com/couchbase/gomemcached"
"github.com/stretchr/testify/assert"
"io/ioutil"
"math/rand"
"os"
"sync"
"testing"
"time"
"xdcrDiffer/dcp"
fdp "xdcrDiffer/fileDescriptorPool"
)
const MaxUint64 = ^uint64(0)
const MinUint = 0
var randomOnce sync.Once
func randomString(l int) string {
bytes := make([]byte, l)
for i := 0; i < l; i++ {
bytes[i] = byte(randInt(65, 90))
}
return string(bytes)
}
func randInt(min int, max int) int {
return min + rand.Intn(max-min)
}
// serialize mutation into []byte
// format:
// keyLen - 2 bytes
// key - length specified by keyLen
// seqno - 8 bytes
// revId - 8 bytes
// cas - 8 bytes
// flags - 4 bytes
// expiry - 4 bytes
// opCode - 1 bytes
// hash - 64 bytes
func genTestData(regularMutation, colFilters bool) (key string, seqno, revId, cas uint64, flags, expiry uint32, opCode gomemcached.CommandCode, hash [64]byte, ret []byte, colId uint32, filterIds []uint8) {
randomOnce.Do(func() {
rand.Seed(time.Now().UTC().UnixNano())
})
key = randomString(randInt(12, 64))
seqno = rand.Uint64()
revId = rand.Uint64()
cas = rand.Uint64()
flags = rand.Uint32()
expiry = rand.Uint32()
if regularMutation {
opCode = gomemcached.UPR_MUTATION
} else {
opCodeArray := [3]gomemcached.CommandCode{gomemcached.UPR_MUTATION, gomemcached.UPR_DELETION, gomemcached.UPR_EXPIRATION}
opCode = opCodeArray[rand.Uint32()%3]
}
// Note: we don't have the real body hash here, so we just hash the key as a stand-in.
hash = sha512.Sum512([]byte(key))
if colFilters {
randomLen := uint8(rand.Int() % 8)
for i := uint8(0); i < randomLen; i++ {
filterIds = append(filterIds, i)
}
}
//dataSlice := createDataByteSlice(key, seqno, revId, cas, flags, expiry, opCode, hash, colId, filterIds)
mutationToSerialize := dcp.Mutation{
Vbno: 0,
Key: []byte(key),
Seqno: seqno,
RevId: revId,
Cas: cas,
Flags: flags,
Expiry: expiry,
OpCode: opCode,
Value: []byte(key),
Datatype: 0,
ColId: 0,
ColFiltersMatched: filterIds,
}
dataSlice := mutationToSerialize.Serialize()
return key, seqno, revId, cas, flags, expiry, opCode, hash, dataSlice, colId, filterIds
}
func genMultipleRecords(numOfRecords int) []byte {
var retSlice []byte
for i := 0; i < numOfRecords; i++ {
_, _, _, _, _, _, _, _, record, _, _ := genTestData(true, false)
retSlice = append(retSlice, record...)
}
return retSlice
}
func genSameFiles(numOfRecords int, fileName1, fileName2 string) error {
data := genMultipleRecords(numOfRecords)
err := ioutil.WriteFile(fileName1, data, 0644)
if err != nil {
return err
}
err = ioutil.WriteFile(fileName2, data, 0644)
if err != nil {
return err
}
return nil
}
func genMismatchedFiles(numOfRecords, mismatchCnt int, fileName1, fileName2 string) ([]string, error) |
func verifyMisMatch(mismatchKeys []string, differ *FilesDiffer) bool {
for _, key := range mismatchKeys {
found := false
for _, onePair := range differ.BothExistButMismatch {
if key == onePair[0].Key {
found = true
break
}
}
if !found {
return false
}
}
return true
}
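// TestLoader writes a single serialized record to disk and checks that the
// differ loads it and indexes it by key.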
func TestLoader(t *testing.T) {
assert := assert.New(t)
var outputFileTemp string = "/tmp/xdcrDiffer.tmp"
defer os.Remove(outputFileTemp)
key, seqno, _, _, _, _, _, _, data, _, _ := genTestData(true, false)
err := ioutil.WriteFile(outputFileTemp, data, 0644)
assert.Nil(err)
differ := NewFilesDiffer(outputFileTemp, "", nil, nil, nil)
err = differ.file1.LoadFileIntoBuffer()
assert.Nil(err)
assert.Equal(1, len(differ.file1.entries[0]))
assert.Equal(seqno, differ.file1.entries[0][key].Seqno)
assert.Equal(1, len(differ.file1.sortedEntries[0]))
assert.Equal(seqno, differ.file1.sortedEntries[0][0].Seqno)
}
func TestLoaderWithColFilters(t *testing.T) {
assert := assert.New(t)
var outputFileTemp string = "/tmp/xdcrDiffer.tmp"
defer os.Remove(outputFileTemp)
key, _, _, _, _, _, _, _, data, _, filterIds := genTestData(true, true)
err := ioutil.WriteFile(outputFileTemp, data, 0644)
assert.Nil(err)
differ := NewFilesDiffer(outputFileTemp, "", nil, nil, nil)
err = differ.file1.LoadFileIntoBuffer()
assert.Nil(err)
assert.Equal(1, len(differ.file1.entries[0]))
assert.Equal(uint8(len(filterIds)), differ.file1.entries[0][key].ColMigrFilterLen)
for i := 0; i < len(filterIds); i++ {
assert.Equal(filterIds[i], differ.file1.entries[0][key].ColFiltersMatched[i])
}
}
func TestLoadSameFile(t *testing.T) {
fmt.Println("============== Test case start: TestLoadSameFile =================")
assert := assert.New(t)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 10000
err := genSameFiles(entries, file1, file2)
assert.Equal(nil, err)
differ := NewFilesDiffer(file1, file2, nil, nil, nil)
assert.NotNil(differ)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.True(len(srcDiffMap) == 0)
assert.True(len(tgtDiffMap) == 0)
differ.PrettyPrintResult()
fmt.Println("============== Test case end: TestLoadSameFile =================")
}
// This test used to work because it used a customized test generator
// But now that is incorrect and the test is no longer valid
func Disabled_TestLoadMismatchedFilesOnly(t *testing.T) {
fmt.Println("============== Test case start: TestLoadMismatchedFilesOnly =================")
assert := assert.New(t)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 10000
numMismatch := 5
keys, err := genMismatchedFiles(entries, numMismatch, file1, file2)
assert.Nil(err)
differ := NewFilesDiffer(file1, file2, nil, nil, nil)
assert.NotNil(differ)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.False(len(srcDiffMap) == 0)
assert.False(len(tgtDiffMap) == 0)
assert.Equal(numMismatch, len(differ.BothExistButMismatch))
assert.True(verifyMisMatch(keys, differ))
assert.Equal(0, len(differ.MissingFromFile1))
assert.Equal(0, len(differ.MissingFromFile2))
differ.PrettyPrintResult()
fmt.Println("============== Test case end: TestLoadMismatchedFilesOnly =================")
}
// This test used to work because it used a customized test generator
// But now that is incorrect and the test is no longer valid
func Disabled_TestLoadMismatchedFilesAndUneven(t *testing.T) {
fmt.Println("============== Test case start: TestLoadMismatchedFilesAndUneven =================")
assert := assert.New(t)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 1000
numMismatch := 5
extraEntries := 2
keys, err := genMismatchedFiles(entries, numMismatch, file1, file2)
assert.Nil(err)
// Add more records to one file
extraSliceOfPizza := genMultipleRecords(extraEntries)
f, err := os.OpenFile(file1, os.O_APPEND|os.O_WRONLY, 0644)
assert.Nil(err)
_, err = f.Write(extraSliceOfPizza)
assert.Nil(err)
f.Close()
differ := NewFilesDiffer(file1, file2, nil, nil, nil)
assert.NotNil(differ)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.False(len(srcDiffMap) == 0)
assert.False(len(tgtDiffMap) == 0)
assert.Equal(numMismatch, len(differ.BothExistButMismatch))
assert.True(verifyMisMatch(keys, differ))
assert.Equal(0, len(differ.MissingFromFile1))
assert.Equal(extraEntries, len(differ.MissingFromFile2))
differ.PrettyPrintResult()
fmt.Println("============== Test case start: TestLoadMismatchedFilesAndUneven =================")
}
func TestLoadSameFileWPool(t *testing.T) {
fmt.Println("============== Test case start: TestLoadSameFileWPool =================")
assert := assert.New(t)
fileDescPool := fdp.NewFileDescriptorPool(50)
file1 := "/tmp/test1.bin"
file2 := "/tmp/test2.bin"
defer os.Remove(file1)
defer os.Remove(file2)
entries := 10000
err := genSameFiles(entries, file1, file2)
assert.Equal(nil, err)
differ, err := NewFilesDifferWithFDPool(file1, file2, fileDescPool, nil, nil, nil)
assert.NotNil(differ)
assert.Nil(err)
srcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()
assert.True(len(srcDiffMap) == 0)
assert.True(len(tgtDiffMap) == 0)
fmt.Println("============== Test case end: TestLoadSameFileWPool =================")
}
func TestNoFilePool(t *testing.T) {
fmt.Println("============== Test case start: TestNoFilePool =================")
assert := assert.New(t)
differDriver := NewDifferDriver("", "", "", "", 2, 2, 0, nil, nil, nil)
assert.NotNil(differDriver)
assert.Nil(differDriver.fileDescPool)
fmt.Println("============== Test case end: TestNoFilePool =================")
}
| {
var mismatchedKeyNames []string
data := genMultipleRecords(numOfRecords - mismatchCnt)
err := ioutil.WriteFile(fileName1, data, 0644)
if err != nil {
return mismatchedKeyNames, err
}
err = ioutil.WriteFile(fileName2, data, 0644)
if err != nil {
return mismatchedKeyNames, err
}
// Now create mismatched entries
f1, err := os.OpenFile(fileName1, os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
return mismatchedKeyNames, err
}
defer f1.Close()
f2, err := os.OpenFile(fileName2, os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
return mismatchedKeyNames, err
}
defer f2.Close()
for i := 0; i < mismatchCnt; i++ {
key, seqno, revId, cas, flags, expiry, opCode, _, oneData, colId, _ := genTestData(true, false)
mismatchedDataMut := &dcp.Mutation{
Vbno: 0,
Key: []byte(key),
Seqno: seqno,
RevId: revId,
Cas: cas,
Flags: flags,
Expiry: expiry,
OpCode: opCode,
Value: []byte(key),
Datatype: 0,
ColId: colId,
ColFiltersMatched: nil,
}
mismatchedData := mismatchedDataMut.Serialize()
_, err = f1.Write(oneData)
if err != nil {
return mismatchedKeyNames, err
}
_, err = f2.Write(mismatchedData)
if err != nil {
return mismatchedKeyNames, err
}
mismatchedKeyNames = append(mismatchedKeyNames, key)
}
return mismatchedKeyNames, nil
} | identifier_body |
ndt-server_test.go | package main
import (
"context"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"runtime"
"sync"
"testing"
"time"
"github.com/m-lab/go/osx"
"github.com/m-lab/go/prometheusx/promtest"
"github.com/m-lab/go/rtx"
pipe "gopkg.in/m-lab/pipe.v3"
)
// getOpenPorts grabs n ephemeral ports by starting throwaway HTTP test servers
// and closing them when it returns. The freed ports will hopefully stay unused
// for the next few microseconds, long enough for the unit tests to bind them.
func getOpenPorts(n int) []string {
ports := []string{}
for i := 0; i < n; i++ {
ts := httptest.NewServer(http.NewServeMux())
defer ts.Close()
u, err := url.Parse(ts.URL)
rtx.Must(err, "Could not parse url to local server:", ts.URL)
ports = append(ports, ":"+u.Port())
}
return ports
}
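// countFiles returns the number of regular (non-directory) files under dir.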
func countFiles(dir string) int {
count := 0
filepath.Walk(dir, func(_path string, info os.FileInfo, _err error) error {
if info != nil && !info.IsDir() { // info can be nil when Walk reports an error for this path
count++
}
return nil
})
return count
}
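// setupMain creates self-signed certificates in a temp directory, picks free
// ports, and exports the environment variables that main() reads its flags
// from. The returned cleanup function removes the temp directory and restores the environment.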
func setupMain() func() {
cleanups := []func(){}
// Create self-signed certs in a temp directory.
dir, err := ioutil.TempDir("", "TestNdtServerMain")
rtx.Must(err, "Could not create tempdir")
certFile := "cert.pem"
keyFile := "key.pem"
rtx.Must(
pipe.Run(
pipe.Script("Create private key and self-signed certificate",
pipe.Exec("openssl", "genrsa", "-out", keyFile),
pipe.Exec("openssl", "req", "-new", "-x509", "-key", keyFile, "-out",
certFile, "-days", "2", "-subj",
"/C=XX/ST=State/L=Locality/O=Org/OU=Unit/CN=Name/[email protected]"),
),
),
"Failed to generate server key and certs")
// Set up the command-line args via environment variables:
ports := getOpenPorts(4)
for _, ev := range []struct{ key, value string }{
{"NDT7_ADDR", ports[0]},
{"NDT5_ADDR", ports[1]},
{"NDT5_WS_ADDR", ports[2]},
{"NDT5_WSS_ADDR", ports[3]},
{"CERT", certFile},
{"KEY", keyFile},
{"DATADIR", dir},
} {
cleanups = append(cleanups, osx.MustSetenv(ev.key, ev.value))
}
return func() {
os.RemoveAll(dir)
for _, f := range cleanups |
}
}
func Test_ContextCancelsMain(t *testing.T) {
// Set up certs and the environment vars for the commandline.
cleanup := setupMain()
defer cleanup()
// Set up the global context for main()
ctx, cancel = context.WithCancel(context.Background())
before := runtime.NumGoroutine()
// Run main, but cancel it very soon after starting.
go func() {
time.Sleep(1 * time.Second)
cancel()
}()
// If this doesn't run forever, then canceling the context causes main to exit.
main()
// A sleep has been added here to allow all completed goroutines to exit.
time.Sleep(100 * time.Millisecond)
// Make sure main() doesn't leak goroutines.
after := runtime.NumGoroutine()
if before != after {
t.Errorf("After running NumGoroutines changed: %d to %d", before, after)
}
}
func TestMetrics(t *testing.T) {
promtest.LintMetrics(t)
}
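// Test_MainIntegrationTest starts the server via main() and runs the reference
// clients (ndt5 TLV, WS, WSS, ndt7, measurement_kit) against it in parallel,
// verifying that each successful run leaves data files in the data directory.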
func Test_MainIntegrationTest(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests take too long")
}
// Set up certs and the environment vars for the commandline.
cleanup := setupMain()
defer cleanup()
// Set up the global context for main()
ctx, cancel = context.WithCancel(context.Background())
defer cancel()
// Get the ports but remove the leading ":"
ndt5Addr := os.Getenv("NDT5_ADDR")[1:]
wsAddr := os.Getenv("NDT5_WS_ADDR")[1:]
wssAddr := os.Getenv("NDT5_WSS_ADDR")[1:]
ndt7Addr := os.Getenv("NDT7_ADDR")[1:]
// Get the datadir
dataDir := os.Getenv("DATADIR")
type testcase struct {
name string
cmd string
// ignoreData's default value (false) will NOT ignore whether data is
// produced. This is good, because it forces tests which ignore their output
// data to explicitly specify this fact.
ignoreData bool
}
tests := []testcase{
// NDT5 TLV-only clients.
{
// NOTE: we must disable the middle-box test in the ndt5 TLV client because it unconditionally expects
// that test to run irrespective of what the server supports.
name: "web100clt (ndt5 TLV)",
cmd: "timeout 45s /bin/web100clt-without-json-support --name localhost --port " + ndt5Addr + " --disablemid",
},
{
name: "libndt-client - ndt5 NDT with JSON, download test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --download",
},
{
name: "libndt-client - ndt5 NDT with JSON, upload test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --upload",
},
// Verify that ndt5 clients don't crash when we agree to only run a subset of the requested tests.
{
name: "Request all tests with web100clt (with JSON)",
cmd: "timeout 45s /bin/web100clt-with-json-support --name localhost --port " + ndt5Addr,
},
// The ndt5 client without JSON support looks like it DOES crash, although
// the exact cause has not been investigated.
// TODO(https://github.com/m-lab/ndt-server/issues/66) - make the following test case pass:
// {
// name: "Request all tests with web100clt (ndt5 TLV)",
// cmd: "timeout 45s /bin/web100clt-without-json-support --name localhost --port " + ndt5Addr,
// },
// Test libndt JSON clients
{
name: "libndt-client - ndt5 NDT with JSON, download test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --json --download",
},
{
name: "libndt-client - ndt5 NDT with JSON, upload test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --json --upload",
},
{
name: "libndt-client - ndt7, download test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt7Addr + " --ndt7 --download",
// Ignore data because Travis does not support BBR. Once Travis does support BBR, delete this.
ignoreData: true,
},
// Test ndt5 raw JSON clients
{
name: "web100clt (with JSON), no MID or SFW",
cmd: "timeout 45s /bin/web100clt-with-json-support --name localhost --port " + ndt5Addr,
},
// Test ndt5 WS clients connected to the HTTP port
{
name: "Upload & Download ndt5 WS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr + " --protocol=ws --tests=22",
},
{
name: "Upload ndt5 WS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr + " --protocol=ws --tests=18",
},
{
name: "Download ndt5 WS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr + " --protocol=ws --tests=20",
},
// Test ndt5 WS clients connecting to the raw port
{
name: "Connect ndt5 WS (upload and download) to RAW port",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + ndt5Addr + " --protocol=ws --tests=22",
},
{
// Start both tests, but kill the client during the upload test.
// This causes the server to wait for a test that never comes. After the
// timeout, the server should have cleaned up all outstanding goroutines.
name: "Upload & Download ndt5 WS with S2C Timeout",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr +
" --protocol=ws --abort-c2s-early --tests=22 & " +
"sleep 25",
},
// Test WSS clients with the ndt5 protocol.
{
name: "Upload ndt5 WSS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr + " --protocol=wss --acceptinvalidcerts --tests=18",
},
{
name: "Download ndt5 WSS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr + " --protocol=wss --acceptinvalidcerts --tests=20",
},
{
name: "Upload & Download ndt5 WSS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr + " --protocol=wss --acceptinvalidcerts --tests=22",
},
{
// Start both tests, but kill the client during the upload test.
// This causes the server to wait for a test that never comes. After the
// timeout, the server should have cleaned up all outstanding goroutines.
name: "Upload & Download ndt5 WSS with S2C Timeout",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr +
" --protocol=wss --acceptinvalidcerts --abort-c2s-early --tests=22 & " +
"sleep 25",
},
// Test NDT7 clients
{
name: "Test the ndt7 protocol",
cmd: "timeout 45s ndt7-client -no-verify -hostname localhost:" + ndt7Addr,
// Ignore data because Travis does not support BBR. Once Travis does support BBR, delete this.
ignoreData: true,
},
// Measurement Kit client
{
name: "measurement_kit testing ndt5 protocol",
cmd: "timeout 45s measurement_kit --no-bouncer --no-collector --no-json --no-geoip ndt -p " + ndt5Addr + " localhost",
},
}
go main()
time.Sleep(1 * time.Second) // Give main a little time to grab all the ports and start listening.
log.Printf(
"ndt5 plain port: %s\nndt5 ws port: %s\nndt5 wss port: %s\nndt7 port: %s\n",
ndt5Addr, wsAddr, wssAddr, ndt7Addr)
wg := sync.WaitGroup{}
// Run every test in parallel (the server must handle parallel tests just fine)
for _, c := range tests {
wg.Add(1)
func(tc testcase) {
go t.Run(tc.name, func(t *testing.T) {
defer wg.Done()
preFileCount := countFiles(dataDir)
stdout, stderr, err := pipe.DividedOutput(pipe.Script(tc.name, pipe.System(tc.cmd)))
if err != nil {
t.Errorf("ERROR %s gave error %q (Command: %s)\nStdout: %s\nStderr: %s\n",
tc.name, err, tc.cmd, string(stdout), string(stderr))
}
postFileCount := countFiles(dataDir)
if !tc.ignoreData {
// Verify that at least one data file was produced while the test ran.
if postFileCount <= preFileCount {
t.Error("No files produced. Before test:", preFileCount, "files. After test:", postFileCount, "files.")
}
}
t.Logf("%s (command=%q) has completed successfully", tc.name, tc.cmd)
})
}(c)
}
wg.Wait()
}
| {
f()
} | conditional_block |
ndt-server_test.go | package main
import (
"context"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"runtime"
"sync"
"testing"
"time"
"github.com/m-lab/go/osx"
"github.com/m-lab/go/prometheusx/promtest"
"github.com/m-lab/go/rtx"
pipe "gopkg.in/m-lab/pipe.v3"
)
// getOpenPorts grabs n ephemeral ports by starting throwaway HTTP test servers
// and closing them when it returns. The freed ports will hopefully stay unused
// for the next few microseconds, long enough for the unit tests to bind them.
func getOpenPorts(n int) []string {
ports := []string{}
for i := 0; i < n; i++ {
ts := httptest.NewServer(http.NewServeMux())
defer ts.Close()
u, err := url.Parse(ts.URL)
rtx.Must(err, "Could not parse url to local server:", ts.URL)
ports = append(ports, ":"+u.Port())
}
return ports
}
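// countFiles returns the number of regular (non-directory) files under dir.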
func countFiles(dir string) int {
count := 0
filepath.Walk(dir, func(_path string, info os.FileInfo, _err error) error {
if info != nil && !info.IsDir() { // info can be nil when Walk reports an error for this path
count++
}
return nil
})
return count
}
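// setupMain creates self-signed certificates in a temp directory, picks free
// ports, and exports the environment variables that main() reads its flags
// from. The returned cleanup function removes the temp directory and restores the environment.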
func setupMain() func() {
cleanups := []func(){}
// Create self-signed certs in a temp directory.
dir, err := ioutil.TempDir("", "TestNdtServerMain")
rtx.Must(err, "Could not create tempdir")
certFile := "cert.pem"
keyFile := "key.pem"
rtx.Must(
pipe.Run(
pipe.Script("Create private key and self-signed certificate",
pipe.Exec("openssl", "genrsa", "-out", keyFile),
pipe.Exec("openssl", "req", "-new", "-x509", "-key", keyFile, "-out",
certFile, "-days", "2", "-subj",
"/C=XX/ST=State/L=Locality/O=Org/OU=Unit/CN=Name/[email protected]"),
),
),
"Failed to generate server key and certs")
// Set up the command-line args via environment variables:
ports := getOpenPorts(4)
for _, ev := range []struct{ key, value string }{
{"NDT7_ADDR", ports[0]},
{"NDT5_ADDR", ports[1]},
{"NDT5_WS_ADDR", ports[2]},
{"NDT5_WSS_ADDR", ports[3]},
{"CERT", certFile},
{"KEY", keyFile},
{"DATADIR", dir},
} {
cleanups = append(cleanups, osx.MustSetenv(ev.key, ev.value))
}
return func() {
os.RemoveAll(dir)
for _, f := range cleanups {
f()
}
}
}
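// Test_ContextCancelsMain verifies that canceling the global context causes
// main() to exit promptly without leaking goroutines.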
func Test_ContextCancelsMain(t *testing.T) {
// Set up certs and the environment vars for the commandline.
cleanup := setupMain()
defer cleanup()
// Set up the global context for main()
ctx, cancel = context.WithCancel(context.Background())
before := runtime.NumGoroutine()
// Run main, but cancel it very soon after starting.
go func() {
time.Sleep(1 * time.Second)
cancel()
}()
// If this doesn't run forever, then canceling the context causes main to exit.
main()
// A sleep has been added here to allow all completed goroutines to exit.
time.Sleep(100 * time.Millisecond)
// Make sure main() doesn't leak goroutines.
after := runtime.NumGoroutine()
if before != after {
t.Errorf("After running NumGoroutines changed: %d to %d", before, after)
}
}
func TestMetrics(t *testing.T) {
promtest.LintMetrics(t)
}
func Test_MainIntegrationTest(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests take too long")
}
// Set up certs and the environment vars for the commandline.
cleanup := setupMain()
defer cleanup()
// Set up the global context for main()
ctx, cancel = context.WithCancel(context.Background())
defer cancel()
// Get the ports but remove the leading ":"
ndt5Addr := os.Getenv("NDT5_ADDR")[1:]
wsAddr := os.Getenv("NDT5_WS_ADDR")[1:]
wssAddr := os.Getenv("NDT5_WSS_ADDR")[1:]
ndt7Addr := os.Getenv("NDT7_ADDR")[1:]
// Get the datadir
dataDir := os.Getenv("DATADIR")
type testcase struct {
name string
cmd string
// ignoreData's default value (false) will NOT ignore whether data is
// produced. This is good, because it forces tests which ignore their output
// data to explicitly specify this fact.
ignoreData bool
}
tests := []testcase{
// NDT5 TLV-only clients.
{
// NOTE: we must disable the middle-box test in the ndt5 TLV client because it unconditionally expects
// that test to run irrespective of what the server supports.
name: "web100clt (ndt5 TLV)",
cmd: "timeout 45s /bin/web100clt-without-json-support --name localhost --port " + ndt5Addr + " --disablemid",
},
{
name: "libndt-client - ndt5 NDT with JSON, download test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --download",
},
{
name: "libndt-client - ndt5 NDT with JSON, upload test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --upload",
},
// Verify that ndt5 clients don't crash when we agree to only run a subset of the requested tests.
{
name: "Request all tests with web100clt (with JSON)",
cmd: "timeout 45s /bin/web100clt-with-json-support --name localhost --port " + ndt5Addr,
},
// The ndt5 client without JSON support looks like it DOES crash, although
// the exact cause has not been investigated.
// TODO(https://github.com/m-lab/ndt-server/issues/66) - make the following test case pass:
// {
// name: "Request all tests with web100clt (ndt5 TLV)",
// cmd: "timeout 45s /bin/web100clt-without-json-support --name localhost --port " + ndt5Addr,
// },
// Test libndt JSON clients
{
name: "libndt-client - ndt5 NDT with JSON, download test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --json --download",
},
{
name: "libndt-client - ndt5 NDT with JSON, upload test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --json --upload",
},
{
name: "libndt-client - ndt7, download test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt7Addr + " --ndt7 --download",
// Ignore data because Travis does not support BBR. Once Travis does support BBR, delete this.
ignoreData: true,
},
// Test ndt5 raw JSON clients
{
name: "web100clt (with JSON), no MID or SFW",
cmd: "timeout 45s /bin/web100clt-with-json-support --name localhost --port " + ndt5Addr,
},
// Test ndt5 WS clients connected to the HTTP port
{
name: "Upload & Download ndt5 WS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr + " --protocol=ws --tests=22",
},
{
name: "Upload ndt5 WS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr + " --protocol=ws --tests=18",
},
{
name: "Download ndt5 WS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr + " --protocol=ws --tests=20",
},
// Test ndt5 WS clients connecting to the raw port
{
name: "Connect ndt5 WS (upload and download) to RAW port",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + ndt5Addr + " --protocol=ws --tests=22",
},
{
// Start both tests, but kill the client during the upload test.
// This causes the server to wait for a test that never comes. After the
// timeout, the server should have cleaned up all outstanding goroutines.
name: "Upload & Download ndt5 WS with S2C Timeout",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr +
" --protocol=ws --abort-c2s-early --tests=22 & " +
"sleep 25",
},
// Test WSS clients with the ndt5 protocol.
{
name: "Upload ndt5 WSS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr + " --protocol=wss --acceptinvalidcerts --tests=18",
},
{
name: "Download ndt5 WSS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr + " --protocol=wss --acceptinvalidcerts --tests=20",
},
{
name: "Upload & Download ndt5 WSS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr + " --protocol=wss --acceptinvalidcerts --tests=22",
},
{
// Start both tests, but kill the client during the upload test.
// This causes the server to wait for a test that never comes. After the
// timeout, the server should have cleaned up all outstanding goroutines. | cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr +
" --protocol=wss --acceptinvalidcerts --abort-c2s-early --tests=22 & " +
"sleep 25",
},
// Test NDT7 clients
{
name: "Test the ndt7 protocol",
cmd: "timeout 45s ndt7-client -no-verify -hostname localhost:" + ndt7Addr,
// Ignore data because Travis does not support BBR. Once Travis does support BBR, delete this.
ignoreData: true,
},
// Measurement Kit client
{
name: "measurement_kit testing ndt5 protocol",
cmd: "timeout 45s measurement_kit --no-bouncer --no-collector --no-json --no-geoip ndt -p " + ndt5Addr + " localhost",
},
}
go main()
time.Sleep(1 * time.Second) // Give main a little time to grab all the ports and start listening.
log.Printf(
"ndt5 plain port: %s\nndt5 ws port: %s\nndt5 wss port: %s\nndt7 port: %s\n",
ndt5Addr, wsAddr, wssAddr, ndt7Addr)
wg := sync.WaitGroup{}
// Run every test in parallel (the server must handle parallel tests just fine)
for _, c := range tests {
wg.Add(1)
func(tc testcase) {
go t.Run(tc.name, func(t *testing.T) {
defer wg.Done()
preFileCount := countFiles(dataDir)
stdout, stderr, err := pipe.DividedOutput(pipe.Script(tc.name, pipe.System(tc.cmd)))
if err != nil {
t.Errorf("ERROR %s gave error %q (Command: %s)\nStdout: %s\nStderr: %s\n",
tc.name, err, tc.cmd, string(stdout), string(stderr))
}
postFileCount := countFiles(dataDir)
if !tc.ignoreData {
// Verify that at least one data file was produced while the test ran.
if postFileCount <= preFileCount {
t.Error("No files produced. Before test:", preFileCount, "files. After test:", postFileCount, "files.")
}
}
t.Logf("%s (command=%q) has completed successfully", tc.name, tc.cmd)
})
}(c)
}
wg.Wait()
} | name: "Upload & Download ndt5 WSS with S2C Timeout", | random_line_split |
ndt-server_test.go | package main
import (
"context"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"runtime"
"sync"
"testing"
"time"
"github.com/m-lab/go/osx"
"github.com/m-lab/go/prometheusx/promtest"
"github.com/m-lab/go/rtx"
pipe "gopkg.in/m-lab/pipe.v3"
)
// getOpenPorts grabs n ephemeral ports by starting throwaway HTTP test servers
// and closing them when it returns. The freed ports will hopefully stay unused
// for the next few microseconds, long enough for the unit tests to bind them.
func getOpenPorts(n int) []string {
ports := []string{}
for i := 0; i < n; i++ {
ts := httptest.NewServer(http.NewServeMux())
defer ts.Close()
u, err := url.Parse(ts.URL)
rtx.Must(err, "Could not parse url to local server:", ts.URL)
ports = append(ports, ":"+u.Port())
}
return ports
}
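// countFiles returns the number of regular (non-directory) files under dir.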
func countFiles(dir string) int {
count := 0
filepath.Walk(dir, func(_path string, info os.FileInfo, _err error) error {
if info != nil && !info.IsDir() { // info can be nil when Walk reports an error for this path
count++
}
return nil
})
return count
}
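// setupMain creates self-signed certificates in a temp directory, picks free
// ports, and exports the environment variables that main() reads its flags
// from. The returned cleanup function removes the temp directory and restores the environment.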
func setupMain() func() {
cleanups := []func(){}
// Create self-signed certs in a temp directory.
dir, err := ioutil.TempDir("", "TestNdtServerMain")
rtx.Must(err, "Could not create tempdir")
certFile := "cert.pem"
keyFile := "key.pem"
rtx.Must(
pipe.Run(
pipe.Script("Create private key and self-signed certificate",
pipe.Exec("openssl", "genrsa", "-out", keyFile),
pipe.Exec("openssl", "req", "-new", "-x509", "-key", keyFile, "-out",
certFile, "-days", "2", "-subj",
"/C=XX/ST=State/L=Locality/O=Org/OU=Unit/CN=Name/[email protected]"),
),
),
"Failed to generate server key and certs")
// Set up the command-line args via environment variables:
ports := getOpenPorts(4)
for _, ev := range []struct{ key, value string }{
{"NDT7_ADDR", ports[0]},
{"NDT5_ADDR", ports[1]},
{"NDT5_WS_ADDR", ports[2]},
{"NDT5_WSS_ADDR", ports[3]},
{"CERT", certFile},
{"KEY", keyFile},
{"DATADIR", dir},
} {
cleanups = append(cleanups, osx.MustSetenv(ev.key, ev.value))
}
return func() {
os.RemoveAll(dir)
for _, f := range cleanups {
f()
}
}
}
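// Test_ContextCancelsMain verifies that canceling the global context causes
// main() to exit promptly without leaking goroutines.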
func Test_ContextCancelsMain(t *testing.T) {
// Set up certs and the environment vars for the commandline.
cleanup := setupMain()
defer cleanup()
// Set up the global context for main()
ctx, cancel = context.WithCancel(context.Background())
before := runtime.NumGoroutine()
// Run main, but cancel it very soon after starting.
go func() {
time.Sleep(1 * time.Second)
cancel()
}()
// If this doesn't run forever, then canceling the context causes main to exit.
main()
// A sleep has been added here to allow all completed goroutines to exit.
time.Sleep(100 * time.Millisecond)
// Make sure main() doesn't leak goroutines.
after := runtime.NumGoroutine()
if before != after {
t.Errorf("After running NumGoroutines changed: %d to %d", before, after)
}
}
func TestMetrics(t *testing.T) |
func Test_MainIntegrationTest(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests take too long")
}
// Set up certs and the environment vars for the commandline.
cleanup := setupMain()
defer cleanup()
// Set up the global context for main()
ctx, cancel = context.WithCancel(context.Background())
defer cancel()
// Get the ports but remove the leading ":"
ndt5Addr := os.Getenv("NDT5_ADDR")[1:]
wsAddr := os.Getenv("NDT5_WS_ADDR")[1:]
wssAddr := os.Getenv("NDT5_WSS_ADDR")[1:]
ndt7Addr := os.Getenv("NDT7_ADDR")[1:]
// Get the datadir
dataDir := os.Getenv("DATADIR")
type testcase struct {
name string
cmd string
// ignoreData's default value (false) will NOT ignore whether data is
// produced. This is good, because it forces tests which ignore their output
// data to explicitly specify this fact.
ignoreData bool
}
tests := []testcase{
// NDT5 TLV-only clients.
{
// NOTE: we must disable the middle-box test in the ndt5 TLV client because it unconditionally expects
// that test to run irrespective of what the server supports.
name: "web100clt (ndt5 TLV)",
cmd: "timeout 45s /bin/web100clt-without-json-support --name localhost --port " + ndt5Addr + " --disablemid",
},
{
name: "libndt-client - ndt5 NDT with JSON, download test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --download",
},
{
name: "libndt-client - ndt5 NDT with JSON, upload test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --upload",
},
// Verify that ndt5 clients don't crash when we agree to only run a subset of the requested tests.
{
name: "Request all tests with web100clt (with JSON)",
cmd: "timeout 45s /bin/web100clt-with-json-support --name localhost --port " + ndt5Addr,
},
// The ndt5 client without JSON support looks like it DOES crash, although
// the exact cause has not been investigated.
// TODO(https://github.com/m-lab/ndt-server/issues/66) - make the following test case pass:
// {
// name: "Request all tests with web100clt (ndt5 TLV)",
// cmd: "timeout 45s /bin/web100clt-without-json-support --name localhost --port " + ndt5Addr,
// },
// Test libndt JSON clients
{
name: "libndt-client - ndt5 NDT with JSON, download test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --json --download",
},
{
name: "libndt-client - ndt5 NDT with JSON, upload test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --json --upload",
},
{
name: "libndt-client - ndt7, download test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt7Addr + " --ndt7 --download",
// Ignore data because Travis does not support BBR. Once Travis does support BBR, delete this.
ignoreData: true,
},
// Test ndt5 raw JSON clients
{
name: "web100clt (with JSON), no MID or SFW",
cmd: "timeout 45s /bin/web100clt-with-json-support --name localhost --port " + ndt5Addr,
},
// Test ndt5 WS clients connected to the HTTP port
{
name: "Upload & Download ndt5 WS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr + " --protocol=ws --tests=22",
},
{
name: "Upload ndt5 WS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr + " --protocol=ws --tests=18",
},
{
name: "Download ndt5 WS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr + " --protocol=ws --tests=20",
},
// Test ndt5 WS clients connecting to the raw port
{
name: "Connect ndt5 WS (upload and download) to RAW port",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + ndt5Addr + " --protocol=ws --tests=22",
},
{
// Start both tests, but kill the client during the upload test.
// This causes the server to wait for a test that never comes. After the
// timeout, the server should have cleaned up all outstanding goroutines.
name: "Upload & Download ndt5 WS with S2C Timeout",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr +
" --protocol=ws --abort-c2s-early --tests=22 & " +
"sleep 25",
},
// Test WSS clients with the ndt5 protocol.
{
name: "Upload ndt5 WSS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr + " --protocol=wss --acceptinvalidcerts --tests=18",
},
{
name: "Download ndt5 WSS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr + " --protocol=wss --acceptinvalidcerts --tests=20",
},
{
name: "Upload & Download ndt5 WSS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr + " --protocol=wss --acceptinvalidcerts --tests=22",
},
{
// Start both tests, but kill the client during the upload test.
// This causes the server to wait for a test that never comes. After the
// timeout, the server should have cleaned up all outstanding goroutines.
name: "Upload & Download ndt5 WSS with S2C Timeout",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr +
" --protocol=wss --acceptinvalidcerts --abort-c2s-early --tests=22 & " +
"sleep 25",
},
// Test NDT7 clients
{
name: "Test the ndt7 protocol",
cmd: "timeout 45s ndt7-client -no-verify -hostname localhost:" + ndt7Addr,
// Ignore data because Travis does not support BBR. Once Travis does support BBR, delete this.
ignoreData: true,
},
// Measurement Kit client
{
name: "measurement_kit testing ndt5 protocol",
cmd: "timeout 45s measurement_kit --no-bouncer --no-collector --no-json --no-geoip ndt -p " + ndt5Addr + " localhost",
},
}
go main()
time.Sleep(1 * time.Second) // Give main a little time to grab all the ports and start listening.
log.Printf(
"ndt5 plain port: %s\nndt5 ws port: %s\nndt5 wss port: %s\nndt7 port: %s\n",
ndt5Addr, wsAddr, wssAddr, ndt7Addr)
wg := sync.WaitGroup{}
// Run every test in parallel (the server must handle parallel tests just fine)
for _, c := range tests {
wg.Add(1)
func(tc testcase) {
go t.Run(tc.name, func(t *testing.T) {
defer wg.Done()
preFileCount := countFiles(dataDir)
stdout, stderr, err := pipe.DividedOutput(pipe.Script(tc.name, pipe.System(tc.cmd)))
if err != nil {
t.Errorf("ERROR %s gave error %q (Command: %s)\nStdout: %s\nStderr: %s\n",
tc.name, err, tc.cmd, string(stdout), string(stderr))
}
postFileCount := countFiles(dataDir)
if !tc.ignoreData {
// Verify that at least one data file was produced while the test ran.
if postFileCount <= preFileCount {
t.Error("No files produced. Before test:", preFileCount, "files. After test:", postFileCount, "files.")
}
}
t.Logf("%s (command=%q) has completed successfully", tc.name, tc.cmd)
})
}(c)
}
wg.Wait()
}
| {
promtest.LintMetrics(t)
} | identifier_body |
ndt-server_test.go | package main
import (
"context"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"runtime"
"sync"
"testing"
"time"
"github.com/m-lab/go/osx"
"github.com/m-lab/go/prometheusx/promtest"
"github.com/m-lab/go/rtx"
pipe "gopkg.in/m-lab/pipe.v3"
)
// Get a bunch of open ports by briefly binding test servers, then close them.
// Hopefully the ports remain unused for the next few microseconds so that we can
// bind them again in unit tests.
func getOpenPorts(n int) []string {
ports := []string{}
for i := 0; i < n; i++ {
ts := httptest.NewServer(http.NewServeMux())
defer ts.Close()
u, err := url.Parse(ts.URL)
rtx.Must(err, "Could not parse url to local server:", ts.URL)
ports = append(ports, ":"+u.Port())
}
return ports
}
func countFiles(dir string) int {
count := 0
filepath.Walk(dir, func(_path string, info os.FileInfo, _err error) error {
if !info.IsDir() {
count++
}
return nil
})
return count
}
func setupMain() func() {
cleanups := []func(){}
// Create self-signed certs in a temp directory.
dir, err := ioutil.TempDir("", "TestNdtServerMain")
rtx.Must(err, "Could not create tempdir")
certFile := "cert.pem"
keyFile := "key.pem"
rtx.Must(
pipe.Run(
pipe.Script("Create private key and self-signed certificate",
pipe.Exec("openssl", "genrsa", "-out", keyFile),
pipe.Exec("openssl", "req", "-new", "-x509", "-key", keyFile, "-out",
certFile, "-days", "2", "-subj",
"/C=XX/ST=State/L=Locality/O=Org/OU=Unit/CN=Name/[email protected]"),
),
),
"Failed to generate server key and certs")
// Set up the command-line args via environment variables:
ports := getOpenPorts(4)
for _, ev := range []struct{ key, value string }{
{"NDT7_ADDR", ports[0]},
{"NDT5_ADDR", ports[1]},
{"NDT5_WS_ADDR", ports[2]},
{"NDT5_WSS_ADDR", ports[3]},
{"CERT", certFile},
{"KEY", keyFile},
{"DATADIR", dir},
} {
cleanups = append(cleanups, osx.MustSetenv(ev.key, ev.value))
}
return func() {
os.RemoveAll(dir)
for _, f := range cleanups {
f()
}
}
}
func Test_ContextCancelsMain(t *testing.T) {
// Set up certs and the environment vars for the commandline.
cleanup := setupMain()
defer cleanup()
// Set up the global context for main()
ctx, cancel = context.WithCancel(context.Background())
before := runtime.NumGoroutine()
// Run main, but cancel it very soon after starting.
go func() {
time.Sleep(1 * time.Second)
cancel()
}()
// If this doesn't run forever, then canceling the context causes main to exit.
main()
// A sleep has been added here to allow all completed goroutines to exit.
time.Sleep(100 * time.Millisecond)
// Make sure main() doesn't leak goroutines.
after := runtime.NumGoroutine()
if before != after {
t.Errorf("After running NumGoroutines changed: %d to %d", before, after)
}
}
func | (t *testing.T) {
promtest.LintMetrics(t)
}
func Test_MainIntegrationTest(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests take too long")
}
// Set up certs and the environment vars for the commandline.
cleanup := setupMain()
defer cleanup()
// Set up the global context for main()
ctx, cancel = context.WithCancel(context.Background())
defer cancel()
// Get the ports but remove the leading ":"
ndt5Addr := os.Getenv("NDT5_ADDR")[1:]
wsAddr := os.Getenv("NDT5_WS_ADDR")[1:]
wssAddr := os.Getenv("NDT5_WSS_ADDR")[1:]
ndt7Addr := os.Getenv("NDT7_ADDR")[1:]
// Get the datadir
dataDir := os.Getenv("DATADIR")
type testcase struct {
name string
cmd string
// ignoreData's default value (false) will NOT ignore whether data is
// produced. This is good, because it forces tests which ignore their output
// data to explicitly specify this fact.
ignoreData bool
}
tests := []testcase{
// NDT5 TLV-only clients.
{
// NOTE: we must disable the middle-box test in the ndt5 TLV client because it unconditionally expects
// that test to run irrespective of what the server supports.
name: "web100clt (ndt5 TLV)",
cmd: "timeout 45s /bin/web100clt-without-json-support --name localhost --port " + ndt5Addr + " --disablemid",
},
{
name: "libndt-client - ndt5 NDT with JSON, download test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --download",
},
{
name: "libndt-client - ndt5 NDT with JSON, upload test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --upload",
},
// Verify that ndt5 clients don't crash when we agree to only run a subset of the requested tests.
{
name: "Request all tests with web100clt (with JSON)",
cmd: "timeout 45s /bin/web100clt-with-json-support --name localhost --port " + ndt5Addr,
},
// The ndt5 client without JSON support looks like it DOES crash, although
// the exact cause has not been investigated.
// TODO(https://github.com/m-lab/ndt-server/issues/66) - make the following test case pass:
// {
// name: "Request all tests with web100clt (ndt5 TLV)",
// cmd: "timeout 45s /bin/web100clt-without-json-support --name localhost --port " + ndt5Addr,
// },
// Test libndt JSON clients
{
name: "libndt-client - ndt5 NDT with JSON, download test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --json --download",
},
{
name: "libndt-client - ndt5 NDT with JSON, upload test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt5Addr + " --json --upload",
},
{
name: "libndt-client - ndt7, download test",
cmd: "timeout 45s /bin/libndt-client localhost --port " + ndt7Addr + " --ndt7 --download",
// Ignore data because Travis does not support BBR. Once Travis does support BBR, delete this.
ignoreData: true,
},
// Test ndt5 raw JSON clients
{
name: "web100clt (with JSON), no MID or SFW",
cmd: "timeout 45s /bin/web100clt-with-json-support --name localhost --port " + ndt5Addr,
},
// Test ndt5 WS clients connected to the HTTP port
{
name: "Upload & Download ndt5 WS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr + " --protocol=ws --tests=22",
},
{
name: "Upload ndt5 WS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr + " --protocol=ws --tests=18",
},
{
name: "Download ndt5 WS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr + " --protocol=ws --tests=20",
},
// Test ndt5 WS clients connecting to the raw port
{
name: "Connect ndt5 WS (upload and download) to RAW port",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + ndt5Addr + " --protocol=ws --tests=22",
},
{
// Start both tests, but kill the client during the upload test.
// This causes the server to wait for a test that never comes. After the
// timeout, the server should have cleaned up all outstanding goroutines.
name: "Upload & Download ndt5 WS with S2C Timeout",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wsAddr +
" --protocol=ws --abort-c2s-early --tests=22 & " +
"sleep 25",
},
// Test WSS clients with the ndt5 protocol.
{
name: "Upload ndt5 WSS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr + " --protocol=wss --acceptinvalidcerts --tests=18",
},
{
name: "Download ndt5 WSS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr + " --protocol=wss --acceptinvalidcerts --tests=20",
},
{
name: "Upload & Download ndt5 WSS",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr + " --protocol=wss --acceptinvalidcerts --tests=22",
},
{
// Start both tests, but kill the client during the upload test.
// This causes the server to wait for a test that never comes. After the
// timeout, the server should have cleaned up all outstanding goroutines.
name: "Upload & Download ndt5 WSS with S2C Timeout",
cmd: "timeout 45s node ./testdata/unittest_client.js --server=localhost " +
" --port=" + wssAddr +
" --protocol=wss --acceptinvalidcerts --abort-c2s-early --tests=22 & " +
"sleep 25",
},
// Test NDT7 clients
{
name: "Test the ndt7 protocol",
cmd: "timeout 45s ndt7-client -no-verify -hostname localhost:" + ndt7Addr,
// Ignore data because Travis does not support BBR. Once Travis does support BBR, delete this.
ignoreData: true,
},
// Measurement Kit client
{
name: "measurement_kit testing ndt5 protocol",
cmd: "timeout 45s measurement_kit --no-bouncer --no-collector --no-json --no-geoip ndt -p " + ndt5Addr + " localhost",
},
}
go main()
time.Sleep(1 * time.Second) // Give main a little time to grab all the ports and start listening.
log.Printf(
"ndt5 plain port: %s\nndt5 ws port: %s\nndt5 wss port: %s\nndt7 port: %s\n",
ndt5Addr, wsAddr, wssAddr, ndt7Addr)
wg := sync.WaitGroup{}
// Run every test in parallel (the server must handle parallel tests just fine)
for _, c := range tests {
wg.Add(1)
func(tc testcase) {
go t.Run(tc.name, func(t *testing.T) {
defer wg.Done()
preFileCount := countFiles(dataDir)
stdout, stderr, err := pipe.DividedOutput(pipe.Script(tc.name, pipe.System(tc.cmd)))
if err != nil {
t.Errorf("ERROR %s gave error %q (Command: %s)\nStdout: %s\nStderr: %s\n",
tc.name, err, tc.cmd, string(stdout), string(stderr))
}
postFileCount := countFiles(dataDir)
if !tc.ignoreData {
// Verify that at least one data file was produced while the test ran.
if postFileCount <= preFileCount {
t.Error("No files produced. Before test:", preFileCount, "files. After test:", postFileCount, "files.")
}
}
t.Logf("%s (command=%q) has completed successfully", tc.name, tc.cmd)
})
}(c)
}
wg.Wait()
}
| TestMetrics | identifier_name |
basic_model.py | import time
import sys
sys.path.insert(0,"/content/FakeNewsPropagation")
import matplotlib
import numpy as np
from sklearn import preprocessing, svm
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import AdaBoostClassifier
#from load_dataset import load_from_nx_graphs
#from construct_sample_features import get_nx_propagation_graphs
#from analysis_util import equal_samples
from centrality import get_degree_centrality,get_closness_centrality,get_betweenness_centrality,get_pagerank
from construct_sample_features import get_TPNF_dataset, get_train_test_split, get_dataset_feature_names
matplotlib.use('agg')
import matplotlib.pyplot as plt
def get_classifier_by_name(classifier_name):
if classifier_name == "GaussianNB":
return GaussianNB()
elif classifier_name == "LogisticRegression":
return LogisticRegression(solver='lbfgs')
elif classifier_name == "DecisionTreeClassifier":
return DecisionTreeClassifier()
elif classifier_name == "RandomForestClassifier":
return RandomForestClassifier(n_estimators=50)
elif classifier_name == "SVM -linear kernel":
return svm.SVC(kernel='linear')
elif classifier_name == "ExtraTreesClassifier":
return ExtraTreesClassifier(n_estimators=100)
elif classifier_name == "AdaBoostClassifier":
return AdaBoostClassifier(n_estimators=100, random_state=0)
else:
return KNeighborsClassifier(n_neighbors=3)
def train_model(classifier_name, X_train, X_test, y_train, y_test):
accuracy_values = []
precision_values = []
recall_values = []
f1_score_values = []
for i in range(6):
classifier_clone = get_classifier_by_name(classifier_name)
classifier_clone.fit(X_train, y_train)
predicted_output = classifier_clone.predict(X_test)
accuracy, precision, recall, f1_score_val = get_metrics(y_test, predicted_output, one_hot_rep=False)
accuracy_values.append(accuracy)
precision_values.append(precision)
recall_values.append(recall)
f1_score_values.append(f1_score_val)
print_metrics(np.mean(accuracy_values), np.mean(precision_values), np.mean(recall_values), np.mean(f1_score_values))
def print_metrics(accuracy, precision, recall, f1_score_val):
print("Accuracy : {}".format(accuracy))
print("Precision : {}".format(precision))
print("Recall : {}".format(recall))
print("F1 : {}".format(f1_score_val))
def get_metrics(target, logits, one_hot_rep=True):
"""
Two numpy one hot arrays
:param target:
:param logits:
:return:
"""
if one_hot_rep:
label = np.argmax(target, axis=1)
predict = np.argmax(logits, axis=1)
else:
label = target
predict = logits
accuracy = accuracy_score(label, predict)
precision = precision_score(label, predict)
recall = recall_score(label, predict)
f1_score_val = f1_score(label, predict)
return accuracy, precision, recall, f1_score_val
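# Illustrative sketch (added for exposition, not part of the original pipeline): shows
# how get_metrics/print_metrics behave on toy binary labels with one_hot_rep=False.
# The arrays below are made up purely for demonstration and this helper is never
# called by the rest of the script.
def _example_get_metrics():
    toy_true = np.array([1, 0, 1, 1, 0])
    toy_pred = np.array([1, 0, 0, 1, 0])
    accuracy, precision, recall, f1_score_val = get_metrics(toy_true, toy_pred, one_hot_rep=False)
    print_metrics(accuracy, precision, recall, f1_score_val)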
def apply_lstm_model(X_train, X_test, y_train, y_test):
print("\n TRAIN AND TEST SHAPES FOR LSTM MODEL IS AS FOLLOWS : ")
X_train = X_train[:,None,:]
X_test = X_test[:,None,:]
print("\n X TRAIN : ",X_train.shape)
print("\n Y TRAIN ",y_train.shape)
print("\n X TEST ",X_test.shape)
print("\n Y TEST ",y_test.shape,"\n")
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.optimizers import Adam
#Initializing the classifier Network
classifier = Sequential()
#Adding the input LSTM network layer
classifier.add(LSTM(128, input_shape=(X_train.shape[1:]), return_sequences=True))
classifier.add(Dropout(0.2))
#Adding a second LSTM network layer
#classifier.add(LSTM(128, input_shape=(X_train.shape[1:]), return_sequences=True))
classifier.add(LSTM(128))
#Adding a dense hidden layer
classifier.add(Dense(64, activation='relu'))
classifier.add(Dropout(0.2))
#Adding the output layer
classifier.add(Dense(10, activation='softmax'))
#Compiling the network
classifier.compile( loss='sparse_categorical_crossentropy',
optimizer=Adam(lr=0.001, decay=1e-6),
metrics=['accuracy'] )
#Fitting the data to the model
classifier.fit(X_train,
y_train,
epochs=300,
validation_data=(X_test, y_test))
test_loss, test_acc = classifier.evaluate(X_test, y_test)
print('Test Loss: {}'.format(test_loss))
print('Test Accuracy: {}'.format(test_acc))
def get_basic_model_results(X_train, X_test, y_train, y_test):
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
print("\n TRAIN AND TEST SHAPES ARE AS FOLLOWS")
print("\n X TRAIN : ",X_train.shape)
print("\n Y TRAIN ",y_train.shape)
print("\n X TEST ",X_test.shape)
print("\n Y TEST ",y_test.shape,"\n")
classifiers = [GaussianNB(), LogisticRegression(), DecisionTreeClassifier(),
RandomForestClassifier(n_estimators=100),
svm.SVC(),ExtraTreesClassifier(n_estimators=100),KNeighborsClassifier(n_neighbors=3),AdaBoostClassifier(n_estimators=100, random_state=0)]
classifier_names = ["GaussianNB", "LogisticRegression", "DecisionTreeClassifier", "RandomForestClassifier",
"SVM -linear kernel","ExtraTreesClassifier","KNeighborsClassifier","AdaBoostClassifier"]
for idx in range(len(classifiers)):
print("======={}=======".format(classifier_names[idx]))
train_model(classifier_names[idx], X_train, X_test, y_train, y_test)
#apply_lstm_model(X_train, X_test, y_train, y_test);
def get_classificaton_results_tpnf(data_dir, news_source, time_interval, use_cache=False):
include_micro = True
include_macro = True
include_structural = True
include_temporal = True
include_linguistic = False
dc=get_degree_centrality("/content/FakeNewsPropagation/data/nx_network_data/nx_network_data", news_source)
cc=get_closness_centrality("/content/FakeNewsPropagation/data/nx_network_data/nx_network_data", news_source)
pr=get_pagerank("/content/FakeNewsPropagation/data/nx_network_data/nx_network_data", news_source)
sample_feature_arr = get_TPNF_dataset(data_dir, news_source, include_micro, include_macro, include_structural,
include_temporal, include_linguistic, time_interval, use_cache=use_cache)
dc = np.array(dc)
cc = np.array(cc)
pr = np.array(pr)
dc_trans = dc.reshape(-1,1)
cc_trans = cc.reshape(-1,1)
pr_trans = pr.reshape(-1,1)
ccpr = np.append(cc_trans, pr_trans, 1)
sample_feature_arra = np.append(sample_feature_arr, dc_trans, 1)
sample_feature_array = np.append(sample_feature_arra, ccpr, 1)
print("Sample feature array dimensions")
print(sample_feature_array.shape, flush=True)
num_samples = int(len(sample_feature_array) / 2)
target_labels = np.concatenate([np.ones(num_samples), np.zeros(num_samples)], axis=0)
X_train, X_test, y_train, y_test = get_train_test_split(sample_feature_array, target_labels)
get_basic_model_results(X_train, X_test, y_train, y_test)
def plot_feature_importances(coef, names):
imp = coef
imp, names = zip(*sorted(zip(imp, names)))
plt.barh(range(len(names)), imp, align='center')
plt.yticks(range(len(names)), names)
plt.savefig('feature_importance.png', bbox_inches='tight')
plt.show()
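# Illustrative usage sketch (added for exposition; this helper is never called in the
# original script): it expects a flat array of per-feature weights, for example the
# coefficients of a linear SVM, where `short_feature_names` is assumed to come from
# get_dataset_feature_names:
#     linear_clf = svm.SVC(kernel='linear').fit(X_train, y_train)
#     plot_feature_importances(linear_clf.coef_[0], short_feature_names)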
def dump_random_forest_feature_importance(data_dir, news_source):
include_micro = True
include_macro = True
include_structural = True
include_temporal = True
include_linguistic = True
sample_feature_array = get_TPNF_dataset(data_dir, news_source, include_micro, include_macro, include_structural,
include_temporal, include_linguistic, use_cache=True)
sample_feature_array = sample_feature_array[:, :-1]
feature_names, short_feature_names = get_dataset_feature_names(include_micro, include_macro, include_structural,
include_temporal, include_linguistic)
feature_names = feature_names[:-1]
short_feature_names = short_feature_names[:-1]
num_samples = int(len(sample_feature_array) / 2)
target_labels = np.concatenate([np.ones(num_samples), np.zeros(num_samples)], axis=0)
X_train, X_test, y_train, y_test = get_train_test_split(sample_feature_array, target_labels)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=100, random_state=0)
forest.fit(X_train, y_train)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X_train.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
matplotlib.rcParams['figure.figsize'] = 5, 2
# Plot the feature importances of the forest
plt.figure()
plt.bar(range(X_train.shape[1]), importances[indices],
color="b", yerr=std[indices], align="center")
plt.xticks(range(X_train.shape[1]), np.array(short_feature_names)[indices], rotation=75, fontsize=9.5)
plt.xlim([-1, X_train.shape[1]])
plt.savefig('{}_feature_importance.png'.format(news_source), bbox_inches='tight')
plt.show()
def get_classificaton_results_tpnf_by_time(news_source: str):
# Time Interval in hours for early-fake news detection
time_intervals = [3, 6, 12, 24, 36, 48, 60, 72, 84, 96]
for time_interval in time_intervals:
print("=============Time Interval : {} ==========".format(time_interval))
start_time = time.time()
get_classificaton_results_tpnf("data/features", news_source, time_interval)
print("\n\n================Execution time - {} ==================================\n".format(
time.time() - start_time))
if __name__ == "__main__": | get_classificaton_results_tpnf("data/features", "politifact", time_interval=None, use_cache=False)
print("\n\n Working on Gossipcop Data \n")
get_classificaton_results_tpnf("data/features", "gossipcop", time_interval=None, use_cache=False)
# Filter the graphs by time interval (for early fake news detection) and get the classification results
# get_classificaton_results_tpnf_by_time("politifact")
# get_classificaton_results_tpnf_by_time("gossipcop") |
#dump_random_forest_feature_importance("data/features", "politifact")
print("\n\n Working on Politifact Data \n") | random_line_split |
basic_model.py | import time
import sys
sys.path.insert(0,"/content/FakeNewsPropagation")
import matplotlib
import numpy as np
from sklearn import preprocessing, svm
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import AdaBoostClassifier
#from load_dataset import load_from_nx_graphs
#from construct_sample_features import get_nx_propagation_graphs
#from analysis_util import equal_samples
from centrality import get_degree_centrality,get_closness_centrality,get_betweenness_centrality,get_pagerank
from construct_sample_features import get_TPNF_dataset, get_train_test_split, get_dataset_feature_names
matplotlib.use('agg')
import matplotlib.pyplot as plt
def get_classifier_by_name(classifier_name):
if classifier_name == "GaussianNB":
return GaussianNB()
elif classifier_name == "LogisticRegression":
return LogisticRegression(solver='lbfgs')
elif classifier_name == "DecisionTreeClassifier":
return DecisionTreeClassifier()
elif classifier_name == "RandomForestClassifier":
return RandomForestClassifier(n_estimators=50)
elif classifier_name == "SVM -linear kernel":
return svm.SVC(kernel='linear')
elif classifier_name == "ExtraTreesClassifier":
return ExtraTreesClassifier(n_estimators=100)
elif classifier_name == "AdaBoostClassifier":
return AdaBoostClassifier(n_estimators=100, random_state=0)
else:
return KNeighborsClassifier(n_neighbors=3)
def | (classifier_name, X_train, X_test, y_train, y_test):
accuracy_values = []
precision_values = []
recall_values = []
f1_score_values = []
for i in range(6):
classifier_clone = get_classifier_by_name(classifier_name)
classifier_clone.fit(X_train, y_train)
predicted_output = classifier_clone.predict(X_test)
accuracy, precision, recall, f1_score_val = get_metrics(y_test, predicted_output, one_hot_rep=False)
accuracy_values.append(accuracy)
precision_values.append(precision)
recall_values.append(recall)
f1_score_values.append(f1_score_val)
print_metrics(np.mean(accuracy_values), np.mean(precision_values), np.mean(recall_values), np.mean(f1_score_values))
def print_metrics(accuracy, precision, recall, f1_score_val):
print("Accuracy : {}".format(accuracy))
print("Precision : {}".format(precision))
print("Recall : {}".format(recall))
print("F1 : {}".format(f1_score_val))
def get_metrics(target, logits, one_hot_rep=True):
"""
Two numpy one hot arrays
:param target:
:param logits:
:return:
"""
if one_hot_rep:
label = np.argmax(target, axis=1)
predict = np.argmax(logits, axis=1)
else:
label = target
predict = logits
accuracy = accuracy_score(label, predict)
precision = precision_score(label, predict)
recall = recall_score(label, predict)
f1_score_val = f1_score(label, predict)
return accuracy, precision, recall, f1_score_val
def apply_lstm_model(X_train, X_test, y_train, y_test):
print("\n TRAIN AND TEST SHAPES FOR LSTM MODEL IS AS FOLLOWS : ")
X_train = X_train[:,None,:]
X_test = X_test[:,None,:]
print("\n X TRAIN : ",X_train.shape)
print("\n Y TRAIN ",y_train.shape)
print("\n X TEST ",X_test.shape)
print("\n Y TEST ",y_test.shape,"\n")
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.optimizers import Adam
#Initializing the classifier Network
classifier = Sequential()
#Adding the input LSTM network layer
classifier.add(LSTM(128, input_shape=(X_train.shape[1:]), return_sequences=True))
classifier.add(Dropout(0.2))
#Adding a second LSTM network layer
#classifier.add(LSTM(128, input_shape=(X_train.shape[1:]), return_sequences=True))
classifier.add(LSTM(128))
#Adding a dense hidden layer
classifier.add(Dense(64, activation='relu'))
classifier.add(Dropout(0.2))
#Adding the output layer
classifier.add(Dense(10, activation='softmax'))
#Compiling the network
classifier.compile( loss='sparse_categorical_crossentropy',
optimizer=Adam(lr=0.001, decay=1e-6),
metrics=['accuracy'] )
#Fitting the data to the model
classifier.fit(X_train,
y_train,
epochs=300,
validation_data=(X_test, y_test))
test_loss, test_acc = classifier.evaluate(X_test, y_test)
print('Test Loss: {}'.format(test_loss))
print('Test Accuracy: {}'.format(test_acc))
def get_basic_model_results(X_train, X_test, y_train, y_test):
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
print("\n TRAIN AND TEST SHAPES ARE AS FOLLOWS")
print("\n X TRAIN : ",X_train.shape)
print("\n Y TRAIN ",y_train.shape)
print("\n X TEST ",X_test.shape)
print("\n Y TEST ",y_test.shape,"\n")
classifiers = [GaussianNB(), LogisticRegression(), DecisionTreeClassifier(),
RandomForestClassifier(n_estimators=100),
svm.SVC(),ExtraTreesClassifier(n_estimators=100),KNeighborsClassifier(n_neighbors=3),AdaBoostClassifier(n_estimators=100, random_state=0)]
classifier_names = ["GaussianNB", "LogisticRegression", "DecisionTreeClassifier", "RandomForestClassifier",
"SVM -linear kernel","ExtraTreesClassifier","KNeighborsClassifier","AdaBoostClassifier"]
for idx in range(len(classifiers)):
print("======={}=======".format(classifier_names[idx]))
train_model(classifier_names[idx], X_train, X_test, y_train, y_test)
#apply_lstm_model(X_train, X_test, y_train, y_test);
def get_classificaton_results_tpnf(data_dir, news_source, time_interval, use_cache=False):
include_micro = True
include_macro = True
include_structural = True
include_temporal = True
include_linguistic = False
dc=get_degree_centrality("/content/FakeNewsPropagation/data/nx_network_data/nx_network_data", news_source)
cc=get_closness_centrality("/content/FakeNewsPropagation/data/nx_network_data/nx_network_data", news_source)
pr=get_pagerank("/content/FakeNewsPropagation/data/nx_network_data/nx_network_data", news_source)
sample_feature_arr = get_TPNF_dataset(data_dir, news_source, include_micro, include_macro, include_structural,
include_temporal, include_linguistic, time_interval, use_cache=use_cache)
dc = np.array(dc)
cc = np.array(cc)
pr = np.array(pr)
dc_trans = dc.reshape(-1,1)
cc_trans = cc.reshape(-1,1)
pr_trans = pr.reshape(-1,1)
ccpr = np.append(cc_trans, pr_trans, 1)
sample_feature_arra = np.append(sample_feature_arr, dc_trans, 1)
sample_feature_array = np.append(sample_feature_arra, ccpr, 1)
print("Sample feature array dimensions")
print(sample_feature_array.shape, flush=True)
num_samples = int(len(sample_feature_array) / 2)
target_labels = np.concatenate([np.ones(num_samples), np.zeros(num_samples)], axis=0)
X_train, X_test, y_train, y_test = get_train_test_split(sample_feature_array, target_labels)
get_basic_model_results(X_train, X_test, y_train, y_test)
def plot_feature_importances(coef, names):
imp = coef
imp, names = zip(*sorted(zip(imp, names)))
plt.barh(range(len(names)), imp, align='center')
plt.yticks(range(len(names)), names)
plt.savefig('feature_importance.png', bbox_inches='tight')
plt.show()
def dump_random_forest_feature_importance(data_dir, news_source):
include_micro = True
include_macro = True
include_structural = True
include_temporal = True
include_linguistic = True
sample_feature_array = get_TPNF_dataset(data_dir, news_source, include_micro, include_macro, include_structural,
include_temporal, include_linguistic, use_cache=True)
sample_feature_array = sample_feature_array[:, :-1]
feature_names, short_feature_names = get_dataset_feature_names(include_micro, include_macro, include_structural,
include_temporal, include_linguistic)
feature_names = feature_names[:-1]
short_feature_names = short_feature_names[:-1]
num_samples = int(len(sample_feature_array) / 2)
target_labels = np.concatenate([np.ones(num_samples), np.zeros(num_samples)], axis=0)
X_train, X_test, y_train, y_test = get_train_test_split(sample_feature_array, target_labels)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=100, random_state=0)
forest.fit(X_train, y_train)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X_train.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
matplotlib.rcParams['figure.figsize'] = 5, 2
# Plot the feature importances of the forest
plt.figure()
plt.bar(range(X_train.shape[1]), importances[indices],
color="b", yerr=std[indices], align="center")
plt.xticks(range(X_train.shape[1]), np.array(short_feature_names)[indices], rotation=75, fontsize=9.5)
plt.xlim([-1, X_train.shape[1]])
plt.savefig('{}_feature_importance.png'.format(news_source), bbox_inches='tight')
plt.show()
def get_classificaton_results_tpnf_by_time(news_source: str):
# Time Interval in hours for early-fake news detection
time_intervals = [3, 6, 12, 24, 36, 48, 60, 72, 84, 96]
for time_interval in time_intervals:
print("=============Time Interval : {} ==========".format(time_interval))
start_time = time.time()
get_classificaton_results_tpnf("data/features", news_source, time_interval)
print("\n\n================Execution time - {} ==================================\n".format(
time.time() - start_time))
if __name__ == "__main__":
#dump_random_forest_feature_importance("data/features", "politifact")
print("\n\n Working on Politifact Data \n")
get_classificaton_results_tpnf("data/features", "politifact", time_interval=None, use_cache=False)
print("\n\n Working on Gossipcop Data \n")
get_classificaton_results_tpnf("data/features", "gossipcop", time_interval=None, use_cache=False)
# Filter the graphs by time interval (for early fake news detection) and get the classification results
# get_classificaton_results_tpnf_by_time("politifact")
# get_classificaton_results_tpnf_by_time("gossipcop")
| train_model | identifier_name |
basic_model.py | import time
import sys
sys.path.insert(0,"/content/FakeNewsPropagation")
import matplotlib
import numpy as np
from sklearn import preprocessing, svm
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import AdaBoostClassifier
#from load_dataset import load_from_nx_graphs
#from construct_sample_features import get_nx_propagation_graphs
#from analysis_util import equal_samples
from centrality import get_degree_centrality,get_closness_centrality,get_betweenness_centrality,get_pagerank
from construct_sample_features import get_TPNF_dataset, get_train_test_split, get_dataset_feature_names
matplotlib.use('agg')
import matplotlib.pyplot as plt
def get_classifier_by_name(classifier_name):
if classifier_name == "GaussianNB":
return GaussianNB()
elif classifier_name == "LogisticRegression":
return LogisticRegression(solver='lbfgs')
elif classifier_name == "DecisionTreeClassifier":
return DecisionTreeClassifier()
elif classifier_name == "RandomForestClassifier":
return RandomForestClassifier(n_estimators=50)
elif classifier_name == "SVM -linear kernel":
return svm.SVC(kernel='linear')
elif classifier_name == "ExtraTreesClassifier":
return ExtraTreesClassifier(n_estimators=100)
elif classifier_name == "AdaBoostClassifier":
return AdaBoostClassifier(n_estimators=100, random_state=0)
else:
return KNeighborsClassifier(n_neighbors=3)
def train_model(classifier_name, X_train, X_test, y_train, y_test):
accuracy_values = []
precision_values = []
recall_values = []
f1_score_values = []
for i in range(6):
classifier_clone = get_classifier_by_name(classifier_name)
classifier_clone.fit(X_train, y_train)
predicted_output = classifier_clone.predict(X_test)
accuracy, precision, recall, f1_score_val = get_metrics(y_test, predicted_output, one_hot_rep=False)
accuracy_values.append(accuracy)
precision_values.append(precision)
recall_values.append(recall)
f1_score_values.append(f1_score_val)
print_metrics(np.mean(accuracy_values), np.mean(precision_values), np.mean(recall_values), np.mean(f1_score_values))
def print_metrics(accuracy, precision, recall, f1_score_val):
print("Accuracy : {}".format(accuracy))
print("Precision : {}".format(precision))
print("Recall : {}".format(recall))
print("F1 : {}".format(f1_score_val))
def get_metrics(target, logits, one_hot_rep=True):
"""
Two numpy one hot arrays
:param target:
:param logits:
:return:
"""
if one_hot_rep:
label = np.argmax(target, axis=1)
predict = np.argmax(logits, axis=1)
else:
label = target
predict = logits
accuracy = accuracy_score(label, predict)
precision = precision_score(label, predict)
recall = recall_score(label, predict)
f1_score_val = f1_score(label, predict)
return accuracy, precision, recall, f1_score_val
def apply_lstm_model(X_train, X_test, y_train, y_test):
print("\n TRAIN AND TEST SHAPES FOR LSTM MODEL IS AS FOLLOWS : ")
X_train = X_train[:,None,:]
X_test = X_test[:,None,:]
print("\n X TRAIN : ",X_train.shape)
print("\n Y TRAIN ",y_train.shape)
print("\n X TEST ",X_test.shape)
print("\n Y TEST ",y_test.shape,"\n")
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.optimizers import Adam
#Initializing the classifier Network
classifier = Sequential()
#Adding the input LSTM network layer
classifier.add(LSTM(128, input_shape=(X_train.shape[1:]), return_sequences=True))
classifier.add(Dropout(0.2))
#Adding a second LSTM network layer
#classifier.add(LSTM(128, input_shape=(X_train.shape[1:]), return_sequences=True))
classifier.add(LSTM(128))
#Adding a dense hidden layer
classifier.add(Dense(64, activation='relu'))
classifier.add(Dropout(0.2))
#Adding the output layer
classifier.add(Dense(10, activation='softmax'))
#Compiling the network
classifier.compile( loss='sparse_categorical_crossentropy',
optimizer=Adam(lr=0.001, decay=1e-6),
metrics=['accuracy'] )
#Fitting the data to the model
classifier.fit(X_train,
y_train,
epochs=300,
validation_data=(X_test, y_test))
test_loss, test_acc = classifier.evaluate(X_test, y_test)
print('Test Loss: {}'.format(test_loss))
print('Test Accuracy: {}'.format(test_acc))
def get_basic_model_results(X_train, X_test, y_train, y_test):
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
print("\n TRAIN AND TEST SHAPES ARE AS FOLLOWS")
print("\n X TRAIN : ",X_train.shape)
print("\n Y TRAIN ",y_train.shape)
print("\n X TEST ",X_test.shape)
print("\n Y TEST ",y_test.shape,"\n")
classifiers = [GaussianNB(), LogisticRegression(), DecisionTreeClassifier(),
RandomForestClassifier(n_estimators=100),
svm.SVC(),ExtraTreesClassifier(n_estimators=100),KNeighborsClassifier(n_neighbors=3),AdaBoostClassifier(n_estimators=100, random_state=0)]
classifier_names = ["GaussianNB", "LogisticRegression", "DecisionTreeClassifier", "RandomForestClassifier",
"SVM -linear kernel","ExtraTreesClassifier","KNeighborsClassifier","AdaBoostClassifier"]
for idx in range(len(classifiers)):
print("======={}=======".format(classifier_names[idx]))
train_model(classifier_names[idx], X_train, X_test, y_train, y_test)
#apply_lstm_model(X_train, X_test, y_train, y_test);
def get_classificaton_results_tpnf(data_dir, news_source, time_interval, use_cache=False):
include_micro = True
include_macro = True
include_structural = True
include_temporal = True
include_linguistic = False
dc=get_degree_centrality("/content/FakeNewsPropagation/data/nx_network_data/nx_network_data", news_source)
cc=get_closness_centrality("/content/FakeNewsPropagation/data/nx_network_data/nx_network_data", news_source)
pr=get_pagerank("/content/FakeNewsPropagation/data/nx_network_data/nx_network_data", news_source)
sample_feature_arr = get_TPNF_dataset(data_dir, news_source, include_micro, include_macro, include_structural,
include_temporal, include_linguistic, time_interval, use_cache=use_cache)
dc = np.array(dc)
cc = np.array(cc)
pr = np.array(pr)
dc_trans = dc.reshape(-1,1)
cc_trans = cc.reshape(-1,1)
pr_trans = pr.reshape(-1,1)
ccpr = np.append(cc_trans, pr_trans, 1)
sample_feature_arra = np.append(sample_feature_arr, dc_trans, 1)
sample_feature_array = np.append(sample_feature_arra, ccpr, 1)
print("Sample feature array dimensions")
print(sample_feature_array.shape, flush=True)
num_samples = int(len(sample_feature_array) / 2)
target_labels = np.concatenate([np.ones(num_samples), np.zeros(num_samples)], axis=0)
X_train, X_test, y_train, y_test = get_train_test_split(sample_feature_array, target_labels)
get_basic_model_results(X_train, X_test, y_train, y_test)
def plot_feature_importances(coef, names):
imp = coef
imp, names = zip(*sorted(zip(imp, names)))
plt.barh(range(len(names)), imp, align='center')
plt.yticks(range(len(names)), names)
plt.savefig('feature_importance.png', bbox_inches='tight')
plt.show()
def dump_random_forest_feature_importance(data_dir, news_source):
|
def get_classificaton_results_tpnf_by_time(news_source: str):
# Time Interval in hours for early-fake news detection
time_intervals = [3, 6, 12, 24, 36, 48, 60, 72, 84, 96]
for time_interval in time_intervals:
print("=============Time Interval : {} ==========".format(time_interval))
start_time = time.time()
get_classificaton_results_tpnf("data/features", news_source, time_interval)
print("\n\n================Execution time - {} ==================================\n".format(
time.time() - start_time))
if __name__ == "__main__":
#dump_random_forest_feature_importance("data/features", "politifact")
print("\n\n Working on Politifact Data \n")
get_classificaton_results_tpnf("data/features", "politifact", time_interval=None, use_cache=False)
print("\n\n Working on Gossipcop Data \n")
get_classificaton_results_tpnf("data/features", "gossipcop", time_interval=None, use_cache=False)
# Filter the graphs by time interval (for early fake news detection) and get the classification results
# get_classificaton_results_tpnf_by_time("politifact")
# get_classificaton_results_tpnf_by_time("gossipcop")
| include_micro = True
include_macro = True
include_structural = True
include_temporal = True
include_linguistic = True
sample_feature_array = get_TPNF_dataset(data_dir, news_source, include_micro, include_macro, include_structural,
include_temporal, include_linguistic, use_cache=True)
sample_feature_array = sample_feature_array[:, :-1]
feature_names, short_feature_names = get_dataset_feature_names(include_micro, include_macro, include_structural,
include_temporal, include_linguistic)
feature_names = feature_names[:-1]
short_feature_names = short_feature_names[:-1]
num_samples = int(len(sample_feature_array) / 2)
target_labels = np.concatenate([np.ones(num_samples), np.zeros(num_samples)], axis=0)
X_train, X_test, y_train, y_test = get_train_test_split(sample_feature_array, target_labels)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=100, random_state=0)
forest.fit(X_train, y_train)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X_train.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
matplotlib.rcParams['figure.figsize'] = 5, 2
# Plot the feature importances of the forest
plt.figure()
plt.bar(range(X_train.shape[1]), importances[indices],
color="b", yerr=std[indices], align="center")
plt.xticks(range(X_train.shape[1]), np.array(short_feature_names)[indices], rotation=75, fontsize=9.5)
plt.xlim([-1, X_train.shape[1]])
plt.savefig('{}_feature_importance.png'.format(news_source), bbox_inches='tight')
plt.show() | identifier_body |
basic_model.py | import time
import sys
sys.path.insert(0,"/content/FakeNewsPropagation")
import matplotlib
import numpy as np
from sklearn import preprocessing, svm
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import AdaBoostClassifier
#from load_dataset import load_from_nx_graphs
#from construct_sample_features import get_nx_propagation_graphs
#from analysis_util import equal_samples
from centrality import get_degree_centrality,get_closness_centrality,get_betweenness_centrality,get_pagerank
from construct_sample_features import get_TPNF_dataset, get_train_test_split, get_dataset_feature_names
matplotlib.use('agg')
import matplotlib.pyplot as plt
def get_classifier_by_name(classifier_name):
if classifier_name == "GaussianNB":
return GaussianNB()
elif classifier_name == "LogisticRegression":
return LogisticRegression(solver='lbfgs')
elif classifier_name == "DecisionTreeClassifier":
return DecisionTreeClassifier()
elif classifier_name == "RandomForestClassifier":
return RandomForestClassifier(n_estimators=50)
elif classifier_name == "SVM -linear kernel":
return svm.SVC(kernel='linear')
elif classifier_name == "ExtraTreesClassifier":
return ExtraTreesClassifier(n_estimators=100)
elif classifier_name == "AdaBoostClassifier":
|
else:
return KNeighborsClassifier(n_neighbors=3)
def train_model(classifier_name, X_train, X_test, y_train, y_test):
accuracy_values = []
precision_values = []
recall_values = []
f1_score_values = []
for i in range(6):
classifier_clone = get_classifier_by_name(classifier_name)
classifier_clone.fit(X_train, y_train)
predicted_output = classifier_clone.predict(X_test)
accuracy, precision, recall, f1_score_val = get_metrics(y_test, predicted_output, one_hot_rep=False)
accuracy_values.append(accuracy)
precision_values.append(precision)
recall_values.append(recall)
f1_score_values.append(f1_score_val)
print_metrics(np.mean(accuracy_values), np.mean(precision_values), np.mean(recall_values), np.mean(f1_score_values))
def print_metrics(accuracy, precision, recall, f1_score_val):
print("Accuracy : {}".format(accuracy))
print("Precision : {}".format(precision))
print("Recall : {}".format(recall))
print("F1 : {}".format(f1_score_val))
def get_metrics(target, logits, one_hot_rep=True):
"""
Two numpy one hot arrays
:param target:
:param logits:
:return:
"""
if one_hot_rep:
label = np.argmax(target, axis=1)
predict = np.argmax(logits, axis=1)
else:
label = target
predict = logits
accuracy = accuracy_score(label, predict)
precision = precision_score(label, predict)
recall = recall_score(label, predict)
f1_score_val = f1_score(label, predict)
return accuracy, precision, recall, f1_score_val
def apply_lstm_model(X_train, X_test, y_train, y_test):
print("\n TRAIN AND TEST SHAPES FOR LSTM MODEL IS AS FOLLOWS : ")
X_train = X_train[:,None,:]
X_test = X_test[:,None,:]
print("\n X TRAIN : ",X_train.shape)
print("\n Y TRAIN ",y_train.shape)
print("\n X TEST ",X_test.shape)
print("\n Y TEST ",y_test.shape,"\n")
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.optimizers import Adam
#Initializing the classifier Network
classifier = Sequential()
#Adding the input LSTM network layer
classifier.add(LSTM(128, input_shape=(X_train.shape[1:]), return_sequences=True))
classifier.add(Dropout(0.2))
#Adding a second LSTM network layer
#classifier.add(LSTM(128, input_shape=(X_train.shape[1:]), return_sequences=True))
classifier.add(LSTM(128))
#Adding a dense hidden layer
classifier.add(Dense(64, activation='relu'))
classifier.add(Dropout(0.2))
#Adding the output layer
classifier.add(Dense(10, activation='softmax'))
#Compiling the network
classifier.compile( loss='sparse_categorical_crossentropy',
optimizer=Adam(lr=0.001, decay=1e-6),
metrics=['accuracy'] )
#Fitting the data to the model
classifier.fit(X_train,
y_train,
epochs=300,
validation_data=(X_test, y_test))
test_loss, test_acc = classifier.evaluate(X_test, y_test)
print('Test Loss: {}'.format(test_loss))
print('Test Accuracy: {}'.format(test_acc))
def get_basic_model_results(X_train, X_test, y_train, y_test):
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
print("\n TRAIN AND TEST SHAPES ARE AS FOLLOWS")
print("\n X TRAIN : ",X_train.shape)
print("\n Y TRAIN ",y_train.shape)
print("\n X TEST ",X_test.shape)
print("\n Y TEST ",y_test.shape,"\n")
classifiers = [GaussianNB(), LogisticRegression(), DecisionTreeClassifier(),
RandomForestClassifier(n_estimators=100),
svm.SVC(),ExtraTreesClassifier(n_estimators=100),KNeighborsClassifier(n_neighbors=3),AdaBoostClassifier(n_estimators=100, random_state=0)]
classifier_names = ["GaussianNB", "LogisticRegression", "DecisionTreeClassifier", "RandomForestClassifier",
"SVM -linear kernel","ExtraTreesClassifier","KNeighborsClassifier","AdaBoostClassifier"]
for idx in range(len(classifiers)):
print("======={}=======".format(classifier_names[idx]))
train_model(classifier_names[idx], X_train, X_test, y_train, y_test)
#apply_lstm_model(X_train, X_test, y_train, y_test);
def get_classificaton_results_tpnf(data_dir, news_source, time_interval, use_cache=False):
include_micro = True
include_macro = True
include_structural = True
include_temporal = True
include_linguistic = False
dc=get_degree_centrality("/content/FakeNewsPropagation/data/nx_network_data/nx_network_data", news_source)
cc=get_closness_centrality("/content/FakeNewsPropagation/data/nx_network_data/nx_network_data", news_source)
pr=get_pagerank("/content/FakeNewsPropagation/data/nx_network_data/nx_network_data", news_source)
sample_feature_arr = get_TPNF_dataset(data_dir, news_source, include_micro, include_macro, include_structural,
include_temporal, include_linguistic, time_interval, use_cache=use_cache)
dc = np.array(dc)
cc = np.array(cc)
pr = np.array(pr)
dc_trans = dc.reshape(-1,1)
cc_trans = cc.reshape(-1,1)
pr_trans = pr.reshape(-1,1)
ccpr = np.append(cc_trans, pr_trans, 1)
sample_feature_arra = np.append(sample_feature_arr, dc_trans, 1)
sample_feature_array = np.append(sample_feature_arra, ccpr, 1)
print("Sample feature array dimensions")
print(sample_feature_array.shape, flush=True)
num_samples = int(len(sample_feature_array) / 2)
target_labels = np.concatenate([np.ones(num_samples), np.zeros(num_samples)], axis=0)
X_train, X_test, y_train, y_test = get_train_test_split(sample_feature_array, target_labels)
get_basic_model_results(X_train, X_test, y_train, y_test)
def plot_feature_importances(coef, names):
imp = coef
imp, names = zip(*sorted(zip(imp, names)))
plt.barh(range(len(names)), imp, align='center')
plt.yticks(range(len(names)), names)
plt.savefig('feature_importance.png', bbox_inches='tight')
plt.show()
def dump_random_forest_feature_importance(data_dir, news_source):
include_micro = True
include_macro = True
include_structural = True
include_temporal = True
include_linguistic = True
sample_feature_array = get_TPNF_dataset(data_dir, news_source, include_micro, include_macro, include_structural,
include_temporal, include_linguistic, use_cache=True)
sample_feature_array = sample_feature_array[:, :-1]
feature_names, short_feature_names = get_dataset_feature_names(include_micro, include_macro, include_structural,
include_temporal, include_linguistic)
feature_names = feature_names[:-1]
short_feature_names = short_feature_names[:-1]
num_samples = int(len(sample_feature_array) / 2)
target_labels = np.concatenate([np.ones(num_samples), np.zeros(num_samples)], axis=0)
X_train, X_test, y_train, y_test = get_train_test_split(sample_feature_array, target_labels)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=100, random_state=0)
forest.fit(X_train, y_train)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X_train.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
matplotlib.rcParams['figure.figsize'] = 5, 2
# Plot the feature importances of the forest
plt.figure()
plt.bar(range(X_train.shape[1]), importances[indices],
color="b", yerr=std[indices], align="center")
plt.xticks(range(X_train.shape[1]), np.array(short_feature_names)[indices], rotation=75, fontsize=9.5)
plt.xlim([-1, X_train.shape[1]])
plt.savefig('{}_feature_importance.png'.format(news_source), bbox_inches='tight')
plt.show()
def get_classificaton_results_tpnf_by_time(news_source: str):
# Time Interval in hours for early-fake news detection
time_intervals = [3, 6, 12, 24, 36, 48, 60, 72, 84, 96]
for time_interval in time_intervals:
print("=============Time Interval : {} ==========".format(time_interval))
start_time = time.time()
get_classificaton_results_tpnf("data/features", news_source, time_interval)
print("\n\n================Execution time - {} ==================================\n".format(
time.time() - start_time))
if __name__ == "__main__":
#dump_random_forest_feature_importance("data/features", "politifact")
print("\n\n Working on Politifact Data \n")
get_classificaton_results_tpnf("data/features", "politifact", time_interval=None, use_cache=False)
print("\n\n Working on Gossipcop Data \n")
get_classificaton_results_tpnf("data/features", "gossipcop", time_interval=None, use_cache=False)
# Filter the graphs by time interval (for early fake news detection) and get the classification results
# get_classificaton_results_tpnf_by_time("politifact")
# get_classificaton_results_tpnf_by_time("gossipcop")
| return AdaBoostClassifier(n_estimators=100, random_state=0) | conditional_block |
check.rs | //! Consensus check functions
use std::{collections::HashSet, sync::Arc};
use chrono::{DateTime, Utc};
use zebra_chain::{
amount::{Amount, Error as AmountError, NonNegative},
block::{Block, Hash, Header, Height},
parameters::{Network, NetworkUpgrade},
transaction,
work::{difficulty::ExpandedDifficulty, equihash},
};
use crate::{error::*, parameters::SLOW_START_INTERVAL};
use super::subsidy;
/// Checks if there is exactly one coinbase transaction in `Block`,
/// and if that coinbase transaction is the first transaction in the block.
/// Returns the coinbase transaction if successful.
///
/// > A transaction that has a single transparent input with a null prevout field,
/// > is called a coinbase transaction. Every block has a single coinbase
/// > transaction as the first transaction in the block.
///
/// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
pub fn coinbase_is_first(block: &Block) -> Result<Arc<transaction::Transaction>, BlockError> {
// # Consensus
//
// > A block MUST have at least one transaction
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
let first = block
.transactions
.get(0)
.ok_or(BlockError::NoTransactions)?;
// > The first transaction in a block MUST be a coinbase transaction,
// > and subsequent transactions MUST NOT be coinbase transactions.
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
//
// > A transaction that has a single transparent input with a null prevout
// > field, is called a coinbase transaction.
//
// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
let mut rest = block.transactions.iter().skip(1);
if !first.is_coinbase() {
Err(TransactionError::CoinbasePosition)?;
}
// > A transparent input in a non-coinbase transaction MUST NOT have a null prevout
//
// <https://zips.z.cash/protocol/protocol.pdf#txnconsensus>
if !rest.all(|tx| tx.is_valid_non_coinbase()) {
Err(TransactionError::CoinbaseAfterFirst)?;
}
Ok(first.clone())
}
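// Illustrative usage sketch (added for exposition, not part of the original code):
// a caller that has already parsed a `block: &Block` would typically run this check
// first and reuse the returned coinbase transaction for the subsidy checks below:
//
//     let coinbase = coinbase_is_first(&block)?;
//     debug_assert!(coinbase.is_coinbase());
//
// `block` is assumed to come from the caller; this module never constructs blocks.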
/// Returns `Ok(ExpandedDifficulty)` if the`difficulty_threshold` of `header` is at least as difficult as
/// the target difficulty limit for `network` (PoWLimit)
///
/// If the header difficulty threshold is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_threshold_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<ExpandedDifficulty, BlockError> {
let difficulty_threshold = header
.difficulty_threshold
.to_expanded()
.ok_or(BlockError::InvalidDifficulty(*height, *hash))?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// The PowLimit check is part of `Threshold()` in the spec, but it doesn't
// actually depend on any previous blocks.
if difficulty_threshold > ExpandedDifficulty::target_difficulty_limit(network) {
Err(BlockError::TargetDifficultyLimit(
*height,
*hash,
difficulty_threshold,
network,
ExpandedDifficulty::target_difficulty_limit(network),
))?;
}
Ok(difficulty_threshold)
}
/// Returns `Ok(())` if `hash` passes:
/// - the target difficulty limit for `network` (PoWLimit), and
/// - the difficulty filter,
/// based on the fields in `header`.
///
/// If the block is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<(), BlockError> {
let difficulty_threshold = difficulty_threshold_is_valid(header, network, height, hash)?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// # Consensus
//
// > The block MUST pass the difficulty filter.
//
// https://zips.z.cash/protocol/protocol.pdf#blockheader
//
// The difficulty filter is also context-free.
if hash > &difficulty_threshold {
Err(BlockError::DifficultyFilter(
*height,
*hash,
difficulty_threshold,
network,
))?;
}
Ok(())
}
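// Illustrative sketch (added for exposition, not part of the original consensus
// code): the difficulty filter above is a plain integer comparison on 256-bit
// values, so a numerically larger threshold is an easier target. The toy test below
// shows the same ordering idea with small stand-in integers only; it does not touch
// real block data or the zebra-chain difficulty types.
#[cfg(test)]
mod difficulty_ordering_sketch {
    #[test]
    fn larger_threshold_is_an_easier_target() {
        let hash_as_int: u32 = 0x09;
        let easier_threshold: u32 = 0x10;
        let harder_threshold: u32 = 0x08;
        // Passes the easier (larger) threshold, fails the harder (smaller) one.
        assert!(hash_as_int <= easier_threshold);
        assert!(hash_as_int > harder_threshold);
    }
}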
/// Returns `Ok(())` if the `EquihashSolution` is valid for `header`
pub fn equihash_solution_is_valid(header: &Header) -> Result<(), equihash::Error> |
/// Returns `Ok(())` if the block subsidy in `block` is valid for `network`
///
/// [3.9]: https://zips.z.cash/protocol/protocol.pdf#subsidyconcepts
pub fn subsidy_is_valid(block: &Block, network: Network) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
// Validate funding streams
let Some(halving_div) = subsidy::general::halving_divisor(height, network) else {
// Far future halving, with no founders reward or funding streams
return Ok(());
};
let canopy_activation_height = NetworkUpgrade::Canopy
.activation_height(network)
.expect("Canopy activation height is known");
if height < SLOW_START_INTERVAL {
unreachable!(
"unsupported block height: callers should handle blocks below {:?}",
SLOW_START_INTERVAL
)
} else if halving_div.count_ones() != 1 {
unreachable!("invalid halving divisor: the halving divisor must be a non-zero power of two")
} else if height < canopy_activation_height {
// Founders rewards are paid up to Canopy activation, on both mainnet and testnet.
// But we checkpoint in Canopy so founders reward does not apply for Zebra.
unreachable!("we cannot verify consensus rules before Canopy activation");
} else if halving_div < 4 {
// Funding streams are paid from Canopy activation to the second halving
// Note: Canopy activation is at the first halving on mainnet, but not on testnet
// ZIP-1014 only applies to mainnet, ZIP-214 contains the specific rules for testnet
// funding stream amount values
let funding_streams = subsidy::funding_streams::funding_stream_values(height, network)
.expect("We always expect a funding stream hashmap response even if empty");
// # Consensus
//
// > [Canopy onward] The coinbase transaction at block height `height`
// > MUST contain at least one output per funding stream `fs` active at `height`,
// > that pays `fs.Value(height)` zatoshi in the prescribed way to the stream's
// > recipient address represented by `fs.AddressList[fs.AddressIndex(height)]
//
// https://zips.z.cash/protocol/protocol.pdf#fundingstreams
for (receiver, expected_amount) in funding_streams {
let address =
subsidy::funding_streams::funding_stream_address(height, network, receiver);
let has_expected_output =
subsidy::funding_streams::filter_outputs_by_address(coinbase, address)
.iter()
.map(zebra_chain::transparent::Output::value)
.any(|value| value == expected_amount);
if !has_expected_output {
Err(SubsidyError::FundingStreamNotFound)?;
}
}
Ok(())
} else {
// Future halving, with no founders reward or funding streams
Ok(())
}
}
/// Returns `Ok(())` if the miner fees consensus rule is valid.
///
/// [7.1.2]: https://zips.z.cash/protocol/protocol.pdf#txnconsensus
pub fn miner_fees_are_valid(
block: &Block,
network: Network,
block_miner_fees: Amount<NonNegative>,
) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
let transparent_value_balance: Amount = subsidy::general::output_amounts(coinbase)
.iter()
.sum::<Result<Amount<NonNegative>, AmountError>>()
.map_err(|_| SubsidyError::SumOverflow)?
.constrain()
.expect("positive value always fit in `NegativeAllowed`");
let sapling_value_balance = coinbase.sapling_value_balance().sapling_amount();
let orchard_value_balance = coinbase.orchard_value_balance().orchard_amount();
let block_subsidy = subsidy::general::block_subsidy(height, network)
.expect("a valid block subsidy for this height and network");
// # Consensus
//
// > The total value in zatoshi of transparent outputs from a coinbase transaction,
// > minus vbalanceSapling, minus vbalanceOrchard, MUST NOT be greater than the value
// > in zatoshi of block subsidy plus the transaction fees paid by transactions in this block.
//
// https://zips.z.cash/protocol/protocol.pdf#txnconsensus
let left = (transparent_value_balance - sapling_value_balance - orchard_value_balance)
.map_err(|_| SubsidyError::SumOverflow)?;
let right = (block_subsidy + block_miner_fees).map_err(|_| SubsidyError::SumOverflow)?;
if left > right {
Err(SubsidyError::InvalidMinerFees)?;
}
Ok(())
}
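// Editor's note: an illustrative model of the miner-fee inequality above,
// not part of zebra-consensus. Plain i64 zatoshi values stand in for the
// Amount types; checked arithmetic mirrors the SumOverflow handling, with
// None playing the role of the overflow error.
fn miner_fees_ok_sketch(
    transparent_coinbase_outputs: i64,
    sapling_balance: i64,
    orchard_balance: i64,
    block_subsidy: i64,
    miner_fees: i64,
) -> Option<bool> {
    let left = transparent_coinbase_outputs
        .checked_sub(sapling_balance)?
        .checked_sub(orchard_balance)?;
    let right = block_subsidy.checked_add(miner_fees)?;
    Some(left <= right)
}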
/// Returns `Ok(())` if `header.time` is less than or equal to
/// 2 hours in the future, according to the node's local clock (`now`).
///
/// This is a non-deterministic rule, as clocks vary over time, and
/// between different nodes.
///
/// "In addition, a full validator MUST NOT accept blocks with nTime
/// more than two hours in the future according to its clock. This
/// is not strictly a consensus rule because it is nondeterministic,
/// and clock time varies between nodes. Also note that a block that
/// is rejected by this rule at a given point in time may later be
/// accepted." [§7.5][7.5]
///
/// [7.5]: https://zips.z.cash/protocol/protocol.pdf#blockheader
///
/// If the header time is invalid, returns an error containing `height` and `hash`.
pub fn time_is_valid_at(
header: &Header,
now: DateTime<Utc>,
height: &Height,
hash: &Hash,
) -> Result<(), zebra_chain::block::BlockTimeError> {
header.time_is_valid_at(now, height, hash)
}
/// Check Merkle root validity.
///
/// `transaction_hashes` is a precomputed list of transaction hashes.
///
/// # Consensus rules:
///
/// - A SHA-256d hash in internal byte order. The merkle root is derived from the
/// hashes of all transactions included in this block, ensuring that none of
/// those transactions can be modified without modifying the header. [7.6]
///
/// # Panics
///
/// - If block does not have a coinbase transaction.
///
/// [ZIP-244]: https://zips.z.cash/zip-0244
/// [7.1]: https://zips.z.cash/protocol/nu5.pdf#txnencodingandconsensus
/// [7.6]: https://zips.z.cash/protocol/nu5.pdf#blockheader
pub fn merkle_root_validity(
network: Network,
block: &Block,
transaction_hashes: &[transaction::Hash],
) -> Result<(), BlockError> {
// TODO: deduplicate zebra-chain and zebra-consensus errors (#2908)
block
.check_transaction_network_upgrade_consistency(network)
.map_err(|_| BlockError::WrongTransactionConsensusBranchId)?;
let merkle_root = transaction_hashes.iter().cloned().collect();
if block.header.merkle_root != merkle_root {
return Err(BlockError::BadMerkleRoot {
actual: merkle_root,
expected: block.header.merkle_root,
});
}
// Bitcoin's transaction Merkle trees are malleable, allowing blocks with
// duplicate transactions to have the same Merkle root as blocks without
// duplicate transactions.
//
// Collecting into a HashSet deduplicates, so this checks that there are no
// duplicate transaction hashes, preventing Merkle root malleability.
//
// ## Full Block Validation
//
// Duplicate transactions should cause a block to be
// rejected, as duplicate transactions imply that the block contains a
// double-spend. As a defense-in-depth, however, we also check that there
// are no duplicate transaction hashes.
//
// ## Checkpoint Validation
//
// To prevent malleability (CVE-2012-2459), we also need to check
// whether the transaction hashes are unique.
if transaction_hashes.len() != transaction_hashes.iter().collect::<HashSet<_>>().len() {
return Err(BlockError::DuplicateTransaction);
}
Ok(())
}
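// Editor's note: an illustrative stand-alone version of the CVE-2012-2459
// defence above, not part of zebra-consensus; transaction hashes are
// simplified to [u8; 32]. Collecting into a HashSet deduplicates, so a
// length mismatch means at least one hash appears twice.
fn has_duplicate_hashes_sketch(hashes: &[[u8; 32]]) -> bool {
    let unique: std::collections::HashSet<_> = hashes.iter().collect();
    unique.len() != hashes.len()
}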
| {
// # Consensus
//
// > `solution` MUST represent a valid Equihash solution.
//
// https://zips.z.cash/protocol/protocol.pdf#blockheader
header.solution.check(header)
} | identifier_body |
check.rs | //! Consensus check functions
use std::{collections::HashSet, sync::Arc};
use chrono::{DateTime, Utc};
use zebra_chain::{
amount::{Amount, Error as AmountError, NonNegative},
block::{Block, Hash, Header, Height},
parameters::{Network, NetworkUpgrade},
transaction,
work::{difficulty::ExpandedDifficulty, equihash},
};
use crate::{error::*, parameters::SLOW_START_INTERVAL};
use super::subsidy;
/// Checks if there is exactly one coinbase transaction in `Block`,
/// and if that coinbase transaction is the first transaction in the block.
/// Returns the coinbase transaction if successful.
///
/// > A transaction that has a single transparent input with a null prevout field,
/// > is called a coinbase transaction. Every block has a single coinbase
/// > transaction as the first transaction in the block.
///
/// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
pub fn coinbase_is_first(block: &Block) -> Result<Arc<transaction::Transaction>, BlockError> {
// # Consensus
//
// > A block MUST have at least one transaction
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
let first = block
.transactions
.get(0)
.ok_or(BlockError::NoTransactions)?;
// > The first transaction in a block MUST be a coinbase transaction,
// > and subsequent transactions MUST NOT be coinbase transactions.
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
//
// > A transaction that has a single transparent input with a null prevout
// > field, is called a coinbase transaction.
//
// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
let mut rest = block.transactions.iter().skip(1);
if !first.is_coinbase() {
Err(TransactionError::CoinbasePosition)?;
}
// > A transparent input in a non-coinbase transaction MUST NOT have a null prevout
//
// <https://zips.z.cash/protocol/protocol.pdf#txnconsensus>
if !rest.all(|tx| tx.is_valid_non_coinbase()) {
Err(TransactionError::CoinbaseAfterFirst)?;
}
Ok(first.clone())
}
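// Editor's note: an illustrative model of the coinbase-position rule above,
// not part of zebra-consensus. Each element of the slice says whether the
// transaction at that position is a coinbase; the rule holds when the first
// one is and no later one is.
fn coinbase_position_ok_sketch(tx_is_coinbase: &[bool]) -> bool {
    match tx_is_coinbase.split_first() {
        Some((&first, rest)) => first && rest.iter().all(|&later| !later),
        // A block MUST have at least one transaction.
        None => false,
    }
}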
/// Returns `Ok(ExpandedDifficulty)` if the `difficulty_threshold` of `header` is at least as difficult as
/// the target difficulty limit for `network` (PoWLimit)
///
/// If the header difficulty threshold is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_threshold_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<ExpandedDifficulty, BlockError> {
let difficulty_threshold = header
.difficulty_threshold
.to_expanded()
.ok_or(BlockError::InvalidDifficulty(*height, *hash))?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// The PowLimit check is part of `Threshold()` in the spec, but it doesn't
// actually depend on any previous blocks.
if difficulty_threshold > ExpandedDifficulty::target_difficulty_limit(network) {
Err(BlockError::TargetDifficultyLimit(
*height,
*hash,
difficulty_threshold,
network,
ExpandedDifficulty::target_difficulty_limit(network),
))?;
}
Ok(difficulty_threshold)
}
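// Editor's note: an illustrative model of the PoWLimit rule above, not part
// of zebra-consensus; u128 stands in for the 256-bit expanded difficulty. A
// threshold numerically above the per-network limit would be easier than the
// minimum allowed difficulty, so it is rejected.
fn threshold_within_pow_limit_sketch(threshold: u128, pow_limit: u128) -> bool {
    threshold <= pow_limit
}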
/// Returns `Ok(())` if `hash` passes:
/// - the target difficulty limit for `network` (PoWLimit), and
/// - the difficulty filter,
/// based on the fields in `header`.
///
/// If the block is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<(), BlockError> {
let difficulty_threshold = difficulty_threshold_is_valid(header, network, height, hash)?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// # Consensus
//
// > The block MUST pass the difficulty filter.
//
// https://zips.z.cash/protocol/protocol.pdf#blockheader
//
// The difficulty filter is also context-free.
if hash > &difficulty_threshold {
Err(BlockError::DifficultyFilter(
*height,
*hash,
difficulty_threshold,
network,
))?;
}
Ok(())
}
/// Returns `Ok(())` if the `EquihashSolution` is valid for `header`
pub fn equihash_solution_is_valid(header: &Header) -> Result<(), equihash::Error> {
// # Consensus
//
// > `solution` MUST represent a valid Equihash solution.
//
// https://zips.z.cash/protocol/protocol.pdf#blockheader
header.solution.check(header)
}
/// Returns `Ok(())` if the block subsidy in `block` is valid for `network`
///
/// [3.9]: https://zips.z.cash/protocol/protocol.pdf#subsidyconcepts
pub fn subsidy_is_valid(block: &Block, network: Network) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
// Validate funding streams
let Some(halving_div) = subsidy::general::halving_divisor(height, network) else {
// Far future halving, with no founders reward or funding streams
return Ok(());
};
let canopy_activation_height = NetworkUpgrade::Canopy
.activation_height(network)
.expect("Canopy activation height is known");
if height < SLOW_START_INTERVAL {
unreachable!(
"unsupported block height: callers should handle blocks below {:?}",
SLOW_START_INTERVAL
)
} else if halving_div.count_ones() != 1 {
unreachable!("invalid halving divisor: the halving divisor must be a non-zero power of two")
} else if height < canopy_activation_height {
// Founders rewards are paid up to Canopy activation, on both mainnet and testnet.
// But we checkpoint in Canopy so founders reward does not apply for Zebra.
unreachable!("we cannot verify consensus rules before Canopy activation");
} else if halving_div < 4 {
// Funding streams are paid from Canopy activation to the second halving
// Note: Canopy activation is at the first halving on mainnet, but not on testnet
// ZIP-1014 only applies to mainnet, ZIP-214 contains the specific rules for testnet
// funding stream amount values
let funding_streams = subsidy::funding_streams::funding_stream_values(height, network)
.expect("We always expect a funding stream hashmap response even if empty");
// # Consensus
//
// > [Canopy onward] The coinbase transaction at block height `height`
// > MUST contain at least one output per funding stream `fs` active at `height`,
// > that pays `fs.Value(height)` zatoshi in the prescribed way to the stream's
// > recipient address represented by `fs.AddressList[fs.AddressIndex(height)]`
//
// https://zips.z.cash/protocol/protocol.pdf#fundingstreams
for (receiver, expected_amount) in funding_streams {
let address =
subsidy::funding_streams::funding_stream_address(height, network, receiver);
let has_expected_output =
subsidy::funding_streams::filter_outputs_by_address(coinbase, address)
.iter()
.map(zebra_chain::transparent::Output::value)
.any(|value| value == expected_amount);
if !has_expected_output {
Err(SubsidyError::FundingStreamNotFound)?;
}
}
Ok(())
} else {
// Future halving, with no founders reward or funding streams
Ok(())
}
}
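// Editor's note: an illustrative version of the halving-divisor sanity check
// above, not part of zebra-consensus. A valid divisor is a non-zero power of
// two, which is exactly the "one bit set" condition tested with count_ones().
fn is_valid_halving_divisor_sketch(halving_div: u64) -> bool {
    halving_div.count_ones() == 1
}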
/// Returns `Ok(())` if the miner fees consensus rule is valid.
///
/// [7.1.2]: https://zips.z.cash/protocol/protocol.pdf#txnconsensus
pub fn | (
block: &Block,
network: Network,
block_miner_fees: Amount<NonNegative>,
) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
let transparent_value_balance: Amount = subsidy::general::output_amounts(coinbase)
.iter()
.sum::<Result<Amount<NonNegative>, AmountError>>()
.map_err(|_| SubsidyError::SumOverflow)?
.constrain()
.expect("positive value always fit in `NegativeAllowed`");
let sapling_value_balance = coinbase.sapling_value_balance().sapling_amount();
let orchard_value_balance = coinbase.orchard_value_balance().orchard_amount();
let block_subsidy = subsidy::general::block_subsidy(height, network)
.expect("a valid block subsidy for this height and network");
// # Consensus
//
// > The total value in zatoshi of transparent outputs from a coinbase transaction,
// > minus vbalanceSapling, minus vbalanceOrchard, MUST NOT be greater than the value
// > in zatoshi of block subsidy plus the transaction fees paid by transactions in this block.
//
// https://zips.z.cash/protocol/protocol.pdf#txnconsensus
let left = (transparent_value_balance - sapling_value_balance - orchard_value_balance)
.map_err(|_| SubsidyError::SumOverflow)?;
let right = (block_subsidy + block_miner_fees).map_err(|_| SubsidyError::SumOverflow)?;
if left > right {
Err(SubsidyError::InvalidMinerFees)?;
}
Ok(())
}
/// Returns `Ok(())` if `header.time` is less than or equal to
/// 2 hours in the future, according to the node's local clock (`now`).
///
/// This is a non-deterministic rule, as clocks vary over time, and
/// between different nodes.
///
/// "In addition, a full validator MUST NOT accept blocks with nTime
/// more than two hours in the future according to its clock. This
/// is not strictly a consensus rule because it is nondeterministic,
/// and clock time varies between nodes. Also note that a block that
/// is rejected by this rule at a given point in time may later be
/// accepted." [§7.5][7.5]
///
/// [7.5]: https://zips.z.cash/protocol/protocol.pdf#blockheader
///
/// If the header time is invalid, returns an error containing `height` and `hash`.
pub fn time_is_valid_at(
header: &Header,
now: DateTime<Utc>,
height: &Height,
hash: &Hash,
) -> Result<(), zebra_chain::block::BlockTimeError> {
header.time_is_valid_at(now, height, hash)
}
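// Editor's note: an illustrative version of the two-hour rule above, not
// part of zebra-consensus; the real check lives in
// zebra_chain::block::Header::time_is_valid_at. Written directly against
// chrono, which this file already depends on.
fn header_time_ok_sketch(
    header_time: chrono::DateTime<chrono::Utc>,
    now: chrono::DateTime<chrono::Utc>,
) -> bool {
    header_time <= now + chrono::Duration::hours(2)
}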
/// Check Merkle root validity.
///
/// `transaction_hashes` is a precomputed list of transaction hashes.
///
/// # Consensus rules:
///
/// - A SHA-256d hash in internal byte order. The merkle root is derived from the
/// hashes of all transactions included in this block, ensuring that none of
/// those transactions can be modified without modifying the header. [7.6]
///
/// # Panics
///
/// - If block does not have a coinbase transaction.
///
/// [ZIP-244]: https://zips.z.cash/zip-0244
/// [7.1]: https://zips.z.cash/protocol/nu5.pdf#txnencodingandconsensus
/// [7.6]: https://zips.z.cash/protocol/nu5.pdf#blockheader
pub fn merkle_root_validity(
network: Network,
block: &Block,
transaction_hashes: &[transaction::Hash],
) -> Result<(), BlockError> {
// TODO: deduplicate zebra-chain and zebra-consensus errors (#2908)
block
.check_transaction_network_upgrade_consistency(network)
.map_err(|_| BlockError::WrongTransactionConsensusBranchId)?;
let merkle_root = transaction_hashes.iter().cloned().collect();
if block.header.merkle_root != merkle_root {
return Err(BlockError::BadMerkleRoot {
actual: merkle_root,
expected: block.header.merkle_root,
});
}
// Bitcoin's transaction Merkle trees are malleable, allowing blocks with
// duplicate transactions to have the same Merkle root as blocks without
// duplicate transactions.
//
// Collecting into a HashSet deduplicates, so this checks that there are no
// duplicate transaction hashes, preventing Merkle root malleability.
//
// ## Full Block Validation
//
// Duplicate transactions should cause a block to be
// rejected, as duplicate transactions imply that the block contains a
// double-spend. As a defense-in-depth, however, we also check that there
// are no duplicate transaction hashes.
//
// ## Checkpoint Validation
//
// To prevent malleability (CVE-2012-2459), we also need to check
// whether the transaction hashes are unique.
if transaction_hashes.len() != transaction_hashes.iter().collect::<HashSet<_>>().len() {
return Err(BlockError::DuplicateTransaction);
}
Ok(())
}
| miner_fees_are_valid | identifier_name |
check.rs | //! Consensus check functions
use std::{collections::HashSet, sync::Arc};
use chrono::{DateTime, Utc};
use zebra_chain::{
amount::{Amount, Error as AmountError, NonNegative},
block::{Block, Hash, Header, Height},
parameters::{Network, NetworkUpgrade},
transaction,
work::{difficulty::ExpandedDifficulty, equihash},
};
use crate::{error::*, parameters::SLOW_START_INTERVAL};
use super::subsidy;
/// Checks if there is exactly one coinbase transaction in `Block`,
/// and if that coinbase transaction is the first transaction in the block.
/// Returns the coinbase transaction if successful.
///
/// > A transaction that has a single transparent input with a null prevout field,
/// > is called a coinbase transaction. Every block has a single coinbase
/// > transaction as the first transaction in the block.
///
/// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
pub fn coinbase_is_first(block: &Block) -> Result<Arc<transaction::Transaction>, BlockError> {
// # Consensus
//
// > A block MUST have at least one transaction
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
let first = block
.transactions
.get(0)
.ok_or(BlockError::NoTransactions)?;
// > The first transaction in a block MUST be a coinbase transaction,
// > and subsequent transactions MUST NOT be coinbase transactions.
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
//
// > A transaction that has a single transparent input with a null prevout
// > field, is called a coinbase transaction.
//
// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
let mut rest = block.transactions.iter().skip(1);
if !first.is_coinbase() {
Err(TransactionError::CoinbasePosition)?;
}
// > A transparent input in a non-coinbase transaction MUST NOT have a null prevout
//
// <https://zips.z.cash/protocol/protocol.pdf#txnconsensus>
if !rest.all(|tx| tx.is_valid_non_coinbase()) {
Err(TransactionError::CoinbaseAfterFirst)?;
}
Ok(first.clone())
}
/// Returns `Ok(ExpandedDifficulty)` if the `difficulty_threshold` of `header` is at least as difficult as
/// the target difficulty limit for `network` (PoWLimit)
///
/// If the header difficulty threshold is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_threshold_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<ExpandedDifficulty, BlockError> {
let difficulty_threshold = header
.difficulty_threshold
.to_expanded()
.ok_or(BlockError::InvalidDifficulty(*height, *hash))?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// The PowLimit check is part of `Threshold()` in the spec, but it doesn't
// actually depend on any previous blocks.
if difficulty_threshold > ExpandedDifficulty::target_difficulty_limit(network) {
Err(BlockError::TargetDifficultyLimit(
*height,
*hash,
difficulty_threshold,
network,
ExpandedDifficulty::target_difficulty_limit(network),
))?;
}
Ok(difficulty_threshold)
}
/// Returns `Ok(())` if `hash` passes:
/// - the target difficulty limit for `network` (PoWLimit), and
/// - the difficulty filter,
/// based on the fields in `header`.
///
/// If the block is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<(), BlockError> {
let difficulty_threshold = difficulty_threshold_is_valid(header, network, height, hash)?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// # Consensus
//
// > The block MUST pass the difficulty filter.
//
// https://zips.z.cash/protocol/protocol.pdf#blockheader
//
// The difficulty filter is also context-free.
if hash > &difficulty_threshold {
Err(BlockError::DifficultyFilter(
*height,
*hash,
difficulty_threshold,
network,
))?;
}
Ok(())
}
/// Returns `Ok(())` if the `EquihashSolution` is valid for `header`
pub fn equihash_solution_is_valid(header: &Header) -> Result<(), equihash::Error> { | //
// https://zips.z.cash/protocol/protocol.pdf#blockheader
header.solution.check(header)
}
/// Returns `Ok(())` if the block subsidy in `block` is valid for `network`
///
/// [3.9]: https://zips.z.cash/protocol/protocol.pdf#subsidyconcepts
pub fn subsidy_is_valid(block: &Block, network: Network) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
// Validate funding streams
let Some(halving_div) = subsidy::general::halving_divisor(height, network) else {
// Far future halving, with no founders reward or funding streams
return Ok(());
};
let canopy_activation_height = NetworkUpgrade::Canopy
.activation_height(network)
.expect("Canopy activation height is known");
if height < SLOW_START_INTERVAL {
unreachable!(
"unsupported block height: callers should handle blocks below {:?}",
SLOW_START_INTERVAL
)
} else if halving_div.count_ones() != 1 {
unreachable!("invalid halving divisor: the halving divisor must be a non-zero power of two")
} else if height < canopy_activation_height {
// Founders rewards are paid up to Canopy activation, on both mainnet and testnet.
// But we checkpoint in Canopy so founders reward does not apply for Zebra.
unreachable!("we cannot verify consensus rules before Canopy activation");
} else if halving_div < 4 {
// Funding streams are paid from Canopy activation to the second halving
// Note: Canopy activation is at the first halving on mainnet, but not on testnet
// ZIP-1014 only applies to mainnet, ZIP-214 contains the specific rules for testnet
// funding stream amount values
let funding_streams = subsidy::funding_streams::funding_stream_values(height, network)
.expect("We always expect a funding stream hashmap response even if empty");
// # Consensus
//
// > [Canopy onward] The coinbase transaction at block height `height`
// > MUST contain at least one output per funding stream `fs` active at `height`,
// > that pays `fs.Value(height)` zatoshi in the prescribed way to the stream's
// > recipient address represented by `fs.AddressList[fs.AddressIndex(height)]`
//
// https://zips.z.cash/protocol/protocol.pdf#fundingstreams
for (receiver, expected_amount) in funding_streams {
let address =
subsidy::funding_streams::funding_stream_address(height, network, receiver);
let has_expected_output =
subsidy::funding_streams::filter_outputs_by_address(coinbase, address)
.iter()
.map(zebra_chain::transparent::Output::value)
.any(|value| value == expected_amount);
if !has_expected_output {
Err(SubsidyError::FundingStreamNotFound)?;
}
}
Ok(())
} else {
// Future halving, with no founders reward or funding streams
Ok(())
}
}
/// Returns `Ok(())` if the miner fees consensus rule is valid.
///
/// [7.1.2]: https://zips.z.cash/protocol/protocol.pdf#txnconsensus
pub fn miner_fees_are_valid(
block: &Block,
network: Network,
block_miner_fees: Amount<NonNegative>,
) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
let transparent_value_balance: Amount = subsidy::general::output_amounts(coinbase)
.iter()
.sum::<Result<Amount<NonNegative>, AmountError>>()
.map_err(|_| SubsidyError::SumOverflow)?
.constrain()
.expect("positive value always fit in `NegativeAllowed`");
let sapling_value_balance = coinbase.sapling_value_balance().sapling_amount();
let orchard_value_balance = coinbase.orchard_value_balance().orchard_amount();
let block_subsidy = subsidy::general::block_subsidy(height, network)
.expect("a valid block subsidy for this height and network");
// # Consensus
//
// > The total value in zatoshi of transparent outputs from a coinbase transaction,
// > minus vbalanceSapling, minus vbalanceOrchard, MUST NOT be greater than the value
// > in zatoshi of block subsidy plus the transaction fees paid by transactions in this block.
//
// https://zips.z.cash/protocol/protocol.pdf#txnconsensus
let left = (transparent_value_balance - sapling_value_balance - orchard_value_balance)
.map_err(|_| SubsidyError::SumOverflow)?;
let right = (block_subsidy + block_miner_fees).map_err(|_| SubsidyError::SumOverflow)?;
if left > right {
Err(SubsidyError::InvalidMinerFees)?;
}
Ok(())
}
/// Returns `Ok(())` if `header.time` is less than or equal to
/// 2 hours in the future, according to the node's local clock (`now`).
///
/// This is a non-deterministic rule, as clocks vary over time, and
/// between different nodes.
///
/// "In addition, a full validator MUST NOT accept blocks with nTime
/// more than two hours in the future according to its clock. This
/// is not strictly a consensus rule because it is nondeterministic,
/// and clock time varies between nodes. Also note that a block that
/// is rejected by this rule at a given point in time may later be
/// accepted." [§7.5][7.5]
///
/// [7.5]: https://zips.z.cash/protocol/protocol.pdf#blockheader
///
/// If the header time is invalid, returns an error containing `height` and `hash`.
pub fn time_is_valid_at(
header: &Header,
now: DateTime<Utc>,
height: &Height,
hash: &Hash,
) -> Result<(), zebra_chain::block::BlockTimeError> {
header.time_is_valid_at(now, height, hash)
}
/// Check Merkle root validity.
///
/// `transaction_hashes` is a precomputed list of transaction hashes.
///
/// # Consensus rules:
///
/// - A SHA-256d hash in internal byte order. The merkle root is derived from the
/// hashes of all transactions included in this block, ensuring that none of
/// those transactions can be modified without modifying the header. [7.6]
///
/// # Panics
///
/// - If block does not have a coinbase transaction.
///
/// [ZIP-244]: https://zips.z.cash/zip-0244
/// [7.1]: https://zips.z.cash/protocol/nu5.pdf#txnencodingandconsensus
/// [7.6]: https://zips.z.cash/protocol/nu5.pdf#blockheader
pub fn merkle_root_validity(
network: Network,
block: &Block,
transaction_hashes: &[transaction::Hash],
) -> Result<(), BlockError> {
// TODO: deduplicate zebra-chain and zebra-consensus errors (#2908)
block
.check_transaction_network_upgrade_consistency(network)
.map_err(|_| BlockError::WrongTransactionConsensusBranchId)?;
let merkle_root = transaction_hashes.iter().cloned().collect();
if block.header.merkle_root != merkle_root {
return Err(BlockError::BadMerkleRoot {
actual: merkle_root,
expected: block.header.merkle_root,
});
}
// Bitcoin's transaction Merkle trees are malleable, allowing blocks with
// duplicate transactions to have the same Merkle root as blocks without
// duplicate transactions.
//
// Collecting into a HashSet deduplicates, so this checks that there are no
// duplicate transaction hashes, preventing Merkle root malleability.
//
// ## Full Block Validation
//
// Duplicate transactions should cause a block to be
// rejected, as duplicate transactions imply that the block contains a
// double-spend. As a defense-in-depth, however, we also check that there
// are no duplicate transaction hashes.
//
// ## Checkpoint Validation
//
// To prevent malleability (CVE-2012-2459), we also need to check
// whether the transaction hashes are unique.
if transaction_hashes.len() != transaction_hashes.iter().collect::<HashSet<_>>().len() {
return Err(BlockError::DuplicateTransaction);
}
Ok(())
} | // # Consensus
//
// > `solution` MUST represent a valid Equihash solution. | random_line_split |
check.rs | //! Consensus check functions
use std::{collections::HashSet, sync::Arc};
use chrono::{DateTime, Utc};
use zebra_chain::{
amount::{Amount, Error as AmountError, NonNegative},
block::{Block, Hash, Header, Height},
parameters::{Network, NetworkUpgrade},
transaction,
work::{difficulty::ExpandedDifficulty, equihash},
};
use crate::{error::*, parameters::SLOW_START_INTERVAL};
use super::subsidy;
/// Checks if there is exactly one coinbase transaction in `Block`,
/// and if that coinbase transaction is the first transaction in the block.
/// Returns the coinbase transaction if successful.
///
/// > A transaction that has a single transparent input with a null prevout field,
/// > is called a coinbase transaction. Every block has a single coinbase
/// > transaction as the first transaction in the block.
///
/// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
pub fn coinbase_is_first(block: &Block) -> Result<Arc<transaction::Transaction>, BlockError> {
// # Consensus
//
// > A block MUST have at least one transaction
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
let first = block
.transactions
.get(0)
.ok_or(BlockError::NoTransactions)?;
// > The first transaction in a block MUST be a coinbase transaction,
// > and subsequent transactions MUST NOT be coinbase transactions.
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
//
// > A transaction that has a single transparent input with a null prevout
// > field, is called a coinbase transaction.
//
// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
let mut rest = block.transactions.iter().skip(1);
if !first.is_coinbase() {
Err(TransactionError::CoinbasePosition)?;
}
// > A transparent input in a non-coinbase transaction MUST NOT have a null prevout
//
// <https://zips.z.cash/protocol/protocol.pdf#txnconsensus>
if !rest.all(|tx| tx.is_valid_non_coinbase()) {
Err(TransactionError::CoinbaseAfterFirst)?;
}
Ok(first.clone())
}
/// Returns `Ok(ExpandedDifficulty)` if the `difficulty_threshold` of `header` is at least as difficult as
/// the target difficulty limit for `network` (PoWLimit)
///
/// If the header difficulty threshold is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_threshold_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<ExpandedDifficulty, BlockError> {
let difficulty_threshold = header
.difficulty_threshold
.to_expanded()
.ok_or(BlockError::InvalidDifficulty(*height, *hash))?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// The PowLimit check is part of `Threshold()` in the spec, but it doesn't
// actually depend on any previous blocks.
if difficulty_threshold > ExpandedDifficulty::target_difficulty_limit(network) {
Err(BlockError::TargetDifficultyLimit(
*height,
*hash,
difficulty_threshold,
network,
ExpandedDifficulty::target_difficulty_limit(network),
))?;
}
Ok(difficulty_threshold)
}
/// Returns `Ok(())` if `hash` passes:
/// - the target difficulty limit for `network` (PoWLimit), and
/// - the difficulty filter,
/// based on the fields in `header`.
///
/// If the block is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<(), BlockError> {
let difficulty_threshold = difficulty_threshold_is_valid(header, network, height, hash)?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// # Consensus
//
// > The block MUST pass the difficulty filter.
//
// https://zips.z.cash/protocol/protocol.pdf#blockheader
//
// The difficulty filter is also context-free.
if hash > &difficulty_threshold {
Err(BlockError::DifficultyFilter(
*height,
*hash,
difficulty_threshold,
network,
))?;
}
Ok(())
}
/// Returns `Ok(())` if the `EquihashSolution` is valid for `header`
pub fn equihash_solution_is_valid(header: &Header) -> Result<(), equihash::Error> {
// # Consensus
//
// > `solution` MUST represent a valid Equihash solution.
//
// https://zips.z.cash/protocol/protocol.pdf#blockheader
header.solution.check(header)
}
/// Returns `Ok(())` if the block subsidy in `block` is valid for `network`
///
/// [3.9]: https://zips.z.cash/protocol/protocol.pdf#subsidyconcepts
pub fn subsidy_is_valid(block: &Block, network: Network) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
// Validate funding streams
let Some(halving_div) = subsidy::general::halving_divisor(height, network) else {
// Far future halving, with no founders reward or funding streams
return Ok(());
};
let canopy_activation_height = NetworkUpgrade::Canopy
.activation_height(network)
.expect("Canopy activation height is known");
if height < SLOW_START_INTERVAL {
unreachable!(
"unsupported block height: callers should handle blocks below {:?}",
SLOW_START_INTERVAL
)
} else if halving_div.count_ones() != 1 {
unreachable!("invalid halving divisor: the halving divisor must be a non-zero power of two")
} else if height < canopy_activation_height {
// Founders rewards are paid up to Canopy activation, on both mainnet and testnet.
// But we checkpoint in Canopy so founders reward does not apply for Zebra.
unreachable!("we cannot verify consensus rules before Canopy activation");
} else if halving_div < 4 {
// Funding streams are paid from Canopy activation to the second halving
// Note: Canopy activation is at the first halving on mainnet, but not on testnet
// ZIP-1014 only applies to mainnet, ZIP-214 contains the specific rules for testnet
// funding stream amount values
let funding_streams = subsidy::funding_streams::funding_stream_values(height, network)
.expect("We always expect a funding stream hashmap response even if empty");
// # Consensus
//
// > [Canopy onward] The coinbase transaction at block height `height`
// > MUST contain at least one output per funding stream `fs` active at `height`,
// > that pays `fs.Value(height)` zatoshi in the prescribed way to the stream's
// > recipient address represented by `fs.AddressList[fs.AddressIndex(height)]`
//
// https://zips.z.cash/protocol/protocol.pdf#fundingstreams
for (receiver, expected_amount) in funding_streams {
let address =
subsidy::funding_streams::funding_stream_address(height, network, receiver);
let has_expected_output =
subsidy::funding_streams::filter_outputs_by_address(coinbase, address)
.iter()
.map(zebra_chain::transparent::Output::value)
.any(|value| value == expected_amount);
if !has_expected_output {
Err(SubsidyError::FundingStreamNotFound)?;
}
}
Ok(())
} else |
}
/// Returns `Ok(())` if the miner fees consensus rule is valid.
///
/// [7.1.2]: https://zips.z.cash/protocol/protocol.pdf#txnconsensus
pub fn miner_fees_are_valid(
block: &Block,
network: Network,
block_miner_fees: Amount<NonNegative>,
) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
let transparent_value_balance: Amount = subsidy::general::output_amounts(coinbase)
.iter()
.sum::<Result<Amount<NonNegative>, AmountError>>()
.map_err(|_| SubsidyError::SumOverflow)?
.constrain()
.expect("positive value always fit in `NegativeAllowed`");
let sapling_value_balance = coinbase.sapling_value_balance().sapling_amount();
let orchard_value_balance = coinbase.orchard_value_balance().orchard_amount();
let block_subsidy = subsidy::general::block_subsidy(height, network)
.expect("a valid block subsidy for this height and network");
// # Consensus
//
// > The total value in zatoshi of transparent outputs from a coinbase transaction,
// > minus vbalanceSapling, minus vbalanceOrchard, MUST NOT be greater than the value
// > in zatoshi of block subsidy plus the transaction fees paid by transactions in this block.
//
// https://zips.z.cash/protocol/protocol.pdf#txnconsensus
let left = (transparent_value_balance - sapling_value_balance - orchard_value_balance)
.map_err(|_| SubsidyError::SumOverflow)?;
let right = (block_subsidy + block_miner_fees).map_err(|_| SubsidyError::SumOverflow)?;
if left > right {
Err(SubsidyError::InvalidMinerFees)?;
}
Ok(())
}
/// Returns `Ok(())` if `header.time` is less than or equal to
/// 2 hours in the future, according to the node's local clock (`now`).
///
/// This is a non-deterministic rule, as clocks vary over time, and
/// between different nodes.
///
/// "In addition, a full validator MUST NOT accept blocks with nTime
/// more than two hours in the future according to its clock. This
/// is not strictly a consensus rule because it is nondeterministic,
/// and clock time varies between nodes. Also note that a block that
/// is rejected by this rule at a given point in time may later be
/// accepted." [§7.5][7.5]
///
/// [7.5]: https://zips.z.cash/protocol/protocol.pdf#blockheader
///
/// If the header time is invalid, returns an error containing `height` and `hash`.
pub fn time_is_valid_at(
header: &Header,
now: DateTime<Utc>,
height: &Height,
hash: &Hash,
) -> Result<(), zebra_chain::block::BlockTimeError> {
header.time_is_valid_at(now, height, hash)
}
/// Check Merkle root validity.
///
/// `transaction_hashes` is a precomputed list of transaction hashes.
///
/// # Consensus rules:
///
/// - A SHA-256d hash in internal byte order. The merkle root is derived from the
/// hashes of all transactions included in this block, ensuring that none of
/// those transactions can be modified without modifying the header. [7.6]
///
/// # Panics
///
/// - If block does not have a coinbase transaction.
///
/// [ZIP-244]: https://zips.z.cash/zip-0244
/// [7.1]: https://zips.z.cash/protocol/nu5.pdf#txnencodingandconsensus
/// [7.6]: https://zips.z.cash/protocol/nu5.pdf#blockheader
pub fn merkle_root_validity(
network: Network,
block: &Block,
transaction_hashes: &[transaction::Hash],
) -> Result<(), BlockError> {
// TODO: deduplicate zebra-chain and zebra-consensus errors (#2908)
block
.check_transaction_network_upgrade_consistency(network)
.map_err(|_| BlockError::WrongTransactionConsensusBranchId)?;
let merkle_root = transaction_hashes.iter().cloned().collect();
if block.header.merkle_root != merkle_root {
return Err(BlockError::BadMerkleRoot {
actual: merkle_root,
expected: block.header.merkle_root,
});
}
// Bitcoin's transaction Merkle trees are malleable, allowing blocks with
// duplicate transactions to have the same Merkle root as blocks without
// duplicate transactions.
//
// Collecting into a HashSet deduplicates, so this checks that there are no
// duplicate transaction hashes, preventing Merkle root malleability.
//
// ## Full Block Validation
//
// Duplicate transactions should cause a block to be
// rejected, as duplicate transactions imply that the block contains a
// double-spend. As a defense-in-depth, however, we also check that there
// are no duplicate transaction hashes.
//
// ## Checkpoint Validation
//
// To prevent malleability (CVE-2012-2459), we also need to check
// whether the transaction hashes are unique.
if transaction_hashes.len() != transaction_hashes.iter().collect::<HashSet<_>>().len() {
return Err(BlockError::DuplicateTransaction);
}
Ok(())
}
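// Editor's note: a purely illustrative composition of the checks defined in
// this file, showing how their signatures fit together; zebra-consensus
// actually wires them up inside its block verifier service, not like this.
fn context_free_checks_sketch(
    block: &Block,
    header: &Header,
    network: Network,
    height: &Height,
    hash: &Hash,
    transaction_hashes: &[transaction::Hash],
) -> Result<(), BlockError> {
    let _coinbase = coinbase_is_first(block)?;
    difficulty_is_valid(header, network, height, hash)?;
    subsidy_is_valid(block, network)?;
    merkle_root_validity(network, block, transaction_hashes)?;
    Ok(())
}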
| {
// Future halving, with no founders reward or funding streams
Ok(())
} | conditional_block |
alias.rs | import syntax::{ast, ast_util};
import ast::{ident, fn_ident, node_id};
import syntax::codemap::span;
import syntax::visit;
import visit::vt;
import core::{vec, option};
import std::list;
import option::{some, none, is_none};
import list::list;
// This is not an alias-analyser (though it would benefit from becoming one, or
// getting input from one, to be more precise). It is a pass that checks
// whether aliases are used in a safe way.
tag copied { not_allowed; copied; not_copied; }
tag invalid_reason { overwritten; val_taken; }
type invalid = {reason: invalid_reason,
node_id: node_id,
sp: span, path: @ast::path};
tag unsafe_ty { contains(ty::t); mut_contains(ty::t); }
type binding = @{node_id: node_id,
span: span,
root_var: option::t<node_id>,
local_id: uint,
unsafe_tys: [unsafe_ty],
mutable copied: copied};
// FIXME it may be worthwhile to use a linked list of bindings instead
type scope = {bs: [binding],
invalid: @mutable list<@invalid>};
fn mk_binding(cx: ctx, id: node_id, span: span, root_var: option::t<node_id>,
unsafe_tys: [unsafe_ty]) -> binding {
alt root_var {
some(r_id) { cx.ref_map.insert(id, r_id); }
_ {}
}
ret @{node_id: id, span: span, root_var: root_var,
local_id: local_id_of_node(cx, id),
unsafe_tys: unsafe_tys,
mutable copied: not_copied};
}
tag local_info { local(uint); }
type copy_map = std::map::hashmap<node_id, ()>;
type ref_map = std::map::hashmap<node_id, node_id>;
type ctx = {tcx: ty::ctxt,
copy_map: copy_map,
ref_map: ref_map,
mutable silent: bool};
fn check_crate(tcx: ty::ctxt, crate: @ast::crate) -> (copy_map, ref_map) {
// Stores information about object fields and function
// arguments that's otherwise not easily available.
let cx = @{tcx: tcx,
copy_map: std::map::new_int_hash(),
ref_map: std::map::new_int_hash(),
mutable silent: false};
let v = @{visit_fn: bind visit_fn(cx, _, _, _, _, _, _, _),
visit_expr: bind visit_expr(cx, _, _, _),
visit_block: bind visit_block(cx, _, _, _)
with *visit::default_visitor::<scope>()};
let sc = {bs: [], invalid: @mutable list::nil};
visit::visit_crate(*crate, sc, visit::mk_vt(v));
tcx.sess.abort_if_errors();
ret (cx.copy_map, cx.ref_map);
}
fn visit_fn(cx: @ctx, _fk: visit::fn_kind, decl: ast::fn_decl,
body: ast::blk, sp: span,
id: ast::node_id, sc: scope, v: vt<scope>) {
visit::visit_fn_decl(decl, sc, v);
let fty = ty::node_id_to_type(cx.tcx, id);
let args = ty::ty_fn_args(cx.tcx, fty);
for arg in args {
if arg.mode == ast::by_val &&
ty::type_has_dynamic_size(cx.tcx, arg.ty) {
err(*cx, sp, "can not pass a dynamically-sized type by value");
}
}
// Blocks need to obey any restrictions from the enclosing scope, and may
// be called multiple times.
let proto = ty::ty_fn_proto(cx.tcx, fty);
if proto == ast::proto_block {
check_loop(*cx, sc) {|| v.visit_block(body, sc, v);}
} else {
let sc = {bs: [], invalid: @mutable list::nil};
v.visit_block(body, sc, v);
}
}
fn visit_expr(cx: @ctx, ex: @ast::expr, sc: scope, v: vt<scope>) {
let handled = true;
alt ex.node {
ast::expr_call(f, args, _) {
check_call(*cx, sc, f, args);
handled = false;
}
ast::expr_alt(input, arms) { check_alt(*cx, input, arms, sc, v); }
ast::expr_for(decl, seq, blk) {
v.visit_expr(seq, sc, v);
check_loop(*cx, sc) {|| check_for(*cx, decl, seq, blk, sc, v); }
}
ast::expr_path(pt) {
check_var(*cx, ex, pt, ex.id, false, sc);
handled = false;
}
ast::expr_swap(lhs, rhs) {
check_lval(cx, lhs, sc, v);
check_lval(cx, rhs, sc, v);
handled = false;
}
ast::expr_move(dest, src) {
check_assign(cx, dest, src, sc, v);
check_lval(cx, src, sc, v);
}
ast::expr_assign(dest, src) | ast::expr_assign_op(_, dest, src) {
check_assign(cx, dest, src, sc, v);
}
ast::expr_if(c, then, els) { check_if(c, then, els, sc, v); }
ast::expr_while(_, _) | ast::expr_do_while(_, _) {
check_loop(*cx, sc) {|| visit::visit_expr(ex, sc, v); }
}
_ { handled = false; }
}
if !handled { visit::visit_expr(ex, sc, v); }
}
fn visit_block(cx: @ctx, b: ast::blk, sc: scope, v: vt<scope>) {
let bs = sc.bs, sc = sc;
for stmt in b.node.stmts {
alt stmt.node {
ast::stmt_decl(@{node: ast::decl_item(it), _}, _) {
v.visit_item(it, sc, v);
}
ast::stmt_decl(@{node: ast::decl_local(locs), _}, _) {
for (st, loc) in locs {
if st == ast::let_ref {
add_bindings_for_let(*cx, bs, loc);
sc = {bs: bs with sc};
}
alt loc.node.init {
some(init) {
if init.op == ast::init_move {
check_lval(cx, init.expr, sc, v);
}
}
none. { }
}
}
}
ast::stmt_expr(ex, _) | ast::stmt_semi(ex, _) {
v.visit_expr(ex, sc, v);
}
}
}
visit::visit_expr_opt(b.node.expr, sc, v);
}
fn add_bindings_for_let(cx: ctx, &bs: [binding], loc: @ast::local) {
alt loc.node.init {
some(init) {
if init.op == ast::init_move {
err(cx, loc.span, "can not move into a by-reference binding");
}
let root = expr_root(cx, init.expr, false);
let root_var = path_def_id(cx, root.ex);
if is_none(root_var) {
err(cx, loc.span, "a reference binding can't be \
rooted in a temporary");
}
for proot in pattern_roots(cx.tcx, root.mut, loc.node.pat) {
let bnd = mk_binding(cx, proot.id, proot.span, root_var,
unsafe_set(proot.mut));
// Don't implicitly copy explicit references
bnd.copied = not_allowed;
bs += [bnd];
}
}
_ {
err(cx, loc.span, "by-reference bindings must be initialized");
}
}
}
fn cant_copy(cx: ctx, b: binding) -> bool {
alt b.copied {
not_allowed. { ret true; }
copied. { ret false; }
not_copied. {}
}
let ty = ty::node_id_to_type(cx.tcx, b.node_id);
if ty::type_allows_implicit_copy(cx.tcx, ty) {
b.copied = copied;
cx.copy_map.insert(b.node_id, ());
if copy_is_expensive(cx.tcx, ty) {
cx.tcx.sess.span_warn(b.span,
"inserting an implicit copy for type " +
util::ppaux::ty_to_str(cx.tcx, ty));
}
ret false;
} else { ret true; }
}
fn check_call(cx: ctx, sc: scope, f: @ast::expr, args: [@ast::expr])
-> [binding] {
let fty = ty::expr_ty(cx.tcx, f);
let arg_ts = ty::ty_fn_args(cx.tcx, fty);
let mut_roots: [{arg: uint, node: node_id}] = [];
let bindings = [];
let i = 0u;
for arg_t: ty::arg in arg_ts {
let arg = args[i];
let root = expr_root(cx, arg, false);
if arg_t.mode == ast::by_mut_ref {
alt path_def(cx, arg) {
some(def) {
let dnum = ast_util::def_id_of_def(def).node;
mut_roots += [{arg: i, node: dnum}];
}
_ { }
}
}
let root_var = path_def_id(cx, root.ex);
bindings += [@{node_id: arg.id,
span: arg.span,
root_var: root_var,
local_id: 0u,
unsafe_tys: unsafe_set(root.mut),
mutable copied: alt arg_t.mode {
ast::by_move. | ast::by_copy. { copied }
ast::by_mut_ref. { not_allowed }
_ { not_copied }
}}];
i += 1u;
}
let f_may_close =
alt f.node {
ast::expr_path(_) { def_is_local(cx.tcx.def_map.get(f.id)) }
_ { true }
};
if f_may_close {
let i = 0u;
for b in bindings {
let unsfe = vec::len(b.unsafe_tys) > 0u;
alt b.root_var {
some(rid) {
for o in sc.bs {
if o.node_id == rid && vec::len(o.unsafe_tys) > 0u {
unsfe = true; break;
}
}
}
_ {}
}
if unsfe && cant_copy(cx, b) {
err(cx, f.span, #fmt["function may alias with argument \
%u, which is not immutably rooted", i]);
}
i += 1u;
}
}
let j = 0u;
for b in bindings {
for unsafe_ty in b.unsafe_tys {
let i = 0u;
for arg_t: ty::arg in arg_ts {
let mut_alias = arg_t.mode == ast::by_mut_ref;
if i != j &&
ty_can_unsafely_include(cx, unsafe_ty, arg_t.ty,
mut_alias) &&
cant_copy(cx, b) {
err(cx, args[i].span,
#fmt["argument %u may alias with argument %u, \
which is not immutably rooted", i, j]);
}
i += 1u;
}
}
j += 1u;
}
// Ensure we're not passing a root by mutable alias.
for {node: node, arg: arg} in mut_roots {
let i = 0u;
for b in bindings {
if i != arg {
alt b.root_var {
some(root) {
if node == root && cant_copy(cx, b) {
err(cx, args[arg].span,
"passing a mutable reference to a \
variable that roots another reference");
break;
}
}
none. { }
}
}
i += 1u;
}
}
ret bindings;
}
fn check_alt(cx: ctx, input: @ast::expr, arms: [ast::arm], sc: scope,
v: vt<scope>) {
v.visit_expr(input, sc, v);
let orig_invalid = *sc.invalid;
let all_invalid = orig_invalid;
let root = expr_root(cx, input, true);
for a: ast::arm in arms {
let new_bs = sc.bs;
let root_var = path_def_id(cx, root.ex);
let pat_id_map = ast_util::pat_id_map(a.pats[0]);
type info = {
id: node_id,
mutable unsafe_tys: [unsafe_ty],
span: span};
let binding_info: [info] = [];
for pat in a.pats {
for proot in pattern_roots(cx.tcx, root.mut, pat) {
let canon_id = pat_id_map.get(proot.name);
alt vec::find(binding_info, {|x| x.id == canon_id}) {
some(s) { s.unsafe_tys += unsafe_set(proot.mut); }
none. {
binding_info += [
{id: canon_id,
mutable unsafe_tys: unsafe_set(proot.mut),
span: proot.span}];
}
}
}
}
for info in binding_info {
new_bs += [mk_binding(cx, info.id, info.span, root_var,
copy info.unsafe_tys)];
}
*sc.invalid = orig_invalid;
visit::visit_arm(a, {bs: new_bs with sc}, v);
all_invalid = append_invalid(all_invalid, *sc.invalid, orig_invalid);
}
*sc.invalid = all_invalid;
}
fn check_for(cx: ctx, local: @ast::local, seq: @ast::expr, blk: ast::blk,
sc: scope, v: vt<scope>) {
let root = expr_root(cx, seq, false);
// If this is a mutable vector, don't allow it to be touched.
let seq_t = ty::expr_ty(cx.tcx, seq);
let cur_mut = root.mut;
alt ty::struct(cx.tcx, seq_t) {
ty::ty_vec(mt) {
if mt.mut != ast::imm {
cur_mut = some(contains(seq_t));
}
}
_ {}
}
let root_var = path_def_id(cx, root.ex);
let new_bs = sc.bs;
for proot in pattern_roots(cx.tcx, cur_mut, local.node.pat) {
new_bs += [mk_binding(cx, proot.id, proot.span, root_var,
unsafe_set(proot.mut))];
}
visit::visit_block(blk, {bs: new_bs with sc}, v);
}
fn check_var(cx: ctx, ex: @ast::expr, p: @ast::path, id: ast::node_id,
assign: bool, sc: scope) {
let def = cx.tcx.def_map.get(id);
if !def_is_local(def) { ret; }
let my_defnum = ast_util::def_id_of_def(def).node;
let my_local_id = local_id_of_node(cx, my_defnum);
let var_t = ty::expr_ty(cx.tcx, ex);
for b in sc.bs {
// excludes variables introduced since the alias was made
if my_local_id < b.local_id {
for unsafe_ty in b.unsafe_tys {
if ty_can_unsafely_include(cx, unsafe_ty, var_t, assign) {
let inv = @{reason: val_taken, node_id: b.node_id,
sp: ex.span, path: p};
*sc.invalid = list::cons(inv, @*sc.invalid);
}
}
} else if b.node_id == my_defnum {
test_scope(cx, sc, b, p);
}
}
}
fn check_lval(cx: @ctx, dest: @ast::expr, sc: scope, v: vt<scope>) {
alt dest.node {
ast::expr_path(p) {
let def = cx.tcx.def_map.get(dest.id);
let dnum = ast_util::def_id_of_def(def).node;
for b in sc.bs {
if b.root_var == some(dnum) {
let inv = @{reason: overwritten, node_id: b.node_id,
sp: dest.span, path: p};
*sc.invalid = list::cons(inv, @*sc.invalid);
}
}
}
_ { visit_expr(cx, dest, sc, v); }
}
}
fn check_assign(cx: @ctx, dest: @ast::expr, src: @ast::expr, sc: scope,
v: vt<scope>) {
visit_expr(cx, src, sc, v);
check_lval(cx, dest, sc, v);
}
fn check_if(c: @ast::expr, then: ast::blk, els: option::t<@ast::expr>,
sc: scope, v: vt<scope>) {
v.visit_expr(c, sc, v);
let orig_invalid = *sc.invalid;
v.visit_block(then, sc, v);
let then_invalid = *sc.invalid;
*sc.invalid = orig_invalid;
visit::visit_expr_opt(els, sc, v);
*sc.invalid = append_invalid(*sc.invalid, then_invalid, orig_invalid);
}
fn check_loop(cx: ctx, sc: scope, checker: block()) {
let orig_invalid = filter_invalid(*sc.invalid, sc.bs);
checker();
let new_invalid = filter_invalid(*sc.invalid, sc.bs);
// Have to check contents of loop again if it invalidated an alias
if list::len(orig_invalid) < list::len(new_invalid) {
let old_silent = cx.silent;
cx.silent = true;
checker();
cx.silent = old_silent;
}
*sc.invalid = new_invalid;
}
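// Editor's note: an illustrative model (in present-day Rust, not the 2011
// dialect of this file) of the control flow above, not part of rustc. The
// closure and the usize counts are stand-ins for the scope's invalid list:
// run the body once, and if it invalidated more aliases than before, run it
// again with diagnostics silenced so earlier statements are re-checked
// without duplicate errors.
fn check_loop_sketch(mut check_body: impl FnMut(bool) -> usize, invalid_before: usize) -> usize {
    let invalid_after = check_body(false);
    if invalid_after > invalid_before {
        check_body(true); // silent re-check
    }
    invalid_after
}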
fn test_scope(cx: ctx, sc: scope, b: binding, p: @ast::path) {
let prob = find_invalid(b.node_id, *sc.invalid);
alt b.root_var {
some(dn) {
for other in sc.bs {
if !is_none(prob) { break; }
if other.node_id == dn {
prob = find_invalid(other.node_id, *sc.invalid);
}
}
}
_ {}
}
if !is_none(prob) && cant_copy(cx, b) {
let i = option::get(prob);
let msg = alt i.reason {
overwritten. { "overwriting " + ast_util::path_name(i.path) }
val_taken. { "taking the value of " + ast_util::path_name(i.path) }
};
err(cx, i.sp, msg + " will invalidate reference " +
ast_util::path_name(p) + ", which is still used");
}
}
fn path_def(cx: ctx, ex: @ast::expr) -> option::t<ast::def> {
ret alt ex.node {
ast::expr_path(_) { some(cx.tcx.def_map.get(ex.id)) }
_ { none }
}
}
fn path_def_id(cx: ctx, ex: @ast::expr) -> option::t<ast::node_id> {
alt ex.node {
ast::expr_path(_) {
ret some(ast_util::def_id_of_def(cx.tcx.def_map.get(ex.id)).node);
}
_ { ret none; }
}
}
fn ty_can_unsafely_include(cx: ctx, needle: unsafe_ty, haystack: ty::t,
mut: bool) -> bool {
fn get_mut(cur: bool, mt: ty::mt) -> bool {
ret cur || mt.mut != ast::imm;
}
fn helper(tcx: ty::ctxt, needle: unsafe_ty, haystack: ty::t, mut: bool)
-> bool {
if alt needle {
contains(ty) { ty == haystack }
mut_contains(ty) { mut && ty == haystack }
} { ret true; }
alt ty::struct(tcx, haystack) {
ty::ty_tag(_, ts) {
for t: ty::t in ts {
if helper(tcx, needle, t, mut) { ret true; }
}
ret false;
}
ty::ty_box(mt) | ty::ty_ptr(mt) | ty::ty_uniq(mt) {
ret helper(tcx, needle, mt.ty, get_mut(mut, mt));
}
ty::ty_rec(fields) {
for f: ty::field in fields {
if helper(tcx, needle, f.mt.ty, get_mut(mut, f.mt)) {
ret true;
}
}
ret false;
}
ty::ty_tup(ts) {
for t in ts { if helper(tcx, needle, t, mut) { ret true; } }
ret false;
}
ty::ty_fn({proto: ast::proto_bare., _}) { ret false; }
// These may contain anything.
ty::ty_fn(_) | ty::ty_obj(_) { ret true; }
// A type param may include everything, but can only be
// treated as opaque downstream, and is thus safe unless we
// saw mutable fields, in which case the whole thing can be
// overwritten.
ty::ty_param(_, _) { ret mut; }
_ { ret false; }
}
}
ret helper(cx.tcx, needle, haystack, mut);
}
fn def_is_local(d: ast::def) -> bool {
alt d {
ast::def_local(_, _) | ast::def_arg(_, _) | ast::def_binding(_) |
ast::def_upvar(_, _, _) | ast::def_self(_) |
ast::def_obj_field(_, _) { true }
_ { false }
}
}
fn local_id_of_node(cx: ctx, id: node_id) -> uint {
alt cx.tcx.items.find(id) {
some(ast_map::node_arg(_, id)) | some(ast_map::node_local(id)) { id }
_ { 0u }
}
}
// Heuristic, somewhat random way to decide whether to warn when inserting an
// implicit copy.
fn copy_is_expensive(tcx: ty::ctxt, ty: ty::t) -> bool {
fn score_ty(tcx: ty::ctxt, ty: ty::t) -> uint {
ret alt ty::struct(tcx, ty) {
ty::ty_nil. | ty::ty_bot. | ty::ty_bool. | ty::ty_int(_) |
ty::ty_uint(_) | ty::ty_float(_) | ty::ty_type. | ty::ty_native(_) |
ty::ty_ptr(_) { 1u }
ty::ty_box(_) { 3u }
ty::ty_constr(t, _) | ty::ty_res(_, t, _) { score_ty(tcx, t) }
ty::ty_fn(_) | ty::ty_native_fn(_, _) |
ty::ty_obj(_) { 4u }
ty::ty_str. | ty::ty_vec(_) | ty::ty_param(_, _) { 50u }
ty::ty_uniq(mt) { 1u + score_ty(tcx, mt.ty) }
ty::ty_tag(_, ts) | ty::ty_tup(ts) {
let sum = 0u;
for t in ts { sum += score_ty(tcx, t); }
sum
}
ty::ty_rec(fs) {
let sum = 0u;
for f in fs { sum += score_ty(tcx, f.mt.ty); }
sum
}
};
}
ret score_ty(tcx, ty) > 8u;
}
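// Editor's note: an illustrative model (in present-day Rust, not the 2011
// dialect of this file) of the scoring heuristic above, not part of rustc.
// The enum is a simplified stand-in for ty::struct; scalars score 1, boxes 3,
// vectors/strings/type params 50, and aggregates sum their parts, with 8 as
// the warning cut-off used above.
enum TyShapeSketch {
    Scalar,
    Boxed,
    VecLike,
    Aggregate(Vec<TyShapeSketch>),
}

fn score_sketch(ty: &TyShapeSketch) -> u32 {
    match ty {
        TyShapeSketch::Scalar => 1,
        TyShapeSketch::Boxed => 3,
        TyShapeSketch::VecLike => 50,
        TyShapeSketch::Aggregate(parts) => parts.iter().map(score_sketch).sum(),
    }
}

fn copy_is_expensive_sketch(ty: &TyShapeSketch) -> bool {
    score_sketch(ty) > 8
}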
type pattern_root = {id: node_id,
name: ident,
mut: option::t<unsafe_ty>,
span: span};
fn pattern_roots(tcx: ty::ctxt, mut: option::t<unsafe_ty>, pat: @ast::pat)
-> [pattern_root] {
fn walk(tcx: ty::ctxt, mut: option::t<unsafe_ty>, pat: @ast::pat,
&set: [pattern_root]) {
alt pat.node {
ast::pat_wild. | ast::pat_lit(_) | ast::pat_range(_, _) {}
ast::pat_bind(nm, sub) {
set += [{id: pat.id, name: nm, mut: mut, span: pat.span}];
alt sub { some(p) { walk(tcx, mut, p, set); } _ {} }
}
ast::pat_tag(_, ps) | ast::pat_tup(ps) {
for p in ps { walk(tcx, mut, p, set); }
}
ast::pat_rec(fs, _) { | }
}
ast::pat_box(p) {
let ty = ty::node_id_to_type(tcx, pat.id);
let m = alt ty::struct(tcx, ty) {
ty::ty_box(mt) { mt.mut != ast::imm }
};
walk(tcx, m ? some(contains(ty)) : mut, p, set);
}
ast::pat_uniq(p) {
let ty = ty::node_id_to_type(tcx, pat.id);
let m = alt ty::struct(tcx, ty) {
ty::ty_uniq(mt) { mt.mut != ast::imm }
};
walk(tcx, m ? some(contains(ty)) : mut, p, set);
}
}
}
let set = [];
walk(tcx, mut, pat, set);
ret set;
}
// Wraps the expr_root in mut.rs to also handle roots that exist through
// return-by-reference
fn expr_root(cx: ctx, ex: @ast::expr, autoderef: bool)
-> {ex: @ast::expr, mut: option::t<unsafe_ty>} {
let base_root = mut::expr_root(cx.tcx, ex, autoderef);
let unsafe_ty = none;
for d in *base_root.ds {
if d.mut { unsafe_ty = some(contains(d.outer_t)); break; }
}
alt base_root.ex.node {
ast::expr_path(_) {
alt cx.tcx.def_map.get(base_root.ex.id) {
ast::def_obj_field(_, ast::mut.) {
unsafe_ty = some(mut_contains(ty::expr_ty(cx.tcx, base_root.ex)));
}
_ {}
}
}
_ {}
}
ret {ex: base_root.ex, mut: unsafe_ty};
}
fn unsafe_set(from: option::t<unsafe_ty>) -> [unsafe_ty] {
alt from { some(t) { [t] } _ { [] } }
}
fn find_invalid(id: node_id, lst: list<@invalid>)
-> option::t<@invalid> {
let cur = lst;
while true {
alt cur {
list::nil. { break; }
list::cons(head, tail) {
if head.node_id == id { ret some(head); }
cur = *tail;
}
}
}
ret none;
}
fn append_invalid(dest: list<@invalid>, src: list<@invalid>,
stop: list<@invalid>) -> list<@invalid> {
let cur = src, dest = dest;
while cur != stop {
alt cur {
list::cons(head, tail) {
if is_none(find_invalid(head.node_id, dest)) {
dest = list::cons(head, @dest);
}
cur = *tail;
}
}
}
ret dest;
}
fn filter_invalid(src: list<@invalid>, bs: [binding]) -> list<@invalid> {
let out = list::nil, cur = src;
while cur != list::nil {
alt cur {
list::cons(head, tail) {
let p = vec::position_pred(bs, {|b| b.node_id == head.node_id});
if !is_none(p) { out = list::cons(head, @out); }
cur = *tail;
}
}
}
ret out;
}
fn err(cx: ctx, sp: span, err: str) {
if !cx.silent || !cx.tcx.sess.has_errors() {
cx.tcx.sess.span_err(sp, err);
}
}
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End: | let ty = ty::node_id_to_type(tcx, pat.id);
for f in fs {
let m = ty::get_field(tcx, ty, f.ident).mt.mut != ast::imm;
walk(tcx, m ? some(contains(ty)) : mut, f.pat, set); | random_line_split |
alias.rs |
import syntax::{ast, ast_util};
import ast::{ident, fn_ident, node_id};
import syntax::codemap::span;
import syntax::visit;
import visit::vt;
import core::{vec, option};
import std::list;
import option::{some, none, is_none};
import list::list;
// This is not an alias-analyser (though it would benefit from becoming one,
// or from getting input from one, to be more precise). It is a pass that
// checks whether aliases are used in a safe way.
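// For example, a by-reference binding (a let_ref local, an alt arm pattern, a
// for-loop pattern, or a call argument) whose root value is later overwritten
// or moved out of is reported here as an invalidated reference that is still
// used.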
tag copied { not_allowed; copied; not_copied; }
tag invalid_reason { overwritten; val_taken; }
type invalid = {reason: invalid_reason,
node_id: node_id,
sp: span, path: @ast::path};
tag unsafe_ty { contains(ty::t); mut_contains(ty::t); }
type binding = @{node_id: node_id,
span: span,
root_var: option::t<node_id>,
local_id: uint,
unsafe_tys: [unsafe_ty],
mutable copied: copied};
// FIXME it may be worthwhile to use a linked list of bindings instead
type scope = {bs: [binding],
invalid: @mutable list<@invalid>};
fn mk_binding(cx: ctx, id: node_id, span: span, root_var: option::t<node_id>,
unsafe_tys: [unsafe_ty]) -> binding {
alt root_var {
some(r_id) { cx.ref_map.insert(id, r_id); }
_ {}
}
ret @{node_id: id, span: span, root_var: root_var,
local_id: local_id_of_node(cx, id),
unsafe_tys: unsafe_tys,
mutable copied: not_copied};
}
tag local_info { local(uint); }
type copy_map = std::map::hashmap<node_id, ()>;
type ref_map = std::map::hashmap<node_id, node_id>;
type ctx = {tcx: ty::ctxt,
copy_map: copy_map,
ref_map: ref_map,
mutable silent: bool};
fn check_crate(tcx: ty::ctxt, crate: @ast::crate) -> (copy_map, ref_map) {
// Stores information about object fields and function
// arguments that's otherwise not easily available.
let cx = @{tcx: tcx,
copy_map: std::map::new_int_hash(),
ref_map: std::map::new_int_hash(),
mutable silent: false};
let v = @{visit_fn: bind visit_fn(cx, _, _, _, _, _, _, _),
visit_expr: bind visit_expr(cx, _, _, _),
visit_block: bind visit_block(cx, _, _, _)
with *visit::default_visitor::<scope>()};
let sc = {bs: [], invalid: @mutable list::nil};
visit::visit_crate(*crate, sc, visit::mk_vt(v));
tcx.sess.abort_if_errors();
ret (cx.copy_map, cx.ref_map);
}
fn visit_fn(cx: @ctx, _fk: visit::fn_kind, decl: ast::fn_decl,
body: ast::blk, sp: span,
id: ast::node_id, sc: scope, v: vt<scope>) {
visit::visit_fn_decl(decl, sc, v);
let fty = ty::node_id_to_type(cx.tcx, id);
let args = ty::ty_fn_args(cx.tcx, fty);
for arg in args {
if arg.mode == ast::by_val &&
ty::type_has_dynamic_size(cx.tcx, arg.ty) {
err(*cx, sp, "can not pass a dynamically-sized type by value");
}
}
// Blocks need to obey any restrictions from the enclosing scope, and may
// be called multiple times.
let proto = ty::ty_fn_proto(cx.tcx, fty);
if proto == ast::proto_block {
check_loop(*cx, sc) {|| v.visit_block(body, sc, v);}
} else {
let sc = {bs: [], invalid: @mutable list::nil};
v.visit_block(body, sc, v);
}
}
fn visit_expr(cx: @ctx, ex: @ast::expr, sc: scope, v: vt<scope>) {
let handled = true;
alt ex.node {
ast::expr_call(f, args, _) {
check_call(*cx, sc, f, args);
handled = false;
}
ast::expr_alt(input, arms) { check_alt(*cx, input, arms, sc, v); }
ast::expr_for(decl, seq, blk) {
v.visit_expr(seq, sc, v);
check_loop(*cx, sc) {|| check_for(*cx, decl, seq, blk, sc, v); }
}
ast::expr_path(pt) {
check_var(*cx, ex, pt, ex.id, false, sc);
handled = false;
}
ast::expr_swap(lhs, rhs) {
check_lval(cx, lhs, sc, v);
check_lval(cx, rhs, sc, v);
handled = false;
}
ast::expr_move(dest, src) {
check_assign(cx, dest, src, sc, v);
check_lval(cx, src, sc, v);
}
ast::expr_assign(dest, src) | ast::expr_assign_op(_, dest, src) {
check_assign(cx, dest, src, sc, v);
}
ast::expr_if(c, then, els) { check_if(c, then, els, sc, v); }
ast::expr_while(_, _) | ast::expr_do_while(_, _) {
check_loop(*cx, sc) {|| visit::visit_expr(ex, sc, v); }
}
_ { handled = false; }
}
if !handled { visit::visit_expr(ex, sc, v); }
}
fn visit_block(cx: @ctx, b: ast::blk, sc: scope, v: vt<scope>) {
let bs = sc.bs, sc = sc;
for stmt in b.node.stmts {
alt stmt.node {
ast::stmt_decl(@{node: ast::decl_item(it), _}, _) {
v.visit_item(it, sc, v);
}
ast::stmt_decl(@{node: ast::decl_local(locs), _}, _) {
for (st, loc) in locs {
if st == ast::let_ref {
add_bindings_for_let(*cx, bs, loc);
sc = {bs: bs with sc};
}
alt loc.node.init {
some(init) {
if init.op == ast::init_move {
check_lval(cx, init.expr, sc, v);
}
}
none. { }
}
}
}
ast::stmt_expr(ex, _) | ast::stmt_semi(ex, _) {
v.visit_expr(ex, sc, v);
}
}
}
visit::visit_expr_opt(b.node.expr, sc, v);
}
fn add_bindings_for_let(cx: ctx, &bs: [binding], loc: @ast::local) {
alt loc.node.init {
some(init) {
if init.op == ast::init_move {
err(cx, loc.span, "can not move into a by-reference binding");
}
let root = expr_root(cx, init.expr, false);
let root_var = path_def_id(cx, root.ex);
if is_none(root_var) {
err(cx, loc.span, "a reference binding can't be \
rooted in a temporary");
}
for proot in pattern_roots(cx.tcx, root.mut, loc.node.pat) {
let bnd = mk_binding(cx, proot.id, proot.span, root_var,
unsafe_set(proot.mut));
// Don't implicitly copy explicit references
bnd.copied = not_allowed;
bs += [bnd];
}
}
_ {
err(cx, loc.span, "by-reference bindings must be initialized");
}
}
}
fn cant_copy(cx: ctx, b: binding) -> bool {
alt b.copied {
not_allowed. { ret true; }
copied. { ret false; }
not_copied. {}
}
let ty = ty::node_id_to_type(cx.tcx, b.node_id);
if ty::type_allows_implicit_copy(cx.tcx, ty) {
b.copied = copied;
cx.copy_map.insert(b.node_id, ());
if copy_is_expensive(cx.tcx, ty) {
cx.tcx.sess.span_warn(b.span,
"inserting an implicit copy for type " +
util::ppaux::ty_to_str(cx.tcx, ty));
}
ret false;
} else { ret true; }
}
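// check_call builds a binding for every argument of the call, then (when the
// callee may close over locals) rejects arguments that are not immutably
// rooted, rejects arguments that may alias another argument, and rejects
// passing a binding's root by mutable reference, unless an implicit copy can
// be inserted instead.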
fn check_call(cx: ctx, sc: scope, f: @ast::expr, args: [@ast::expr])
-> [binding] {
let fty = ty::expr_ty(cx.tcx, f);
let arg_ts = ty::ty_fn_args(cx.tcx, fty);
let mut_roots: [{arg: uint, node: node_id}] = [];
let bindings = [];
let i = 0u;
for arg_t: ty::arg in arg_ts {
let arg = args[i];
let root = expr_root(cx, arg, false);
if arg_t.mode == ast::by_mut_ref {
alt path_def(cx, arg) {
some(def) {
let dnum = ast_util::def_id_of_def(def).node;
mut_roots += [{arg: i, node: dnum}];
}
_ { }
}
}
let root_var = path_def_id(cx, root.ex);
bindings += [@{node_id: arg.id,
span: arg.span,
root_var: root_var,
local_id: 0u,
unsafe_tys: unsafe_set(root.mut),
mutable copied: alt arg_t.mode {
ast::by_move. | ast::by_copy. { copied }
ast::by_mut_ref. { not_allowed }
_ { not_copied }
}}];
i += 1u;
}
let f_may_close =
alt f.node {
ast::expr_path(_) { def_is_local(cx.tcx.def_map.get(f.id)) }
_ { true }
};
if f_may_close {
let i = 0u;
for b in bindings {
let unsfe = vec::len(b.unsafe_tys) > 0u;
alt b.root_var {
some(rid) {
for o in sc.bs {
if o.node_id == rid && vec::len(o.unsafe_tys) > 0u {
unsfe = true; break;
}
}
}
_ {}
}
if unsfe && cant_copy(cx, b) {
err(cx, f.span, #fmt["function may alias with argument \
%u, which is not immutably rooted", i]);
}
i += 1u;
}
}
let j = 0u;
for b in bindings {
for unsafe_ty in b.unsafe_tys {
let i = 0u;
for arg_t: ty::arg in arg_ts {
let mut_alias = arg_t.mode == ast::by_mut_ref;
if i != j &&
ty_can_unsafely_include(cx, unsafe_ty, arg_t.ty,
mut_alias) &&
cant_copy(cx, b) {
err(cx, args[i].span,
#fmt["argument %u may alias with argument %u, \
which is not immutably rooted", i, j]);
}
i += 1u;
}
}
j += 1u;
}
// Ensure we're not passing a root by mutable alias.
for {node: node, arg: arg} in mut_roots {
let i = 0u;
for b in bindings {
if i != arg {
alt b.root_var {
some(root) {
if node == root && cant_copy(cx, b) {
err(cx, args[arg].span,
"passing a mutable reference to a \
variable that roots another reference");
break;
}
}
none. { }
}
}
i += 1u;
}
}
ret bindings;
}
fn check_alt(cx: ctx, input: @ast::expr, arms: [ast::arm], sc: scope,
v: vt<scope>) {
v.visit_expr(input, sc, v);
let orig_invalid = *sc.invalid;
let all_invalid = orig_invalid;
let root = expr_root(cx, input, true);
for a: ast::arm in arms {
let new_bs = sc.bs;
let root_var = path_def_id(cx, root.ex);
let pat_id_map = ast_util::pat_id_map(a.pats[0]);
type info = {
id: node_id,
mutable unsafe_tys: [unsafe_ty],
span: span};
let binding_info: [info] = [];
for pat in a.pats {
for proot in pattern_roots(cx.tcx, root.mut, pat) {
let canon_id = pat_id_map.get(proot.name);
alt vec::find(binding_info, {|x| x.id == canon_id}) {
some(s) { s.unsafe_tys += unsafe_set(proot.mut); }
none. {
binding_info += [
{id: canon_id,
mutable unsafe_tys: unsafe_set(proot.mut),
span: proot.span}];
}
}
}
}
for info in binding_info {
new_bs += [mk_binding(cx, info.id, info.span, root_var,
copy info.unsafe_tys)];
}
*sc.invalid = orig_invalid;
visit::visit_arm(a, {bs: new_bs with sc}, v);
all_invalid = append_invalid(all_invalid, *sc.invalid, orig_invalid);
}
*sc.invalid = all_invalid;
}
fn check_for(cx: ctx, local: @ast::local, seq: @ast::expr, blk: ast::blk,
sc: scope, v: vt<scope>) {
let root = expr_root(cx, seq, false);
// If this is a mutable vector, don't allow it to be touched.
let seq_t = ty::expr_ty(cx.tcx, seq);
let cur_mut = root.mut;
alt ty::struct(cx.tcx, seq_t) {
ty::ty_vec(mt) {
if mt.mut != ast::imm {
cur_mut = some(contains(seq_t));
}
}
_ {}
}
let root_var = path_def_id(cx, root.ex);
let new_bs = sc.bs;
for proot in pattern_roots(cx.tcx, cur_mut, local.node.pat) {
new_bs += [mk_binding(cx, proot.id, proot.span, root_var,
unsafe_set(proot.mut))];
}
visit::visit_block(blk, {bs: new_bs with sc}, v);
}
fn check_var(cx: ctx, ex: @ast::expr, p: @ast::path, id: ast::node_id,
assign: bool, sc: scope) {
let def = cx.tcx.def_map.get(id);
if !def_is_local(def) { ret; }
let my_defnum = ast_util::def_id_of_def(def).node;
let my_local_id = local_id_of_node(cx, my_defnum);
let var_t = ty::expr_ty(cx.tcx, ex);
for b in sc.bs {
// excludes variables introduced since the alias was made
if my_local_id < b.local_id {
for unsafe_ty in b.unsafe_tys {
if ty_can_unsafely_include(cx, unsafe_ty, var_t, assign) {
let inv = @{reason: val_taken, node_id: b.node_id,
sp: ex.span, path: p};
*sc.invalid = list::cons(inv, @*sc.invalid);
}
}
} else if b.node_id == my_defnum {
test_scope(cx, sc, b, p);
}
}
}
fn check_lval(cx: @ctx, dest: @ast::expr, sc: scope, v: vt<scope>) {
alt dest.node {
ast::expr_path(p) {
let def = cx.tcx.def_map.get(dest.id);
let dnum = ast_util::def_id_of_def(def).node;
for b in sc.bs {
if b.root_var == some(dnum) {
let inv = @{reason: overwritten, node_id: b.node_id,
sp: dest.span, path: p};
*sc.invalid = list::cons(inv, @*sc.invalid);
}
}
}
_ { visit_expr(cx, dest, sc, v); }
}
}
fn check_assign(cx: @ctx, dest: @ast::expr, src: @ast::expr, sc: scope,
v: vt<scope>) {
visit_expr(cx, src, sc, v);
check_lval(cx, dest, sc, v);
}
fn check_if(c: @ast::expr, then: ast::blk, els: option::t<@ast::expr>,
sc: scope, v: vt<scope>) {
v.visit_expr(c, sc, v);
let orig_invalid = *sc.invalid;
v.visit_block(then, sc, v);
let then_invalid = *sc.invalid;
*sc.invalid = orig_invalid;
visit::visit_expr_opt(els, sc, v);
*sc.invalid = append_invalid(*sc.invalid, then_invalid, orig_invalid);
}
fn check_loop(cx: ctx, sc: scope, checker: block()) {
let orig_invalid = filter_invalid(*sc.invalid, sc.bs);
checker();
let new_invalid = filter_invalid(*sc.invalid, sc.bs);
// Have to check contents of loop again if it invalidated an alias
if list::len(orig_invalid) < list::len(new_invalid) {
let old_silent = cx.silent;
cx.silent = true;
checker();
cx.silent = old_silent;
}
*sc.invalid = new_invalid;
}
fn test_scope(cx: ctx, sc: scope, b: binding, p: @ast::path) {
let prob = find_invalid(b.node_id, *sc.invalid);
alt b.root_var {
some(dn) {
for other in sc.bs {
if !is_none(prob) { break; }
if other.node_id == dn {
prob = find_invalid(other.node_id, *sc.invalid);
}
}
}
_ {}
}
if !is_none(prob) && cant_copy(cx, b) {
let i = option::get(prob);
let msg = alt i.reason {
overwritten. { "overwriting " + ast_util::path_name(i.path) }
val_taken. { "taking the value of " + ast_util::path_name(i.path) }
};
err(cx, i.sp, msg + " will invalidate reference " +
ast_util::path_name(p) + ", which is still used");
}
}
fn path_def(cx: ctx, ex: @ast::expr) -> option::t<ast::def> {
ret alt ex.node {
ast::expr_path(_) { some(cx.tcx.def_map.get(ex.id)) }
_ { none }
}
}
fn path_def_id(cx: ctx, ex: @ast::expr) -> option::t<ast::node_id> {
alt ex.node {
ast::expr_path(_) {
ret some(ast_util::def_id_of_def(cx.tcx.def_map.get(ex.id)).node);
}
_ { ret none; }
}
}
fn ty_can_unsafely_include(cx: ctx, needle: unsafe_ty, haystack: ty::t,
mut: bool) -> bool {
fn get_mut(cur: bool, mt: ty::mt) -> bool {
ret cur || mt.mut != ast::imm;
}
fn helper(tcx: ty::ctxt, needle: unsafe_ty, haystack: ty::t, mut: bool)
-> bool {
if alt needle {
contains(ty) { ty == haystack }
mut_contains(ty) { mut && ty == haystack }
} { ret true; }
alt ty::struct(tcx, haystack) {
ty::ty_tag(_, ts) {
for t: ty::t in ts {
if helper(tcx, needle, t, mut) { ret true; }
}
ret false;
}
ty::ty_box(mt) | ty::ty_ptr(mt) | ty::ty_uniq(mt) {
ret helper(tcx, needle, mt.ty, get_mut(mut, mt));
}
ty::ty_rec(fields) {
for f: ty::field in fields {
if helper(tcx, needle, f.mt.ty, get_mut(mut, f.mt)) {
ret true;
}
}
ret false;
}
ty::ty_tup(ts) {
for t in ts { if helper(tcx, needle, t, mut) { ret true; } }
ret false;
}
ty::ty_fn({proto: ast::proto_bare., _}) { ret false; }
// These may contain anything.
ty::ty_fn(_) | ty::ty_obj(_) { ret true; }
// A type param may include everything, but can only be
// treated as opaque downstream, and is thus safe unless we
// saw mutable fields, in which case the whole thing can be
// overwritten.
ty::ty_param(_, _) { ret mut; }
_ { ret false; }
}
}
ret helper(cx.tcx, needle, haystack, mut);
}
fn def_is_local(d: ast::def) -> bool {
alt d {
ast::def_local(_, _) | ast::def_arg(_, _) | ast::def_binding(_) |
ast::def_upvar(_, _, _) | ast::def_self(_) |
ast::def_obj_field(_, _) { true }
_ { false }
}
}
fn local_id_of_node(cx: ctx, id: node_id) -> uint {
alt cx.tcx.items.find(id) {
some(ast_map::node_arg(_, id)) | some(ast_map::node_local(id)) { id }
_ { 0u }
}
}
// Heuristic, somewhat random way to decide whether to warn when inserting an
// implicit copy.
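// For example, a record of two boxes scores 3 + 3 = 6 and copies silently,
// while any str, vec, or type parameter scores 50 and always warns (the
// threshold below is 8).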
fn copy_is_expensive(tcx: ty::ctxt, ty: ty::t) -> bool {
fn score_ty(tcx: ty::ctxt, ty: ty::t) -> uint {
ret alt ty::struct(tcx, ty) {
ty::ty_nil. | ty::ty_bot. | ty::ty_bool. | ty::ty_int(_) |
ty::ty_uint(_) | ty::ty_float(_) | ty::ty_type. | ty::ty_native(_) |
ty::ty_ptr(_) { 1u }
ty::ty_box(_) { 3u }
ty::ty_constr(t, _) | ty::ty_res(_, t, _) { score_ty(tcx, t) }
ty::ty_fn(_) | ty::ty_native_fn(_, _) |
ty::ty_obj(_) { 4u }
ty::ty_str. | ty::ty_vec(_) | ty::ty_param(_, _) { 50u }
ty::ty_uniq(mt) { 1u + score_ty(tcx, mt.ty) }
ty::ty_tag(_, ts) | ty::ty_tup(ts) {
let sum = 0u;
for t in ts { sum += score_ty(tcx, t); }
sum
}
ty::ty_rec(fs) {
let sum = 0u;
for f in fs { sum += score_ty(tcx, f.mt.ty); }
sum
}
};
}
ret score_ty(tcx, ty) > 8u;
}
type pattern_root = {id: node_id,
name: ident,
mut: option::t<unsafe_ty>,
span: span};
fn pattern_roots(tcx: ty::ctxt, mut: option::t<unsafe_ty>, pat: @ast::pat)
-> [pattern_root] {
fn walk(tcx: ty::ctxt, mut: option::t<unsafe_ty>, pat: @ast::pat,
&set: [pattern_root]) {
alt pat.node {
ast::pat_wild. | ast::pat_lit(_) | ast::pat_range(_, _) {}
ast::pat_bind(nm, sub) {
set += [{id: pat.id, name: nm, mut: mut, span: pat.span}];
alt sub { some(p) { walk(tcx, mut, p, set); } _ {} }
}
ast::pat_tag(_, ps) | ast::pat_tup(ps) {
for p in ps { walk(tcx, mut, p, set); }
}
ast::pat_rec(fs, _) {
let ty = ty::node_id_to_type(tcx, pat.id);
for f in fs {
let m = ty::get_field(tcx, ty, f.ident).mt.mut != ast::imm;
walk(tcx, m ? some(contains(ty)) : mut, f.pat, set);
}
}
ast::pat_box(p) {
let ty = ty::node_id_to_type(tcx, pat.id);
let m = alt ty::struct(tcx, ty) {
ty::ty_box(mt) { mt.mut != ast::imm }
};
walk(tcx, m ? some(contains(ty)) : mut, p, set);
}
ast::pat_uniq(p) {
let ty = ty::node_id_to_type(tcx, pat.id);
let m = alt ty::struct(tcx, ty) {
ty::ty_uniq(mt) { mt.mut != ast::imm }
};
walk(tcx, m ? some(contains(ty)) : mut, p, set);
}
}
}
let set = [];
walk(tcx, mut, pat, set);
ret set;
}
// Wraps the expr_root in mut.rs to also handle roots that exist through
// return-by-reference
fn expr_root(cx: ctx, ex: @ast::expr, autoderef: bool)
-> {ex: @ast::expr, mut: option::t<unsafe_ty>} {
let base_root = mut::expr_root(cx.tcx, ex, autoderef);
let unsafe_ty = none;
for d in *base_root.ds {
if d.mut { unsafe_ty = some(contains(d.outer_t)); break; }
}
alt base_root.ex.node {
ast::expr_path(_) {
alt cx.tcx.def_map.get(base_root.ex.id) {
ast::def_obj_field(_, ast::mut.) {
unsafe_ty = some(mut_contains(ty::expr_ty(cx.tcx, base_root.ex)));
}
_ {}
}
}
_ {}
}
ret {ex: base_root.ex, mut: unsafe_ty};
}
fn unsafe_set(from: option::t<unsafe_ty>) -> [unsafe_ty] {
alt from { some(t) { [t] } _ { [] } }
}
fn find_invalid(id: node_id, lst: list<@invalid>)
-> option::t<@invalid> {
let cur = lst;
while true {
alt cur {
list::nil. { break; }
list::cons(head, tail) {
if head.node_id == id { ret some(head); }
cur = *tail;
}
}
}
ret none;
}
fn append_invalid(dest: list<@invalid>, src: list<@invalid>,
stop: list<@invalid>) -> list<@invalid> {
let cur = src, dest = dest;
while cur != stop {
alt cur {
list::cons(head, tail) {
if is_none(find_invalid(head.node_id, dest)) {
dest = list::cons(head, @dest);
}
cur = *tail;
}
}
}
ret dest;
}
fn filter_invalid(src: list<@invalid>, bs: [binding]) -> list<@invalid> {
let out = list::nil, cur = src;
while cur != list::nil {
alt cur {
list::cons(head, tail) {
let p = vec::position_pred(bs, {|b| b.node_id == head.node_id});
if !is_none(p) { out = list::cons(head, @out); }
cur = *tail;
}
}
}
ret out;
}
fn err(cx: ctx, sp: span, err: str) {
    if !cx.silent || !cx.tcx.sess.has_errors() {
        cx.tcx.sess.span_err(sp, err);
    }
}
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
alias.rs |
import syntax::{ast, ast_util};
import ast::{ident, fn_ident, node_id};
import syntax::codemap::span;
import syntax::visit;
import visit::vt;
import core::{vec, option};
import std::list;
import option::{some, none, is_none};
import list::list;
// This is not an alias-analyser (though it would benefit from becoming one,
// or from getting input from one, to be more precise). It is a pass that
// checks whether aliases are used in a safe way.
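// For example, a by-reference binding (a let_ref local, an alt arm pattern, a
// for-loop pattern, or a call argument) whose root value is later overwritten
// or moved out of is reported here as an invalidated reference that is still
// used.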
tag copied { not_allowed; copied; not_copied; }
tag invalid_reason { overwritten; val_taken; }
type invalid = {reason: invalid_reason,
node_id: node_id,
sp: span, path: @ast::path};
tag unsafe_ty { contains(ty::t); mut_contains(ty::t); }
type binding = @{node_id: node_id,
span: span,
root_var: option::t<node_id>,
local_id: uint,
unsafe_tys: [unsafe_ty],
mutable copied: copied};
// FIXME it may be worthwhile to use a linked list of bindings instead
type scope = {bs: [binding],
invalid: @mutable list<@invalid>};
fn mk_binding(cx: ctx, id: node_id, span: span, root_var: option::t<node_id>,
unsafe_tys: [unsafe_ty]) -> binding {
alt root_var {
some(r_id) { cx.ref_map.insert(id, r_id); }
_ {}
}
ret @{node_id: id, span: span, root_var: root_var,
local_id: local_id_of_node(cx, id),
unsafe_tys: unsafe_tys,
mutable copied: not_copied};
}
tag local_info { local(uint); }
type copy_map = std::map::hashmap<node_id, ()>;
type ref_map = std::map::hashmap<node_id, node_id>;
type ctx = {tcx: ty::ctxt,
copy_map: copy_map,
ref_map: ref_map,
mutable silent: bool};
fn check_crate(tcx: ty::ctxt, crate: @ast::crate) -> (copy_map, ref_map) {
// Stores information about object fields and function
// arguments that's otherwise not easily available.
let cx = @{tcx: tcx,
copy_map: std::map::new_int_hash(),
ref_map: std::map::new_int_hash(),
mutable silent: false};
let v = @{visit_fn: bind visit_fn(cx, _, _, _, _, _, _, _),
visit_expr: bind visit_expr(cx, _, _, _),
visit_block: bind visit_block(cx, _, _, _)
with *visit::default_visitor::<scope>()};
let sc = {bs: [], invalid: @mutable list::nil};
visit::visit_crate(*crate, sc, visit::mk_vt(v));
tcx.sess.abort_if_errors();
ret (cx.copy_map, cx.ref_map);
}
fn visit_fn(cx: @ctx, _fk: visit::fn_kind, decl: ast::fn_decl,
body: ast::blk, sp: span,
id: ast::node_id, sc: scope, v: vt<scope>) {
visit::visit_fn_decl(decl, sc, v);
let fty = ty::node_id_to_type(cx.tcx, id);
let args = ty::ty_fn_args(cx.tcx, fty);
for arg in args {
if arg.mode == ast::by_val &&
ty::type_has_dynamic_size(cx.tcx, arg.ty) {
err(*cx, sp, "can not pass a dynamically-sized type by value");
}
}
// Blocks need to obey any restrictions from the enclosing scope, and may
// be called multiple times.
let proto = ty::ty_fn_proto(cx.tcx, fty);
if proto == ast::proto_block {
check_loop(*cx, sc) {|| v.visit_block(body, sc, v);}
} else {
let sc = {bs: [], invalid: @mutable list::nil};
v.visit_block(body, sc, v);
}
}
fn visit_expr(cx: @ctx, ex: @ast::expr, sc: scope, v: vt<scope>) {
let handled = true;
alt ex.node {
ast::expr_call(f, args, _) {
check_call(*cx, sc, f, args);
handled = false;
}
ast::expr_alt(input, arms) { check_alt(*cx, input, arms, sc, v); }
ast::expr_for(decl, seq, blk) {
v.visit_expr(seq, sc, v);
check_loop(*cx, sc) {|| check_for(*cx, decl, seq, blk, sc, v); }
}
ast::expr_path(pt) {
check_var(*cx, ex, pt, ex.id, false, sc);
handled = false;
}
ast::expr_swap(lhs, rhs) {
check_lval(cx, lhs, sc, v);
check_lval(cx, rhs, sc, v);
handled = false;
}
ast::expr_move(dest, src) {
check_assign(cx, dest, src, sc, v);
check_lval(cx, src, sc, v);
}
ast::expr_assign(dest, src) | ast::expr_assign_op(_, dest, src) {
check_assign(cx, dest, src, sc, v);
}
ast::expr_if(c, then, els) { check_if(c, then, els, sc, v); }
ast::expr_while(_, _) | ast::expr_do_while(_, _) {
check_loop(*cx, sc) {|| visit::visit_expr(ex, sc, v); }
}
_ { handled = false; }
}
if !handled { visit::visit_expr(ex, sc, v); }
}
fn visit_block(cx: @ctx, b: ast::blk, sc: scope, v: vt<scope>) {
let bs = sc.bs, sc = sc;
for stmt in b.node.stmts {
alt stmt.node {
ast::stmt_decl(@{node: ast::decl_item(it), _}, _) {
v.visit_item(it, sc, v);
}
ast::stmt_decl(@{node: ast::decl_local(locs), _}, _) {
for (st, loc) in locs {
if st == ast::let_ref {
add_bindings_for_let(*cx, bs, loc);
sc = {bs: bs with sc};
}
alt loc.node.init {
some(init) {
if init.op == ast::init_move {
check_lval(cx, init.expr, sc, v);
}
}
none. { }
}
}
}
ast::stmt_expr(ex, _) | ast::stmt_semi(ex, _) {
v.visit_expr(ex, sc, v);
}
}
}
visit::visit_expr_opt(b.node.expr, sc, v);
}
fn add_bindings_for_let(cx: ctx, &bs: [binding], loc: @ast::local) {
alt loc.node.init {
some(init) {
if init.op == ast::init_move {
err(cx, loc.span, "can not move into a by-reference binding");
}
let root = expr_root(cx, init.expr, false);
let root_var = path_def_id(cx, root.ex);
if is_none(root_var) {
err(cx, loc.span, "a reference binding can't be \
rooted in a temporary");
}
for proot in pattern_roots(cx.tcx, root.mut, loc.node.pat) {
let bnd = mk_binding(cx, proot.id, proot.span, root_var,
unsafe_set(proot.mut));
// Don't implicitly copy explicit references
bnd.copied = not_allowed;
bs += [bnd];
}
}
_ {
err(cx, loc.span, "by-reference bindings must be initialized");
}
}
}
fn cant_copy(cx: ctx, b: binding) -> bool {
alt b.copied {
not_allowed. { ret true; }
copied. { ret false; }
not_copied. {}
}
let ty = ty::node_id_to_type(cx.tcx, b.node_id);
if ty::type_allows_implicit_copy(cx.tcx, ty) {
b.copied = copied;
cx.copy_map.insert(b.node_id, ());
if copy_is_expensive(cx.tcx, ty) {
cx.tcx.sess.span_warn(b.span,
"inserting an implicit copy for type " +
util::ppaux::ty_to_str(cx.tcx, ty));
}
ret false;
} else { ret true; }
}
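// check_call builds a binding for every argument of the call, then (when the
// callee may close over locals) rejects arguments that are not immutably
// rooted, rejects arguments that may alias another argument, and rejects
// passing a binding's root by mutable reference, unless an implicit copy can
// be inserted instead.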
fn check_call(cx: ctx, sc: scope, f: @ast::expr, args: [@ast::expr])
-> [binding] {
let fty = ty::expr_ty(cx.tcx, f);
let arg_ts = ty::ty_fn_args(cx.tcx, fty);
let mut_roots: [{arg: uint, node: node_id}] = [];
let bindings = [];
let i = 0u;
for arg_t: ty::arg in arg_ts {
let arg = args[i];
let root = expr_root(cx, arg, false);
if arg_t.mode == ast::by_mut_ref {
alt path_def(cx, arg) {
some(def) {
let dnum = ast_util::def_id_of_def(def).node;
mut_roots += [{arg: i, node: dnum}];
}
_ { }
}
}
let root_var = path_def_id(cx, root.ex);
bindings += [@{node_id: arg.id,
span: arg.span,
root_var: root_var,
local_id: 0u,
unsafe_tys: unsafe_set(root.mut),
mutable copied: alt arg_t.mode {
ast::by_move. | ast::by_copy. { copied }
ast::by_mut_ref. { not_allowed }
_ { not_copied }
}}];
i += 1u;
}
let f_may_close =
alt f.node {
ast::expr_path(_) { def_is_local(cx.tcx.def_map.get(f.id)) }
_ { true }
};
if f_may_close {
let i = 0u;
for b in bindings {
let unsfe = vec::len(b.unsafe_tys) > 0u;
alt b.root_var {
some(rid) {
for o in sc.bs {
if o.node_id == rid && vec::len(o.unsafe_tys) > 0u {
unsfe = true; break;
}
}
}
_ {}
}
if unsfe && cant_copy(cx, b) {
err(cx, f.span, #fmt["function may alias with argument \
%u, which is not immutably rooted", i]);
}
i += 1u;
}
}
let j = 0u;
for b in bindings {
for unsafe_ty in b.unsafe_tys {
let i = 0u;
for arg_t: ty::arg in arg_ts {
let mut_alias = arg_t.mode == ast::by_mut_ref;
if i != j &&
ty_can_unsafely_include(cx, unsafe_ty, arg_t.ty,
mut_alias) &&
cant_copy(cx, b) {
err(cx, args[i].span,
#fmt["argument %u may alias with argument %u, \
which is not immutably rooted", i, j]);
}
i += 1u;
}
}
j += 1u;
}
// Ensure we're not passing a root by mutable alias.
for {node: node, arg: arg} in mut_roots {
let i = 0u;
for b in bindings {
if i != arg {
alt b.root_var {
some(root) {
if node == root && cant_copy(cx, b) {
err(cx, args[arg].span,
"passing a mutable reference to a \
variable that roots another reference");
break;
}
}
none. { }
}
}
i += 1u;
}
}
ret bindings;
}
fn check_alt(cx: ctx, input: @ast::expr, arms: [ast::arm], sc: scope,
v: vt<scope>) {
v.visit_expr(input, sc, v);
let orig_invalid = *sc.invalid;
let all_invalid = orig_invalid;
let root = expr_root(cx, input, true);
for a: ast::arm in arms {
let new_bs = sc.bs;
let root_var = path_def_id(cx, root.ex);
let pat_id_map = ast_util::pat_id_map(a.pats[0]);
type info = {
id: node_id,
mutable unsafe_tys: [unsafe_ty],
span: span};
let binding_info: [info] = [];
for pat in a.pats {
for proot in pattern_roots(cx.tcx, root.mut, pat) {
let canon_id = pat_id_map.get(proot.name);
alt vec::find(binding_info, {|x| x.id == canon_id}) {
some(s) { s.unsafe_tys += unsafe_set(proot.mut); }
none. {
binding_info += [
{id: canon_id,
mutable unsafe_tys: unsafe_set(proot.mut),
span: proot.span}];
}
}
}
}
for info in binding_info {
new_bs += [mk_binding(cx, info.id, info.span, root_var,
copy info.unsafe_tys)];
}
*sc.invalid = orig_invalid;
visit::visit_arm(a, {bs: new_bs with sc}, v);
all_invalid = append_invalid(all_invalid, *sc.invalid, orig_invalid);
}
*sc.invalid = all_invalid;
}
fn check_for(cx: ctx, local: @ast::local, seq: @ast::expr, blk: ast::blk,
sc: scope, v: vt<scope>) {
let root = expr_root(cx, seq, false);
// If this is a mutable vector, don't allow it to be touched.
let seq_t = ty::expr_ty(cx.tcx, seq);
let cur_mut = root.mut;
alt ty::struct(cx.tcx, seq_t) {
ty::ty_vec(mt) {
if mt.mut != ast::imm {
cur_mut = some(contains(seq_t));
}
}
_ {}
}
let root_var = path_def_id(cx, root.ex);
let new_bs = sc.bs;
for proot in pattern_roots(cx.tcx, cur_mut, local.node.pat) {
new_bs += [mk_binding(cx, proot.id, proot.span, root_var,
unsafe_set(proot.mut))];
}
visit::visit_block(blk, {bs: new_bs with sc}, v);
}
fn check_var(cx: ctx, ex: @ast::expr, p: @ast::path, id: ast::node_id,
             assign: bool, sc: scope) {
    let def = cx.tcx.def_map.get(id);
    if !def_is_local(def) { ret; }
    let my_defnum = ast_util::def_id_of_def(def).node;
    let my_local_id = local_id_of_node(cx, my_defnum);
    let var_t = ty::expr_ty(cx.tcx, ex);
    for b in sc.bs {
        // excludes variables introduced since the alias was made
        if my_local_id < b.local_id {
            for unsafe_ty in b.unsafe_tys {
                if ty_can_unsafely_include(cx, unsafe_ty, var_t, assign) {
                    let inv = @{reason: val_taken, node_id: b.node_id,
                                sp: ex.span, path: p};
                    *sc.invalid = list::cons(inv, @*sc.invalid);
                }
            }
        } else if b.node_id == my_defnum {
            test_scope(cx, sc, b, p);
        }
    }
}
fn check_lval(cx: @ctx, dest: @ast::expr, sc: scope, v: vt<scope>) {
alt dest.node {
ast::expr_path(p) {
let def = cx.tcx.def_map.get(dest.id);
let dnum = ast_util::def_id_of_def(def).node;
for b in sc.bs {
if b.root_var == some(dnum) {
let inv = @{reason: overwritten, node_id: b.node_id,
sp: dest.span, path: p};
*sc.invalid = list::cons(inv, @*sc.invalid);
}
}
}
_ { visit_expr(cx, dest, sc, v); }
}
}
fn check_assign(cx: @ctx, dest: @ast::expr, src: @ast::expr, sc: scope,
v: vt<scope>) {
visit_expr(cx, src, sc, v);
check_lval(cx, dest, sc, v);
}
fn check_if(c: @ast::expr, then: ast::blk, els: option::t<@ast::expr>,
sc: scope, v: vt<scope>) {
v.visit_expr(c, sc, v);
let orig_invalid = *sc.invalid;
v.visit_block(then, sc, v);
let then_invalid = *sc.invalid;
*sc.invalid = orig_invalid;
visit::visit_expr_opt(els, sc, v);
*sc.invalid = append_invalid(*sc.invalid, then_invalid, orig_invalid);
}
fn check_loop(cx: ctx, sc: scope, checker: block()) {
let orig_invalid = filter_invalid(*sc.invalid, sc.bs);
checker();
let new_invalid = filter_invalid(*sc.invalid, sc.bs);
// Have to check contents of loop again if it invalidated an alias
if list::len(orig_invalid) < list::len(new_invalid) {
let old_silent = cx.silent;
cx.silent = true;
checker();
cx.silent = old_silent;
}
*sc.invalid = new_invalid;
}
fn test_scope(cx: ctx, sc: scope, b: binding, p: @ast::path) {
let prob = find_invalid(b.node_id, *sc.invalid);
alt b.root_var {
some(dn) {
for other in sc.bs {
if !is_none(prob) { break; }
if other.node_id == dn {
prob = find_invalid(other.node_id, *sc.invalid);
}
}
}
_ {}
}
if !is_none(prob) && cant_copy(cx, b) {
let i = option::get(prob);
let msg = alt i.reason {
overwritten. { "overwriting " + ast_util::path_name(i.path) }
val_taken. { "taking the value of " + ast_util::path_name(i.path) }
};
err(cx, i.sp, msg + " will invalidate reference " +
ast_util::path_name(p) + ", which is still used");
}
}
fn path_def(cx: ctx, ex: @ast::expr) -> option::t<ast::def> {
ret alt ex.node {
ast::expr_path(_) { some(cx.tcx.def_map.get(ex.id)) }
_ { none }
}
}
fn path_def_id(cx: ctx, ex: @ast::expr) -> option::t<ast::node_id> {
alt ex.node {
ast::expr_path(_) {
ret some(ast_util::def_id_of_def(cx.tcx.def_map.get(ex.id)).node);
}
_ { ret none; }
}
}
fn ty_can_unsafely_include(cx: ctx, needle: unsafe_ty, haystack: ty::t,
mut: bool) -> bool {
fn get_mut(cur: bool, mt: ty::mt) -> bool {
ret cur || mt.mut != ast::imm;
}
fn helper(tcx: ty::ctxt, needle: unsafe_ty, haystack: ty::t, mut: bool)
-> bool {
if alt needle {
contains(ty) { ty == haystack }
mut_contains(ty) { mut && ty == haystack }
} { ret true; }
alt ty::struct(tcx, haystack) {
ty::ty_tag(_, ts) {
for t: ty::t in ts {
if helper(tcx, needle, t, mut) { ret true; }
}
ret false;
}
ty::ty_box(mt) | ty::ty_ptr(mt) | ty::ty_uniq(mt) {
ret helper(tcx, needle, mt.ty, get_mut(mut, mt));
}
ty::ty_rec(fields) {
for f: ty::field in fields {
if helper(tcx, needle, f.mt.ty, get_mut(mut, f.mt)) {
ret true;
}
}
ret false;
}
ty::ty_tup(ts) {
for t in ts { if helper(tcx, needle, t, mut) { ret true; } }
ret false;
}
ty::ty_fn({proto: ast::proto_bare., _}) { ret false; }
// These may contain anything.
ty::ty_fn(_) | ty::ty_obj(_) { ret true; }
// A type param may include everything, but can only be
// treated as opaque downstream, and is thus safe unless we
// saw mutable fields, in which case the whole thing can be
// overwritten.
ty::ty_param(_, _) { ret mut; }
_ { ret false; }
}
}
ret helper(cx.tcx, needle, haystack, mut);
}
fn def_is_local(d: ast::def) -> bool {
alt d {
ast::def_local(_, _) | ast::def_arg(_, _) | ast::def_binding(_) |
ast::def_upvar(_, _, _) | ast::def_self(_) |
ast::def_obj_field(_, _) { true }
_ { false }
}
}
fn local_id_of_node(cx: ctx, id: node_id) -> uint {
alt cx.tcx.items.find(id) {
some(ast_map::node_arg(_, id)) | some(ast_map::node_local(id)) { id }
_ { 0u }
}
}
// Heuristic, somewhat random way to decide whether to warn when inserting an
// implicit copy.
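// For example, a record of two boxes scores 3 + 3 = 6 and copies silently,
// while any str, vec, or type parameter scores 50 and always warns (the
// threshold below is 8).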
fn copy_is_expensive(tcx: ty::ctxt, ty: ty::t) -> bool {
fn score_ty(tcx: ty::ctxt, ty: ty::t) -> uint {
ret alt ty::struct(tcx, ty) {
ty::ty_nil. | ty::ty_bot. | ty::ty_bool. | ty::ty_int(_) |
ty::ty_uint(_) | ty::ty_float(_) | ty::ty_type. | ty::ty_native(_) |
ty::ty_ptr(_) { 1u }
ty::ty_box(_) { 3u }
ty::ty_constr(t, _) | ty::ty_res(_, t, _) { score_ty(tcx, t) }
ty::ty_fn(_) | ty::ty_native_fn(_, _) |
ty::ty_obj(_) { 4u }
ty::ty_str. | ty::ty_vec(_) | ty::ty_param(_, _) { 50u }
ty::ty_uniq(mt) { 1u + score_ty(tcx, mt.ty) }
ty::ty_tag(_, ts) | ty::ty_tup(ts) {
let sum = 0u;
for t in ts { sum += score_ty(tcx, t); }
sum
}
ty::ty_rec(fs) {
let sum = 0u;
for f in fs { sum += score_ty(tcx, f.mt.ty); }
sum
}
};
}
ret score_ty(tcx, ty) > 8u;
}
type pattern_root = {id: node_id,
name: ident,
mut: option::t<unsafe_ty>,
span: span};
fn pattern_roots(tcx: ty::ctxt, mut: option::t<unsafe_ty>, pat: @ast::pat)
-> [pattern_root] {
fn walk(tcx: ty::ctxt, mut: option::t<unsafe_ty>, pat: @ast::pat,
&set: [pattern_root]) {
alt pat.node {
ast::pat_wild. | ast::pat_lit(_) | ast::pat_range(_, _) {}
ast::pat_bind(nm, sub) {
set += [{id: pat.id, name: nm, mut: mut, span: pat.span}];
alt sub { some(p) { walk(tcx, mut, p, set); } _ {} }
}
ast::pat_tag(_, ps) | ast::pat_tup(ps) {
for p in ps { walk(tcx, mut, p, set); }
}
ast::pat_rec(fs, _) {
let ty = ty::node_id_to_type(tcx, pat.id);
for f in fs {
let m = ty::get_field(tcx, ty, f.ident).mt.mut != ast::imm;
walk(tcx, m ? some(contains(ty)) : mut, f.pat, set);
}
}
ast::pat_box(p) {
let ty = ty::node_id_to_type(tcx, pat.id);
let m = alt ty::struct(tcx, ty) {
ty::ty_box(mt) { mt.mut != ast::imm }
};
walk(tcx, m ? some(contains(ty)) : mut, p, set);
}
ast::pat_uniq(p) {
let ty = ty::node_id_to_type(tcx, pat.id);
let m = alt ty::struct(tcx, ty) {
ty::ty_uniq(mt) { mt.mut != ast::imm }
};
walk(tcx, m ? some(contains(ty)) : mut, p, set);
}
}
}
let set = [];
walk(tcx, mut, pat, set);
ret set;
}
// Wraps the expr_root in mut.rs to also handle roots that exist through
// return-by-reference
fn expr_root(cx: ctx, ex: @ast::expr, autoderef: bool)
-> {ex: @ast::expr, mut: option::t<unsafe_ty>} {
let base_root = mut::expr_root(cx.tcx, ex, autoderef);
let unsafe_ty = none;
for d in *base_root.ds {
if d.mut { unsafe_ty = some(contains(d.outer_t)); break; }
}
alt base_root.ex.node {
ast::expr_path(_) {
alt cx.tcx.def_map.get(base_root.ex.id) {
ast::def_obj_field(_, ast::mut.) {
unsafe_ty = some(mut_contains(ty::expr_ty(cx.tcx, base_root.ex)));
}
_ {}
}
}
_ {}
}
ret {ex: base_root.ex, mut: unsafe_ty};
}
fn unsafe_set(from: option::t<unsafe_ty>) -> [unsafe_ty] {
alt from { some(t) { [t] } _ { [] } }
}
fn find_invalid(id: node_id, lst: list<@invalid>)
-> option::t<@invalid> {
let cur = lst;
while true {
alt cur {
list::nil. { break; }
list::cons(head, tail) {
if head.node_id == id { ret some(head); }
cur = *tail;
}
}
}
ret none;
}
fn append_invalid(dest: list<@invalid>, src: list<@invalid>,
stop: list<@invalid>) -> list<@invalid> {
let cur = src, dest = dest;
while cur != stop {
alt cur {
list::cons(head, tail) {
if is_none(find_invalid(head.node_id, dest)) {
dest = list::cons(head, @dest);
}
cur = *tail;
}
}
}
ret dest;
}
fn filter_invalid(src: list<@invalid>, bs: [binding]) -> list<@invalid> {
let out = list::nil, cur = src;
while cur != list::nil {
alt cur {
list::cons(head, tail) {
let p = vec::position_pred(bs, {|b| b.node_id == head.node_id});
if !is_none(p) { out = list::cons(head, @out); }
cur = *tail;
}
}
}
ret out;
}
fn err(cx: ctx, sp: span, err: str) {
if !cx.silent || !cx.tcx.sess.has_errors() {
cx.tcx.sess.span_err(sp, err);
}
}
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
alias.rs |
import syntax::{ast, ast_util};
import ast::{ident, fn_ident, node_id};
import syntax::codemap::span;
import syntax::visit;
import visit::vt;
import core::{vec, option};
import std::list;
import option::{some, none, is_none};
import list::list;
// This is not an alias-analyser (though it would benefit from becoming one,
// or from getting input from one, to be more precise). It is a pass that
// checks whether aliases are used in a safe way.
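// For example, a by-reference binding (a let_ref local, an alt arm pattern, a
// for-loop pattern, or a call argument) whose root value is later overwritten
// or moved out of is reported here as an invalidated reference that is still
// used.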
tag copied { not_allowed; copied; not_copied; }
tag invalid_reason { overwritten; val_taken; }
type invalid = {reason: invalid_reason,
node_id: node_id,
sp: span, path: @ast::path};
tag unsafe_ty { contains(ty::t); mut_contains(ty::t); }
type binding = @{node_id: node_id,
span: span,
root_var: option::t<node_id>,
local_id: uint,
unsafe_tys: [unsafe_ty],
mutable copied: copied};
// FIXME it may be worthwhile to use a linked list of bindings instead
type scope = {bs: [binding],
invalid: @mutable list<@invalid>};
fn mk_binding(cx: ctx, id: node_id, span: span, root_var: option::t<node_id>,
unsafe_tys: [unsafe_ty]) -> binding {
alt root_var {
some(r_id) { cx.ref_map.insert(id, r_id); }
_ {}
}
ret @{node_id: id, span: span, root_var: root_var,
local_id: local_id_of_node(cx, id),
unsafe_tys: unsafe_tys,
mutable copied: not_copied};
}
tag local_info { local(uint); }
type copy_map = std::map::hashmap<node_id, ()>;
type ref_map = std::map::hashmap<node_id, node_id>;
type ctx = {tcx: ty::ctxt,
copy_map: copy_map,
ref_map: ref_map,
mutable silent: bool};
fn check_crate(tcx: ty::ctxt, crate: @ast::crate) -> (copy_map, ref_map) {
// Stores information about object fields and function
// arguments that's otherwise not easily available.
let cx = @{tcx: tcx,
copy_map: std::map::new_int_hash(),
ref_map: std::map::new_int_hash(),
mutable silent: false};
let v = @{visit_fn: bind visit_fn(cx, _, _, _, _, _, _, _),
visit_expr: bind visit_expr(cx, _, _, _),
visit_block: bind visit_block(cx, _, _, _)
with *visit::default_visitor::<scope>()};
let sc = {bs: [], invalid: @mutable list::nil};
visit::visit_crate(*crate, sc, visit::mk_vt(v));
tcx.sess.abort_if_errors();
ret (cx.copy_map, cx.ref_map);
}
fn visit_fn(cx: @ctx, _fk: visit::fn_kind, decl: ast::fn_decl,
body: ast::blk, sp: span,
id: ast::node_id, sc: scope, v: vt<scope>) {
visit::visit_fn_decl(decl, sc, v);
let fty = ty::node_id_to_type(cx.tcx, id);
let args = ty::ty_fn_args(cx.tcx, fty);
for arg in args {
if arg.mode == ast::by_val &&
ty::type_has_dynamic_size(cx.tcx, arg.ty) {
err(*cx, sp, "can not pass a dynamically-sized type by value");
}
}
// Blocks need to obey any restrictions from the enclosing scope, and may
// be called multiple times.
let proto = ty::ty_fn_proto(cx.tcx, fty);
if proto == ast::proto_block {
check_loop(*cx, sc) {|| v.visit_block(body, sc, v);}
} else {
let sc = {bs: [], invalid: @mutable list::nil};
v.visit_block(body, sc, v);
}
}
fn visit_expr(cx: @ctx, ex: @ast::expr, sc: scope, v: vt<scope>) {
let handled = true;
alt ex.node {
ast::expr_call(f, args, _) {
check_call(*cx, sc, f, args);
handled = false;
}
ast::expr_alt(input, arms) { check_alt(*cx, input, arms, sc, v); }
ast::expr_for(decl, seq, blk) {
v.visit_expr(seq, sc, v);
check_loop(*cx, sc) {|| check_for(*cx, decl, seq, blk, sc, v); }
}
ast::expr_path(pt) {
check_var(*cx, ex, pt, ex.id, false, sc);
handled = false;
}
ast::expr_swap(lhs, rhs) {
check_lval(cx, lhs, sc, v);
check_lval(cx, rhs, sc, v);
handled = false;
}
ast::expr_move(dest, src) {
check_assign(cx, dest, src, sc, v);
check_lval(cx, src, sc, v);
}
ast::expr_assign(dest, src) | ast::expr_assign_op(_, dest, src) {
check_assign(cx, dest, src, sc, v);
}
ast::expr_if(c, then, els) { check_if(c, then, els, sc, v); }
ast::expr_while(_, _) | ast::expr_do_while(_, _) {
check_loop(*cx, sc) {|| visit::visit_expr(ex, sc, v); }
}
_ { handled = false; }
}
if !handled { visit::visit_expr(ex, sc, v); }
}
fn visit_block(cx: @ctx, b: ast::blk, sc: scope, v: vt<scope>) {
let bs = sc.bs, sc = sc;
for stmt in b.node.stmts {
alt stmt.node {
ast::stmt_decl(@{node: ast::decl_item(it), _}, _) {
v.visit_item(it, sc, v);
}
ast::stmt_decl(@{node: ast::decl_local(locs), _}, _) {
for (st, loc) in locs {
if st == ast::let_ref {
add_bindings_for_let(*cx, bs, loc);
sc = {bs: bs with sc};
}
alt loc.node.init {
some(init) {
if init.op == ast::init_move {
check_lval(cx, init.expr, sc, v);
}
}
none. { }
}
}
}
ast::stmt_expr(ex, _) | ast::stmt_semi(ex, _) {
v.visit_expr(ex, sc, v);
}
}
}
visit::visit_expr_opt(b.node.expr, sc, v);
}
fn add_bindings_for_let(cx: ctx, &bs: [binding], loc: @ast::local) {
alt loc.node.init {
some(init) {
if init.op == ast::init_move {
err(cx, loc.span, "can not move into a by-reference binding");
}
let root = expr_root(cx, init.expr, false);
let root_var = path_def_id(cx, root.ex);
if is_none(root_var) {
err(cx, loc.span, "a reference binding can't be \
rooted in a temporary");
}
for proot in pattern_roots(cx.tcx, root.mut, loc.node.pat) {
let bnd = mk_binding(cx, proot.id, proot.span, root_var,
unsafe_set(proot.mut));
// Don't implicitly copy explicit references
bnd.copied = not_allowed;
bs += [bnd];
}
}
_ {
err(cx, loc.span, "by-reference bindings must be initialized");
}
}
}
fn cant_copy(cx: ctx, b: binding) -> bool {
alt b.copied {
not_allowed. { ret true; }
copied. { ret false; }
not_copied. {}
}
let ty = ty::node_id_to_type(cx.tcx, b.node_id);
if ty::type_allows_implicit_copy(cx.tcx, ty) {
b.copied = copied;
cx.copy_map.insert(b.node_id, ());
if copy_is_expensive(cx.tcx, ty) {
cx.tcx.sess.span_warn(b.span,
"inserting an implicit copy for type " +
util::ppaux::ty_to_str(cx.tcx, ty));
}
ret false;
} else { ret true; }
}
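// check_call builds a binding for every argument of the call, then (when the
// callee may close over locals) rejects arguments that are not immutably
// rooted, rejects arguments that may alias another argument, and rejects
// passing a binding's root by mutable reference, unless an implicit copy can
// be inserted instead.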
fn check_call(cx: ctx, sc: scope, f: @ast::expr, args: [@ast::expr])
-> [binding] {
let fty = ty::expr_ty(cx.tcx, f);
let arg_ts = ty::ty_fn_args(cx.tcx, fty);
let mut_roots: [{arg: uint, node: node_id}] = [];
let bindings = [];
let i = 0u;
for arg_t: ty::arg in arg_ts {
let arg = args[i];
let root = expr_root(cx, arg, false);
if arg_t.mode == ast::by_mut_ref {
alt path_def(cx, arg) {
some(def) {
let dnum = ast_util::def_id_of_def(def).node;
mut_roots += [{arg: i, node: dnum}];
}
_ { }
}
}
let root_var = path_def_id(cx, root.ex);
bindings += [@{node_id: arg.id,
span: arg.span,
root_var: root_var,
local_id: 0u,
unsafe_tys: unsafe_set(root.mut),
mutable copied: alt arg_t.mode {
ast::by_move. | ast::by_copy. { copied }
ast::by_mut_ref. { not_allowed }
_ { not_copied }
}}];
i += 1u;
}
let f_may_close =
alt f.node {
ast::expr_path(_) { def_is_local(cx.tcx.def_map.get(f.id)) }
_ { true }
};
if f_may_close {
let i = 0u;
for b in bindings {
let unsfe = vec::len(b.unsafe_tys) > 0u;
alt b.root_var {
some(rid) {
for o in sc.bs {
if o.node_id == rid && vec::len(o.unsafe_tys) > 0u {
unsfe = true; break;
}
}
}
_ {}
}
if unsfe && cant_copy(cx, b) {
err(cx, f.span, #fmt["function may alias with argument \
%u, which is not immutably rooted", i]);
}
i += 1u;
}
}
let j = 0u;
for b in bindings {
for unsafe_ty in b.unsafe_tys {
let i = 0u;
for arg_t: ty::arg in arg_ts {
let mut_alias = arg_t.mode == ast::by_mut_ref;
if i != j &&
ty_can_unsafely_include(cx, unsafe_ty, arg_t.ty,
mut_alias) &&
cant_copy(cx, b) {
err(cx, args[i].span,
#fmt["argument %u may alias with argument %u, \
which is not immutably rooted", i, j]);
}
i += 1u;
}
}
j += 1u;
}
// Ensure we're not passing a root by mutable alias.
for {node: node, arg: arg} in mut_roots {
let i = 0u;
for b in bindings {
if i != arg {
alt b.root_var {
some(root) {
if node == root && cant_copy(cx, b) {
err(cx, args[arg].span,
"passing a mutable reference to a \
variable that roots another reference");
break;
}
}
none. { }
}
}
i += 1u;
}
}
ret bindings;
}
fn check_alt(cx: ctx, input: @ast::expr, arms: [ast::arm], sc: scope,
v: vt<scope>) {
v.visit_expr(input, sc, v);
let orig_invalid = *sc.invalid;
let all_invalid = orig_invalid;
let root = expr_root(cx, input, true);
for a: ast::arm in arms {
let new_bs = sc.bs;
let root_var = path_def_id(cx, root.ex);
let pat_id_map = ast_util::pat_id_map(a.pats[0]);
type info = {
id: node_id,
mutable unsafe_tys: [unsafe_ty],
span: span};
let binding_info: [info] = [];
for pat in a.pats {
for proot in pattern_roots(cx.tcx, root.mut, pat) {
let canon_id = pat_id_map.get(proot.name);
alt vec::find(binding_info, {|x| x.id == canon_id}) {
some(s) { s.unsafe_tys += unsafe_set(proot.mut); }
none. {
binding_info += [
{id: canon_id,
mutable unsafe_tys: unsafe_set(proot.mut),
span: proot.span}];
}
}
}
}
for info in binding_info {
new_bs += [mk_binding(cx, info.id, info.span, root_var,
copy info.unsafe_tys)];
}
*sc.invalid = orig_invalid;
visit::visit_arm(a, {bs: new_bs with sc}, v);
all_invalid = append_invalid(all_invalid, *sc.invalid, orig_invalid);
}
*sc.invalid = all_invalid;
}
fn check_for(cx: ctx, local: @ast::local, seq: @ast::expr, blk: ast::blk,
sc: scope, v: vt<scope>) {
let root = expr_root(cx, seq, false);
// If this is a mutable vector, don't allow it to be touched.
let seq_t = ty::expr_ty(cx.tcx, seq);
let cur_mut = root.mut;
alt ty::struct(cx.tcx, seq_t) {
ty::ty_vec(mt) {
if mt.mut != ast::imm {
cur_mut = some(contains(seq_t));
}
}
_ {}
}
let root_var = path_def_id(cx, root.ex);
let new_bs = sc.bs;
for proot in pattern_roots(cx.tcx, cur_mut, local.node.pat) {
new_bs += [mk_binding(cx, proot.id, proot.span, root_var,
unsafe_set(proot.mut))];
}
visit::visit_block(blk, {bs: new_bs with sc}, v);
}
fn check_var(cx: ctx, ex: @ast::expr, p: @ast::path, id: ast::node_id,
assign: bool, sc: scope) {
let def = cx.tcx.def_map.get(id);
if !def_is_local(def) { ret; }
let my_defnum = ast_util::def_id_of_def(def).node;
let my_local_id = local_id_of_node(cx, my_defnum);
let var_t = ty::expr_ty(cx.tcx, ex);
for b in sc.bs {
// excludes variables introduced since the alias was made
if my_local_id < b.local_id {
for unsafe_ty in b.unsafe_tys {
if ty_can_unsafely_include(cx, unsafe_ty, var_t, assign) {
let inv = @{reason: val_taken, node_id: b.node_id,
sp: ex.span, path: p};
*sc.invalid = list::cons(inv, @*sc.invalid);
}
}
} else if b.node_id == my_defnum {
test_scope(cx, sc, b, p);
}
}
}
fn check_lval(cx: @ctx, dest: @ast::expr, sc: scope, v: vt<scope>) {
alt dest.node {
ast::expr_path(p) {
let def = cx.tcx.def_map.get(dest.id);
let dnum = ast_util::def_id_of_def(def).node;
for b in sc.bs {
if b.root_var == some(dnum) {
let inv = @{reason: overwritten, node_id: b.node_id,
sp: dest.span, path: p};
*sc.invalid = list::cons(inv, @*sc.invalid);
}
}
}
_ { visit_expr(cx, dest, sc, v); }
}
}
fn check_assign(cx: @ctx, dest: @ast::expr, src: @ast::expr, sc: scope,
v: vt<scope>) {
visit_expr(cx, src, sc, v);
check_lval(cx, dest, sc, v);
}
fn check_if(c: @ast::expr, then: ast::blk, els: option::t<@ast::expr>,
sc: scope, v: vt<scope>) {
v.visit_expr(c, sc, v);
let orig_invalid = *sc.invalid;
v.visit_block(then, sc, v);
let then_invalid = *sc.invalid;
*sc.invalid = orig_invalid;
visit::visit_expr_opt(els, sc, v);
*sc.invalid = append_invalid(*sc.invalid, then_invalid, orig_invalid);
}
fn check_loop(cx: ctx, sc: scope, checker: block()) {
let orig_invalid = filter_invalid(*sc.invalid, sc.bs);
checker();
let new_invalid = filter_invalid(*sc.invalid, sc.bs);
// Have to check contents of loop again if it invalidated an alias
if list::len(orig_invalid) < list::len(new_invalid) {
let old_silent = cx.silent;
cx.silent = true;
checker();
cx.silent = old_silent;
}
*sc.invalid = new_invalid;
}
fn test_scope(cx: ctx, sc: scope, b: binding, p: @ast::path) {
let prob = find_invalid(b.node_id, *sc.invalid);
alt b.root_var {
some(dn) {
for other in sc.bs {
if !is_none(prob) { break; }
if other.node_id == dn {
prob = find_invalid(other.node_id, *sc.invalid);
}
}
}
_ {}
}
if !is_none(prob) && cant_copy(cx, b) {
let i = option::get(prob);
let msg = alt i.reason {
overwritten. { "overwriting " + ast_util::path_name(i.path) }
val_taken. { "taking the value of " + ast_util::path_name(i.path) }
};
err(cx, i.sp, msg + " will invalidate reference " +
ast_util::path_name(p) + ", which is still used");
}
}
fn path_def(cx: ctx, ex: @ast::expr) -> option::t<ast::def> {
ret alt ex.node {
ast::expr_path(_) { some(cx.tcx.def_map.get(ex.id)) }
_ { none }
}
}
fn path_def_id(cx: ctx, ex: @ast::expr) -> option::t<ast::node_id> {
alt ex.node {
ast::expr_path(_) {
ret some(ast_util::def_id_of_def(cx.tcx.def_map.get(ex.id)).node);
}
_ { ret none; }
}
}
fn ty_can_unsafely_include(cx: ctx, needle: unsafe_ty, haystack: ty::t,
mut: bool) -> bool {
fn get_mut(cur: bool, mt: ty::mt) -> bool {
ret cur || mt.mut != ast::imm;
}
fn helper(tcx: ty::ctxt, needle: unsafe_ty, haystack: ty::t, mut: bool)
-> bool {
if alt needle {
contains(ty) { ty == haystack }
mut_contains(ty) { mut && ty == haystack }
} { ret true; }
alt ty::struct(tcx, haystack) {
ty::ty_tag(_, ts) {
for t: ty::t in ts {
if helper(tcx, needle, t, mut) { ret true; }
}
ret false;
}
ty::ty_box(mt) | ty::ty_ptr(mt) | ty::ty_uniq(mt) {
ret helper(tcx, needle, mt.ty, get_mut(mut, mt));
}
ty::ty_rec(fields) {
for f: ty::field in fields {
if helper(tcx, needle, f.mt.ty, get_mut(mut, f.mt)) {
ret true;
}
}
ret false;
}
ty::ty_tup(ts) {
for t in ts { if helper(tcx, needle, t, mut) { ret true; } }
ret false;
}
ty::ty_fn({proto: ast::proto_bare., _}) { ret false; }
// These may contain anything.
ty::ty_fn(_) | ty::ty_obj(_) { ret true; }
// A type param may include everything, but can only be
// treated as opaque downstream, and is thus safe unless we
// saw mutable fields, in which case the whole thing can be
// overwritten.
ty::ty_param(_, _) { ret mut; }
_ { ret false; }
}
}
ret helper(cx.tcx, needle, haystack, mut);
}
fn def_is_local(d: ast::def) -> bool {
alt d {
ast::def_local(_, _) | ast::def_arg(_, _) | ast::def_binding(_) |
ast::def_upvar(_, _, _) | ast::def_self(_) |
ast::def_obj_field(_, _) { true }
_ { false }
}
}
fn local_id_of_node(cx: ctx, id: node_id) -> uint {
alt cx.tcx.items.find(id) {
some(ast_map::node_arg(_, id)) | some(ast_map::node_local(id)) { id }
_ { 0u }
}
}
// Heuristic, somewhat random way to decide whether to warn when inserting an
// implicit copy.
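// For example, a record of two boxes scores 3 + 3 = 6 and copies silently,
// while any str, vec, or type parameter scores 50 and always warns (the
// threshold below is 8).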
fn copy_is_expensive(tcx: ty::ctxt, ty: ty::t) -> bool {
fn score_ty(tcx: ty::ctxt, ty: ty::t) -> uint {
ret alt ty::struct(tcx, ty) {
ty::ty_nil. | ty::ty_bot. | ty::ty_bool. | ty::ty_int(_) |
ty::ty_uint(_) | ty::ty_float(_) | ty::ty_type. | ty::ty_native(_) |
ty::ty_ptr(_) { 1u }
ty::ty_box(_) { 3u }
ty::ty_constr(t, _) | ty::ty_res(_, t, _) { score_ty(tcx, t) }
ty::ty_fn(_) | ty::ty_native_fn(_, _) |
ty::ty_obj(_) { 4u }
ty::ty_str. | ty::ty_vec(_) | ty::ty_param(_, _) { 50u }
ty::ty_uniq(mt) { 1u + score_ty(tcx, mt.ty) }
ty::ty_tag(_, ts) | ty::ty_tup(ts) {
let sum = 0u;
for t in ts { sum += score_ty(tcx, t); }
sum
}
ty::ty_rec(fs) {
let sum = 0u;
for f in fs { sum += score_ty(tcx, f.mt.ty); }
sum
}
};
}
ret score_ty(tcx, ty) > 8u;
}
type pattern_root = {id: node_id,
name: ident,
mut: option::t<unsafe_ty>,
span: span};
fn pattern_roots(tcx: ty::ctxt, mut: option::t<unsafe_ty>, pat: @ast::pat)
-> [pattern_root] {
fn walk(tcx: ty::ctxt, mut: option::t<unsafe_ty>, pat: @ast::pat,
&set: [pattern_root]) {
alt pat.node {
ast::pat_wild. | ast::pat_lit(_) | ast::pat_range(_, _) {}
ast::pat_bind(nm, sub) {
set += [{id: pat.id, name: nm, mut: mut, span: pat.span}];
alt sub { some(p) { walk(tcx, mut, p, set); } _ {} }
}
ast::pat_tag(_, ps) | ast::pat_tup(ps) {
for p in ps { walk(tcx, mut, p, set); }
}
ast::pat_rec(fs, _) {
let ty = ty::node_id_to_type(tcx, pat.id);
for f in fs {
let m = ty::get_field(tcx, ty, f.ident).mt.mut != ast::imm;
walk(tcx, m ? some(contains(ty)) : mut, f.pat, set);
}
}
ast::pat_box(p) {
let ty = ty::node_id_to_type(tcx, pat.id);
let m = alt ty::struct(tcx, ty) {
ty::ty_box(mt) { mt.mut != ast::imm }
};
walk(tcx, m ? some(contains(ty)) : mut, p, set);
}
ast::pat_uniq(p) {
let ty = ty::node_id_to_type(tcx, pat.id);
let m = alt ty::struct(tcx, ty) {
ty::ty_uniq(mt) { mt.mut != ast::imm }
};
walk(tcx, m ? some(contains(ty)) : mut, p, set);
}
}
}
let set = [];
walk(tcx, mut, pat, set);
ret set;
}
// Wraps the expr_root in mut.rs to also handle roots that exist through
// return-by-reference
fn expr_root(cx: ctx, ex: @ast::expr, autoderef: bool)
-> {ex: @ast::expr, mut: option::t<unsafe_ty>} {
let base_root = mut::expr_root(cx.tcx, ex, autoderef);
let unsafe_ty = none;
for d in *base_root.ds {
if d.mut { unsafe_ty = some(contains(d.outer_t)); break; }
}
alt base_root.ex.node {
ast::expr_path(_) {
alt cx.tcx.def_map.get(base_root.ex.id) {
ast::def_obj_field(_, ast::mut.) {
unsafe_ty = some(mut_contains(ty::expr_ty(cx.tcx, base_root.ex)));
}
_ {}
}
}
_ {}
}
ret {ex: base_root.ex, mut: unsafe_ty};
}
fn unsafe_set(from: option::t<unsafe_ty>) -> [unsafe_ty] {
alt from { some(t) { [t] } _ { [] } }
}
fn find_invalid(id: node_id, lst: list<@invalid>)
-> option::t<@invalid> {
let cur = lst;
while true {
alt cur {
list::nil. { break; }
list::cons(head, tail) {
if head.node_id == id { ret some(head); }
cur = *tail;
}
}
}
ret none;
}
fn append_invalid(dest: list<@invalid>, src: list<@invalid>,
stop: list<@invalid>) -> list<@invalid> {
let cur = src, dest = dest;
while cur != stop {
alt cur {
list::cons(head, tail) {
if is_none(find_invalid(head.node_id, dest)) {
dest = list::cons(head, @dest);
}
cur = *tail;
}
}
}
ret dest;
}
fn filter_invalid(src: list<@invalid>, bs: [binding]) -> list<@invalid> {
let out = list::nil, cur = src;
while cur != list::nil {
alt cur {
list::cons(head, tail) {
let p = vec::position_pred(bs, {|b| b.node_id == head.node_id});
if !is_none(p) { out = list::cons(head, @out); }
cur = *tail;
}
}
}
ret out;
}
fn err(cx: ctx, sp: span, err: str) {
if !cx.silent || !cx.tcx.sess.has_errors() {
cx.tcx.sess.span_err(sp, err);
}
}
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
core.go |
package core
import (
"errors"
"fmt"
"regexp"
"unicode/utf8"
dht "gx/ipfs/QmSY3nkMNLzh9GdbFKK5tT7YMfLpf52iUZ8ZRkr29MJaa5/go-libp2p-kad-dht"
libp2p "gx/ipfs/QmTW4SdgBWq9GjsBsHeUx8WuGxzhgzAf88UMH2w62PC8yK/go-libp2p-crypto"
ma "gx/ipfs/QmTZBfrPJmjWsCvHEtX5FE6KimVJhsJg5sBbqEFYf4UZtL/go-multiaddr"
cid "gx/ipfs/QmTbxNB1NwDesLmKTscr4udL2tVP7MaxvXnD1D9yX7g3PN/go-cid"
peer "gx/ipfs/QmYVXrKrKHDC9FobgmcmshCDyWwdrfwfanNQN4oxJ9Fk3h/go-libp2p-peer"
routing "gx/ipfs/QmYxUdYY9S6yg5tSPVin5GFTvtfsLauVcr7reHDD3dM8xf/go-libp2p-routing"
"path"
"sync"
"time"
"github.com/OpenBazaar/multiwallet"
"github.com/btcsuite/btcutil/hdkeychain"
"github.com/gosimple/slug"
"github.com/ipfs/go-ipfs/core"
"github.com/kimitzu/kimitzu-go/ipfs"
"github.com/kimitzu/kimitzu-go/net"
rep "github.com/kimitzu/kimitzu-go/net/repointer"
ret "github.com/kimitzu/kimitzu-go/net/retriever"
"github.com/kimitzu/kimitzu-go/repo"
sto "github.com/kimitzu/kimitzu-go/storage"
logging "github.com/op/go-logging"
"golang.org/x/net/context"
"golang.org/x/net/proxy"
)
const (
// KIMITZU_VERSION - Kimitzu Development Version
KIMITZU_VERSION = "0.2.0-alpha.1"
// VERSION - current version
VERSION = "0.13.7-kimitzu"
// USERAGENT - user-agent header string
// Useragent for Kimitzu Nodes would be "openbazaar-kimitzu-go:0.13.3,0.0.1-dev"
USERAGENT = "/openbazaar-kimitzu-go: v" + VERSION + ", v" + KIMITZU_VERSION + "/"
)
var log = logging.MustGetLogger("core")
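// EmojiPattern matches a single emoji: pictographic code points, keycap
// sequences, and regional-indicator (flag) pairs, expressed in Go regexp
// syntax using \x{...} code point escapes.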
const EmojiPattern = "[\\x{2712}\\x{2714}\\x{2716}\\x{271d}\\x{2721}\\x{2728}\\x{2733}" +
"\\x{2734}\\x{2744}\\x{2747}\\x{274c}\\x{274e}\\x{2753}-\\x{2755}\\x{2757}" +
"\\x{2763}\\x{2764}\\x{2795}-\\x{2797}\\x{27a1}\\x{27b0}\\x{27bf}\\x{2934}" +
"\\x{2935}\\x{2b05}-\\x{2b07}\\x{2b1b}\\x{2b1c}\\x{2b50}\\x{2b55}\\x{3030}" +
"\\x{303d}\\x{1f004}\\x{1f0cf}\\x{1f170}\\x{1f171}\\x{1f17e}\\x{1f17f}" +
"\\x{1f18e}\\x{1f191}-\\x{1f19a}\\x{1f201}\\x{1f202}\\x{1f21a}\\x{1f22f}" +
"\\x{1f232}-\\x{1f23a}\\x{1f250}\\x{1f251}\\x{1f300}-\\x{1f321}\\x{1f324}-" +
"\\x{1f393}\\x{1f396}\\x{1f397}\\x{1f399}-\\x{1f39b}\\x{1f39e}-\\x{1f3f0}" +
"\\x{1f3f3}-\\x{1f3f5}\\x{1f3f7}-\\x{1f4fd}\\x{1f4ff}-\\x{1f53d}\\x{1f549}-" +
"\\x{1f54e}\\x{1f550}-\\x{1f567}\\x{1f56f}\\x{1f570}\\x{1f573}-\\x{1f579}" +
"\\x{1f587}\\x{1f58a}-\\x{1f58d}\\x{1f590}\\x{1f595}\\x{1f596}\\x{1f5a5}" +
"\\x{1f5a8}\\x{1f5b1}\\x{1f5b2}\\x{1f5bc}\\x{1f5c2}-\\x{1f5c4}\\x{1f5d1}-" +
"\\x{1f5d3}\\x{1f5dc}-\\x{1f5de}\\x{1f5e1}\\x{1f5e3}\\x{1f5ef}\\x{1f5f3}" +
"\\x{1f5fa}-\\x{1f64f}\\x{1f680}-\\x{1f6c5}\\x{1f6cb}-\\x{1f6d0}\\x{1f6e0}-" +
"\\x{1f6e5}\\x{1f6e9}\\x{1f6eb}\\x{1f6ec}\\x{1f6f0}\\x{1f6f3}\\x{1f910}-" +
"\\x{1f918}\\x{1f980}-\\x{1f984}\\x{1f9c0}\\x{3297}\\x{3299}\\x{a9}\\x{ae}" +
"\\x{203c}\\x{2049}\\x{2122}\\x{2139}\\x{2194}-\\x{2199}\\x{21a9}\\x{21aa}" +
"\\x{231a}\\x{231b}\\x{2328}\\x{2388}\\x{23cf}\\x{23e9}-\\x{23f3}\\x{23f8}-" +
"\\x{23fa}\\x{24c2}\\x{25aa}\\x{25ab}\\x{25b6}\\x{25c0}\\x{25fb}-\\x{25fe}" +
"\\x{2600}-\\x{2604}\\x{260e}\\x{2611}\\x{2614}\\x{2615}\\x{2618}\\x{261d}" +
"\\x{2620}\\x{2622}\\x{2623}\\x{2626}\\x{262a}\\x{262e}\\x{262f}\\x{2638}-" +
"\\x{263a}\\x{2648}-\\x{2653}\\x{2660}\\x{2663}\\x{2665}\\x{2666}\\x{2668}" +
"\\x{267b}\\x{267f}\\x{2692}-\\x{2694}\\x{2696}\\x{2697}\\x{2699}\\x{269b}" +
"\\x{269c}\\x{26a0}\\x{26a1}\\x{26aa}\\x{26ab}\\x{26b0}\\x{26b1}\\x{26bd}" +
"\\x{26be}\\x{26c4}\\x{26c5}\\x{26c8}\\x{26ce}\\x{26cf}\\x{26d1}\\x{26d3}" +
"\\x{26d4}\\x{26e9}\\x{26ea}\\x{26f0}-\\x{26f5}\\x{26f7}-\\x{26fa}\\x{26fd}" +
"\\x{2702}\\x{2705}\\x{2708}-\\x{270d}\\x{270f}]|\\x{23}\\x{20e3}|\\x{2a}" +
"\\x{20e3}|\\x{30}\\x{20e3}|\\x{31}\\x{20e3}|\\x{32}\\x{20e3}|\\x{33}\\x{20e3}|" +
"\\x{34}\\x{20e3}|\\x{35}\\x{20e3}|\\x{36}\\x{20e3}|\\x{37}\\x{20e3}|\\x{38}" +
"\\x{20e3}|\\x{39}\\x{20e3}|\\x{1f1e6}[\\x{1f1e8}-\\x{1f1ec}\\x{1f1ee}" +
"\\x{1f1f1}\\x{1f1f2}\\x{1f1f4}\\x{1f1f6}-\\x{1f1fa}\\x{1f1fc}\\x{1f1fd}" +
"\\x{1f1ff}]|\\x{1f1e7}[\\x{1f1e6}\\x{1f1e7}\\x{1f1e9}-\\x{1f1ef}\\x{1f1f1}-" +
"\\x{1f1f4}\\x{1f1f6}-\\x{1f1f9}\\x{1f1fb}\\x{1f1fc}\\x{1f1fe}\\x{1f1ff}]|" +
"\\x{1f1e8}[\\x{1f1e6}\\x{1f1e8}\\x{1f1e9}\\x{1f1eb}-\\x{1f1ee}\\x{1f1f0}-" +
"\\x{1f1f5}\\x{1f1f7}\\x{1f1fa}-\\x{1f1ff}]|\\x{1f1e9}[\\x{1f1ea}\\x{1f1ec}" +
"\\x{1f1ef}\\x{1f1f0}\\x{1f1f2}\\x{1f1f4}\\x{1f1ff}]|\\x{1f1ea}[\\x{1f1e6}" +
"\\x{1f1e8}\\x{1f1ea}\\x{1f1ec}\\x{1f1ed}\\x{1f1f7}-\\x{1f1fa}]|\\x{1f1eb}[" +
"\\x{1f1ee}-\\x{1f1f0}\\x{1f1f2}\\x{1f1f4}\\x{1f1f7}]|\\x{1f1ec}[\\x{1f1e6}" +
"\\x{1f1e7}\\x{1f1e9}-\\x{1f1ee}\\x{1f1f1}-\\x{1f1f3}\\x{1f1f5}-\\x{1f1fa}" +
"\\x{1f1fc}\\x{1f1fe}]|\\x{1f1ed}[\\x{1f1f0}\\x{1f1f2}\\x{1f1f3}\\x{1f1f7}" +
"\\x{1f1f9}\\x{1f1fa}]|\\x{1f1ee}[\\x{1f1e8}-\\x{1f1ea}\\x{1f1f1}-\\x{1f1f4}" +
"\\x{1f1f6}-\\x{1f1f9}]|\\x{1f1ef}[\\x{1f1ea}\\x{1f1f2}\\x{1f1f4}\\x{1f1f5}]" +
"|\\x{1f1f0}[\\x{1f1ea}\\x{1f1ec}-\\x{1f1ee}\\x{1f1f2}\\x{1f1f3}\\x{1f1f5}" +
"\\x{1f1f7}\\x{1f1fc}\\x{1f1fe}\\x{1f1ff}]|\\x{1f1f1}[\\x{1f1e6}-\\x{1f1e8}" +
"\\x{1f1ee}\\x{1f1f0}\\x{1f1f7}-\\x{1f1fb}\\x{1f1fe}]|\\x{1f1f2}[\\x{1f1e6}" +
"\\x{1f1e8}-\\x{1f1ed}\\x{1f1f0}-\\x{1f1ff}]|\\x{1f1f3}[\\x{1f1e6}\\x{1f1e8}" +
"\\x{1f1ea}-\\x{1f1ec}\\x{1f1ee}\\x{1f1f1}\\x{1f1f4}\\x{1f1f5}\\x{1f1f7}" +
"\\x{1f1fa}\\x{1f1ff}]|\\x{1f1f4}\\x{1f1f2}|\\x{1f1f5}[\\x{1f1e6}\\x{1f1ea}-" +
"\\x{1f1ed}\\x{1f1f0}-\\x{1f1f3}\\x{1f1f7}-\\x{1f1f9}\\x{1f1fc}\\x{1f1fe}]|" +
"\\x{1f1f6}\\x{1f1e6}|\\x{1f1f7}[\\x{1f1ea}\\x{1f1f4}\\x{1f1f8}\\x{1f1fa}" +
"\\x{1f1fc}]|\\x{1f1f8}[\\x{1f1e6}-\\x{1f1ea}\\x{1f1ec}-\\x{1f1f4}\\x{1f1f7}-" +
"\\x{1f1f9}\\x{1f1fb}\\x{1f1fd}-\\x{1f1ff}]|\\x{1f1f9}[\\x{1f1e6}\\x{1f1e8}" +
"\\x{1f1e9}\\x{1f1eb}-\\x{1f1ed}\\x{1f1ef}-\\x{1f1f4}\\x{1f1f7}\\x{1f1f9}" +
"\\x{1f1fb}\\x{1f1fc}\\x{1f1ff}]|\\x{1f1fa}[\\x{1f1e6}\\x{1f1ec}\\x{1f1f2}" +
"\\x{1f1f8}\\x{1f1fe}\\x{1f1ff}]|\\x{1f1fb}[\\x{1f1e6}\\x{1f1e8}\\x{1f1ea}" +
"\\x{1f1ec}\\x{1f1ee}\\x{1f1f3}\\x{1f1fa}]|\\x{1f1fc}[\\x{1f1eb}\\x{1f1f8}]|" +
"\\x{1f1fd}\\x{1f1f0}|\\x{1f1fe}[\\x{1f1ea}\\x{1f1f9}]|\\x{1f1ff}[\\x{1f1e6}" +
"\\x{1f1f2}\\x{1f1fc}]"
// Node - the global OpenBazaar node instance
var Node *OpenBazaarNode
var inflightPublishRequests int
// OpenBazaarNode - represents an OpenBazaar node, encapsulating the IPFS node, wallets, etc.
type OpenBazaarNode struct {
// IPFS node object
IpfsNode *core.IpfsNode
// An implementation of the custom DHT used by OpenBazaar
DHT *dht.IpfsDHT
// The roothash of the node directory inside the openbazaar repo.
// This directory hash is published on IPNS at our peer ID making
// the directory publicly viewable on the network.
RootHash string
// The path to the openbazaar repo in the file system
RepoPath string
// The OpenBazaar network service for direct communication between peers
Service net.NetworkService
// Database for storing node specific data
Datastore repo.Datastore
// Websocket channel used for pushing data to the UI
Broadcast chan repo.Notifier
// A map of cryptocurrency wallets
Multiwallet multiwallet.MultiWallet
// Storage for our outgoing messages
MessageStorage sto.OfflineMessagingStorage
// A service that periodically checks the dht for outstanding messages
MessageRetriever *ret.MessageRetriever
// OfflineMessageFailoverTimeout is the duration until the protocol
// will stop looking for the peer to send a direct message and failover to
// sending an offline message
OfflineMessageFailoverTimeout time.Duration
// A service that periodically republishes active pointers
PointerRepublisher *rep.PointerRepublisher
// Optional nodes to push user data to
PushNodes []peer.ID
// The user-agent for this node
UserAgent string
// A dialer for Tor if available
TorDialer proxy.Dialer
// Manage blocked peers
BanManager *net.BanManager
// Allow other nodes to push data to this node for storage
AcceptStoreRequests bool
// RecordAgingNotifier is a worker that walks the cases datastore to
// notify the user as disputes age past certain thresholds
RecordAgingNotifier *recordAgingNotifier
// Generic pubsub interface
Pubsub ipfs.Pubsub
// The master private key derived from the mnemonic
MasterPrivateKey *hdkeychain.ExtendedKey
// The number of DHT records to collect before returning. The larger the number
// the slower the query but the less likely we will get an old record.
IPNSQuorumSize uint
TestnetEnable bool
RegressionTestEnable bool
PublishLock sync.Mutex
seedLock sync.Mutex
InitalPublishComplete bool
// Daemon version
Version string
}
// TestNetworkEnabled indicates whether the node is operating with test parameters
func (n *OpenBazaarNode) TestNetworkEnabled() bool |
// RegressionNetworkEnabled indicates whether the node is operating with regression parameters
func (n *OpenBazaarNode) RegressionNetworkEnabled() bool { return n.RegressionTestEnable }
// SeedNode - publish to IPNS
func (n *OpenBazaarNode) SeedNode() error {
n.seedLock.Lock()
ipfs.UnPinDir(n.IpfsNode, n.RootHash)
var aerr error
var rootHash string
// There's an IPFS bug on Windows, possibly related to the Windows indexer, that can cause this to fail.
// If the first attempt fails, retry a couple of times before giving up.
for i := 0; i < 3; i++ {
rootHash, aerr = ipfs.AddDirectory(n.IpfsNode, path.Join(n.RepoPath, "root"))
if aerr == nil {
break
}
time.Sleep(time.Millisecond * 500)
}
if aerr != nil {
n.seedLock.Unlock()
return aerr
}
n.RootHash = rootHash
n.seedLock.Unlock()
n.InitalPublishComplete = true
go n.publish(rootHash)
return nil
}
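// publish notifies the UI that publishing has started, seeds the root hash to any
// configured push nodes, and then publishes the hash to IPNS, broadcasting the final status.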
func (n *OpenBazaarNode) publish(hash string) {
// Multiple publishes may have been queued
// We only need to publish the most recent
n.PublishLock.Lock()
defer n.PublishLock.Unlock()
if hash != n.RootHash {
return
}
if inflightPublishRequests == 0 {
n.Broadcast <- repo.StatusNotification{Status: "publishing"}
}
err := n.sendToPushNodes(hash)
if err != nil {
log.Error(err)
return
}
inflightPublishRequests++
err = ipfs.Publish(n.IpfsNode, hash)
inflightPublishRequests--
if inflightPublishRequests == 0 {
if err != nil {
log.Error(err)
n.Broadcast <- repo.StatusNotification{Status: "error publishing"}
} else {
n.Broadcast <- repo.StatusNotification{Status: "publish complete"}
}
}
}
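// sendToPushNodes fetches the object graph for the given root hash, appends the CIDs of any
// outgoing offline-message pointers we are seeding, and pushes the graph to each push node.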
func (n *OpenBazaarNode) sendToPushNodes(hash string) error {
id, err := cid.Decode(hash)
if err != nil {
return err
}
var graph []cid.Cid
if len(n.PushNodes) > 0 {
graph, err = ipfs.FetchGraph(n.IpfsNode, &id)
if err != nil {
return err
}
pointers, err := n.Datastore.Pointers().GetByPurpose(ipfs.MESSAGE)
if err != nil {
return err
}
// Check if we're seeding any outgoing messages and add their CIDs to the graph
for _, p := range pointers {
if len(p.Value.Addrs) > 0 {
s, err := p.Value.Addrs[0].ValueForProtocol(ma.P_IPFS)
if err != nil {
continue
}
c, err := cid.Decode(s)
if err != nil {
continue
}
graph = append(graph, c)
}
}
}
for _, p := range n.PushNodes {
go n.retryableSeedStoreToPeer(p, hash, graph)
}
return nil
}
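// retryableSeedStoreToPeer pushes the graph to a single peer, retrying with exponential
// backoff (starting at 2s) and giving up once the backoff exceeds 60s or the root hash changes.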
func (n *OpenBazaarNode) retryableSeedStoreToPeer(pid peer.ID, graphHash string, graph []cid.Cid) {
var retryTimeout = 2 * time.Second
for {
if graphHash != n.RootHash {
log.Errorf("root hash has changed, aborting push to %s", pid.Pretty())
return
}
err := n.SendStore(pid.Pretty(), graph)
if err != nil {
if retryTimeout > 60*time.Second {
log.Errorf("error pushing to peer %s: %s", pid.Pretty(), err.Error())
return
}
log.Errorf("error pushing to peer %s...backing off: %s", pid.Pretty(), err.Error())
time.Sleep(retryTimeout)
retryTimeout *= 2
continue
}
return
}
}
// SetUpRepublisher - periodic publishing to IPNS
func (n *OpenBazaarNode) SetUpRepublisher(interval time.Duration) {
if interval == 0 {
return
}
ticker := time.NewTicker(interval)
go func() {
for range ticker.C {
n.UpdateFollow()
n.SeedNode()
}
}()
}
/*EncryptMessage is a placeholder until libsignal is operational.
For now we just encrypt outgoing offline messages with the long-lived identity key.
Optionally you may provide a public key to avoid doing an IPFS lookup. */
func (n *OpenBazaarNode) EncryptMessage(peerID peer.ID, peerKey *libp2p.PubKey, message []byte) (ct []byte, rerr error) {
ctx, cancel := context.WithTimeout(context.Background(), n.OfflineMessageFailoverTimeout)
defer cancel()
if peerKey == nil {
var (
pubKey libp2p.PubKey
store = n.IpfsNode.Repo.Datastore()
)
keyval, err := ipfs.GetCachedPubkey(store, peerID.Pretty())
if err != nil {
pubKey, err = routing.GetPublicKey(n.IpfsNode.Routing, ctx, peerID)
if err != nil {
log.Errorf("Failed to find public key for %s", peerID.Pretty())
return nil, err
}
} else {
pubKey, err = libp2p.UnmarshalPublicKey(keyval)
if err != nil {
log.Errorf("Failed to find public key for %s", peerID.Pretty())
return nil, err
}
}
peerKey = &pubKey
}
if peerID.MatchesPublicKey(*peerKey) {
ciphertext, err := net.Encrypt(*peerKey, message)
if err != nil {
return nil, err
}
return ciphertext, nil
}
log.Errorf("peer public key and id do not match for peer: %s", peerID.Pretty())
return nil, errors.New("peer public key and id do not match")
}
// IPFSIdentityString - IPFS identifier
func (n *OpenBazaarNode) IPFSIdentityString() string {
return n.IpfsNode.Identity.Pretty()
}
func ToHtmlEntities(str string) string {
var rx = regexp.MustCompile(EmojiPattern)
return rx.ReplaceAllStringFunc(str, func(s string) string {
r, _ := utf8.DecodeRuneInString(s)
html := fmt.Sprintf(`&#x%X;`, r)
return html
})
}
// createSlugFor creates a slug from a multi-language string
func createSlugFor(slugName string) string {
l := SentenceMaxCharacters - SlugBuffer
slugName = ToHtmlEntities(slugName)
slug := slug.Make(slugName)
if len(slug) < SentenceMaxCharacters-SlugBuffer {
l = len(slug)
}
return slug[:l]
}
| { return n.TestnetEnable } | identifier_body |
core.go | package core
import (
"errors"
"fmt"
"regexp"
"unicode/utf8"
dht "gx/ipfs/QmSY3nkMNLzh9GdbFKK5tT7YMfLpf52iUZ8ZRkr29MJaa5/go-libp2p-kad-dht"
libp2p "gx/ipfs/QmTW4SdgBWq9GjsBsHeUx8WuGxzhgzAf88UMH2w62PC8yK/go-libp2p-crypto"
ma "gx/ipfs/QmTZBfrPJmjWsCvHEtX5FE6KimVJhsJg5sBbqEFYf4UZtL/go-multiaddr"
cid "gx/ipfs/QmTbxNB1NwDesLmKTscr4udL2tVP7MaxvXnD1D9yX7g3PN/go-cid"
peer "gx/ipfs/QmYVXrKrKHDC9FobgmcmshCDyWwdrfwfanNQN4oxJ9Fk3h/go-libp2p-peer"
routing "gx/ipfs/QmYxUdYY9S6yg5tSPVin5GFTvtfsLauVcr7reHDD3dM8xf/go-libp2p-routing"
"path"
"sync"
"time"
"github.com/OpenBazaar/multiwallet"
"github.com/btcsuite/btcutil/hdkeychain"
"github.com/gosimple/slug"
"github.com/ipfs/go-ipfs/core"
"github.com/kimitzu/kimitzu-go/ipfs"
"github.com/kimitzu/kimitzu-go/net"
rep "github.com/kimitzu/kimitzu-go/net/repointer"
ret "github.com/kimitzu/kimitzu-go/net/retriever"
"github.com/kimitzu/kimitzu-go/repo"
sto "github.com/kimitzu/kimitzu-go/storage"
logging "github.com/op/go-logging"
"golang.org/x/net/context"
"golang.org/x/net/proxy"
)
const (
// KIMITZU_VERSION - Kimitzu Development Version
KIMITZU_VERSION = "0.2.0-alpha.1"
// VERSION - current version
VERSION = "0.13.7-kimitzu"
// USERAGENT - user-agent header string for Kimitzu nodes,
// e.g. "/openbazaar-kimitzu-go: v0.13.7-kimitzu, v0.2.0-alpha.1/"
USERAGENT = "/openbazaar-kimitzu-go: v" + VERSION + ", v" + KIMITZU_VERSION + "/"
)
var log = logging.MustGetLogger("core")
const EmojiPattern = "[\\x{2712}\\x{2714}\\x{2716}\\x{271d}\\x{2721}\\x{2728}\\x{2733}" +
"\\x{2734}\\x{2744}\\x{2747}\\x{274c}\\x{274e}\\x{2753}-\\x{2755}\\x{2757}" +
"\\x{2763}\\x{2764}\\x{2795}-\\x{2797}\\x{27a1}\\x{27b0}\\x{27bf}\\x{2934}" +
"\\x{2935}\\x{2b05}-\\x{2b07}\\x{2b1b}\\x{2b1c}\\x{2b50}\\x{2b55}\\x{3030}" +
"\\x{303d}\\x{1f004}\\x{1f0cf}\\x{1f170}\\x{1f171}\\x{1f17e}\\x{1f17f}" +
"\\x{1f18e}\\x{1f191}-\\x{1f19a}\\x{1f201}\\x{1f202}\\x{1f21a}\\x{1f22f}" +
"\\x{1f232}-\\x{1f23a}\\x{1f250}\\x{1f251}\\x{1f300}-\\x{1f321}\\x{1f324}-" +
"\\x{1f393}\\x{1f396}\\x{1f397}\\x{1f399}-\\x{1f39b}\\x{1f39e}-\\x{1f3f0}" +
"\\x{1f3f3}-\\x{1f3f5}\\x{1f3f7}-\\x{1f4fd}\\x{1f4ff}-\\x{1f53d}\\x{1f549}-" +
"\\x{1f54e}\\x{1f550}-\\x{1f567}\\x{1f56f}\\x{1f570}\\x{1f573}-\\x{1f579}" +
"\\x{1f587}\\x{1f58a}-\\x{1f58d}\\x{1f590}\\x{1f595}\\x{1f596}\\x{1f5a5}" +
"\\x{1f5a8}\\x{1f5b1}\\x{1f5b2}\\x{1f5bc}\\x{1f5c2}-\\x{1f5c4}\\x{1f5d1}-" +
"\\x{1f5d3}\\x{1f5dc}-\\x{1f5de}\\x{1f5e1}\\x{1f5e3}\\x{1f5ef}\\x{1f5f3}" +
"\\x{1f5fa}-\\x{1f64f}\\x{1f680}-\\x{1f6c5}\\x{1f6cb}-\\x{1f6d0}\\x{1f6e0}-" +
"\\x{1f6e5}\\x{1f6e9}\\x{1f6eb}\\x{1f6ec}\\x{1f6f0}\\x{1f6f3}\\x{1f910}-" +
"\\x{1f918}\\x{1f980}-\\x{1f984}\\x{1f9c0}\\x{3297}\\x{3299}\\x{a9}\\x{ae}" +
"\\x{203c}\\x{2049}\\x{2122}\\x{2139}\\x{2194}-\\x{2199}\\x{21a9}\\x{21aa}" +
"\\x{231a}\\x{231b}\\x{2328}\\x{2388}\\x{23cf}\\x{23e9}-\\x{23f3}\\x{23f8}-" +
"\\x{23fa}\\x{24c2}\\x{25aa}\\x{25ab}\\x{25b6}\\x{25c0}\\x{25fb}-\\x{25fe}" +
"\\x{2600}-\\x{2604}\\x{260e}\\x{2611}\\x{2614}\\x{2615}\\x{2618}\\x{261d}" +
"\\x{2620}\\x{2622}\\x{2623}\\x{2626}\\x{262a}\\x{262e}\\x{262f}\\x{2638}-" +
"\\x{263a}\\x{2648}-\\x{2653}\\x{2660}\\x{2663}\\x{2665}\\x{2666}\\x{2668}" +
"\\x{267b}\\x{267f}\\x{2692}-\\x{2694}\\x{2696}\\x{2697}\\x{2699}\\x{269b}" +
"\\x{269c}\\x{26a0}\\x{26a1}\\x{26aa}\\x{26ab}\\x{26b0}\\x{26b1}\\x{26bd}" +
"\\x{26be}\\x{26c4}\\x{26c5}\\x{26c8}\\x{26ce}\\x{26cf}\\x{26d1}\\x{26d3}" +
"\\x{26d4}\\x{26e9}\\x{26ea}\\x{26f0}-\\x{26f5}\\x{26f7}-\\x{26fa}\\x{26fd}" +
"\\x{2702}\\x{2705}\\x{2708}-\\x{270d}\\x{270f}]|\\x{23}\\x{20e3}|\\x{2a}" +
"\\x{20e3}|\\x{30}\\x{20e3}|\\x{31}\\x{20e3}|\\x{32}\\x{20e3}|\\x{33}\\x{20e3}|" +
"\\x{34}\\x{20e3}|\\x{35}\\x{20e3}|\\x{36}\\x{20e3}|\\x{37}\\x{20e3}|\\x{38}" +
"\\x{20e3}|\\x{39}\\x{20e3}|\\x{1f1e6}[\\x{1f1e8}-\\x{1f1ec}\\x{1f1ee}" +
"\\x{1f1f1}\\x{1f1f2}\\x{1f1f4}\\x{1f1f6}-\\x{1f1fa}\\x{1f1fc}\\x{1f1fd}" +
"\\x{1f1ff}]|\\x{1f1e7}[\\x{1f1e6}\\x{1f1e7}\\x{1f1e9}-\\x{1f1ef}\\x{1f1f1}-" +
"\\x{1f1f4}\\x{1f1f6}-\\x{1f1f9}\\x{1f1fb}\\x{1f1fc}\\x{1f1fe}\\x{1f1ff}]|" +
"\\x{1f1e8}[\\x{1f1e6}\\x{1f1e8}\\x{1f1e9}\\x{1f1eb}-\\x{1f1ee}\\x{1f1f0}-" +
"\\x{1f1f5}\\x{1f1f7}\\x{1f1fa}-\\x{1f1ff}]|\\x{1f1e9}[\\x{1f1ea}\\x{1f1ec}" +
"\\x{1f1ef}\\x{1f1f0}\\x{1f1f2}\\x{1f1f4}\\x{1f1ff}]|\\x{1f1ea}[\\x{1f1e6}" +
"\\x{1f1e8}\\x{1f1ea}\\x{1f1ec}\\x{1f1ed}\\x{1f1f7}-\\x{1f1fa}]|\\x{1f1eb}[" +
"\\x{1f1ee}-\\x{1f1f0}\\x{1f1f2}\\x{1f1f4}\\x{1f1f7}]|\\x{1f1ec}[\\x{1f1e6}" +
"\\x{1f1e7}\\x{1f1e9}-\\x{1f1ee}\\x{1f1f1}-\\x{1f1f3}\\x{1f1f5}-\\x{1f1fa}" +
"\\x{1f1fc}\\x{1f1fe}]|\\x{1f1ed}[\\x{1f1f0}\\x{1f1f2}\\x{1f1f3}\\x{1f1f7}" +
"\\x{1f1f9}\\x{1f1fa}]|\\x{1f1ee}[\\x{1f1e8}-\\x{1f1ea}\\x{1f1f1}-\\x{1f1f4}" +
"\\x{1f1f6}-\\x{1f1f9}]|\\x{1f1ef}[\\x{1f1ea}\\x{1f1f2}\\x{1f1f4}\\x{1f1f5}]" +
"|\\x{1f1f0}[\\x{1f1ea}\\x{1f1ec}-\\x{1f1ee}\\x{1f1f2}\\x{1f1f3}\\x{1f1f5}" +
"\\x{1f1f7}\\x{1f1fc}\\x{1f1fe}\\x{1f1ff}]|\\x{1f1f1}[\\x{1f1e6}-\\x{1f1e8}" +
"\\x{1f1ee}\\x{1f1f0}\\x{1f1f7}-\\x{1f1fb}\\x{1f1fe}]|\\x{1f1f2}[\\x{1f1e6}" +
"\\x{1f1e8}-\\x{1f1ed}\\x{1f1f0}-\\x{1f1ff}]|\\x{1f1f3}[\\x{1f1e6}\\x{1f1e8}" +
"\\x{1f1ea}-\\x{1f1ec}\\x{1f1ee}\\x{1f1f1}\\x{1f1f4}\\x{1f1f5}\\x{1f1f7}" +
"\\x{1f1fa}\\x{1f1ff}]|\\x{1f1f4}\\x{1f1f2}|\\x{1f1f5}[\\x{1f1e6}\\x{1f1ea}-" +
"\\x{1f1ed}\\x{1f1f0}-\\x{1f1f3}\\x{1f1f7}-\\x{1f1f9}\\x{1f1fc}\\x{1f1fe}]|" +
"\\x{1f1f6}\\x{1f1e6}|\\x{1f1f7}[\\x{1f1ea}\\x{1f1f4}\\x{1f1f8}\\x{1f1fa}" +
"\\x{1f1fc}]|\\x{1f1f8}[\\x{1f1e6}-\\x{1f1ea}\\x{1f1ec}-\\x{1f1f4}\\x{1f1f7}-" +
"\\x{1f1f9}\\x{1f1fb}\\x{1f1fd}-\\x{1f1ff}]|\\x{1f1f9}[\\x{1f1e6}\\x{1f1e8}" +
"\\x{1f1e9}\\x{1f1eb}-\\x{1f1ed}\\x{1f1ef}-\\x{1f1f4}\\x{1f1f7}\\x{1f1f9}" +
"\\x{1f1fb}\\x{1f1fc}\\x{1f1ff}]|\\x{1f1fa}[\\x{1f1e6}\\x{1f1ec}\\x{1f1f2}" +
"\\x{1f1f8}\\x{1f1fe}\\x{1f1ff}]|\\x{1f1fb}[\\x{1f1e6}\\x{1f1e8}\\x{1f1ea}" +
"\\x{1f1ec}\\x{1f1ee}\\x{1f1f3}\\x{1f1fa}]|\\x{1f1fc}[\\x{1f1eb}\\x{1f1f8}]|" +
"\\x{1f1fd}\\x{1f1f0}|\\x{1f1fe}[\\x{1f1ea}\\x{1f1f9}]|\\x{1f1ff}[\\x{1f1e6}" +
"\\x{1f1f2}\\x{1f1fc}]"
// Node - the global OpenBazaar node instance
var Node *OpenBazaarNode
var inflightPublishRequests int
// OpenBazaarNode - represents an OpenBazaar node, encapsulating the IPFS node, wallets, etc.
type OpenBazaarNode struct {
// IPFS node object
IpfsNode *core.IpfsNode
// An implementation of the custom DHT used by OpenBazaar
DHT *dht.IpfsDHT
// The roothash of the node directory inside the openbazaar repo.
// This directory hash is published on IPNS at our peer ID making
// the directory publicly viewable on the network.
RootHash string
// The path to the openbazaar repo in the file system
RepoPath string
// The OpenBazaar network service for direct communication between peers
Service net.NetworkService
// Database for storing node specific data
Datastore repo.Datastore
// Websocket channel used for pushing data to the UI
Broadcast chan repo.Notifier
// A map of cryptocurrency wallets
Multiwallet multiwallet.MultiWallet
// Storage for our outgoing messages
MessageStorage sto.OfflineMessagingStorage
// A service that periodically checks the dht for outstanding messages
MessageRetriever *ret.MessageRetriever
// OfflineMessageFailoverTimeout is the duration until the protocol
// will stop looking for the peer to send a direct message and failover to
// sending an offline message
OfflineMessageFailoverTimeout time.Duration
// A service that periodically republishes active pointers
PointerRepublisher *rep.PointerRepublisher
// Optional nodes to push user data to
PushNodes []peer.ID
// The user-agent for this node
UserAgent string
// A dialer for Tor if available
TorDialer proxy.Dialer
// Manage blocked peers
BanManager *net.BanManager
// Allow other nodes to push data to this node for storage
AcceptStoreRequests bool
// RecordAgingNotifier is a worker that walks the cases datastore to
// notify the user as disputes age past certain thresholds
RecordAgingNotifier *recordAgingNotifier
// Generic pubsub interface
Pubsub ipfs.Pubsub
// The master private key derived from the mnemonic
MasterPrivateKey *hdkeychain.ExtendedKey
// The number of DHT records to collect before returning. The larger the number
// the slower the query but the less likely we will get an old record.
IPNSQuorumSize uint
TestnetEnable bool
RegressionTestEnable bool
PublishLock sync.Mutex
seedLock sync.Mutex
InitalPublishComplete bool
// Daemon version
Version string
}
// TestNetworkEnabled indicates whether the node is operating with test parameters
func (n *OpenBazaarNode) TestNetworkEnabled() bool { return n.TestnetEnable }
// RegressionNetworkEnabled indicates whether the node is operating with regression parameters
func (n *OpenBazaarNode) RegressionNetworkEnabled() bool { return n.RegressionTestEnable }
// SeedNode - publish to IPNS
func (n *OpenBazaarNode) SeedNode() error {
n.seedLock.Lock()
ipfs.UnPinDir(n.IpfsNode, n.RootHash)
var aerr error
var rootHash string
// There's an IPFS bug on Windows, possibly related to the Windows indexer, that can cause this to fail.
// If the first attempt fails, retry a couple of times before giving up.
for i := 0; i < 3; i++ {
rootHash, aerr = ipfs.AddDirectory(n.IpfsNode, path.Join(n.RepoPath, "root"))
if aerr == nil {
break
}
time.Sleep(time.Millisecond * 500)
}
if aerr != nil {
n.seedLock.Unlock()
return aerr
}
n.RootHash = rootHash
n.seedLock.Unlock()
n.InitalPublishComplete = true
go n.publish(rootHash)
return nil
}
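// publish notifies the UI that publishing has started, seeds the root hash to any
// configured push nodes, and then publishes the hash to IPNS, broadcasting the final status.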
func (n *OpenBazaarNode) publish(hash string) {
// Multiple publishes may have been queued
// We only need to publish the most recent
n.PublishLock.Lock()
defer n.PublishLock.Unlock()
if hash != n.RootHash {
return
}
if inflightPublishRequests == 0 {
n.Broadcast <- repo.StatusNotification{Status: "publishing"}
}
err := n.sendToPushNodes(hash)
if err != nil {
log.Error(err)
return
}
inflightPublishRequests++
err = ipfs.Publish(n.IpfsNode, hash)
inflightPublishRequests--
if inflightPublishRequests == 0 {
if err != nil {
log.Error(err)
n.Broadcast <- repo.StatusNotification{Status: "error publishing"}
} else {
n.Broadcast <- repo.StatusNotification{Status: "publish complete"}
}
}
}
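// sendToPushNodes fetches the object graph for the given root hash, appends the CIDs of any
// outgoing offline-message pointers we are seeding, and pushes the graph to each push node.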
func (n *OpenBazaarNode) sendToPushNodes(hash string) error {
id, err := cid.Decode(hash)
if err != nil {
return err
}
var graph []cid.Cid
if len(n.PushNodes) > 0 {
graph, err = ipfs.FetchGraph(n.IpfsNode, &id)
if err != nil {
return err
}
pointers, err := n.Datastore.Pointers().GetByPurpose(ipfs.MESSAGE)
if err != nil {
return err
}
// Check if we're seeding any outgoing messages and add their CIDs to the graph
for _, p := range pointers {
if len(p.Value.Addrs) > 0 {
s, err := p.Value.Addrs[0].ValueForProtocol(ma.P_IPFS)
if err != nil {
continue
}
c, err := cid.Decode(s)
if err != nil {
continue
}
graph = append(graph, c)
}
}
}
for _, p := range n.PushNodes {
go n.retryableSeedStoreToPeer(p, hash, graph)
}
return nil
}
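// retryableSeedStoreToPeer pushes the graph to a single peer, retrying with exponential
// backoff (starting at 2s) and giving up once the backoff exceeds 60s or the root hash changes.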
func (n *OpenBazaarNode) retryableSeedStoreToPeer(pid peer.ID, graphHash string, graph []cid.Cid) {
var retryTimeout = 2 * time.Second
for {
if graphHash != n.RootHash {
log.Errorf("root hash has changed, aborting push to %s", pid.Pretty())
return
}
err := n.SendStore(pid.Pretty(), graph)
if err != nil {
if retryTimeout > 60*time.Second {
log.Errorf("error pushing to peer %s: %s", pid.Pretty(), err.Error())
return
}
log.Errorf("error pushing to peer %s...backing off: %s", pid.Pretty(), err.Error())
time.Sleep(retryTimeout)
retryTimeout *= 2
continue
}
return
}
}
// SetUpRepublisher - periodic publishing to IPNS
func (n *OpenBazaarNode) | (interval time.Duration) {
if interval == 0 {
return
}
ticker := time.NewTicker(interval)
go func() {
for range ticker.C {
n.UpdateFollow()
n.SeedNode()
}
}()
}
/*EncryptMessage is a placeholder until libsignal is operational.
For now we just encrypt outgoing offline messages with the long-lived identity key.
Optionally you may provide a public key to avoid doing an IPFS lookup. */
func (n *OpenBazaarNode) EncryptMessage(peerID peer.ID, peerKey *libp2p.PubKey, message []byte) (ct []byte, rerr error) {
ctx, cancel := context.WithTimeout(context.Background(), n.OfflineMessageFailoverTimeout)
defer cancel()
if peerKey == nil {
var (
pubKey libp2p.PubKey
store = n.IpfsNode.Repo.Datastore()
)
keyval, err := ipfs.GetCachedPubkey(store, peerID.Pretty())
if err != nil {
pubKey, err = routing.GetPublicKey(n.IpfsNode.Routing, ctx, peerID)
if err != nil {
log.Errorf("Failed to find public key for %s", peerID.Pretty())
return nil, err
}
} else {
pubKey, err = libp2p.UnmarshalPublicKey(keyval)
if err != nil {
log.Errorf("Failed to find public key for %s", peerID.Pretty())
return nil, err
}
}
peerKey = &pubKey
}
if peerID.MatchesPublicKey(*peerKey) {
ciphertext, err := net.Encrypt(*peerKey, message)
if err != nil {
return nil, err
}
return ciphertext, nil
}
log.Errorf("peer public key and id do not match for peer: %s", peerID.Pretty())
return nil, errors.New("peer public key and id do not match")
}
// IPFSIdentityString - IPFS identifier
func (n *OpenBazaarNode) IPFSIdentityString() string {
return n.IpfsNode.Identity.Pretty()
}
func ToHtmlEntities(str string) string {
var rx = regexp.MustCompile(EmojiPattern)
return rx.ReplaceAllStringFunc(str, func(s string) string {
r, _ := utf8.DecodeRuneInString(s)
html := fmt.Sprintf(`&#x%X;`, r)
return html
})
}
// createSlugFor creates a slug from a multi-language string
func createSlugFor(slugName string) string {
l := SentenceMaxCharacters - SlugBuffer
slugName = ToHtmlEntities(slugName)
slug := slug.Make(slugName)
if len(slug) < SentenceMaxCharacters-SlugBuffer {
l = len(slug)
}
return slug[:l]
}
| SetUpRepublisher | identifier_name |
core.go | package core
import (
"errors"
"fmt"
"regexp"
"unicode/utf8"
dht "gx/ipfs/QmSY3nkMNLzh9GdbFKK5tT7YMfLpf52iUZ8ZRkr29MJaa5/go-libp2p-kad-dht"
libp2p "gx/ipfs/QmTW4SdgBWq9GjsBsHeUx8WuGxzhgzAf88UMH2w62PC8yK/go-libp2p-crypto"
ma "gx/ipfs/QmTZBfrPJmjWsCvHEtX5FE6KimVJhsJg5sBbqEFYf4UZtL/go-multiaddr"
cid "gx/ipfs/QmTbxNB1NwDesLmKTscr4udL2tVP7MaxvXnD1D9yX7g3PN/go-cid"
peer "gx/ipfs/QmYVXrKrKHDC9FobgmcmshCDyWwdrfwfanNQN4oxJ9Fk3h/go-libp2p-peer"
routing "gx/ipfs/QmYxUdYY9S6yg5tSPVin5GFTvtfsLauVcr7reHDD3dM8xf/go-libp2p-routing"
"path"
"sync"
"time"
"github.com/OpenBazaar/multiwallet"
"github.com/btcsuite/btcutil/hdkeychain"
"github.com/gosimple/slug"
"github.com/ipfs/go-ipfs/core"
"github.com/kimitzu/kimitzu-go/ipfs"
"github.com/kimitzu/kimitzu-go/net"
rep "github.com/kimitzu/kimitzu-go/net/repointer"
ret "github.com/kimitzu/kimitzu-go/net/retriever"
"github.com/kimitzu/kimitzu-go/repo"
sto "github.com/kimitzu/kimitzu-go/storage"
logging "github.com/op/go-logging"
"golang.org/x/net/context"
"golang.org/x/net/proxy"
)
const (
// KIMITZU_VERSION - Kimitzu Development Version
KIMITZU_VERSION = "0.2.0-alpha.1"
// VERSION - current version
VERSION = "0.13.7-kimitzu"
// USERAGENT - user-agent header string for Kimitzu nodes,
// e.g. "/openbazaar-kimitzu-go: v0.13.7-kimitzu, v0.2.0-alpha.1/"
USERAGENT = "/openbazaar-kimitzu-go: v" + VERSION + ", v" + KIMITZU_VERSION + "/"
)
var log = logging.MustGetLogger("core")
const EmojiPattern = "[\\x{2712}\\x{2714}\\x{2716}\\x{271d}\\x{2721}\\x{2728}\\x{2733}" +
"\\x{2734}\\x{2744}\\x{2747}\\x{274c}\\x{274e}\\x{2753}-\\x{2755}\\x{2757}" +
"\\x{2763}\\x{2764}\\x{2795}-\\x{2797}\\x{27a1}\\x{27b0}\\x{27bf}\\x{2934}" +
"\\x{2935}\\x{2b05}-\\x{2b07}\\x{2b1b}\\x{2b1c}\\x{2b50}\\x{2b55}\\x{3030}" +
"\\x{303d}\\x{1f004}\\x{1f0cf}\\x{1f170}\\x{1f171}\\x{1f17e}\\x{1f17f}" +
"\\x{1f18e}\\x{1f191}-\\x{1f19a}\\x{1f201}\\x{1f202}\\x{1f21a}\\x{1f22f}" +
"\\x{1f232}-\\x{1f23a}\\x{1f250}\\x{1f251}\\x{1f300}-\\x{1f321}\\x{1f324}-" +
"\\x{1f393}\\x{1f396}\\x{1f397}\\x{1f399}-\\x{1f39b}\\x{1f39e}-\\x{1f3f0}" +
"\\x{1f3f3}-\\x{1f3f5}\\x{1f3f7}-\\x{1f4fd}\\x{1f4ff}-\\x{1f53d}\\x{1f549}-" +
"\\x{1f54e}\\x{1f550}-\\x{1f567}\\x{1f56f}\\x{1f570}\\x{1f573}-\\x{1f579}" +
"\\x{1f587}\\x{1f58a}-\\x{1f58d}\\x{1f590}\\x{1f595}\\x{1f596}\\x{1f5a5}" +
"\\x{1f5a8}\\x{1f5b1}\\x{1f5b2}\\x{1f5bc}\\x{1f5c2}-\\x{1f5c4}\\x{1f5d1}-" +
"\\x{1f5d3}\\x{1f5dc}-\\x{1f5de}\\x{1f5e1}\\x{1f5e3}\\x{1f5ef}\\x{1f5f3}" +
"\\x{1f5fa}-\\x{1f64f}\\x{1f680}-\\x{1f6c5}\\x{1f6cb}-\\x{1f6d0}\\x{1f6e0}-" +
"\\x{1f6e5}\\x{1f6e9}\\x{1f6eb}\\x{1f6ec}\\x{1f6f0}\\x{1f6f3}\\x{1f910}-" +
"\\x{1f918}\\x{1f980}-\\x{1f984}\\x{1f9c0}\\x{3297}\\x{3299}\\x{a9}\\x{ae}" +
"\\x{203c}\\x{2049}\\x{2122}\\x{2139}\\x{2194}-\\x{2199}\\x{21a9}\\x{21aa}" +
"\\x{231a}\\x{231b}\\x{2328}\\x{2388}\\x{23cf}\\x{23e9}-\\x{23f3}\\x{23f8}-" +
"\\x{23fa}\\x{24c2}\\x{25aa}\\x{25ab}\\x{25b6}\\x{25c0}\\x{25fb}-\\x{25fe}" +
"\\x{2600}-\\x{2604}\\x{260e}\\x{2611}\\x{2614}\\x{2615}\\x{2618}\\x{261d}" +
"\\x{2620}\\x{2622}\\x{2623}\\x{2626}\\x{262a}\\x{262e}\\x{262f}\\x{2638}-" +
"\\x{263a}\\x{2648}-\\x{2653}\\x{2660}\\x{2663}\\x{2665}\\x{2666}\\x{2668}" +
"\\x{267b}\\x{267f}\\x{2692}-\\x{2694}\\x{2696}\\x{2697}\\x{2699}\\x{269b}" +
"\\x{269c}\\x{26a0}\\x{26a1}\\x{26aa}\\x{26ab}\\x{26b0}\\x{26b1}\\x{26bd}" +
"\\x{26be}\\x{26c4}\\x{26c5}\\x{26c8}\\x{26ce}\\x{26cf}\\x{26d1}\\x{26d3}" +
"\\x{26d4}\\x{26e9}\\x{26ea}\\x{26f0}-\\x{26f5}\\x{26f7}-\\x{26fa}\\x{26fd}" +
"\\x{2702}\\x{2705}\\x{2708}-\\x{270d}\\x{270f}]|\\x{23}\\x{20e3}|\\x{2a}" +
"\\x{20e3}|\\x{30}\\x{20e3}|\\x{31}\\x{20e3}|\\x{32}\\x{20e3}|\\x{33}\\x{20e3}|" +
"\\x{34}\\x{20e3}|\\x{35}\\x{20e3}|\\x{36}\\x{20e3}|\\x{37}\\x{20e3}|\\x{38}" +
"\\x{20e3}|\\x{39}\\x{20e3}|\\x{1f1e6}[\\x{1f1e8}-\\x{1f1ec}\\x{1f1ee}" +
"\\x{1f1f1}\\x{1f1f2}\\x{1f1f4}\\x{1f1f6}-\\x{1f1fa}\\x{1f1fc}\\x{1f1fd}" +
"\\x{1f1ff}]|\\x{1f1e7}[\\x{1f1e6}\\x{1f1e7}\\x{1f1e9}-\\x{1f1ef}\\x{1f1f1}-" +
"\\x{1f1f4}\\x{1f1f6}-\\x{1f1f9}\\x{1f1fb}\\x{1f1fc}\\x{1f1fe}\\x{1f1ff}]|" +
"\\x{1f1e8}[\\x{1f1e6}\\x{1f1e8}\\x{1f1e9}\\x{1f1eb}-\\x{1f1ee}\\x{1f1f0}-" +
"\\x{1f1f5}\\x{1f1f7}\\x{1f1fa}-\\x{1f1ff}]|\\x{1f1e9}[\\x{1f1ea}\\x{1f1ec}" +
"\\x{1f1ef}\\x{1f1f0}\\x{1f1f2}\\x{1f1f4}\\x{1f1ff}]|\\x{1f1ea}[\\x{1f1e6}" +
"\\x{1f1e8}\\x{1f1ea}\\x{1f1ec}\\x{1f1ed}\\x{1f1f7}-\\x{1f1fa}]|\\x{1f1eb}[" +
"\\x{1f1ee}-\\x{1f1f0}\\x{1f1f2}\\x{1f1f4}\\x{1f1f7}]|\\x{1f1ec}[\\x{1f1e6}" +
"\\x{1f1e7}\\x{1f1e9}-\\x{1f1ee}\\x{1f1f1}-\\x{1f1f3}\\x{1f1f5}-\\x{1f1fa}" +
"\\x{1f1fc}\\x{1f1fe}]|\\x{1f1ed}[\\x{1f1f0}\\x{1f1f2}\\x{1f1f3}\\x{1f1f7}" +
"\\x{1f1f9}\\x{1f1fa}]|\\x{1f1ee}[\\x{1f1e8}-\\x{1f1ea}\\x{1f1f1}-\\x{1f1f4}" +
"\\x{1f1f6}-\\x{1f1f9}]|\\x{1f1ef}[\\x{1f1ea}\\x{1f1f2}\\x{1f1f4}\\x{1f1f5}]" +
"|\\x{1f1f0}[\\x{1f1ea}\\x{1f1ec}-\\x{1f1ee}\\x{1f1f2}\\x{1f1f3}\\x{1f1f5}" +
"\\x{1f1f7}\\x{1f1fc}\\x{1f1fe}\\x{1f1ff}]|\\x{1f1f1}[\\x{1f1e6}-\\x{1f1e8}" +
"\\x{1f1ee}\\x{1f1f0}\\x{1f1f7}-\\x{1f1fb}\\x{1f1fe}]|\\x{1f1f2}[\\x{1f1e6}" +
"\\x{1f1e8}-\\x{1f1ed}\\x{1f1f0}-\\x{1f1ff}]|\\x{1f1f3}[\\x{1f1e6}\\x{1f1e8}" +
"\\x{1f1ea}-\\x{1f1ec}\\x{1f1ee}\\x{1f1f1}\\x{1f1f4}\\x{1f1f5}\\x{1f1f7}" +
"\\x{1f1fa}\\x{1f1ff}]|\\x{1f1f4}\\x{1f1f2}|\\x{1f1f5}[\\x{1f1e6}\\x{1f1ea}-" +
"\\x{1f1ed}\\x{1f1f0}-\\x{1f1f3}\\x{1f1f7}-\\x{1f1f9}\\x{1f1fc}\\x{1f1fe}]|" +
"\\x{1f1f6}\\x{1f1e6}|\\x{1f1f7}[\\x{1f1ea}\\x{1f1f4}\\x{1f1f8}\\x{1f1fa}" +
"\\x{1f1fc}]|\\x{1f1f8}[\\x{1f1e6}-\\x{1f1ea}\\x{1f1ec}-\\x{1f1f4}\\x{1f1f7}-" +
"\\x{1f1f9}\\x{1f1fb}\\x{1f1fd}-\\x{1f1ff}]|\\x{1f1f9}[\\x{1f1e6}\\x{1f1e8}" +
"\\x{1f1e9}\\x{1f1eb}-\\x{1f1ed}\\x{1f1ef}-\\x{1f1f4}\\x{1f1f7}\\x{1f1f9}" +
"\\x{1f1fb}\\x{1f1fc}\\x{1f1ff}]|\\x{1f1fa}[\\x{1f1e6}\\x{1f1ec}\\x{1f1f2}" +
"\\x{1f1f8}\\x{1f1fe}\\x{1f1ff}]|\\x{1f1fb}[\\x{1f1e6}\\x{1f1e8}\\x{1f1ea}" +
"\\x{1f1ec}\\x{1f1ee}\\x{1f1f3}\\x{1f1fa}]|\\x{1f1fc}[\\x{1f1eb}\\x{1f1f8}]|" +
"\\x{1f1fd}\\x{1f1f0}|\\x{1f1fe}[\\x{1f1ea}\\x{1f1f9}]|\\x{1f1ff}[\\x{1f1e6}" +
"\\x{1f1f2}\\x{1f1fc}]"
// Node - the global OpenBazaar node instance
var Node *OpenBazaarNode
var inflightPublishRequests int
// OpenBazaarNode - represents an OpenBazaar node, encapsulating the IPFS node, wallets, etc.
type OpenBazaarNode struct {
// IPFS node object
IpfsNode *core.IpfsNode
// An implementation of the custom DHT used by OpenBazaar
DHT *dht.IpfsDHT
// The roothash of the node directory inside the openbazaar repo.
// This directory hash is published on IPNS at our peer ID making
// the directory publicly viewable on the network.
RootHash string
// The path to the openbazaar repo in the file system
RepoPath string
// The OpenBazaar network service for direct communication between peers
Service net.NetworkService
// Database for storing node specific data
Datastore repo.Datastore
// Websocket channel used for pushing data to the UI
Broadcast chan repo.Notifier
// A map of cryptocurrency wallets
Multiwallet multiwallet.MultiWallet
// Storage for our outgoing messages
MessageStorage sto.OfflineMessagingStorage
// A service that periodically checks the dht for outstanding messages
MessageRetriever *ret.MessageRetriever
// OfflineMessageFailoverTimeout is the duration until the protocol
// will stop looking for the peer to send a direct message and failover to
// sending an offline message
OfflineMessageFailoverTimeout time.Duration
// A service that periodically republishes active pointers
PointerRepublisher *rep.PointerRepublisher
// Optional nodes to push user data to
PushNodes []peer.ID
// The user-agent for this node
UserAgent string
// A dialer for Tor if available
TorDialer proxy.Dialer
// Manage blocked peers
BanManager *net.BanManager
// Allow other nodes to push data to this node for storage
AcceptStoreRequests bool
// RecordAgingNotifier is a worker that walks the cases datastore to
// notify the user as disputes age past certain thresholds
RecordAgingNotifier *recordAgingNotifier
// Generic pubsub interface
Pubsub ipfs.Pubsub
// The master private key derived from the mnemonic
MasterPrivateKey *hdkeychain.ExtendedKey
// The number of DHT records to collect before returning. The larger the number
// the slower the query but the less likely we will get an old record.
IPNSQuorumSize uint
TestnetEnable bool
RegressionTestEnable bool
PublishLock sync.Mutex
seedLock sync.Mutex
InitalPublishComplete bool
// Daemon version
Version string
}
// TestNetworkEnabled indicates whether the node is operating with test parameters
func (n *OpenBazaarNode) TestNetworkEnabled() bool { return n.TestnetEnable }
// RegressionNetworkEnabled indicates whether the node is operating with regression parameters
func (n *OpenBazaarNode) RegressionNetworkEnabled() bool { return n.RegressionTestEnable }
// SeedNode - publish to IPNS
func (n *OpenBazaarNode) SeedNode() error {
n.seedLock.Lock()
ipfs.UnPinDir(n.IpfsNode, n.RootHash)
var aerr error
var rootHash string
// There's an IPFS bug on Windows, possibly related to the Windows indexer, that can cause this to fail.
// If the first attempt fails, retry a couple of times before giving up.
for i := 0; i < 3; i++ {
rootHash, aerr = ipfs.AddDirectory(n.IpfsNode, path.Join(n.RepoPath, "root"))
if aerr == nil {
break
}
time.Sleep(time.Millisecond * 500)
}
if aerr != nil {
n.seedLock.Unlock()
return aerr
}
n.RootHash = rootHash
n.seedLock.Unlock()
n.InitalPublishComplete = true
go n.publish(rootHash)
return nil
}
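// publish notifies the UI that publishing has started, seeds the root hash to any
// configured push nodes, and then publishes the hash to IPNS, broadcasting the final status.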
func (n *OpenBazaarNode) publish(hash string) {
// Multiple publishes may have been queued
// We only need to publish the most recent
n.PublishLock.Lock()
defer n.PublishLock.Unlock()
if hash != n.RootHash {
return
}
if inflightPublishRequests == 0 |
err := n.sendToPushNodes(hash)
if err != nil {
log.Error(err)
return
}
inflightPublishRequests++
err = ipfs.Publish(n.IpfsNode, hash)
inflightPublishRequests--
if inflightPublishRequests == 0 {
if err != nil {
log.Error(err)
n.Broadcast <- repo.StatusNotification{Status: "error publishing"}
} else {
n.Broadcast <- repo.StatusNotification{Status: "publish complete"}
}
}
}
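// sendToPushNodes fetches the object graph for the given root hash, appends the CIDs of any
// outgoing offline-message pointers we are seeding, and pushes the graph to each push node.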
func (n *OpenBazaarNode) sendToPushNodes(hash string) error {
id, err := cid.Decode(hash)
if err != nil {
return err
}
var graph []cid.Cid
if len(n.PushNodes) > 0 {
graph, err = ipfs.FetchGraph(n.IpfsNode, &id)
if err != nil {
return err
}
pointers, err := n.Datastore.Pointers().GetByPurpose(ipfs.MESSAGE)
if err != nil {
return err
}
// Check if we're seeding any outgoing messages and add their CIDs to the graph
for _, p := range pointers {
if len(p.Value.Addrs) > 0 {
s, err := p.Value.Addrs[0].ValueForProtocol(ma.P_IPFS)
if err != nil {
continue
}
c, err := cid.Decode(s)
if err != nil {
continue
}
graph = append(graph, c)
}
}
}
for _, p := range n.PushNodes {
go n.retryableSeedStoreToPeer(p, hash, graph)
}
return nil
}
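// retryableSeedStoreToPeer pushes the graph to a single peer, retrying with exponential
// backoff (starting at 2s) and giving up once the backoff exceeds 60s or the root hash changes.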
func (n *OpenBazaarNode) retryableSeedStoreToPeer(pid peer.ID, graphHash string, graph []cid.Cid) {
var retryTimeout = 2 * time.Second
for {
if graphHash != n.RootHash {
log.Errorf("root hash has changed, aborting push to %s", pid.Pretty())
return
}
err := n.SendStore(pid.Pretty(), graph)
if err != nil {
if retryTimeout > 60*time.Second {
log.Errorf("error pushing to peer %s: %s", pid.Pretty(), err.Error())
return
}
log.Errorf("error pushing to peer %s...backing off: %s", pid.Pretty(), err.Error())
time.Sleep(retryTimeout)
retryTimeout *= 2
continue
}
return
}
}
// SetUpRepublisher - periodic publishing to IPNS
func (n *OpenBazaarNode) SetUpRepublisher(interval time.Duration) {
if interval == 0 {
return
}
ticker := time.NewTicker(interval)
go func() {
for range ticker.C {
n.UpdateFollow()
n.SeedNode()
}
}()
}
/*EncryptMessage is a placeholder until libsignal is operational.
For now we just encrypt outgoing offline messages with the long-lived identity key.
Optionally you may provide a public key to avoid doing an IPFS lookup. */
func (n *OpenBazaarNode) EncryptMessage(peerID peer.ID, peerKey *libp2p.PubKey, message []byte) (ct []byte, rerr error) {
ctx, cancel := context.WithTimeout(context.Background(), n.OfflineMessageFailoverTimeout)
defer cancel()
if peerKey == nil {
var (
pubKey libp2p.PubKey
store = n.IpfsNode.Repo.Datastore()
)
keyval, err := ipfs.GetCachedPubkey(store, peerID.Pretty())
if err != nil {
pubKey, err = routing.GetPublicKey(n.IpfsNode.Routing, ctx, peerID)
if err != nil {
log.Errorf("Failed to find public key for %s", peerID.Pretty())
return nil, err
}
} else {
pubKey, err = libp2p.UnmarshalPublicKey(keyval)
if err != nil {
log.Errorf("Failed to find public key for %s", peerID.Pretty())
return nil, err
}
}
peerKey = &pubKey
}
if peerID.MatchesPublicKey(*peerKey) {
ciphertext, err := net.Encrypt(*peerKey, message)
if err != nil {
return nil, err
}
return ciphertext, nil
}
log.Errorf("peer public key and id do not match for peer: %s", peerID.Pretty())
return nil, errors.New("peer public key and id do not match")
}
// IPFSIdentityString - IPFS identifier
func (n *OpenBazaarNode) IPFSIdentityString() string {
return n.IpfsNode.Identity.Pretty()
}
func ToHtmlEntities(str string) string {
var rx = regexp.MustCompile(EmojiPattern)
return rx.ReplaceAllStringFunc(str, func(s string) string {
r, _ := utf8.DecodeRuneInString(s)
html := fmt.Sprintf(`&#x%X;`, r)
return html
})
}
// createSlugFor creates a slug from a multi-language string
func createSlugFor(slugName string) string {
l := SentenceMaxCharacters - SlugBuffer
slugName = ToHtmlEntities(slugName)
slug := slug.Make(slugName)
if len(slug) < SentenceMaxCharacters-SlugBuffer {
l = len(slug)
}
return slug[:l]
}
| {
n.Broadcast <- repo.StatusNotification{Status: "publishing"}
} | conditional_block |
core.go | package core
import (
"errors"
"fmt"
"regexp"
"unicode/utf8"
dht "gx/ipfs/QmSY3nkMNLzh9GdbFKK5tT7YMfLpf52iUZ8ZRkr29MJaa5/go-libp2p-kad-dht"
libp2p "gx/ipfs/QmTW4SdgBWq9GjsBsHeUx8WuGxzhgzAf88UMH2w62PC8yK/go-libp2p-crypto"
ma "gx/ipfs/QmTZBfrPJmjWsCvHEtX5FE6KimVJhsJg5sBbqEFYf4UZtL/go-multiaddr"
cid "gx/ipfs/QmTbxNB1NwDesLmKTscr4udL2tVP7MaxvXnD1D9yX7g3PN/go-cid"
peer "gx/ipfs/QmYVXrKrKHDC9FobgmcmshCDyWwdrfwfanNQN4oxJ9Fk3h/go-libp2p-peer"
routing "gx/ipfs/QmYxUdYY9S6yg5tSPVin5GFTvtfsLauVcr7reHDD3dM8xf/go-libp2p-routing"
"path"
"sync"
"time"
"github.com/OpenBazaar/multiwallet"
"github.com/btcsuite/btcutil/hdkeychain"
"github.com/gosimple/slug"
"github.com/ipfs/go-ipfs/core"
"github.com/kimitzu/kimitzu-go/ipfs"
"github.com/kimitzu/kimitzu-go/net"
rep "github.com/kimitzu/kimitzu-go/net/repointer"
ret "github.com/kimitzu/kimitzu-go/net/retriever"
"github.com/kimitzu/kimitzu-go/repo"
sto "github.com/kimitzu/kimitzu-go/storage"
logging "github.com/op/go-logging"
"golang.org/x/net/context"
"golang.org/x/net/proxy"
)
const (
// KIMITZU_VERSION - Kimitzu Development Version
KIMITZU_VERSION = "0.2.0-alpha.1"
// VERSION - current version
VERSION = "0.13.7-kimitzu"
// USERAGENT - user-agent header string for Kimitzu nodes,
// e.g. "/openbazaar-kimitzu-go: v0.13.7-kimitzu, v0.2.0-alpha.1/"
USERAGENT = "/openbazaar-kimitzu-go: v" + VERSION + ", v" + KIMITZU_VERSION + "/"
)
var log = logging.MustGetLogger("core")
const EmojiPattern = "[\\x{2712}\\x{2714}\\x{2716}\\x{271d}\\x{2721}\\x{2728}\\x{2733}" +
"\\x{2734}\\x{2744}\\x{2747}\\x{274c}\\x{274e}\\x{2753}-\\x{2755}\\x{2757}" +
"\\x{2763}\\x{2764}\\x{2795}-\\x{2797}\\x{27a1}\\x{27b0}\\x{27bf}\\x{2934}" +
"\\x{2935}\\x{2b05}-\\x{2b07}\\x{2b1b}\\x{2b1c}\\x{2b50}\\x{2b55}\\x{3030}" +
"\\x{303d}\\x{1f004}\\x{1f0cf}\\x{1f170}\\x{1f171}\\x{1f17e}\\x{1f17f}" +
"\\x{1f18e}\\x{1f191}-\\x{1f19a}\\x{1f201}\\x{1f202}\\x{1f21a}\\x{1f22f}" +
"\\x{1f232}-\\x{1f23a}\\x{1f250}\\x{1f251}\\x{1f300}-\\x{1f321}\\x{1f324}-" +
"\\x{1f393}\\x{1f396}\\x{1f397}\\x{1f399}-\\x{1f39b}\\x{1f39e}-\\x{1f3f0}" +
"\\x{1f3f3}-\\x{1f3f5}\\x{1f3f7}-\\x{1f4fd}\\x{1f4ff}-\\x{1f53d}\\x{1f549}-" +
"\\x{1f54e}\\x{1f550}-\\x{1f567}\\x{1f56f}\\x{1f570}\\x{1f573}-\\x{1f579}" +
"\\x{1f587}\\x{1f58a}-\\x{1f58d}\\x{1f590}\\x{1f595}\\x{1f596}\\x{1f5a5}" +
"\\x{1f5a8}\\x{1f5b1}\\x{1f5b2}\\x{1f5bc}\\x{1f5c2}-\\x{1f5c4}\\x{1f5d1}-" +
"\\x{1f5d3}\\x{1f5dc}-\\x{1f5de}\\x{1f5e1}\\x{1f5e3}\\x{1f5ef}\\x{1f5f3}" +
"\\x{1f5fa}-\\x{1f64f}\\x{1f680}-\\x{1f6c5}\\x{1f6cb}-\\x{1f6d0}\\x{1f6e0}-" +
"\\x{1f6e5}\\x{1f6e9}\\x{1f6eb}\\x{1f6ec}\\x{1f6f0}\\x{1f6f3}\\x{1f910}-" +
"\\x{1f918}\\x{1f980}-\\x{1f984}\\x{1f9c0}\\x{3297}\\x{3299}\\x{a9}\\x{ae}" +
"\\x{203c}\\x{2049}\\x{2122}\\x{2139}\\x{2194}-\\x{2199}\\x{21a9}\\x{21aa}" +
"\\x{231a}\\x{231b}\\x{2328}\\x{2388}\\x{23cf}\\x{23e9}-\\x{23f3}\\x{23f8}-" +
"\\x{23fa}\\x{24c2}\\x{25aa}\\x{25ab}\\x{25b6}\\x{25c0}\\x{25fb}-\\x{25fe}" +
"\\x{2600}-\\x{2604}\\x{260e}\\x{2611}\\x{2614}\\x{2615}\\x{2618}\\x{261d}" +
"\\x{2620}\\x{2622}\\x{2623}\\x{2626}\\x{262a}\\x{262e}\\x{262f}\\x{2638}-" +
"\\x{263a}\\x{2648}-\\x{2653}\\x{2660}\\x{2663}\\x{2665}\\x{2666}\\x{2668}" +
"\\x{267b}\\x{267f}\\x{2692}-\\x{2694}\\x{2696}\\x{2697}\\x{2699}\\x{269b}" +
"\\x{269c}\\x{26a0}\\x{26a1}\\x{26aa}\\x{26ab}\\x{26b0}\\x{26b1}\\x{26bd}" +
"\\x{26be}\\x{26c4}\\x{26c5}\\x{26c8}\\x{26ce}\\x{26cf}\\x{26d1}\\x{26d3}" +
"\\x{26d4}\\x{26e9}\\x{26ea}\\x{26f0}-\\x{26f5}\\x{26f7}-\\x{26fa}\\x{26fd}" +
"\\x{2702}\\x{2705}\\x{2708}-\\x{270d}\\x{270f}]|\\x{23}\\x{20e3}|\\x{2a}" +
"\\x{20e3}|\\x{30}\\x{20e3}|\\x{31}\\x{20e3}|\\x{32}\\x{20e3}|\\x{33}\\x{20e3}|" +
"\\x{34}\\x{20e3}|\\x{35}\\x{20e3}|\\x{36}\\x{20e3}|\\x{37}\\x{20e3}|\\x{38}" +
"\\x{20e3}|\\x{39}\\x{20e3}|\\x{1f1e6}[\\x{1f1e8}-\\x{1f1ec}\\x{1f1ee}" +
"\\x{1f1f1}\\x{1f1f2}\\x{1f1f4}\\x{1f1f6}-\\x{1f1fa}\\x{1f1fc}\\x{1f1fd}" +
"\\x{1f1ff}]|\\x{1f1e7}[\\x{1f1e6}\\x{1f1e7}\\x{1f1e9}-\\x{1f1ef}\\x{1f1f1}-" +
"\\x{1f1f4}\\x{1f1f6}-\\x{1f1f9}\\x{1f1fb}\\x{1f1fc}\\x{1f1fe}\\x{1f1ff}]|" +
"\\x{1f1e8}[\\x{1f1e6}\\x{1f1e8}\\x{1f1e9}\\x{1f1eb}-\\x{1f1ee}\\x{1f1f0}-" +
"\\x{1f1f5}\\x{1f1f7}\\x{1f1fa}-\\x{1f1ff}]|\\x{1f1e9}[\\x{1f1ea}\\x{1f1ec}" +
"\\x{1f1ef}\\x{1f1f0}\\x{1f1f2}\\x{1f1f4}\\x{1f1ff}]|\\x{1f1ea}[\\x{1f1e6}" +
"\\x{1f1e8}\\x{1f1ea}\\x{1f1ec}\\x{1f1ed}\\x{1f1f7}-\\x{1f1fa}]|\\x{1f1eb}[" +
"\\x{1f1ee}-\\x{1f1f0}\\x{1f1f2}\\x{1f1f4}\\x{1f1f7}]|\\x{1f1ec}[\\x{1f1e6}" +
"\\x{1f1e7}\\x{1f1e9}-\\x{1f1ee}\\x{1f1f1}-\\x{1f1f3}\\x{1f1f5}-\\x{1f1fa}" +
"\\x{1f1fc}\\x{1f1fe}]|\\x{1f1ed}[\\x{1f1f0}\\x{1f1f2}\\x{1f1f3}\\x{1f1f7}" +
"\\x{1f1f9}\\x{1f1fa}]|\\x{1f1ee}[\\x{1f1e8}-\\x{1f1ea}\\x{1f1f1}-\\x{1f1f4}" +
"\\x{1f1f6}-\\x{1f1f9}]|\\x{1f1ef}[\\x{1f1ea}\\x{1f1f2}\\x{1f1f4}\\x{1f1f5}]" +
"|\\x{1f1f0}[\\x{1f1ea}\\x{1f1ec}-\\x{1f1ee}\\x{1f1f2}\\x{1f1f3}\\x{1f1f5}" +
"\\x{1f1f7}\\x{1f1fc}\\x{1f1fe}\\x{1f1ff}]|\\x{1f1f1}[\\x{1f1e6}-\\x{1f1e8}" +
"\\x{1f1ee}\\x{1f1f0}\\x{1f1f7}-\\x{1f1fb}\\x{1f1fe}]|\\x{1f1f2}[\\x{1f1e6}" +
"\\x{1f1e8}-\\x{1f1ed}\\x{1f1f0}-\\x{1f1ff}]|\\x{1f1f3}[\\x{1f1e6}\\x{1f1e8}" +
"\\x{1f1ea}-\\x{1f1ec}\\x{1f1ee}\\x{1f1f1}\\x{1f1f4}\\x{1f1f5}\\x{1f1f7}" +
"\\x{1f1fa}\\x{1f1ff}]|\\x{1f1f4}\\x{1f1f2}|\\x{1f1f5}[\\x{1f1e6}\\x{1f1ea}-" +
"\\x{1f1ed}\\x{1f1f0}-\\x{1f1f3}\\x{1f1f7}-\\x{1f1f9}\\x{1f1fc}\\x{1f1fe}]|" +
"\\x{1f1f6}\\x{1f1e6}|\\x{1f1f7}[\\x{1f1ea}\\x{1f1f4}\\x{1f1f8}\\x{1f1fa}" +
"\\x{1f1fc}]|\\x{1f1f8}[\\x{1f1e6}-\\x{1f1ea}\\x{1f1ec}-\\x{1f1f4}\\x{1f1f7}-" +
"\\x{1f1f9}\\x{1f1fb}\\x{1f1fd}-\\x{1f1ff}]|\\x{1f1f9}[\\x{1f1e6}\\x{1f1e8}" +
"\\x{1f1e9}\\x{1f1eb}-\\x{1f1ed}\\x{1f1ef}-\\x{1f1f4}\\x{1f1f7}\\x{1f1f9}" +
"\\x{1f1fb}\\x{1f1fc}\\x{1f1ff}]|\\x{1f1fa}[\\x{1f1e6}\\x{1f1ec}\\x{1f1f2}" +
"\\x{1f1f8}\\x{1f1fe}\\x{1f1ff}]|\\x{1f1fb}[\\x{1f1e6}\\x{1f1e8}\\x{1f1ea}" +
"\\x{1f1ec}\\x{1f1ee}\\x{1f1f3}\\x{1f1fa}]|\\x{1f1fc}[\\x{1f1eb}\\x{1f1f8}]|" +
"\\x{1f1fd}\\x{1f1f0}|\\x{1f1fe}[\\x{1f1ea}\\x{1f1f9}]|\\x{1f1ff}[\\x{1f1e6}" +
"\\x{1f1f2}\\x{1f1fc}]"
// Node - the global OpenBazaar node instance
var Node *OpenBazaarNode
var inflightPublishRequests int
// OpenBazaarNode - represents an OpenBazaar node, encapsulating the IPFS node, wallets, etc.
type OpenBazaarNode struct {
// IPFS node object
IpfsNode *core.IpfsNode
// An implementation of the custom DHT used by OpenBazaar
DHT *dht.IpfsDHT
// The roothash of the node directory inside the openbazaar repo.
// This directory hash is published on IPNS at our peer ID making
// the directory publicly viewable on the network.
RootHash string
// The path to the openbazaar repo in the file system
RepoPath string
// The OpenBazaar network service for direct communication between peers
Service net.NetworkService
// Database for storing node specific data
Datastore repo.Datastore
// Websocket channel used for pushing data to the UI
Broadcast chan repo.Notifier
// A map of cryptocurrency wallets
Multiwallet multiwallet.MultiWallet
// Storage for our outgoing messages
MessageStorage sto.OfflineMessagingStorage
// A service that periodically checks the dht for outstanding messages
MessageRetriever *ret.MessageRetriever
// OfflineMessageFailoverTimeout is the duration until the protocol
// will stop looking for the peer to send a direct message and failover to
// sending an offline message
OfflineMessageFailoverTimeout time.Duration
// A service that periodically republishes active pointers
PointerRepublisher *rep.PointerRepublisher
// Optional nodes to push user data to
PushNodes []peer.ID
// The user-agent for this node
UserAgent string
// A dialer for Tor if available
TorDialer proxy.Dialer
// Manage blocked peers
BanManager *net.BanManager
// Allow other nodes to push data to this node for storage
AcceptStoreRequests bool
// RecordAgingNotifier is a worker that walks the cases datastore to
// notify the user as disputes age past certain thresholds
RecordAgingNotifier *recordAgingNotifier
// Generic pubsub interface
Pubsub ipfs.Pubsub
// The master private key derived from the mnemonic
MasterPrivateKey *hdkeychain.ExtendedKey
// The number of DHT records to collect before returning. The larger the number
// the slower the query but the less likely we will get an old record.
IPNSQuorumSize uint
TestnetEnable bool
RegressionTestEnable bool
PublishLock sync.Mutex
seedLock sync.Mutex
InitalPublishComplete bool
// Daemon version
Version string
}
// TestNetworkEnabled indicates whether the node is operating with test parameters
func (n *OpenBazaarNode) TestNetworkEnabled() bool { return n.TestnetEnable }
// RegressionNetworkEnabled indicates whether the node is operating with regression parameters
func (n *OpenBazaarNode) RegressionNetworkEnabled() bool { return n.RegressionTestEnable }
// SeedNode - publish to IPNS
func (n *OpenBazaarNode) SeedNode() error {
n.seedLock.Lock()
ipfs.UnPinDir(n.IpfsNode, n.RootHash)
var aerr error
var rootHash string
// There's an IPFS bug on Windows, possibly related to the Windows indexer, that can cause this to fail.
// If the first attempt fails, retry a couple of times before giving up.
for i := 0; i < 3; i++ {
rootHash, aerr = ipfs.AddDirectory(n.IpfsNode, path.Join(n.RepoPath, "root"))
if aerr == nil {
break
}
time.Sleep(time.Millisecond * 500)
}
if aerr != nil {
n.seedLock.Unlock()
return aerr
}
n.RootHash = rootHash
n.seedLock.Unlock()
n.InitalPublishComplete = true
go n.publish(rootHash)
return nil
}
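// publish notifies the UI that publishing has started, seeds the root hash to any
// configured push nodes, and then publishes the hash to IPNS, broadcasting the final status.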
func (n *OpenBazaarNode) publish(hash string) {
// Multiple publishes may have been queued
// We only need to publish the most recent
n.PublishLock.Lock()
defer n.PublishLock.Unlock()
if hash != n.RootHash {
return
}
if inflightPublishRequests == 0 {
n.Broadcast <- repo.StatusNotification{Status: "publishing"}
}
err := n.sendToPushNodes(hash)
if err != nil {
log.Error(err)
return
}
inflightPublishRequests++
err = ipfs.Publish(n.IpfsNode, hash)
inflightPublishRequests--
if inflightPublishRequests == 0 {
if err != nil {
log.Error(err)
n.Broadcast <- repo.StatusNotification{Status: "error publishing"}
} else {
n.Broadcast <- repo.StatusNotification{Status: "publish complete"}
}
}
}
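// sendToPushNodes fetches the object graph for the given root hash, appends the CIDs of any
// outgoing offline-message pointers we are seeding, and pushes the graph to each push node.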
func (n *OpenBazaarNode) sendToPushNodes(hash string) error {
id, err := cid.Decode(hash)
if err != nil {
return err
}
var graph []cid.Cid
if len(n.PushNodes) > 0 {
graph, err = ipfs.FetchGraph(n.IpfsNode, &id)
if err != nil {
return err
}
pointers, err := n.Datastore.Pointers().GetByPurpose(ipfs.MESSAGE)
if err != nil {
return err
}
// Check if we're seeding any outgoing messages and add their CIDs to the graph
for _, p := range pointers {
if len(p.Value.Addrs) > 0 {
s, err := p.Value.Addrs[0].ValueForProtocol(ma.P_IPFS)
if err != nil { | continue
}
c, err := cid.Decode(s)
if err != nil {
continue
}
graph = append(graph, c)
}
}
}
for _, p := range n.PushNodes {
go n.retryableSeedStoreToPeer(p, hash, graph)
}
return nil
}
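// retryableSeedStoreToPeer pushes the graph to a single peer, retrying with exponential
// backoff (starting at 2s) and giving up once the backoff exceeds 60s or the root hash changes.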
func (n *OpenBazaarNode) retryableSeedStoreToPeer(pid peer.ID, graphHash string, graph []cid.Cid) {
var retryTimeout = 2 * time.Second
for {
if graphHash != n.RootHash {
log.Errorf("root hash has changed, aborting push to %s", pid.Pretty())
return
}
err := n.SendStore(pid.Pretty(), graph)
if err != nil {
if retryTimeout > 60*time.Second {
log.Errorf("error pushing to peer %s: %s", pid.Pretty(), err.Error())
return
}
log.Errorf("error pushing to peer %s...backing off: %s", pid.Pretty(), err.Error())
time.Sleep(retryTimeout)
retryTimeout *= 2
continue
}
return
}
}
// SetUpRepublisher - periodic publishing to IPNS
func (n *OpenBazaarNode) SetUpRepublisher(interval time.Duration) {
if interval == 0 {
return
}
ticker := time.NewTicker(interval)
go func() {
for range ticker.C {
n.UpdateFollow()
n.SeedNode()
}
}()
}
/*EncryptMessage is a placeholder until libsignal is operational.
For now we just encrypt outgoing offline messages with the long-lived identity key.
Optionally you may provide a public key to avoid doing an IPFS lookup. */
func (n *OpenBazaarNode) EncryptMessage(peerID peer.ID, peerKey *libp2p.PubKey, message []byte) (ct []byte, rerr error) {
ctx, cancel := context.WithTimeout(context.Background(), n.OfflineMessageFailoverTimeout)
defer cancel()
if peerKey == nil {
var (
pubKey libp2p.PubKey
store = n.IpfsNode.Repo.Datastore()
)
keyval, err := ipfs.GetCachedPubkey(store, peerID.Pretty())
if err != nil {
pubKey, err = routing.GetPublicKey(n.IpfsNode.Routing, ctx, peerID)
if err != nil {
log.Errorf("Failed to find public key for %s", peerID.Pretty())
return nil, err
}
} else {
pubKey, err = libp2p.UnmarshalPublicKey(keyval)
if err != nil {
log.Errorf("Failed to find public key for %s", peerID.Pretty())
return nil, err
}
}
peerKey = &pubKey
}
if peerID.MatchesPublicKey(*peerKey) {
ciphertext, err := net.Encrypt(*peerKey, message)
if err != nil {
return nil, err
}
return ciphertext, nil
}
log.Errorf("peer public key and id do not match for peer: %s", peerID.Pretty())
return nil, errors.New("peer public key and id do not match")
}
// IPFSIdentityString - IPFS identifier
func (n *OpenBazaarNode) IPFSIdentityString() string {
return n.IpfsNode.Identity.Pretty()
}
func ToHtmlEntities(str string) string {
var rx = regexp.MustCompile(EmojiPattern)
return rx.ReplaceAllStringFunc(str, func(s string) string {
r, _ := utf8.DecodeRuneInString(s)
html := fmt.Sprintf(`&#x%X;`, r)
return html
})
}
// createSlugFor creates a slug from a multi-language string
func createSlugFor(slugName string) string {
l := SentenceMaxCharacters - SlugBuffer
slugName = ToHtmlEntities(slugName)
slug := slug.Make(slugName)
if len(slug) < SentenceMaxCharacters-SlugBuffer {
l = len(slug)
}
return slug[:l]
} | random_line_split |
|
BaceraGeneTestingProjectExpressGrid.js | var rowEditing = Ext.create('Ext.grid.plugin.RowEditing',{
pluginId:'rowEditing',
saveBtnText: '保存',
cancelBtnText: "取消",
autoCancel: false,
clicksToEdit:2 // edit on double-click: 1 - single click, 2 - double click, 0 - disable click/double-click editing
});
Ext.define('Rds.bacera.panel.BaceraGeneTestingProjectExpressGrid', {
extend : 'Ext.grid.Panel',
loadMask: true,
viewConfig: {
trackOver: false,
stripeRows: false
},
pageSize:25,
selType: 'rowmodel',
plugins: [rowEditing],
initComponent : function() {
var me = this;
var reportif=new Ext.form.field.ComboBox({
fieldLabel : '是否发报告',
width:'20%',
labelWidth : 70,
editable : false,
triggerAction : 'all',
displayField : 'Name',
labelAlign : 'left',
valueField : 'Code',
store : new Ext.data.ArrayStore(
{
fields : ['Name','Code' ],
data : [['全部',0],['是',1 ],['否',2 ] ]
}),
value : '',
mode : 'local',
name : 'reportif',
value: 0
});
var consumer_name = Ext.create('Ext.form.field.Text',{
name:'consumer_name',
labelWidth:80,
width:'20%',
fieldLabel:'客户姓名'
});
var sample_number = Ext.create('Ext.form.field.Text',{
name:'sample_number',
labelWidth:80,
width:'20%',
fieldLabel:'样本编号'
});
var consumer_sex=new Ext.form.field.ComboBox({
fieldLabel : '性别',
width:'20%',
labelWidth : 70,
editable : false,
triggerAction : 'all',
displayField : 'Name',
labelAlign : 'left',
valueField : 'Code',
store : new Ext.data.ArrayStore(
{
fields : ['Name','Code' ],
data : [['全部',''],['男','M' ],['女','F' ] ]
}),
value : '',
mode : 'local',
name : 'sex',
});
var test_number = Ext.create('Ext.form.field.Text',{
name:'test_number',
labelWidth:80,
width:'20%',
fieldLabel:'案例编号'
});
var test_package_name = Ext.create('Ext.form.field.Text',{
name:'test_package_name',
labelWidth:80,
width:'20%',
fieldLabel:'检测套餐名称'
});
var agency_name = Ext.create('Ext.form.field.Text',{
name:'agency_name',
labelWidth:80,
width:'20%',
fieldLabel:'代理商名称'
});
var test_item_names = Ext.create('Ext.form.field.Text',{
name:'test_item_names',
labelWidth:80,
width:'20%',
fieldLabel:'检测项目名称'
});
var charge_standard_id = Ext.create('Ext.form.field.Text',{
name:'charge_standard_id',
labelWidth:80,
width:'20%',
fieldLabel:'归属人id'
});
var charge_standard_name = Ext.create('Ext.form.field.Text',{
name:'charge_standard_name',
labelWidth:80,
width:'20%',
fieldLabel:'归属人姓名'
});
var mailStore = Ext.create('Ext.data.Store', {
fields:['key','value'],
proxy : {
type : 'jsonajax',
actionMethods : {
read : 'POST'
},
url : 'judicial/dicvalues/getMailModels.do',
reader : {
type : 'json',
root : 'data'
}
},
autoLoad : true,
remoteSort : true
});
var gene_express_starttime = new Ext.form.DateField({
id:'gene_express_starttime',
name : 'gene_express_starttime',
width:'20%',
fieldLabel : '添加日期从',
labelWidth : 80,
labelAlign : 'left',
emptyText : '请选择日期',
format : 'Y-m-d',
value : Ext.Date.add(
new Date(),
Ext.Date.DAY,-7),
listeners:{
'select':function(){
var start = Ext.getCmp('gene_express_starttime').getValue();
var endDate = Ext.getCmp('gene_express_endtime').getValue();
if (start > endDate) {
Ext.getCmp('gene_express_starttime').setValue(endDate);
}
}
}
});
var gene_express_endtime = new Ext.form.DateField({
id:'gene_express_endtime',
name : 'gene_express_endtime',
width:'20%',
labelWidth : 40,
fieldLabel : '到 ',
labelAlign : 'left',
emptyText : '请选择日期',
format : 'Y-m-d',
value : Ext.Date.add(new Date(), Ext.Date.DAY,1),
listeners:{
'select':function(){
var start = Ext.getCmp('gene_express_starttime').getValue();
var endDate = Ext.getCmp('gene_express_endtime').getValue();
if (start > endDate) {
Ext.getCmp('gene_express_starttime').setValue(endDate);
}
}
}
});
me.store = Ext.create('Ext.data.Store',{
fields:['id','add_time','consumer_name','consumer_sex','consumer_birthday','consumer_phone','sample_number','test_number','report_date','test_package_id','test_package_name',
'agency_id','agency_name','test_item_ids','test_item_names','expressnum','expresstype','recive','expresstime','expressremark','paragraphtime','account_type','remarks','remark','charge_standard_name'],
proxy: {
type: 'jsonajax',
actionMethods:{read:'POST'},
url: 'bacera/Gene/queryallpage.do',
params:{
},
reader: {
type: 'json',
root:'data',
totalProperty:'total'
}
},
listeners:{
'beforeload':function(ds, operation, opt){
me.getSelectionModel().clearSelections();
Ext.apply(me.store.proxy.params, {
consumer_name:consumer_name.getValue().trim(),
sample_number:sample_number.getValue().trim(),
gene_starttime:dateformat(gene_express_starttime.getValue()),
gene_endtime:dateformat(gene_express_endtime.getValue()),
test_number:test_number.getValue(),
test_package_name:test_package_name.getValue(),
reportif:reportif.getValue(),
// agency_name:agency_name.getValue().trim(),
test_item_names:test_item_names.getValue(),
charge_standard_name:charge_standard_name.getValue().trim(),
consumer_sex:consumer_sex.getValue().trim()
//consumer_phone:consumer_phone.getValue().trim()
});
}
}
});
me.selModel = Ext.create('Ext.selection.CheckboxModel',{
// mode: 'SINGLE'
});
me.bbar = Ext.create('Ext.PagingToolbar', {
store : me.store,
pageSize : me.pageSize,
displayInfo : true,
displayMsg : "第 {0} - {1} 条 共 {2} 条",
emptyMsg : "没有符合条件的记录"
});
//me.bbar = {xtype: 'label',id:'totalBbarGene_express', text: '',style:'height:25px;line-height:25px;text-align:right;margin-right:10px;'};
me.columns = [
{ text: '案例编号', dataIndex: 'test_number', menuDisabled:true, width : 120},
{ text: '样本编号', dataIndex: 'sample_number', menuDisabled:true, width : 120},
{ text: '检测套餐名', dataIndex: 'test_package_name', menuDisabled:true, width : 170},
{ text: '快递单号', dataIndex: 'expressnum', menuDisabled:true, width : 100,
editor:'textfield'
},
{ text: '快递类型', dataIndex: 'expresstype', menuDisabled:true, width : 110,
editor:new Ext.form.ComboBox({
autoSelect : true,
editable:true,
name:'expresstype',
triggerAction: 'all',
queryMode: 'local',
emptyText : "请选择",
selectOnTab: true,
store: mailStore,
maxLength: 50,
fieldStyle: me.fieldStyle,
displayField:'value',
valueField:'value',
listClass: 'x-combo-list-small'
})
},
{ text: '快递日期', dataIndex: 'expresstime', menuDisabled:true, width : 110 },
{ text: '收件人', dataIndex: 'recive', menuDisabled:true, width : 120,
editor:'textfield'
},{ text: '快递备注', dataIndex: 'expressremark', menuDisabled:true, width : 150,
editor:'textfield'
},
{ text: '账户类型', dataIndex: 'account_type', menuDisabled:true, width : 150},
{ text: '到款日期', dataIndex: 'paragraphtime', menuDisabled:true, width : 95},
{ text: '登记时间', dataIndex: 'add_time', menuDisabled:true, width:120,
renderer:Ext.util.Format.dateRenderer('Y-m-d')
},
{ text: '客户姓名', dataIndex: 'consumer_name', menuDisabled:true, width:80},
{ text: '归属人全称', dataIndex: 'charge_standard_name', menuDisabled:true, width : 225},
{ text: '备注', dataIndex: 'remark', menuDisabled:true, width : 200},
{ text: '被代理人', dataIndex: 'agentname', menuDisabled:true, width : 80},
{ text: '财务备注', dataIndex: 'remarks', menuDisabled:true, width : 150},
{ text: '检测项目名', dataIndex: 'test_item_names', menuDisabled:true, width:200},
{ text: '是否发报告', dataIndex: 'reportif', menuDisabled:true, width : 100,
renderer : function(value, cellmeta,
record, rowIndex, columnIndex,
store) {
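// Show "是" (sent) as soon as either an express type or a tracking number has been recorded for the row.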
var isnull= record.data["expresstype"];
var isll= record.data["expressnum"];
if ( null !=isnull||null!=isll) {
return "是";
} else {
return "<span style='color:red'>否</span>";
}
}
} ];
me.dockedItems = [{
xtype:'toolbar',
name:'search',
dock:'top',
items:[test_item_names,test_package_name,consumer_name,consumer_sex]
},{
style : {
borderTopWidth : '0px !important',
borderBottomWidth : '0px !important'
},
xtype:'toolbar',
name:'search',
dock:'top',
items:[sample_number,gene_express_starttime,gene_express_endtime,reportif]
},{
style : {
borderTopWidth : '0px !important',
borderBottomWidth : '0px !important'
},
xtype:'toolbar',
name:'search',
dock:'top',
items:[test_number,charge_standard_name,{
text:'查询',
iconCls:'Find',
handler:me.onSearch
}]
},{
style : {
borderTopWidth : '0px !important',
borderBottomWidth : '0px !important'
},
xtype:'toolbar',
name:'search',
dock:'top',
items:[{
text:'案例详情',
iconCls:'Find',
handler:me.casef
},{
text:'案例编号',
iconCls:'Find',
handler:me.caseCode
}]
}];
me.store.load();
me.callParent(arguments);
// me.store.on("load",function(){
// Ext.getCmp('totalBbarGene_express').setText("共 "+me.store.getCount()+" 条");
// });
},
casef:function(){
var me = this.up("gridpanel");
var selections = me.getView().getSelectionModel().getSelection();
if(selections.length<1){
Ext.Msg.alert("提示", "请选择一条记录!");
return;
};
var form = Ext.create("Rds.bacera.form.BaceraGeneTestingProjectExpressForm",{
region:"center",
grid:me
});
var win = Ext.create("Ext.window.Window",{
title:'案例详情',
width:580,
iconCls:'Pageedit',
modal:true,
height:400,
layout:'border',
items:[form]
});
win.show();
form.loadRecord(selections[0]);
},
caseCode:function(){
var me = this.up("gridpanel");
var selections = me.getView().getSelectionModel().getSelection();
if(selections.length<1){
Ext.Msg.alert("提示", "请选择需要查看的案例编号!");
return;
};
var num="";
for (var i = 0; i < selections.length; i++) {
num += selections[i].get("test_number") + ";";
}
Ext.Msg.alert("我是案例编号", num);
},
onSearch:function(){
var me = this.up("gridpanel");
me.store.currentPage = 1;
me.getStore().load();
},
listeners : {
'beforeedit':function(editor, e,s){
function afterEdit(e,s){
| method: "POST",
headers: { 'Content-Type': 'application/json' },
jsonData: {
id:s.record.data.id,
num:s.record.data.test_number,
expressnum:s.record.data.expressnum,
recive:s.record.data.recive,
expresstime:(Ext.Date.format(new Date(), 'Y-m-d')),
case_type:s.record.data.test_item_names,
expresstype:s.record.data.expresstype,
expressremark:s.record.data.expressremark
},
success: function (response, options) {
response = Ext.JSON.decode(response.responseText);
if (response==false) {
Ext.MessageBox.alert("错误信息", "修改快递失败,请查看");
}
},
failure: function () {
Ext.Msg.alert("提示", "保存失败<br>请联系管理员!");
}
});
}
rowEditing.on('edit',afterEdit);
},
'afterrender' : function() {
this.store.load();
}
}
});
| Ext.Ajax.request({
url:"bacera/Gene/saveGeneExpress.do",
| conditional_block |
BaceraGeneTestingProjectExpressGrid.js | var rowEditing = Ext.create('Ext.grid.plugin.RowEditing',{
pluginId:'rowEditing',
saveBtnText: '保存',
cancelBtnText: "取消",
autoCancel: false,
clicksToEdit:2 // edit on double-click (1 = single click, 2 = double click, 0 = disable click-to-edit)
});
Ext.define('Rds.bacera.panel.BaceraGeneTestingProjectExpressGrid', {
extend : 'Ext.grid.Panel',
loadMask: true,
viewConfig: {
trackOver: false,
stripeRows: false
},
pageSize:25,
selType: 'rowmodel',
plugins: [rowEditing],
initComponent : function() {
var me = this;
var reportif=new Ext.form.field.ComboBox({
fieldLabel : '是否发报告',
width:'20%',
labelWidth : 70,
editable : false,
triggerAction : 'all',
displayField : 'Name',
labelAlign : 'left',
valueField : 'Code',
store : new Ext.data.ArrayStore(
{
fields : ['Name','Code' ],
data : [['全部',0],['是',1 ],['否',2 ] ]
}),
mode : 'local',
name : 'reportif',
value: 0
});
var consumer_name = Ext.create('Ext.form.field.Text',{
name:'consumer_name',
labelWidth:80,
width:'20%',
fieldLabel:'客户姓名'
});
var sample_number = Ext.create('Ext.form.field.Text',{
name:'sample_number',
labelWidth:80,
width:'20%',
fieldLabel:'样本编号'
});
var consumer_sex=new Ext.form.field.ComboBox({
fieldLabel : '性别',
width:'20%',
labelWidth : 70,
editable : false,
triggerAction : 'all',
displayField : 'Name',
labelAlign : 'left',
valueField : 'Code',
store : new Ext.data.ArrayStore(
{
fields : ['Name','Code' ],
data : [['全部',''],['男','M' ],['女','F' ] ]
}),
value : '',
mode : 'local',
name : 'sex',
});
var test_number = Ext.create('Ext.form.field.Text',{
name:'test_number',
labelWidth:80,
width:'20%',
fieldLabel:'案例编号'
});
var test_package_name = Ext.create('Ext.form.field.Text',{
name:'test_package_name',
labelWidth:80,
width:'20%',
fieldLabel:'检测套餐名称'
});
var agency_name = Ext.create('Ext.form.field.Text',{
name:'agency_name',
labelWidth:80,
width:'20%',
fieldLabel:'代理商名称'
});
var test_item_names = Ext.create('Ext.form.field.Text',{
name:'test_item_names',
labelWidth:80,
width:'20%',
fieldLabel:'检测项目名称'
});
var charge_standard_id = Ext.create('Ext.form.field.Text',{
name:'charge_standard_id',
labelWidth:80,
width:'20%',
fieldLabel:'归属人id'
});
var charge_standard_name = Ext.create('Ext.form.field.Text',{
name:'charge_standard_name',
labelWidth:80,
width:'20%',
fieldLabel:'归属人姓名'
});
var mailStore = Ext.create('Ext.data.Store', {
fields:['key','value'],
proxy : {
type : 'jsonajax',
actionMethods : {
read : 'POST'
},
url : 'judicial/dicvalues/getMailModels.do',
reader : {
type : 'json',
root : 'data'
}
},
autoLoad : true,
remoteSort : true
});
var gene_express_starttime = new Ext.form.DateField({
id:'gene_express_starttime',
name : 'gene_express_starttime',
width:'20%',
fieldLabel : '添加日期从',
labelWidth : 80,
labelAlign : 'left',
emptyText : '请选择日期',
format : 'Y-m-d',
value : Ext.Date.add(
new Date(),
Ext.Date.DAY,-7),
listeners:{
'select':function(){
var start = Ext.getCmp('gene_express_starttime').getValue();
var endDate = Ext.getCmp('gene_express_endtime').getValue();
if (start > endDate) {
Ext.getCmp('gene_express_starttime').setValue(endDate);
}
}
}
});
var gene_express_endtime = new Ext.form.DateField({
id:'gene_express_endtime',
name : 'gene_express_endtime',
width:'20%',
labelWidth : 40,
fieldLabel : '到 ',
labelAlign : 'left',
emptyText : '请选择日期',
format : 'Y-m-d',
value : Ext.Date.add(new Date(), Ext.Date.DAY,1),
listeners:{
'select':function(){
var start = Ext.getCmp('gene_express_starttime').getValue();
var endDate = Ext.getCmp('gene_express_endtime').getValue();
if (start > endDate) {
Ext.getCmp('gene_express_starttime').setValue(endDate);
}
}
}
});
me.store = Ext.create('Ext.data.Store',{
fields:['id','add_time','consumer_name','consumer_sex','consumer_birthday','consumer_phone','sample_number','test_number','report_date','test_package_id','test_package_name',
'agency_id','agency_name','test_item_ids','test_item_names','expressnum','expresstype','recive','expresstime','expressremark','paragraphtime','account_type','remarks','remark','charge_standard_name'],
proxy: {
type: 'jsonajax',
actionMethods:{read:'POST'},
url: 'bacera/Gene/queryallpage.do',
params:{
},
reader: {
type: 'json',
root:'data',
totalProperty:'total'
}
},
listeners:{
'beforeload':function(ds, operation, opt){
me.getSelectionModel().clearSelections();
Ext.apply(me.store.proxy.params, {
consumer_name:consumer_name.getValue().trim(),
sample_number:sample_number.getValue().trim(),
gene_starttime:dateformat(gene_express_starttime.getValue()),
gene_endtime:dateformat(gene_express_endtime.getValue()),
test_number:test_number.getValue(),
test_package_name:test_package_name.getValue(),
reportif:reportif.getValue(),
// agency_name:agency_name.getValue().trim(),
test_item_names:test_item_names.getValue(),
charge_standard_name:charge_standard_name.getValue().trim(),
consumer_sex:consumer_sex.getValue().trim()
//consumer_phone:consumer_phone.getValue().trim()
});
}
}
});
me.selModel = Ext.create('Ext.selection.CheckboxModel',{
// mode: 'SINGLE'
});
me.bbar = Ext.create('Ext.PagingToolbar', {
store : me.store,
pageSize : me.pageSize,
displayInfo : true,
displayMsg : "第 {0} - {1} 条 共 {2} 条",
emptyMsg : "没有符合条件的记录"
});
//me.bbar = {xtype: 'label',id:'totalBbarGene_express', text: '',style:'height:25px;line-height:25px;text-align:right;margin-right:10px;'};
me.columns = [
{ text: '案例编号', dataIndex: 'test_number', menuDisabled:true, width : 120},
{ text: '样本编号', dataIndex: 'sample_number', menuDisabled:true, width : 120},
{ text: '检测套餐名', dataIndex: 'test_package_name', menuDisabled:true, width : 170},
{ text: '快递单号', dataIndex: 'expressnum', menuDisabled:true, width : 100,
editor:'textfield'
},
{ text: '快递类型', dataIndex: 'expresstype', menuDisabled:true, width : 110,
editor:new Ext.form.ComboBox({
autoSelect : true,
editable:true,
name:'expresstype',
triggerAction: 'all',
queryMode: 'local',
emptyText : "请选择",
selectOnTab: true,
store: mailStore,
maxLength: 50,
fieldStyle: me.fieldStyle,
displayField:'value',
valueField:'value',
listClass: 'x-combo-list-small'
})
},
{ text: '快递日期', dataIndex: 'expresstime', menuDisabled:true, width : 110 },
{ text: '收件人', dataIndex: 'recive', menuDisabled:true, width : 120,
editor:'textfield'
},{ text: '快递备注', dataIndex: 'expressremark', menuDisabled:true, width : 150,
editor:'textfield'
},
{ text: '账户类型', dataIndex: 'account_type', menuDisabled:true, width : 150},
{ text: '到款日期', dataIndex: 'paragraphtime', menuDisabled:true, width : 95},
{ text: '登记时间', dataIndex: 'add_time', menuDisabled:true, width:120,
renderer:Ext.util.Format.dateRenderer('Y-m-d')
},
{ text: '客户姓名', dataIndex: 'consumer_name', menuDisabled:true, width:80},
{ text: '归属人全称', dataIndex: 'charge_standard_name', menuDisabled:true, width : 225},
{ text: '备注', dataIndex: 'remark', menuDisabled:true, width : 200},
{ text: '被代理人', dataIndex: 'agentname', menuDisabled:true, width : 80},
{ text: '财务备注', dataIndex: 'remarks', menuDisabled:true, width : 150},
{ text: '检测项目名', dataIndex: 'test_item_names', menuDisabled:true, width:200},
{ text: '是否发报告', dataIndex: 'reportif', menuDisabled:true, width : 100,
renderer : function(value, cellmeta,
record, rowIndex, columnIndex,
store) {
var isnull= record.data["expresstype"];
var isll= record.data["expressnum"];
if ( null !=isnull||null!=isll) {
return "是";
} else {
return "<span style='color:red'>否</span>";
}
}
} ];
me.dockedItems = [{
xtype:'toolbar',
name:'search',
dock:'top',
items:[test_item_names,test_package_name,consumer_name,consumer_sex]
},{
style : {
borderTopWidth : '0px !important',
borderBottomWidth : '0px !important'
},
xtype:'toolbar',
name:'search',
dock:'top',
items:[sample_number,gene_express_starttime,gene_express_endtime,reportif]
},{
style : {
borderTopWidth : '0px !important',
borderBottomWidth : '0px !important'
},
xtype:'toolbar',
name:'search',
dock:'top',
items:[test_number,charge_standard_name,{
text:'查询',
iconCls:'Find',
handler:me.onSearch
}]
},{
style : {
borderTopWidth : '0px !important',
borderBottomWidth : '0px !important'
},
xtype:'toolbar',
name:'search',
dock:'top',
items:[{
text:'案例详情',
iconCls:'Find',
handler:me.casef
},{
text:'案例编号',
iconCls:'Find',
handler:me.caseCode
}]
}];
me.store.load();
me.callParent(arguments);
// me.store.on("load",function(){
// Ext.getCmp('totalBbarGene_express').setText("共 "+me.store.getCount()+" 条");
// });
},
casef:function(){
var me = this.up("gridpanel");
var selections = me.getView().getSelectionModel().getSelection();
if(selections.length<1){
Ext.Msg.alert("提示", "请选择一条记录!");
return;
};
var form = Ext.create("Rds.bacera.form.BaceraGeneTestingProjectExpressForm",{
region:"center",
grid:me
});
var win = Ext.create("Ext.window.Window",{
title:'案例详情',
width:580,
iconCls:'Pageedit',
modal:true,
height:400,
layout:'border',
items:[form]
});
win.show();
form.loadRecord(selections[0]);
},
caseCode:function(){
var me = this.up("gridpanel");
var selections = me.getView().getSelectionModel().getSelection();
if(selections.length<1){
Ext.Msg.alert("提示", "请选择需要查看的案例编号!");
return;
};
var num="";
for (var i = 0; i < selections.length; i++) {
num += selections[i].get("test_number") + ";";
}
Ext.Msg.alert("我是案例编号", num);
},
onSearch:function(){
var me = this.up("gridpanel");
me.store.currentPage = 1;
me.getStore().load();
},
listeners : {
'beforeedit':function(editor, e,s){
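// afterEdit posts the edited row back to the server; it is registered on the shared rowEditing plugin's 'edit' event each time a row edit begins.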
function afterEdit(e,s){
Ext.Ajax.request({
url:"bacera/Gene/saveGeneExpress.do",
method: "POST",
headers: { 'Content-Type': 'application/json' },
jsonData: {
id:s.record.data.id,
num:s.record.data.test_number,
expressnum:s.record.data.expressnum,
recive:s.record.data.recive,
expresstime:(Ext.Date.format(new Date(), 'Y-m-d')),
case_type:s.record.data.test_it |
expresstype:s.record.data.expresstype,
expressremark:s.record.data.expressremark
},
success: function (response, options) {
response = Ext.JSON.decode(response.responseText);
if (response==false) {
Ext.MessageBox.alert("错误信息", "修改快递失败,请查看");
}
},
failure: function () {
Ext.Msg.alert("提示", "保存失败<br>请联系管理员!");
}
});
}
rowEditing.on('edit',afterEdit);
},
'afterrender' : function() {
this.store.load();
}
}
});
| em_names, | identifier_name |
BaceraGeneTestingProjectExpressGrid.js | var rowEditing = Ext.create('Ext.grid.plugin.RowEditing',{
pluginId:'rowEditing',
saveBtnText: '保存',
cancelBtnText: "取消",
autoCancel: false,
clicksToEdit:2 // edit on double-click (1 = single click, 2 = double click, 0 = disable click-to-edit)
});
Ext.define('Rds.bacera.panel.BaceraGeneTestingProjectExpressGrid', {
extend : 'Ext.grid.Panel',
loadMask: true,
viewConfig: {
trackOver: false,
stripeRows: false
},
pageSize:25,
selType: 'rowmodel',
plugins: [rowEditing],
initComponent : function() {
var me = this;
var reportif=new Ext.form.field.ComboBox({
fieldLabel : '是否发报告',
width:'20%',
labelWidth : 70,
editable : false,
triggerAction : 'all',
displayField : 'Name',
labelAlign : 'left',
valueField : 'Code',
store : new Ext.data.ArrayStore(
{
fields : ['Name','Code' ],
data : [['全部',0],['是',1 ],['否',2 ] ]
}),
mode : 'local',
name : 'reportif',
value: 0
});
var consumer_name = Ext.create('Ext.form.field.Text',{
name:'consumer_name',
labelWidth:80,
width:'20%',
fieldLabel:'客户姓名'
});
var sample_number = Ext.create('Ext.form.field.Text',{
name:'sample_number',
labelWidth:80,
width:'20%',
fieldLabel:'样本编号'
});
var consumer_sex=new Ext.form.field.ComboBox({
fieldLabel : '性别',
width:'20%',
labelWidth : 70,
editable : false,
triggerAction : 'all',
displayField : 'Name',
labelAlign : 'left',
valueField : 'Code',
store : new Ext.data.ArrayStore(
{
fields : ['Name','Code' ],
data : [['全部',''],['男','M' ],['女','F' ] ]
}),
value : '',
mode : 'local',
name : 'sex',
});
var test_number = Ext.create('Ext.form.field.Text',{
name:'test_number',
labelWidth:80,
width:'20%',
fieldLabel:'案例编号'
});
var test_package_name = Ext.create('Ext.form.field.Text',{
name:'test_package_name',
labelWidth:80,
width:'20%',
fieldLabel:'检测套餐名称'
});
var agency_name = Ext.create('Ext.form.field.Text',{
name:'agency_name',
labelWidth:80,
width:'20%',
fieldLabel:'代理商名称'
});
var test_item_names = Ext.create('Ext.form.field.Text',{
name:'test_item_names',
labelWidth:80,
width:'20%',
fieldLabel:'检测项目名称'
});
var charge_standard_id = Ext.create('Ext.form.field.Text',{
name:'charge_standard_id',
labelWidth:80,
width:'20%',
fieldLabel:'归属人id'
});
var charge_standard_name = Ext.create('Ext.form.field.Text',{
name:'charge_standard_name',
labelWidth:80,
width:'20%',
fieldLabel:'归属人姓名'
});
var mailStore = Ext.create('Ext.data.Store', {
fields:['key','value'],
proxy : {
type : 'jsonajax',
actionMethods : {
read : 'POST'
},
url : 'judicial/dicvalues/getMailModels.do',
reader : {
type : 'json',
root : 'data'
}
},
autoLoad : true,
remoteSort : true
});
var gene_express_starttime = new Ext.form.DateField({
id:'gene_express_starttime',
name : 'gene_express_starttime',
width:'20%',
fieldLabel : '添加日期从',
labelWidth : 80,
labelAlign : 'left',
emptyText : '请选择日期',
format : 'Y-m-d',
value : Ext.Date.add(
new Date(),
Ext.Date.DAY,-7),
listeners:{
'select':function(){
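// Keep the range valid: if the chosen start date is later than the end date, clamp the start date back to the end date.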
var start = Ext.getCmp('gene_express_starttime').getValue();
var endDate = Ext.getCmp('gene_express_endtime').getValue();
if (start > endDate) {
Ext.getCmp('gene_express_starttime').setValue(endDate);
}
}
}
});
var gene_express_endtime = new Ext.form.DateField({
id:'gene_express_endtime',
name : 'gene_express_endtime',
width:'20%',
labelWidth : 40,
fieldLabel : '到 ',
labelAlign : 'left',
emptyText : '请选择日期',
format : 'Y-m-d',
value : Ext.Date.add(new Date(), Ext.Date.DAY,1),
listeners:{
'select':function(){
var start = Ext.getCmp('gene_express_starttime').getValue();
var endDate = Ext.getCmp('gene_express_endtime').getValue();
if (start > endDate) {
Ext.getCmp('gene_express_starttime').setValue(endDate);
}
}
}
});
me.store = Ext.create('Ext.data.Store',{
fields:['id','add_time','consumer_name','consumer_sex','consumer_birthday','consumer_phone','sample_number','test_number','report_date','test_package_id','test_package_name',
'agency_id','agency_name','test_item_ids','test_item_names','expressnum','expresstype','recive','expresstime','expressremark','paragraphtime','account_type','remarks','remark','charge_standard_name'],
proxy: {
type: 'jsonajax',
actionMethods:{read:'POST'},
url: 'bacera/Gene/queryallpage.do',
params:{
},
reader: {
type: 'json',
root:'data',
totalProperty:'total'
}
},
listeners:{
'beforeload':function(ds, operation, opt){
me.getSelectionModel().clearSelections();
Ext.apply(me.store.proxy.params, {
consumer_name:consumer_name.getValue().trim(),
sample_number:sample_number.getValue().trim(),
gene_starttime:dateformat(gene_express_starttime.getValue()),
gene_endtime:dateformat(gene_express_endtime.getValue()),
test_number:test_number.getValue(),
test_package_name:test_package_name.getValue(),
reportif:reportif.getValue(),
// agency_name:agency_name.getValue().trim(),
test_item_names:test_item_names.getValue(),
charge_standard_name:charge_standard_name.getValue().trim(),
consumer_sex:consumer_sex.getValue().trim()
//consumer_phone:consumer_phone.getValue().trim()
});
}
}
});
me.selModel = Ext.create('Ext.selection.CheckboxModel',{
// mode: 'SINGLE'
});
me.bbar = Ext.create('Ext.PagingToolbar', {
store : me.store,
pageSize : me.pageSize,
displayInfo : true,
displayMsg : "第 {0} - {1} 条 共 {2} 条",
emptyMsg : "没有符合条件的记录"
});
//me.bbar = {xtype: 'label',id:'totalBbarGene_express', text: '',style:'height:25px;line-height:25px;text-align:right;margin-right:10px;'};
me.columns = [
{ text: '案例编号', dataIndex: 'test_number', menuDisabled:true, width : 120},
{ text: '样本编号', dataIndex: 'sample_number', menuDisabled:true, width : 120},
{ text: '检测套餐名', dataIndex: 'test_package_name', menuDisabled:true, width : 170},
{ text: '快递单号', dataIndex: 'expressnum', menuDisabled:true, width : 100,
editor:'textfield'
},
{ text: '快递类型', dataIndex: 'expresstype', menuDisabled:true, width : 110,
editor:new Ext.form.ComboBox({
autoSelect : true,
editable:true,
name:'expresstype',
triggerAction: 'all',
queryMode: 'local',
emptyText : "请选择",
selectOnTab: true,
store: mailStore,
maxLength: 50,
fieldStyle: me.fieldStyle,
displayField:'value',
valueField:'value',
listClass: 'x-combo-list-small'
})
},
{ text: '快递日期', dataIndex: 'expresstime', menuDisabled:true, width : 110 },
{ text: '收件人', dataIndex: 'recive', menuDisabled:true, width : 120,
editor:'textfield'
},{ text: '快递备注', dataIndex: 'expressremark', menuDisabled:true, width : 150,
editor:'textfield'
},
{ text: '账户类型', dataIndex: 'account_type', menuDisabled:true, width : 150},
{ text: '到款日期', dataIndex: 'paragraphtime', menuDisabled:true, width : 95},
{ text: '登记时间', dataIndex: 'add_time', menuDisabled:true, width:120,
renderer:Ext.util.Format.dateRenderer('Y-m-d')
},
{ text: '客户姓名', dataIndex: 'consumer_name', menuDisabled:true, width:80},
{ text: '归属人全称', dataIndex: 'charge_standard_name', menuDisabled:true, width : 225},
{ text: '备注', dataIndex: 'remark', menuDisabled:true, width : 200},
{ text: '被代理人', dataIndex: 'agentname', menuDisabled:true, width : 80},
{ text: '财务备注', dataIndex: 'remarks', menuDisabled:true, width : 150},
{ text: '检测项目名', dataIndex: 'test_item_names', menuDisabled:true, width:200},
{ text: '是否发报告', dataIndex: 'reportif', menuDisabled:true, width : 100,
renderer : function(value, cellmeta,
record, rowIndex, columnIndex,
store) {
var isnull= record.data["expresstype"];
var isll= record.data["expressnum"];
if ( null !=isnull||null!=isll) {
return "是";
} else {
return "<span style='color:red'>否</span>";
}
}
} ];
me.dockedItems = [{
xtype:'toolbar',
name:'search',
dock:'top',
items:[test_item_names,test_package_name,consumer_name,consumer_sex]
},{
style : {
borderTopWidth : '0px !important',
borderBottomWidth : '0px !important'
},
xtype:'toolbar',
name:'search',
dock:'top',
items:[sample_number,gene_express_starttime,gene_express_endtime,reportif]
},{
style : {
borderTopWidth : '0px !important',
borderBottomWidth : '0px !important'
},
xtype:'toolbar',
name:'search',
dock:'top',
items:[test_number,charge_standard_name,{
text:'查询',
iconCls:'Find',
handler:me.onSearch
}]
},{
style : {
borderTopWidth : '0px !important',
borderBottomWidth : '0px !important'
},
xtype:'toolbar',
name:'search',
dock:'top',
items:[{
text:'案例详情',
iconCls:'Find',
handler:me.casef
},{
text:'案例编号',
iconCls:'Find',
handler:me.caseCode
}]
}];
me.store.load();
me.callParent(arguments);
// me.store.on("load",function(){
// Ext.getCmp('totalBbarGene_express').setText("共 "+me.store.getCount()+" 条");
// });
},
casef:function(){
var me = this.up("gridpanel");
var selections = me.getView().getSelectionModel().getSelection();
if(selections.length<1){
Ext.Msg.alert("提示", "请选择一条记录!");
return;
};
var form = Ext.create("Rds.bacera.form.BaceraGeneTestingProjectExpressForm",{
region:"center",
grid:me
});
var win = Ext.create("Ext.window.Window",{
title:'案例详情',
width:580,
iconCls:'Pageedit',
modal:true,
height:400,
layout:'border',
items:[form]
});
win.show();
form.loadRecord(selections[0]);
},
caseCode:function(){
var me = this.up("gridpanel");
var selections = me.getView().getSelectionModel().getSelection();
if(selections.length<1){
Ext.Msg.alert("提示", "请选择需要查看的案例编号!");
return;
};
var num="";
for (var i = 0; i < selections.length; i++) {
num += selections[i].get("test_number") + ";";
}
Ext.Msg.alert("我是案例编号", num);
},
onSearch:function(){
var me = this.up("gridpanel");
me.store.currentPage = 1;
me.getStore().load();
},
listeners : {
'beforeedit':function(editor, e,s){
function afterEdit(e,s){
Ext.Ajax.request({
url:"bacera/Gene/saveGeneExpress.do",
method: "POST",
headers: { 'Content-Type': 'application/json' },
jsonData: {
id:s.record.data.id,
num:s.record.data.test_number,
expressnum:s.record.data.expressnum,
recive:s.record.data.recive,
expresstime:(Ext.Date.format(new Date(), 'Y-m-d')),
case_type:s.record.data.test_item_names,
| expresstype:s.record.data.expresstype,
expressremark:s.record.data.expressremark
},
success: function (response, options) {
response = Ext.JSON.decode(response.responseText);
if (response==false) {
Ext.MessageBox.alert("错误信息", "修改快递失败,请查看");
}
},
failure: function () {
Ext.Msg.alert("提示", "保存失败<br>请联系管理员!");
}
});
}
rowEditing.on('edit',afterEdit);
},
'afterrender' : function() {
this.store.load();
}
}
});
| identifier_body |
|
BaceraGeneTestingProjectExpressGrid.js | var rowEditing = Ext.create('Ext.grid.plugin.RowEditing',{
pluginId:'rowEditing',
saveBtnText: '保存',
cancelBtnText: "取消",
autoCancel: false,
clicksToEdit:2 // edit on double-click (1 = single click, 2 = double click, 0 = disable click-to-edit)
});
Ext.define('Rds.bacera.panel.BaceraGeneTestingProjectExpressGrid', {
extend : 'Ext.grid.Panel',
loadMask: true,
viewConfig: {
trackOver: false,
stripeRows: false
},
pageSize:25,
selType: 'rowmodel',
plugins: [rowEditing],
initComponent : function() {
var me = this;
var reportif=new Ext.form.field.ComboBox({
fieldLabel : '是否发报告',
width:'20%',
labelWidth : 70,
editable : false,
triggerAction : 'all',
displayField : 'Name',
labelAlign : 'left',
valueField : 'Code',
store : new Ext.data.ArrayStore(
{
fields : ['Name','Code' ],
data : [['全部',0],['是',1 ],['否',2 ] ]
}),
mode : 'local',
name : 'reportif',
value: 0
});
var consumer_name = Ext.create('Ext.form.field.Text',{
name:'consumer_name',
labelWidth:80,
width:'20%',
fieldLabel:'客户姓名'
});
var sample_number = Ext.create('Ext.form.field.Text',{
name:'sample_number',
labelWidth:80,
width:'20%',
fieldLabel:'样本编号'
});
var consumer_sex=new Ext.form.field.ComboBox({
fieldLabel : '性别',
width:'20%',
labelWidth : 70,
editable : false,
triggerAction : 'all',
displayField : 'Name',
labelAlign : 'left',
valueField : 'Code',
store : new Ext.data.ArrayStore(
{
fields : ['Name','Code' ],
data : [['全部',''],['男','M' ],['女','F' ] ]
}),
value : '',
mode : 'local',
name : 'sex',
});
var test_number = Ext.create('Ext.form.field.Text',{
name:'test_number',
labelWidth:80,
width:'20%',
fieldLabel:'案例编号'
});
var test_package_name = Ext.create('Ext.form.field.Text',{
name:'test_package_name',
labelWidth:80,
width:'20%',
fieldLabel:'检测套餐名称'
});
var agency_name = Ext.create('Ext.form.field.Text',{
name:'agency_name',
labelWidth:80,
width:'20%',
fieldLabel:'代理商名称'
});
var test_item_names = Ext.create('Ext.form.field.Text',{
name:'test_item_names',
labelWidth:80,
width:'20%',
fieldLabel:'检测项目名称'
});
var charge_standard_id = Ext.create('Ext.form.field.Text',{
name:'charge_standard_id',
labelWidth:80,
width:'20%',
fieldLabel:'归属人id'
});
var charge_standard_name = Ext.create('Ext.form.field.Text',{
name:'charge_standard_name',
labelWidth:80,
width:'20%',
fieldLabel:'归属人姓名'
});
var mailStore = Ext.create('Ext.data.Store', {
fields:['key','value'],
proxy : {
type : 'jsonajax',
actionMethods : {
read : 'POST'
},
url : 'judicial/dicvalues/getMailModels.do',
reader : {
type : 'json',
root : 'data'
}
},
autoLoad : true,
remoteSort : true
});
var gene_express_starttime = new Ext.form.DateField({
id:'gene_express_starttime',
name : 'gene_express_starttime',
width:'20%',
fieldLabel : '添加日期从',
labelWidth : 80,
labelAlign : 'left',
emptyText : '请选择日期',
format : 'Y-m-d',
value : Ext.Date.add(
new Date(),
Ext.Date.DAY,-7),
listeners:{
'select':function(){
var start = Ext.getCmp('gene_express_starttime').getValue();
var endDate = Ext.getCmp('gene_express_endtime').getValue();
if (start > endDate) {
Ext.getCmp('gene_express_starttime').setValue(endDate);
}
}
}
});
var gene_express_endtime = new Ext.form.DateField({
id:'gene_express_endtime',
name : 'gene_express_endtime',
width:'20%',
labelWidth : 40,
fieldLabel : '到 ',
labelAlign : 'left',
emptyText : '请选择日期',
format : 'Y-m-d',
value : Ext.Date.add(new Date(), Ext.Date.DAY,1),
listeners:{
'select':function(){
var start = Ext.getCmp('gene_express_starttime').getValue();
var endDate = Ext.getCmp('gene_express_endtime').getValue();
if (start > endDate) {
Ext.getCmp('gene_express_starttime').setValue(endDate);
}
}
}
});
me.store = Ext.create('Ext.data.Store',{
fields:['id','add_time','consumer_name','consumer_sex','consumer_birthday','consumer_phone','sample_number','test_number','report_date','test_package_id','test_package_name',
'agency_id','agency_name','test_item_ids','test_item_names','expressnum','expresstype','recive','expresstime','expressremark','paragraphtime','account_type','remarks','remark','charge_standard_name'],
proxy: {
type: 'jsonajax',
actionMethods:{read:'POST'},
url: 'bacera/Gene/queryallpage.do',
params:{
},
reader: {
type: 'json',
root:'data',
totalProperty:'total'
}
},
listeners:{
'beforeload':function(ds, operation, opt){
me.getSelectionModel().clearSelections();
Ext.apply(me.store.proxy.params, {
consumer_name:consumer_name.getValue().trim(),
sample_number:sample_number.getValue().trim(),
gene_starttime:dateformat(gene_express_starttime.getValue()),
gene_endtime:dateformat(gene_express_endtime.getValue()),
test_number:test_number.getValue(),
test_package_name:test_package_name.getValue(),
reportif:reportif.getValue(),
// agency_name:agency_name.getValue().trim(),
test_item_names:test_item_names.getValue(), | consumer_sex:consumer_sex.getValue().trim()
//consumer_phone:consumer_phone.getValue().trim()
});
}
}
});
me.selModel = Ext.create('Ext.selection.CheckboxModel',{
// mode: 'SINGLE'
});
me.bbar = Ext.create('Ext.PagingToolbar', {
store : me.store,
pageSize : me.pageSize,
displayInfo : true,
displayMsg : "第 {0} - {1} 条 共 {2} 条",
emptyMsg : "没有符合条件的记录"
});
//me.bbar = {xtype: 'label',id:'totalBbarGene_express', text: '',style:'height:25px;line-height:25px;text-align:right;margin-right:10px;'};
me.columns = [
{ text: '案例编号', dataIndex: 'test_number', menuDisabled:true, width : 120},
{ text: '样本编号', dataIndex: 'sample_number', menuDisabled:true, width : 120},
{ text: '检测套餐名', dataIndex: 'test_package_name', menuDisabled:true, width : 170},
{ text: '快递单号', dataIndex: 'expressnum', menuDisabled:true, width : 100,
editor:'textfield'
},
{ text: '快递类型', dataIndex: 'expresstype', menuDisabled:true, width : 110,
editor:new Ext.form.ComboBox({
autoSelect : true,
editable:true,
name:'expresstype',
triggerAction: 'all',
queryMode: 'local',
emptyText : "请选择",
selectOnTab: true,
store: mailStore,
maxLength: 50,
fieldStyle: me.fieldStyle,
displayField:'value',
valueField:'value',
listClass: 'x-combo-list-small'
})
},
{ text: '快递日期', dataIndex: 'expresstime', menuDisabled:true, width : 110 },
{ text: '收件人', dataIndex: 'recive', menuDisabled:true, width : 120,
editor:'textfield'
},{ text: '快递备注', dataIndex: 'expressremark', menuDisabled:true, width : 150,
editor:'textfield'
},
{ text: '账户类型', dataIndex: 'account_type', menuDisabled:true, width : 150},
{ text: '到款日期', dataIndex: 'paragraphtime', menuDisabled:true, width : 95},
{ text: '登记时间', dataIndex: 'add_time', menuDisabled:true, width:120,
renderer:Ext.util.Format.dateRenderer('Y-m-d')
},
{ text: '客户姓名', dataIndex: 'consumer_name', menuDisabled:true, width:80},
{ text: '归属人全称', dataIndex: 'charge_standard_name', menuDisabled:true, width : 225},
{ text: '备注', dataIndex: 'remark', menuDisabled:true, width : 200},
{ text: '被代理人', dataIndex: 'agentname', menuDisabled:true, width : 80},
{ text: '财务备注', dataIndex: 'remarks', menuDisabled:true, width : 150},
{ text: '检测项目名', dataIndex: 'test_item_names', menuDisabled:true, width:200},
{ text: '是否发报告', dataIndex: 'reportif', menuDisabled:true, width : 100,
renderer : function(value, cellmeta,
record, rowIndex, columnIndex,
store) {
var isnull= record.data["expresstype"];
var isll= record.data["expressnum"];
if ( null !=isnull||null!=isll) {
return "是";
} else {
return "<span style='color:red'>否</span>";
}
}
} ];
me.dockedItems = [{
xtype:'toolbar',
name:'search',
dock:'top',
items:[test_item_names,test_package_name,consumer_name,consumer_sex]
},{
style : {
borderTopWidth : '0px !important',
borderBottomWidth : '0px !important'
},
xtype:'toolbar',
name:'search',
dock:'top',
items:[sample_number,gene_express_starttime,gene_express_endtime,reportif]
},{
style : {
borderTopWidth : '0px !important',
borderBottomWidth : '0px !important'
},
xtype:'toolbar',
name:'search',
dock:'top',
items:[test_number,charge_standard_name,{
text:'查询',
iconCls:'Find',
handler:me.onSearch
}]
},{
style : {
borderTopWidth : '0px !important',
borderBottomWidth : '0px !important'
},
xtype:'toolbar',
name:'search',
dock:'top',
items:[{
text:'案例详情',
iconCls:'Find',
handler:me.casef
},{
text:'案例编号',
iconCls:'Find',
handler:me.caseCode
}]
}];
me.store.load();
me.callParent(arguments);
// me.store.on("load",function(){
// Ext.getCmp('totalBbarGene_express').setText("共 "+me.store.getCount()+" 条");
// });
},
casef:function(){
var me = this.up("gridpanel");
var selections = me.getView().getSelectionModel().getSelection();
if(selections.length<1){
Ext.Msg.alert("提示", "请选择一条记录!");
return;
};
var form = Ext.create("Rds.bacera.form.BaceraGeneTestingProjectExpressForm",{
region:"center",
grid:me
});
var win = Ext.create("Ext.window.Window",{
title:'案例详情',
width:580,
iconCls:'Pageedit',
modal:true,
height:400,
layout:'border',
items:[form]
});
win.show();
form.loadRecord(selections[0]);
},
caseCode:function(){
var me = this.up("gridpanel");
var selections = me.getView().getSelectionModel().getSelection();
if(selections.length<1){
Ext.Msg.alert("提示", "请选择需要查看的案例编号!");
return;
};
var num="";
for (var i = 0; i < selections.length; i++) {
num += selections[i].get("test_number") + ";";
}
Ext.Msg.alert("我是案例编号", num);
},
onSearch:function(){
var me = this.up("gridpanel");
me.store.currentPage = 1;
me.getStore().load();
},
listeners : {
'beforeedit':function(editor, e,s){
function afterEdit(e,s){
Ext.Ajax.request({
url:"bacera/Gene/saveGeneExpress.do",
method: "POST",
headers: { 'Content-Type': 'application/json' },
jsonData: {
id:s.record.data.id,
num:s.record.data.test_number,
expressnum:s.record.data.expressnum,
recive:s.record.data.recive,
expresstime:(Ext.Date.format(new Date(), 'Y-m-d')),
case_type:s.record.data.test_item_names,
expresstype:s.record.data.expresstype,
expressremark:s.record.data.expressremark
},
success: function (response, options) {
response = Ext.JSON.decode(response.responseText);
if (response==false) {
Ext.MessageBox.alert("错误信息", "修改快递失败,请查看");
}
},
failure: function () {
Ext.Msg.alert("提示", "保存失败<br>请联系管理员!");
}
});
}
rowEditing.on('edit',afterEdit);
},
'afterrender' : function() {
this.store.load();
}
}
}); | charge_standard_name:charge_standard_name.getValue().trim(), | random_line_split |
main.rs | #[macro_use]
extern crate failure;
use nbted::unstable::{data, read, string_read, string_write, write};
use nbted::Result;
use std::env;
use std::fs::File;
use std::io;
use std::io::{BufReader, BufWriter};
use std::path::Path;
use std::process::exit;
use std::process::Command;
use getopts::Options;
use tempdir::TempDir;
use failure::ResultExt;
fn main() |
/// Main entrypoint for program.
///
/// Returns an integer representing the program's exit status.
fn run_cmdline() -> Result<i32> {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
let _: &Options = opts.optflagopt("e", "edit", "edit a NBT file with your $EDITOR.
If [FILE] is specified, then that file is edited in place, but specifying --input and/or --output will override the input/output.
If no file is specified, default to read from --input and writing to --output.", "FILE");
let _: &Options = opts.optflagopt("p", "print", "print NBT file to text format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optflagopt("r", "reverse", "reverse a file in text format to NBT format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optopt(
"i",
"input",
"specify the input file, defaults to stdin",
"FILE",
);
let _: &Options = opts.optopt(
"o",
"output",
"specify the output file, defaults to stdout",
"FILE",
);
let _: &Options = opts.optflag("", "man", "print the nbted man page source and exit");
let _: &Options = opts.optflag("h", "help", "print the help menu and exit");
let _: &Options = opts.optflag("", "version", "print program version and exit");
let matches = opts.parse(&args[1..]).context("error parsing options")?;
if matches.opt_present("h") {
let brief = "Usage: nbted [options] FILE";
print!("{}", opts.usage(&brief));
println!("\nThe default action, taken if no action is explicitly selected, is to --edit.");
println!(
"\nFor detailed usage information, read the nbted man page. If the nbted man page\
\nwas not installed on your system, such as if you installed using `cargo install`,\
\nthen you can use `nbted --man | nroff -man | less` to read the nbted man page."
);
return Ok(0);
}
if matches.opt_present("version") {
println!(
"{} {} {}",
env!("CARGO_PKG_NAME"),
env!("CARGO_PKG_VERSION"),
/* See build.rs for the git-revision.txt file */
include!(concat!(env!("OUT_DIR"), "/git-revision.txt"))
);
println!("https://github.com/C4K3/nbted");
return Ok(0);
}
if matches.opt_present("man") {
print!(include_str!("../nbted.1"));
return Ok(0);
}
let is_print: bool = matches.opt_present("print");
let is_reverse: bool = matches.opt_present("reverse");
let is_edit: bool = if matches.opt_present("edit") {
true
} else {
/* If edit is not explicitly defined, it is the default action and is
* selected if no other action is specified */
!(is_reverse || is_print)
};
/* Hopefully this is a simpler way of ensuring that only one action can be
* taken than having a long logical expression */
let mut action_count = 0;
if is_print {
action_count += 1;
}
if is_reverse {
action_count += 1;
}
if is_edit {
action_count += 1;
}
if action_count > 1 {
bail!("You can only specify one action at a time.");
}
/* Figure out the input file, by trying to read the arguments for all of
* --input, --edit, --print and --reverse, prioritizing --input over the
* other arguments, if none of the arguments are specified but there is a
* free argument, use that, else we finally default to - (stdin) */
let input = if let Some(x) = matches.opt_str("input") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if let Some(x) = matches.opt_str("print") {
x
} else if let Some(x) = matches.opt_str("reverse") {
x
} else if matches.free.len() == 1 {
matches.free[0].clone()
} else {
/* stdin */
"-".to_string()
};
let output = if let Some(x) = matches.opt_str("output") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if is_edit && matches.free.len() == 1 {
/* Only want to default to the free argument if we're editing
* (DO NOT WRITE BACK TO THE READ FILE UNLESS EDITING!) */
matches.free[0].clone()
} else {
/* stdout */
"-".to_string()
};
if matches.free.len() > 1 {
bail!("nbted was given multiple arguments, but only supports editing one file at a time.");
}
if is_print {
print(&input, &output)
} else if is_reverse {
reverse(&input, &output)
} else if is_edit {
edit(&input, &output)
} else {
bail!("Internal error: No action selected. (Please report this.)");
}
}
/// When the user wants to edit a specific file in place
///
/// Returns an integer representing the program's exit status.
fn edit(input: &str, output: &str) -> Result<i32> {
/* First we read the NBT data from the input */
let nbt = if input == "-" {
// let mut f = BufReader::new(io::stdin());
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context("Unable to parse any NBT files from stdin")?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we create a temporary file and write the NBT data in text format
* to the temporary file */
let tmpdir = TempDir::new("nbted").context("Unable to create temporary directory")?;
let tmp = match Path::new(input).file_name() {
Some(x) => {
let mut x = x.to_os_string();
x.push(".txt");
x
}
None => bail!("Error reading file name"),
};
let tmp_path = tmpdir.path().join(tmp);
{
let mut f = File::create(&tmp_path).context("Unable to create temporary file")?;
string_write::write_file(&mut f, &nbt).context("Unable to write temporary file")?;
f.sync_all().context("Unable to synchronize file")?;
}
let new_nbt = {
let mut new_nbt = open_editor(&tmp_path);
while let Err(e) = new_nbt {
eprintln!("Unable to parse edited file");
for e in e.iter_chain() {
eprintln!(" caused by: {}", e);
}
eprintln!("Do you want to open the file for editing again? (y/N)");
let mut line = String::new();
let _: usize = io::stdin()
.read_line(&mut line)
.context("Error reading from stdin. Nothing was changed")?;
if line.trim() == "y" {
new_nbt = open_editor(&tmp_path);
} else {
eprintln!("Exiting ... File is unchanged.");
return Ok(0);
}
}
new_nbt.expect("new_nbt was Error")
};
if nbt == new_nbt {
eprintln!("No changes, will do nothing.");
return Ok(0);
}
/* And finally we write the edited nbt (new_nbt) into the output file */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &new_nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &new_nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
eprintln!("File edited successfully.");
Ok(0)
}
/// Open the user's $EDITOR on the temporary file, wait until the editor is
/// closed again, read the temporary file and attempt to parse it into NBT,
/// returning the result.
fn open_editor(tmp_path: &Path) -> Result<data::NBTFile> {
let editor = match env::var("VISUAL") {
Ok(x) => x,
Err(_) => match env::var("EDITOR") {
Ok(x) => x,
Err(_) => bail!("Unable to find $EDITOR"),
},
};
let mut cmd = Command::new(editor);
let _: &mut Command = cmd.arg(&tmp_path.as_os_str());
let mut cmd = cmd.spawn().context("Error opening editor")?;
match cmd.wait().context("error executing editor")? {
x if x.success() => (),
_ => bail!("Editor did not exit correctly"),
}
/* Then we parse the text format in the temporary file into NBT */
let mut f = File::open(&tmp_path).context(format_err!(
"Unable to read temporary file. Nothing was changed."
))?;
string_read::read_file(&mut f)
}
/// When the user wants to print an NBT file to text format
fn print(input: &str, output: &str) -> Result<i32> {
/* First we read a NBTFile from the input */
let nbt = if input == "-" {
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format_err!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we write the NBTFile to the output in text format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match string_write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed.",
output
))?;
let mut f = BufWriter::new(f);
string_write::write_file(&mut f, &nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
}
/// When the user wants to convert a text format file into an NBT file
///
/// Returns an integer representing the program's exit status.
fn reverse(input: &str, output: &str) -> Result<i32> {
/* First we read the input file in the text format */
let path: &Path = Path::new(input);
let mut f = File::open(&path).context(format_err!("Unable to read text file {}", input))?;
let nbt = string_read::read_file(&mut f)
.context(format_err!("Unable to parse text file {}", input))?;
/* Then we write the parsed NBT to the output file in NBT format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &nbt).context(
format_err!("error writing to NBT FILE {}, state of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
}
| {
match run_cmdline() {
Ok(ret) => {
exit(ret);
}
Err(e) => {
eprintln!("{}", e.backtrace());
eprintln!("Error: {}", e);
for e in e.iter_chain().skip(1) {
eprintln!(" caused by: {}", e);
}
eprintln!("For help, run with --help or read the manpage.");
exit(1);
}
}
} | identifier_body |
main.rs | #[macro_use]
extern crate failure;
use nbted::unstable::{data, read, string_read, string_write, write};
use nbted::Result;
use std::env;
use std::fs::File;
use std::io;
use std::io::{BufReader, BufWriter};
use std::path::Path;
use std::process::exit;
use std::process::Command;
use getopts::Options;
use tempdir::TempDir;
use failure::ResultExt;
fn main() {
match run_cmdline() {
Ok(ret) => {
exit(ret);
}
Err(e) => {
eprintln!("{}", e.backtrace());
eprintln!("Error: {}", e);
for e in e.iter_chain().skip(1) {
eprintln!(" caused by: {}", e);
}
eprintln!("For help, run with --help or read the manpage.");
exit(1);
}
}
}
/// Main entrypoint for program.
///
/// Returns an integer representing the program's exit status.
fn run_cmdline() -> Result<i32> {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
let _: &Options = opts.optflagopt("e", "edit", "edit a NBT file with your $EDITOR.
If [FILE] is specified, then that file is edited in place, but specifying --input and/or --output will override the input/output.
If no file is specified, default to read from --input and writing to --output.", "FILE");
let _: &Options = opts.optflagopt("p", "print", "print NBT file to text format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optflagopt("r", "reverse", "reverse a file in text format to NBT format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optopt(
"i",
"input",
"specify the input file, defaults to stdin",
"FILE",
);
let _: &Options = opts.optopt(
"o",
"output",
"specify the output file, defaults to stdout",
"FILE",
);
let _: &Options = opts.optflag("", "man", "print the nbted man page source and exit");
let _: &Options = opts.optflag("h", "help", "print the help menu and exit");
let _: &Options = opts.optflag("", "version", "print program version and exit");
let matches = opts.parse(&args[1..]).context("error parsing options")?;
if matches.opt_present("h") {
let brief = "Usage: nbted [options] FILE";
print!("{}", opts.usage(&brief));
println!("\nThe default action, taken if no action is explicitly selected, is to --edit.");
println!(
"\nFor detailed usage information, read the nbted man page. If the nbted man page\
\nwas not installed on your system, such as if you installed using `cargo install`,\
\nthen you can use `nbted --man | nroff -man | less` to read the nbted man page."
);
return Ok(0);
}
if matches.opt_present("version") {
println!(
"{} {} {}",
env!("CARGO_PKG_NAME"),
env!("CARGO_PKG_VERSION"),
/* See build.rs for the git-revision.txt file */
include!(concat!(env!("OUT_DIR"), "/git-revision.txt"))
);
println!("https://github.com/C4K3/nbted");
return Ok(0);
}
if matches.opt_present("man") {
print!(include_str!("../nbted.1"));
return Ok(0);
}
let is_print: bool = matches.opt_present("print");
let is_reverse: bool = matches.opt_present("reverse");
let is_edit: bool = if matches.opt_present("edit") {
true
} else {
/* If edit is not explicitly defined, it is the default action and is
* selected if no other action is specified */
!(is_reverse || is_print)
};
/* Hopefully this is a simpler way of ensuring that only one action can be
* taken than having a long logical expression */
let mut action_count = 0;
if is_print {
action_count += 1;
}
if is_reverse {
action_count += 1;
}
if is_edit {
action_count += 1;
}
if action_count > 1 {
bail!("You can only specify one action at a time.");
}
/* Figure out the input file, by trying to read the arguments for all of
* --input, --edit, --print and --reverse, prioritizing --input over the
* other arguments, if none of the arguments are specified but there is a
* free argument, use that, else we finally default to - (stdin) */
let input = if let Some(x) = matches.opt_str("input") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if let Some(x) = matches.opt_str("print") {
x
} else if let Some(x) = matches.opt_str("reverse") {
x
} else if matches.free.len() == 1 {
matches.free[0].clone()
} else {
/* stdin */
"-".to_string()
};
let output = if let Some(x) = matches.opt_str("output") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if is_edit && matches.free.len() == 1 {
/* Only want to default to the free argument if we're editing
* (DO NOT WRITE BACK TO THE READ FILE UNLESS EDITING!) */
matches.free[0].clone()
} else {
/* stdout */
"-".to_string()
};
if matches.free.len() > 1 {
bail!("nbted was given multiple arguments, but only supports editing one file at a time.");
}
if is_print {
print(&input, &output)
} else if is_reverse {
reverse(&input, &output)
} else if is_edit {
edit(&input, &output)
} else {
bail!("Internal error: No action selected. (Please report this.)");
}
}
/// When the user wants to edit a specific file in place
///
/// Returns an integer representing the program's exit status.
fn edit(input: &str, output: &str) -> Result<i32> {
/* First we read the NBT data from the input */
let nbt = if input == "-" {
// let mut f = BufReader::new(io::stdin());
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context("Unable to parse any NBT files from stdin")?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we create a temporary file and write the NBT data in text format | Some(x) => {
let mut x = x.to_os_string();
x.push(".txt");
x
}
None => bail!("Error reading file name"),
};
let tmp_path = tmpdir.path().join(tmp);
{
let mut f = File::create(&tmp_path).context("Unable to create temporary file")?;
string_write::write_file(&mut f, &nbt).context("Unable to write temporary file")?;
f.sync_all().context("Unable to synchronize file")?;
}
let new_nbt = {
let mut new_nbt = open_editor(&tmp_path);
while let Err(e) = new_nbt {
eprintln!("Unable to parse edited file");
for e in e.iter_chain() {
eprintln!(" caused by: {}", e);
}
eprintln!("Do you want to open the file for editing again? (y/N)");
let mut line = String::new();
let _: usize = io::stdin()
.read_line(&mut line)
.context("Error reading from stdin. Nothing was changed")?;
if line.trim() == "y" {
new_nbt = open_editor(&tmp_path);
} else {
eprintln!("Exiting ... File is unchanged.");
return Ok(0);
}
}
new_nbt.expect("new_nbt was Error")
};
if nbt == new_nbt {
eprintln!("No changes, will do nothing.");
return Ok(0);
}
/* And finally we write the edited nbt (new_nbt) into the output file */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &new_nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &new_nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
eprintln!("File edited successfully.");
Ok(0)
}
/// Open the user's $EDITOR on the temporary file, wait until the editor is
/// closed again, read the temporary file and attempt to parse it into NBT,
/// returning the result.
fn open_editor(tmp_path: &Path) -> Result<data::NBTFile> {
let editor = match env::var("VISUAL") {
Ok(x) => x,
Err(_) => match env::var("EDITOR") {
Ok(x) => x,
Err(_) => bail!("Unable to find $EDITOR"),
},
};
let mut cmd = Command::new(editor);
let _: &mut Command = cmd.arg(&tmp_path.as_os_str());
let mut cmd = cmd.spawn().context("Error opening editor")?;
match cmd.wait().context("error executing editor")? {
x if x.success() => (),
_ => bail!("Editor did not exit correctly"),
}
/* Then we parse the text format in the temporary file into NBT */
let mut f = File::open(&tmp_path).context(format_err!(
"Unable to read temporary file. Nothing was changed."
))?;
string_read::read_file(&mut f)
}
/// When the user wants to print an NBT file to text format
fn print(input: &str, output: &str) -> Result<i32> {
/* First we read a NBTFile from the input */
let nbt = if input == "-" {
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format_err!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we write the NBTFile to the output in text format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match string_write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed.",
output
))?;
let mut f = BufWriter::new(f);
string_write::write_file(&mut f, &nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
}
/// When the user wants to convert a text format file into an NBT file
///
/// Returns an integer representing the program's exit status.
fn reverse(input: &str, output: &str) -> Result<i32> {
/* First we read the input file in the text format */
let path: &Path = Path::new(input);
let mut f = File::open(&path).context(format_err!("Unable to read text file {}", input))?;
let nbt = string_read::read_file(&mut f)
.context(format_err!("Unable to parse text file {}", input))?;
/* Then we write the parsed NBT to the output file in NBT format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &nbt).context(
format_err!("error writing to NBT FILE {}, state of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
} | * to the temporary file */
let tmpdir = TempDir::new("nbted").context("Unable to create temporary directory")?;
let tmp = match Path::new(input).file_name() { | random_line_split |
main.rs | #[macro_use]
extern crate failure;
use nbted::unstable::{data, read, string_read, string_write, write};
use nbted::Result;
use std::env;
use std::fs::File;
use std::io;
use std::io::{BufReader, BufWriter};
use std::path::Path;
use std::process::exit;
use std::process::Command;
use getopts::Options;
use tempdir::TempDir;
use failure::ResultExt;
fn main() {
match run_cmdline() {
Ok(ret) => {
exit(ret);
}
Err(e) => {
eprintln!("{}", e.backtrace());
eprintln!("Error: {}", e);
for e in e.iter_chain().skip(1) {
eprintln!(" caused by: {}", e);
}
eprintln!("For help, run with --help or read the manpage.");
exit(1);
}
}
}
/// Main entrypoint for program.
///
/// Returns an integer representing the program's exit status.
fn run_cmdline() -> Result<i32> {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
let _: &Options = opts.optflagopt("e", "edit", "edit a NBT file with your $EDITOR.
If [FILE] is specified, then that file is edited in place, but specifying --input and/or --output will override the input/output.
If no file is specified, default to read from --input and writing to --output.", "FILE");
let _: &Options = opts.optflagopt("p", "print", "print NBT file to text format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optflagopt("r", "reverse", "reverse a file in text format to NBT format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optopt(
"i",
"input",
"specify the input file, defaults to stdin",
"FILE",
);
let _: &Options = opts.optopt(
"o",
"output",
"specify the output file, defaults to stdout",
"FILE",
);
let _: &Options = opts.optflag("", "man", "print the nbted man page source and exit");
let _: &Options = opts.optflag("h", "help", "print the help menu and exit");
let _: &Options = opts.optflag("", "version", "print program version and exit");
let matches = opts.parse(&args[1..]).context("error parsing options")?;
if matches.opt_present("h") {
let brief = "Usage: nbted [options] FILE";
print!("{}", opts.usage(&brief));
println!("\nThe default action, taken if no action is explicitly selected, is to --edit.");
println!(
"\nFor detailed usage information, read the nbted man page. If the nbted man page\
\nwas not installed on your system, such as if you installed using `cargo install`,\
\nthen you can use `nbted --man | nroff -man | less` to read the nbted man page."
);
return Ok(0);
}
if matches.opt_present("version") {
println!(
"{} {} {}",
env!("CARGO_PKG_NAME"),
env!("CARGO_PKG_VERSION"),
/* See build.rs for the git-revision.txt file */
include!(concat!(env!("OUT_DIR"), "/git-revision.txt"))
);
println!("https://github.com/C4K3/nbted");
return Ok(0);
}
if matches.opt_present("man") {
print!(include_str!("../nbted.1"));
return Ok(0);
}
let is_print: bool = matches.opt_present("print");
let is_reverse: bool = matches.opt_present("reverse");
let is_edit: bool = if matches.opt_present("edit") {
true
} else {
/* If edit is not explicitly defined, it is the default action and is
* selected if no other action is specified */
!(is_reverse || is_print)
};
/* Hopefully this is a simpler way of ensuring that only one action can be
* taken than having a long logical expression */
let mut action_count = 0;
if is_print {
action_count += 1;
}
if is_reverse {
action_count += 1;
}
if is_edit {
action_count += 1;
}
if action_count > 1 {
bail!("You can only specify one action at a time.");
}
/* Figure out the input file, by trying to read the arguments for all of
* --input, --edit, --print and --reverse, prioritizing --input over the
* other arguments, if none of the arguments are specified but there is a
* free argument, use that, else we finally default to - (stdin) */
let input = if let Some(x) = matches.opt_str("input") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if let Some(x) = matches.opt_str("print") {
x
} else if let Some(x) = matches.opt_str("reverse") {
x
} else if matches.free.len() == 1 {
matches.free[0].clone()
} else {
/* stdin */
"-".to_string()
};
let output = if let Some(x) = matches.opt_str("output") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if is_edit && matches.free.len() == 1 {
/* Only want to default to the free argument if we're editing
* (DO NOT WRITE BACK TO THE READ FILE UNLESS EDITING!) */
matches.free[0].clone()
} else {
/* stdout */
"-".to_string()
};
if matches.free.len() > 1 {
bail!("nbted was given multiple arguments, but only supports editing one file at a time.");
}
if is_print {
print(&input, &output)
} else if is_reverse {
reverse(&input, &output)
} else if is_edit | else {
bail!("Internal error: No action selected. (Please report this.)");
}
}
/// When the user wants to edit a specific file in place
///
/// Returns an integer representing the program's exit status.
fn edit(input: &str, output: &str) -> Result<i32> {
/* First we read the NBT data from the input */
let nbt = if input == "-" {
// let mut f = BufReader::new(io::stdin());
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context("Unable to parse any NBT files from stdin")?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we create a temporary file and write the NBT data in text format
* to the temporary file */
let tmpdir = TempDir::new("nbted").context("Unable to create temporary directory")?;
let tmp = match Path::new(input).file_name() {
Some(x) => {
let mut x = x.to_os_string();
x.push(".txt");
x
}
None => bail!("Error reading file name"),
};
let tmp_path = tmpdir.path().join(tmp);
{
let mut f = File::create(&tmp_path).context("Unable to create temporary file")?;
string_write::write_file(&mut f, &nbt).context("Unable to write temporary file")?;
f.sync_all().context("Unable to synchronize file")?;
}
let new_nbt = {
let mut new_nbt = open_editor(&tmp_path);
while let Err(e) = new_nbt {
eprintln!("Unable to parse edited file");
for e in e.iter_chain() {
eprintln!(" caused by: {}", e);
}
eprintln!("Do you want to open the file for editing again? (y/N)");
let mut line = String::new();
let _: usize = io::stdin()
.read_line(&mut line)
.context("Error reading from stdin. Nothing was changed")?;
if line.trim() == "y" {
new_nbt = open_editor(&tmp_path);
} else {
eprintln!("Exiting ... File is unchanged.");
return Ok(0);
}
}
new_nbt.expect("new_nbt was Error")
};
if nbt == new_nbt {
eprintln!("No changes, will do nothing.");
return Ok(0);
}
/* And finally we write the edited nbt (new_nbt) into the output file */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &new_nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &new_nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
eprintln!("File edited successfully.");
Ok(0)
}
/// Open the user's $EDITOR on the temporary file, wait until the editor is
/// closed again, read the temporary file and attempt to parse it into NBT,
/// returning the result.
fn open_editor(tmp_path: &Path) -> Result<data::NBTFile> {
let editor = match env::var("VISUAL") {
Ok(x) => x,
Err(_) => match env::var("EDITOR") {
Ok(x) => x,
Err(_) => bail!("Unable to find $EDITOR"),
},
};
let mut cmd = Command::new(editor);
let _: &mut Command = cmd.arg(&tmp_path.as_os_str());
let mut cmd = cmd.spawn().context("Error opening editor")?;
match cmd.wait().context("error executing editor")? {
x if x.success() => (),
_ => bail!("Editor did not exit correctly"),
}
/* Then we parse the text format in the temporary file into NBT */
let mut f = File::open(&tmp_path).context(format_err!(
"Unable to read temporary file. Nothing was changed."
))?;
string_read::read_file(&mut f)
}
/// When the user wants to print an NBT file to text format
fn print(input: &str, output: &str) -> Result<i32> {
/* First we read a NBTFile from the input */
let nbt = if input == "-" {
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format_err!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we write the NBTFile to the output in text format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match string_write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed.",
output
))?;
let mut f = BufWriter::new(f);
string_write::write_file(&mut f, &nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
}
/// When the user wants to convert a text format file into an NBT file
///
/// Returns an integer representing the program's exit status.
fn reverse(input: &str, output: &str) -> Result<i32> {
/* First we read the input file in the text format */
let path: &Path = Path::new(input);
let mut f = File::open(&path).context(format_err!("Unable to read text file {}", input))?;
let nbt = string_read::read_file(&mut f)
.context(format_err!("Unable to parse text file {}", input))?;
/* Then we write the parsed NBT to the output file in NBT format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &nbt).context(
format_err!("error writing to NBT FILE {}, state of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
}
| {
edit(&input, &output)
} | conditional_block |
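// Editor's sketch (not part of the dataset row above; assumes the getopts
// crate, which run_cmdline() above already uses): declare flags/options,
// parse argv, then branch on opt_present()/opt_str(). The flag names and the
// precedence rule are trimmed-down stand-ins for the real ones.
use getopts::Options;
use std::env;
fn main() {
    let args: Vec<String> = env::args().collect();
    let mut opts = Options::new();
    let _ = opts.optflag("p", "print", "print the file");
    let _ = opts.optopt("i", "input", "input file, defaults to stdin", "FILE");
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(e) => {
            eprintln!("error parsing options: {}", e);
            return;
        }
    };
    // Same precedence idea as above: an explicit --input wins over a free argument.
    let input = matches
        .opt_str("input")
        .or_else(|| matches.free.first().cloned())
        .unwrap_or_else(|| "-".to_string());
    println!("print={} input={}", matches.opt_present("print"), input);
}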
main.rs | #[macro_use]
extern crate failure;
use nbted::unstable::{data, read, string_read, string_write, write};
use nbted::Result;
use std::env;
use std::fs::File;
use std::io;
use std::io::{BufReader, BufWriter};
use std::path::Path;
use std::process::exit;
use std::process::Command;
use getopts::Options;
use tempdir::TempDir;
use failure::ResultExt;
fn main() {
match run_cmdline() {
Ok(ret) => {
exit(ret);
}
Err(e) => {
eprintln!("{}", e.backtrace());
eprintln!("Error: {}", e);
for e in e.iter_chain().skip(1) {
eprintln!(" caused by: {}", e);
}
eprintln!("For help, run with --help or read the manpage.");
exit(1);
}
}
}
/// Main entrypoint for program.
///
/// Returns an integer representing the program's exit status.
fn run_cmdline() -> Result<i32> {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
let _: &Options = opts.optflagopt("e", "edit", "edit a NBT file with your $EDITOR.
If [FILE] is specified, then that file is edited in place, but specifying --input and/or --output will override the input/output.
If no file is specified, defaults to reading from --input and writing to --output.", "FILE");
let _: &Options = opts.optflagopt("p", "print", "print NBT file to text format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optflagopt("r", "reverse", "reverse a file in text format to NBT format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optopt(
"i",
"input",
"specify the input file, defaults to stdin",
"FILE",
);
let _: &Options = opts.optopt(
"o",
"output",
"specify the output file, defaults to stdout",
"FILE",
);
let _: &Options = opts.optflag("", "man", "print the nbted man page source and exit");
let _: &Options = opts.optflag("h", "help", "print the help menu and exit");
let _: &Options = opts.optflag("", "version", "print program version and exit");
let matches = opts.parse(&args[1..]).context("error parsing options")?;
if matches.opt_present("h") {
let brief = "Usage: nbted [options] FILE";
print!("{}", opts.usage(&brief));
println!("\nThe default action, taken if no action is explicitly selected, is to --edit.");
println!(
"\nFor detailed usage information, read the nbted man page. If the nbted man page\
\nwas not installed on your system, such as if you installed using `cargo install`,\
\nthen you can use `nbted --man | nroff -man | less` to read the nbted man page."
);
return Ok(0);
}
if matches.opt_present("version") {
println!(
"{} {} {}",
env!("CARGO_PKG_NAME"),
env!("CARGO_PKG_VERSION"),
/* See build.rs for the git-revision.txt file */
include!(concat!(env!("OUT_DIR"), "/git-revision.txt"))
);
println!("https://github.com/C4K3/nbted");
return Ok(0);
}
if matches.opt_present("man") {
print!(include_str!("../nbted.1"));
return Ok(0);
}
let is_print: bool = matches.opt_present("print");
let is_reverse: bool = matches.opt_present("reverse");
let is_edit: bool = if matches.opt_present("edit") {
true
} else {
/* If edit is not explicitly defined, it is the default action and is
* selected if no other action is specified */
!(is_reverse || is_print)
};
/* Hopefully this is a simpler way of ensuring that only one action can be
* taken than having a long logical expression */
let mut action_count = 0;
if is_print {
action_count += 1;
}
if is_reverse {
action_count += 1;
}
if is_edit {
action_count += 1;
}
if action_count > 1 {
bail!("You can only specify one action at a time.");
}
/* Figure out the input file, by trying to read the arguments for all of
* --input, --edit, --print and --reverse, prioritizing --input over the
* other arguments, if none of the arguments are specified but there is a
* free argument, use that, else we finally default to - (stdin) */
let input = if let Some(x) = matches.opt_str("input") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if let Some(x) = matches.opt_str("print") {
x
} else if let Some(x) = matches.opt_str("reverse") {
x
} else if matches.free.len() == 1 {
matches.free[0].clone()
} else {
/* stdin */
"-".to_string()
};
let output = if let Some(x) = matches.opt_str("output") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if is_edit && matches.free.len() == 1 {
/* Only want to default to the free argument if we're editing
* (DO NOT WRITE BACK TO THE READ FILE UNLESS EDITING!) */
matches.free[0].clone()
} else {
/* stdout */
"-".to_string()
};
if matches.free.len() > 1 {
bail!("nbted was given multiple arguments, but only supports editing one file at a time.");
}
if is_print {
print(&input, &output)
} else if is_reverse {
reverse(&input, &output)
} else if is_edit {
edit(&input, &output)
} else {
bail!("Internal error: No action selected. (Please report this.)");
}
}
/// When the user wants to edit a specific file in place
///
/// Returns an integer representing the program's exit status.
fn edit(input: &str, output: &str) -> Result<i32> {
/* First we read the NBT data from the input */
let nbt = if input == "-" {
// let mut f = BufReader::new(io::stdin());
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context("Unable to parse any NBT files from stdin")?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we create a temporary file and write the NBT data in text format
* to the temporary file */
let tmpdir = TempDir::new("nbted").context("Unable to create temporary directory")?;
let tmp = match Path::new(input).file_name() {
Some(x) => {
let mut x = x.to_os_string();
x.push(".txt");
x
}
None => bail!("Error reading file name"),
};
let tmp_path = tmpdir.path().join(tmp);
{
let mut f = File::create(&tmp_path).context("Unable to create temporary file")?;
string_write::write_file(&mut f, &nbt).context("Unable to write temporary file")?;
f.sync_all().context("Unable to synchronize file")?;
}
let new_nbt = {
let mut new_nbt = open_editor(&tmp_path);
while let Err(e) = new_nbt {
eprintln!("Unable to parse edited file");
for e in e.iter_chain() {
eprintln!(" caused by: {}", e);
}
eprintln!("Do you want to open the file for editing again? (y/N)");
let mut line = String::new();
let _: usize = io::stdin()
.read_line(&mut line)
.context("Error reading from stdin. Nothing was changed")?;
if line.trim() == "y" {
new_nbt = open_editor(&tmp_path);
} else {
eprintln!("Exiting ... File is unchanged.");
return Ok(0);
}
}
new_nbt.expect("new_nbt was Error")
};
if nbt == new_nbt {
eprintln!("No changes, will do nothing.");
return Ok(0);
}
/* And finally we write the edited nbt (new_nbt) into the output file */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &new_nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &new_nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
eprintln!("File edited successfully.");
Ok(0)
}
/// Open the user's $EDITOR on the temporary file, wait until the editor is
/// closed again, read the temporary file and attempt to parse it into NBT,
/// returning the result.
fn | (tmp_path: &Path) -> Result<data::NBTFile> {
let editor = match env::var("VISUAL") {
Ok(x) => x,
Err(_) => match env::var("EDITOR") {
Ok(x) => x,
Err(_) => bail!("Unable to find $EDITOR"),
},
};
let mut cmd = Command::new(editor);
let _: &mut Command = cmd.arg(&tmp_path.as_os_str());
let mut cmd = cmd.spawn().context("Error opening editor")?;
match cmd.wait().context("error executing editor")? {
x if x.success() => (),
_ => bail!("Editor did not exit correctly"),
}
/* Then we parse the text format in the temporary file into NBT */
let mut f = File::open(&tmp_path).context(format_err!(
"Unable to read temporary file. Nothing was changed."
))?;
string_read::read_file(&mut f)
}
/// When the user wants to print an NBT file to text format
fn print(input: &str, output: &str) -> Result<i32> {
/* First we read a NBTFile from the input */
let nbt = if input == "-" {
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format_err!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we write the NBTFile to the output in text format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match string_write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed.",
output
))?;
let mut f = BufWriter::new(f);
string_write::write_file(&mut f, &nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
}
/// When the user wants to convert a text format file into an NBT file
///
/// Returns an integer representing the program's exit status.
fn reverse(input: &str, output: &str) -> Result<i32> {
/* First we read the input file in the text format */
let path: &Path = Path::new(input);
let mut f = File::open(&path).context(format_err!("Unable to read text file {}", input))?;
let nbt = string_read::read_file(&mut f)
.context(format_err!("Unable to parse text file {}", input))?;
/* Then we write the parsed NBT to the output file in NBT format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &nbt).context(
format_err!("error writing to NBT FILE {}, state of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
}
| open_editor | identifier_name |
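// Editor's sketch (not part of the dataset row above; std only): the
// $VISUAL -> $EDITOR fallback plus spawn/wait pattern that open_editor()
// implements with the failure crate. find_editor/edit_file are illustrative
// names, and the path passed in main() is a placeholder.
use std::env;
use std::io;
use std::process::Command;
fn find_editor() -> Option<String> {
    env::var("VISUAL").or_else(|_| env::var("EDITOR")).ok()
}
fn edit_file(path: &str) -> io::Result<bool> {
    let editor = find_editor()
        .ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "Unable to find $EDITOR"))?;
    let status = Command::new(editor).arg(path).spawn()?.wait()?;
    Ok(status.success())
}
fn main() {
    match edit_file("notes.txt") {
        Ok(true) => eprintln!("editor exited cleanly"),
        Ok(false) => eprintln!("editor did not exit correctly"),
        Err(e) => eprintln!("error: {}", e),
    }
}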
rtio.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use c_str::CString;
use cast;
use comm::{Sender, Receiver};
use libc::c_int;
use libc;
use kinds::Send;
use ops::Drop;
use option::{Option, Some, None};
use path::Path;
use result::Err;
use rt::local::Local;
use rt::task::Task;
use vec::Vec;
use ai = io::net::addrinfo;
use io;
use io::IoResult;
use io::net::ip::{IpAddr, SocketAddr};
use io::process::{ProcessConfig, ProcessExit};
use io::signal::Signum;
use io::{FileMode, FileAccess, FileStat, FilePermission};
use io::{SeekStyle};
pub trait Callback {
fn call(&mut self);
}
pub trait EventLoop {
fn run(&mut self);
fn callback(&mut self, arg: proc():Send);
fn pausable_idle_callback(&mut self,
~Callback:Send) -> ~PausableIdleCallback:Send;
fn remote_callback(&mut self, ~Callback:Send) -> ~RemoteCallback:Send;
/// The asynchronous I/O services. Not all event loops may provide one.
fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory>;
fn has_active_io(&self) -> bool;
}
pub trait RemoteCallback {
/// Trigger the remote callback. Note that the number of times the
/// callback is run is not guaranteed. All that is guaranteed is
/// that, after calling 'fire', the callback will be called at
/// least once, but multiple callbacks may be coalesced and
/// callbacks may be called more often than requested. Destruction also
/// triggers the callback.
fn fire(&mut self);
}
/// Data needed to make a successful open(2) call
/// Using unix flag conventions for now, which happens to also be what's supported
/// by libuv (it does translation to Windows under the hood).
pub struct FileOpenConfig {
/// Path to file to be opened
pub path: Path,
/// Flags for file access mode (as per open(2))
pub flags: int,
/// File creation mode, ignored unless O_CREAT is passed as part of flags
pub mode: int
}
/// Description of what to do when a file handle is closed
pub enum CloseBehavior {
/// Do not close this handle when the object is destroyed
DontClose,
/// Synchronously close the handle, meaning that the task will block when
/// the handle is destroyed until it has been fully closed.
CloseSynchronously,
/// Asynchronously closes a handle, meaning that the task will *not* block
/// when the handle is destroyed, but the handle will still get deallocated
/// and cleaned up (but this will happen asynchronously on the local event
/// loop).
CloseAsynchronously,
}
pub struct LocalIo<'a> {
factory: &'a mut IoFactory,
}
#[unsafe_destructor]
impl<'a> Drop for LocalIo<'a> {
fn drop(&mut self) {
// FIXME(pcwalton): Do nothing here for now, but eventually we may want
// something. For now this serves to make `LocalIo` noncopyable.
}
}
impl<'a> LocalIo<'a> {
/// Returns the local I/O: either the local scheduler's I/O services or
/// the native I/O services.
pub fn borrow() -> Option<LocalIo> {
// FIXME(#11053): bad
//
// This is currently very unsafely implemented. We don't actually
// *take* the local I/O so there's a very real possibility that we
// can have two borrows at once. Currently there is not a clear way
// to actually borrow the local I/O factory safely because even if
// ownership were transferred down to the functions that the I/O
// factory implements it's just too much of a pain to know when to
// relinquish ownership back into the local task (but that would be
// the safe way of implementing this function).
//
// In order to get around this, we just transmute a copy out of the task
// in order to have what is likely a static lifetime (bad).
let mut t: ~Task = Local::take();
let ret = t.local_io().map(|t| {
unsafe { cast::transmute_copy(&t) }
});
Local::put(t);
return ret;
}
pub fn maybe_raise<T>(f: |io: &mut IoFactory| -> IoResult<T>)
-> IoResult<T>
|
pub fn new<'a>(io: &'a mut IoFactory) -> LocalIo<'a> {
LocalIo { factory: io }
}
/// Returns the underlying I/O factory as a trait reference.
#[inline]
pub fn get<'a>(&'a mut self) -> &'a mut IoFactory {
// FIXME(pcwalton): I think this is actually sound? Could borrow check
// allow this safely?
unsafe {
cast::transmute_copy(&self.factory)
}
}
}
pub trait IoFactory {
// networking
fn tcp_connect(&mut self, addr: SocketAddr) -> IoResult<~RtioTcpStream:Send>;
fn tcp_bind(&mut self, addr: SocketAddr) -> IoResult<~RtioTcpListener:Send>;
fn udp_bind(&mut self, addr: SocketAddr) -> IoResult<~RtioUdpSocket:Send>;
fn unix_bind(&mut self, path: &CString)
-> IoResult<~RtioUnixListener:Send>;
fn unix_connect(&mut self, path: &CString) -> IoResult<~RtioPipe:Send>;
fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>,
hint: Option<ai::Hint>) -> IoResult<~[ai::Info]>;
// filesystem operations
fn fs_from_raw_fd(&mut self, fd: c_int, close: CloseBehavior)
-> ~RtioFileStream:Send;
fn fs_open(&mut self, path: &CString, fm: FileMode, fa: FileAccess)
-> IoResult<~RtioFileStream:Send>;
fn fs_unlink(&mut self, path: &CString) -> IoResult<()>;
fn fs_stat(&mut self, path: &CString) -> IoResult<FileStat>;
fn fs_mkdir(&mut self, path: &CString,
mode: FilePermission) -> IoResult<()>;
fn fs_chmod(&mut self, path: &CString,
mode: FilePermission) -> IoResult<()>;
fn fs_rmdir(&mut self, path: &CString) -> IoResult<()>;
fn fs_rename(&mut self, path: &CString, to: &CString) -> IoResult<()>;
fn fs_readdir(&mut self, path: &CString, flags: c_int) ->
IoResult<Vec<Path>>;
fn fs_lstat(&mut self, path: &CString) -> IoResult<FileStat>;
fn fs_chown(&mut self, path: &CString, uid: int, gid: int) ->
IoResult<()>;
fn fs_readlink(&mut self, path: &CString) -> IoResult<Path>;
fn fs_symlink(&mut self, src: &CString, dst: &CString) -> IoResult<()>;
fn fs_link(&mut self, src: &CString, dst: &CString) -> IoResult<()>;
fn fs_utime(&mut self, src: &CString, atime: u64, mtime: u64) ->
IoResult<()>;
// misc
fn timer_init(&mut self) -> IoResult<~RtioTimer:Send>;
fn spawn(&mut self, config: ProcessConfig)
-> IoResult<(~RtioProcess:Send, ~[Option<~RtioPipe:Send>])>;
fn kill(&mut self, pid: libc::pid_t, signal: int) -> IoResult<()>;
fn pipe_open(&mut self, fd: c_int) -> IoResult<~RtioPipe:Send>;
fn tty_open(&mut self, fd: c_int, readable: bool)
-> IoResult<~RtioTTY:Send>;
fn signal(&mut self, signal: Signum, channel: Sender<Signum>)
-> IoResult<~RtioSignal:Send>;
}
pub trait RtioTcpListener : RtioSocket {
fn listen(~self) -> IoResult<~RtioTcpAcceptor:Send>;
}
pub trait RtioTcpAcceptor : RtioSocket {
fn accept(&mut self) -> IoResult<~RtioTcpStream:Send>;
fn accept_simultaneously(&mut self) -> IoResult<()>;
fn dont_accept_simultaneously(&mut self) -> IoResult<()>;
}
pub trait RtioTcpStream : RtioSocket {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn peer_name(&mut self) -> IoResult<SocketAddr>;
fn control_congestion(&mut self) -> IoResult<()>;
fn nodelay(&mut self) -> IoResult<()>;
fn keepalive(&mut self, delay_in_seconds: uint) -> IoResult<()>;
fn letdie(&mut self) -> IoResult<()>;
fn clone(&self) -> ~RtioTcpStream:Send;
fn close_write(&mut self) -> IoResult<()>;
}
pub trait RtioSocket {
fn socket_name(&mut self) -> IoResult<SocketAddr>;
}
pub trait RtioUdpSocket : RtioSocket {
fn recvfrom(&mut self, buf: &mut [u8]) -> IoResult<(uint, SocketAddr)>;
fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()>;
fn join_multicast(&mut self, multi: IpAddr) -> IoResult<()>;
fn leave_multicast(&mut self, multi: IpAddr) -> IoResult<()>;
fn loop_multicast_locally(&mut self) -> IoResult<()>;
fn dont_loop_multicast_locally(&mut self) -> IoResult<()>;
fn multicast_time_to_live(&mut self, ttl: int) -> IoResult<()>;
fn time_to_live(&mut self, ttl: int) -> IoResult<()>;
fn hear_broadcasts(&mut self) -> IoResult<()>;
fn ignore_broadcasts(&mut self) -> IoResult<()>;
fn clone(&self) -> ~RtioUdpSocket:Send;
}
pub trait RtioTimer {
fn sleep(&mut self, msecs: u64);
fn oneshot(&mut self, msecs: u64) -> Receiver<()>;
fn period(&mut self, msecs: u64) -> Receiver<()>;
}
pub trait RtioFileStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<int>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn pread(&mut self, buf: &mut [u8], offset: u64) -> IoResult<int>;
fn pwrite(&mut self, buf: &[u8], offset: u64) -> IoResult<()>;
fn seek(&mut self, pos: i64, whence: SeekStyle) -> IoResult<u64>;
fn tell(&self) -> IoResult<u64>;
fn fsync(&mut self) -> IoResult<()>;
fn datasync(&mut self) -> IoResult<()>;
fn truncate(&mut self, offset: i64) -> IoResult<()>;
}
pub trait RtioProcess {
fn id(&self) -> libc::pid_t;
fn kill(&mut self, signal: int) -> IoResult<()>;
fn wait(&mut self) -> ProcessExit;
}
pub trait RtioPipe {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn clone(&self) -> ~RtioPipe:Send;
}
pub trait RtioUnixListener {
fn listen(~self) -> IoResult<~RtioUnixAcceptor:Send>;
}
pub trait RtioUnixAcceptor {
fn accept(&mut self) -> IoResult<~RtioPipe:Send>;
}
pub trait RtioTTY {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn set_raw(&mut self, raw: bool) -> IoResult<()>;
fn get_winsize(&mut self) -> IoResult<(int, int)>;
fn isatty(&self) -> bool;
}
pub trait PausableIdleCallback {
fn pause(&mut self);
fn resume(&mut self);
}
pub trait RtioSignal {}
| {
match LocalIo::borrow() {
None => Err(io::standard_error(io::IoUnavailable)),
Some(mut io) => f(io.get()),
}
} | identifier_body |
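// Editor's sketch (not part of the dataset row above): the identifier_body in
// this row is LocalIo::maybe_raise. In current Rust the same "borrow the
// optional service or return a canned error" shape looks roughly like this;
// IoFactory and the String error are stand-ins, not the real rtio types.
struct IoFactory;
fn borrow_local() -> Option<IoFactory> {
    Some(IoFactory)
}
fn maybe_raise<T>(f: impl FnOnce(&mut IoFactory) -> Result<T, String>) -> Result<T, String> {
    match borrow_local() {
        None => Err("I/O is unavailable".to_string()),
        Some(mut io) => f(&mut io),
    }
}
fn main() {
    let answer = maybe_raise(|_io| Ok(42));
    println!("{:?}", answer);
}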
rtio.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use c_str::CString;
use cast;
use comm::{Sender, Receiver};
use libc::c_int;
use libc;
use kinds::Send;
use ops::Drop;
use option::{Option, Some, None};
use path::Path;
use result::Err;
use rt::local::Local;
use rt::task::Task;
use vec::Vec;
use ai = io::net::addrinfo;
use io;
use io::IoResult;
use io::net::ip::{IpAddr, SocketAddr};
use io::process::{ProcessConfig, ProcessExit};
use io::signal::Signum;
use io::{FileMode, FileAccess, FileStat, FilePermission};
use io::{SeekStyle};
pub trait Callback {
fn call(&mut self);
}
pub trait EventLoop {
fn run(&mut self);
fn callback(&mut self, arg: proc():Send);
fn pausable_idle_callback(&mut self,
~Callback:Send) -> ~PausableIdleCallback:Send;
fn remote_callback(&mut self, ~Callback:Send) -> ~RemoteCallback:Send;
/// The asynchronous I/O services. Not all event loops may provide one.
fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory>;
fn has_active_io(&self) -> bool;
}
pub trait RemoteCallback {
/// Trigger the remote callback. Note that the number of times the
/// callback is run is not guaranteed. All that is guaranteed is
/// that, after calling 'fire', the callback will be called at
/// least once, but multiple callbacks may be coalesced and
/// callbacks may be called more often than requested. Destruction also
/// triggers the callback.
fn fire(&mut self);
}
/// Data needed to make a successful open(2) call
/// Using unix flag conventions for now, which happens to also be what's supported
/// by libuv (it does translation to Windows under the hood).
pub struct FileOpenConfig {
/// Path to file to be opened
pub path: Path,
/// Flags for file access mode (as per open(2))
pub flags: int,
/// File creation mode, ignored unless O_CREAT is passed as part of flags
pub mode: int
}
/// Description of what to do when a file handle is closed
pub enum CloseBehavior {
/// Do not close this handle when the object is destroyed
DontClose,
/// Synchronously close the handle, meaning that the task will block when
/// the handle is destroyed until it has been fully closed.
CloseSynchronously,
/// Asynchronously closes a handle, meaning that the task will *not* block
/// when the handle is destroyed, but the handle will still get deallocated
/// and cleaned up (but this will happen asynchronously on the local event
/// loop).
CloseAsynchronously,
}
pub struct LocalIo<'a> {
factory: &'a mut IoFactory,
}
#[unsafe_destructor]
impl<'a> Drop for LocalIo<'a> {
fn drop(&mut self) {
// FIXME(pcwalton): Do nothing here for now, but eventually we may want
// something. For now this serves to make `LocalIo` noncopyable.
}
}
impl<'a> LocalIo<'a> {
/// Returns the local I/O: either the local scheduler's I/O services or
/// the native I/O services.
pub fn borrow() -> Option<LocalIo> {
// FIXME(#11053): bad
//
// This is currently very unsafely implemented. We don't actually
// *take* the local I/O so there's a very real possibility that we
// can have two borrows at once. Currently there is not a clear way
// to actually borrow the local I/O factory safely because even if
// ownership were transferred down to the functions that the I/O
// factory implements it's just too much of a pain to know when to
// relinquish ownership back into the local task (but that would be
// the safe way of implementing this function).
//
// In order to get around this, we just transmute a copy out of the task
// in order to have what is likely a static lifetime (bad).
let mut t: ~Task = Local::take();
let ret = t.local_io().map(|t| {
unsafe { cast::transmute_copy(&t) }
});
Local::put(t);
return ret;
}
pub fn maybe_raise<T>(f: |io: &mut IoFactory| -> IoResult<T>)
-> IoResult<T>
{
match LocalIo::borrow() {
None => Err(io::standard_error(io::IoUnavailable)),
Some(mut io) => f(io.get()),
}
}
pub fn new<'a>(io: &'a mut IoFactory) -> LocalIo<'a> {
LocalIo { factory: io }
}
/// Returns the underlying I/O factory as a trait reference.
#[inline]
pub fn get<'a>(&'a mut self) -> &'a mut IoFactory {
// FIXME(pcwalton): I think this is actually sound? Could borrow check
// allow this safely?
unsafe {
cast::transmute_copy(&self.factory)
}
}
}
pub trait IoFactory {
// networking
fn tcp_connect(&mut self, addr: SocketAddr) -> IoResult<~RtioTcpStream:Send>;
fn tcp_bind(&mut self, addr: SocketAddr) -> IoResult<~RtioTcpListener:Send>;
fn udp_bind(&mut self, addr: SocketAddr) -> IoResult<~RtioUdpSocket:Send>;
fn unix_bind(&mut self, path: &CString)
-> IoResult<~RtioUnixListener:Send>;
fn unix_connect(&mut self, path: &CString) -> IoResult<~RtioPipe:Send>;
fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>,
hint: Option<ai::Hint>) -> IoResult<~[ai::Info]>;
// filesystem operations
fn fs_from_raw_fd(&mut self, fd: c_int, close: CloseBehavior)
-> ~RtioFileStream:Send;
fn fs_open(&mut self, path: &CString, fm: FileMode, fa: FileAccess)
-> IoResult<~RtioFileStream:Send>;
fn fs_unlink(&mut self, path: &CString) -> IoResult<()>;
fn fs_stat(&mut self, path: &CString) -> IoResult<FileStat>;
fn fs_mkdir(&mut self, path: &CString,
mode: FilePermission) -> IoResult<()>;
fn fs_chmod(&mut self, path: &CString,
mode: FilePermission) -> IoResult<()>;
fn fs_rmdir(&mut self, path: &CString) -> IoResult<()>;
fn fs_rename(&mut self, path: &CString, to: &CString) -> IoResult<()>;
fn fs_readdir(&mut self, path: &CString, flags: c_int) ->
IoResult<Vec<Path>>;
fn fs_lstat(&mut self, path: &CString) -> IoResult<FileStat>;
fn fs_chown(&mut self, path: &CString, uid: int, gid: int) ->
IoResult<()>;
fn fs_readlink(&mut self, path: &CString) -> IoResult<Path>;
fn fs_symlink(&mut self, src: &CString, dst: &CString) -> IoResult<()>;
fn fs_link(&mut self, src: &CString, dst: &CString) -> IoResult<()>;
fn fs_utime(&mut self, src: &CString, atime: u64, mtime: u64) ->
IoResult<()>;
// misc
fn timer_init(&mut self) -> IoResult<~RtioTimer:Send>;
fn spawn(&mut self, config: ProcessConfig)
-> IoResult<(~RtioProcess:Send, ~[Option<~RtioPipe:Send>])>;
fn kill(&mut self, pid: libc::pid_t, signal: int) -> IoResult<()>;
fn pipe_open(&mut self, fd: c_int) -> IoResult<~RtioPipe:Send>;
fn tty_open(&mut self, fd: c_int, readable: bool)
-> IoResult<~RtioTTY:Send>;
fn signal(&mut self, signal: Signum, channel: Sender<Signum>)
-> IoResult<~RtioSignal:Send>;
}
pub trait RtioTcpListener : RtioSocket {
fn listen(~self) -> IoResult<~RtioTcpAcceptor:Send>;
}
pub trait RtioTcpAcceptor : RtioSocket {
fn accept(&mut self) -> IoResult<~RtioTcpStream:Send>;
fn accept_simultaneously(&mut self) -> IoResult<()>;
fn dont_accept_simultaneously(&mut self) -> IoResult<()>;
}
pub trait RtioTcpStream : RtioSocket {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn peer_name(&mut self) -> IoResult<SocketAddr>;
fn control_congestion(&mut self) -> IoResult<()>;
fn nodelay(&mut self) -> IoResult<()>;
fn keepalive(&mut self, delay_in_seconds: uint) -> IoResult<()>;
fn letdie(&mut self) -> IoResult<()>;
fn clone(&self) -> ~RtioTcpStream:Send;
fn close_write(&mut self) -> IoResult<()>;
}
pub trait RtioSocket {
fn socket_name(&mut self) -> IoResult<SocketAddr>;
}
pub trait RtioUdpSocket : RtioSocket {
fn recvfrom(&mut self, buf: &mut [u8]) -> IoResult<(uint, SocketAddr)>;
fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()>;
fn join_multicast(&mut self, multi: IpAddr) -> IoResult<()>;
fn leave_multicast(&mut self, multi: IpAddr) -> IoResult<()>;
fn loop_multicast_locally(&mut self) -> IoResult<()>;
fn dont_loop_multicast_locally(&mut self) -> IoResult<()>;
fn multicast_time_to_live(&mut self, ttl: int) -> IoResult<()>;
fn time_to_live(&mut self, ttl: int) -> IoResult<()>;
fn hear_broadcasts(&mut self) -> IoResult<()>;
fn ignore_broadcasts(&mut self) -> IoResult<()>;
fn clone(&self) -> ~RtioUdpSocket:Send;
}
pub trait RtioTimer {
fn sleep(&mut self, msecs: u64);
fn oneshot(&mut self, msecs: u64) -> Receiver<()>;
fn period(&mut self, msecs: u64) -> Receiver<()>;
}
pub trait RtioFileStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<int>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn pread(&mut self, buf: &mut [u8], offset: u64) -> IoResult<int>;
fn pwrite(&mut self, buf: &[u8], offset: u64) -> IoResult<()>;
fn seek(&mut self, pos: i64, whence: SeekStyle) -> IoResult<u64>;
fn tell(&self) -> IoResult<u64>;
fn fsync(&mut self) -> IoResult<()>;
fn datasync(&mut self) -> IoResult<()>; | fn id(&self) -> libc::pid_t;
fn kill(&mut self, signal: int) -> IoResult<()>;
fn wait(&mut self) -> ProcessExit;
}
pub trait RtioPipe {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn clone(&self) -> ~RtioPipe:Send;
}
pub trait RtioUnixListener {
fn listen(~self) -> IoResult<~RtioUnixAcceptor:Send>;
}
pub trait RtioUnixAcceptor {
fn accept(&mut self) -> IoResult<~RtioPipe:Send>;
}
pub trait RtioTTY {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn set_raw(&mut self, raw: bool) -> IoResult<()>;
fn get_winsize(&mut self) -> IoResult<(int, int)>;
fn isatty(&self) -> bool;
}
pub trait PausableIdleCallback {
fn pause(&mut self);
fn resume(&mut self);
}
pub trait RtioSignal {} | fn truncate(&mut self, offset: i64) -> IoResult<()>;
}
pub trait RtioProcess { | random_line_split |
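// Editor's sketch (not part of the dataset row above): the pre-1.0 owned
// trait objects written `~RtioTimer:Send` above correspond to
// `Box<dyn Trait + Send>` in current Rust. Timer/ThreadTimer are stand-in
// names; the real RtioTimer also exposes oneshot/period channels.
use std::thread;
use std::time::Duration;
trait Timer {
    fn sleep(&mut self, msecs: u64);
}
struct ThreadTimer;
impl Timer for ThreadTimer {
    fn sleep(&mut self, msecs: u64) {
        thread::sleep(Duration::from_millis(msecs));
    }
}
fn timer_init() -> Box<dyn Timer + Send> {
    Box::new(ThreadTimer)
}
fn main() {
    let mut t = timer_init();
    t.sleep(5); // dynamic dispatch through the boxed trait object
}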
rtio.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use c_str::CString;
use cast;
use comm::{Sender, Receiver};
use libc::c_int;
use libc;
use kinds::Send;
use ops::Drop;
use option::{Option, Some, None};
use path::Path;
use result::Err;
use rt::local::Local;
use rt::task::Task;
use vec::Vec;
use ai = io::net::addrinfo;
use io;
use io::IoResult;
use io::net::ip::{IpAddr, SocketAddr};
use io::process::{ProcessConfig, ProcessExit};
use io::signal::Signum;
use io::{FileMode, FileAccess, FileStat, FilePermission};
use io::{SeekStyle};
pub trait Callback {
fn call(&mut self);
}
pub trait EventLoop {
fn run(&mut self);
fn callback(&mut self, arg: proc():Send);
fn pausable_idle_callback(&mut self,
~Callback:Send) -> ~PausableIdleCallback:Send;
fn remote_callback(&mut self, ~Callback:Send) -> ~RemoteCallback:Send;
/// The asynchronous I/O services. Not all event loops may provide one.
fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory>;
fn has_active_io(&self) -> bool;
}
pub trait RemoteCallback {
/// Trigger the remote callback. Note that the number of times the
/// callback is run is not guaranteed. All that is guaranteed is
/// that, after calling 'fire', the callback will be called at
/// least once, but multiple callbacks may be coalesced and
/// callbacks may be called more often than requested. Destruction also
/// triggers the callback.
fn fire(&mut self);
}
/// Data needed to make a successful open(2) call
/// Using unix flag conventions for now, which happens to also be what's supported
/// by libuv (it does translation to Windows under the hood).
pub struct | {
/// Path to file to be opened
pub path: Path,
/// Flags for file access mode (as per open(2))
pub flags: int,
/// File creation mode, ignored unless O_CREAT is passed as part of flags
pub mode: int
}
/// Description of what to do when a file handle is closed
pub enum CloseBehavior {
/// Do not close this handle when the object is destroyed
DontClose,
/// Synchronously close the handle, meaning that the task will block when
/// the handle is destroyed until it has been fully closed.
CloseSynchronously,
/// Asynchronously closes a handle, meaning that the task will *not* block
/// when the handle is destroyed, but the handle will still get deallocated
/// and cleaned up (but this will happen asynchronously on the local event
/// loop).
CloseAsynchronously,
}
pub struct LocalIo<'a> {
factory: &'a mut IoFactory,
}
#[unsafe_destructor]
impl<'a> Drop for LocalIo<'a> {
fn drop(&mut self) {
// FIXME(pcwalton): Do nothing here for now, but eventually we may want
// something. For now this serves to make `LocalIo` noncopyable.
}
}
impl<'a> LocalIo<'a> {
/// Returns the local I/O: either the local scheduler's I/O services or
/// the native I/O services.
pub fn borrow() -> Option<LocalIo> {
// FIXME(#11053): bad
//
// This is currently very unsafely implemented. We don't actually
// *take* the local I/O so there's a very real possibility that we
// can have two borrows at once. Currently there is not a clear way
// to actually borrow the local I/O factory safely because even if
// ownership were transferred down to the functions that the I/O
// factory implements it's just too much of a pain to know when to
// relinquish ownership back into the local task (but that would be
// the safe way of implementing this function).
//
// In order to get around this, we just transmute a copy out of the task
// in order to have what is likely a static lifetime (bad).
let mut t: ~Task = Local::take();
let ret = t.local_io().map(|t| {
unsafe { cast::transmute_copy(&t) }
});
Local::put(t);
return ret;
}
pub fn maybe_raise<T>(f: |io: &mut IoFactory| -> IoResult<T>)
-> IoResult<T>
{
match LocalIo::borrow() {
None => Err(io::standard_error(io::IoUnavailable)),
Some(mut io) => f(io.get()),
}
}
pub fn new<'a>(io: &'a mut IoFactory) -> LocalIo<'a> {
LocalIo { factory: io }
}
/// Returns the underlying I/O factory as a trait reference.
#[inline]
pub fn get<'a>(&'a mut self) -> &'a mut IoFactory {
// FIXME(pcwalton): I think this is actually sound? Could borrow check
// allow this safely?
unsafe {
cast::transmute_copy(&self.factory)
}
}
}
pub trait IoFactory {
// networking
fn tcp_connect(&mut self, addr: SocketAddr) -> IoResult<~RtioTcpStream:Send>;
fn tcp_bind(&mut self, addr: SocketAddr) -> IoResult<~RtioTcpListener:Send>;
fn udp_bind(&mut self, addr: SocketAddr) -> IoResult<~RtioUdpSocket:Send>;
fn unix_bind(&mut self, path: &CString)
-> IoResult<~RtioUnixListener:Send>;
fn unix_connect(&mut self, path: &CString) -> IoResult<~RtioPipe:Send>;
fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>,
hint: Option<ai::Hint>) -> IoResult<~[ai::Info]>;
// filesystem operations
fn fs_from_raw_fd(&mut self, fd: c_int, close: CloseBehavior)
-> ~RtioFileStream:Send;
fn fs_open(&mut self, path: &CString, fm: FileMode, fa: FileAccess)
-> IoResult<~RtioFileStream:Send>;
fn fs_unlink(&mut self, path: &CString) -> IoResult<()>;
fn fs_stat(&mut self, path: &CString) -> IoResult<FileStat>;
fn fs_mkdir(&mut self, path: &CString,
mode: FilePermission) -> IoResult<()>;
fn fs_chmod(&mut self, path: &CString,
mode: FilePermission) -> IoResult<()>;
fn fs_rmdir(&mut self, path: &CString) -> IoResult<()>;
fn fs_rename(&mut self, path: &CString, to: &CString) -> IoResult<()>;
fn fs_readdir(&mut self, path: &CString, flags: c_int) ->
IoResult<Vec<Path>>;
fn fs_lstat(&mut self, path: &CString) -> IoResult<FileStat>;
fn fs_chown(&mut self, path: &CString, uid: int, gid: int) ->
IoResult<()>;
fn fs_readlink(&mut self, path: &CString) -> IoResult<Path>;
fn fs_symlink(&mut self, src: &CString, dst: &CString) -> IoResult<()>;
fn fs_link(&mut self, src: &CString, dst: &CString) -> IoResult<()>;
fn fs_utime(&mut self, src: &CString, atime: u64, mtime: u64) ->
IoResult<()>;
// misc
fn timer_init(&mut self) -> IoResult<~RtioTimer:Send>;
fn spawn(&mut self, config: ProcessConfig)
-> IoResult<(~RtioProcess:Send, ~[Option<~RtioPipe:Send>])>;
fn kill(&mut self, pid: libc::pid_t, signal: int) -> IoResult<()>;
fn pipe_open(&mut self, fd: c_int) -> IoResult<~RtioPipe:Send>;
fn tty_open(&mut self, fd: c_int, readable: bool)
-> IoResult<~RtioTTY:Send>;
fn signal(&mut self, signal: Signum, channel: Sender<Signum>)
-> IoResult<~RtioSignal:Send>;
}
pub trait RtioTcpListener : RtioSocket {
fn listen(~self) -> IoResult<~RtioTcpAcceptor:Send>;
}
pub trait RtioTcpAcceptor : RtioSocket {
fn accept(&mut self) -> IoResult<~RtioTcpStream:Send>;
fn accept_simultaneously(&mut self) -> IoResult<()>;
fn dont_accept_simultaneously(&mut self) -> IoResult<()>;
}
pub trait RtioTcpStream : RtioSocket {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn peer_name(&mut self) -> IoResult<SocketAddr>;
fn control_congestion(&mut self) -> IoResult<()>;
fn nodelay(&mut self) -> IoResult<()>;
fn keepalive(&mut self, delay_in_seconds: uint) -> IoResult<()>;
fn letdie(&mut self) -> IoResult<()>;
fn clone(&self) -> ~RtioTcpStream:Send;
fn close_write(&mut self) -> IoResult<()>;
}
pub trait RtioSocket {
fn socket_name(&mut self) -> IoResult<SocketAddr>;
}
pub trait RtioUdpSocket : RtioSocket {
fn recvfrom(&mut self, buf: &mut [u8]) -> IoResult<(uint, SocketAddr)>;
fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()>;
fn join_multicast(&mut self, multi: IpAddr) -> IoResult<()>;
fn leave_multicast(&mut self, multi: IpAddr) -> IoResult<()>;
fn loop_multicast_locally(&mut self) -> IoResult<()>;
fn dont_loop_multicast_locally(&mut self) -> IoResult<()>;
fn multicast_time_to_live(&mut self, ttl: int) -> IoResult<()>;
fn time_to_live(&mut self, ttl: int) -> IoResult<()>;
fn hear_broadcasts(&mut self) -> IoResult<()>;
fn ignore_broadcasts(&mut self) -> IoResult<()>;
fn clone(&self) -> ~RtioUdpSocket:Send;
}
pub trait RtioTimer {
fn sleep(&mut self, msecs: u64);
fn oneshot(&mut self, msecs: u64) -> Receiver<()>;
fn period(&mut self, msecs: u64) -> Receiver<()>;
}
pub trait RtioFileStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<int>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn pread(&mut self, buf: &mut [u8], offset: u64) -> IoResult<int>;
fn pwrite(&mut self, buf: &[u8], offset: u64) -> IoResult<()>;
fn seek(&mut self, pos: i64, whence: SeekStyle) -> IoResult<u64>;
fn tell(&self) -> IoResult<u64>;
fn fsync(&mut self) -> IoResult<()>;
fn datasync(&mut self) -> IoResult<()>;
fn truncate(&mut self, offset: i64) -> IoResult<()>;
}
pub trait RtioProcess {
fn id(&self) -> libc::pid_t;
fn kill(&mut self, signal: int) -> IoResult<()>;
fn wait(&mut self) -> ProcessExit;
}
pub trait RtioPipe {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn clone(&self) -> ~RtioPipe:Send;
}
pub trait RtioUnixListener {
fn listen(~self) -> IoResult<~RtioUnixAcceptor:Send>;
}
pub trait RtioUnixAcceptor {
fn accept(&mut self) -> IoResult<~RtioPipe:Send>;
}
pub trait RtioTTY {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn set_raw(&mut self, raw: bool) -> IoResult<()>;
fn get_winsize(&mut self) -> IoResult<(int, int)>;
fn isatty(&self) -> bool;
}
pub trait PausableIdleCallback {
fn pause(&mut self);
fn resume(&mut self);
}
pub trait RtioSignal {}
| FileOpenConfig | identifier_name |
traffic_signal.rs | use crate::options::TrafficSignalStyle;
use crate::render::{DrawCtx, DrawTurnGroup, BIG_ARROW_THICKNESS};
use crate::ui::UI;
use ezgui::{
Button, Color, Composite, DrawBoth, EventCtx, GeomBatch, GfxCtx, Line, ManagedWidget,
ModalMenu, Outcome, Text,
};
use geom::{Circle, Distance, Duration, Polygon};
use map_model::{IntersectionID, Phase, TurnPriority};
use std::collections::BTreeSet;
// Only draws a box when time_left is present
pub fn draw_signal_phase(
phase: &Phase,
i: IntersectionID,
time_left: Option<Duration>,
batch: &mut GeomBatch,
ctx: &DrawCtx,
) |
pub struct TrafficSignalDiagram {
pub i: IntersectionID,
composite: Composite,
current_phase: usize,
}
impl TrafficSignalDiagram {
pub fn new(
i: IntersectionID,
current_phase: usize,
ui: &UI,
ctx: &EventCtx,
) -> TrafficSignalDiagram {
TrafficSignalDiagram {
i,
composite: make_diagram(i, current_phase, ui, ctx),
current_phase,
}
}
pub fn event(&mut self, ctx: &mut EventCtx, ui: &mut UI, menu: &mut ModalMenu) {
if self.current_phase != 0 && menu.action("select previous phase") {
self.change_phase(self.current_phase - 1, ui, ctx);
}
if self.current_phase != ui.primary.map.get_traffic_signal(self.i).phases.len() - 1
&& menu.action("select next phase")
{
self.change_phase(self.current_phase + 1, ui, ctx);
}
match self.composite.event(ctx) {
Some(Outcome::Clicked(x)) => {
self.change_phase(x["phase ".len()..].parse::<usize>().unwrap() - 1, ui, ctx);
}
None => {}
}
}
fn change_phase(&mut self, idx: usize, ui: &UI, ctx: &EventCtx) {
if self.current_phase != idx {
let preserve_scroll = self.composite.preserve_scroll(ctx);
self.current_phase = idx;
self.composite = make_diagram(self.i, self.current_phase, ui, ctx);
self.composite.restore_scroll(ctx, preserve_scroll);
}
}
pub fn current_phase(&self) -> usize {
self.current_phase
}
pub fn draw(&self, g: &mut GfxCtx) {
self.composite.draw(g);
}
}
fn make_diagram(i: IntersectionID, selected: usize, ui: &UI, ctx: &EventCtx) -> Composite {
// Slightly inaccurate -- the turn rendering may slightly exceed the intersection polygon --
// but this is close enough.
let bounds = ui.primary.map.get_i(i).polygon.get_bounds();
// Pick a zoom so that we fit some percentage of the screen
let zoom = 0.2 * ctx.canvas.window_width / (bounds.max_x - bounds.min_x);
let bbox = Polygon::rectangle(
zoom * (bounds.max_x - bounds.min_x),
zoom * (bounds.max_y - bounds.min_y),
);
let signal = ui.primary.map.get_traffic_signal(i);
let mut col = vec![ManagedWidget::draw_text(ctx, {
let mut txt = Text::new();
txt.add(Line(i.to_string()).roboto());
let road_names = ui
.primary
.map
.get_i(i)
.roads
.iter()
.map(|r| ui.primary.map.get_r(*r).get_name())
.collect::<BTreeSet<_>>();
let len = road_names.len();
// TODO Some kind of reusable TextStyle thing
// TODO Need to wrap this
txt.add(Line("").roboto().size(21).fg(Color::WHITE.alpha(0.54)));
for (idx, n) in road_names.into_iter().enumerate() {
txt.append(Line(n).roboto().fg(Color::WHITE.alpha(0.54)));
if idx != len - 1 {
txt.append(Line(", ").roboto().fg(Color::WHITE.alpha(0.54)));
}
}
txt.add(Line(format!("{} phases", signal.phases.len())));
txt.add(Line(""));
txt.add(Line(format!("Signal offset: {}", signal.offset)));
txt.add(Line(format!("One cycle lasts {}", signal.cycle_length())));
txt
})];
for (idx, phase) in signal.phases.iter().enumerate() {
col.push(
ManagedWidget::row(vec![
ManagedWidget::draw_text(ctx, Text::from(Line(format!("#{}", idx + 1)))),
ManagedWidget::draw_text(ctx, Text::from(Line(phase.duration.to_string()))),
])
.margin(5)
.evenly_spaced(),
);
let mut orig_batch = GeomBatch::new();
draw_signal_phase(phase, i, None, &mut orig_batch, &ui.draw_ctx());
let mut normal = GeomBatch::new();
// TODO Ideally no background here, but we have to force the dimensions of normal and
// hovered to be the same. For some reason the bbox is slightly different.
if idx == selected {
normal.push(Color::RED.alpha(0.15), bbox.clone());
} else {
normal.push(Color::CYAN.alpha(0.05), bbox.clone());
}
// Move to the origin and apply zoom
for (color, poly) in orig_batch.consume() {
normal.push(
color,
poly.translate(-bounds.min_x, -bounds.min_y).scale(zoom),
);
}
let mut hovered = GeomBatch::new();
hovered.push(Color::RED.alpha(0.95), bbox.clone());
hovered.append(normal.clone());
col.push(
ManagedWidget::btn(Button::new(
DrawBoth::new(ctx, normal, Vec::new()),
DrawBoth::new(ctx, hovered, Vec::new()),
None,
&format!("phase {}", idx + 1),
bbox.clone(),
))
.margin(5),
);
}
Composite::scrollable(ctx, ManagedWidget::col(col).bg(Color::hex("#545454")))
}
| {
let protected_color = ctx
.cs
.get_def("turn protected by traffic signal", Color::GREEN);
let yield_color = ctx.cs.get_def(
"turn that can yield by traffic signal",
Color::rgba(255, 105, 180, 0.8),
);
let signal = ctx.map.get_traffic_signal(i);
for (id, crosswalk) in &ctx.draw_map.get_i(i).crosswalks {
if phase.get_priority_of_turn(*id, signal) == TurnPriority::Protected {
batch.append(crosswalk.clone());
}
}
match ctx.opts.traffic_signal_style {
TrafficSignalStyle::GroupArrows => {
for g in &phase.protected_groups {
if g.crosswalk.is_none() {
batch.push(
protected_color,
signal.turn_groups[g]
.geom
.make_arrow(BIG_ARROW_THICKNESS * 2.0)
.unwrap(),
);
}
}
for g in &phase.yield_groups {
if g.crosswalk.is_none() {
batch.extend(
yield_color,
signal.turn_groups[g]
.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
}
}
}
TrafficSignalStyle::Icons => {
for g in DrawTurnGroup::for_i(i, ctx.map) {
batch.push(ctx.cs.get("turn block background"), g.block.clone());
let arrow_color = match phase.get_priority_of_group(g.id) {
TurnPriority::Protected => ctx.cs.get("turn protected by traffic signal"),
TurnPriority::Yield => ctx
.cs
.get("turn that can yield by traffic signal")
.alpha(1.0),
TurnPriority::Banned => ctx.cs.get("turn not in current phase"),
};
batch.push(arrow_color, g.arrow.clone());
}
}
TrafficSignalStyle::IndividualTurnArrows => {
for turn in ctx.map.get_turns_in_intersection(i) {
if turn.between_sidewalks() {
continue;
}
match phase.get_priority_of_turn(turn.id, signal) {
TurnPriority::Protected => {
batch.push(
protected_color,
turn.geom.make_arrow(BIG_ARROW_THICKNESS * 2.0).unwrap(),
);
}
TurnPriority::Yield => {
batch.extend(
yield_color,
turn.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
}
TurnPriority::Banned => {}
}
}
}
}
if time_left.is_none() {
return;
}
let radius = Distance::meters(0.5);
let box_width = (2.5 * radius).inner_meters();
let box_height = (6.5 * radius).inner_meters();
let center = ctx.map.get_i(i).polygon.center();
let top_left = center.offset(-box_width / 2.0, -box_height / 2.0);
let percent = time_left.unwrap() / phase.duration;
// TODO Tune colors.
batch.push(
ctx.cs.get_def("traffic signal box", Color::grey(0.5)),
Polygon::rectangle(box_width, box_height).translate(top_left.x(), top_left.y()),
);
batch.push(
Color::RED,
Circle::new(center.offset(0.0, -2.0 * radius.inner_meters()), radius).to_polygon(),
);
batch.push(Color::grey(0.4), Circle::new(center, radius).to_polygon());
batch.push(
Color::YELLOW,
Circle::new(center, radius).to_partial_polygon(percent),
);
batch.push(
Color::GREEN,
Circle::new(center.offset(0.0, 2.0 * radius.inner_meters()), radius).to_polygon(),
);
} | identifier_body |
traffic_signal.rs | use crate::options::TrafficSignalStyle;
use crate::render::{DrawCtx, DrawTurnGroup, BIG_ARROW_THICKNESS};
use crate::ui::UI;
use ezgui::{
Button, Color, Composite, DrawBoth, EventCtx, GeomBatch, GfxCtx, Line, ManagedWidget,
ModalMenu, Outcome, Text,
};
use geom::{Circle, Distance, Duration, Polygon};
use map_model::{IntersectionID, Phase, TurnPriority};
use std::collections::BTreeSet;
// Only draws a box when time_left is present
pub fn draw_signal_phase(
phase: &Phase,
i: IntersectionID,
time_left: Option<Duration>,
batch: &mut GeomBatch,
ctx: &DrawCtx,
) {
let protected_color = ctx
.cs
.get_def("turn protected by traffic signal", Color::GREEN);
let yield_color = ctx.cs.get_def(
"turn that can yield by traffic signal",
Color::rgba(255, 105, 180, 0.8),
);
let signal = ctx.map.get_traffic_signal(i);
for (id, crosswalk) in &ctx.draw_map.get_i(i).crosswalks {
if phase.get_priority_of_turn(*id, signal) == TurnPriority::Protected {
batch.append(crosswalk.clone());
}
}
match ctx.opts.traffic_signal_style {
TrafficSignalStyle::GroupArrows => {
for g in &phase.protected_groups {
if g.crosswalk.is_none() {
batch.push(
protected_color,
signal.turn_groups[g]
.geom
.make_arrow(BIG_ARROW_THICKNESS * 2.0)
.unwrap(),
);
}
}
for g in &phase.yield_groups {
if g.crosswalk.is_none() {
batch.extend(
yield_color,
signal.turn_groups[g]
.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
}
}
}
TrafficSignalStyle::Icons => {
for g in DrawTurnGroup::for_i(i, ctx.map) {
batch.push(ctx.cs.get("turn block background"), g.block.clone());
let arrow_color = match phase.get_priority_of_group(g.id) {
TurnPriority::Protected => ctx.cs.get("turn protected by traffic signal"),
TurnPriority::Yield => ctx
.cs
.get("turn that can yield by traffic signal")
.alpha(1.0),
TurnPriority::Banned => ctx.cs.get("turn not in current phase"),
};
batch.push(arrow_color, g.arrow.clone());
}
}
TrafficSignalStyle::IndividualTurnArrows => {
for turn in ctx.map.get_turns_in_intersection(i) {
if turn.between_sidewalks() {
continue;
}
match phase.get_priority_of_turn(turn.id, signal) {
TurnPriority::Protected => {
batch.push(
protected_color,
turn.geom.make_arrow(BIG_ARROW_THICKNESS * 2.0).unwrap(),
);
}
TurnPriority::Yield => {
batch.extend(
yield_color,
turn.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
}
TurnPriority::Banned => {}
}
}
}
}
if time_left.is_none() {
return;
}
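// Countdown display: inside the grey box, a red and a green circle flank a grey circle
// that is overlaid with a yellow partial circle showing the fraction of the phase remaining.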
let radius = Distance::meters(0.5);
let box_width = (2.5 * radius).inner_meters();
let box_height = (6.5 * radius).inner_meters();
let center = ctx.map.get_i(i).polygon.center();
let top_left = center.offset(-box_width / 2.0, -box_height / 2.0);
let percent = time_left.unwrap() / phase.duration;
// TODO Tune colors.
batch.push(
ctx.cs.get_def("traffic signal box", Color::grey(0.5)),
Polygon::rectangle(box_width, box_height).translate(top_left.x(), top_left.y()),
);
batch.push(
Color::RED,
Circle::new(center.offset(0.0, -2.0 * radius.inner_meters()), radius).to_polygon(),
);
batch.push(Color::grey(0.4), Circle::new(center, radius).to_polygon());
batch.push(
Color::YELLOW,
Circle::new(center, radius).to_partial_polygon(percent),
);
batch.push(
Color::GREEN,
Circle::new(center.offset(0.0, 2.0 * radius.inner_meters()), radius).to_polygon(),
);
}
pub struct TrafficSignalDiagram {
pub i: IntersectionID,
composite: Composite,
current_phase: usize,
}
impl TrafficSignalDiagram {
pub fn new(
i: IntersectionID,
current_phase: usize,
ui: &UI,
ctx: &EventCtx,
) -> TrafficSignalDiagram {
TrafficSignalDiagram {
i,
composite: make_diagram(i, current_phase, ui, ctx),
current_phase,
}
}
pub fn event(&mut self, ctx: &mut EventCtx, ui: &mut UI, menu: &mut ModalMenu) {
if self.current_phase != 0 && menu.action("select previous phase") {
self.change_phase(self.current_phase - 1, ui, ctx);
}
if self.current_phase != ui.primary.map.get_traffic_signal(self.i).phases.len() - 1
&& menu.action("select next phase")
{
self.change_phase(self.current_phase + 1, ui, ctx);
}
match self.composite.event(ctx) {
Some(Outcome::Clicked(x)) => {
self.change_phase(x["phase ".len()..].parse::<usize>().unwrap() - 1, ui, ctx);
}
None => {}
}
}
fn change_phase(&mut self, idx: usize, ui: &UI, ctx: &EventCtx) {
if self.current_phase != idx {
let preserve_scroll = self.composite.preserve_scroll(ctx);
self.current_phase = idx;
self.composite = make_diagram(self.i, self.current_phase, ui, ctx);
self.composite.restore_scroll(ctx, preserve_scroll);
}
}
pub fn current_phase(&self) -> usize {
self.current_phase
}
pub fn draw(&self, g: &mut GfxCtx) {
self.composite.draw(g);
}
}
fn make_diagram(i: IntersectionID, selected: usize, ui: &UI, ctx: &EventCtx) -> Composite {
// Slightly inaccurate -- the turn rendering may slightly exceed the intersection polygon --
// but this is close enough.
let bounds = ui.primary.map.get_i(i).polygon.get_bounds();
// Pick a zoom so that we fit some percentage of the screen
let zoom = 0.2 * ctx.canvas.window_width / (bounds.max_x - bounds.min_x);
let bbox = Polygon::rectangle(
zoom * (bounds.max_x - bounds.min_x),
zoom * (bounds.max_y - bounds.min_y),
);
let signal = ui.primary.map.get_traffic_signal(i);
let mut col = vec![ManagedWidget::draw_text(ctx, {
let mut txt = Text::new();
txt.add(Line(i.to_string()).roboto());
let road_names = ui
.primary
.map
.get_i(i) | .iter()
.map(|r| ui.primary.map.get_r(*r).get_name())
.collect::<BTreeSet<_>>();
let len = road_names.len();
// TODO Some kind of reusable TextStyle thing
// TODO Need to wrap this
txt.add(Line("").roboto().size(21).fg(Color::WHITE.alpha(0.54)));
for (idx, n) in road_names.into_iter().enumerate() {
txt.append(Line(n).roboto().fg(Color::WHITE.alpha(0.54)));
if idx != len - 1 {
txt.append(Line(", ").roboto().fg(Color::WHITE.alpha(0.54)));
}
}
txt.add(Line(format!("{} phases", signal.phases.len())));
txt.add(Line(""));
txt.add(Line(format!("Signal offset: {}", signal.offset)));
txt.add(Line(format!("One cycle lasts {}", signal.cycle_length())));
txt
})];
for (idx, phase) in signal.phases.iter().enumerate() {
col.push(
ManagedWidget::row(vec![
ManagedWidget::draw_text(ctx, Text::from(Line(format!("#{}", idx + 1)))),
ManagedWidget::draw_text(ctx, Text::from(Line(phase.duration.to_string()))),
])
.margin(5)
.evenly_spaced(),
);
let mut orig_batch = GeomBatch::new();
draw_signal_phase(phase, i, None, &mut orig_batch, &ui.draw_ctx());
let mut normal = GeomBatch::new();
// TODO Ideally no background here, but we have to force the dimensions of normal and
// hovered to be the same. For some reason the bbox is slightly different.
if idx == selected {
normal.push(Color::RED.alpha(0.15), bbox.clone());
} else {
normal.push(Color::CYAN.alpha(0.05), bbox.clone());
}
// Move to the origin and apply zoom
for (color, poly) in orig_batch.consume() {
normal.push(
color,
poly.translate(-bounds.min_x, -bounds.min_y).scale(zoom),
);
}
let mut hovered = GeomBatch::new();
hovered.push(Color::RED.alpha(0.95), bbox.clone());
hovered.append(normal.clone());
col.push(
ManagedWidget::btn(Button::new(
DrawBoth::new(ctx, normal, Vec::new()),
DrawBoth::new(ctx, hovered, Vec::new()),
None,
&format!("phase {}", idx + 1),
bbox.clone(),
))
.margin(5),
);
}
Composite::scrollable(ctx, ManagedWidget::col(col).bg(Color::hex("#545454")))
} | .roads | random_line_split |
traffic_signal.rs | use crate::options::TrafficSignalStyle;
use crate::render::{DrawCtx, DrawTurnGroup, BIG_ARROW_THICKNESS};
use crate::ui::UI;
use ezgui::{
Button, Color, Composite, DrawBoth, EventCtx, GeomBatch, GfxCtx, Line, ManagedWidget,
ModalMenu, Outcome, Text,
};
use geom::{Circle, Distance, Duration, Polygon};
use map_model::{IntersectionID, Phase, TurnPriority};
use std::collections::BTreeSet;
// Only draws a box when time_left is present
pub fn draw_signal_phase(
phase: &Phase,
i: IntersectionID,
time_left: Option<Duration>,
batch: &mut GeomBatch,
ctx: &DrawCtx,
) {
let protected_color = ctx
.cs
.get_def("turn protected by traffic signal", Color::GREEN);
let yield_color = ctx.cs.get_def(
"turn that can yield by traffic signal",
Color::rgba(255, 105, 180, 0.8),
);
let signal = ctx.map.get_traffic_signal(i);
for (id, crosswalk) in &ctx.draw_map.get_i(i).crosswalks {
if phase.get_priority_of_turn(*id, signal) == TurnPriority::Protected {
batch.append(crosswalk.clone());
}
}
match ctx.opts.traffic_signal_style {
TrafficSignalStyle::GroupArrows => {
for g in &phase.protected_groups {
if g.crosswalk.is_none() {
batch.push(
protected_color,
signal.turn_groups[g]
.geom
.make_arrow(BIG_ARROW_THICKNESS * 2.0)
.unwrap(),
);
}
}
for g in &phase.yield_groups {
if g.crosswalk.is_none() {
batch.extend(
yield_color,
signal.turn_groups[g]
.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
}
}
}
TrafficSignalStyle::Icons => {
for g in DrawTurnGroup::for_i(i, ctx.map) {
batch.push(ctx.cs.get("turn block background"), g.block.clone());
let arrow_color = match phase.get_priority_of_group(g.id) {
TurnPriority::Protected => ctx.cs.get("turn protected by traffic signal"),
TurnPriority::Yield => ctx
.cs
.get("turn that can yield by traffic signal")
.alpha(1.0),
TurnPriority::Banned => ctx.cs.get("turn not in current phase"),
};
batch.push(arrow_color, g.arrow.clone());
}
}
TrafficSignalStyle::IndividualTurnArrows => {
for turn in ctx.map.get_turns_in_intersection(i) {
if turn.between_sidewalks() {
continue;
}
match phase.get_priority_of_turn(turn.id, signal) {
TurnPriority::Protected => {
batch.push(
protected_color,
turn.geom.make_arrow(BIG_ARROW_THICKNESS * 2.0).unwrap(),
);
}
TurnPriority::Yield => |
TurnPriority::Banned => {}
}
}
}
}
if time_left.is_none() {
return;
}
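// Countdown display: inside the grey box, a red and a green circle flank a grey circle
// that is overlaid with a yellow partial circle showing the fraction of the phase remaining.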
let radius = Distance::meters(0.5);
let box_width = (2.5 * radius).inner_meters();
let box_height = (6.5 * radius).inner_meters();
let center = ctx.map.get_i(i).polygon.center();
let top_left = center.offset(-box_width / 2.0, -box_height / 2.0);
let percent = time_left.unwrap() / phase.duration;
// TODO Tune colors.
batch.push(
ctx.cs.get_def("traffic signal box", Color::grey(0.5)),
Polygon::rectangle(box_width, box_height).translate(top_left.x(), top_left.y()),
);
batch.push(
Color::RED,
Circle::new(center.offset(0.0, -2.0 * radius.inner_meters()), radius).to_polygon(),
);
batch.push(Color::grey(0.4), Circle::new(center, radius).to_polygon());
batch.push(
Color::YELLOW,
Circle::new(center, radius).to_partial_polygon(percent),
);
batch.push(
Color::GREEN,
Circle::new(center.offset(0.0, 2.0 * radius.inner_meters()), radius).to_polygon(),
);
}
pub struct TrafficSignalDiagram {
pub i: IntersectionID,
composite: Composite,
current_phase: usize,
}
impl TrafficSignalDiagram {
pub fn new(
i: IntersectionID,
current_phase: usize,
ui: &UI,
ctx: &EventCtx,
) -> TrafficSignalDiagram {
TrafficSignalDiagram {
i,
composite: make_diagram(i, current_phase, ui, ctx),
current_phase,
}
}
pub fn event(&mut self, ctx: &mut EventCtx, ui: &mut UI, menu: &mut ModalMenu) {
if self.current_phase != 0 && menu.action("select previous phase") {
self.change_phase(self.current_phase - 1, ui, ctx);
}
if self.current_phase != ui.primary.map.get_traffic_signal(self.i).phases.len() - 1
&& menu.action("select next phase")
{
self.change_phase(self.current_phase + 1, ui, ctx);
}
match self.composite.event(ctx) {
Some(Outcome::Clicked(x)) => {
self.change_phase(x["phase ".len()..].parse::<usize>().unwrap() - 1, ui, ctx);
}
None => {}
}
}
fn change_phase(&mut self, idx: usize, ui: &UI, ctx: &EventCtx) {
if self.current_phase != idx {
let preserve_scroll = self.composite.preserve_scroll(ctx);
self.current_phase = idx;
self.composite = make_diagram(self.i, self.current_phase, ui, ctx);
self.composite.restore_scroll(ctx, preserve_scroll);
}
}
pub fn current_phase(&self) -> usize {
self.current_phase
}
pub fn draw(&self, g: &mut GfxCtx) {
self.composite.draw(g);
}
}
fn make_diagram(i: IntersectionID, selected: usize, ui: &UI, ctx: &EventCtx) -> Composite {
// Slightly inaccurate -- the turn rendering may slightly exceed the intersection polygon --
// but this is close enough.
let bounds = ui.primary.map.get_i(i).polygon.get_bounds();
// Pick a zoom so that we fit some percentage of the screen
let zoom = 0.2 * ctx.canvas.window_width / (bounds.max_x - bounds.min_x);
let bbox = Polygon::rectangle(
zoom * (bounds.max_x - bounds.min_x),
zoom * (bounds.max_y - bounds.min_y),
);
let signal = ui.primary.map.get_traffic_signal(i);
let mut col = vec![ManagedWidget::draw_text(ctx, {
let mut txt = Text::new();
txt.add(Line(i.to_string()).roboto());
let road_names = ui
.primary
.map
.get_i(i)
.roads
.iter()
.map(|r| ui.primary.map.get_r(*r).get_name())
.collect::<BTreeSet<_>>();
let len = road_names.len();
// TODO Some kind of reusable TextStyle thing
// TODO Need to wrap this
txt.add(Line("").roboto().size(21).fg(Color::WHITE.alpha(0.54)));
for (idx, n) in road_names.into_iter().enumerate() {
txt.append(Line(n).roboto().fg(Color::WHITE.alpha(0.54)));
if idx != len - 1 {
txt.append(Line(", ").roboto().fg(Color::WHITE.alpha(0.54)));
}
}
txt.add(Line(format!("{} phases", signal.phases.len())));
txt.add(Line(""));
txt.add(Line(format!("Signal offset: {}", signal.offset)));
txt.add(Line(format!("One cycle lasts {}", signal.cycle_length())));
txt
})];
for (idx, phase) in signal.phases.iter().enumerate() {
col.push(
ManagedWidget::row(vec![
ManagedWidget::draw_text(ctx, Text::from(Line(format!("#{}", idx + 1)))),
ManagedWidget::draw_text(ctx, Text::from(Line(phase.duration.to_string()))),
])
.margin(5)
.evenly_spaced(),
);
let mut orig_batch = GeomBatch::new();
draw_signal_phase(phase, i, None, &mut orig_batch, &ui.draw_ctx());
let mut normal = GeomBatch::new();
// TODO Ideally no background here, but we have to force the dimensions of normal and
// hovered to be the same. For some reason the bbox is slightly different.
if idx == selected {
normal.push(Color::RED.alpha(0.15), bbox.clone());
} else {
normal.push(Color::CYAN.alpha(0.05), bbox.clone());
}
// Move to the origin and apply zoom
for (color, poly) in orig_batch.consume() {
normal.push(
color,
poly.translate(-bounds.min_x, -bounds.min_y).scale(zoom),
);
}
let mut hovered = GeomBatch::new();
hovered.push(Color::RED.alpha(0.95), bbox.clone());
hovered.append(normal.clone());
col.push(
ManagedWidget::btn(Button::new(
DrawBoth::new(ctx, normal, Vec::new()),
DrawBoth::new(ctx, hovered, Vec::new()),
None,
&format!("phase {}", idx + 1),
bbox.clone(),
))
.margin(5),
);
}
Composite::scrollable(ctx, ManagedWidget::col(col).bg(Color::hex("#545454")))
}
| {
batch.extend(
yield_color,
turn.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
} | conditional_block |
traffic_signal.rs | use crate::options::TrafficSignalStyle;
use crate::render::{DrawCtx, DrawTurnGroup, BIG_ARROW_THICKNESS};
use crate::ui::UI;
use ezgui::{
Button, Color, Composite, DrawBoth, EventCtx, GeomBatch, GfxCtx, Line, ManagedWidget,
ModalMenu, Outcome, Text,
};
use geom::{Circle, Distance, Duration, Polygon};
use map_model::{IntersectionID, Phase, TurnPriority};
use std::collections::BTreeSet;
// Only draws a box when time_left is present
pub fn draw_signal_phase(
phase: &Phase,
i: IntersectionID,
time_left: Option<Duration>,
batch: &mut GeomBatch,
ctx: &DrawCtx,
) {
let protected_color = ctx
.cs
.get_def("turn protected by traffic signal", Color::GREEN);
let yield_color = ctx.cs.get_def(
"turn that can yield by traffic signal",
Color::rgba(255, 105, 180, 0.8),
);
let signal = ctx.map.get_traffic_signal(i);
for (id, crosswalk) in &ctx.draw_map.get_i(i).crosswalks {
if phase.get_priority_of_turn(*id, signal) == TurnPriority::Protected {
batch.append(crosswalk.clone());
}
}
match ctx.opts.traffic_signal_style {
TrafficSignalStyle::GroupArrows => {
for g in &phase.protected_groups {
if g.crosswalk.is_none() {
batch.push(
protected_color,
signal.turn_groups[g]
.geom
.make_arrow(BIG_ARROW_THICKNESS * 2.0)
.unwrap(),
);
}
}
for g in &phase.yield_groups {
if g.crosswalk.is_none() {
batch.extend(
yield_color,
signal.turn_groups[g]
.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
}
}
}
TrafficSignalStyle::Icons => {
for g in DrawTurnGroup::for_i(i, ctx.map) {
batch.push(ctx.cs.get("turn block background"), g.block.clone());
let arrow_color = match phase.get_priority_of_group(g.id) {
TurnPriority::Protected => ctx.cs.get("turn protected by traffic signal"),
TurnPriority::Yield => ctx
.cs
.get("turn that can yield by traffic signal")
.alpha(1.0),
TurnPriority::Banned => ctx.cs.get("turn not in current phase"),
};
batch.push(arrow_color, g.arrow.clone());
}
}
TrafficSignalStyle::IndividualTurnArrows => {
for turn in ctx.map.get_turns_in_intersection(i) {
if turn.between_sidewalks() {
continue;
}
match phase.get_priority_of_turn(turn.id, signal) {
TurnPriority::Protected => {
batch.push(
protected_color,
turn.geom.make_arrow(BIG_ARROW_THICKNESS * 2.0).unwrap(),
);
}
TurnPriority::Yield => {
batch.extend(
yield_color,
turn.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
}
TurnPriority::Banned => {}
}
}
}
}
if time_left.is_none() {
return;
}
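// Countdown display: inside the grey box, a red and a green circle flank a grey circle
// that is overlaid with a yellow partial circle showing the fraction of the phase remaining.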
let radius = Distance::meters(0.5);
let box_width = (2.5 * radius).inner_meters();
let box_height = (6.5 * radius).inner_meters();
let center = ctx.map.get_i(i).polygon.center();
let top_left = center.offset(-box_width / 2.0, -box_height / 2.0);
let percent = time_left.unwrap() / phase.duration;
// TODO Tune colors.
batch.push(
ctx.cs.get_def("traffic signal box", Color::grey(0.5)),
Polygon::rectangle(box_width, box_height).translate(top_left.x(), top_left.y()),
);
batch.push(
Color::RED,
Circle::new(center.offset(0.0, -2.0 * radius.inner_meters()), radius).to_polygon(),
);
batch.push(Color::grey(0.4), Circle::new(center, radius).to_polygon());
batch.push(
Color::YELLOW,
Circle::new(center, radius).to_partial_polygon(percent),
);
batch.push(
Color::GREEN,
Circle::new(center.offset(0.0, 2.0 * radius.inner_meters()), radius).to_polygon(),
);
}
pub struct TrafficSignalDiagram {
pub i: IntersectionID,
composite: Composite,
current_phase: usize,
}
impl TrafficSignalDiagram {
pub fn new(
i: IntersectionID,
current_phase: usize,
ui: &UI,
ctx: &EventCtx,
) -> TrafficSignalDiagram {
TrafficSignalDiagram {
i,
composite: make_diagram(i, current_phase, ui, ctx),
current_phase,
}
}
pub fn | (&mut self, ctx: &mut EventCtx, ui: &mut UI, menu: &mut ModalMenu) {
if self.current_phase != 0 && menu.action("select previous phase") {
self.change_phase(self.current_phase - 1, ui, ctx);
}
if self.current_phase != ui.primary.map.get_traffic_signal(self.i).phases.len() - 1
&& menu.action("select next phase")
{
self.change_phase(self.current_phase + 1, ui, ctx);
}
match self.composite.event(ctx) {
Some(Outcome::Clicked(x)) => {
self.change_phase(x["phase ".len()..].parse::<usize>().unwrap() - 1, ui, ctx);
}
None => {}
}
}
fn change_phase(&mut self, idx: usize, ui: &UI, ctx: &EventCtx) {
if self.current_phase != idx {
let preserve_scroll = self.composite.preserve_scroll(ctx);
self.current_phase = idx;
self.composite = make_diagram(self.i, self.current_phase, ui, ctx);
self.composite.restore_scroll(ctx, preserve_scroll);
}
}
pub fn current_phase(&self) -> usize {
self.current_phase
}
pub fn draw(&self, g: &mut GfxCtx) {
self.composite.draw(g);
}
}
fn make_diagram(i: IntersectionID, selected: usize, ui: &UI, ctx: &EventCtx) -> Composite {
// Slightly inaccurate -- the turn rendering may slightly exceed the intersection polygon --
// but this is close enough.
let bounds = ui.primary.map.get_i(i).polygon.get_bounds();
// Pick a zoom so that we fit some percentage of the screen
let zoom = 0.2 * ctx.canvas.window_width / (bounds.max_x - bounds.min_x);
let bbox = Polygon::rectangle(
zoom * (bounds.max_x - bounds.min_x),
zoom * (bounds.max_y - bounds.min_y),
);
let signal = ui.primary.map.get_traffic_signal(i);
let mut col = vec![ManagedWidget::draw_text(ctx, {
let mut txt = Text::new();
txt.add(Line(i.to_string()).roboto());
let road_names = ui
.primary
.map
.get_i(i)
.roads
.iter()
.map(|r| ui.primary.map.get_r(*r).get_name())
.collect::<BTreeSet<_>>();
let len = road_names.len();
// TODO Some kind of reusable TextStyle thing
// TODO Need to wrap this
txt.add(Line("").roboto().size(21).fg(Color::WHITE.alpha(0.54)));
for (idx, n) in road_names.into_iter().enumerate() {
txt.append(Line(n).roboto().fg(Color::WHITE.alpha(0.54)));
if idx != len - 1 {
txt.append(Line(", ").roboto().fg(Color::WHITE.alpha(0.54)));
}
}
txt.add(Line(format!("{} phases", signal.phases.len())));
txt.add(Line(""));
txt.add(Line(format!("Signal offset: {}", signal.offset)));
txt.add(Line(format!("One cycle lasts {}", signal.cycle_length())));
txt
})];
for (idx, phase) in signal.phases.iter().enumerate() {
col.push(
ManagedWidget::row(vec![
ManagedWidget::draw_text(ctx, Text::from(Line(format!("#{}", idx + 1)))),
ManagedWidget::draw_text(ctx, Text::from(Line(phase.duration.to_string()))),
])
.margin(5)
.evenly_spaced(),
);
let mut orig_batch = GeomBatch::new();
draw_signal_phase(phase, i, None, &mut orig_batch, &ui.draw_ctx());
let mut normal = GeomBatch::new();
// TODO Ideally no background here, but we have to force the dimensions of normal and
// hovered to be the same. For some reason the bbox is slightly different.
if idx == selected {
normal.push(Color::RED.alpha(0.15), bbox.clone());
} else {
normal.push(Color::CYAN.alpha(0.05), bbox.clone());
}
// Move to the origin and apply zoom
for (color, poly) in orig_batch.consume() {
normal.push(
color,
poly.translate(-bounds.min_x, -bounds.min_y).scale(zoom),
);
}
let mut hovered = GeomBatch::new();
hovered.push(Color::RED.alpha(0.95), bbox.clone());
hovered.append(normal.clone());
col.push(
ManagedWidget::btn(Button::new(
DrawBoth::new(ctx, normal, Vec::new()),
DrawBoth::new(ctx, hovered, Vec::new()),
None,
&format!("phase {}", idx + 1),
bbox.clone(),
))
.margin(5),
);
}
Composite::scrollable(ctx, ManagedWidget::col(col).bg(Color::hex("#545454")))
}
| event | identifier_name |
source.py | import copy
from dataclasses import dataclass
from typing import Literal, Union
import matplotlib.pyplot as plt
import numpy as np
from astropy.utils import lazyproperty
from matplotlib.patches import Circle, Ellipse
from photutils.aperture import *
from photutils.isophote import Ellipse as IsoEllipse
from photutils.isophote import EllipseGeometry
color = [0.51, 0.86, 1.0]
__all__ = [
"Source",
"PointSource",
"ExtendedSource",
"TraceSource",
"auto_source",
"Sources",
]
def distance(p1, p2):
return np.sqrt(np.power(p1[0] - p2[0], 2) + np.power(p1[1] - p2[1], 2))
def clean_stars_positions(positions, tolerance=50):
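# Keep only the positions whose closest neighbour is at least `tolerance` pixels away.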
keep = []
distance_to_others = np.array(
[[distance(v, w) for w in positions] for v in positions]
)
for i, _distances in enumerate(distance_to_others):
_distances[i] = np.inf
close_stars = np.flatnonzero(_distances < tolerance)
if len(close_stars) == 0:
keep.append(i)
return np.unique(keep)
# Note: Why not use photutils.segmentation.SourceCatalog?
# source: https://photutils.readthedocs.io/en/stable/api/photutils.segmentation.SourceCatalog.html#photutils.segmentation.SourceCatalog
#
# Main reason is full control and no need to subclass SourceCatalog. Reasons:
# - Ability to implement Source.plot and Source.aperture differently depending on the type of source
# - Ability to easily instantiate a fake/incomplete source only defined by its coords (output of many detection algorithms like DAOPHOT)
# - We still use it, as region, so that users have access to it if needed
# - I don't like this as_scalar behavior, I prefer separate Source and Sources
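#
# Typical use (illustrative sketch; `label_img` and `img` are assumed to come from the
# user's own detection step, e.g. skimage.measure.label on a thresholded image):
#
#     from skimage.measure import regionprops
#     regions = regionprops(label_img, intensity_image=img)
#     sources = Sources([auto_source(r, i) for i, r in enumerate(regions)])
#     sources.plot()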
@dataclass
class Source:
"""A object containing a source information
This is a Python Data Class, so that most attributes described below can be used as
keyword-arguments when instantiated
"""
a: float = 1.0
"""Semi-major axis of the source"""
b: float = 1.0
"""Semi-minor axis of the source"""
orientation: float = 0.0
"""Orientation of the source in radians"""
coords: np.ndarray = None
"""(x,y) pixel coordinates of the source"""
peak: float = 0.0
"""Peak ADU value of the source"""
i: int = None
"""Index of the source"""
discarded: bool = False
"""Whether source is discarded"""
@classmethod
def from_region(cls, region, keep_region: bool = False, **kwargs):
"""Source from region
Parameters
----------
region : skimage.measure.RegionProperties
An skimage RegionProperties containing the source
keep_region: bool, optional
whether to keep region object in source
**kwargs:
other sources attributes to set
"""
source = cls(
a=region.axis_major_length / 2,
b=region.axis_minor_length / 2,
orientation=np.pi / 2 - region.orientation,
coords=np.array(region.centroid_weighted[::-1]),
peak=region.intensity_max,
**kwargs,
)
return source
@property
def vertexes(self):
"""Coordinates of the Ellipse vertexes, endpoints of the major axis
Returns
-------
np.array
vertexes coordinates
"""
theta = self.orientation
shifts = np.array([np.cos(theta), np.sin(theta)]) * self.a
return self.coords + (shifts[:, None] * [-1, 1]).T
@property
def co_vertexes(self):
"""Coordinates of the Ellipse co-vertexes, endpoints of the minor axis
Returns
-------
np.array
co-vertexes coordinates
"""
theta = self.orientation + np.pi / 2
shifts = np.array([np.cos(theta), np.sin(theta)]) * self.b
return self.coords + (shifts[:, None] * [-1, 1]).T
@lazyproperty
def eccentricity(self):
"""Eccentricity of the source
Returns
-------
float
"""
return self.b / self.a
def copy(self):
"""Return a copy of the Source
Returns
-------
Source
copy
"""
copy = self.__class__()
copy.a = self.a
copy.b = self.b
copy.peak = self.peak
copy.orientation = self.orientation
copy.i = self.i
copy.coords = self.coords.copy()
return copy
def __copy__(self):
return self.copy()
def plot_circle(self, radius, c=color, ax=None, label=True, fontsize=12, **kwargs):
"""Plot a circle centered on source
Parameters
----------
radius : float
radius of the circle in pixels
c : str, optional
color of the circle, by default color
ax : Axe, optional
pyplot axe in which to plot the circle, by default None
label : bool, optional
whether to display the Source.i index, by default True
fontsize : int, optional
Font size for the source index, by default 12
"""
if ax is None:
ax = plt.gca()
circle = Circle(self.coords, radius, fill=None, ec=c, **kwargs)
ax.add_artist(circle)
if label and self.i is not None:
plt.text(
*(np.array(self.coords) - [0, 1.5 * radius]),
self.i,
c=c,
ha="center",
va="top",
fontsize=fontsize,
)
def plot_ellipse(self, a=None, c=color, ax=None, label=True, fontsize=12, **kwargs):
"""Plot an ellipse centered on source, with semi-major/minor length defined by the source itself
Parameters
----------
a : float, optional
full width (major axis) of the plotted ellipse; if None, defaults to `2.2 * Source.a`
c : str, optional
color of the circle, by default color
ax : Axe, optional
pyplot axe in which to plot the circle, by default None
label : bool, optional
whether to display the Source.i index, by default True
fontsize : int, optional
Font size for the source index, by default 12
"""
if ax is None:
ax = plt.gca()
if a is None:
a = 2 * self.a * 1.1
e = Ellipse(
xy=self.coords,
width=a,
height=a * self.eccentricity,
angle=np.rad2deg(self.orientation),
**kwargs,
)
e.set_facecolor("none")
e.set_edgecolor(c)
ax.add_artist(e)
if label and self.i is not None:
rad = self.orientation
label_coord = self.coords + [0, -(np.abs(self.a * rad) + self.b)]
plt.text(
*label_coord, self.i, c=c, ha="center", va="top", fontsize=fontsize
)
def circular_aperture(self, r, scale=True):
"""`photutils.aperture.CircularAperture` centered on the source
Parameters
----------
r : float
radius
scale : bool, optional
whether to scale r to Source.a, by default True
Returns
-------
photutils.aperture.CircularAperture
"""
if scale:
radius = r * self.a
else:
radius = r
return CircularAperture(self.coords, float(np.abs(radius)))
def elliptical_aperture(self, r, scale=True):
"""`photutils.aperture.EllipticalAperture` centered on the source
Parameters
----------
r : float
semi-major axis of the aperture. Semi minor will be `r*Source.b/Source.a`
scale : bool, optional
whether to scale r to Source.a, by default True
Returns
-------
photutils.aperture.EllipticalAperture
"""
if scale:
a, b = r * self.a, r * self.b
else:
a, b = r, r * self.eccentricity
return EllipticalAperture(self.coords, a, b, self.orientation)
def rectangular_aperture(self, r, scale=True):
if scale:
a, b = 2 * r * self.a, 2 * r * self.b
else:
a, b = 2 * r, 2 * r * self.eccentricity
a = np.max([0.01, a])
b = np.max([0.01, b])
return RectangularAperture(
self.coords, float(np.abs(a)), float(np.abs(b)), self.orientation
)
def circular_annulus(self, r0, r1, scale=False):
if scale:
r0 = r0 * self.a
r1 = r1 * self.a
else:
r0 = r0
r1 = r1
return CircularAnnulus(self.coords, r0, r1)
def elliptical_annulus(self, r0, r1, scale=False):
if scale:
a0 = r0 * self.a
a1, b1 = r1 * self.a, r1 * self.b
else:
a0 = r0
a1, b1 = r1, r1 * self.eccentricity
return EllipticalAnnulus(self.coords, a0, a1, b1, theta=self.orientation)
def rectangular_annulus(self, r0, r1, scale=False):
if scale:
a0 = 2 * r0 * self.a
a1, b1 = 2 * r1 * self.a, 2 * r1 * self.b
else:
a0 = r0
a1, b1 = r1, r1 * self.eccentricity
a0 = np.max([0.01, a0])
a1 = np.max([a0 + 0.001, a1])
b1 = np.max([0.01, b1])
return RectangularAnnulus(self.coords, a0, a1, b1, theta=self.orientation)
def fit_isophotes(self, debug=False):
"""Fit a photutils.isophote.Ellipse to the source. Requires the source to be instantiated from a skimage RegionProperties
Parameters
----------
debug : bool, optional
whether to plot the result for debugging, by default False
Returns
-------
output of photutils.isophote.Ellipse.fit_image
"""
data = self._region.image_intensity
y0, x0 = np.unravel_index(np.argmax(data), data.shape)
geometry = EllipseGeometry(
x0, y0, sma=self.a / 2, eps=self.eccentricity, pa=self.orientation
)
ellipse = IsoEllipse(data - np.median(data), geometry)
isolist = ellipse.fit_image()
if debug:
plt.imshow(data)
smas = np.linspace(3, 20, 15)
for sma in smas:
iso = isolist.get_closest(sma)
(
x,
y,
) = iso.sampled_coordinates()
plt.plot(x, y, color="white")
return isolist
@property
def _symbol(self):
return "?"
@property
def _desc(self):
return (
f"{self._symbol} {self.__class__.__name__}" + f" {self.i}"
if self.i is not None
else ""
)
def _repr_dict(self, n=8):
return {
"coords": f"{self.coords[0]:.2f}".rjust(n)
+ f"{self.coords[1]:.2f}".rjust(n),
"a, b": f"{self.a:.2f}".rjust(n) + f"{self.b:.2f}".rjust(n),
"e": f"{self.b/self.a:.2f}".rjust(n),
}
def __str__(self):
table = "\n".join(
[f" {n}".ljust(8) + f"{v}" for n, v in self._repr_dict().items()]
)
return f"{self._desc}\n {'-'*(len(self._desc)-2)}\n{table}"
def centroid_isophote(self):
isolist = self.fit_isophotes()
origin = np.array(self._region.bbox)[0:2][::-1]
return np.array([isolist[0].x0, isolist[0].y0]) + origin
def centroid_max(self):
y0, x0 = np.unravel_index(
np.argmax(self._region.image_intensity), self._region.image.shape
)
dy, dx, _, _ = self._region.bbox
return np.array([x0 + dx, y0 + dy])
@property
def area(self):
"""Area of the source as :code:`a*b`
Returns
-------
float
"""
return self.a * self.b
def auto_source(region, i=None, trace=0.3, extended=0.9, discard=False):
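# Classify a detected region from its axis ratio b/a: very elongated regions
# (ratio <= trace) become TraceSource, moderately elongated ones ExtendedSource,
# and near-round ones (ratio > extended) PointSource. Degenerate regions with a
# zero-length major axis are either discarded or treated as point sources.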
if region is None:
return DiscardedSource.from_region(region, i=i)
a = region.axis_major_length
b = region.axis_minor_length
if a == 0.0:
if discard:
return DiscardedSource.from_region(region, i=i)
else:
return PointSource.from_region(region, i=i)
eccentricity = b / a
if eccentricity <= extended:
if eccentricity <= trace:
return TraceSource.from_region(region, i=i)
else:
return ExtendedSource.from_region(region, i=i)
else:
return PointSource.from_region(region, i=i)
class DiscardedSource(Source):
def __init__(self, region, i=None):
super().__init__(region, i=i)
self.discarded = True
def plot(self, ms=15, c="C0", ax=None, **kwargs):
if ax is None:
ax = plt.gca()
ax.plot(*self.coords, "x", c=c, ms=ms, **kwargs)
class PointSource(Source):
"""Point source (star)"""
@property
def _symbol(self):
return chr(8226)
def plot(self, radius=15, **kwargs):
"""Plot circle centered on source
Parameters
----------
radius : int, optional
radius, by default 15
"""
self.plot_circle(radius, **kwargs)
def aperture(self, r=1, scale=True):
return self.circular_aperture(r, scale=scale)
def annulus(self, r0=1.05, r1=1.4, scale=True):
return self.circular_annulus(r0, r1, scale=scale)
class | (Source):
"""Extended source (comet, galaxy or lensed source)"""
@property
def _symbol(self):
return chr(11053)
def plot(self, radius=None, **kwargs):
"""Plot Ellipse on source
Parameters
----------
radius : int, optional
width of the plotted ellipse, by default None (sized automatically from the source)
"""
self.plot_ellipse(radius, **kwargs)
def aperture(self, r=1, scale=True):
return self.elliptical_aperture(r, scale=scale)
def annulus(self, r0=1.05, r1=1.4, scale=True):
return self.elliptical_annulus(r0, r1, scale=scale)
class TraceSource(Source):
"""Trace source (diffracted spectrum, satellite streak or cosmic ray)"""
def plot(self, offset=10, ax=None, c=color, label=True, fontsize=12):
if ax is None:
ax = plt.gca()
ax.plot(*self.vertexes.T, c=c)
if label and self.i is not None:
label_coords = self.coords + [0, -offset]
plt.text(
*label_coords, self.i, c=c, ha="center", va="top", fontsize=fontsize
)
def aperture(self, r=1, scale=True):
return self.rectangular_aperture(r, scale=scale)
def annulus(self, r0=1.05, r1=1.4, scale=True):
return self.rectangular_annulus(r0, r1, scale=scale)
@dataclass
class Sources:
sources: list = None
"""List of sources"""
type: Literal["PointSource", None] = None
"""Source type"""
def __post_init__(self):
if self.sources is None:
self.sources = []
if isinstance(self.sources, np.ndarray):
if self.sources.dtype != object:
self.sources = [
PointSource(coords=s, i=i) for i, s in enumerate(self.sources)
]
self.type = "PointSource"
if self.type is not None:
for s in self.sources:
assert (
s.__class__.__name__ == self.type
), f"list can only contain {self.type}"
self.sources = np.array(self.sources)
def __getitem__(self, i):
if np.isscalar(i):
i = int(i)
return self.sources[i]
else:
return self.__class__(self.sources[i])
def __len__(self):
return len(self.sources)
def __str__(self):
return str(self.sources)
def __repr__(self):
return self.sources.__repr__()
def copy(self):
return copy.deepcopy(self)
def __copy__(self):
return self.copy()
@property
def coords(self):
return np.array([source.coords for source in self.sources])
@coords.setter
def coords(self, new_coords):
for source, new_coord in zip(self.sources, new_coords):
source.coords = new_coord
def apertures(self, r, scale=False):
if self.type == "PointSource":
return CircularAperture(self.coords, r)
else:
return [source.aperture(r, scale=scale) for source in self.sources]
def annulus(self, rin, rout, scale=False):
if self.type == "PointSource":
return CircularAnnulus(self.coords, rin, rout)
else:
return [source.annulus(rin, rout, scale=scale) for source in self.sources]
def plot(self, *args, **kwargs):
for s in self.sources:
s.plot(*args, **kwargs)
| ExtendedSource | identifier_name |
source.py | import copy
from dataclasses import dataclass
from typing import Literal, Union
import matplotlib.pyplot as plt
import numpy as np
from astropy.utils import lazyproperty
from matplotlib.patches import Circle, Ellipse
from photutils.aperture import *
from photutils.isophote import Ellipse as IsoEllipse
from photutils.isophote import EllipseGeometry
color = [0.51, 0.86, 1.0]
__all__ = [
"Source",
"PointSource",
"ExtendedSource",
"TraceSource",
"auto_source",
"Sources",
]
def distance(p1, p2):
return np.sqrt(np.power(p1[0] - p2[0], 2) + np.power(p1[1] - p2[1], 2))
def clean_stars_positions(positions, tolerance=50):
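# Keep only the positions whose closest neighbour is at least `tolerance` pixels away.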
keep = []
distance_to_others = np.array(
[[distance(v, w) for w in positions] for v in positions]
)
for i, _distances in enumerate(distance_to_others):
_distances[i] = np.inf
close_stars = np.flatnonzero(_distances < tolerance)
if len(close_stars) == 0:
keep.append(i)
return np.unique(keep)
# Note: Why not use photutils.segmentation.SourceCatalog?
# source: https://photutils.readthedocs.io/en/stable/api/photutils.segmentation.SourceCatalog.html#photutils.segmentation.SourceCatalog
#
# Main reason is full control and no need to subclass SourceCatalog. Reasons:
# - Ability to implement Source.plot and Source.aperture differently depending on the type of source
# - Ability to easily instantiate a fake/incomplete source only defined by its coords (output of many detection algorithms like DAOPHOT)
# - We still use it, as region, so that users have access to it if needed
# - I don't like this as_scalar behavior, I prefer separate Source and Sources
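#
# Typical use (illustrative sketch; `label_img` and `img` are assumed to come from the
# user's own detection step, e.g. skimage.measure.label on a thresholded image):
#
#     from skimage.measure import regionprops
#     regions = regionprops(label_img, intensity_image=img)
#     sources = Sources([auto_source(r, i) for i, r in enumerate(regions)])
#     sources.plot()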
@dataclass
class Source:
"""A object containing a source information
This is a Python Data Class, so that most attributes described below can be used as
keyword-arguments when instantiated
"""
a: float = 1.0
"""Semi-major axis of the source"""
b: float = 1.0
"""Semi-minor axis of the source"""
orientation: float = 0.0
"""Orientation of the source in radians"""
coords: np.ndarray = None
"""(x,y) pixel coordinates of the source"""
peak: float = 0.0
"""Peak ADU value of the source"""
i: int = None
"""Index of the source"""
discarded: bool = False
"""Whether source is discarded"""
@classmethod
def from_region(cls, region, keep_region: bool = False, **kwargs):
"""Source from region
Parameters
----------
region : skimage.measure.RegionProperties
An skimage RegionProperties containing the source
keep_region: bool, optional
whether to keep region object in source
**kwargs:
other sources attributes to set
"""
source = cls(
a=region.axis_major_length / 2,
b=region.axis_minor_length / 2,
orientation=np.pi / 2 - region.orientation,
coords=np.array(region.centroid_weighted[::-1]),
peak=region.intensity_max,
**kwargs,
)
return source
@property
def vertexes(self):
"""Coordinates of the Ellipse vertexes, endpoints of the major axis
Returns
-------
np.array
vertexes coordinates
"""
theta = self.orientation
shifts = np.array([np.cos(theta), np.sin(theta)]) * self.a
return self.coords + (shifts[:, None] * [-1, 1]).T
@property
def co_vertexes(self):
"""Coordinates of the Ellipse co-vertexes, endpoints of the minor axis
Returns
-------
np.array
co-vertexes coordinates
"""
theta = self.orientation + np.pi / 2
shifts = np.array([np.cos(theta), np.sin(theta)]) * self.b
return self.coords + (shifts[:, None] * [-1, 1]).T
@lazyproperty
def eccentricity(self):
"""Eccentricity of the source
Returns
-------
float
"""
return self.b / self.a
def copy(self):
"""Return a copy of the Source
Returns
-------
Source
copy
"""
copy = self.__class__()
copy.a = self.a
copy.b = self.b
copy.peak = self.peak
copy.orientation = self.orientation
copy.i = self.i
copy.coords = self.coords.copy()
return copy
def __copy__(self):
return self.copy()
def plot_circle(self, radius, c=color, ax=None, label=True, fontsize=12, **kwargs):
"""Plot a circle centered on source
Parameters
----------
radius : float
radius of the circle in pixels
c : str, optional
color of the circle, by default color
ax : Axe, optional
pyplot axe in which to plot the circle, by default None
label : bool, optional
whether to display the Source.i index, by default True
fontsize : int, optional
Font size for the source index, by default 12
"""
if ax is None:
ax = plt.gca()
circle = Circle(self.coords, radius, fill=None, ec=c, **kwargs)
ax.add_artist(circle)
if label and self.i is not None:
plt.text(
*(np.array(self.coords) - [0, 1.5 * radius]),
self.i,
c=c,
ha="center",
va="top",
fontsize=fontsize,
)
def plot_ellipse(self, a=None, c=color, ax=None, label=True, fontsize=12, **kwargs):
"""Plot an ellipse centered on source, with semi-major/minor length defined by the source itself
Parameters
----------
a : float, optional
full width (major axis) of the plotted ellipse; if None, defaults to `2.2 * Source.a`
c : str, optional
color of the circle, by default color
ax : Axe, optional
pyplot axe in which to plot the circle, by default None
label : bool, optional
whether to display the Source.i index, by default True
fontsize : int, optional
Font size for the source index, by default 12
"""
if ax is None:
ax = plt.gca()
if a is None:
a = 2 * self.a * 1.1
e = Ellipse(
xy=self.coords,
width=a,
height=a * self.eccentricity,
angle=np.rad2deg(self.orientation),
**kwargs,
)
e.set_facecolor("none")
e.set_edgecolor(c)
ax.add_artist(e)
if label and self.i is not None:
rad = self.orientation
label_coord = self.coords + [0, -(np.abs(self.a * rad) + self.b)]
plt.text(
*label_coord, self.i, c=c, ha="center", va="top", fontsize=fontsize
)
def circular_aperture(self, r, scale=True):
"""`photutils.aperture.CircularAperture` centered on the source
Parameters
----------
r : float
radius
scale : bool, optional
whether to scale r to Source.a, by default True
Returns
-------
photutils.aperture.CircularAperture
"""
if scale:
radius = r * self.a
else:
radius = r
return CircularAperture(self.coords, float(np.abs(radius)))
def elliptical_aperture(self, r, scale=True):
"""`photutils.aperture.EllipticalAperture` centered on the source
Parameters
----------
r : float
semi-major axis of the aperture. Semi minor will be `r*Source.b/Source.a`
scale : bool, optional
whether to scale r to Source.a, by default True
Returns
-------
photutils.aperture.EllipticalAperture
"""
if scale:
a, b = r * self.a, r * self.b
else:
a, b = r, r * self.eccentricity
return EllipticalAperture(self.coords, a, b, self.orientation)
def rectangular_aperture(self, r, scale=True):
if scale:
a, b = 2 * r * self.a, 2 * r * self.b
else:
a, b = 2 * r, 2 * r * self.eccentricity
a = np.max([0.01, a])
b = np.max([0.01, b])
return RectangularAperture(
self.coords, float(np.abs(a)), float(np.abs(b)), self.orientation
)
def circular_annulus(self, r0, r1, scale=False):
if scale:
r0 = r0 * self.a
r1 = r1 * self.a
else:
r0 = r0
r1 = r1
return CircularAnnulus(self.coords, r0, r1)
def elliptical_annulus(self, r0, r1, scale=False):
|
def rectangular_annulus(self, r0, r1, scale=False):
if scale:
a0 = 2 * r0 * self.a
a1, b1 = 2 * r1 * self.a, 2 * r1 * self.b
else:
a0 = r0
a1, b1 = r1, r1 * self.eccentricity
a0 = np.max([0.01, a0])
a1 = np.max([a0 + 0.001, a1])
b1 = np.max([0.01, b1])
return RectangularAnnulus(self.coords, a0, a1, b1, theta=self.orientation)
def fit_isophotes(self, debug=False):
"""Fit a photutils.isophote.Ellipse to the source. Requires the source to be instantiated from a skimage RegionProperties
Parameters
----------
debug : bool, optional
whether to plot the result for debugging, by default False
Returns
-------
output of photutils.isophote.Ellipse.fit_image
"""
data = self._region.image_intensity
y0, x0 = np.unravel_index(np.argmax(data), data.shape)
geometry = EllipseGeometry(
x0, y0, sma=self.a / 2, eps=self.eccentricity, pa=self.orientation
)
ellipse = IsoEllipse(data - np.median(data), geometry)
isolist = ellipse.fit_image()
if debug:
plt.imshow(data)
smas = np.linspace(3, 20, 15)
for sma in smas:
iso = isolist.get_closest(sma)
(
x,
y,
) = iso.sampled_coordinates()
plt.plot(x, y, color="white")
return isolist
@property
def _symbol(self):
return "?"
@property
def _desc(self):
return (
f"{self._symbol} {self.__class__.__name__}" + f" {self.i}"
if self.i is not None
else ""
)
def _repr_dict(self, n=8):
return {
"coords": f"{self.coords[0]:.2f}".rjust(n)
+ f"{self.coords[1]:.2f}".rjust(n),
"a, b": f"{self.a:.2f}".rjust(n) + f"{self.b:.2f}".rjust(n),
"e": f"{self.b/self.a:.2f}".rjust(n),
}
def __str__(self):
table = "\n".join(
[f" {n}".ljust(8) + f"{v}" for n, v in self._repr_dict().items()]
)
return f"{self._desc}\n {'-'*(len(self._desc)-2)}\n{table}"
def centroid_isophote(self):
isolist = self.fit_isophotes()
origin = np.array(self._region.bbox)[0:2][::-1]
return np.array([isolist[0].x0, isolist[0].y0]) + origin
def centroid_max(self):
y0, x0 = np.unravel_index(
np.argmax(self._region.image_intensity), self._region.image.shape
)
dy, dx, _, _ = self._region.bbox
return np.array([x0 + dx, y0 + dy])
@property
def area(self):
"""Area of the source as :code:`a*b`
Returns
-------
float
"""
return self.a * self.b
def auto_source(region, i=None, trace=0.3, extended=0.9, discard=False):
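# Classify a detected region from its axis ratio b/a: very elongated regions
# (ratio <= trace) become TraceSource, moderately elongated ones ExtendedSource,
# and near-round ones (ratio > extended) PointSource. Degenerate regions with a
# zero-length major axis are either discarded or treated as point sources.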
if region is None:
return DiscardedSource.from_region(region, i=i)
a = region.axis_major_length
b = region.axis_minor_length
if a == 0.0:
if discard:
return DiscardedSource.from_region(region, i=i)
else:
return PointSource.from_region(region, i=i)
eccentricity = b / a
if eccentricity <= extended:
if eccentricity <= trace:
return TraceSource.from_region(region, i=i)
else:
return ExtendedSource.from_region(region, i=i)
else:
return PointSource.from_region(region, i=i)
class DiscardedSource(Source):
def __init__(self, region, i=None):
super().__init__(region, i=i)
self.discarded = True
def plot(self, ms=15, c="C0", ax=None, **kwargs):
if ax is None:
ax = plt.gca()
ax.plot(*self.coords, "x", c=c, ms=ms, **kwargs)
class PointSource(Source):
"""Point source (star)"""
@property
def _symbol(self):
return chr(8226)
def plot(self, radius=15, **kwargs):
"""Plot circle centered on source
Parameters
----------
radius : int, optional
radius, by default 15
"""
self.plot_circle(radius, **kwargs)
def aperture(self, r=1, scale=True):
return self.circular_aperture(r, scale=scale)
def annulus(self, r0=1.05, r1=1.4, scale=True):
return self.circular_annulus(r0, r1, scale=scale)
class ExtendedSource(Source):
"""Extended source (comet, galaxy or lensed source)"""
@property
def _symbol(self):
return chr(11053)
def plot(self, radius=None, **kwargs):
"""Plot Ellipse on source
Parameters
----------
radius : int, optional
width of the plotted ellipse, by default None (sized automatically from the source)
"""
self.plot_ellipse(radius, **kwargs)
def aperture(self, r=1, scale=True):
return self.elliptical_aperture(r, scale=scale)
def annulus(self, r0=1.05, r1=1.4, scale=True):
return self.elliptical_annulus(r0, r1, scale=scale)
class TraceSource(Source):
"""Trace source (diffracted spectrum, satellite streak or cosmic ray)"""
def plot(self, offset=10, ax=None, c=color, label=True, fontsize=12):
if ax is None:
ax = plt.gca()
ax.plot(*self.vertexes.T, c=c)
if label and self.i is not None:
label_coords = self.coords + [0, -offset]
plt.text(
*label_coords, self.i, c=c, ha="center", va="top", fontsize=fontsize
)
def aperture(self, r=1, scale=True):
return self.rectangular_aperture(r, scale=scale)
def annulus(self, r0=1.05, r1=1.4, scale=True):
return self.rectangular_annulus(r0, r1, scale=scale)
@dataclass
class Sources:
sources: list = None
"""List of sources"""
type: Literal["PointSource", None] = None
"""Source type"""
def __post_init__(self):
if self.sources is None:
self.sources = []
if isinstance(self.sources, np.ndarray):
if self.sources.dtype != object:
self.sources = [
PointSource(coords=s, i=i) for i, s in enumerate(self.sources)
]
self.type = "PointSource"
if self.type is not None:
for s in self.sources:
assert (
s.__class__.__name__ == self.type
), f"list can only contain {self.type}"
self.sources = np.array(self.sources)
def __getitem__(self, i):
if np.isscalar(i):
i = int(i)
return self.sources[i]
else:
return self.__class__(self.sources[i])
def __len__(self):
return len(self.sources)
def __str__(self):
return str(self.sources)
def __repr__(self):
return self.sources.__repr__()
def copy(self):
return copy.deepcopy(self)
def __copy__(self):
return self.copy()
@property
def coords(self):
return np.array([source.coords for source in self.sources])
@coords.setter
def coords(self, new_coords):
for source, new_coord in zip(self.sources, new_coords):
source.coords = new_coord
def apertures(self, r, scale=False):
if self.type == "PointSource":
return CircularAperture(self.coords, r)
else:
return [source.aperture(r, scale=scale) for source in self.sources]
def annulus(self, rin, rout, scale=False):
if self.type == "PointSource":
return CircularAnnulus(self.coords, rin, rout)
else:
return [source.annulus(rin, rout, scale=scale) for source in self.sources]
def plot(self, *args, **kwargs):
for s in self.sources:
s.plot(*args, **kwargs)
| if scale:
a0 = r0 * self.a
a1, b1 = r1 * self.a, r1 * self.b
else:
a0 = r0
a1, b1 = r1, r1 * self.eccentricity
return EllipticalAnnulus(self.coords, a0, a1, b1, theta=self.orientation) | identifier_body |
source.py | import copy
from dataclasses import dataclass
from typing import Literal, Union
import matplotlib.pyplot as plt
import numpy as np
from astropy.utils import lazyproperty
from matplotlib.patches import Circle, Ellipse
from photutils.aperture import *
from photutils.isophote import Ellipse as IsoEllipse
from photutils.isophote import EllipseGeometry
color = [0.51, 0.86, 1.0]
__all__ = [
"Source",
"PointSource",
"ExtendedSource",
"TraceSource",
"auto_source",
"Sources",
]
def distance(p1, p2):
return np.sqrt(np.power(p1[0] - p2[0], 2) + np.power(p1[1] - p2[1], 2))
def clean_stars_positions(positions, tolerance=50):
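# Keep only the positions whose closest neighbour is at least `tolerance` pixels away.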
keep = []
distance_to_others = np.array(
[[distance(v, w) for w in positions] for v in positions]
)
for i, _distances in enumerate(distance_to_others):
_distances[i] = np.inf
close_stars = np.flatnonzero(_distances < tolerance)
if len(close_stars) == 0:
keep.append(i)
return np.unique(keep)
# Note: Why not use photutils.segmentation.SourceCatalog?
# source: https://photutils.readthedocs.io/en/stable/api/photutils.segmentation.SourceCatalog.html#photutils.segmentation.SourceCatalog
#
# Main reason is full control and no need to subclass SourceCatalog. Reasons:
# - Ability to implement Source.plot and Source.aperture differently depending on the type of source
# - Ability to easily instantiate a fake/incomplete source only defined by its coords (output of many detection algorithms like DAOPHOT)
# - We still use it, as region, so that users have access to it if needed
# - I don't like this as_scalar behavior, I prefer separate Source and Sources
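#
# Typical use (illustrative sketch; `label_img` and `img` are assumed to come from the
# user's own detection step, e.g. skimage.measure.label on a thresholded image):
#
#     from skimage.measure import regionprops
#     regions = regionprops(label_img, intensity_image=img)
#     sources = Sources([auto_source(r, i) for i, r in enumerate(regions)])
#     sources.plot()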
@dataclass
class Source:
"""A object containing a source information
This is a Python Data Class, so that most attributes described below can be used as
keyword-arguments when instantiated
"""
a: float = 1.0
"""Semi-major axis of the source"""
b: float = 1.0
"""Semi-minor axis of the source"""
orientation: float = 0.0
"""Orientation of the source in radians"""
coords: np.ndarray = None
"""(x,y) pixel coordinates of the source"""
peak: float = 0.0
"""Peak ADU value of the source"""
i: int = None
"""Index of the source"""
discarded: bool = False
"""Whether source is discarded"""
@classmethod
def from_region(cls, region, keep_region: bool = False, **kwargs):
"""Source from region
Parameters
----------
region : skimage.measure.RegionProperties
An skimage RegionProperties containing the source
keep_region: bool, optional
whether to keep region object in source
**kwargs:
other sources attributes to set
"""
source = cls(
a=region.axis_major_length / 2,
b=region.axis_minor_length / 2,
orientation=np.pi / 2 - region.orientation,
coords=np.array(region.centroid_weighted[::-1]),
peak=region.intensity_max,
**kwargs,
)
return source
@property
def vertexes(self):
"""Coordinates of the Ellipse vertexes, endpoints of the major axis
Returns
-------
np.array
vertexes coordinates
"""
theta = self.orientation
shifts = np.array([np.cos(theta), np.sin(theta)]) * self.a
return self.coords + (shifts[:, None] * [-1, 1]).T
@property
def co_vertexes(self):
"""Coordinates of the Ellipse co-vertexes, endpoints of the minor axis
Returns
-------
np.array
co-vertexes coordinates
"""
theta = self.orientation + np.pi / 2
shifts = np.array([np.cos(theta), np.sin(theta)]) * self.b
return self.coords + (shifts[:, None] * [-1, 1]).T
@lazyproperty
def eccentricity(self):
"""Eccentricity of the source
Returns
-------
float
"""
return self.b / self.a
def copy(self):
"""Return a copy of the Source
Returns
-------
Source
copy
"""
copy = self.__class__()
copy.a = self.a
copy.b = self.b
copy.peak = self.peak
copy.orientation = self.orientation
copy.i = self.i
copy.coords = self.coords.copy()
return copy
def __copy__(self):
return self.copy()
def plot_circle(self, radius, c=color, ax=None, label=True, fontsize=12, **kwargs):
"""Plot a circle centered on source
Parameters
----------
radius : float
radius of the circle in pixels
c : str, optional
color of the circle, by default color
ax : Axe, optional
pyplot axe in which to plot the circle, by default None
label : bool, optional
whether to display the Source.i index, by default True
fontsize : int, optional
Font size for the source index, by default 12
"""
if ax is None:
ax = plt.gca()
circle = Circle(self.coords, radius, fill=None, ec=c, **kwargs)
ax.add_artist(circle)
if label and self.i is not None:
plt.text(
*(np.array(self.coords) - [0, 1.5 * radius]),
self.i,
c=c,
ha="center",
va="top",
fontsize=fontsize,
)
def plot_ellipse(self, a=None, c=color, ax=None, label=True, fontsize=12, **kwargs):
"""Plot an ellipse centered on source, with semi-major/minor length defined by the source itself
Parameters
----------
a : float, optional
full width (major axis) of the plotted ellipse; if None, defaults to `2.2 * Source.a`
c : str, optional
color of the circle, by default color
ax : Axe, optional
pyplot axe in which to plot the circle, by default None
label : bool, optional
whether to display the Source.i index, by default True
fontsize : int, optional
Font size for the source index, by default 12
"""
if ax is None:
ax = plt.gca()
if a is None:
a = 2 * self.a * 1.1
e = Ellipse(
xy=self.coords,
width=a,
height=a * self.eccentricity,
angle=np.rad2deg(self.orientation),
**kwargs,
)
e.set_facecolor("none")
e.set_edgecolor(c)
ax.add_artist(e)
if label and self.i is not None:
rad = self.orientation
label_coord = self.coords + [0, -(np.abs(self.a * rad) + self.b)]
plt.text(
*label_coord, self.i, c=c, ha="center", va="top", fontsize=fontsize
)
def circular_aperture(self, r, scale=True):
"""`photutils.aperture.CircularAperture` centered on the source
Parameters
----------
r : float
radius
scale : bool, optional
whether to scale r to Source.a, by default True
Returns
-------
photutils.aperture.CircularAperture
"""
if scale:
radius = r * self.a
else:
radius = r
return CircularAperture(self.coords, float(np.abs(radius)))
def elliptical_aperture(self, r, scale=True):
"""`photutils.aperture.EllipticalAperture` centered on the source
Parameters
----------
r : float
semi-major axis of the aperture. Semi minor will be `r*Source.b/Source.a`
scale : bool, optional
whether to scale r to Source.a, by default True
Returns
-------
photutils.aperture.EllipticalAperture
"""
if scale:
a, b = r * self.a, r * self.b
else:
a, b = r, r * self.eccentricity
return EllipticalAperture(self.coords, a, b, self.orientation)
def rectangular_aperture(self, r, scale=True):
if scale:
a, b = 2 * r * self.a, 2 * r * self.b
else:
a, b = 2 * r, 2 * r * self.eccentricity
a = np.max([0.01, a])
b = np.max([0.01, b])
return RectangularAperture(
self.coords, float(np.abs(a)), float(np.abs(b)), self.orientation
)
def circular_annulus(self, r0, r1, scale=False):
if scale:
r0 = r0 * self.a
r1 = r1 * self.a
else:
r0 = r0
r1 = r1
return CircularAnnulus(self.coords, r0, r1)
def elliptical_annulus(self, r0, r1, scale=False):
if scale:
a0 = r0 * self.a
a1, b1 = r1 * self.a, r1 * self.b
else:
a0 = r0
a1, b1 = r1, r1 * self.eccentricity
return EllipticalAnnulus(self.coords, a0, a1, b1, theta=self.orientation)
def rectangular_annulus(self, r0, r1, scale=False):
if scale:
a0 = 2 * r0 * self.a
a1, b1 = 2 * r1 * self.a, 2 * r1 * self.b
else:
a0 = r0
a1, b1 = r1, r1 * self.eccentricity
a0 = np.max([0.01, a0])
a1 = np.max([a0 + 0.001, a1])
b1 = np.max([0.01, b1])
return RectangularAnnulus(self.coords, a0, a1, b1, theta=self.orientation)
def fit_isophotes(self, debug=False):
"""Fit a photutils.isophote.Ellipse to the source. Requires the source to be instantiated from a skimage RegionProperties
Parameters
----------
debug : bool, optional
whether to plot the result for debugging, by default False
Returns
-------
output of photutils.isophote.Ellipse.fit_image
"""
data = self._region.image_intensity
y0, x0 = np.unravel_index(np.argmax(data), data.shape)
geometry = EllipseGeometry(
x0, y0, sma=self.a / 2, eps=self.eccentricity, pa=self.orientation
)
ellipse = IsoEllipse(data - np.median(data), geometry)
isolist = ellipse.fit_image()
if debug:
plt.imshow(data)
smas = np.linspace(3, 20, 15)
for sma in smas:
iso = isolist.get_closest(sma)
(
x,
y,
) = iso.sampled_coordinates()
plt.plot(x, y, color="white")
return isolist
@property
def _symbol(self):
return "?"
@property
def _desc(self):
        return f"{self._symbol} {self.__class__.__name__}" + (
            f" {self.i}" if self.i is not None else ""
        )
def _repr_dict(self, n=8):
return {
"coords": f"{self.coords[0]:.2f}".rjust(n)
+ f"{self.coords[1]:.2f}".rjust(n),
"a, b": f"{self.a:.2f}".rjust(n) + f"{self.b:.2f}".rjust(n),
"e": f"{self.b/self.a:.2f}".rjust(n),
}
def __str__(self):
table = "\n".join(
[f" {n}".ljust(8) + f"{v}" for n, v in self._repr_dict().items()]
)
return f"{self._desc}\n {'-'*(len(self._desc)-2)}\n{table}"
def centroid_isophote(self):
isolist = self.fit_isophotes()
origin = np.array(self._region.bbox)[0:2][::-1]
return np.array([isolist[0].x0, isolist[0].y0]) + origin
def centroid_max(self):
y0, x0 = np.unravel_index(
np.argmax(self._region.image_intensity), self._region.image.shape
)
dy, dx, _, _ = self._region.bbox
return np.array([x0 + dx, y0 + dy])
@property
def area(self):
"""Area of the source as :code:`a*b`
Returns
-------
float
"""
return self.a * self.b
def auto_source(region, i=None, trace=0.3, extended=0.9, discard=False):
if region is None:
return DiscardedSource.from_region(region, i=i)
a = region.axis_major_length
b = region.axis_minor_length
if a == 0.0:
if discard:
return DiscardedSource.from_region(region, i=i)
else:
return PointSource.from_region(region, i=i)
eccentricity = b / a
if eccentricity <= extended:
if eccentricity <= trace:
return TraceSource.from_region(region, i=i)
else:
return ExtendedSource.from_region(region, i=i)
else:
return PointSource.from_region(region, i=i)
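# Hedged usage sketch (not part of the original module): how the thresholds above map
# detected regions onto source types. `regions` is assumed to be a list of skimage
# RegionProperties; b/a <= trace gives a TraceSource, trace < b/a <= extended an
# ExtendedSource, and rounder regions a PointSource.
def _example_classify_regions(regions):
    sources = [auto_source(r, i=i, trace=0.3, extended=0.9) for i, r in enumerate(regions)]
    return Sources(sources)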
class DiscardedSource(Source):
def __init__(self, region, i=None):
super().__init__(region, i=i)
self.discarded = True
def plot(self, ms=15, c="C0", ax=None, **kwargs):
if ax is None:
ax = plt.gca()
ax.plot(*self.coords, "x", c=c, ms=ms, **kwargs)
class PointSource(Source):
"""Point source (star)"""
@property
def _symbol(self):
return chr(8226)
def plot(self, radius=15, **kwargs):
"""Plot circle centered on source
Parameters
----------
radius : int, optional
radius, by default 15
"""
self.plot_circle(radius, **kwargs)
def aperture(self, r=1, scale=True):
return self.circular_aperture(r, scale=scale)
def annulus(self, r0=1.05, r1=1.4, scale=True):
return self.circular_annulus(r0, r1, scale=scale)
class ExtendedSource(Source):
"""Extended source (comet, galaxy or lensed source)"""
@property
def _symbol(self):
return chr(11053)
def plot(self, radius=None, **kwargs):
"""Plot Ellipse on source
Parameters
----------
radius : int, optional
extension to minor/major axis, by default 6
"""
self.plot_ellipse(radius, **kwargs)
def aperture(self, r=1, scale=True):
return self.elliptical_aperture(r, scale=scale)
def annulus(self, r0=1.05, r1=1.4, scale=True):
return self.elliptical_annulus(r0, r1, scale=scale)
class TraceSource(Source):
"""Trace source (diffracted spectrum, satellite streak or cosmic ray)"""
def plot(self, offset=10, ax=None, c=color, label=True, fontsize=12):
if ax is None:
ax = plt.gca()
ax.plot(*self.vertexes.T, c=c)
if label and self.i is not None:
label_coords = self.coords + [0, -offset]
plt.text(
*label_coords, self.i, c=c, ha="center", va="top", fontsize=fontsize
)
def aperture(self, r=1, scale=True):
return self.rectangular_aperture(r, scale=scale)
def annulus(self, r0=1.05, r1=1.4, scale=True):
return self.rectangular_annulus(r0, r1, scale=scale)
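# Hedged sketch of simple aperture photometry built on the scaled apertures above.
# `data` is assumed to be a background-flattened image and `source` a PointSource;
# aperture_photometry comes from the photutils.aperture star import at the top of the
# module, and the mean-per-pixel background scaling is an assumption of this sketch.
def _example_point_source_flux(data, source):
    aperture = source.aperture(r=1.0)          # CircularAperture with radius Source.a
    annulus = source.annulus(r0=1.05, r1=1.4)  # CircularAnnulus just outside it
    flux = aperture_photometry(data, aperture)["aperture_sum"][0]
    sky_per_pixel = aperture_photometry(data, annulus)["aperture_sum"][0] / annulus.area
    return flux - sky_per_pixel * aperture.area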
@dataclass
class Sources:
sources: list = None
"""List of sources"""
type: Literal["PointSource", None] = None
"""Source type"""
def __post_init__(self):
if self.sources is None:
self.sources = []
if isinstance(self.sources, np.ndarray):
if self.sources.dtype != object:
self.sources = [
PointSource(coords=s, i=i) for i, s in enumerate(self.sources)
]
self.type = "PointSource"
if self.type is not None:
for s in self.sources:
assert (
s.__class__.__name__ == self.type
), f"list can only contain {self.type}"
self.sources = np.array(self.sources)
def __getitem__(self, i):
if np.isscalar(i):
i = int(i)
return self.sources[i]
else:
return self.__class__(self.sources[i])
def __len__(self):
return len(self.sources)
def __str__(self):
return str(self.sources)
def __repr__(self):
return self.sources.__repr__()
def copy(self):
return copy.deepcopy(self)
def __copy__(self):
return self.copy()
@property
def coords(self):
return np.array([source.coords for source in self.sources])
@coords.setter
def coords(self, new_coords):
for source, new_coord in zip(self.sources, new_coords):
source.coords = new_coord
def apertures(self, r, scale=False):
if self.type == "PointSource":
return CircularAperture(self.coords, r)
else:
|
def annulus(self, rin, rout, scale=False):
if self.type == "PointSource":
return CircularAnnulus(self.coords, rin, rout)
else:
return [source.annulus(rin, rout, scale=scale) for source in self.sources]
def plot(self, *args, **kwargs):
for s in self.sources:
s.plot(*args, **kwargs)
| return [source.aperture(r, scale=scale) for source in self.sources] | conditional_block |
source.py | import copy
from dataclasses import dataclass
from typing import Literal, Union
import matplotlib.pyplot as plt
import numpy as np
from astropy.utils import lazyproperty
from matplotlib.patches import Circle, Ellipse
from photutils.aperture import *
from photutils.isophote import Ellipse as IsoEllipse
from photutils.isophote import EllipseGeometry
color = [0.51, 0.86, 1.0]
__all__ = [
"Source",
"PointSource",
"ExtendedSource",
"TraceSource",
"auto_source",
"Sources",
]
def distance(p1, p2):
return np.sqrt(np.power(p1[0] - p2[0], 2) + np.power(p1[1] - p2[1], 2))
def clean_stars_positions(positions, tolerance=50):
keep = []
distance_to_others = np.array(
[[distance(v, w) for w in positions] for v in positions]
)
for i, _distances in enumerate(distance_to_others):
_distances[i] = np.inf
close_stars = np.flatnonzero(_distances < tolerance)
if len(close_stars) == 0:
keep.append(i)
return np.unique(keep)
# Note: Why not using photutils.segmentation.SourceCatalog?
# source: https://photutils.readthedocs.io/en/stable/api/photutils.segmentation.SourceCatalog.html#photutils.segmentation.SourceCatalog
#
# Main reason is full control and no need to subclass SourceCatalog. Reasons:
# - Ability to Source.plot and Source.aperture differently depending on the type of source
# - Ability to easily instantiate a fake/incomplete source only defined by its coords (output of many detection algorithms like DAOPHOT)
# - We keep the underlying skimage region attached so that users have access to it if needed
# - I don't like this as_scalar behavior, I prefer separate Source and Sources
@dataclass
class Source:
"""A object containing a source information
This is a Python Data Class, so that most attributes described below can be used as
keyword-arguments when instantiated
"""
a: float = 1.0
"""Semi-major axis of the source"""
b: float = 1.0
"""Semi-minor axis of the source"""
orientation: float = 0.0
"""Orientation of the source in radians"""
coords: np.ndarray = None
"""(x,y) pixel coordinates of the source"""
peak: float = 0.0
"""Peak ADU value of the source"""
i: int = None
"""Index of the source"""
discarded: bool = False
"""Whether source is discarded"""
@classmethod
def from_region(cls, region, keep_region: bool = False, **kwargs):
"""Source from region
Parameters
----------
region : skimage.measure.RegionProperties
An skimage RegionProperties containing the source
keep_region: bool, optional
whether to keep region object in source
**kwargs:
other sources attributes to set
"""
source = cls(
a=region.axis_major_length / 2,
b=region.axis_minor_length / 2,
orientation=np.pi / 2 - region.orientation,
coords=np.array(region.centroid_weighted[::-1]),
peak=region.intensity_max,
**kwargs,
)
return source
@property
def vertexes(self):
"""Coordinates of the Ellipse vertexes, endpoints of the major axis
Returns
-------
np.array
vertexes coordinates
"""
theta = self.orientation
shifts = np.array([np.cos(theta), np.sin(theta)]) * self.a
return self.coords + (shifts[:, None] * [-1, 1]).T
@property
def co_vertexes(self):
"""Coordinates of the Ellipse co-vertexes, endpoints of the minor axis
Returns
-------
np.array
co-vertexes coordinates
"""
theta = self.orientation + np.pi / 2
shifts = np.array([np.cos(theta), np.sin(theta)]) * self.b
return self.coords + (shifts[:, None] * [-1, 1]).T
@lazyproperty
def eccentricity(self):
"""Eccentricity of the source
Returns
-------
float
"""
return self.b / self.a
def copy(self):
"""Return a copy of the Source
Returns
-------
Source
copy
"""
copy = self.__class__()
copy.a = self.a
copy.b = self.b
copy.peak = self.peak
copy.orientation = self.orientation
copy.i = self.i
copy.coords = self.coords.copy()
return copy
def __copy__(self):
return self.copy()
def plot_circle(self, radius, c=color, ax=None, label=True, fontsize=12, **kwargs):
"""Plot a circle centered on source
Parameters
----------
        radius : float
            radius of the circle in pixels
        c : str, optional
            color of the circle, by default color
        ax : Axes, optional
            pyplot axes in which to plot the circle, by default None
label : bool, optional
whether to display the Source.i index, by default True
fontsize : int, optional
Font size for the source index, by default 12
"""
if ax is None:
ax = plt.gca()
circle = Circle(self.coords, radius, fill=None, ec=c, **kwargs)
ax.add_artist(circle)
if label and self.i is not None:
plt.text(
*(np.array(self.coords) - [0, 1.5 * radius]),
self.i,
c=c,
ha="center",
va="top",
fontsize=fontsize,
)
def plot_ellipse(self, a=None, c=color, ax=None, label=True, fontsize=12, **kwargs):
"""Plot an ellipse centered on source, with semi-major/minor length defined by the source itself
Parameters
----------
        a : float, optional
            width of the plotted ellipse in pixels; if None, defaults to `2.2 * Source.a`
        c : str, optional
            color of the ellipse, by default color
        ax : Axes, optional
            pyplot axes in which to plot the ellipse, by default None
label : bool, optional
whether to display the Source.i index, by default True
fontsize : int, optional
Font size for the source index, by default 12
"""
if ax is None:
ax = plt.gca()
        if a is None:
            a = 2 * self.a * 1.1
e = Ellipse(
xy=self.coords,
width=a,
height=a * self.eccentricity,
angle=np.rad2deg(self.orientation),
**kwargs,
)
e.set_facecolor("none")
e.set_edgecolor(c)
ax.add_artist(e)
if label and self.i is not None:
rad = self.orientation
label_coord = self.coords + [0, -(np.abs(self.a * rad) + self.b)]
plt.text(
*label_coord, self.i, c=c, ha="center", va="top", fontsize=fontsize
)
def circular_aperture(self, r, scale=True):
"""`photutils.aperture.CircularAperture` centered on the source
Parameters
----------
r : float
radius
scale : bool, optional
whether to scale r to Source.a, by default True
Returns
-------
photutils.aperture.CircularAperture
"""
if scale:
radius = r * self.a
else:
radius = r
return CircularAperture(self.coords, float(np.abs(radius)))
def elliptical_aperture(self, r, scale=True):
"""`photutils.aperture.EllipticalAperture` centered on the source
Parameters
----------
r : float
semi-major axis of the aperture. Semi minor will be `r*Source.b/Source.a`
scale : bool, optional
whether to scale r to Source.a, by default True
Returns
-------
        photutils.aperture.EllipticalAperture
"""
if scale:
a, b = r * self.a, r * self.b
else:
a, b = r, r * self.eccentricity
return EllipticalAperture(self.coords, a, b, self.orientation)
def rectangular_aperture(self, r, scale=True):
if scale:
a, b = 2 * r * self.a, 2 * r * self.b
else:
a, b = 2 * r, 2 * r * self.eccentricity
a = np.max([0.01, a])
b = np.max([0.01, b])
return RectangularAperture(
self.coords, float(np.abs(a)), float(np.abs(b)), self.orientation
)
def circular_annulus(self, r0, r1, scale=False):
if scale:
r0 = r0 * self.a
r1 = r1 * self.a
return CircularAnnulus(self.coords, r0, r1)
def elliptical_annulus(self, r0, r1, scale=False):
if scale:
a0 = r0 * self.a
a1, b1 = r1 * self.a, r1 * self.b
else:
            a0 = r0
a1, b1 = r1, r1 * self.eccentricity
return EllipticalAnnulus(self.coords, a0, a1, b1, theta=self.orientation)
def rectangular_annulus(self, r0, r1, scale=False):
if scale:
a0 = 2 * r0 * self.a
a1, b1 = 2 * r1 * self.a, 2 * r1 * self.b
else:
a0 = r0
a1, b1 = r1, r1 * self.eccentricity
a0 = np.max([0.01, a0])
a1 = np.max([a0 + 0.001, a1])
b1 = np.max([0.01, b1])
return RectangularAnnulus(self.coords, a0, a1, b1, theta=self.orientation)
def fit_isophotes(self, debug=False):
"""Fit a photutils.isophote.Ellipse to the source. Requires the source to be instantiated from a skimage RegionProperties
Parameters
----------
debug : bool, optional
whether to plot the result for debugging, by default False
Returns
-------
output of photutils.isophote.Ellipse.fit_image
"""
data = self._region.image_intensity
y0, x0 = np.unravel_index(np.argmax(data), data.shape)
geometry = EllipseGeometry(
x0, y0, sma=self.a / 2, eps=self.eccentricity, pa=self.orientation
)
ellipse = IsoEllipse(data - np.median(data), geometry)
isolist = ellipse.fit_image()
if debug:
plt.imshow(data)
smas = np.linspace(3, 20, 15)
for sma in smas:
iso = isolist.get_closest(sma)
(
x,
y,
) = iso.sampled_coordinates()
plt.plot(x, y, color="white")
return isolist
@property
def _symbol(self):
return "?"
@property
def _desc(self):
        return f"{self._symbol} {self.__class__.__name__}" + (
            f" {self.i}" if self.i is not None else ""
        )
def _repr_dict(self, n=8):
return {
"coords": f"{self.coords[0]:.2f}".rjust(n)
+ f"{self.coords[1]:.2f}".rjust(n),
"a, b": f"{self.a:.2f}".rjust(n) + f"{self.b:.2f}".rjust(n),
"e": f"{self.b/self.a:.2f}".rjust(n),
}
def __str__(self):
table = "\n".join(
[f" {n}".ljust(8) + f"{v}" for n, v in self._repr_dict().items()]
)
return f"{self._desc}\n {'-'*(len(self._desc)-2)}\n{table}"
def centroid_isophote(self):
isolist = self.fit_isophotes()
origin = np.array(self._region.bbox)[0:2][::-1]
return np.array([isolist[0].x0, isolist[0].y0]) + origin
def centroid_max(self):
y0, x0 = np.unravel_index(
np.argmax(self._region.image_intensity), self._region.image.shape
)
dy, dx, _, _ = self._region.bbox
return np.array([x0 + dx, y0 + dy])
@property
def area(self):
"""Area of the source as :code:`a*b`
Returns
-------
float
"""
return self.a * self.b
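# Hedged sketch (not in the original module): Source is a plain dataclass, so a minimal
# synthetic source can be built from keyword arguments alone, without a skimage region
# behind it. The numbers below are arbitrary.
def _example_synthetic_source():
    s = Source(
        a=4.0, b=2.0, orientation=np.pi / 4, coords=np.array([120.0, 80.0]), peak=1500.0, i=0
    )
    assert s.eccentricity == 0.5  # defined above as b / a
    s.plot_ellipse()              # draws on the current matplotlib axes
    return s.vertexes             # endpoints of the major axis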
def auto_source(region, i=None, trace=0.3, extended=0.9, discard=False):
if region is None:
return DiscardedSource.from_region(region, i=i)
a = region.axis_major_length
b = region.axis_minor_length
if a == 0.0:
if discard:
return DiscardedSource.from_region(region, i=i)
else:
return PointSource.from_region(region, i=i)
eccentricity = b / a
if eccentricity <= extended:
if eccentricity <= trace:
return TraceSource.from_region(region, i=i)
else:
return ExtendedSource.from_region(region, i=i)
else:
return PointSource.from_region(region, i=i)
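# Hedged end-to-end sketch: segment an image with scikit-image and let auto_source pick a
# Source subclass per region. The thresholding and labelling choices are assumptions of
# this sketch, not something the module prescribes; skimage is not imported at module
# level here, hence the local import.
def _example_detect_sources(data):
    from skimage.measure import label, regionprops
    mask = data > np.median(data) + 3.0 * np.std(data)
    regions = regionprops(label(mask), intensity_image=data)
    return Sources([auto_source(r, i=i) for i, r in enumerate(regions)])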
class DiscardedSource(Source):
def __init__(self, region, i=None):
super().__init__(region, i=i)
self.discarded = True
def plot(self, ms=15, c="C0", ax=None, **kwargs):
if ax is None:
ax = plt.gca()
ax.plot(*self.coords, "x", c=c, ms=ms, **kwargs)
class PointSource(Source):
"""Point source (star)"""
@property
def _symbol(self):
return chr(8226)
def plot(self, radius=15, **kwargs):
"""Plot circle centered on source
Parameters
----------
radius : int, optional
radius, by default 15
"""
self.plot_circle(radius, **kwargs)
def aperture(self, r=1, scale=True):
return self.circular_aperture(r, scale=scale)
def annulus(self, r0=1.05, r1=1.4, scale=True):
return self.circular_annulus(r0, r1, scale=scale)
class ExtendedSource(Source):
"""Extended source (comet, galaxy or lensed source)"""
@property
def _symbol(self):
return chr(11053)
def plot(self, radius=None, **kwargs):
"""Plot Ellipse on source
Parameters
----------
radius : int, optional
extension to minor/major axis, by default 6
"""
self.plot_ellipse(radius, **kwargs)
def aperture(self, r=1, scale=True):
return self.elliptical_aperture(r, scale=scale)
def annulus(self, r0=1.05, r1=1.4, scale=True):
return self.elliptical_annulus(r0, r1, scale=scale)
class TraceSource(Source):
"""Trace source (diffracted spectrum, satellite streak or cosmic ray)"""
def plot(self, offset=10, ax=None, c=color, label=True, fontsize=12):
if ax is None:
ax = plt.gca()
ax.plot(*self.vertexes.T, c=c)
if label and self.i is not None:
label_coords = self.coords + [0, -offset] | *label_coords, self.i, c=c, ha="center", va="top", fontsize=fontsize
)
def aperture(self, r=1, scale=True):
return self.rectangular_aperture(r, scale=scale)
def annulus(self, r0=1.05, r1=1.4, scale=True):
return self.rectangular_annulus(r0, r1, scale=scale)
@dataclass
class Sources:
sources: list = None
"""List of sources"""
type: Literal["PointSource", None] = None
"""Source type"""
def __post_init__(self):
if self.sources is None:
self.sources = []
if isinstance(self.sources, np.ndarray):
if self.sources.dtype != object:
self.sources = [
PointSource(coords=s, i=i) for i, s in enumerate(self.sources)
]
self.type = "PointSource"
if self.type is not None:
for s in self.sources:
assert (
s.__class__.__name__ == self.type
), f"list can only contain {self.type}"
self.sources = np.array(self.sources)
def __getitem__(self, i):
if np.isscalar(i):
i = int(i)
return self.sources[i]
else:
return self.__class__(self.sources[i])
def __len__(self):
return len(self.sources)
def __str__(self):
return str(self.sources)
def __repr__(self):
return self.sources.__repr__()
def copy(self):
return copy.deepcopy(self)
def __copy__(self):
return self.copy()
@property
def coords(self):
return np.array([source.coords for source in self.sources])
@coords.setter
def coords(self, new_coords):
for source, new_coord in zip(self.sources, new_coords):
source.coords = new_coord
def apertures(self, r, scale=False):
if self.type == "PointSource":
return CircularAperture(self.coords, r)
else:
return [source.aperture(r, scale=scale) for source in self.sources]
def annulus(self, rin, rout, scale=False):
if self.type == "PointSource":
return CircularAnnulus(self.coords, rin, rout)
else:
return [source.annulus(rin, rout, scale=scale) for source in self.sources]
def plot(self, *args, **kwargs):
for s in self.sources:
s.plot(*args, **kwargs) | plt.text( | random_line_split |
util.rs | // Copyright 2022 TiKV Project Authors. Licensed under Apache-2.0.
use std::{fmt::Write, path::Path, sync::Arc, thread, time::Duration};
use encryption_export::{data_key_manager_from_config, DataKeyManager};
use engine_rocks::{RocksEngine, RocksStatistics};
use engine_test::raft::RaftTestEngine;
use engine_traits::{CfName, KvEngine, TabletRegistry, CF_DEFAULT};
use file_system::IoRateLimiter;
use futures::future::BoxFuture;
use kvproto::{
encryptionpb::EncryptionMethod,
kvrpcpb::Context,
metapb,
raft_cmdpb::{CmdType, RaftCmdRequest, RaftCmdResponse},
};
use raftstore::{store::ReadResponse, Result};
use rand::{prelude::SliceRandom, RngCore};
use server::common::ConfiguredRaftEngine;
use tempfile::TempDir;
use test_raftstore::{new_get_cmd, new_put_cf_cmd, new_request, new_snap_cmd, Config};
use tikv::{
server::KvEngineFactoryBuilder,
storage::{
kv::{SnapContext, SnapshotExt},
point_key_range, Engine, Snapshot,
},
};
use tikv_util::{
config::ReadableDuration, escape, future::block_on_timeout, worker::LazyWorker, HandyRwLock,
};
use txn_types::Key;
use crate::{bootstrap_store, cluster::Cluster, ServerCluster, Simulator};
pub fn create_test_engine(
// TODO: pass it in for all cases.
id: Option<(u64, u64)>,
limiter: Option<Arc<IoRateLimiter>>,
cfg: &Config,
) -> (
TabletRegistry<RocksEngine>,
RaftTestEngine,
Option<Arc<DataKeyManager>>,
TempDir,
LazyWorker<String>,
Arc<RocksStatistics>,
Option<Arc<RocksStatistics>>,
) {
let dir = test_util::temp_dir("test_cluster", cfg.prefer_mem);
let mut cfg = cfg.clone();
cfg.storage.data_dir = dir.path().to_str().unwrap().to_string();
cfg.raft_store.raftdb_path = cfg.infer_raft_db_path(None).unwrap();
cfg.raft_engine.mut_config().dir = cfg.infer_raft_engine_path(None).unwrap();
let key_manager =
data_key_manager_from_config(&cfg.security.encryption, dir.path().to_str().unwrap())
.unwrap()
.map(Arc::new);
let cache = cfg.storage.block_cache.build_shared_cache();
let env = cfg
.build_shared_rocks_env(key_manager.clone(), limiter)
.unwrap();
let sst_worker = LazyWorker::new("sst-recovery");
let scheduler = sst_worker.scheduler();
let (raft_engine, raft_statistics) = RaftTestEngine::build(&cfg, &env, &key_manager, &cache);
if let Some((cluster_id, store_id)) = id {
assert_ne!(store_id, 0);
bootstrap_store(&raft_engine, cluster_id, store_id).unwrap();
}
let builder = KvEngineFactoryBuilder::new(env, &cfg.tikv, cache, key_manager.clone())
.sst_recovery_sender(Some(scheduler));
let factory = Box::new(builder.build());
let rocks_statistics = factory.rocks_statistics();
let reg = TabletRegistry::new(factory, dir.path().join("tablet")).unwrap();
(
reg,
raft_engine,
key_manager,
dir,
sst_worker,
rocks_statistics,
raft_statistics,
)
}
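// Hedged usage sketch, not part of the original file: how a test might unpack the tuple
// returned above. `cfg` is assumed to come from the surrounding harness; keep the TempDir
// alive for the whole test, since dropping it removes the data directory.
fn _example_create_test_engine(cfg: &Config) {
    let (_reg, _raft_engine, _key_manager, _dir, _sst_worker, _kv_statistics, _raft_statistics) =
        create_test_engine(Some((1, 1)), None, cfg);
    // _reg and _raft_engine would normally be handed to the node or cluster under test here.
}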
/// Keep putting random kvs until specified size limit is reached.
pub fn put_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
put_cf_till_size(cluster, CF_DEFAULT, limit, range)
}
pub fn put_cf_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
cf: &'static str,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
assert!(limit > 0);
let mut len = 0;
let mut rng = rand::thread_rng();
let mut key = String::new();
let mut value = vec![0; 64];
while len < limit {
let batch_size = std::cmp::min(1024, limit - len);
let mut reqs = vec![];
for _ in 0..batch_size / 74 + 1 {
key.clear();
let key_id = range.next().unwrap();
write!(key, "{:09}", key_id).unwrap();
rng.fill_bytes(&mut value);
// plus 1 for the extra encoding prefix
len += key.len() as u64 + 1;
len += value.len() as u64;
reqs.push(new_put_cf_cmd(cf, key.as_bytes(), &value));
}
cluster.batch_put(key.as_bytes(), reqs).unwrap();
        // Approximate size of the memtable is inaccurate for small data,
        // so we flush it to SST and use the size properties instead.
cluster.must_flush_cf(cf, true);
}
key.into_bytes()
}
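// Hedged sketch: fill the default CF with roughly 1 MiB of random kvs, drawing 9-digit
// keys from an increasing counter. The cluster is assumed to be built elsewhere by this
// harness; only the call shape mirrors the helper above.
fn _example_put_till_size(cluster: &mut Cluster<ServerCluster<RocksEngine>, RocksEngine>) {
    let last_key = put_till_size(cluster, 1024 * 1024, &mut (0u64..));
    assert!(!last_key.is_empty()); // the last key written, e.g. b"000000013"
}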
pub fn configure_for_encryption(config: &mut Config) {
let manifest_dir = Path::new(env!("CARGO_MANIFEST_DIR"));
let cfg = &mut config.security.encryption;
cfg.data_encryption_method = EncryptionMethod::Aes128Ctr;
cfg.data_key_rotation_period = ReadableDuration(Duration::from_millis(100));
cfg.master_key = test_util::new_test_file_master_key(manifest_dir);
}
pub fn configure_for_snapshot(config: &mut Config) {
// Truncate the log quickly so that we can force sending snapshot.
config.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(20);
config.raft_store.raft_log_gc_count_limit = Some(2);
config.raft_store.merge_max_log_gap = 1;
config.raft_store.snap_mgr_gc_tick_interval = ReadableDuration::millis(50);
configure_for_encryption(config);
}
pub fn configure_for_lease_read_v2<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
base_tick_ms: Option<u64>,
election_ticks: Option<usize>,
) -> Duration {
if let Some(base_tick_ms) = base_tick_ms |
let base_tick_interval = cluster.cfg.raft_store.raft_base_tick_interval.0;
if let Some(election_ticks) = election_ticks {
cluster.cfg.raft_store.raft_election_timeout_ticks = election_ticks;
}
let election_ticks = cluster.cfg.raft_store.raft_election_timeout_ticks as u32;
let election_timeout = base_tick_interval * election_ticks;
// Adjust max leader lease.
cluster.cfg.raft_store.raft_store_max_leader_lease =
ReadableDuration(election_timeout - base_tick_interval);
// Use large peer check interval, abnormal and max leader missing duration to
// make a valid config, that is election timeout x 2 < peer stale state
// check < abnormal < max leader missing duration.
cluster.cfg.raft_store.peer_stale_state_check_interval = ReadableDuration(election_timeout * 3);
cluster.cfg.raft_store.abnormal_leader_missing_duration =
ReadableDuration(election_timeout * 4);
cluster.cfg.raft_store.max_leader_missing_duration = ReadableDuration(election_timeout * 5);
election_timeout
}
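// Worked example of the timing rules above, as a hedged sketch: with a 50 ms base tick
// and 10 election ticks, the election timeout is 500 ms, the max leader lease 450 ms,
// and the stale-state / abnormal / max-missing checks become 1.5 s / 2 s / 2.5 s.
fn _example_lease_read_timing(cluster: &mut Cluster<ServerCluster<RocksEngine>, RocksEngine>) {
    let election_timeout = configure_for_lease_read_v2(cluster, Some(50), Some(10));
    assert_eq!(election_timeout, Duration::from_millis(500));
}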
pub fn wait_for_synced(
cluster: &mut Cluster<ServerCluster<RocksEngine>, RocksEngine>,
node_id: u64,
region_id: u64,
) {
let mut storage = cluster
.sim
.read()
.unwrap()
.storages
.get(&node_id)
.unwrap()
.clone();
let leader = cluster.leader_of_region(region_id).unwrap();
let epoch = cluster.get_region_epoch(region_id);
let mut ctx = Context::default();
ctx.set_region_id(region_id);
ctx.set_peer(leader);
ctx.set_region_epoch(epoch);
let snap_ctx = SnapContext {
pb_ctx: &ctx,
..Default::default()
};
let snapshot = storage.snapshot(snap_ctx).unwrap();
let txn_ext = snapshot.txn_ext.clone().unwrap();
for retry in 0..10 {
if txn_ext.is_max_ts_synced() {
break;
}
thread::sleep(Duration::from_millis(1 << retry));
}
assert!(snapshot.ext().is_max_ts_synced());
}
// Issue a read request on the specified peer.
pub fn read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
cluster.read(None, request, timeout)
}
pub fn async_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
replica_read: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
request.mut_header().set_replica_read(replica_read);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
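// Hedged sketch: drive the future returned above with the same block_on_timeout helper
// this module already imports. `peer` and `region` are assumed to come from the test,
// e.g. cluster.get_region(..) and one of its follower peers.
fn _example_replica_read(
    cluster: &mut Cluster<ServerCluster<RocksEngine>, RocksEngine>,
    peer: metapb::Peer,
    region: metapb::Region,
) {
    let fut = async_read_on_peer(cluster, peer, region, b"key", false, true);
    let resp = block_on_timeout(fut, Duration::from_secs(5)).unwrap();
    assert!(!resp.get_header().has_error(), "replica read failed: {:?}", resp);
}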
pub fn batch_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
requests: &[(metapb::Peer, metapb::Region)],
) -> Vec<ReadResponse<<EK as KvEngine>::Snapshot>> {
let mut results = vec![];
for (peer, region) in requests {
let node_id = peer.get_store_id();
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_snap_cmd()],
false,
);
request.mut_header().set_peer(peer.clone());
let snap = cluster.sim.wl().async_snapshot(node_id, request);
let resp = block_on_timeout(
Box::pin(async move {
match snap.await {
Ok(snap) => ReadResponse {
response: Default::default(),
snapshot: Some(snap),
txn_extra_op: Default::default(),
},
Err(resp) => ReadResponse {
response: resp,
snapshot: None,
txn_extra_op: Default::default(),
},
}
}),
Duration::from_secs(1),
)
.unwrap();
results.push(resp);
}
results
}
pub fn async_read_index_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut cmd = new_get_cmd(key);
cmd.mut_read_index().set_start_ts(u64::MAX);
cmd.mut_read_index()
.mut_key_ranges()
.push(point_key_range(Key::from_raw(key)));
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![cmd],
read_quorum,
);
// Use replica read to issue a read index.
request.mut_header().set_replica_read(true);
request.mut_header().set_peer(peer);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
pub fn async_command_on_node<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
request: RaftCmdRequest,
) -> BoxFuture<'static, RaftCmdResponse> {
cluster.sim.wl().async_command_on_node(node_id, request)
}
pub fn test_delete_range<T: Simulator<EK>, EK: KvEngine>(cluster: &mut Cluster<T, EK>, cf: CfName) {
let data_set: Vec<_> = (1..500)
.map(|i| {
(
format!("key{:08}", i).into_bytes(),
format!("value{}", i).into_bytes(),
)
})
.collect();
for kvs in data_set.chunks(50) {
let requests = kvs.iter().map(|(k, v)| new_put_cf_cmd(cf, k, v)).collect();
// key9 is always the last region.
cluster.batch_put(b"key9", requests).unwrap();
}
// delete_range request with notify_only set should not actually delete data.
cluster.must_notify_delete_range_cf(cf, b"", b"");
let mut rng = rand::thread_rng();
for _ in 0..50 {
let (k, v) = data_set.choose(&mut rng).unwrap();
assert_eq!(cluster.get_cf(cf, k).unwrap(), *v);
}
    // Empty keys mean the whole range.
cluster.must_delete_range_cf(cf, b"", b"");
for _ in 0..50 {
let k = &data_set.choose(&mut rng).unwrap().0;
assert!(cluster.get_cf(cf, k).is_none());
}
}
pub fn must_get_value(resp: &RaftCmdResponse) -> Vec<u8> {
if resp.get_header().has_error() {
panic!("failed to read {:?}", resp);
}
assert_eq!(resp.get_responses().len(), 1);
assert_eq!(resp.get_responses()[0].get_cmd_type(), CmdType::Get);
assert!(resp.get_responses()[0].has_get());
resp.get_responses()[0].get_get().get_value().to_vec()
}
pub fn must_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
value: &[u8],
) {
let timeout = Duration::from_secs(5);
match read_on_peer(cluster, peer, region, key, false, timeout) {
Ok(ref resp) if value == must_get_value(resp).as_slice() => (),
other => panic!(
"read key {}, expect value {:?}, got {:?}",
log_wrappers::hex_encode_upper(key),
value,
other
),
}
}
pub fn must_error_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
timeout: Duration,
) {
if let Ok(mut resp) = read_on_peer(cluster, peer, region, key, false, timeout) {
if !resp.get_header().has_error() {
let value = resp.mut_responses()[0].mut_get().take_value();
panic!(
"key {}, expect error but got {}",
log_wrappers::hex_encode_upper(key),
escape(&value)
);
}
}
}
pub fn put_with_timeout<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
key: &[u8],
value: &[u8],
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut region = cluster.get_region(key);
let region_id = region.get_id();
let mut req = new_request(
region_id,
region.take_region_epoch(),
vec![new_put_cf_cmd(CF_DEFAULT, key, value)],
false,
);
req.mut_header().set_peer(
region
.get_peers()
.iter()
.find(|p| p.store_id == node_id)
.unwrap()
.clone(),
);
cluster.call_command_on_node(node_id, req, timeout)
}
| {
cluster.cfg.raft_store.raft_base_tick_interval = ReadableDuration::millis(base_tick_ms);
} | conditional_block |
util.rs | // Copyright 2022 TiKV Project Authors. Licensed under Apache-2.0.
use std::{fmt::Write, path::Path, sync::Arc, thread, time::Duration};
use encryption_export::{data_key_manager_from_config, DataKeyManager};
use engine_rocks::{RocksEngine, RocksStatistics};
use engine_test::raft::RaftTestEngine;
use engine_traits::{CfName, KvEngine, TabletRegistry, CF_DEFAULT};
use file_system::IoRateLimiter;
use futures::future::BoxFuture;
use kvproto::{
encryptionpb::EncryptionMethod,
kvrpcpb::Context,
metapb,
raft_cmdpb::{CmdType, RaftCmdRequest, RaftCmdResponse},
};
use raftstore::{store::ReadResponse, Result};
use rand::{prelude::SliceRandom, RngCore};
use server::common::ConfiguredRaftEngine;
use tempfile::TempDir;
use test_raftstore::{new_get_cmd, new_put_cf_cmd, new_request, new_snap_cmd, Config};
use tikv::{
server::KvEngineFactoryBuilder,
storage::{
kv::{SnapContext, SnapshotExt},
point_key_range, Engine, Snapshot,
},
};
use tikv_util::{
config::ReadableDuration, escape, future::block_on_timeout, worker::LazyWorker, HandyRwLock,
};
use txn_types::Key;
use crate::{bootstrap_store, cluster::Cluster, ServerCluster, Simulator};
pub fn create_test_engine(
// TODO: pass it in for all cases.
id: Option<(u64, u64)>,
limiter: Option<Arc<IoRateLimiter>>,
cfg: &Config,
) -> (
TabletRegistry<RocksEngine>,
RaftTestEngine,
Option<Arc<DataKeyManager>>,
TempDir,
LazyWorker<String>,
Arc<RocksStatistics>,
Option<Arc<RocksStatistics>>,
) {
let dir = test_util::temp_dir("test_cluster", cfg.prefer_mem);
let mut cfg = cfg.clone();
cfg.storage.data_dir = dir.path().to_str().unwrap().to_string();
cfg.raft_store.raftdb_path = cfg.infer_raft_db_path(None).unwrap();
cfg.raft_engine.mut_config().dir = cfg.infer_raft_engine_path(None).unwrap();
let key_manager =
data_key_manager_from_config(&cfg.security.encryption, dir.path().to_str().unwrap())
.unwrap()
.map(Arc::new);
let cache = cfg.storage.block_cache.build_shared_cache();
let env = cfg
.build_shared_rocks_env(key_manager.clone(), limiter)
.unwrap();
let sst_worker = LazyWorker::new("sst-recovery");
let scheduler = sst_worker.scheduler();
let (raft_engine, raft_statistics) = RaftTestEngine::build(&cfg, &env, &key_manager, &cache);
if let Some((cluster_id, store_id)) = id {
assert_ne!(store_id, 0);
bootstrap_store(&raft_engine, cluster_id, store_id).unwrap();
}
let builder = KvEngineFactoryBuilder::new(env, &cfg.tikv, cache, key_manager.clone())
.sst_recovery_sender(Some(scheduler));
let factory = Box::new(builder.build());
let rocks_statistics = factory.rocks_statistics();
let reg = TabletRegistry::new(factory, dir.path().join("tablet")).unwrap();
(
reg,
raft_engine,
key_manager,
dir,
sst_worker,
rocks_statistics,
raft_statistics,
)
}
/// Keep putting random kvs until specified size limit is reached.
pub fn put_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
put_cf_till_size(cluster, CF_DEFAULT, limit, range)
}
pub fn put_cf_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
cf: &'static str,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
assert!(limit > 0);
let mut len = 0;
let mut rng = rand::thread_rng();
let mut key = String::new();
let mut value = vec![0; 64];
while len < limit {
let batch_size = std::cmp::min(1024, limit - len);
let mut reqs = vec![];
for _ in 0..batch_size / 74 + 1 {
key.clear();
let key_id = range.next().unwrap();
write!(key, "{:09}", key_id).unwrap();
rng.fill_bytes(&mut value);
// plus 1 for the extra encoding prefix
len += key.len() as u64 + 1;
len += value.len() as u64;
reqs.push(new_put_cf_cmd(cf, key.as_bytes(), &value));
}
cluster.batch_put(key.as_bytes(), reqs).unwrap();
        // Approximate size of the memtable is inaccurate for small data,
        // so we flush it to SST and use the size properties instead.
cluster.must_flush_cf(cf, true);
}
key.into_bytes()
}
pub fn configure_for_encryption(config: &mut Config) {
let manifest_dir = Path::new(env!("CARGO_MANIFEST_DIR"));
let cfg = &mut config.security.encryption;
cfg.data_encryption_method = EncryptionMethod::Aes128Ctr;
cfg.data_key_rotation_period = ReadableDuration(Duration::from_millis(100));
cfg.master_key = test_util::new_test_file_master_key(manifest_dir);
}
pub fn configure_for_snapshot(config: &mut Config) {
// Truncate the log quickly so that we can force sending snapshot.
config.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(20);
config.raft_store.raft_log_gc_count_limit = Some(2);
config.raft_store.merge_max_log_gap = 1;
config.raft_store.snap_mgr_gc_tick_interval = ReadableDuration::millis(50);
configure_for_encryption(config);
}
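// Hedged sketch: these helpers mutate a test_raftstore::Config in place, so a test would
// typically call them on cluster.cfg (or on a Config about to be handed to the cluster
// builder) before any node starts.
fn _example_configure(cfg: &mut Config) {
    configure_for_snapshot(cfg); // also turns on encryption via configure_for_encryption
    assert_eq!(cfg.raft_store.raft_log_gc_count_limit, Some(2));
}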
pub fn configure_for_lease_read_v2<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
base_tick_ms: Option<u64>,
election_ticks: Option<usize>,
) -> Duration {
if let Some(base_tick_ms) = base_tick_ms {
cluster.cfg.raft_store.raft_base_tick_interval = ReadableDuration::millis(base_tick_ms);
}
let base_tick_interval = cluster.cfg.raft_store.raft_base_tick_interval.0;
if let Some(election_ticks) = election_ticks {
cluster.cfg.raft_store.raft_election_timeout_ticks = election_ticks;
}
let election_ticks = cluster.cfg.raft_store.raft_election_timeout_ticks as u32;
let election_timeout = base_tick_interval * election_ticks;
// Adjust max leader lease.
cluster.cfg.raft_store.raft_store_max_leader_lease =
ReadableDuration(election_timeout - base_tick_interval);
// Use large peer check interval, abnormal and max leader missing duration to
// make a valid config, that is election timeout x 2 < peer stale state |
election_timeout
}
pub fn wait_for_synced(
cluster: &mut Cluster<ServerCluster<RocksEngine>, RocksEngine>,
node_id: u64,
region_id: u64,
) {
let mut storage = cluster
.sim
.read()
.unwrap()
.storages
.get(&node_id)
.unwrap()
.clone();
let leader = cluster.leader_of_region(region_id).unwrap();
let epoch = cluster.get_region_epoch(region_id);
let mut ctx = Context::default();
ctx.set_region_id(region_id);
ctx.set_peer(leader);
ctx.set_region_epoch(epoch);
let snap_ctx = SnapContext {
pb_ctx: &ctx,
..Default::default()
};
let snapshot = storage.snapshot(snap_ctx).unwrap();
let txn_ext = snapshot.txn_ext.clone().unwrap();
for retry in 0..10 {
if txn_ext.is_max_ts_synced() {
break;
}
thread::sleep(Duration::from_millis(1 << retry));
}
assert!(snapshot.ext().is_max_ts_synced());
}
// Issue a read request on the specified peer.
pub fn read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
cluster.read(None, request, timeout)
}
pub fn async_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
replica_read: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
request.mut_header().set_replica_read(replica_read);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
pub fn batch_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
requests: &[(metapb::Peer, metapb::Region)],
) -> Vec<ReadResponse<<EK as KvEngine>::Snapshot>> {
let mut results = vec![];
for (peer, region) in requests {
let node_id = peer.get_store_id();
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_snap_cmd()],
false,
);
request.mut_header().set_peer(peer.clone());
let snap = cluster.sim.wl().async_snapshot(node_id, request);
let resp = block_on_timeout(
Box::pin(async move {
match snap.await {
Ok(snap) => ReadResponse {
response: Default::default(),
snapshot: Some(snap),
txn_extra_op: Default::default(),
},
Err(resp) => ReadResponse {
response: resp,
snapshot: None,
txn_extra_op: Default::default(),
},
}
}),
Duration::from_secs(1),
)
.unwrap();
results.push(resp);
}
results
}
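// Hedged sketch: each ReadResponse in the returned Vec lines up with one (peer, region)
// pair passed in; a successful snapshot read populates `snapshot`, while a failure keeps
// the error inside `response`.
fn _example_check_batch_reads<EK: KvEngine>(results: Vec<ReadResponse<EK::Snapshot>>) {
    for (i, r) in results.iter().enumerate() {
        match &r.snapshot {
            Some(_) => (), // snapshot is ready to be queried
            None => panic!("batch read {} failed: {:?}", i, r.response),
        }
    }
}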
pub fn async_read_index_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut cmd = new_get_cmd(key);
cmd.mut_read_index().set_start_ts(u64::MAX);
cmd.mut_read_index()
.mut_key_ranges()
.push(point_key_range(Key::from_raw(key)));
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![cmd],
read_quorum,
);
// Use replica read to issue a read index.
request.mut_header().set_replica_read(true);
request.mut_header().set_peer(peer);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
pub fn async_command_on_node<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
request: RaftCmdRequest,
) -> BoxFuture<'static, RaftCmdResponse> {
cluster.sim.wl().async_command_on_node(node_id, request)
}
pub fn test_delete_range<T: Simulator<EK>, EK: KvEngine>(cluster: &mut Cluster<T, EK>, cf: CfName) {
let data_set: Vec<_> = (1..500)
.map(|i| {
(
format!("key{:08}", i).into_bytes(),
format!("value{}", i).into_bytes(),
)
})
.collect();
for kvs in data_set.chunks(50) {
let requests = kvs.iter().map(|(k, v)| new_put_cf_cmd(cf, k, v)).collect();
// key9 is always the last region.
cluster.batch_put(b"key9", requests).unwrap();
}
// delete_range request with notify_only set should not actually delete data.
cluster.must_notify_delete_range_cf(cf, b"", b"");
let mut rng = rand::thread_rng();
for _ in 0..50 {
let (k, v) = data_set.choose(&mut rng).unwrap();
assert_eq!(cluster.get_cf(cf, k).unwrap(), *v);
}
    // Empty keys mean the whole range.
cluster.must_delete_range_cf(cf, b"", b"");
for _ in 0..50 {
let k = &data_set.choose(&mut rng).unwrap().0;
assert!(cluster.get_cf(cf, k).is_none());
}
}
pub fn must_get_value(resp: &RaftCmdResponse) -> Vec<u8> {
if resp.get_header().has_error() {
panic!("failed to read {:?}", resp);
}
assert_eq!(resp.get_responses().len(), 1);
assert_eq!(resp.get_responses()[0].get_cmd_type(), CmdType::Get);
assert!(resp.get_responses()[0].has_get());
resp.get_responses()[0].get_get().get_value().to_vec()
}
pub fn must_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
value: &[u8],
) {
let timeout = Duration::from_secs(5);
match read_on_peer(cluster, peer, region, key, false, timeout) {
Ok(ref resp) if value == must_get_value(resp).as_slice() => (),
other => panic!(
"read key {}, expect value {:?}, got {:?}",
log_wrappers::hex_encode_upper(key),
value,
other
),
}
}
pub fn must_error_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
timeout: Duration,
) {
if let Ok(mut resp) = read_on_peer(cluster, peer, region, key, false, timeout) {
if !resp.get_header().has_error() {
let value = resp.mut_responses()[0].mut_get().take_value();
panic!(
"key {}, expect error but got {}",
log_wrappers::hex_encode_upper(key),
escape(&value)
);
}
}
}
pub fn put_with_timeout<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
key: &[u8],
value: &[u8],
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut region = cluster.get_region(key);
let region_id = region.get_id();
let mut req = new_request(
region_id,
region.take_region_epoch(),
vec![new_put_cf_cmd(CF_DEFAULT, key, value)],
false,
);
req.mut_header().set_peer(
region
.get_peers()
.iter()
.find(|p| p.store_id == node_id)
.unwrap()
.clone(),
);
cluster.call_command_on_node(node_id, req, timeout)
} | // check < abnormal < max leader missing duration.
cluster.cfg.raft_store.peer_stale_state_check_interval = ReadableDuration(election_timeout * 3);
cluster.cfg.raft_store.abnormal_leader_missing_duration =
ReadableDuration(election_timeout * 4);
cluster.cfg.raft_store.max_leader_missing_duration = ReadableDuration(election_timeout * 5); | random_line_split |
util.rs | // Copyright 2022 TiKV Project Authors. Licensed under Apache-2.0.
use std::{fmt::Write, path::Path, sync::Arc, thread, time::Duration};
use encryption_export::{data_key_manager_from_config, DataKeyManager};
use engine_rocks::{RocksEngine, RocksStatistics};
use engine_test::raft::RaftTestEngine;
use engine_traits::{CfName, KvEngine, TabletRegistry, CF_DEFAULT};
use file_system::IoRateLimiter;
use futures::future::BoxFuture;
use kvproto::{
encryptionpb::EncryptionMethod,
kvrpcpb::Context,
metapb,
raft_cmdpb::{CmdType, RaftCmdRequest, RaftCmdResponse},
};
use raftstore::{store::ReadResponse, Result};
use rand::{prelude::SliceRandom, RngCore};
use server::common::ConfiguredRaftEngine;
use tempfile::TempDir;
use test_raftstore::{new_get_cmd, new_put_cf_cmd, new_request, new_snap_cmd, Config};
use tikv::{
server::KvEngineFactoryBuilder,
storage::{
kv::{SnapContext, SnapshotExt},
point_key_range, Engine, Snapshot,
},
};
use tikv_util::{
config::ReadableDuration, escape, future::block_on_timeout, worker::LazyWorker, HandyRwLock,
};
use txn_types::Key;
use crate::{bootstrap_store, cluster::Cluster, ServerCluster, Simulator};
pub fn create_test_engine(
// TODO: pass it in for all cases.
id: Option<(u64, u64)>,
limiter: Option<Arc<IoRateLimiter>>,
cfg: &Config,
) -> (
TabletRegistry<RocksEngine>,
RaftTestEngine,
Option<Arc<DataKeyManager>>,
TempDir,
LazyWorker<String>,
Arc<RocksStatistics>,
Option<Arc<RocksStatistics>>,
) {
let dir = test_util::temp_dir("test_cluster", cfg.prefer_mem);
let mut cfg = cfg.clone();
cfg.storage.data_dir = dir.path().to_str().unwrap().to_string();
cfg.raft_store.raftdb_path = cfg.infer_raft_db_path(None).unwrap();
cfg.raft_engine.mut_config().dir = cfg.infer_raft_engine_path(None).unwrap();
let key_manager =
data_key_manager_from_config(&cfg.security.encryption, dir.path().to_str().unwrap())
.unwrap()
.map(Arc::new);
let cache = cfg.storage.block_cache.build_shared_cache();
let env = cfg
.build_shared_rocks_env(key_manager.clone(), limiter)
.unwrap();
let sst_worker = LazyWorker::new("sst-recovery");
let scheduler = sst_worker.scheduler();
let (raft_engine, raft_statistics) = RaftTestEngine::build(&cfg, &env, &key_manager, &cache);
if let Some((cluster_id, store_id)) = id {
assert_ne!(store_id, 0);
bootstrap_store(&raft_engine, cluster_id, store_id).unwrap();
}
let builder = KvEngineFactoryBuilder::new(env, &cfg.tikv, cache, key_manager.clone())
.sst_recovery_sender(Some(scheduler));
let factory = Box::new(builder.build());
let rocks_statistics = factory.rocks_statistics();
let reg = TabletRegistry::new(factory, dir.path().join("tablet")).unwrap();
(
reg,
raft_engine,
key_manager,
dir,
sst_worker,
rocks_statistics,
raft_statistics,
)
}
/// Keep putting random kvs until specified size limit is reached.
pub fn put_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
put_cf_till_size(cluster, CF_DEFAULT, limit, range)
}
pub fn put_cf_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
cf: &'static str,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
assert!(limit > 0);
let mut len = 0;
let mut rng = rand::thread_rng();
let mut key = String::new();
let mut value = vec![0; 64];
while len < limit {
let batch_size = std::cmp::min(1024, limit - len);
let mut reqs = vec![];
for _ in 0..batch_size / 74 + 1 {
key.clear();
let key_id = range.next().unwrap();
write!(key, "{:09}", key_id).unwrap();
rng.fill_bytes(&mut value);
// plus 1 for the extra encoding prefix
len += key.len() as u64 + 1;
len += value.len() as u64;
reqs.push(new_put_cf_cmd(cf, key.as_bytes(), &value));
}
cluster.batch_put(key.as_bytes(), reqs).unwrap();
        // Approximate size of the memtable is inaccurate for small data,
        // so we flush it to SST and use the size properties instead.
cluster.must_flush_cf(cf, true);
}
key.into_bytes()
}
pub fn configure_for_encryption(config: &mut Config) {
let manifest_dir = Path::new(env!("CARGO_MANIFEST_DIR"));
let cfg = &mut config.security.encryption;
cfg.data_encryption_method = EncryptionMethod::Aes128Ctr;
cfg.data_key_rotation_period = ReadableDuration(Duration::from_millis(100));
cfg.master_key = test_util::new_test_file_master_key(manifest_dir);
}
pub fn configure_for_snapshot(config: &mut Config) {
// Truncate the log quickly so that we can force sending snapshot.
config.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(20);
config.raft_store.raft_log_gc_count_limit = Some(2);
config.raft_store.merge_max_log_gap = 1;
config.raft_store.snap_mgr_gc_tick_interval = ReadableDuration::millis(50);
configure_for_encryption(config);
}
pub fn configure_for_lease_read_v2<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
base_tick_ms: Option<u64>,
election_ticks: Option<usize>,
) -> Duration {
if let Some(base_tick_ms) = base_tick_ms {
cluster.cfg.raft_store.raft_base_tick_interval = ReadableDuration::millis(base_tick_ms);
}
let base_tick_interval = cluster.cfg.raft_store.raft_base_tick_interval.0;
if let Some(election_ticks) = election_ticks {
cluster.cfg.raft_store.raft_election_timeout_ticks = election_ticks;
}
let election_ticks = cluster.cfg.raft_store.raft_election_timeout_ticks as u32;
let election_timeout = base_tick_interval * election_ticks;
// Adjust max leader lease.
cluster.cfg.raft_store.raft_store_max_leader_lease =
ReadableDuration(election_timeout - base_tick_interval);
// Use large peer check interval, abnormal and max leader missing duration to
// make a valid config, that is election timeout x 2 < peer stale state
// check < abnormal < max leader missing duration.
cluster.cfg.raft_store.peer_stale_state_check_interval = ReadableDuration(election_timeout * 3);
cluster.cfg.raft_store.abnormal_leader_missing_duration =
ReadableDuration(election_timeout * 4);
cluster.cfg.raft_store.max_leader_missing_duration = ReadableDuration(election_timeout * 5);
election_timeout
}
pub fn wait_for_synced(
cluster: &mut Cluster<ServerCluster<RocksEngine>, RocksEngine>,
node_id: u64,
region_id: u64,
) {
let mut storage = cluster
.sim
.read()
.unwrap()
.storages
.get(&node_id)
.unwrap()
.clone();
let leader = cluster.leader_of_region(region_id).unwrap();
let epoch = cluster.get_region_epoch(region_id);
let mut ctx = Context::default();
ctx.set_region_id(region_id);
ctx.set_peer(leader);
ctx.set_region_epoch(epoch);
let snap_ctx = SnapContext {
pb_ctx: &ctx,
..Default::default()
};
let snapshot = storage.snapshot(snap_ctx).unwrap();
let txn_ext = snapshot.txn_ext.clone().unwrap();
for retry in 0..10 {
if txn_ext.is_max_ts_synced() {
break;
}
thread::sleep(Duration::from_millis(1 << retry));
}
assert!(snapshot.ext().is_max_ts_synced());
}
// Issue a read request on the specified peer.
pub fn read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
cluster.read(None, request, timeout)
}
pub fn async_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
replica_read: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
request.mut_header().set_replica_read(replica_read);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
pub fn batch_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
requests: &[(metapb::Peer, metapb::Region)],
) -> Vec<ReadResponse<<EK as KvEngine>::Snapshot>> {
let mut results = vec![];
for (peer, region) in requests {
let node_id = peer.get_store_id();
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_snap_cmd()],
false,
);
request.mut_header().set_peer(peer.clone());
let snap = cluster.sim.wl().async_snapshot(node_id, request);
let resp = block_on_timeout(
Box::pin(async move {
match snap.await {
Ok(snap) => ReadResponse {
response: Default::default(),
snapshot: Some(snap),
txn_extra_op: Default::default(),
},
Err(resp) => ReadResponse {
response: resp,
snapshot: None,
txn_extra_op: Default::default(),
},
}
}),
Duration::from_secs(1),
)
.unwrap();
results.push(resp);
}
results
}
pub fn async_read_index_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut cmd = new_get_cmd(key);
cmd.mut_read_index().set_start_ts(u64::MAX);
cmd.mut_read_index()
.mut_key_ranges()
.push(point_key_range(Key::from_raw(key)));
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![cmd],
read_quorum,
);
// Use replica read to issue a read index.
request.mut_header().set_replica_read(true);
request.mut_header().set_peer(peer);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
pub fn async_command_on_node<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
request: RaftCmdRequest,
) -> BoxFuture<'static, RaftCmdResponse> {
cluster.sim.wl().async_command_on_node(node_id, request)
}
pub fn test_delete_range<T: Simulator<EK>, EK: KvEngine>(cluster: &mut Cluster<T, EK>, cf: CfName) |
pub fn must_get_value(resp: &RaftCmdResponse) -> Vec<u8> {
if resp.get_header().has_error() {
panic!("failed to read {:?}", resp);
}
assert_eq!(resp.get_responses().len(), 1);
assert_eq!(resp.get_responses()[0].get_cmd_type(), CmdType::Get);
assert!(resp.get_responses()[0].has_get());
resp.get_responses()[0].get_get().get_value().to_vec()
}
pub fn must_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
value: &[u8],
) {
let timeout = Duration::from_secs(5);
match read_on_peer(cluster, peer, region, key, false, timeout) {
Ok(ref resp) if value == must_get_value(resp).as_slice() => (),
other => panic!(
"read key {}, expect value {:?}, got {:?}",
log_wrappers::hex_encode_upper(key),
value,
other
),
}
}
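// Hedged sketch tying the helpers together: write through put_with_timeout (defined
// later in this file), then assert the value is visible via must_read_on_peer. The
// node_id and peer are assumed to come from the surrounding test.
fn _example_put_then_read(
    cluster: &mut Cluster<ServerCluster<RocksEngine>, RocksEngine>,
    node_id: u64,
    peer: metapb::Peer,
) {
    let resp =
        put_with_timeout(cluster, node_id, b"k1", b"v1", Duration::from_secs(5)).unwrap();
    assert!(!resp.get_header().has_error(), "{:?}", resp);
    let region = cluster.get_region(b"k1");
    must_read_on_peer(cluster, peer, region, b"k1", b"v1");
}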
pub fn must_error_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
timeout: Duration,
) {
if let Ok(mut resp) = read_on_peer(cluster, peer, region, key, false, timeout) {
if !resp.get_header().has_error() {
let value = resp.mut_responses()[0].mut_get().take_value();
panic!(
"key {}, expect error but got {}",
log_wrappers::hex_encode_upper(key),
escape(&value)
);
}
}
}
pub fn put_with_timeout<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
key: &[u8],
value: &[u8],
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut region = cluster.get_region(key);
let region_id = region.get_id();
let mut req = new_request(
region_id,
region.take_region_epoch(),
vec![new_put_cf_cmd(CF_DEFAULT, key, value)],
false,
);
req.mut_header().set_peer(
region
.get_peers()
.iter()
.find(|p| p.store_id == node_id)
.unwrap()
.clone(),
);
cluster.call_command_on_node(node_id, req, timeout)
}
| {
let data_set: Vec<_> = (1..500)
.map(|i| {
(
format!("key{:08}", i).into_bytes(),
format!("value{}", i).into_bytes(),
)
})
.collect();
for kvs in data_set.chunks(50) {
let requests = kvs.iter().map(|(k, v)| new_put_cf_cmd(cf, k, v)).collect();
// key9 is always the last region.
cluster.batch_put(b"key9", requests).unwrap();
}
// delete_range request with notify_only set should not actually delete data.
cluster.must_notify_delete_range_cf(cf, b"", b"");
let mut rng = rand::thread_rng();
for _ in 0..50 {
let (k, v) = data_set.choose(&mut rng).unwrap();
assert_eq!(cluster.get_cf(cf, k).unwrap(), *v);
}
    // Empty keys mean the whole range.
cluster.must_delete_range_cf(cf, b"", b"");
for _ in 0..50 {
let k = &data_set.choose(&mut rng).unwrap().0;
assert!(cluster.get_cf(cf, k).is_none());
}
} | identifier_body |
util.rs | // Copyright 2022 TiKV Project Authors. Licensed under Apache-2.0.
use std::{fmt::Write, path::Path, sync::Arc, thread, time::Duration};
use encryption_export::{data_key_manager_from_config, DataKeyManager};
use engine_rocks::{RocksEngine, RocksStatistics};
use engine_test::raft::RaftTestEngine;
use engine_traits::{CfName, KvEngine, TabletRegistry, CF_DEFAULT};
use file_system::IoRateLimiter;
use futures::future::BoxFuture;
use kvproto::{
encryptionpb::EncryptionMethod,
kvrpcpb::Context,
metapb,
raft_cmdpb::{CmdType, RaftCmdRequest, RaftCmdResponse},
};
use raftstore::{store::ReadResponse, Result};
use rand::{prelude::SliceRandom, RngCore};
use server::common::ConfiguredRaftEngine;
use tempfile::TempDir;
use test_raftstore::{new_get_cmd, new_put_cf_cmd, new_request, new_snap_cmd, Config};
use tikv::{
server::KvEngineFactoryBuilder,
storage::{
kv::{SnapContext, SnapshotExt},
point_key_range, Engine, Snapshot,
},
};
use tikv_util::{
config::ReadableDuration, escape, future::block_on_timeout, worker::LazyWorker, HandyRwLock,
};
use txn_types::Key;
use crate::{bootstrap_store, cluster::Cluster, ServerCluster, Simulator};
pub fn create_test_engine(
// TODO: pass it in for all cases.
id: Option<(u64, u64)>,
limiter: Option<Arc<IoRateLimiter>>,
cfg: &Config,
) -> (
TabletRegistry<RocksEngine>,
RaftTestEngine,
Option<Arc<DataKeyManager>>,
TempDir,
LazyWorker<String>,
Arc<RocksStatistics>,
Option<Arc<RocksStatistics>>,
) {
let dir = test_util::temp_dir("test_cluster", cfg.prefer_mem);
let mut cfg = cfg.clone();
cfg.storage.data_dir = dir.path().to_str().unwrap().to_string();
cfg.raft_store.raftdb_path = cfg.infer_raft_db_path(None).unwrap();
cfg.raft_engine.mut_config().dir = cfg.infer_raft_engine_path(None).unwrap();
let key_manager =
data_key_manager_from_config(&cfg.security.encryption, dir.path().to_str().unwrap())
.unwrap()
.map(Arc::new);
let cache = cfg.storage.block_cache.build_shared_cache();
let env = cfg
.build_shared_rocks_env(key_manager.clone(), limiter)
.unwrap();
let sst_worker = LazyWorker::new("sst-recovery");
let scheduler = sst_worker.scheduler();
let (raft_engine, raft_statistics) = RaftTestEngine::build(&cfg, &env, &key_manager, &cache);
if let Some((cluster_id, store_id)) = id {
assert_ne!(store_id, 0);
bootstrap_store(&raft_engine, cluster_id, store_id).unwrap();
}
let builder = KvEngineFactoryBuilder::new(env, &cfg.tikv, cache, key_manager.clone())
.sst_recovery_sender(Some(scheduler));
let factory = Box::new(builder.build());
let rocks_statistics = factory.rocks_statistics();
let reg = TabletRegistry::new(factory, dir.path().join("tablet")).unwrap();
(
reg,
raft_engine,
key_manager,
dir,
sst_worker,
rocks_statistics,
raft_statistics,
)
}
/// Keep putting random kvs until specified size limit is reached.
pub fn put_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
put_cf_till_size(cluster, CF_DEFAULT, limit, range)
}
pub fn put_cf_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
cf: &'static str,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
assert!(limit > 0);
let mut len = 0;
let mut rng = rand::thread_rng();
let mut key = String::new();
let mut value = vec![0; 64];
while len < limit {
let batch_size = std::cmp::min(1024, limit - len);
let mut reqs = vec![];
for _ in 0..batch_size / 74 + 1 {
key.clear();
let key_id = range.next().unwrap();
write!(key, "{:09}", key_id).unwrap();
rng.fill_bytes(&mut value);
// plus 1 for the extra encoding prefix
len += key.len() as u64 + 1;
len += value.len() as u64;
reqs.push(new_put_cf_cmd(cf, key.as_bytes(), &value));
}
cluster.batch_put(key.as_bytes(), reqs).unwrap();
// Approximate size of memtable is inaccurate for small data,
// we flush it to SST so we can use the size properties instead.
cluster.must_flush_cf(cf, true);
}
key.into_bytes()
}
pub fn | (config: &mut Config) {
let manifest_dir = Path::new(env!("CARGO_MANIFEST_DIR"));
let cfg = &mut config.security.encryption;
cfg.data_encryption_method = EncryptionMethod::Aes128Ctr;
cfg.data_key_rotation_period = ReadableDuration(Duration::from_millis(100));
cfg.master_key = test_util::new_test_file_master_key(manifest_dir);
}
pub fn configure_for_snapshot(config: &mut Config) {
// Truncate the log quickly so that we can force sending snapshot.
config.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(20);
config.raft_store.raft_log_gc_count_limit = Some(2);
config.raft_store.merge_max_log_gap = 1;
config.raft_store.snap_mgr_gc_tick_interval = ReadableDuration::millis(50);
configure_for_encryption(config);
}
pub fn configure_for_lease_read_v2<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
base_tick_ms: Option<u64>,
election_ticks: Option<usize>,
) -> Duration {
if let Some(base_tick_ms) = base_tick_ms {
cluster.cfg.raft_store.raft_base_tick_interval = ReadableDuration::millis(base_tick_ms);
}
let base_tick_interval = cluster.cfg.raft_store.raft_base_tick_interval.0;
if let Some(election_ticks) = election_ticks {
cluster.cfg.raft_store.raft_election_timeout_ticks = election_ticks;
}
let election_ticks = cluster.cfg.raft_store.raft_election_timeout_ticks as u32;
let election_timeout = base_tick_interval * election_ticks;
// Adjust max leader lease.
cluster.cfg.raft_store.raft_store_max_leader_lease =
ReadableDuration(election_timeout - base_tick_interval);
    // Use a large peer stale state check interval and large abnormal / max leader
    // missing durations to keep the config valid, i.e. election timeout x 2 <
    // peer stale state check < abnormal leader missing < max leader missing duration.
cluster.cfg.raft_store.peer_stale_state_check_interval = ReadableDuration(election_timeout * 3);
cluster.cfg.raft_store.abnormal_leader_missing_duration =
ReadableDuration(election_timeout * 4);
cluster.cfg.raft_store.max_leader_missing_duration = ReadableDuration(election_timeout * 5);
election_timeout
}
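// Worked example (assumed values, not taken from the test suite): with a 50ms
// base tick and 10 election ticks, the settings above become election_timeout =
// 500ms, max_leader_lease = 450ms, peer_stale_state_check = 1500ms,
// abnormal_leader_missing = 2000ms and max_leader_missing = 2500ms, satisfying
// election timeout x 2 < stale state check < abnormal < max leader missing.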
pub fn wait_for_synced(
cluster: &mut Cluster<ServerCluster<RocksEngine>, RocksEngine>,
node_id: u64,
region_id: u64,
) {
let mut storage = cluster
.sim
.read()
.unwrap()
.storages
.get(&node_id)
.unwrap()
.clone();
let leader = cluster.leader_of_region(region_id).unwrap();
let epoch = cluster.get_region_epoch(region_id);
let mut ctx = Context::default();
ctx.set_region_id(region_id);
ctx.set_peer(leader);
ctx.set_region_epoch(epoch);
let snap_ctx = SnapContext {
pb_ctx: &ctx,
..Default::default()
};
let snapshot = storage.snapshot(snap_ctx).unwrap();
let txn_ext = snapshot.txn_ext.clone().unwrap();
for retry in 0..10 {
if txn_ext.is_max_ts_synced() {
break;
}
thread::sleep(Duration::from_millis(1 << retry));
}
assert!(snapshot.ext().is_max_ts_synced());
}
// Issue a read request on the specified peer.
pub fn read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
cluster.read(None, request, timeout)
}
pub fn async_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
replica_read: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
request.mut_header().set_replica_read(replica_read);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
pub fn batch_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
requests: &[(metapb::Peer, metapb::Region)],
) -> Vec<ReadResponse<<EK as KvEngine>::Snapshot>> {
let mut results = vec![];
for (peer, region) in requests {
let node_id = peer.get_store_id();
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_snap_cmd()],
false,
);
request.mut_header().set_peer(peer.clone());
let snap = cluster.sim.wl().async_snapshot(node_id, request);
let resp = block_on_timeout(
Box::pin(async move {
match snap.await {
Ok(snap) => ReadResponse {
response: Default::default(),
snapshot: Some(snap),
txn_extra_op: Default::default(),
},
Err(resp) => ReadResponse {
response: resp,
snapshot: None,
txn_extra_op: Default::default(),
},
}
}),
Duration::from_secs(1),
)
.unwrap();
results.push(resp);
}
results
}
pub fn async_read_index_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut cmd = new_get_cmd(key);
cmd.mut_read_index().set_start_ts(u64::MAX);
cmd.mut_read_index()
.mut_key_ranges()
.push(point_key_range(Key::from_raw(key)));
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![cmd],
read_quorum,
);
// Use replica read to issue a read index.
request.mut_header().set_replica_read(true);
request.mut_header().set_peer(peer);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
pub fn async_command_on_node<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
request: RaftCmdRequest,
) -> BoxFuture<'static, RaftCmdResponse> {
cluster.sim.wl().async_command_on_node(node_id, request)
}
pub fn test_delete_range<T: Simulator<EK>, EK: KvEngine>(cluster: &mut Cluster<T, EK>, cf: CfName) {
let data_set: Vec<_> = (1..500)
.map(|i| {
(
format!("key{:08}", i).into_bytes(),
format!("value{}", i).into_bytes(),
)
})
.collect();
for kvs in data_set.chunks(50) {
let requests = kvs.iter().map(|(k, v)| new_put_cf_cmd(cf, k, v)).collect();
// key9 is always the last region.
cluster.batch_put(b"key9", requests).unwrap();
}
// delete_range request with notify_only set should not actually delete data.
cluster.must_notify_delete_range_cf(cf, b"", b"");
let mut rng = rand::thread_rng();
for _ in 0..50 {
let (k, v) = data_set.choose(&mut rng).unwrap();
assert_eq!(cluster.get_cf(cf, k).unwrap(), *v);
}
// Empty keys means the whole range.
cluster.must_delete_range_cf(cf, b"", b"");
for _ in 0..50 {
let k = &data_set.choose(&mut rng).unwrap().0;
assert!(cluster.get_cf(cf, k).is_none());
}
}
pub fn must_get_value(resp: &RaftCmdResponse) -> Vec<u8> {
if resp.get_header().has_error() {
panic!("failed to read {:?}", resp);
}
assert_eq!(resp.get_responses().len(), 1);
assert_eq!(resp.get_responses()[0].get_cmd_type(), CmdType::Get);
assert!(resp.get_responses()[0].has_get());
resp.get_responses()[0].get_get().get_value().to_vec()
}
pub fn must_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
value: &[u8],
) {
let timeout = Duration::from_secs(5);
match read_on_peer(cluster, peer, region, key, false, timeout) {
Ok(ref resp) if value == must_get_value(resp).as_slice() => (),
other => panic!(
"read key {}, expect value {:?}, got {:?}",
log_wrappers::hex_encode_upper(key),
value,
other
),
}
}
pub fn must_error_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
timeout: Duration,
) {
if let Ok(mut resp) = read_on_peer(cluster, peer, region, key, false, timeout) {
if !resp.get_header().has_error() {
let value = resp.mut_responses()[0].mut_get().take_value();
panic!(
"key {}, expect error but got {}",
log_wrappers::hex_encode_upper(key),
escape(&value)
);
}
}
}
pub fn put_with_timeout<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
key: &[u8],
value: &[u8],
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut region = cluster.get_region(key);
let region_id = region.get_id();
let mut req = new_request(
region_id,
region.take_region_epoch(),
vec![new_put_cf_cmd(CF_DEFAULT, key, value)],
false,
);
req.mut_header().set_peer(
region
.get_peers()
.iter()
.find(|p| p.store_id == node_id)
.unwrap()
.clone(),
);
cluster.call_command_on_node(node_id, req, timeout)
}
| configure_for_encryption | identifier_name |
generate_feature.py | import os
import cv2
import dlib
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy import signal
import pandas as pd
import datetime
from RWCSV import rw_csv
detector_face_cut = cv2.CascadeClassifier('F:/data/haarcascade_frontalface_default.xml')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('F:/data/shape_predictor_68_face_landmarks.dat')
def face_cut(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = detector_face_cut.detectMultiScale(gray, 1.1, 5)
c = 1
y = 1.1
while len(faces) == 0 or faces[0][2] * faces[0][3] < 200 * 200:
        print('no face detected')
if c % 2 == 1:
x = 5
else:
x = 3
faces = detector_face_cut.detectMultiScale(gray, y, x)
c += 1
if c > 10:
y = 1.1
if c > 20:
y = 1.2
if c > 30:
y = 1.3
if c > 40:
y = 1.4
if c > 50:
y = 1.5
    print('face detected')
x = faces[0][0]
y = faces[0][1]
w = faces[0][2]
h = faces[0][3]
# temp_img = img[y:y + h - 1, x:x + w - 1]
return x, y, w, h
def face_detector(frame):
| = frame
img = frame
rects = detector(img_gray, 0)
face_key_point = np.empty([0, 1, 2], dtype=np.float32)
for i in range(len(rects)):
landmarks = np.matrix([[p.x, p.y] for p in predictor(img, rects[i]).parts()])
for idx, point in enumerate(landmarks):
pos = (point[0, 0], point[0, 1])
# print(idx,pos)
temp_point = np.empty([1, 1, 2], dtype=np.float32)
temp_point[0, 0] = pos
face_key_point = np.concatenate([face_key_point, temp_point])
return face_key_point
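# NOTE: solve() below calls calcOpticalFlow(), which is neither defined nor
# imported in this file. The helper below is only an assumed sketch based on
# OpenCV's dense Farneback optical flow; the parameter values are illustrative
# and may differ from the original implementation.
def calcOpticalFlow(pre_frame, frame):
    pre_gray = cv2.cvtColor(pre_frame, cv2.COLOR_BGR2GRAY)
    cur_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # positional arguments: prev, next, flow, pyr_scale, levels, winsize,
    # iterations, poly_n, poly_sigma, flags
    # returns an H x W x 2 array of per-pixel displacement vectors
    return cv2.calcOpticalFlowFarneback(pre_gray, cur_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)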
def get_point(face_key_point, eyebrow_index):
temp_point = []
for i in range(len(eyebrow_index)):
p = eyebrow_index[i]
temp_point.append(face_key_point[p][0])
return temp_point
def extractROI(flow, center, margin):
ROI_mod = []
ROI_angle = []
ROI_flow = []
for k in range(len(center)):
x = int(center[k][0] - margin)
y = int(center[k][1] - margin)
for i in range(margin * 2):
for j in range(margin * 2):
v = flow[x + i][y + j]
temp_m = np.sqrt(np.dot(np.transpose(v), v))
temp_a = math.atan2(v[1], v[0])
ROI_mod.append(temp_m)
ROI_angle.append(temp_a)
ROI_flow.append(v)
return ROI_flow, ROI_mod, ROI_angle
def globalMovment(base_ROI):
n = len(base_ROI)
for i in range(n):
temp_flow = base_ROI[i]
v = np.sqrt(np.dot(np.transpose(temp_flow), temp_flow))
if i == 0:
sum_flow = temp_flow
sum_mod = v
else:
sum_flow += temp_flow
sum_mod += v
one_norm = sum_flow / (math.sqrt(math.pow(sum_flow[0], 2) + math.pow(sum_flow[1], 2)))
mod_mean = sum_mod / n
global_movment = one_norm * mod_mean
return global_movment
def removeGlobal(flow, global_movment):
x = np.zeros_like(flow)
T = np.full(x.shape, global_movment)
flow -= T
return flow
def angle_domin(domin_count):
pi = np.pi
dur = pi / (domin_count / 2)
left = 0
area = []
for i in range(int(domin_count / 2)):
right = left + dur
area.append([left, right])
left += dur
left = -pi
for i in range(int(domin_count / 2)):
right = left + dur
area.append([left, right])
left += dur
return area
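# Example (illustrative): with domin_count = 6, angle_domin() returns six
# pi/3-wide intervals: [0, pi/3], [pi/3, 2*pi/3], [2*pi/3, pi] for positive
# angles plus [-pi, -2*pi/3], [-2*pi/3, -pi/3], [-pi/3, 0] for negative ones,
# so the atan2 values used by getMean() below are partitioned into six bins.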
def getMean(ROI_flow, ROI_mod, ROI_angle):
domin_count = 6
n = len(ROI_mod)
area = angle_domin(domin_count)
max = 0
bin = 0
v_sum = None
c = 0
for i in range(len(area)):
mod_sum = 0
flow_sum = np.array([0, 0], dtype=np.float32)
count = 0
if len(area[i]) == 2:
left = area[i][0]
right = area[i][1]
for j in range(n):
if left <= ROI_angle[j] < right:
count += 1
mod_sum += abs(ROI_mod[j])
flow_sum[0] = flow_sum[0] + ROI_flow[j][0]
flow_sum[1] = flow_sum[1] + ROI_flow[j][1]
if len(area[i]) == 4:
left1 = area[i][0]
right1 = area[i][1]
left2 = area[i][2]
right2 = area[i][3]
for k in range(n):
if left1 <= ROI_angle[k] <= right1 or left2 <= ROI_angle[k] < right2:
count += 1
mod_sum += abs(ROI_mod[k])
                    flow_sum[0] = flow_sum[0] + ROI_flow[k][0]
                    flow_sum[1] = flow_sum[1] + ROI_flow[k][1]
if mod_sum > max:
max = mod_sum
bin = i + 1
v_sum = flow_sum
c = count
mod_mean = max / c
angle_mean = math.atan2(v_sum[1], v_sum[0])
return mod_mean, angle_mean, bin
def get_page(label_path):
excel = pd.ExcelFile(label_path)
pages = excel.sheet_names
page0 = np.array(pd.read_excel(label_path, pages[0]))
page1 = np.array(pd.read_excel(label_path, pages[1]))
page2 = np.array(pd.read_excel(label_path, pages[2]))
return page0, page1, page2
def generate_label(path, s, code, page0, page1, page2):
cap = cv2.VideoCapture(path)
total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
column0 = page1[:, 0:1].reshape((-1))
s = int(s)
idx = np.argwhere(column0 == s)
first_idx = idx[0][0]
convert_s = page1[first_idx][2]
column0 = page2[:, 0:1].reshape((-1))
idx = np.argwhere(column0 == int(code[1:]))
first_idx = idx[0][0]
convert_code = page2[first_idx][1]
# print(s, convert_s, code, convert_code)
column0 = page0[:, 0:1].reshape((-1))
idx = np.argwhere(column0 == convert_s)
column1 = page0[:, 1:2].reshape((-1))
column1 = column1[idx[0][0]: idx[-1][0] + 1]
base_idx = idx[0][0]
for i in range(column1.shape[0]):
column1[i] = column1[i][0:-2]
if column1[i][-1] == '_':
column1[i] = column1[i][0:-1]
# print(column1)
idx = np.argwhere(column1 == convert_code)
express_list = []
for i in range(idx.shape[0]):
current = idx[i][0] + base_idx
start = page0[current][2]
peak = page0[current][3]
end = page0[current][4]
if end == 0:
end = peak
express_list.append([start, end])
label_list = np.zeros(total_frame)
for i in range(len(express_list)):
start = express_list[i][0]
end = express_list[i][1]
for j in range(end - start + 1):
label_list[start - 1 + j] = 1
print(express_list)
return label_list
def solve(video_path, temp_label):
print(video_path)
eyebrow_index = np.array([18, 19, 20, 23, 24, 25], dtype=np.int8)
nose_index = np.array([30], dtype=np.int8)
mouth_index = np.array([48, 51, 54, 57], dtype=np.int8)
point_index = np.concatenate([eyebrow_index, nose_index])
point_index = np.concatenate([point_index, mouth_index])
base_index = np.array([28], dtype=np.int8)
point_index = np.concatenate([point_index, base_index])
cap = cv2.VideoCapture(video_path)
total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
pre_frame = None
frame = None
margin = 8
start = 0
feature = None
frame_count = total_frame
for i in range(frame_count):
ret, temp_frame = cap.read()
if i == start:
x, y, w, h = face_cut(temp_frame)
temp_img = temp_frame[y:y + h, x:x + w]
pre_frame = temp_img
face_key_point = face_detector(temp_img)
if i > start:
temp_img = temp_frame[y:y + h, x:x + w]
frame = temp_img
face_key_point = face_detector(temp_img)
flow = calcOpticalFlow(pre_frame, frame)
center = get_point(face_key_point, point_index)
base_center = get_point(face_key_point, base_index)
base_ROI_flow, _, _ = extractROI(flow, base_center, margin)
global_movment = globalMovment(base_ROI_flow)
flow = removeGlobal(flow, global_movment)
for j in range(len(center)):
ROI_flow, ROI_mod, ROI_angle = extractROI(flow, [center[j]], margin)
mod_mean, angle_mean, bin = getMean(ROI_flow, ROI_mod, ROI_angle)
if j == 0:
m_a = np.array([mod_mean, angle_mean], dtype=np.float32)
else:
m_a = np.concatenate([m_a, [mod_mean, angle_mean]])
label = np.array([temp_label[i]])
m_a = np.concatenate([m_a, label])
m_a = np.array([m_a])
if i == 1:
feature = m_a
if i > 1:
feature = np.concatenate([feature, m_a])
pre_frame = frame
print(feature.shape)
return feature
if __name__ == '__main__':
label_path = ''
base_path = ''
dirs = os.listdir(base_path)
page0, page1, page2 = get_page(label_path)
for i in range(len(dirs)):
dir_path = 'F:/feature/' + dirs[i]
if not os.path.exists(dir_path):
print('create ' + dir_path)
os.mkdir(dir_path)
current_path = base_path + dirs[i]
files = os.listdir(current_path)
count = 0
if i == len(dirs)-1:
for j in range(len(files)):
count += 1
print(count)
s = files[j][0:2]
code = files[j][3:7]
current_path = base_path + dirs[i] + '/' + files[j]
p0 = page0.copy()
p1 = page1.copy()
p2 = page2.copy()
temp_label = generate_label(current_path, s, code, p0, p1, p2)
temp_feature = solve(current_path, temp_label)
current_save_path = dir_path + '/' + files[j][:-4] + '.xlsx'
rw_csv.save_data_to_excel(temp_feature, current_save_path)
|
img_gray | identifier_name |
generate_feature.py | import os
import cv2
import dlib
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy import signal
import pandas as pd
import datetime
from RWCSV import rw_csv
detector_face_cut = cv2.CascadeClassifier('F:/data/haarcascade_frontalface_default.xml')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('F:/data/shape_predictor_68_face_landmarks.dat')
def face_cut(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = detector_face_cut.detectMultiScale(gray, 1.1, 5)
c = 1
y = 1.1
while len(faces) == 0 or faces[0][2] * faces[0][3] < 200 * 200:
        print('no face detected')
if c % 2 == 1:
x = 5
else:
x = 3
faces = detector_face_cut.detectMultiScale(gray, y, x)
c += 1
if c > 10:
y = 1.1
if c > 20:
y = 1.2
if c > 30:
y = 1.3
| if c > 40:
y = 1.4
if c > 50:
y = 1.5
    print('face detected')
x = faces[0][0]
y = faces[0][1]
w = faces[0][2]
h = faces[0][3]
# temp_img = img[y:y + h - 1, x:x + w - 1]
return x, y, w, h
def face_detector(frame):
img_gray = frame
img = frame
rects = detector(img_gray, 0)
face_key_point = np.empty([0, 1, 2], dtype=np.float32)
for i in range(len(rects)):
landmarks = np.matrix([[p.x, p.y] for p in predictor(img, rects[i]).parts()])
for idx, point in enumerate(landmarks):
pos = (point[0, 0], point[0, 1])
# print(idx,pos)
temp_point = np.empty([1, 1, 2], dtype=np.float32)
temp_point[0, 0] = pos
face_key_point = np.concatenate([face_key_point, temp_point])
return face_key_point
def get_point(face_key_point, eyebrow_index):
temp_point = []
for i in range(len(eyebrow_index)):
p = eyebrow_index[i]
temp_point.append(face_key_point[p][0])
return temp_point
def extractROI(flow, center, margin):
ROI_mod = []
ROI_angle = []
ROI_flow = []
for k in range(len(center)):
x = int(center[k][0] - margin)
y = int(center[k][1] - margin)
for i in range(margin * 2):
for j in range(margin * 2):
v = flow[x + i][y + j]
temp_m = np.sqrt(np.dot(np.transpose(v), v))
temp_a = math.atan2(v[1], v[0])
ROI_mod.append(temp_m)
ROI_angle.append(temp_a)
ROI_flow.append(v)
return ROI_flow, ROI_mod, ROI_angle
def globalMovment(base_ROI):
n = len(base_ROI)
for i in range(n):
temp_flow = base_ROI[i]
v = np.sqrt(np.dot(np.transpose(temp_flow), temp_flow))
if i == 0:
sum_flow = temp_flow
sum_mod = v
else:
sum_flow += temp_flow
sum_mod += v
one_norm = sum_flow / (math.sqrt(math.pow(sum_flow[0], 2) + math.pow(sum_flow[1], 2)))
mod_mean = sum_mod / n
global_movment = one_norm * mod_mean
return global_movment
def removeGlobal(flow, global_movment):
x = np.zeros_like(flow)
T = np.full(x.shape, global_movment)
flow -= T
return flow
def angle_domin(domin_count):
pi = np.pi
dur = pi / (domin_count / 2)
left = 0
area = []
for i in range(int(domin_count / 2)):
right = left + dur
area.append([left, right])
left += dur
left = -pi
for i in range(int(domin_count / 2)):
right = left + dur
area.append([left, right])
left += dur
return area
def getMean(ROI_flow, ROI_mod, ROI_angle):
domin_count = 6
n = len(ROI_mod)
area = angle_domin(domin_count)
max = 0
bin = 0
v_sum = None
c = 0
for i in range(len(area)):
mod_sum = 0
flow_sum = np.array([0, 0], dtype=np.float32)
count = 0
if len(area[i]) == 2:
left = area[i][0]
right = area[i][1]
for j in range(n):
if left <= ROI_angle[j] < right:
count += 1
mod_sum += abs(ROI_mod[j])
flow_sum[0] = flow_sum[0] + ROI_flow[j][0]
flow_sum[1] = flow_sum[1] + ROI_flow[j][1]
if len(area[i]) == 4:
left1 = area[i][0]
right1 = area[i][1]
left2 = area[i][2]
right2 = area[i][3]
for k in range(n):
if left1 <= ROI_angle[k] <= right1 or left2 <= ROI_angle[k] < right2:
count += 1
mod_sum += abs(ROI_mod[k])
                    flow_sum[0] = flow_sum[0] + ROI_flow[k][0]
                    flow_sum[1] = flow_sum[1] + ROI_flow[k][1]
if mod_sum > max:
max = mod_sum
bin = i + 1
v_sum = flow_sum
c = count
mod_mean = max / c
angle_mean = math.atan2(v_sum[1], v_sum[0])
return mod_mean, angle_mean, bin
def get_page(label_path):
excel = pd.ExcelFile(label_path)
pages = excel.sheet_names
page0 = np.array(pd.read_excel(label_path, pages[0]))
page1 = np.array(pd.read_excel(label_path, pages[1]))
page2 = np.array(pd.read_excel(label_path, pages[2]))
return page0, page1, page2
def generate_label(path, s, code, page0, page1, page2):
cap = cv2.VideoCapture(path)
total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
column0 = page1[:, 0:1].reshape((-1))
s = int(s)
idx = np.argwhere(column0 == s)
first_idx = idx[0][0]
convert_s = page1[first_idx][2]
column0 = page2[:, 0:1].reshape((-1))
idx = np.argwhere(column0 == int(code[1:]))
first_idx = idx[0][0]
convert_code = page2[first_idx][1]
# print(s, convert_s, code, convert_code)
column0 = page0[:, 0:1].reshape((-1))
idx = np.argwhere(column0 == convert_s)
column1 = page0[:, 1:2].reshape((-1))
column1 = column1[idx[0][0]: idx[-1][0] + 1]
base_idx = idx[0][0]
for i in range(column1.shape[0]):
column1[i] = column1[i][0:-2]
if column1[i][-1] == '_':
column1[i] = column1[i][0:-1]
# print(column1)
idx = np.argwhere(column1 == convert_code)
express_list = []
for i in range(idx.shape[0]):
current = idx[i][0] + base_idx
start = page0[current][2]
peak = page0[current][3]
end = page0[current][4]
if end == 0:
end = peak
express_list.append([start, end])
label_list = np.zeros(total_frame)
for i in range(len(express_list)):
start = express_list[i][0]
end = express_list[i][1]
for j in range(end - start + 1):
label_list[start - 1 + j] = 1
print(express_list)
return label_list
def solve(video_path, temp_label):
print(video_path)
eyebrow_index = np.array([18, 19, 20, 23, 24, 25], dtype=np.int8)
nose_index = np.array([30], dtype=np.int8)
mouth_index = np.array([48, 51, 54, 57], dtype=np.int8)
point_index = np.concatenate([eyebrow_index, nose_index])
point_index = np.concatenate([point_index, mouth_index])
base_index = np.array([28], dtype=np.int8)
point_index = np.concatenate([point_index, base_index])
cap = cv2.VideoCapture(video_path)
total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
pre_frame = None
frame = None
margin = 8
start = 0
feature = None
frame_count = total_frame
for i in range(frame_count):
ret, temp_frame = cap.read()
if i == start:
x, y, w, h = face_cut(temp_frame)
temp_img = temp_frame[y:y + h, x:x + w]
pre_frame = temp_img
face_key_point = face_detector(temp_img)
if i > start:
temp_img = temp_frame[y:y + h, x:x + w]
frame = temp_img
face_key_point = face_detector(temp_img)
flow = calcOpticalFlow(pre_frame, frame)
center = get_point(face_key_point, point_index)
base_center = get_point(face_key_point, base_index)
base_ROI_flow, _, _ = extractROI(flow, base_center, margin)
global_movment = globalMovment(base_ROI_flow)
flow = removeGlobal(flow, global_movment)
for j in range(len(center)):
ROI_flow, ROI_mod, ROI_angle = extractROI(flow, [center[j]], margin)
mod_mean, angle_mean, bin = getMean(ROI_flow, ROI_mod, ROI_angle)
if j == 0:
m_a = np.array([mod_mean, angle_mean], dtype=np.float32)
else:
m_a = np.concatenate([m_a, [mod_mean, angle_mean]])
label = np.array([temp_label[i]])
m_a = np.concatenate([m_a, label])
m_a = np.array([m_a])
if i == 1:
feature = m_a
if i > 1:
feature = np.concatenate([feature, m_a])
pre_frame = frame
print(feature.shape)
return feature
if __name__ == '__main__':
label_path = ''
base_path = ''
dirs = os.listdir(base_path)
page0, page1, page2 = get_page(label_path)
for i in range(len(dirs)):
dir_path = 'F:/feature/' + dirs[i]
if not os.path.exists(dir_path):
print('create ' + dir_path)
os.mkdir(dir_path)
current_path = base_path + dirs[i]
files = os.listdir(current_path)
count = 0
if i == len(dirs)-1:
for j in range(len(files)):
count += 1
print(count)
s = files[j][0:2]
code = files[j][3:7]
current_path = base_path + dirs[i] + '/' + files[j]
p0 = page0.copy()
p1 = page1.copy()
p2 = page2.copy()
temp_label = generate_label(current_path, s, code, p0, p1, p2)
temp_feature = solve(current_path, temp_label)
current_save_path = dir_path + '/' + files[j][:-4] + '.xlsx'
rw_csv.save_data_to_excel(temp_feature, current_save_path) | random_line_split |
|
generate_feature.py | import os
import cv2
import dlib
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy import signal
import pandas as pd
import datetime
from RWCSV import rw_csv
detector_face_cut = cv2.CascadeClassifier('F:/data/haarcascade_frontalface_default.xml')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('F:/data/shape_predictor_68_face_landmarks.dat')
def face_cut(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = detector_face_cut.detectMultiScale(gray, 1.1, 5)
c = 1
y = 1.1
while len(faces) == 0 or faces[0][2] * faces[0][3] < 200 * 200:
        print('no face detected')
if c % 2 == 1:
x = 5
else:
x = 3
faces = detector_face_cut.detectMultiScale(gray, y, x)
c += 1
if c > 10:
y = 1.1
if c > 20:
y = 1.2
if c > 30:
y = 1.3
| c > 40:
y = 1.4
if c > 50:
y = 1.5
    print('face detected')
x = faces[0][0]
y = faces[0][1]
w = faces[0][2]
h = faces[0][3]
# temp_img = img[y:y + h - 1, x:x + w - 1]
return x, y, w, h
def face_detector(frame):
img_gray = frame
img = frame
rects = detector(img_gray, 0)
face_key_point = np.empty([0, 1, 2], dtype=np.float32)
for i in range(len(rects)):
landmarks = np.matrix([[p.x, p.y] for p in predictor(img, rects[i]).parts()])
for idx, point in enumerate(landmarks):
pos = (point[0, 0], point[0, 1])
# print(idx,pos)
temp_point = np.empty([1, 1, 2], dtype=np.float32)
temp_point[0, 0] = pos
face_key_point = np.concatenate([face_key_point, temp_point])
return face_key_point
def get_point(face_key_point, eyebrow_index):
temp_point = []
for i in range(len(eyebrow_index)):
p = eyebrow_index[i]
temp_point.append(face_key_point[p][0])
return temp_point
def extractROI(flow, center, margin):
ROI_mod = []
ROI_angle = []
ROI_flow = []
for k in range(len(center)):
x = int(center[k][0] - margin)
y = int(center[k][1] - margin)
for i in range(margin * 2):
for j in range(margin * 2):
v = flow[x + i][y + j]
temp_m = np.sqrt(np.dot(np.transpose(v), v))
temp_a = math.atan2(v[1], v[0])
ROI_mod.append(temp_m)
ROI_angle.append(temp_a)
ROI_flow.append(v)
return ROI_flow, ROI_mod, ROI_angle
def globalMovment(base_ROI):
n = len(base_ROI)
for i in range(n):
temp_flow = base_ROI[i]
v = np.sqrt(np.dot(np.transpose(temp_flow), temp_flow))
if i == 0:
sum_flow = temp_flow
sum_mod = v
else:
sum_flow += temp_flow
sum_mod += v
one_norm = sum_flow / (math.sqrt(math.pow(sum_flow[0], 2) + math.pow(sum_flow[1], 2)))
mod_mean = sum_mod / n
global_movment = one_norm * mod_mean
return global_movment
def removeGlobal(flow, global_movment):
x = np.zeros_like(flow)
T = np.full(x.shape, global_movment)
flow -= T
return flow
def angle_domin(domin_count):
pi = np.pi
dur = pi / (domin_count / 2)
left = 0
area = []
for i in range(int(domin_count / 2)):
right = left + dur
area.append([left, right])
left += dur
left = -pi
for i in range(int(domin_count / 2)):
right = left + dur
area.append([left, right])
left += dur
return area
def getMean(ROI_flow, ROI_mod, ROI_angle):
domin_count = 6
n = len(ROI_mod)
area = angle_domin(domin_count)
max = 0
bin = 0
v_sum = None
c = 0
for i in range(len(area)):
mod_sum = 0
flow_sum = np.array([0, 0], dtype=np.float32)
count = 0
if len(area[i]) == 2:
left = area[i][0]
right = area[i][1]
for j in range(n):
if left <= ROI_angle[j] < right:
count += 1
mod_sum += abs(ROI_mod[j])
flow_sum[0] = flow_sum[0] + ROI_flow[j][0]
flow_sum[1] = flow_sum[1] + ROI_flow[j][1]
if len(area[i]) == 4:
left1 = area[i][0]
right1 = area[i][1]
left2 = area[i][2]
right2 = area[i][3]
for k in range(n):
if left1 <= ROI_angle[k] <= right1 or left2 <= ROI_angle[k] < right2:
count += 1
mod_sum += abs(ROI_mod[k])
                    flow_sum[0] = flow_sum[0] + ROI_flow[k][0]
                    flow_sum[1] = flow_sum[1] + ROI_flow[k][1]
if mod_sum > max:
max = mod_sum
bin = i + 1
v_sum = flow_sum
c = count
mod_mean = max / c
angle_mean = math.atan2(v_sum[1], v_sum[0])
return mod_mean, angle_mean, bin
def get_page(label_path):
excel = pd.ExcelFile(label_path)
pages = excel.sheet_names
page0 = np.array(pd.read_excel(label_path, pages[0]))
page1 = np.array(pd.read_excel(label_path, pages[1]))
page2 = np.array(pd.read_excel(label_path, pages[2]))
return page0, page1, page2
def generate_label(path, s, code, page0, page1, page2):
cap = cv2.VideoCapture(path)
total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
column0 = page1[:, 0:1].reshape((-1))
s = int(s)
idx = np.argwhere(column0 == s)
first_idx = idx[0][0]
convert_s = page1[first_idx][2]
column0 = page2[:, 0:1].reshape((-1))
idx = np.argwhere(column0 == int(code[1:]))
first_idx = idx[0][0]
convert_code = page2[first_idx][1]
# print(s, convert_s, code, convert_code)
column0 = page0[:, 0:1].reshape((-1))
idx = np.argwhere(column0 == convert_s)
column1 = page0[:, 1:2].reshape((-1))
column1 = column1[idx[0][0]: idx[-1][0] + 1]
base_idx = idx[0][0]
for i in range(column1.shape[0]):
column1[i] = column1[i][0:-2]
if column1[i][-1] == '_':
column1[i] = column1[i][0:-1]
# print(column1)
idx = np.argwhere(column1 == convert_code)
express_list = []
for i in range(idx.shape[0]):
current = idx[i][0] + base_idx
start = page0[current][2]
peak = page0[current][3]
end = page0[current][4]
if end == 0:
end = peak
express_list.append([start, end])
label_list = np.zeros(total_frame)
for i in range(len(express_list)):
start = express_list[i][0]
end = express_list[i][1]
for j in range(end - start + 1):
label_list[start - 1 + j] = 1
print(express_list)
return label_list
def solve(video_path, temp_label):
print(video_path)
eyebrow_index = np.array([18, 19, 20, 23, 24, 25], dtype=np.int8)
nose_index = np.array([30], dtype=np.int8)
mouth_index = np.array([48, 51, 54, 57], dtype=np.int8)
point_index = np.concatenate([eyebrow_index, nose_index])
point_index = np.concatenate([point_index, mouth_index])
base_index = np.array([28], dtype=np.int8)
point_index = np.concatenate([point_index, base_index])
cap = cv2.VideoCapture(video_path)
total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
pre_frame = None
frame = None
margin = 8
start = 0
feature = None
frame_count = total_frame
for i in range(frame_count):
ret, temp_frame = cap.read()
if i == start:
x, y, w, h = face_cut(temp_frame)
temp_img = temp_frame[y:y + h, x:x + w]
pre_frame = temp_img
face_key_point = face_detector(temp_img)
if i > start:
temp_img = temp_frame[y:y + h, x:x + w]
frame = temp_img
face_key_point = face_detector(temp_img)
flow = calcOpticalFlow(pre_frame, frame)
center = get_point(face_key_point, point_index)
base_center = get_point(face_key_point, base_index)
base_ROI_flow, _, _ = extractROI(flow, base_center, margin)
global_movment = globalMovment(base_ROI_flow)
flow = removeGlobal(flow, global_movment)
for j in range(len(center)):
ROI_flow, ROI_mod, ROI_angle = extractROI(flow, [center[j]], margin)
mod_mean, angle_mean, bin = getMean(ROI_flow, ROI_mod, ROI_angle)
if j == 0:
m_a = np.array([mod_mean, angle_mean], dtype=np.float32)
else:
m_a = np.concatenate([m_a, [mod_mean, angle_mean]])
label = np.array([temp_label[i]])
m_a = np.concatenate([m_a, label])
m_a = np.array([m_a])
if i == 1:
feature = m_a
if i > 1:
feature = np.concatenate([feature, m_a])
pre_frame = frame
print(feature.shape)
return feature
if __name__ == '__main__':
label_path = ''
base_path = ''
dirs = os.listdir(base_path)
page0, page1, page2 = get_page(label_path)
for i in range(len(dirs)):
dir_path = 'F:/feature/' + dirs[i]
if not os.path.exists(dir_path):
print('create ' + dir_path)
os.mkdir(dir_path)
current_path = base_path + dirs[i]
files = os.listdir(current_path)
count = 0
if i == len(dirs)-1:
for j in range(len(files)):
count += 1
print(count)
s = files[j][0:2]
code = files[j][3:7]
current_path = base_path + dirs[i] + '/' + files[j]
p0 = page0.copy()
p1 = page1.copy()
p2 = page2.copy()
temp_label = generate_label(current_path, s, code, p0, p1, p2)
temp_feature = solve(current_path, temp_label)
current_save_path = dir_path + '/' + files[j][:-4] + '.xlsx'
rw_csv.save_data_to_excel(temp_feature, current_save_path)
| if | conditional_block |
generate_feature.py | import os
import cv2
import dlib
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy import signal
import pandas as pd
import datetime
from RWCSV import rw_csv
detector_face_cut = cv2.CascadeClassifier('F:/data/haarcascade_frontalface_default.xml')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('F:/data/shape_predictor_68_face_landmarks.dat')
def face_cut(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = detector_face_cut.detectMultiScale(gray, 1.1, 5)
c = 1
y = 1.1
while len(faces) == 0 or faces[0][2] * faces[0][3] < 200 * 200:
        print('no face detected')
if c % 2 == 1:
x = 5
else:
x = 3
faces = detector_face_cut.detectMultiScale(gray, y, x)
c += 1
if c > 10:
y = 1.1
if c > 20:
y = 1.2
if c > 30:
y = 1.3
if c > 40:
y = 1.4
if c > 50:
y = 1.5
    print('face detected')
x = faces[0][0]
y = faces[0][1]
w = faces[0][2]
h = faces[0][3]
# temp_img = img[y:y + h - 1, x:x + w - 1]
return x, y, w, h
def face_detector(frame):
img_gray = frame
img = frame
rects = detector(img_gray, 0)
face_key_point = np.empty([0, 1, 2], dtype=np.float32)
for i in range(len(rects)):
landmarks = np.matrix([[p.x, p.y] for p in predictor(img, rects[i]).parts()])
for idx, point in enumerate(landmarks):
pos = (point[0, 0], point[0, 1])
# print(idx,pos)
temp_point = np.empty([1, 1, 2], dtype=np.float32)
temp_point[0, 0] = pos
face_key_point = np.concatenate([face_key_point, temp_point])
return face_key_point
def get_point(face_key_point, eyebrow_index):
temp_point = []
for i in range(len(eyebrow_index)):
p = eyebrow_index[i]
temp_point.append(face_key_point[p][0])
return temp_point
def extractROI(flow, center, margin):
ROI_mod = []
ROI_angle = []
ROI_flow = []
for k in range(len(center)):
x = int(center[k][0] - margin)
y = int(center[k][1] - margin)
for i in range(margin * 2):
for j in range(margin * 2):
v = flow[x + i][y + j]
temp_m = np.sqrt(np.dot(np.transpose(v), v))
temp_a = math.atan2(v[1], v[0])
ROI_mod.append(temp_m)
ROI_angle.append(temp_a)
ROI_flow.append(v)
return ROI_flow, ROI_mod, ROI_angle
def globalMovment(base_ROI):
n = len(base_ROI)
for i in range(n):
temp_flow = base_ROI[i]
v = np.sqrt(np.dot(np.transpose(temp_flow), temp_flow))
if i == 0:
sum_flow = temp_flow
sum_mod = v
else:
sum_flow += temp_flow
sum_mod += v
one_norm = sum_flow / (math.sqrt(math.pow(sum_flow[0], 2) + math.pow(sum_flow[1], 2)))
mod_mean = sum_mod / n
global_movment = one_norm * mod_mean
return global_movment
def removeGlobal(flow, global_movment):
x = np.zeros_like(flow)
T = np.full(x.shape, global_movment)
flow -= T
return flow
def angle_domin(domin_count):
pi = np.pi
dur = pi / (domin_count / 2)
left = 0
area = []
for i in range(int(domin_count / 2)):
right = left + dur
area.append([left, right])
left += dur
left = -pi
for i in range(int(domin_count / 2)):
right = left + dur
area.append([left, right])
left += dur
return area
def getMean(ROI_flow, ROI_mod, ROI_angle):
domin_count = 6
n = len(ROI_mod)
area = angle_domin(domin_count)
max = 0
bin = 0
v_sum = None
c = 0
for i in range(len(area)):
mod_sum = 0
flow_sum = np.array([0, 0], dtype=np.float32)
count = 0
if len(area[i]) == 2:
left = area[i][0]
right = area[i][1]
for j in range(n):
if left <= ROI_angle[j] < right:
count += 1
mod_sum += abs(ROI_mod[j])
flow_sum[0] = flow_sum[0] + ROI_flow[j][0]
flow_sum[1] = flow_sum[1] + ROI_flow[j][1]
if len(area[i]) == 4:
left1 = area[i][0]
right1 = area[i][1]
left2 = area[i][2]
right2 = area[i][3]
for k in range(n):
if left1 <= ROI_angle[k] <= right1 or left2 <= ROI_angle[k] < right2:
count += 1
mod_sum += abs(ROI_mod[k])
                    flow_sum[0] = flow_sum[0] + ROI_flow[k][0]
                    flow_sum[1] = flow_sum[1] + ROI_flow[k][1]
if mod_sum > max:
max = mod_sum
bin = i + 1
v_sum = flow_sum
c = count
mod_mean = max / c
angle_mean = math.atan2(v_sum[1], v_sum[0])
return mod_mean, angle_mean, bin
def get_page(label_path):
excel = pd.ExcelFile(label_path)
pages = excel.sheet_names
page0 = np.array(pd.read_excel(label_path, pages[0]))
page1 = np.array(pd.read_excel(label_path, pages[1]))
page2 = np.array(pd.read_excel(label_path, pages[2]))
return page0, page1, page2
def generate_label(path, s, code, page0, page1, page2):
cap = cv2.VideoCapture | path, temp_label):
print(video_path)
eyebrow_index = np.array([18, 19, 20, 23, 24, 25], dtype=np.int8)
nose_index = np.array([30], dtype=np.int8)
mouth_index = np.array([48, 51, 54, 57], dtype=np.int8)
point_index = np.concatenate([eyebrow_index, nose_index])
point_index = np.concatenate([point_index, mouth_index])
base_index = np.array([28], dtype=np.int8)
point_index = np.concatenate([point_index, base_index])
cap = cv2.VideoCapture(video_path)
total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
pre_frame = None
frame = None
margin = 8
start = 0
feature = None
frame_count = total_frame
for i in range(frame_count):
ret, temp_frame = cap.read()
if i == start:
x, y, w, h = face_cut(temp_frame)
temp_img = temp_frame[y:y + h, x:x + w]
pre_frame = temp_img
face_key_point = face_detector(temp_img)
if i > start:
temp_img = temp_frame[y:y + h, x:x + w]
frame = temp_img
face_key_point = face_detector(temp_img)
flow = calcOpticalFlow(pre_frame, frame)
center = get_point(face_key_point, point_index)
base_center = get_point(face_key_point, base_index)
base_ROI_flow, _, _ = extractROI(flow, base_center, margin)
global_movment = globalMovment(base_ROI_flow)
flow = removeGlobal(flow, global_movment)
for j in range(len(center)):
ROI_flow, ROI_mod, ROI_angle = extractROI(flow, [center[j]], margin)
mod_mean, angle_mean, bin = getMean(ROI_flow, ROI_mod, ROI_angle)
if j == 0:
m_a = np.array([mod_mean, angle_mean], dtype=np.float32)
else:
m_a = np.concatenate([m_a, [mod_mean, angle_mean]])
label = np.array([temp_label[i]])
m_a = np.concatenate([m_a, label])
m_a = np.array([m_a])
if i == 1:
feature = m_a
if i > 1:
feature = np.concatenate([feature, m_a])
pre_frame = frame
print(feature.shape)
return feature
if __name__ == '__main__':
label_path = ''
base_path = ''
dirs = os.listdir(base_path)
page0, page1, page2 = get_page(label_path)
for i in range(len(dirs)):
dir_path = 'F:/feature/' + dirs[i]
if not os.path.exists(dir_path):
print('create ' + dir_path)
os.mkdir(dir_path)
current_path = base_path + dirs[i]
files = os.listdir(current_path)
count = 0
if i == len(dirs)-1:
for j in range(len(files)):
count += 1
print(count)
s = files[j][0:2]
code = files[j][3:7]
current_path = base_path + dirs[i] + '/' + files[j]
p0 = page0.copy()
p1 = page1.copy()
p2 = page2.copy()
temp_label = generate_label(current_path, s, code, p0, p1, p2)
temp_feature = solve(current_path, temp_label)
current_save_path = dir_path + '/' + files[j][:-4] + '.xlsx'
rw_csv.save_data_to_excel(temp_feature, current_save_path)
| (path)
total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
column0 = page1[:, 0:1].reshape((-1))
s = int(s)
idx = np.argwhere(column0 == s)
first_idx = idx[0][0]
convert_s = page1[first_idx][2]
column0 = page2[:, 0:1].reshape((-1))
idx = np.argwhere(column0 == int(code[1:]))
first_idx = idx[0][0]
convert_code = page2[first_idx][1]
# print(s, convert_s, code, convert_code)
column0 = page0[:, 0:1].reshape((-1))
idx = np.argwhere(column0 == convert_s)
column1 = page0[:, 1:2].reshape((-1))
column1 = column1[idx[0][0]: idx[-1][0] + 1]
base_idx = idx[0][0]
for i in range(column1.shape[0]):
column1[i] = column1[i][0:-2]
if column1[i][-1] == '_':
column1[i] = column1[i][0:-1]
# print(column1)
idx = np.argwhere(column1 == convert_code)
express_list = []
for i in range(idx.shape[0]):
current = idx[i][0] + base_idx
start = page0[current][2]
peak = page0[current][3]
end = page0[current][4]
if end == 0:
end = peak
express_list.append([start, end])
label_list = np.zeros(total_frame)
for i in range(len(express_list)):
start = express_list[i][0]
end = express_list[i][1]
for j in range(end - start + 1):
label_list[start - 1 + j] = 1
print(express_list)
return label_list
def solve(video_ | identifier_body |
server.go | package shardmaster
import "../raft"
import "../labrpc"
import "sync"
import "../labgob"
import "log"
//import "fmt"
const Debug = 0
func DPrintf(format string, a ...interface{}) (n int, err error) {
if Debug > 0 {
log.Printf(format, a...)
}
return
}
func TPrintf(format string, a ...interface{}) (n int, err error) {
if Debug ==-1 {
log.Printf(format, a...)
}
return
}
type ShardMaster struct {
mu sync.Mutex
me int
rf *raft.Raft
applyCh chan raft.ApplyMsg
// Your data here.
duplicate map[int64]map[string]int64 // [clientId]([key]requestId)
requestHandlers map[int]chan raft.ApplyMsg
configs []Config // indexed by config num
commitIndex int
}
func (sm *ShardMaster) Lock() {
sm.mu.Lock()
}
func (sm *ShardMaster) Unlock() {
sm.mu.Unlock()
}
func (sm *ShardMaster) registerIndexHandler(index int) chan raft.ApplyMsg {
sm.Lock()
defer sm.Unlock()
awaitChan := make(chan raft.ApplyMsg, 1)
sm.requestHandlers[index] = awaitChan
return awaitChan
}
func (sm *ShardMaster) GetDuplicate(clientId int64, method string) (int64, bool) {
sm.Lock()
defer sm.Unlock()
clientRequest, haveClient := sm.duplicate[clientId]
if !haveClient {
return 0,false
}
val, ok := clientRequest[method]
return val, ok
}
func (sm *ShardMaster) SetDuplicateNolock(clientId int64, method string, requestId int64) {
_, haveClient := sm.duplicate[clientId]
if !haveClient {
sm.duplicate[clientId] = make(map[string]int64)
}
sm.duplicate[clientId][method] = requestId
}
type Op struct {
// Your data here.
Method string
Config []Config
RequestId int64
ClientId int64
}
func Clone(a, b *Config) {
b.Num = a.Num
for i,gid := range a.Shards {
b.Shards[i] = gid
}
for gid := range a.Groups {
        // assume the set of servers in a group never changes
b.Groups[gid] = a.Groups[gid]
}
}
func (sm *ShardMaster) AppendConfigAfterJoinNolock(args *JoinArgs) []Config {
newConfig := Config{}
newConfig.Groups = map[int][]string{}
lastConfig := sm.configs[len(sm.configs) - 1]
Clone(&lastConfig, &newConfig)
newConfig.Num = len(sm.configs)
for gid, names := range args.Servers {
newConfig.Groups[gid] = names
}
DPrintf("NewConfigAfterJoin, lastConfig=%+v, newConfig=%+v, args=%+v",
lastConfig, newConfig, args)
sm.RebalanceNolock(&newConfig)
sm.configs = append(sm.configs, newConfig)
return sm.configs
}
func (sm *ShardMaster) AppendConfigAfterLeaveNolock(args *LeaveArgs) []Config {
newConfig := Config{}
newConfig.Groups = map[int][]string{}
lastConfig := sm.configs[len(sm.configs) - 1]
Clone(&lastConfig, &newConfig)
newConfig.Num = len(sm.configs)
for _,gid := range args.GIDs {
delete(newConfig.Groups,gid)
}
DPrintf("NewConfigAfterLeave, lastConfig=%+v, newConfig=%+v, args=%+v",
lastConfig, newConfig, args)
sm.RebalanceNolock(&newConfig)
sm.configs = append(sm.configs, newConfig)
return sm.configs
}
func (sm *ShardMaster) AppendConfigAfterMoveNolock(args *MoveArgs) []Config {
newConfig := Config{}
newConfig.Groups = map[int][]string{}
lastConfig := sm.configs[len(sm.configs) - 1]
Clone(&lastConfig, &newConfig)
newConfig.Num = len(sm.configs)
newConfig.Shards[args.Shard] = args.GID
DPrintf("NewConfigAfterMove, lastConfig=%+v, newConfig=%+v, args=%+v",
lastConfig, newConfig, args)
sm.configs = append(sm.configs, newConfig)
return sm.configs
}
func (sm *ShardMaster) RebalanceNolock(config *Config) {
// balance shards to latest groups
numOfGroup := len(config.Groups)
if numOfGroup > 0 {
//numOfNodesPerGroup := NShards / numOfGroup
//log.Println("num of shards per group is", numOfNodesPerGroup)
leftOver := NShards % numOfGroup
for i:=0; i< NShards - leftOver; {
for gid := range config.Groups {
//log.Println("shard is", i, "group id is", gid)
config.Shards[i] = gid
i++
}
}
groupList := make([]int, 0)
for gid := range config.Groups {
groupList = append(groupList, gid)
}
// add left over shards
for j:=NShards-leftOver; j<NShards && len(groupList) > 0; j++ {
nextGroup := (j % numOfGroup)
config.Shards[j] = groupList[nextGroup]
}
DPrintf("RebalanceNolock result %+v\n", config.Shards)
}
}
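// Illustration (assumed values): with NShards = 10 and three groups {1, 2, 3},
// the round-robin loop in RebalanceNolock fills the first nine shards as
// 1,2,3,1,2,3,1,2,3 (subject to Go's non-deterministic map iteration order),
// and the single leftover shard is assigned groupList[9%3] = groupList[0].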
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) {
// Your code here.
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Join"
dup, ok := sm.GetDuplicate(args.ClientId, methodName)
if ok && (dup == args.RequestId) {
reply.Err = OK
return
}
sm.Lock()
newConfig := sm.AppendConfigAfterJoinNolock(args)
ops := Op {
Method: methodName,
Config: newConfig,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Err = OK
DPrintf("Join Success: args:%v\n", args)
return
}
}
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) {
// Your code here.
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Leave"
dup, ok := sm.GetDuplicate(args.ClientId, methodName)
if ok && (dup == args.RequestId) {
reply.Err = OK
return
}
sm.Lock()
newConfig := sm.AppendConfigAfterLeaveNolock(args)
ops := Op {
Method: methodName,
Config: newConfig,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Err = OK
DPrintf("Leave Success: args:%v\n", args)
return
}
}
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) {
// Your code here.
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Move"
dup, ok := sm.GetDuplicate(args.ClientId, methodName)
if ok && (dup == args.RequestId) {
reply.Err = OK
return
}
sm.Lock()
newConfig := sm.AppendConfigAfterMoveNolock(args)
ops := Op {
Method: methodName,
Config: newConfig,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Err = OK
DPrintf("Move Success: args:%v\n", args)
return
}
}
func (sm *ShardMaster) getConfigNolock(index int) Config {
var config Config
config.Groups = map[int][]string{}
if (index < 0) || (index >sm.commitIndex) {
Clone(&sm.configs[sm.commitIndex], &config)
} else {
Clone(&sm.configs[index], &config)
}
return config
}
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) {
// Your code here.
DPrintf("Query request: args:%v\n", args)
defer DPrintf("Query response: reply:%v\n", reply)
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Query"
sm.Lock()
theCareConfig := sm.getConfigNolock(args.Num)
ops := Op {
Method: methodName,
Config: nil,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Config = theCareConfig
reply.Err = OK
return
}
}
//
// the tester calls Kill() when a ShardMaster instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
//
func (sm *ShardMaster) Kill() {
sm.rf.Kill()
// Your code here, if desired.
}
// needed by shardkv tester
func (sm *ShardMaster) Raft() *raft.Raft {
return sm.rf
}
func (sm *ShardMaster) await(index int, term int, op Op) (success bool) {
awaitChan := sm.registerIndexHandler(index)
for {
select {
case message := <-awaitChan:
if sm.RaftBecomeFollower(&message) {
return false
}
if (message.CommandValid == true) &&
(index == message.CommandIndex) {
return (term == |
}
func Max(x, y int) int {
if x > y {
return x
}
return y
}
func (sm *ShardMaster) OnApplyEntry(m *raft.ApplyMsg) {
ops := m.Command.(Op)
dup, ok := sm.GetDuplicate(ops.ClientId, ops.Method)
sm.Lock()
defer sm.Unlock()
if !ok || (dup != ops.RequestId) {
switch ops.Method {
case "Leave":
fallthrough
case "Join":
fallthrough
case "Move":
if len(ops.Config) > len(sm.configs) {
// follower
sm.configs = ops.Config
}
sm.commitIndex = Max(sm.commitIndex, len(ops.Config) - 1)
sm.SetDuplicateNolock(ops.ClientId, ops.Method, ops.RequestId)
case "Query":
// nothing
}
}
ch, ok := sm.requestHandlers[m.CommandIndex]
if ok {
delete(sm.requestHandlers, m.CommandIndex)
ch <- *m
}
}
func (sm *ShardMaster) RaftBecomeFollower(m *raft.ApplyMsg) bool {
return (m.CommandValid == false) &&
(m.Type == raft.MsgTypeRole) &&
(m.Role == raft.RoleFollower)
}
func (sm *ShardMaster) OnRoleNotify(m *raft.ApplyMsg) {
sm.Lock()
defer sm.Unlock()
if sm.RaftBecomeFollower(m) {
for index, ch := range sm.requestHandlers {
delete(sm.requestHandlers, index)
ch <- *m
}
}
}
func (sm *ShardMaster) receivingApplyMsg() {
for {
select {
case m := <-sm.applyCh:
if m.CommandValid {
DPrintf("receivingApplyMsg receive entry message. %+v.", m)
sm.OnApplyEntry(&m)
DPrintf("new configs after apply. %+v.", sm.configs)
} else if(m.Type == raft.MsgTypeKill) {
DPrintf("receivingApplyMsg receive kill message. %+v.", m)
return
} else if(m.Type == raft.MsgTypeRole) {
//DPrintf("receivingApplyMsg receive role message. %+v.", m)
sm.OnRoleNotify(&m)
}
}
}
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(servers []*labrpc.ClientEnd, me int, persister *raft.Persister) *ShardMaster {
sm := new(ShardMaster)
sm.me = me
sm.configs = make([]Config, 1)
sm.configs[0].Groups = map[int][]string{}
labgob.Register(Op{})
sm.applyCh = make(chan raft.ApplyMsg)
sm.rf = raft.Make(servers, me, persister, sm.applyCh)
// Your code here.
sm.duplicate = make(map[int64]map[string]int64)
sm.requestHandlers = make(map[int]chan raft.ApplyMsg)
sm.commitIndex = 0
go sm.receivingApplyMsg()
return sm
}
| message.CommandTerm)
}
// continue
}
} | conditional_block |
server.go | package shardmaster
import "../raft"
import "../labrpc"
import "sync"
import "../labgob"
import "log"
//import "fmt"
const Debug = 0
func DPrintf(format string, a ...interface{}) (n int, err error) {
if Debug > 0 {
log.Printf(format, a...)
}
return
}
func TPrintf(format string, a ...interface{}) (n int, err error) {
if Debug ==-1 {
log.Printf(format, a...)
}
return
}
type ShardMaster struct {
mu sync.Mutex
me int
rf *raft.Raft
applyCh chan raft.ApplyMsg
// Your data here.
duplicate map[int64]map[string]int64 // [clientId]([key]requestId)
requestHandlers map[int]chan raft.ApplyMsg
configs []Config // indexed by config num
commitIndex int
}
func (sm *ShardMaster) Lock() {
sm.mu.Lock()
}
func (sm *ShardMaster) Unlock() {
sm.mu.Unlock()
}
func (sm *ShardMaster) registerIndexHandler(index int) chan raft.ApplyMsg {
sm.Lock()
defer sm.Unlock()
awaitChan := make(chan raft.ApplyMsg, 1)
sm.requestHandlers[index] = awaitChan
return awaitChan
}
func (sm *ShardMaster) GetDuplicate(clientId int64, method string) (int64, bool) {
sm.Lock()
defer sm.Unlock()
clientRequest, haveClient := sm.duplicate[clientId]
if !haveClient {
return 0,false
}
val, ok := clientRequest[method]
return val, ok
}
func (sm *ShardMaster) SetDuplicateNolock(clientId int64, method string, requestId int64) {
_, haveClient := sm.duplicate[clientId]
if !haveClient {
sm.duplicate[clientId] = make(map[string]int64)
}
sm.duplicate[clientId][method] = requestId
}
type Op struct {
// Your data here.
Method string
Config []Config
RequestId int64
ClientId int64
}
func Clone(a, b *Config) {
b.Num = a.Num
for i,gid := range a.Shards {
b.Shards[i] = gid
}
for gid := range a.Groups {
        // assume the set of servers in a group never changes
b.Groups[gid] = a.Groups[gid]
}
}
func (sm *ShardMaster) AppendConfigAfterJoinNolock(args *JoinArgs) []Config {
newConfig := Config{}
newConfig.Groups = map[int][]string{}
lastConfig := sm.configs[len(sm.configs) - 1]
Clone(&lastConfig, &newConfig)
newConfig.Num = len(sm.configs)
for gid, names := range args.Servers {
newConfig.Groups[gid] = names
}
DPrintf("NewConfigAfterJoin, lastConfig=%+v, newConfig=%+v, args=%+v",
lastConfig, newConfig, args)
sm.RebalanceNolock(&newConfig)
sm.configs = append(sm.configs, newConfig)
return sm.configs
}
func (sm *ShardMaster) AppendConfigAfterLeaveNolock(args *LeaveArgs) []Config {
newConfig := Config{}
newConfig.Groups = map[int][]string{}
lastConfig := sm.configs[len(sm.configs) - 1]
Clone(&lastConfig, &newConfig)
newConfig.Num = len(sm.configs)
for _,gid := range args.GIDs {
delete(newConfig.Groups,gid)
}
DPrintf("NewConfigAfterLeave, lastConfig=%+v, newConfig=%+v, args=%+v",
lastConfig, newConfig, args)
sm.RebalanceNolock(&newConfig)
sm.configs = append(sm.configs, newConfig)
return sm.configs
}
func (sm *ShardMaster) AppendConfigAfterMoveNolock(args *MoveArgs) []Config {
newConfig := Config{}
newConfig.Groups = map[int][]string{}
lastConfig := sm.configs[len(sm.configs) - 1]
Clone(&lastConfig, &newConfig)
newConfig.Num = len(sm.configs)
newConfig.Shards[args.Shard] = args.GID
DPrintf("NewConfigAfterMove, lastConfig=%+v, newConfig=%+v, args=%+v",
lastConfig, newConfig, args)
sm.configs = append(sm.configs, newConfig)
return sm.configs
}
func (sm *ShardMaster) RebalanceNolock(config *Config) {
// balance shards to latest groups
numOfGroup := len(config.Groups)
if numOfGroup > 0 {
//numOfNodesPerGroup := NShards / numOfGroup
//log.Println("num of shards per group is", numOfNodesPerGroup)
leftOver := NShards % numOfGroup
for i:=0; i< NShards - leftOver; {
for gid := range config.Groups {
//log.Println("shard is", i, "group id is", gid)
config.Shards[i] = gid
i++
}
}
groupList := make([]int, 0)
for gid := range config.Groups {
groupList = append(groupList, gid)
}
// add left over shards
for j:=NShards-leftOver; j<NShards && len(groupList) > 0; j++ {
nextGroup := (j % numOfGroup)
config.Shards[j] = groupList[nextGroup]
}
DPrintf("RebalanceNolock result %+v\n", config.Shards)
}
}
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) {
// Your code here.
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Join"
dup, ok := sm.GetDuplicate(args.ClientId, methodName)
if ok && (dup == args.RequestId) {
reply.Err = OK
return
}
sm.Lock()
newConfig := sm.AppendConfigAfterJoinNolock(args)
ops := Op {
Method: methodName,
Config: newConfig,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Err = OK
DPrintf("Join Success: args:%v\n", args)
return
}
}
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) {
// Your code here.
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Leave"
dup, ok := sm.GetDuplicate(args.ClientId, methodName)
if ok && (dup == args.RequestId) {
reply.Err = OK
return
}
sm.Lock()
newConfig := sm.AppendConfigAfterLeaveNolock(args)
ops := Op {
Method: methodName,
Config: newConfig,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Err = OK
DPrintf("Leave Success: args:%v\n", args)
return
}
}
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) {
// Your code here.
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Move"
dup, ok := sm.GetDuplicate(args.ClientId, methodName)
if ok && (dup == args.RequestId) {
reply.Err = OK
return
}
sm.Lock()
newConfig := sm.AppendConfigAfterMoveNolock(args)
ops := Op {
Method: methodName,
Config: newConfig,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Err = OK
DPrintf("Move Success: args:%v\n", args)
return
}
}
func (sm *ShardMaster) getConfigNolock(index int) Config {
var config Config
config.Groups = map[int][]string{}
if (index < 0) || (index >sm.commitIndex) {
Clone(&sm.configs[sm.commitIndex], &config)
} else {
Clone(&sm.configs[index], &config)
}
return config
}
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) {
// Your code here.
DPrintf("Query request: args:%v\n", args)
defer DPrintf("Query response: reply:%v\n", reply)
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Query"
sm.Lock()
theCareConfig := sm.getConfigNolock(args.Num)
ops := Op {
Method: methodName,
Config: nil,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Config = theCareConfig
reply.Err = OK
return
}
}
//
// the tester calls Kill() when a ShardMaster instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
//
func (sm *ShardMaster) Kill() {
sm.rf.Kill()
// Your code here, if desired.
}
// needed by shardkv tester
func (sm *ShardMaster) Raft() *raft.Raft {
return sm.rf
}
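// await blocks until the Raft entry proposed at `index` is applied on this server or the
// node falls back to follower. It returns true only when the applied entry's term matches
// the term returned by Start, i.e. the committed entry is the one this handler proposed.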
func (sm *ShardMaster) await(index int, term int, op Op) (success bool) {
awaitChan := sm.registerIndexHandler(index)
for {
select {
case message := <-awaitChan:
if sm.RaftBecomeFollower(&message) {
return false
}
if (message.CommandValid == true) &&
(index == message.CommandIndex) {
return (term == message.CommandTerm)
}
// continue
}
}
}
func Max(x, y int) int {
if x > y {
return | r) OnApplyEntry(m *raft.ApplyMsg) {
ops := m.Command.(Op)
dup, ok := sm.GetDuplicate(ops.ClientId, ops.Method)
sm.Lock()
defer sm.Unlock()
if !ok || (dup != ops.RequestId) {
switch ops.Method {
case "Leave":
fallthrough
case "Join":
fallthrough
case "Move":
if len(ops.Config) > len(sm.configs) {
// follower
sm.configs = ops.Config
}
sm.commitIndex = Max(sm.commitIndex, len(ops.Config) - 1)
sm.SetDuplicateNolock(ops.ClientId, ops.Method, ops.RequestId)
case "Query":
// nothing
}
}
ch, ok := sm.requestHandlers[m.CommandIndex]
if ok {
delete(sm.requestHandlers, m.CommandIndex)
ch <- *m
}
}
func (sm *ShardMaster) RaftBecomeFollower(m *raft.ApplyMsg) bool {
return (m.CommandValid == false) &&
(m.Type == raft.MsgTypeRole) &&
(m.Role == raft.RoleFollower)
}
func (sm *ShardMaster) OnRoleNotify(m *raft.ApplyMsg) {
sm.Lock()
defer sm.Unlock()
if sm.RaftBecomeFollower(m) {
for index, ch := range sm.requestHandlers {
delete(sm.requestHandlers, index)
ch <- *m
}
}
}
func (sm *ShardMaster) receivingApplyMsg() {
for {
select {
case m := <-sm.applyCh:
if m.CommandValid {
DPrintf("receivingApplyMsg receive entry message. %+v.", m)
sm.OnApplyEntry(&m)
DPrintf("new configs after apply. %+v.", sm.configs)
} else if(m.Type == raft.MsgTypeKill) {
DPrintf("receivingApplyMsg receive kill message. %+v.", m)
return
} else if(m.Type == raft.MsgTypeRole) {
//DPrintf("receivingApplyMsg receive role message. %+v.", m)
sm.OnRoleNotify(&m)
}
}
}
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(servers []*labrpc.ClientEnd, me int, persister *raft.Persister) *ShardMaster {
sm := new(ShardMaster)
sm.me = me
sm.configs = make([]Config, 1)
sm.configs[0].Groups = map[int][]string{}
labgob.Register(Op{})
sm.applyCh = make(chan raft.ApplyMsg)
sm.rf = raft.Make(servers, me, persister, sm.applyCh)
// Your code here.
sm.duplicate = make(map[int64]map[string]int64)
sm.requestHandlers = make(map[int]chan raft.ApplyMsg)
sm.commitIndex = 0
go sm.receivingApplyMsg()
return sm
}
| x
}
return y
}
func (sm *ShardMaste | identifier_body |
server.go | package shardmaster
import "../raft"
import "../labrpc"
import "sync"
import "../labgob"
import "log"
//import "fmt"
const Debug = 0
func DPrintf(format string, a ...interface{}) (n int, err error) {
if Debug > 0 {
log.Printf(format, a...)
}
return
}
func TPrintf(format string, a ...interface{}) (n int, err error) {
if Debug == -1 {
log.Printf(format, a...)
}
return
}
type ShardMaster struct {
mu sync.Mutex
me int
rf *raft.Raft
applyCh chan raft.ApplyMsg
// Your data here.
duplicate map[int64]map[string]int64 // [clientId]([method]requestId)
requestHandlers map[int]chan raft.ApplyMsg
configs []Config // indexed by config num
commitIndex int
}
func (sm *ShardMaster) Lock() {
sm.mu.Lock()
}
func (sm *ShardMaster) Unlock() {
sm.mu.Unlock()
}
func (sm *ShardMaster) registerIndexHandler(index int) chan raft.ApplyMsg {
sm.Lock()
defer sm.Unlock()
awaitChan := make(chan raft.ApplyMsg, 1)
sm.requestHandlers[index] = awaitChan
return awaitChan
}
func (sm *ShardMaster) GetDuplicate(clientId int64, method string) (int64, bool) {
sm.Lock()
defer sm.Unlock()
clientRequest, haveClient := sm.duplicate[clientId]
if !haveClient {
return 0, false
}
val, ok := clientRequest[method]
return val, ok
}
func (sm *ShardMaster) SetDuplicateNolock(clientId int64, method string, requestId int64) {
_, haveClient := sm.duplicate[clientId]
if !haveClient {
sm.duplicate[clientId] = make(map[string]int64)
}
sm.duplicate[clientId][method] = requestId
}
type Op struct {
// Your data here.
Method string
Config []Config
RequestId int64
ClientId int64
}
func Clone(a, b *Config) {
b.Num = a.Num
for i,gid := range a.Shards {
b.Shards[i] = gid
}
for gid := range a.Groups {
// Assumes that the set of machines within a group does not change
b.Groups[gid] = a.Groups[gid]
}
}
func (sm *ShardMaster) AppendConfigAfterJoinNolock(args *JoinArgs) []Config {
newConfig := Config{}
newConfig.Groups = map[int][]string{}
lastConfig := sm.configs[len(sm.configs) - 1]
Clone(&lastConfig, &newConfig)
newConfig.Num = len(sm.configs)
for gid, names := range args.Servers {
newConfig.Groups[gid] = names
}
DPrintf("NewConfigAfterJoin, lastConfig=%+v, newConfig=%+v, args=%+v",
lastConfig, newConfig, args)
sm.RebalanceNolock(&newConfig)
sm.configs = append(sm.configs, newConfig)
return sm.configs
}
func (sm *ShardMaster) AppendConfigAfterLeaveNolock(args *LeaveArgs) []Config {
newConfig := Config{}
newConfig.Groups = map[int][]string{}
lastConfig := sm.configs[len(sm.configs) - 1]
Clone(&lastConfig, &newConfig)
newConfig.Num = len(sm.configs)
for _,gid := range args.GIDs {
delete(newConfig.Groups,gid)
}
DPrintf("NewConfigAfterLeave, lastConfig=%+v, newConfig=%+v, args=%+v",
lastConfig, newConfig, args)
sm.RebalanceNolock(&newConfig)
sm.configs = append(sm.configs, newConfig)
return sm.configs
}
func (sm *ShardMaster) AppendConfigAfterMoveNolock(args *MoveArgs) []Config {
newConfig := Config{}
newConfig.Groups = map[int][]string{}
lastConfig := sm.configs[len(sm.configs) - 1]
Clone(&lastConfig, &newConfig)
newConfig.Num = len(sm.configs)
newConfig.Shards[args.Shard] = args.GID
DPrintf("NewConfigAfterMove, lastConfig=%+v, newConfig=%+v, args=%+v",
lastConfig, newConfig, args)
sm.configs = append(sm.configs, newConfig)
return sm.configs
}
func (sm *ShardMaster) RebalanceNolock(config *Config) {
// balance shards to latest groups
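// Round-robin assignment: the first NShards - (NShards % numOfGroup) shards are dealt out
// one group at a time, and the leftover shards are spread over a snapshot of the group IDs.
// Go map iteration order is randomized, so the exact layout can differ between calls; the
// resulting Config is replicated through Raft rather than recomputed on every server.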
numOfGroup := len(config.Groups)
if numOfGroup > 0 {
//numOfNodesPerGroup := NShards / numOfGroup
//log.Println("num of shards per group is", numOfNodesPerGroup)
leftOver := NShards % numOfGroup
for i:=0; i< NShards - leftOver; {
for gid := range config.Groups {
//log.Println("shard is", i, "group id is", gid)
config.Shards[i] = gid
i++
}
}
groupList := make([]int, 0)
for gid := range config.Groups {
groupList = append(groupList, gid)
}
// add left over shards
for j:=NShards-leftOver; j<NShards && len(groupList) > 0; j++ {
nextGroup := (j % numOfGroup)
config.Shards[j] = groupList[nextGroup]
}
DPrintf("RebalanceNolock result %+v\n", config.Shards)
}
}
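// Join, Leave and Move share the same flow: bail out if this server is not the Raft leader,
// short-circuit duplicate client requests, build the next configuration, replicate it through
// Raft, and reply OK only once the entry has been applied at the expected index and term.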
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) {
// Your code here.
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Join"
dup, ok := sm.GetDuplicate(args.ClientId, methodName)
if ok && (dup == args.RequestId) {
reply.Err = OK
return
}
sm.Lock()
newConfig := sm.AppendConfigAfterJoinNolock(args)
ops := Op {
Method: methodName,
Config: newConfig,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Err = OK
DPrintf("Join Success: args:%v\n", args)
return
}
}
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) {
// Your code here.
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Leave"
dup, ok := sm.GetDuplicate(args.ClientId, methodName)
if ok && (dup == args.RequestId) {
reply.Err = OK
return
}
sm.Lock()
newConfig := sm.AppendConfigAfterLeaveNolock(args)
ops := Op {
Method: methodName,
Config: newConfig,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Err = OK
DPrintf("Leave Success: args:%v\n", args)
return
}
}
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) {
// Your code here.
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Move"
dup, ok := sm.GetDuplicate(args.ClientId, methodName)
if ok && (dup == args.RequestId) {
reply.Err = OK
return
}
sm.Lock()
newConfig := sm.AppendConfigAfterMoveNolock(args)
ops := Op {
Method: methodName,
Config: newConfig,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Err = OK
DPrintf("Move Success: args:%v\n", args)
return
}
}
func (sm *ShardMaster) getConfigNolock(index int) Config {
var config Config
config.Groups = map[int][]string{}
if (index < 0) || (index >sm.commitIndex) {
Clone(&sm.configs[sm.commitIndex], &config)
} else {
Clone(&sm.configs[index], &config)
}
return config
}
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) {
// Your code here.
DPrintf("Query request: args:%v\n", args)
defer DPrintf("Query response: reply:%v\n", reply)
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Query"
sm.Lock()
theCareConfig := sm.getConfigNolock(args.Num)
ops := Op {
Method: methodName,
Config: nil,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Config = theCareConfig
reply.Err = OK
return
}
}
//
// the tester calls Kill() when a ShardMaster instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
//
func (sm *ShardMaster) Kill() {
sm.rf.Kill()
// Your code here, if desired.
}
// needed by shardkv tester
func (sm *ShardMaster) Raft() *raft.Raft {
return sm.rf
}
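// await blocks until the Raft entry proposed at `index` is applied on this server or the
// node falls back to follower. It returns true only when the applied entry's term matches
// the term returned by Start, i.e. the committed entry is the one this handler proposed.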
func (sm *ShardMaster) await(index int, term int, op Op) (success bool) {
awaitChan := sm.registerIndexHandler(index)
for {
select {
case message := <-awaitChan:
if sm.RaftBecomeFollower(&message) {
return false
}
if (message.CommandValid == true) &&
(index == message.CommandIndex) {
return (term == message.CommandTerm)
}
// continue
}
}
}
func Max(x, y int) int {
if x > y {
return x
}
return y
}
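// OnApplyEntry handles every committed Raft entry: followers adopt the replicated config list
// when it is longer than their own, the commit index and per-client duplicate table are
// advanced, and any RPC handler waiting on this log index is woken through its channel.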
func (sm *ShardMaster) OnApplyEntry(m *raft.ApplyMsg) {
ops := m.Command.(Op)
dup, ok := sm.GetDuplicate(ops.ClientId, ops.Method)
sm.Lock()
defer sm.Unlock()
if !ok || (dup != ops.RequestId) {
switch ops.Method {
case "Leave":
fallthrough
case "Join":
fallthrough
case "Move":
if len(ops.Config) > len(sm.configs) {
// follower
sm.configs = ops.Config
}
sm.commitIndex = Max(sm.commitIndex, len(ops.Config) - 1)
sm.SetDuplicateNolock(ops.ClientId, ops.Method, ops.RequestId)
case "Query":
// nothing
}
}
ch, ok := sm.requestHandlers[m.CommandIndex]
if ok {
delete(sm.requestHandlers, m.CommandIndex)
ch <- *m
}
}
func (sm *ShardMaster) RaftBecomeFollower(m *raft.ApplyMsg) bool {
return (m.CommandValid == false) &&
(m.Type == raft.MsgTypeRole) &&
(m.Role == raft.RoleFollower)
}
func (sm *ShardMaster) OnRoleNotify(m *raft.A | sm.Lock()
defer sm.Unlock()
if sm.RaftBecomeFollower(m) {
for index, ch := range sm.requestHandlers {
delete(sm.requestHandlers, index)
ch <- *m
}
}
}
func (sm *ShardMaster) receivingApplyMsg() {
for {
select {
case m := <-sm.applyCh:
if m.CommandValid {
DPrintf("receivingApplyMsg receive entry message. %+v.", m)
sm.OnApplyEntry(&m)
DPrintf("new configs after apply. %+v.", sm.configs)
} else if(m.Type == raft.MsgTypeKill) {
DPrintf("receivingApplyMsg receive kill message. %+v.", m)
return
} else if(m.Type == raft.MsgTypeRole) {
//DPrintf("receivingApplyMsg receive role message. %+v.", m)
sm.OnRoleNotify(&m)
}
}
}
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(servers []*labrpc.ClientEnd, me int, persister *raft.Persister) *ShardMaster {
sm := new(ShardMaster)
sm.me = me
sm.configs = make([]Config, 1)
sm.configs[0].Groups = map[int][]string{}
labgob.Register(Op{})
sm.applyCh = make(chan raft.ApplyMsg)
sm.rf = raft.Make(servers, me, persister, sm.applyCh)
// Your code here.
sm.duplicate = make(map[int64]map[string]int64)
sm.requestHandlers = make(map[int]chan raft.ApplyMsg)
sm.commitIndex = 0
go sm.receivingApplyMsg()
return sm
}
| pplyMsg) {
| identifier_name |
server.go | package shardmaster
import "../raft"
import "../labrpc"
import "sync"
import "../labgob"
import "log"
//import "fmt"
const Debug = 0
func DPrintf(format string, a ...interface{}) (n int, err error) {
if Debug > 0 {
log.Printf(format, a...)
}
return
}
func TPrintf(format string, a ...interface{}) (n int, err error) {
if Debug == -1 {
log.Printf(format, a...)
}
return
}
type ShardMaster struct {
mu sync.Mutex
me int
rf *raft.Raft
applyCh chan raft.ApplyMsg
// Your data here.
duplicate map[int64]map[string]int64 // [clientId]([method]requestId)
requestHandlers map[int]chan raft.ApplyMsg
configs []Config // indexed by config num
commitIndex int
}
func (sm *ShardMaster) Lock() {
sm.mu.Lock()
}
func (sm *ShardMaster) Unlock() {
sm.mu.Unlock()
}
func (sm *ShardMaster) registerIndexHandler(index int) chan raft.ApplyMsg {
sm.Lock()
defer sm.Unlock()
awaitChan := make(chan raft.ApplyMsg, 1)
sm.requestHandlers[index] = awaitChan
return awaitChan
}
func (sm *ShardMaster) GetDuplicate(clientId int64, method string) (int64, bool) {
sm.Lock()
defer sm.Unlock()
clientRequest, haveClient := sm.duplicate[clientId]
if !haveClient {
return 0, false
}
val, ok := clientRequest[method]
return val, ok
}
func (sm *ShardMaster) SetDuplicateNolock(clientId int64, method string, requestId int64) {
_, haveClient := sm.duplicate[clientId]
if !haveClient {
sm.duplicate[clientId] = make(map[string]int64)
}
sm.duplicate[clientId][method] = requestId
}
type Op struct {
// Your data here.
Method string
Config []Config
RequestId int64
ClientId int64
}
func Clone(a, b *Config) {
b.Num = a.Num
for i,gid := range a.Shards {
b.Shards[i] = gid
}
for gid := range a.Groups {
// Assumes that the set of machines within a group does not change
b.Groups[gid] = a.Groups[gid]
}
}
func (sm *ShardMaster) AppendConfigAfterJoinNolock(args *JoinArgs) []Config {
newConfig := Config{}
newConfig.Groups = map[int][]string{}
lastConfig := sm.configs[len(sm.configs) - 1]
Clone(&lastConfig, &newConfig)
newConfig.Num = len(sm.configs)
for gid, names := range args.Servers {
newConfig.Groups[gid] = names
}
DPrintf("NewConfigAfterJoin, lastConfig=%+v, newConfig=%+v, args=%+v",
lastConfig, newConfig, args)
sm.RebalanceNolock(&newConfig)
sm.configs = append(sm.configs, newConfig)
return sm.configs
}
func (sm *ShardMaster) AppendConfigAfterLeaveNolock(args *LeaveArgs) []Config {
newConfig := Config{}
newConfig.Groups = map[int][]string{}
lastConfig := sm.configs[len(sm.configs) - 1]
Clone(&lastConfig, &newConfig)
newConfig.Num = len(sm.configs)
for _,gid := range args.GIDs {
delete(newConfig.Groups,gid)
}
DPrintf("NewConfigAfterLeave, lastConfig=%+v, newConfig=%+v, args=%+v",
lastConfig, newConfig, args)
sm.RebalanceNolock(&newConfig)
sm.configs = append(sm.configs, newConfig)
return sm.configs
}
func (sm *ShardMaster) AppendConfigAfterMoveNolock(args *MoveArgs) []Config {
newConfig := Config{}
newConfig.Groups = map[int][]string{}
lastConfig := sm.configs[len(sm.configs) - 1]
Clone(&lastConfig, &newConfig)
newConfig.Num = len(sm.configs)
newConfig.Shards[args.Shard] = args.GID
DPrintf("NewConfigAfterMove, lastConfig=%+v, newConfig=%+v, args=%+v",
lastConfig, newConfig, args)
sm.configs = append(sm.configs, newConfig)
return sm.configs
}
func (sm *ShardMaster) RebalanceNolock(config *Config) {
// balance shards to latest groups
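// Round-robin assignment: the first NShards - (NShards % numOfGroup) shards are dealt out
// one group at a time, and the leftover shards are spread over a snapshot of the group IDs.
// Go map iteration order is randomized, so the exact layout can differ between calls; the
// resulting Config is replicated through Raft rather than recomputed on every server.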
numOfGroup := len(config.Groups)
if numOfGroup > 0 {
//numOfNodesPerGroup := NShards / numOfGroup
//log.Println("num of shards per group is", numOfNodesPerGroup)
leftOver := NShards % numOfGroup
for i:=0; i< NShards - leftOver; {
for gid := range config.Groups {
//log.Println("shard is", i, "group id is", gid)
config.Shards[i] = gid
i++
}
}
groupList := make([]int, 0)
for gid := range config.Groups {
groupList = append(groupList, gid)
}
// add left over shards
for j:=NShards-leftOver; j<NShards && len(groupList) > 0; j++ {
nextGroup := (j % numOfGroup)
config.Shards[j] = groupList[nextGroup]
}
DPrintf("RebalanceNolock result %+v\n", config.Shards)
}
}
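// Join, Leave and Move share the same flow: bail out if this server is not the Raft leader,
// short-circuit duplicate client requests, build the next configuration, replicate it through
// Raft, and reply OK only once the entry has been applied at the expected index and term.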
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) {
// Your code here.
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Join"
dup, ok := sm.GetDuplicate(args.ClientId, methodName)
if ok && (dup == args.RequestId) {
reply.Err = OK
return
}
sm.Lock()
newConfig := sm.AppendConfigAfterJoinNolock(args)
ops := Op {
Method: methodName,
Config: newConfig,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Err = OK
DPrintf("Join Success: args:%v\n", args)
return
}
}
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) {
// Your code here.
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Leave"
dup, ok := sm.GetDuplicate(args.ClientId, methodName)
if ok && (dup == args.RequestId) {
reply.Err = OK
return
}
sm.Lock()
newConfig := sm.AppendConfigAfterLeaveNolock(args)
ops := Op {
Method: methodName,
Config: newConfig,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Err = OK
DPrintf("Leave Success: args:%v\n", args)
return
}
}
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) {
// Your code here.
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Move"
dup, ok := sm.GetDuplicate(args.ClientId, methodName)
if ok && (dup == args.RequestId) {
reply.Err = OK
return
}
sm.Lock()
newConfig := sm.AppendConfigAfterMoveNolock(args)
ops := Op {
Method: methodName,
Config: newConfig,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Err = OK
DPrintf("Move Success: args:%v\n", args)
return
}
}
func (sm *ShardMaster) getConfigNolock(index int) Config {
var config Config
config.Groups = map[int][]string{}
if (index < 0) || (index >sm.commitIndex) {
Clone(&sm.configs[sm.commitIndex], &config)
} else {
Clone(&sm.configs[index], &config)
}
return config
}
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) {
// Your code here.
DPrintf("Query request: args:%v\n", args)
defer DPrintf("Query response: reply:%v\n", reply)
_, isLeader := sm.rf.GetState()
if !isLeader {
reply.WrongLeader = true
return
}
methodName := "Query"
sm.Lock()
theCareConfig := sm.getConfigNolock(args.Num)
ops := Op {
Method: methodName,
Config: nil,
RequestId: args.RequestId,
ClientId: args.ClientId,
}
sm.Unlock()
index, term, isLeader := sm.rf.Start(ops)
if !isLeader {
reply.WrongLeader = true
return
}
success := sm.await(index, term, ops)
if !success {
reply.WrongLeader = true
return
} else {
reply.Config = theCareConfig
reply.Err = OK
return
}
}
//
// the tester calls Kill() when a ShardMaster instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
//
func (sm *ShardMaster) Kill() { | func (sm *ShardMaster) Raft() *raft.Raft {
return sm.rf
}
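// await blocks until the Raft entry proposed at `index` is applied on this server or the
// node falls back to follower. It returns true only when the applied entry's term matches
// the term returned by Start, i.e. the committed entry is the one this handler proposed.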
func (sm *ShardMaster) await(index int, term int, op Op) (success bool) {
awaitChan := sm.registerIndexHandler(index)
for {
select {
case message := <-awaitChan:
if sm.RaftBecomeFollower(&message) {
return false
}
if (message.CommandValid == true) &&
(index == message.CommandIndex) {
return (term == message.CommandTerm)
}
// continue
}
}
}
func Max(x, y int) int {
if x > y {
return x
}
return y
}
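// OnApplyEntry handles every committed Raft entry: followers adopt the replicated config list
// when it is longer than their own, the commit index and per-client duplicate table are
// advanced, and any RPC handler waiting on this log index is woken through its channel.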
func (sm *ShardMaster) OnApplyEntry(m *raft.ApplyMsg) {
ops := m.Command.(Op)
dup, ok := sm.GetDuplicate(ops.ClientId, ops.Method)
sm.Lock()
defer sm.Unlock()
if !ok || (dup != ops.RequestId) {
switch ops.Method {
case "Leave":
fallthrough
case "Join":
fallthrough
case "Move":
if len(ops.Config) > len(sm.configs) {
// follower
sm.configs = ops.Config
}
sm.commitIndex = Max(sm.commitIndex, len(ops.Config) - 1)
sm.SetDuplicateNolock(ops.ClientId, ops.Method, ops.RequestId)
case "Query":
// nothing
}
}
ch, ok := sm.requestHandlers[m.CommandIndex]
if ok {
delete(sm.requestHandlers, m.CommandIndex)
ch <- *m
}
}
func (sm *ShardMaster) RaftBecomeFollower(m *raft.ApplyMsg) bool {
return (m.CommandValid == false) &&
(m.Type == raft.MsgTypeRole) &&
(m.Role == raft.RoleFollower)
}
func (sm *ShardMaster) OnRoleNotify(m *raft.ApplyMsg) {
sm.Lock()
defer sm.Unlock()
if sm.RaftBecomeFollower(m) {
for index, ch := range sm.requestHandlers {
delete(sm.requestHandlers, index)
ch <- *m
}
}
}
func (sm *ShardMaster) receivingApplyMsg() {
for {
select {
case m := <-sm.applyCh:
if m.CommandValid {
DPrintf("receivingApplyMsg receive entry message. %+v.", m)
sm.OnApplyEntry(&m)
DPrintf("new configs after apply. %+v.", sm.configs)
} else if(m.Type == raft.MsgTypeKill) {
DPrintf("receivingApplyMsg receive kill message. %+v.", m)
return
} else if(m.Type == raft.MsgTypeRole) {
//DPrintf("receivingApplyMsg receive role message. %+v.", m)
sm.OnRoleNotify(&m)
}
}
}
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(servers []*labrpc.ClientEnd, me int, persister *raft.Persister) *ShardMaster {
sm := new(ShardMaster)
sm.me = me
sm.configs = make([]Config, 1)
sm.configs[0].Groups = map[int][]string{}
labgob.Register(Op{})
sm.applyCh = make(chan raft.ApplyMsg)
sm.rf = raft.Make(servers, me, persister, sm.applyCh)
// Your code here.
sm.duplicate = make(map[int64]map[string]int64)
sm.requestHandlers = make(map[int]chan raft.ApplyMsg)
sm.commitIndex = 0
go sm.receivingApplyMsg()
return sm
} | sm.rf.Kill()
// Your code here, if desired.
}
// needed by shardkv tester | random_line_split |
multiexp.rs | use super::error::{GPUError, GPUResult};
use super::locks;
use super::sources;
use super::utils;
use crate::bls::Engine;
use crate::multicore::Worker;
use crate::multiexp::{multiexp as cpu_multiexp, FullDensity};
use ff::{PrimeField, ScalarEngine};
use groupy::{CurveAffine, CurveProjective};
use log::{error, info};
use rust_gpu_tools::*;
use std::any::TypeId;
use std::sync::Arc;
const MAX_WINDOW_SIZE: usize = 10;
const LOCAL_WORK_SIZE: usize = 256;
const MEMORY_PADDING: f64 = 0.2f64; // Let 20% of GPU memory be free
pub fn get_cpu_utilization() -> f64 {
use std::env;
env::var("BELLMAN_CPU_UTILIZATION")
.and_then(|v| match v.parse() {
Ok(val) => Ok(val),
Err(_) => {
error!("Invalid BELLMAN_CPU_UTILIZATION! Defaulting to 0...");
Ok(0f64)
}
})
.unwrap_or(0f64)
.max(0f64)
.min(1f64)
}
// Multiexp kernel for a single GPU
pub struct SingleMultiexpKernel<E>
where
E: Engine,
{
program: opencl::Program,
core_count: usize,
n: usize,
priority: bool,
_phantom: std::marker::PhantomData<E::Fr>,
}
fn calc_num_groups(core_count: usize, num_windows: usize) -> usize {
// Observations show that we get the best performance when num_groups * num_windows ~= 2 * CUDA_CORES
2 * core_count / num_windows
}
fn calc_window_size(n: usize, exp_bits: usize, core_count: usize) -> usize {
// window_size = ln(n / num_groups)
// num_windows = exp_bits / window_size
// num_groups = 2 * core_count / num_windows = 2 * core_count * window_size / exp_bits
// window_size = ln(n / num_groups) = ln(n * exp_bits / (2 * core_count * window_size))
// window_size = ln(exp_bits * n / (2 * core_count)) - ln(window_size)
//
// Thus we need to solve the following equation:
// window_size + ln(window_size) = ln(exp_bits * n / (2 * core_count))
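// Illustrative example (numbers not from the original source): with exp_bits = 256,
// n = 2^20 and core_count = 2560, lower_bound = ln(256 * 2^20 / 5120) ≈ 10.87, and the
// smallest w with w + ln(w) > 10.87 is w = 9, so a 9-bit window would be chosen.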
let lower_bound = (((exp_bits * n) as f64) / ((2 * core_count) as f64)).ln();
for w in 0..MAX_WINDOW_SIZE {
if (w as f64) + (w as f64).ln() > lower_bound {
return w;
}
}
MAX_WINDOW_SIZE
}
fn calc_best_chunk_size(max_window_size: usize, core_count: usize, exp_bits: usize) -> usize {
// Best chunk-size (N) can also be calculated using the same logic as calc_window_size:
// n = e^window_size * window_size * 2 * core_count / exp_bits
(((max_window_size as f64).exp() as f64)
* (max_window_size as f64)
* 2f64
* (core_count as f64)
/ (exp_bits as f64))
.ceil() as usize
}
fn calc_chunk_size<E>(mem: u64, core_count: usize) -> usize
where
E: Engine,
{
let aff_size = std::mem::size_of::<E::G1Affine>() + std::mem::size_of::<E::G2Affine>();
let exp_size = exp_size::<E>();
let proj_size = std::mem::size_of::<E::G1>() + std::mem::size_of::<E::G2>();
((((mem as f64) * (1f64 - MEMORY_PADDING)) as usize)
- (2 * core_count * ((1 << MAX_WINDOW_SIZE) + 1) * proj_size))
/ (aff_size + exp_size)
}
fn exp_size<E: Engine>() -> usize {
std::mem::size_of::<<E::Fr as ff::PrimeField>::Repr>()
}
impl<E> SingleMultiexpKernel<E>
where
E: Engine,
{
pub fn create(d: opencl::Device, priority: bool) -> GPUResult<SingleMultiexpKernel<E>> {
let src = sources::kernel::<E>(d.brand() == opencl::Brand::Nvidia);
let exp_bits = exp_size::<E>() * 8;
let core_count = utils::get_core_count(&d);
let mem = d.memory();
let max_n = calc_chunk_size::<E>(mem, core_count);
let best_n = calc_best_chunk_size(MAX_WINDOW_SIZE, core_count, exp_bits);
let n = std::cmp::min(max_n, best_n);
Ok(SingleMultiexpKernel {
program: opencl::Program::from_opencl(d, &src)?,
core_count,
n,
priority,
_phantom: std::marker::PhantomData,
})
}
pub fn multiexp<G>(
&mut self,
bases: &[G],
exps: &[<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
{
if locks::PriorityLock::should_break(self.priority) {
return Err(GPUError::GPUTaken);
}
let exp_bits = exp_size::<E>() * 8;
let window_size = calc_window_size(n as usize, exp_bits, self.core_count);
let num_windows = ((exp_bits as f64) / (window_size as f64)).ceil() as usize;
let num_groups = calc_num_groups(self.core_count, num_windows);
let bucket_len = 1 << window_size;
// Each group will have `num_windows` threads and as there are `num_groups` groups, there will
// be `num_groups` * `num_windows` threads in total.
// Each thread will use `bucket_len` buckets, so `num_groups` * `num_windows` * `bucket_len` buckets are used in total.
let mut base_buffer = self.program.create_buffer::<G>(n)?;
base_buffer.write_from(0, bases)?;
let mut exp_buffer = self
.program
.create_buffer::<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>(n)?;
exp_buffer.write_from(0, exps)?;
let bucket_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count * bucket_len)?;
let result_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count)?;
// Make global work size divisible by `LOCAL_WORK_SIZE`
let mut global_work_size = num_windows * num_groups;
global_work_size +=
(LOCAL_WORK_SIZE - (global_work_size % LOCAL_WORK_SIZE)) % LOCAL_WORK_SIZE;
let kernel = self.program.create_kernel(
if TypeId::of::<G>() == TypeId::of::<E::G1Affine>() {
"G1_bellman_multiexp"
} else if TypeId::of::<G>() == TypeId::of::<E::G2Affine>() {
"G2_bellman_multiexp"
} else {
return Err(GPUError::Simple("Only E::G1 and E::G2 are supported!"));
},
global_work_size,
None,
);
call_kernel!(
kernel,
&base_buffer,
&bucket_buffer,
&result_buffer,
&exp_buffer,
n as u32,
num_groups as u32,
num_windows as u32,
window_size as u32
)?;
let mut results = vec![<G as CurveAffine>::Projective::zero(); num_groups * num_windows];
result_buffer.read_into(0, &mut results)?;
// Using the algorithm below, we can calculate the final result by accumulating the results
// of those `NUM_GROUPS` * `NUM_WINDOWS` threads.
let mut acc = <G as CurveAffine>::Projective::zero();
let mut bits = 0;
for i in 0..num_windows {
let w = std::cmp::min(window_size, exp_bits - bits);
for _ in 0..w {
acc.double();
}
for g in 0..num_groups {
acc.add_assign(&results[g * num_windows + i]);
}
bits += w; // Process the next window
}
Ok(acc)
}
}
// A struct that contains several multiexp kernels for different devices
pub struct MultiexpKernel<E>
where
E: Engine,
{
kernels: Vec<SingleMultiexpKernel<E>>,
_lock: locks::GPULock, // RFC 1857: struct fields are dropped in the same order as they are declared.
}
impl<E> MultiexpKernel<E>
where
E: Engine,
{
pub fn create(priority: bool) -> GPUResult<MultiexpKernel<E>> {
let lock = locks::GPULock::lock();
let devices = opencl::Device::all()?;
let kernels: Vec<_> = devices
.into_iter()
.map(|d| (d.clone(), SingleMultiexpKernel::<E>::create(d, priority)))
.filter_map(|(device, res)| {
if let Err(ref e) = res {
error!(
"Cannot initialize kernel for device '{}'! Error: {}",
device.name(),
e
);
}
res.ok()
})
.collect();
if kernels.is_empty() {
return Err(GPUError::Simple("No working GPUs found!"));
}
info!(
"Multiexp: {} working device(s) selected. (CPU utilization: {})",
kernels.len(),
get_cpu_utilization()
);
for (i, k) in kernels.iter().enumerate() {
info!(
"Multiexp: Device {}: {} (Chunk-size: {})",
i,
k.program.device().name(),
k.n
);
}
Ok(MultiexpKernel::<E> {
kernels,
_lock: lock,
})
}
pub fn | <G>(
&mut self,
pool: &Worker,
bases: Arc<Vec<G>>,
exps: Arc<Vec<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>>,
skip: usize,
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
<G as groupy::CurveAffine>::Engine: crate::bls::Engine,
{
let num_devices = self.kernels.len();
// Bases are skipped by `self.1` elements, when converted from (Arc<Vec<G>>, usize) to Source
// https://github.com/zkcrypto/bellman/blob/10c5010fd9c2ca69442dc9775ea271e286e776d8/src/multiexp.rs#L38
let bases = &bases[skip..(skip + n)];
let exps = &exps[..n];
let cpu_n = ((n as f64) * get_cpu_utilization()) as usize;
let n = n - cpu_n;
let (cpu_bases, bases) = bases.split_at(cpu_n);
let (cpu_exps, exps) = exps.split_at(cpu_n);
let chunk_size = ((n as f64) / (num_devices as f64)).ceil() as usize;
crate::multicore::THREAD_POOL.install(|| {
use rayon::prelude::*;
let mut acc = <G as CurveAffine>::Projective::zero();
let results = if n > 0 {
bases
.par_chunks(chunk_size)
.zip(exps.par_chunks(chunk_size))
.zip(self.kernels.par_iter_mut())
.map(|((bases, exps), kern)| -> Result<<G as CurveAffine>::Projective, GPUError> {
let mut acc = <G as CurveAffine>::Projective::zero();
for (bases, exps) in bases.chunks(kern.n).zip(exps.chunks(kern.n)) {
let result = kern.multiexp(bases, exps, bases.len())?;
acc.add_assign(&result);
}
Ok(acc)
})
.collect::<Vec<_>>()
} else {
Vec::new()
};
let cpu_acc = cpu_multiexp(
&pool,
(Arc::new(cpu_bases.to_vec()), 0),
FullDensity,
Arc::new(cpu_exps.to_vec()),
&mut None,
);
for r in results {
acc.add_assign(&r?);
}
acc.add_assign(&cpu_acc.wait().unwrap());
Ok(acc)
})
}
}
| multiexp | identifier_name |
multiexp.rs | use super::error::{GPUError, GPUResult};
use super::locks;
use super::sources;
use super::utils;
use crate::bls::Engine;
use crate::multicore::Worker;
use crate::multiexp::{multiexp as cpu_multiexp, FullDensity};
use ff::{PrimeField, ScalarEngine};
use groupy::{CurveAffine, CurveProjective};
use log::{error, info};
use rust_gpu_tools::*;
use std::any::TypeId;
use std::sync::Arc;
const MAX_WINDOW_SIZE: usize = 10;
const LOCAL_WORK_SIZE: usize = 256;
const MEMORY_PADDING: f64 = 0.2f64; // Let 20% of GPU memory be free
pub fn get_cpu_utilization() -> f64 {
use std::env;
env::var("BELLMAN_CPU_UTILIZATION")
.and_then(|v| match v.parse() {
Ok(val) => Ok(val),
Err(_) => {
error!("Invalid BELLMAN_CPU_UTILIZATION! Defaulting to 0...");
Ok(0f64)
}
})
.unwrap_or(0f64)
.max(0f64)
.min(1f64)
}
// Multiexp kernel for a single GPU
pub struct SingleMultiexpKernel<E>
where
E: Engine,
{
program: opencl::Program,
core_count: usize,
n: usize,
priority: bool,
_phantom: std::marker::PhantomData<E::Fr>,
}
fn calc_num_groups(core_count: usize, num_windows: usize) -> usize {
// Observations show that we get the best performance when num_groups * num_windows ~= 2 * CUDA_CORES
2 * core_count / num_windows
}
fn calc_window_size(n: usize, exp_bits: usize, core_count: usize) -> usize {
// window_size = ln(n / num_groups)
// num_windows = exp_bits / window_size
// num_groups = 2 * core_count / num_windows = 2 * core_count * window_size / exp_bits
// window_size = ln(n / num_groups) = ln(n * exp_bits / (2 * core_count * window_size))
// window_size = ln(exp_bits * n / (2 * core_count)) - ln(window_size)
//
// Thus we need to solve the following equation:
// window_size + ln(window_size) = ln(exp_bits * n / (2 * core_count))
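// Illustrative example (numbers not from the original source): with exp_bits = 256,
// n = 2^20 and core_count = 2560, lower_bound = ln(256 * 2^20 / 5120) ≈ 10.87, and the
// smallest w with w + ln(w) > 10.87 is w = 9, so a 9-bit window would be chosen.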
let lower_bound = (((exp_bits * n) as f64) / ((2 * core_count) as f64)).ln();
for w in 0..MAX_WINDOW_SIZE {
if (w as f64) + (w as f64).ln() > lower_bound |
}
MAX_WINDOW_SIZE
}
fn calc_best_chunk_size(max_window_size: usize, core_count: usize, exp_bits: usize) -> usize {
// Best chunk-size (N) can also be calculated using the same logic as calc_window_size:
// n = e^window_size * window_size * 2 * core_count / exp_bits
(((max_window_size as f64).exp() as f64)
* (max_window_size as f64)
* 2f64
* (core_count as f64)
/ (exp_bits as f64))
.ceil() as usize
}
fn calc_chunk_size<E>(mem: u64, core_count: usize) -> usize
where
E: Engine,
{
let aff_size = std::mem::size_of::<E::G1Affine>() + std::mem::size_of::<E::G2Affine>();
let exp_size = exp_size::<E>();
let proj_size = std::mem::size_of::<E::G1>() + std::mem::size_of::<E::G2>();
((((mem as f64) * (1f64 - MEMORY_PADDING)) as usize)
- (2 * core_count * ((1 << MAX_WINDOW_SIZE) + 1) * proj_size))
/ (aff_size + exp_size)
}
fn exp_size<E: Engine>() -> usize {
std::mem::size_of::<<E::Fr as ff::PrimeField>::Repr>()
}
impl<E> SingleMultiexpKernel<E>
where
E: Engine,
{
pub fn create(d: opencl::Device, priority: bool) -> GPUResult<SingleMultiexpKernel<E>> {
let src = sources::kernel::<E>(d.brand() == opencl::Brand::Nvidia);
let exp_bits = exp_size::<E>() * 8;
let core_count = utils::get_core_count(&d);
let mem = d.memory();
let max_n = calc_chunk_size::<E>(mem, core_count);
let best_n = calc_best_chunk_size(MAX_WINDOW_SIZE, core_count, exp_bits);
let n = std::cmp::min(max_n, best_n);
Ok(SingleMultiexpKernel {
program: opencl::Program::from_opencl(d, &src)?,
core_count,
n,
priority,
_phantom: std::marker::PhantomData,
})
}
pub fn multiexp<G>(
&mut self,
bases: &[G],
exps: &[<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
{
if locks::PriorityLock::should_break(self.priority) {
return Err(GPUError::GPUTaken);
}
let exp_bits = exp_size::<E>() * 8;
let window_size = calc_window_size(n as usize, exp_bits, self.core_count);
let num_windows = ((exp_bits as f64) / (window_size as f64)).ceil() as usize;
let num_groups = calc_num_groups(self.core_count, num_windows);
let bucket_len = 1 << window_size;
// Each group will have `num_windows` threads and as there are `num_groups` groups, there will
// be `num_groups` * `num_windows` threads in total.
// Each thread will use `bucket_len` buckets, so `num_groups` * `num_windows` * `bucket_len` buckets are used in total.
let mut base_buffer = self.program.create_buffer::<G>(n)?;
base_buffer.write_from(0, bases)?;
let mut exp_buffer = self
.program
.create_buffer::<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>(n)?;
exp_buffer.write_from(0, exps)?;
let bucket_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count * bucket_len)?;
let result_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count)?;
// Make global work size divisible by `LOCAL_WORK_SIZE`
let mut global_work_size = num_windows * num_groups;
global_work_size +=
(LOCAL_WORK_SIZE - (global_work_size % LOCAL_WORK_SIZE)) % LOCAL_WORK_SIZE;
let kernel = self.program.create_kernel(
if TypeId::of::<G>() == TypeId::of::<E::G1Affine>() {
"G1_bellman_multiexp"
} else if TypeId::of::<G>() == TypeId::of::<E::G2Affine>() {
"G2_bellman_multiexp"
} else {
return Err(GPUError::Simple("Only E::G1 and E::G2 are supported!"));
},
global_work_size,
None,
);
call_kernel!(
kernel,
&base_buffer,
&bucket_buffer,
&result_buffer,
&exp_buffer,
n as u32,
num_groups as u32,
num_windows as u32,
window_size as u32
)?;
let mut results = vec![<G as CurveAffine>::Projective::zero(); num_groups * num_windows];
result_buffer.read_into(0, &mut results)?;
// Using the algorithm below, we can calculate the final result by accumulating the results
// of those `NUM_GROUPS` * `NUM_WINDOWS` threads.
let mut acc = <G as CurveAffine>::Projective::zero();
let mut bits = 0;
for i in 0..num_windows {
let w = std::cmp::min(window_size, exp_bits - bits);
for _ in 0..w {
acc.double();
}
for g in 0..num_groups {
acc.add_assign(&results[g * num_windows + i]);
}
bits += w; // Process the next window
}
Ok(acc)
}
}
// A struct that containts several multiexp kernels for different devices
pub struct MultiexpKernel<E>
where
E: Engine,
{
kernels: Vec<SingleMultiexpKernel<E>>,
_lock: locks::GPULock, // RFC 1857: struct fields are dropped in the same order as they are declared.
}
impl<E> MultiexpKernel<E>
where
E: Engine,
{
pub fn create(priority: bool) -> GPUResult<MultiexpKernel<E>> {
let lock = locks::GPULock::lock();
let devices = opencl::Device::all()?;
let kernels: Vec<_> = devices
.into_iter()
.map(|d| (d.clone(), SingleMultiexpKernel::<E>::create(d, priority)))
.filter_map(|(device, res)| {
if let Err(ref e) = res {
error!(
"Cannot initialize kernel for device '{}'! Error: {}",
device.name(),
e
);
}
res.ok()
})
.collect();
if kernels.is_empty() {
return Err(GPUError::Simple("No working GPUs found!"));
}
info!(
"Multiexp: {} working device(s) selected. (CPU utilization: {})",
kernels.len(),
get_cpu_utilization()
);
for (i, k) in kernels.iter().enumerate() {
info!(
"Multiexp: Device {}: {} (Chunk-size: {})",
i,
k.program.device().name(),
k.n
);
}
Ok(MultiexpKernel::<E> {
kernels,
_lock: lock,
})
}
pub fn multiexp<G>(
&mut self,
pool: &Worker,
bases: Arc<Vec<G>>,
exps: Arc<Vec<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>>,
skip: usize,
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
<G as groupy::CurveAffine>::Engine: crate::bls::Engine,
{
let num_devices = self.kernels.len();
// Bases are skipped by `self.1` elements, when converted from (Arc<Vec<G>>, usize) to Source
// https://github.com/zkcrypto/bellman/blob/10c5010fd9c2ca69442dc9775ea271e286e776d8/src/multiexp.rs#L38
let bases = &bases[skip..(skip + n)];
let exps = &exps[..n];
let cpu_n = ((n as f64) * get_cpu_utilization()) as usize;
let n = n - cpu_n;
let (cpu_bases, bases) = bases.split_at(cpu_n);
let (cpu_exps, exps) = exps.split_at(cpu_n);
let chunk_size = ((n as f64) / (num_devices as f64)).ceil() as usize;
crate::multicore::THREAD_POOL.install(|| {
use rayon::prelude::*;
let mut acc = <G as CurveAffine>::Projective::zero();
let results = if n > 0 {
bases
.par_chunks(chunk_size)
.zip(exps.par_chunks(chunk_size))
.zip(self.kernels.par_iter_mut())
.map(|((bases, exps), kern)| -> Result<<G as CurveAffine>::Projective, GPUError> {
let mut acc = <G as CurveAffine>::Projective::zero();
for (bases, exps) in bases.chunks(kern.n).zip(exps.chunks(kern.n)) {
let result = kern.multiexp(bases, exps, bases.len())?;
acc.add_assign(&result);
}
Ok(acc)
})
.collect::<Vec<_>>()
} else {
Vec::new()
};
let cpu_acc = cpu_multiexp(
&pool,
(Arc::new(cpu_bases.to_vec()), 0),
FullDensity,
Arc::new(cpu_exps.to_vec()),
&mut None,
);
for r in results {
acc.add_assign(&r?);
}
acc.add_assign(&cpu_acc.wait().unwrap());
Ok(acc)
})
}
}
| {
return w;
} | conditional_block |
multiexp.rs | use super::error::{GPUError, GPUResult};
use super::locks;
use super::sources;
use super::utils;
use crate::bls::Engine;
use crate::multicore::Worker;
use crate::multiexp::{multiexp as cpu_multiexp, FullDensity};
use ff::{PrimeField, ScalarEngine};
use groupy::{CurveAffine, CurveProjective};
use log::{error, info};
use rust_gpu_tools::*;
use std::any::TypeId;
use std::sync::Arc;
const MAX_WINDOW_SIZE: usize = 10;
const LOCAL_WORK_SIZE: usize = 256;
const MEMORY_PADDING: f64 = 0.2f64; // Let 20% of GPU memory be free
pub fn get_cpu_utilization() -> f64 {
use std::env;
env::var("BELLMAN_CPU_UTILIZATION")
.and_then(|v| match v.parse() {
Ok(val) => Ok(val),
Err(_) => {
error!("Invalid BELLMAN_CPU_UTILIZATION! Defaulting to 0...");
Ok(0f64)
}
})
.unwrap_or(0f64)
.max(0f64)
.min(1f64)
}
// Multiexp kernel for a single GPU
pub struct SingleMultiexpKernel<E>
where
E: Engine,
{
program: opencl::Program,
core_count: usize,
n: usize,
priority: bool,
_phantom: std::marker::PhantomData<E::Fr>,
}
fn calc_num_groups(core_count: usize, num_windows: usize) -> usize {
// Observations show that we get the best performance when num_groups * num_windows ~= 2 * CUDA_CORES
2 * core_count / num_windows
}
fn calc_window_size(n: usize, exp_bits: usize, core_count: usize) -> usize {
// window_size = ln(n / num_groups)
// num_windows = exp_bits / window_size
// num_groups = 2 * core_count / num_windows = 2 * core_count * window_size / exp_bits
// window_size = ln(n / num_groups) = ln(n * exp_bits / (2 * core_count * window_size))
// window_size = ln(exp_bits * n / (2 * core_count)) - ln(window_size)
//
// Thus we need to solve the following equation:
// window_size + ln(window_size) = ln(exp_bits * n / (2 * core_count))
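// Illustrative example (numbers not from the original source): with exp_bits = 256,
// n = 2^20 and core_count = 2560, lower_bound = ln(256 * 2^20 / 5120) ≈ 10.87, and the
// smallest w with w + ln(w) > 10.87 is w = 9, so a 9-bit window would be chosen.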
let lower_bound = (((exp_bits * n) as f64) / ((2 * core_count) as f64)).ln();
for w in 0..MAX_WINDOW_SIZE {
if (w as f64) + (w as f64).ln() > lower_bound {
return w;
}
}
MAX_WINDOW_SIZE
}
fn calc_best_chunk_size(max_window_size: usize, core_count: usize, exp_bits: usize) -> usize {
// Best chunk-size (N) can also be calculated using the same logic as calc_window_size:
// n = e^window_size * window_size * 2 * core_count / exp_bits
(((max_window_size as f64).exp() as f64)
* (max_window_size as f64)
* 2f64
* (core_count as f64)
/ (exp_bits as f64))
.ceil() as usize
}
fn calc_chunk_size<E>(mem: u64, core_count: usize) -> usize
where
E: Engine,
{
let aff_size = std::mem::size_of::<E::G1Affine>() + std::mem::size_of::<E::G2Affine>();
let exp_size = exp_size::<E>();
let proj_size = std::mem::size_of::<E::G1>() + std::mem::size_of::<E::G2>();
((((mem as f64) * (1f64 - MEMORY_PADDING)) as usize)
- (2 * core_count * ((1 << MAX_WINDOW_SIZE) + 1) * proj_size))
/ (aff_size + exp_size)
}
fn exp_size<E: Engine>() -> usize {
std::mem::size_of::<<E::Fr as ff::PrimeField>::Repr>()
}
impl<E> SingleMultiexpKernel<E>
where
E: Engine,
{
pub fn create(d: opencl::Device, priority: bool) -> GPUResult<SingleMultiexpKernel<E>> {
let src = sources::kernel::<E>(d.brand() == opencl::Brand::Nvidia);
let exp_bits = exp_size::<E>() * 8;
let core_count = utils::get_core_count(&d);
let mem = d.memory();
let max_n = calc_chunk_size::<E>(mem, core_count);
let best_n = calc_best_chunk_size(MAX_WINDOW_SIZE, core_count, exp_bits);
let n = std::cmp::min(max_n, best_n);
Ok(SingleMultiexpKernel {
program: opencl::Program::from_opencl(d, &src)?,
core_count,
n,
priority,
_phantom: std::marker::PhantomData,
})
}
pub fn multiexp<G>(
&mut self,
bases: &[G],
exps: &[<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
{
if locks::PriorityLock::should_break(self.priority) {
return Err(GPUError::GPUTaken);
}
let exp_bits = exp_size::<E>() * 8;
let window_size = calc_window_size(n as usize, exp_bits, self.core_count);
let num_windows = ((exp_bits as f64) / (window_size as f64)).ceil() as usize;
let num_groups = calc_num_groups(self.core_count, num_windows);
let bucket_len = 1 << window_size;
// Each group will have `num_windows` threads and as there are `num_groups` groups, there will
// be `num_groups` * `num_windows` threads in total.
// Each thread will use `bucket_len` buckets, so `num_groups` * `num_windows` * `bucket_len` buckets are used in total.
let mut base_buffer = self.program.create_buffer::<G>(n)?;
base_buffer.write_from(0, bases)?;
let mut exp_buffer = self
.program
.create_buffer::<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>(n)?;
exp_buffer.write_from(0, exps)?;
let bucket_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count * bucket_len)?;
let result_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count)?;
// Make global work size divisible by `LOCAL_WORK_SIZE`
let mut global_work_size = num_windows * num_groups;
global_work_size +=
(LOCAL_WORK_SIZE - (global_work_size % LOCAL_WORK_SIZE)) % LOCAL_WORK_SIZE;
let kernel = self.program.create_kernel(
if TypeId::of::<G>() == TypeId::of::<E::G1Affine>() {
"G1_bellman_multiexp"
} else if TypeId::of::<G>() == TypeId::of::<E::G2Affine>() {
"G2_bellman_multiexp"
} else {
return Err(GPUError::Simple("Only E::G1 and E::G2 are supported!"));
},
global_work_size,
None,
);
call_kernel!(
kernel,
&base_buffer,
&bucket_buffer,
&result_buffer,
&exp_buffer,
n as u32,
num_groups as u32,
num_windows as u32,
window_size as u32
)?;
let mut results = vec![<G as CurveAffine>::Projective::zero(); num_groups * num_windows];
result_buffer.read_into(0, &mut results)?;
// Using the algorithm below, we can calculate the final result by accumulating the results
// of those `NUM_GROUPS` * `NUM_WINDOWS` threads.
let mut acc = <G as CurveAffine>::Projective::zero();
let mut bits = 0;
for i in 0..num_windows {
let w = std::cmp::min(window_size, exp_bits - bits);
for _ in 0..w {
acc.double();
}
for g in 0..num_groups {
acc.add_assign(&results[g * num_windows + i]);
}
bits += w; // Process the next window
}
Ok(acc)
}
}
// A struct that containts several multiexp kernels for different devices
pub struct MultiexpKernel<E>
where
E: Engine,
{
kernels: Vec<SingleMultiexpKernel<E>>,
_lock: locks::GPULock, // RFC 1857: struct fields are dropped in the same order as they are declared.
}
impl<E> MultiexpKernel<E>
where
E: Engine,
{
pub fn create(priority: bool) -> GPUResult<MultiexpKernel<E>> {
let lock = locks::GPULock::lock();
let devices = opencl::Device::all()?;
let kernels: Vec<_> = devices
.into_iter()
.map(|d| (d.clone(), SingleMultiexpKernel::<E>::create(d, priority)))
.filter_map(|(device, res)| {
if let Err(ref e) = res {
error!(
"Cannot initialize kernel for device '{}'! Error: {}",
device.name(),
e
); | })
.collect();
if kernels.is_empty() {
return Err(GPUError::Simple("No working GPUs found!"));
}
info!(
"Multiexp: {} working device(s) selected. (CPU utilization: {})",
kernels.len(),
get_cpu_utilization()
);
for (i, k) in kernels.iter().enumerate() {
info!(
"Multiexp: Device {}: {} (Chunk-size: {})",
i,
k.program.device().name(),
k.n
);
}
Ok(MultiexpKernel::<E> {
kernels,
_lock: lock,
})
}
pub fn multiexp<G>(
&mut self,
pool: &Worker,
bases: Arc<Vec<G>>,
exps: Arc<Vec<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>>,
skip: usize,
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
<G as groupy::CurveAffine>::Engine: crate::bls::Engine,
{
let num_devices = self.kernels.len();
// Bases are skipped by `self.1` elements, when converted from (Arc<Vec<G>>, usize) to Source
// https://github.com/zkcrypto/bellman/blob/10c5010fd9c2ca69442dc9775ea271e286e776d8/src/multiexp.rs#L38
let bases = &bases[skip..(skip + n)];
let exps = &exps[..n];
let cpu_n = ((n as f64) * get_cpu_utilization()) as usize;
let n = n - cpu_n;
let (cpu_bases, bases) = bases.split_at(cpu_n);
let (cpu_exps, exps) = exps.split_at(cpu_n);
let chunk_size = ((n as f64) / (num_devices as f64)).ceil() as usize;
crate::multicore::THREAD_POOL.install(|| {
use rayon::prelude::*;
let mut acc = <G as CurveAffine>::Projective::zero();
let results = if n > 0 {
bases
.par_chunks(chunk_size)
.zip(exps.par_chunks(chunk_size))
.zip(self.kernels.par_iter_mut())
.map(|((bases, exps), kern)| -> Result<<G as CurveAffine>::Projective, GPUError> {
let mut acc = <G as CurveAffine>::Projective::zero();
for (bases, exps) in bases.chunks(kern.n).zip(exps.chunks(kern.n)) {
let result = kern.multiexp(bases, exps, bases.len())?;
acc.add_assign(&result);
}
Ok(acc)
})
.collect::<Vec<_>>()
} else {
Vec::new()
};
let cpu_acc = cpu_multiexp(
&pool,
(Arc::new(cpu_bases.to_vec()), 0),
FullDensity,
Arc::new(cpu_exps.to_vec()),
&mut None,
);
for r in results {
acc.add_assign(&r?);
}
acc.add_assign(&cpu_acc.wait().unwrap());
Ok(acc)
})
}
} | }
res.ok() | random_line_split |
multiexp.rs | use super::error::{GPUError, GPUResult};
use super::locks;
use super::sources;
use super::utils;
use crate::bls::Engine;
use crate::multicore::Worker;
use crate::multiexp::{multiexp as cpu_multiexp, FullDensity};
use ff::{PrimeField, ScalarEngine};
use groupy::{CurveAffine, CurveProjective};
use log::{error, info};
use rust_gpu_tools::*;
use std::any::TypeId;
use std::sync::Arc;
const MAX_WINDOW_SIZE: usize = 10;
const LOCAL_WORK_SIZE: usize = 256;
const MEMORY_PADDING: f64 = 0.2f64; // Let 20% of GPU memory be free
pub fn get_cpu_utilization() -> f64 {
use std::env;
env::var("BELLMAN_CPU_UTILIZATION")
.and_then(|v| match v.parse() {
Ok(val) => Ok(val),
Err(_) => {
error!("Invalid BELLMAN_CPU_UTILIZATION! Defaulting to 0...");
Ok(0f64)
}
})
.unwrap_or(0f64)
.max(0f64)
.min(1f64)
}
// Multiexp kernel for a single GPU
pub struct SingleMultiexpKernel<E>
where
E: Engine,
{
program: opencl::Program,
core_count: usize,
n: usize,
priority: bool,
_phantom: std::marker::PhantomData<E::Fr>,
}
fn calc_num_groups(core_count: usize, num_windows: usize) -> usize {
// Observations show that we get the best performance when num_groups * num_windows ~= 2 * CUDA_CORES
2 * core_count / num_windows
}
fn calc_window_size(n: usize, exp_bits: usize, core_count: usize) -> usize {
// window_size = ln(n / num_groups)
// num_windows = exp_bits / window_size
// num_groups = 2 * core_count / num_windows = 2 * core_count * window_size / exp_bits
// window_size = ln(n / num_groups) = ln(n * exp_bits / (2 * core_count * window_size))
// window_size = ln(exp_bits * n / (2 * core_count)) - ln(window_size)
//
// Thus we need to solve the following equation:
// window_size + ln(window_size) = ln(exp_bits * n / (2 * core_count))
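// Illustrative example (numbers not from the original source): with exp_bits = 256,
// n = 2^20 and core_count = 2560, lower_bound = ln(256 * 2^20 / 5120) ≈ 10.87, and the
// smallest w with w + ln(w) > 10.87 is w = 9, so a 9-bit window would be chosen.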
let lower_bound = (((exp_bits * n) as f64) / ((2 * core_count) as f64)).ln();
for w in 0..MAX_WINDOW_SIZE {
if (w as f64) + (w as f64).ln() > lower_bound {
return w;
}
}
MAX_WINDOW_SIZE
}
fn calc_best_chunk_size(max_window_size: usize, core_count: usize, exp_bits: usize) -> usize {
// Best chunk-size (N) can also be calculated using the same logic as calc_window_size:
// n = e^window_size * window_size * 2 * core_count / exp_bits
(((max_window_size as f64).exp() as f64)
* (max_window_size as f64)
* 2f64
* (core_count as f64)
/ (exp_bits as f64))
.ceil() as usize
}
fn calc_chunk_size<E>(mem: u64, core_count: usize) -> usize
where
E: Engine,
|
fn exp_size<E: Engine>() -> usize {
std::mem::size_of::<<E::Fr as ff::PrimeField>::Repr>()
}
impl<E> SingleMultiexpKernel<E>
where
E: Engine,
{
pub fn create(d: opencl::Device, priority: bool) -> GPUResult<SingleMultiexpKernel<E>> {
let src = sources::kernel::<E>(d.brand() == opencl::Brand::Nvidia);
let exp_bits = exp_size::<E>() * 8;
let core_count = utils::get_core_count(&d);
let mem = d.memory();
let max_n = calc_chunk_size::<E>(mem, core_count);
let best_n = calc_best_chunk_size(MAX_WINDOW_SIZE, core_count, exp_bits);
let n = std::cmp::min(max_n, best_n);
Ok(SingleMultiexpKernel {
program: opencl::Program::from_opencl(d, &src)?,
core_count,
n,
priority,
_phantom: std::marker::PhantomData,
})
}
pub fn multiexp<G>(
&mut self,
bases: &[G],
exps: &[<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
{
if locks::PriorityLock::should_break(self.priority) {
return Err(GPUError::GPUTaken);
}
let exp_bits = exp_size::<E>() * 8;
let window_size = calc_window_size(n as usize, exp_bits, self.core_count);
let num_windows = ((exp_bits as f64) / (window_size as f64)).ceil() as usize;
let num_groups = calc_num_groups(self.core_count, num_windows);
let bucket_len = 1 << window_size;
// Each group will have `num_windows` threads and as there are `num_groups` groups, there will
// be `num_groups` * `num_windows` threads in total.
// Each thread will use `bucket_len` buckets, so `num_groups` * `num_windows` * `bucket_len` buckets are used in total.
let mut base_buffer = self.program.create_buffer::<G>(n)?;
base_buffer.write_from(0, bases)?;
let mut exp_buffer = self
.program
.create_buffer::<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>(n)?;
exp_buffer.write_from(0, exps)?;
let bucket_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count * bucket_len)?;
let result_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count)?;
// Make global work size divisible by `LOCAL_WORK_SIZE`
let mut global_work_size = num_windows * num_groups;
global_work_size +=
(LOCAL_WORK_SIZE - (global_work_size % LOCAL_WORK_SIZE)) % LOCAL_WORK_SIZE;
let kernel = self.program.create_kernel(
if TypeId::of::<G>() == TypeId::of::<E::G1Affine>() {
"G1_bellman_multiexp"
} else if TypeId::of::<G>() == TypeId::of::<E::G2Affine>() {
"G2_bellman_multiexp"
} else {
return Err(GPUError::Simple("Only E::G1 and E::G2 are supported!"));
},
global_work_size,
None,
);
call_kernel!(
kernel,
&base_buffer,
&bucket_buffer,
&result_buffer,
&exp_buffer,
n as u32,
num_groups as u32,
num_windows as u32,
window_size as u32
)?;
let mut results = vec![<G as CurveAffine>::Projective::zero(); num_groups * num_windows];
result_buffer.read_into(0, &mut results)?;
// Using the algorithm below, we can calculate the final result by accumulating the results
// of those `NUM_GROUPS` * `NUM_WINDOWS` threads.
let mut acc = <G as CurveAffine>::Projective::zero();
let mut bits = 0;
for i in 0..num_windows {
let w = std::cmp::min(window_size, exp_bits - bits);
for _ in 0..w {
acc.double();
}
for g in 0..num_groups {
acc.add_assign(&results[g * num_windows + i]);
}
bits += w; // Process the next window
}
Ok(acc)
}
}
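// Illustrative sketch (not part of the original code): the accumulation loop at the end of
// `multiexp` above is a left-to-right double-and-add over windows. Here plain u128 arithmetic
// stands in for the group operations, and `window_sums[0]` is assumed to hold the most
// significant window's partial sum. E.g. with exp_bits = 8, window_size = 3 and
// window_sums = [5, 3, 2], the result is (5 << 5) + (3 << 2) + 2 = 174.
#[allow(dead_code)]
fn recombine_windows(window_sums: &[u128], window_size: usize, exp_bits: usize) -> u128 {
    let num_windows = (exp_bits + window_size - 1) / window_size;
    assert_eq!(window_sums.len(), num_windows);
    let mut acc: u128 = 0;
    let mut bits = 0;
    for sum in window_sums {
        let w = std::cmp::min(window_size, exp_bits - bits);
        acc <<= w; // `w` doublings, mirroring the repeated `acc.double()` calls above.
        acc += *sum; // mirrors adding each group's result for this window.
        bits += w;
    }
    acc
}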
// A struct that contains several multiexp kernels for different devices
pub struct MultiexpKernel<E>
where
E: Engine,
{
kernels: Vec<SingleMultiexpKernel<E>>,
_lock: locks::GPULock, // RFC 1857: struct fields are dropped in the same order as they are declared.
}
impl<E> MultiexpKernel<E>
where
E: Engine,
{
pub fn create(priority: bool) -> GPUResult<MultiexpKernel<E>> {
let lock = locks::GPULock::lock();
let devices = opencl::Device::all()?;
let kernels: Vec<_> = devices
.into_iter()
.map(|d| (d.clone(), SingleMultiexpKernel::<E>::create(d, priority)))
.filter_map(|(device, res)| {
if let Err(ref e) = res {
error!(
"Cannot initialize kernel for device '{}'! Error: {}",
device.name(),
e
);
}
res.ok()
})
.collect();
if kernels.is_empty() {
return Err(GPUError::Simple("No working GPUs found!"));
}
info!(
"Multiexp: {} working device(s) selected. (CPU utilization: {})",
kernels.len(),
get_cpu_utilization()
);
for (i, k) in kernels.iter().enumerate() {
info!(
"Multiexp: Device {}: {} (Chunk-size: {})",
i,
k.program.device().name(),
k.n
);
}
Ok(MultiexpKernel::<E> {
kernels,
_lock: lock,
})
}
pub fn multiexp<G>(
&mut self,
pool: &Worker,
bases: Arc<Vec<G>>,
exps: Arc<Vec<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>>,
skip: usize,
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
<G as groupy::CurveAffine>::Engine: crate::bls::Engine,
{
let num_devices = self.kernels.len();
// Bases are skipped by `self.1` elements, when converted from (Arc<Vec<G>>, usize) to Source
// https://github.com/zkcrypto/bellman/blob/10c5010fd9c2ca69442dc9775ea271e286e776d8/src/multiexp.rs#L38
let bases = &bases[skip..(skip + n)];
let exps = &exps[..n];
let cpu_n = ((n as f64) * get_cpu_utilization()) as usize;
let n = n - cpu_n;
let (cpu_bases, bases) = bases.split_at(cpu_n);
let (cpu_exps, exps) = exps.split_at(cpu_n);
let chunk_size = ((n as f64) / (num_devices as f64)).ceil() as usize;
crate::multicore::THREAD_POOL.install(|| {
use rayon::prelude::*;
let mut acc = <G as CurveAffine>::Projective::zero();
let results = if n > 0 {
bases
.par_chunks(chunk_size)
.zip(exps.par_chunks(chunk_size))
.zip(self.kernels.par_iter_mut())
.map(|((bases, exps), kern)| -> Result<<G as CurveAffine>::Projective, GPUError> {
let mut acc = <G as CurveAffine>::Projective::zero();
for (bases, exps) in bases.chunks(kern.n).zip(exps.chunks(kern.n)) {
let result = kern.multiexp(bases, exps, bases.len())?;
acc.add_assign(&result);
}
Ok(acc)
})
.collect::<Vec<_>>()
} else {
Vec::new()
};
let cpu_acc = cpu_multiexp(
&pool,
(Arc::new(cpu_bases.to_vec()), 0),
FullDensity,
Arc::new(cpu_exps.to_vec()),
&mut None,
);
for r in results {
acc.add_assign(&r?);
}
acc.add_assign(&cpu_acc.wait().unwrap());
Ok(acc)
})
}
}
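// Illustrative sketch (not part of the original code) of how `multiexp` above divides the work;
// the numbers are assumptions chosen only for the example. With n = 1_000_000 elements,
// get_cpu_utilization() = 0.1 and 2 GPUs:
//   cpu_n      = (1_000_000 as f64 * 0.1) as usize = 100_000   (handed to cpu_multiexp),
//   gpu_n      = 1_000_000 - 100_000 = 900_000,
//   chunk_size = ceil(900_000 / 2) = 450_000 per device,
// and each device then walks its chunk in `kern.n`-sized pieces.
#[allow(dead_code)]
fn split_work_sketch(n: usize, cpu_utilization: f64, num_devices: usize) -> (usize, usize) {
    let cpu_n = ((n as f64) * cpu_utilization) as usize;
    let gpu_n = n - cpu_n;
    let chunk_size = ((gpu_n as f64) / (num_devices as f64)).ceil() as usize;
    (cpu_n, chunk_size)
}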
| {
let aff_size = std::mem::size_of::<E::G1Affine>() + std::mem::size_of::<E::G2Affine>();
let exp_size = exp_size::<E>();
let proj_size = std::mem::size_of::<E::G1>() + std::mem::size_of::<E::G2>();
((((mem as f64) * (1f64 - MEMORY_PADDING)) as usize)
- (2 * core_count * ((1 << MAX_WINDOW_SIZE) + 1) * proj_size))
/ (aff_size + exp_size)
} | identifier_body |
pso.rs | // Copyright 2015 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Raw Pipeline State Objects
//!
//! This module contains items used to create and manage a raw pipeline state object. Most users
//! will want to use the typed and safe `PipelineState`. See the `pso` module inside the `gfx`
//! crate.
use {MAX_COLOR_TARGETS, MAX_VERTEX_ATTRIBUTES, MAX_CONSTANT_BUFFERS,
MAX_RESOURCE_VIEWS, MAX_UNORDERED_VIEWS, MAX_SAMPLERS};
use {ConstantBufferSlot, ColorSlot, ResourceViewSlot,
UnorderedViewSlot, SamplerSlot,
Primitive, Resources};
use {format, state as s, texture};
use shade::Usage;
use std::error::Error;
use std::fmt;
/// Maximum number of vertex buffers used in a PSO definition.
pub const MAX_VERTEX_BUFFERS: usize = 4;
/// An offset inside a vertex buffer, in bytes.
pub type BufferOffset = usize;
/// Error types happening upon PSO creation on the device side.
#[derive(Clone, Debug, PartialEq)]
pub struct CreationError;
impl fmt::Display for CreationError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
impl Error for CreationError {
fn description(&self) -> &str {
"Could not create PSO on device."
}
}
/// Color output configuration of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct ColorInfo {
/// Color channel mask
pub mask: s::ColorMask,
/// Optional color blending
pub color: Option<s::BlendChannel>,
/// Optional alpha blending
pub alpha: Option<s::BlendChannel>,
}
impl From<s::ColorMask> for ColorInfo {
fn from(mask: s::ColorMask) -> ColorInfo {
ColorInfo {
mask: mask,
color: None,
alpha: None,
}
}
}
impl From<s::Blend> for ColorInfo {
fn from(blend: s::Blend) -> ColorInfo {
ColorInfo {
mask: s::MASK_ALL,
color: Some(blend.color),
alpha: Some(blend.alpha),
}
}
}
/// Depth and stencil state of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct DepthStencilInfo {
/// Optional depth test configuration
pub depth: Option<s::Depth>,
/// Optional stencil test on the front faces
pub front: Option<s::StencilSide>,
/// Optional stencil test on the back faces
pub back: Option<s::StencilSide>,
}
impl From<s::Depth> for DepthStencilInfo {
fn from(depth: s::Depth) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(depth),
front: None,
back: None,
}
}
}
impl From<s::Stencil> for DepthStencilInfo {
fn from(stencil: s::Stencil) -> DepthStencilInfo {
DepthStencilInfo {
depth: None,
front: Some(stencil.front),
back: Some(stencil.back),
}
}
}
impl From<(s::Depth, s::Stencil)> for DepthStencilInfo {
fn from(ds: (s::Depth, s::Stencil)) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(ds.0),
front: Some(ds.1.front),
back: Some(ds.1.back),
}
}
}
/// Index of a vertex buffer.
pub type BufferIndex = u8;
/// Offset of an attribute from the start of the buffer, in bytes
pub type ElemOffset = u32;
/// Offset between attribute values, in bytes
pub type ElemStride = u8;
/// The number of instances between each subsequent attribute value
pub type InstanceRate = u8;
/// A struct element descriptor.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Element<F> {
/// Element format
pub format: F,
/// Offset from the beginning of the container, in bytes
pub offset: ElemOffset,
}
/// Vertex buffer descriptor
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct VertexBufferDesc {
/// Total container size, in bytes
pub stride: ElemStride,
/// Rate of the input for the given buffer
pub rate: InstanceRate,
}
/// PSO vertex attribute descriptor
pub type AttributeDesc = (BufferIndex, Element<format::Format>);
/// PSO constant buffer descriptor
pub type ConstantBufferDesc = Usage;
/// PSO shader resource view descriptor
pub type ResourceViewDesc = Usage;
/// PSO unordered access view descriptor
pub type UnorderedViewDesc = Usage;
/// PSO sampler descriptor
pub type SamplerDesc = Usage;
/// PSO color target descriptor
pub type ColorTargetDesc = (format::Format, ColorInfo);
/// PSO depth-stencil target descriptor
pub type DepthStencilDesc = (format::Format, DepthStencilInfo);
/// All the information surrounding a shader program that is required
/// for PSO creation, including the formats of vertex buffers and pixel targets;
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Descriptor {
/// Type of the primitive
pub primitive: Primitive,
/// Rasterizer setup
pub rasterizer: s::Rasterizer,
/// Enable scissor test
pub scissor: bool,
/// Vertex buffers
pub vertex_buffers: [Option<VertexBufferDesc>; MAX_VERTEX_BUFFERS],
/// Vertex attributes
pub attributes: [Option<AttributeDesc>; MAX_VERTEX_ATTRIBUTES],
/// Constant buffers
pub constant_buffers: [Option<ConstantBufferDesc>; MAX_CONSTANT_BUFFERS],
/// Shader resource views
pub resource_views: [Option<ResourceViewDesc>; MAX_RESOURCE_VIEWS],
/// Unordered access views
pub unordered_views: [Option<UnorderedViewDesc>; MAX_UNORDERED_VIEWS],
/// Samplers
pub samplers: [Option<SamplerDesc>; MAX_SAMPLERS],
/// Render target views (RTV)
pub color_targets: [Option<ColorTargetDesc>; MAX_COLOR_TARGETS],
/// Depth stencil view (DSV)
pub depth_stencil: Option<DepthStencilDesc>,
}
impl Descriptor {
/// Create a new empty PSO descriptor.
pub fn new(primitive: Primitive, rast: s::Rasterizer) -> Descriptor {
Descriptor {
primitive: primitive,
rasterizer: rast,
scissor: false,
vertex_buffers: [None; MAX_VERTEX_BUFFERS],
attributes: [None; MAX_VERTEX_ATTRIBUTES],
constant_buffers: [None; MAX_CONSTANT_BUFFERS],
resource_views: [None; MAX_RESOURCE_VIEWS],
unordered_views: [None; MAX_UNORDERED_VIEWS],
samplers: [None; MAX_SAMPLERS],
color_targets: [None; MAX_COLOR_TARGETS],
depth_stencil: None,
}
}
}
/// A complete set of vertex buffers to be used for vertex import in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct VertexBufferSet<R: Resources>(
/// Array of buffer handles with offsets in them
pub [Option<(R::Buffer, BufferOffset)>; MAX_VERTEX_ATTRIBUTES]
);
impl<R: Resources> VertexBufferSet<R> {
/// Create an empty set
pub fn new() -> VertexBufferSet<R> {
VertexBufferSet([None; MAX_VERTEX_ATTRIBUTES])
}
}
/// A constant buffer run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ConstantBufferParam<R: Resources>(pub R::Buffer, pub Usage, pub ConstantBufferSlot);
/// A shader resource view (SRV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ResourceViewParam<R: Resources>(pub R::ShaderResourceView, pub Usage, pub ResourceViewSlot);
/// An unordered access view (UAV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct UnorderedViewParam<R: Resources>(pub R::UnorderedAccessView, pub Usage, pub UnorderedViewSlot);
/// A sampler run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct SamplerParam<R: Resources>(pub R::Sampler, pub Usage, pub SamplerSlot);
/// A complete set of render targets to be used for pixel export in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct PixelTargetSet<R: Resources> {
/// Array of color target views
pub colors: [Option<R::RenderTargetView>; MAX_COLOR_TARGETS],
/// Depth target view
pub depth: Option<R::DepthStencilView>,
/// Stencil target view
pub stencil: Option<R::DepthStencilView>,
/// Rendering dimensions
pub dimensions: Option<texture::Dimensions>,
}
impl<R: Resources> PixelTargetSet<R> {
/// Create an empty set
pub fn new() -> PixelTargetSet<R> {
PixelTargetSet {
colors: [None; MAX_COLOR_TARGETS],
depth: None,
stencil: None,
dimensions: None,
}
}
/// Add a color view to the specified slot
pub fn add_color(&mut self,
slot: ColorSlot,
view: &R::RenderTargetView,
dim: texture::Dimensions) {
self.colors[slot as usize] = Some(view.clone());
self.set_dimensions(dim);
}
/// Add a depth or stencil view to the specified slot
pub fn add_depth_stencil(&mut self,
view: &R::DepthStencilView,
has_depth: bool,
has_stencil: bool,
dim: texture::Dimensions) {
if has_depth {
self.depth = Some(view.clone());
}
if has_stencil {
self.stencil = Some(view.clone());
}
self.set_dimensions(dim);
}
fn set_dimensions(&mut self, dim: texture::Dimensions) {
debug_assert!(self.dimensions.map(|d| d == dim).unwrap_or(true));
self.dimensions = Some(dim);
}
/// Get the rendering view (returns values > 0)
pub fn get_view(&self) -> (u16, u16, u16) {
use std::cmp::max;
self.dimensions
.map(|(w, h, d, _)| (max(w, 1), max(h, 1), max(d, 1)))
.unwrap_or((1, 1, 1))
}
}
| {
write!(f, "{}", self.description())
} | identifier_body |
pso.rs | // Copyright 2015 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Raw Pipeline State Objects
//!
//! This module contains items used to create and manage a raw pipeline state object. Most users
//! will want to use the typed and safe `PipelineState`. See the `pso` module inside the `gfx`
//! crate.
use {MAX_COLOR_TARGETS, MAX_VERTEX_ATTRIBUTES, MAX_CONSTANT_BUFFERS,
MAX_RESOURCE_VIEWS, MAX_UNORDERED_VIEWS, MAX_SAMPLERS};
use {ConstantBufferSlot, ColorSlot, ResourceViewSlot,
UnorderedViewSlot, SamplerSlot,
Primitive, Resources};
use {format, state as s, texture};
use shade::Usage;
use std::error::Error;
use std::fmt;
/// Maximum number of vertex buffers used in a PSO definition.
pub const MAX_VERTEX_BUFFERS: usize = 4;
/// An offset inside a vertex buffer, in bytes.
pub type BufferOffset = usize;
/// Error types happening upon PSO creation on the device side.
#[derive(Clone, Debug, PartialEq)]
pub struct CreationError;
impl fmt::Display for CreationError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
impl Error for CreationError {
fn description(&self) -> &str {
"Could not create PSO on device."
}
}
/// Color output configuration of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct ColorInfo {
/// Color channel mask
pub mask: s::ColorMask,
/// Optional color blending
pub color: Option<s::BlendChannel>,
/// Optional alpha blending
pub alpha: Option<s::BlendChannel>,
}
impl From<s::ColorMask> for ColorInfo {
fn from(mask: s::ColorMask) -> ColorInfo {
ColorInfo {
mask: mask,
color: None,
alpha: None,
}
}
}
impl From<s::Blend> for ColorInfo {
fn from(blend: s::Blend) -> ColorInfo {
ColorInfo {
mask: s::MASK_ALL,
color: Some(blend.color),
alpha: Some(blend.alpha),
}
}
}
/// Depth and stencil state of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct DepthStencilInfo {
/// Optional depth test configuration
pub depth: Option<s::Depth>,
/// Optional stencil test on the front faces
pub front: Option<s::StencilSide>,
/// Optional stencil test on the back faces
pub back: Option<s::StencilSide>,
}
impl From<s::Depth> for DepthStencilInfo {
fn from(depth: s::Depth) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(depth),
front: None,
back: None,
}
}
}
impl From<s::Stencil> for DepthStencilInfo {
fn from(stencil: s::Stencil) -> DepthStencilInfo {
DepthStencilInfo {
depth: None,
front: Some(stencil.front),
back: Some(stencil.back),
}
}
}
impl From<(s::Depth, s::Stencil)> for DepthStencilInfo {
fn from(ds: (s::Depth, s::Stencil)) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(ds.0),
front: Some(ds.1.front),
back: Some(ds.1.back),
}
}
}
/// Index of a vertex buffer.
pub type BufferIndex = u8;
/// Offset of an attribute from the start of the buffer, in bytes
pub type ElemOffset = u32;
/// Offset between attribute values, in bytes
pub type ElemStride = u8;
/// The number of instances between each subsequent attribute value
pub type InstanceRate = u8;
/// A struct element descriptor.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Element<F> {
/// Element format
pub format: F,
/// Offset from the beginning of the container, in bytes
pub offset: ElemOffset,
}
/// Vertex buffer descriptor
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct VertexBufferDesc {
/// Total container size, in bytes
pub stride: ElemStride,
/// Rate of the input for the given buffer
pub rate: InstanceRate,
}
/// PSO vertex attribute descriptor
pub type AttributeDesc = (BufferIndex, Element<format::Format>);
/// PSO constant buffer descriptor
pub type ConstantBufferDesc = Usage;
/// PSO shader resource view descriptor
pub type ResourceViewDesc = Usage;
/// PSO unordered access view descriptor
pub type UnorderedViewDesc = Usage;
/// PSO sampler descriptor
pub type SamplerDesc = Usage;
/// PSO color target descriptor
pub type ColorTargetDesc = (format::Format, ColorInfo);
/// PSO depth-stencil target descriptor
pub type DepthStencilDesc = (format::Format, DepthStencilInfo);
/// All the information surrounding a shader program that is required
/// for PSO creation, including the formats of vertex buffers and pixel targets;
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Descriptor {
/// Type of the primitive
pub primitive: Primitive,
/// Rasterizer setup
pub rasterizer: s::Rasterizer,
/// Enable scissor test
pub scissor: bool,
/// Vertex buffers
pub vertex_buffers: [Option<VertexBufferDesc>; MAX_VERTEX_BUFFERS],
/// Vertex attributes
pub attributes: [Option<AttributeDesc>; MAX_VERTEX_ATTRIBUTES],
/// Constant buffers
pub constant_buffers: [Option<ConstantBufferDesc>; MAX_CONSTANT_BUFFERS],
/// Shader resource views
pub resource_views: [Option<ResourceViewDesc>; MAX_RESOURCE_VIEWS],
/// Unordered access views
pub unordered_views: [Option<UnorderedViewDesc>; MAX_UNORDERED_VIEWS],
/// Samplers
pub samplers: [Option<SamplerDesc>; MAX_SAMPLERS],
/// Render target views (RTV)
pub color_targets: [Option<ColorTargetDesc>; MAX_COLOR_TARGETS],
/// Depth stencil view (DSV)
pub depth_stencil: Option<DepthStencilDesc>,
}
impl Descriptor {
/// Create a new empty PSO descriptor.
pub fn new(primitive: Primitive, rast: s::Rasterizer) -> Descriptor {
Descriptor {
primitive: primitive,
rasterizer: rast,
scissor: false,
vertex_buffers: [None; MAX_VERTEX_BUFFERS],
attributes: [None; MAX_VERTEX_ATTRIBUTES],
constant_buffers: [None; MAX_CONSTANT_BUFFERS],
resource_views: [None; MAX_RESOURCE_VIEWS],
unordered_views: [None; MAX_UNORDERED_VIEWS],
samplers: [None; MAX_SAMPLERS],
color_targets: [None; MAX_COLOR_TARGETS],
depth_stencil: None,
}
}
}
/// A complete set of vertex buffers to be used for vertex import in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct VertexBufferSet<R: Resources>(
/// Array of buffer handles with offsets in them
pub [Option<(R::Buffer, BufferOffset)>; MAX_VERTEX_ATTRIBUTES]
);
impl<R: Resources> VertexBufferSet<R> {
/// Create an empty set
pub fn new() -> VertexBufferSet<R> {
VertexBufferSet([None; MAX_VERTEX_ATTRIBUTES])
}
}
/// A constant buffer run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ConstantBufferParam<R: Resources>(pub R::Buffer, pub Usage, pub ConstantBufferSlot);
/// A shader resource view (SRV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ResourceViewParam<R: Resources>(pub R::ShaderResourceView, pub Usage, pub ResourceViewSlot);
/// An unordered access view (UAV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct UnorderedViewParam<R: Resources>(pub R::UnorderedAccessView, pub Usage, pub UnorderedViewSlot);
/// A sampler run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct SamplerParam<R: Resources>(pub R::Sampler, pub Usage, pub SamplerSlot);
/// A complete set of render targets to be used for pixel export in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct PixelTargetSet<R: Resources> {
/// Array of color target views
pub colors: [Option<R::RenderTargetView>; MAX_COLOR_TARGETS],
/// Depth target view
pub depth: Option<R::DepthStencilView>,
/// Stencil target view
pub stencil: Option<R::DepthStencilView>,
/// Rendering dimensions
pub dimensions: Option<texture::Dimensions>,
}
impl<R: Resources> PixelTargetSet<R> {
/// Create an empty set
pub fn new() -> PixelTargetSet<R> {
PixelTargetSet {
colors: [None; MAX_COLOR_TARGETS],
depth: None,
stencil: None,
dimensions: None,
}
}
/// Add a color view to the specified slot
pub fn add_color(&mut self,
slot: ColorSlot,
view: &R::RenderTargetView,
dim: texture::Dimensions) {
self.colors[slot as usize] = Some(view.clone());
self.set_dimensions(dim);
}
/// Add a depth or stencil view to the specified slot
pub fn add_depth_stencil(&mut self,
view: &R::DepthStencilView,
has_depth: bool,
has_stencil: bool,
dim: texture::Dimensions) {
if has_depth {
self.depth = Some(view.clone());
}
if has_stencil |
self.set_dimensions(dim);
}
fn set_dimensions(&mut self, dim: texture::Dimensions) {
debug_assert!(self.dimensions.map(|d| d == dim).unwrap_or(true));
self.dimensions = Some(dim);
}
/// Get the rendering view (returns values > 0)
pub fn get_view(&self) -> (u16, u16, u16) {
use std::cmp::max;
self.dimensions
.map(|(w, h, d, _)| (max(w, 1), max(h, 1), max(d, 1)))
.unwrap_or((1, 1, 1))
}
}
| {
self.stencil = Some(view.clone());
} | conditional_block |
pso.rs | // Copyright 2015 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Raw Pipeline State Objects
//!
//! This module contains items used to create and manage a raw pipeline state object. Most users
//! will want to use the typed and safe `PipelineState`. See the `pso` module inside the `gfx`
//! crate.
use {MAX_COLOR_TARGETS, MAX_VERTEX_ATTRIBUTES, MAX_CONSTANT_BUFFERS,
MAX_RESOURCE_VIEWS, MAX_UNORDERED_VIEWS, MAX_SAMPLERS};
use {ConstantBufferSlot, ColorSlot, ResourceViewSlot,
UnorderedViewSlot, SamplerSlot,
Primitive, Resources};
use {format, state as s, texture};
use shade::Usage;
use std::error::Error;
use std::fmt;
/// Maximum number of vertex buffers used in a PSO definition.
pub const MAX_VERTEX_BUFFERS: usize = 4;
/// An offset inside a vertex buffer, in bytes.
pub type BufferOffset = usize;
/// Error types happening upon PSO creation on the device side.
#[derive(Clone, Debug, PartialEq)]
pub struct CreationError;
impl fmt::Display for CreationError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
impl Error for CreationError {
fn description(&self) -> &str {
"Could not create PSO on device."
}
}
/// Color output configuration of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct ColorInfo {
/// Color channel mask
pub mask: s::ColorMask,
/// Optional color blending
pub color: Option<s::BlendChannel>,
/// Optional alpha blending
pub alpha: Option<s::BlendChannel>,
}
impl From<s::ColorMask> for ColorInfo {
fn from(mask: s::ColorMask) -> ColorInfo {
ColorInfo {
mask: mask,
color: None,
alpha: None,
}
}
}
impl From<s::Blend> for ColorInfo {
fn from(blend: s::Blend) -> ColorInfo {
ColorInfo {
mask: s::MASK_ALL,
color: Some(blend.color),
alpha: Some(blend.alpha),
}
}
}
/// Depth and stencil state of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct DepthStencilInfo {
/// Optional depth test configuration
pub depth: Option<s::Depth>,
/// Optional stencil test on the front faces
pub front: Option<s::StencilSide>,
/// Optional stencil test on the back faces
pub back: Option<s::StencilSide>,
}
impl From<s::Depth> for DepthStencilInfo {
fn from(depth: s::Depth) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(depth),
front: None,
back: None,
}
}
}
impl From<s::Stencil> for DepthStencilInfo {
fn from(stencil: s::Stencil) -> DepthStencilInfo {
DepthStencilInfo {
depth: None,
front: Some(stencil.front),
back: Some(stencil.back),
}
}
}
impl From<(s::Depth, s::Stencil)> for DepthStencilInfo {
fn from(ds: (s::Depth, s::Stencil)) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(ds.0),
front: Some(ds.1.front),
back: Some(ds.1.back),
}
}
}
/// Index of a vertex buffer.
pub type BufferIndex = u8;
/// Offset of an attribute from the start of the buffer, in bytes
pub type ElemOffset = u32;
/// Offset between attribute values, in bytes
pub type ElemStride = u8;
/// The number of instances between each subsequent attribute value
pub type InstanceRate = u8;
/// A struct element descriptor.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Element<F> {
/// Element format
pub format: F,
/// Offset from the beginning of the container, in bytes
pub offset: ElemOffset,
}
/// Vertex buffer descriptor
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct VertexBufferDesc {
/// Total container size, in bytes
pub stride: ElemStride,
/// Rate of the input for the given buffer
pub rate: InstanceRate,
}
/// PSO vertex attribute descriptor
pub type AttributeDesc = (BufferIndex, Element<format::Format>);
/// PSO constant buffer descriptor
pub type ConstantBufferDesc = Usage;
/// PSO shader resource view descriptor
pub type ResourceViewDesc = Usage;
/// PSO unordered access view descriptor
pub type UnorderedViewDesc = Usage;
/// PSO sampler descriptor
pub type SamplerDesc = Usage;
/// PSO color target descriptor
pub type ColorTargetDesc = (format::Format, ColorInfo);
/// PSO depth-stencil target descriptor
pub type DepthStencilDesc = (format::Format, DepthStencilInfo);
/// All the information surrounding a shader program that is required
/// for PSO creation, including the formats of vertex buffers and pixel targets;
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Descriptor {
/// Type of the primitive
pub primitive: Primitive,
/// Rasterizer setup
pub rasterizer: s::Rasterizer,
/// Enable scissor test
pub scissor: bool,
/// Vertex buffers
pub vertex_buffers: [Option<VertexBufferDesc>; MAX_VERTEX_BUFFERS],
/// Vertex attributes
pub attributes: [Option<AttributeDesc>; MAX_VERTEX_ATTRIBUTES],
/// Constant buffers
pub constant_buffers: [Option<ConstantBufferDesc>; MAX_CONSTANT_BUFFERS],
/// Shader resource views
pub resource_views: [Option<ResourceViewDesc>; MAX_RESOURCE_VIEWS],
/// Unordered access views
pub unordered_views: [Option<UnorderedViewDesc>; MAX_UNORDERED_VIEWS],
/// Samplers
pub samplers: [Option<SamplerDesc>; MAX_SAMPLERS],
/// Render target views (RTV)
pub color_targets: [Option<ColorTargetDesc>; MAX_COLOR_TARGETS],
/// Depth stencil view (DSV)
pub depth_stencil: Option<DepthStencilDesc>,
}
impl Descriptor {
/// Create a new empty PSO descriptor.
pub fn new(primitive: Primitive, rast: s::Rasterizer) -> Descriptor {
Descriptor {
primitive: primitive,
rasterizer: rast,
scissor: false,
vertex_buffers: [None; MAX_VERTEX_BUFFERS],
attributes: [None; MAX_VERTEX_ATTRIBUTES],
constant_buffers: [None; MAX_CONSTANT_BUFFERS],
resource_views: [None; MAX_RESOURCE_VIEWS],
unordered_views: [None; MAX_UNORDERED_VIEWS],
samplers: [None; MAX_SAMPLERS],
color_targets: [None; MAX_COLOR_TARGETS],
depth_stencil: None,
}
}
}
/// A complete set of vertex buffers to be used for vertex import in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct VertexBufferSet<R: Resources>(
/// Array of buffer handles with offsets in them
pub [Option<(R::Buffer, BufferOffset)>; MAX_VERTEX_ATTRIBUTES]
);
impl<R: Resources> VertexBufferSet<R> {
/// Create an empty set
pub fn new() -> VertexBufferSet<R> {
VertexBufferSet([None; MAX_VERTEX_ATTRIBUTES])
}
}
/// A constant buffer run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ConstantBufferParam<R: Resources>(pub R::Buffer, pub Usage, pub ConstantBufferSlot);
/// A shader resource view (SRV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ResourceViewParam<R: Resources>(pub R::ShaderResourceView, pub Usage, pub ResourceViewSlot);
/// An unordered access view (UAV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct UnorderedViewParam<R: Resources>(pub R::UnorderedAccessView, pub Usage, pub UnorderedViewSlot);
/// A sampler run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct SamplerParam<R: Resources>(pub R::Sampler, pub Usage, pub SamplerSlot);
/// A complete set of render targets to be used for pixel export in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct | <R: Resources> {
/// Array of color target views
pub colors: [Option<R::RenderTargetView>; MAX_COLOR_TARGETS],
/// Depth target view
pub depth: Option<R::DepthStencilView>,
/// Stencil target view
pub stencil: Option<R::DepthStencilView>,
/// Rendering dimensions
pub dimensions: Option<texture::Dimensions>,
}
impl<R: Resources> PixelTargetSet<R> {
/// Create an empty set
pub fn new() -> PixelTargetSet<R> {
PixelTargetSet {
colors: [None; MAX_COLOR_TARGETS],
depth: None,
stencil: None,
dimensions: None,
}
}
/// Add a color view to the specified slot
pub fn add_color(&mut self,
slot: ColorSlot,
view: &R::RenderTargetView,
dim: texture::Dimensions) {
self.colors[slot as usize] = Some(view.clone());
self.set_dimensions(dim);
}
/// Add a depth or stencil view to the specified slot
pub fn add_depth_stencil(&mut self,
view: &R::DepthStencilView,
has_depth: bool,
has_stencil: bool,
dim: texture::Dimensions) {
if has_depth {
self.depth = Some(view.clone());
}
if has_stencil {
self.stencil = Some(view.clone());
}
self.set_dimensions(dim);
}
fn set_dimensions(&mut self, dim: texture::Dimensions) {
debug_assert!(self.dimensions.map(|d| d == dim).unwrap_or(true));
self.dimensions = Some(dim);
}
/// Get the rendering view (returns values > 0)
pub fn get_view(&self) -> (u16, u16, u16) {
use std::cmp::max;
self.dimensions
.map(|(w, h, d, _)| (max(w, 1), max(h, 1), max(d, 1)))
.unwrap_or((1, 1, 1))
}
}
| PixelTargetSet | identifier_name |
pso.rs | // Copyright 2015 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Raw Pipeline State Objects
//!
//! This module contains items used to create and manage a raw pipeline state object. Most users
//! will want to use the typed and safe `PipelineState`. See the `pso` module inside the `gfx`
//! crate.
use {MAX_COLOR_TARGETS, MAX_VERTEX_ATTRIBUTES, MAX_CONSTANT_BUFFERS,
MAX_RESOURCE_VIEWS, MAX_UNORDERED_VIEWS, MAX_SAMPLERS}; | UnorderedViewSlot, SamplerSlot,
Primitive, Resources};
use {format, state as s, texture};
use shade::Usage;
use std::error::Error;
use std::fmt;
/// Maximum number of vertex buffers used in a PSO definition.
pub const MAX_VERTEX_BUFFERS: usize = 4;
/// An offset inside a vertex buffer, in bytes.
pub type BufferOffset = usize;
/// Error types happening upon PSO creation on the device side.
#[derive(Clone, Debug, PartialEq)]
pub struct CreationError;
impl fmt::Display for CreationError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
impl Error for CreationError {
fn description(&self) -> &str {
"Could not create PSO on device."
}
}
/// Color output configuration of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct ColorInfo {
/// Color channel mask
pub mask: s::ColorMask,
/// Optional color blending
pub color: Option<s::BlendChannel>,
/// Optional alpha blending
pub alpha: Option<s::BlendChannel>,
}
impl From<s::ColorMask> for ColorInfo {
fn from(mask: s::ColorMask) -> ColorInfo {
ColorInfo {
mask: mask,
color: None,
alpha: None,
}
}
}
impl From<s::Blend> for ColorInfo {
fn from(blend: s::Blend) -> ColorInfo {
ColorInfo {
mask: s::MASK_ALL,
color: Some(blend.color),
alpha: Some(blend.alpha),
}
}
}
/// Depth and stencil state of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct DepthStencilInfo {
/// Optional depth test configuration
pub depth: Option<s::Depth>,
/// Optional stencil test on the front faces
pub front: Option<s::StencilSide>,
/// Optional stencil test on the back faces
pub back: Option<s::StencilSide>,
}
impl From<s::Depth> for DepthStencilInfo {
fn from(depth: s::Depth) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(depth),
front: None,
back: None,
}
}
}
impl From<s::Stencil> for DepthStencilInfo {
fn from(stencil: s::Stencil) -> DepthStencilInfo {
DepthStencilInfo {
depth: None,
front: Some(stencil.front),
back: Some(stencil.back),
}
}
}
impl From<(s::Depth, s::Stencil)> for DepthStencilInfo {
fn from(ds: (s::Depth, s::Stencil)) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(ds.0),
front: Some(ds.1.front),
back: Some(ds.1.back),
}
}
}
/// Index of a vertex buffer.
pub type BufferIndex = u8;
/// Offset of an attribute from the start of the buffer, in bytes
pub type ElemOffset = u32;
/// Offset between attribute values, in bytes
pub type ElemStride = u8;
/// The number of instances between each subsequent attribute value
pub type InstanceRate = u8;
/// A struct element descriptor.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Element<F> {
/// Element format
pub format: F,
/// Offset from the beginning of the container, in bytes
pub offset: ElemOffset,
}
/// Vertex buffer descriptor
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct VertexBufferDesc {
/// Total container size, in bytes
pub stride: ElemStride,
/// Rate of the input for the given buffer
pub rate: InstanceRate,
}
/// PSO vertex attribute descriptor
pub type AttributeDesc = (BufferIndex, Element<format::Format>);
/// PSO constant buffer descriptor
pub type ConstantBufferDesc = Usage;
/// PSO shader resource view descriptor
pub type ResourceViewDesc = Usage;
/// PSO unordered access view descriptor
pub type UnorderedViewDesc = Usage;
/// PSO sampler descriptor
pub type SamplerDesc = Usage;
/// PSO color target descriptor
pub type ColorTargetDesc = (format::Format, ColorInfo);
/// PSO depth-stencil target descriptor
pub type DepthStencilDesc = (format::Format, DepthStencilInfo);
/// All the information surrounding a shader program that is required
/// for PSO creation, including the formats of vertex buffers and pixel targets;
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Descriptor {
/// Type of the primitive
pub primitive: Primitive,
/// Rasterizer setup
pub rasterizer: s::Rasterizer,
/// Enable scissor test
pub scissor: bool,
/// Vertex buffers
pub vertex_buffers: [Option<VertexBufferDesc>; MAX_VERTEX_BUFFERS],
/// Vertex attributes
pub attributes: [Option<AttributeDesc>; MAX_VERTEX_ATTRIBUTES],
/// Constant buffers
pub constant_buffers: [Option<ConstantBufferDesc>; MAX_CONSTANT_BUFFERS],
/// Shader resource views
pub resource_views: [Option<ResourceViewDesc>; MAX_RESOURCE_VIEWS],
/// Unordered access views
pub unordered_views: [Option<UnorderedViewDesc>; MAX_UNORDERED_VIEWS],
/// Samplers
pub samplers: [Option<SamplerDesc>; MAX_SAMPLERS],
/// Render target views (RTV)
pub color_targets: [Option<ColorTargetDesc>; MAX_COLOR_TARGETS],
/// Depth stencil view (DSV)
pub depth_stencil: Option<DepthStencilDesc>,
}
impl Descriptor {
/// Create a new empty PSO descriptor.
pub fn new(primitive: Primitive, rast: s::Rasterizer) -> Descriptor {
Descriptor {
primitive: primitive,
rasterizer: rast,
scissor: false,
vertex_buffers: [None; MAX_VERTEX_BUFFERS],
attributes: [None; MAX_VERTEX_ATTRIBUTES],
constant_buffers: [None; MAX_CONSTANT_BUFFERS],
resource_views: [None; MAX_RESOURCE_VIEWS],
unordered_views: [None; MAX_UNORDERED_VIEWS],
samplers: [None; MAX_SAMPLERS],
color_targets: [None; MAX_COLOR_TARGETS],
depth_stencil: None,
}
}
}
/// A complete set of vertex buffers to be used for vertex import in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct VertexBufferSet<R: Resources>(
/// Array of buffer handles with offsets in them
pub [Option<(R::Buffer, BufferOffset)>; MAX_VERTEX_ATTRIBUTES]
);
impl<R: Resources> VertexBufferSet<R> {
/// Create an empty set
pub fn new() -> VertexBufferSet<R> {
VertexBufferSet([None; MAX_VERTEX_ATTRIBUTES])
}
}
/// A constant buffer run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ConstantBufferParam<R: Resources>(pub R::Buffer, pub Usage, pub ConstantBufferSlot);
/// A shader resource view (SRV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ResourceViewParam<R: Resources>(pub R::ShaderResourceView, pub Usage, pub ResourceViewSlot);
/// An unordered access view (UAV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct UnorderedViewParam<R: Resources>(pub R::UnorderedAccessView, pub Usage, pub UnorderedViewSlot);
/// A sampler run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct SamplerParam<R: Resources>(pub R::Sampler, pub Usage, pub SamplerSlot);
/// A complete set of render targets to be used for pixel export in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct PixelTargetSet<R: Resources> {
/// Array of color target views
pub colors: [Option<R::RenderTargetView>; MAX_COLOR_TARGETS],
/// Depth target view
pub depth: Option<R::DepthStencilView>,
/// Stencil target view
pub stencil: Option<R::DepthStencilView>,
/// Rendering dimensions
pub dimensions: Option<texture::Dimensions>,
}
impl<R: Resources> PixelTargetSet<R> {
/// Create an empty set
pub fn new() -> PixelTargetSet<R> {
PixelTargetSet {
colors: [None; MAX_COLOR_TARGETS],
depth: None,
stencil: None,
dimensions: None,
}
}
/// Add a color view to the specified slot
pub fn add_color(&mut self,
slot: ColorSlot,
view: &R::RenderTargetView,
dim: texture::Dimensions) {
self.colors[slot as usize] = Some(view.clone());
self.set_dimensions(dim);
}
/// Add a depth or stencil view to the specified slot
pub fn add_depth_stencil(&mut self,
view: &R::DepthStencilView,
has_depth: bool,
has_stencil: bool,
dim: texture::Dimensions) {
if has_depth {
self.depth = Some(view.clone());
}
if has_stencil {
self.stencil = Some(view.clone());
}
self.set_dimensions(dim);
}
fn set_dimensions(&mut self, dim: texture::Dimensions) {
debug_assert!(self.dimensions.map(|d| d == dim).unwrap_or(true));
self.dimensions = Some(dim);
}
/// Get the rendering view (returns values > 0)
pub fn get_view(&self) -> (u16, u16, u16) {
use std::cmp::max;
self.dimensions
.map(|(w, h, d, _)| (max(w, 1), max(h, 1), max(d, 1)))
.unwrap_or((1, 1, 1))
}
} | use {ConstantBufferSlot, ColorSlot, ResourceViewSlot, | random_line_split |
PyGenTools.py | """
PyGenTools.py by John Dorsey.
PyGenTools.py contains tools that work on python generators without handling their items (comparing items, etc.).
"""
import traceback
import itertools
from collections import deque
class ExhaustionError(Exception):
"""
ExhaustionError is raised when a function that eats items from an input generator runs out of items in that generator, but did not have a strict target count of items to eat.
"""
pass
class IterationFailure(Exception):
"""
IterationFailure is raised when an iterable runs out of items, in a situation where it never should have run out of items.
"""
pass
class SynchronicityViolation(Exception):
"""
SynchronicityViolation is raised when some code expected to perform exactly one operation per execution step of the code around it performs a different number of operations.
"""
pass
def isGen(thing):
#test whether something is a generator. Compatible with python2 and python3.
return type(thing) == type((i for i in range(1)))
def makeGen(thing):
#make anything iterable into a generator. This is useful for when certain functions are supposed to take only as many items as they need from the beginning of some data and leave the rest in a way that further iteration will begin where the first function stopped iterating, such as in parsing universal codes in Codes.py.
if isGen(thing):
return thing
return (item for item in thing)
def makeArr(thing):
if type(thing) == list:
return thing
try:
result = [item for item in thing]
except KeyboardInterrupt:
raise KeyboardInterrupt("PyGenTools.makeArr was stuck on " + str(thing) + ".")
return result
def handleOnExhaustion(methodName, yieldedCount, targetCount, onExhaustion):
if isinstance(onExhaustion,Exception):
raise onExhaustion
elif onExhaustion == "fail":
raise IterationFailure(methodName + " ran out of items, and its onExhaustion action is \"fail\".")
if onExhaustion == "ExhaustionError":
raise ExhaustionError(methodName + " ran out of items, and its onExhaustion action is \"ExhaustionError\".")
if "warn" in onExhaustion:
print("...\n" + "".join(traceback.format_list(traceback.extract_stack())) + ": " + methodName + " ran out of items. (yieldedCount,targetCount,onExhaustion)=" + str((yieldedCount, targetCount, onExhaustion)) + ".")
if "partial" in onExhaustion:
return
if "None" in onExhaustion:
raise ValueError(methodName + ": the onExhaustion action \"None\" is no longer supported.")
raise ValueError(methodName + ": the value of onExhaustion is invalid. (yieldedCount,targetCount,onExhaustion)=" + str((yieldedCount, targetCount, onExhaustion)) + ".")
def genTakeOnly(inputGen, targetCount, onExhaustion="partial"):
#take ONLY _count_ items from a generator _inputGen_ and yield them, so that if other functions call .next on the generator that was shared with this function, they will pick up exactly where this function's output left off (no missing items).
assert onExhaustion in ["fail","ExhaustionError","partial","warn"]
assert targetCount >= 0
if targetCount == 0:
return
i = 0
for item in inputGen:
if i < targetCount:
yield item
i += 1
if not i < targetCount:
return
handleOnExhaustion("PyGenTools.genTakeOnly", i, targetCount, onExhaustion)
def arrTakeOnly(inputGen, targetCount, onExhaustion="partial"):
#just like genTakeOnly, but bundle the taken items together into an array.
assert isinstance(onExhaustion,Exception) or onExhaustion in ["fail","ExhaustionError","warn+partial","partial"]
result = [item for item in genTakeOnly(inputGen,targetCount)]
if len(result) < targetCount:
handleOnExhaustion("PyGenTools.arrTakeOnly", len(result), targetCount, onExhaustion)
return result
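#Usage sketch (added for illustration, not part of the original tests): genTakeOnly consumes only
#the items it yields, so a later call on the same generator resumes where the previous one stopped.
_exampleGen = makeGen(range(10))
assert [item for item in genTakeOnly(_exampleGen, 3)] == [0, 1, 2]
assert arrTakeOnly(_exampleGen, 4) == [3, 4, 5, 6]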
def genSkipFirst(inputGen, count):
assert isGen(inputGen)
for i in range(count):
_ = next(inputGen)
return inputGen
def genTakeUntil(inputGen, stopFun, stopSignalsNeeded=1): #might be used in MarkovTools someday.
stopSignalCount = 0
for item in inputGen:
yield item
if stopFun(item):
stopSignalCount += 1
if stopSignalCount >= stopSignalsNeeded:
return
print("PyGenTools.genTakeUntil ran out of items.")
def arrTakeLast(inputGen, count):
if count == None:
raise ValueError("count can't be None.")
if type(inputGen) == list:
print("PyGenTools.arrTakeLast was called on a list. It will treat the list like a generator. This might be a waste of time.")
storage = [None for ii in range(count)]
i = -1
for item in inputGen:
i = (i+1)%count
storage[i] = item
splitPoint = i+1
return storage[splitPoint:]+storage[:splitPoint]
def getLast(inputGen):
if type(inputGen) == list:
print("PyGenTools.getLast was called on a list. It will treat the list like a generator. This might be a pointless waste of time.")
storage = None
loopRan = False
for item in inputGen:
loopRan = True
storage = item
assert loopRan
return storage
def indexOfValueInGen(testValue, testGen): #used in MarkovTools.
for i,item in enumerate(testGen):
if item == testValue:
return i
return None
def | (inputIndex, inputGen): #used in MarkovTools.
if inputIndex == None:
raise ValueError("inputIndex can't be None.")
return arrTakeLast(genTakeOnly(inputGen, inputIndex+1), 1)[0]
def sentinelize(inputSeq, sentinel=None, loopSentinel=False, failFun=None):
"""
Signals the end of a generator by yielding an additional item after its end. Note that sentinelize(x) makes a generator from any type of input, so combining it with makeGen is redundant.
"""
for item in inputSeq:
yield item
yield sentinel
while loopSentinel:
yield loopSentinel
if failFun:
failFun()
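#Usage sketch (added for illustration): sentinelize appends one extra item to mark the end.
assert makeArr(sentinelize("ab")) == ["a", "b", None]
assert makeArr(sentinelize([1, 2], sentinel="end")) == [1, 2, "end"]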
def zipGens(inputGens):
"""
This function gives a generator whose items are taken one at a time from each generator provided in a circular order. It runs until all the provided generators are empty. Technically, it can be given arrays instead of generators and it will correct for this. The array of generators may also be a generator instead of an array.
"""
gensRunning = [True for i in range(len(inputGens))] #this map prevents needing to catch the same StopIteration many times for each generator that stops sooner than the last one to stop.
workingGenArr = [makeGen(item) for item in inputGens] #in case the inputGens contains things that aren't generators _or_ inputGens itself is a generator, this fixes that.
while not all(not genIsRunning for genIsRunning in gensRunning):
for genIndex in range(len(workingGenArr)):
if gensRunning[genIndex]:
try:
yield next(workingGenArr[genIndex])
except StopIteration:
gensRunning[genIndex] = False #don't check this generator for items again.
def genAddInt(inputSeq, inputInt): #used in CodecTools.Codec.
for item in inputSeq:
yield item+inputInt
def arrAddInt(inputArr, inputInt): #used in CodecTools.Codec.
assert type(inputArr) == list
return [item+inputInt for item in inputArr]
def genDeduped(inputSeq):
if isGen(inputSeq) or type(inputSeq) == list:
history = set()
for item in inputSeq:
if item not in history:
history.add(item)
yield item
#this version uses less memory but isn't as fast. if re-enabling, change first branch's condition.
"""
elif type(inputSeq) == list:
for i,item in enumerate(inputSeq):
if i == 0:
yield item
continue
if item not in inputSeq[:i]:
yield item
"""
else:
raise ValueError("unsupported type: " + str(type(inputSeq)) + ".")
"""
def getAccumulatorFun(thing):
if type(thing) == str:
result = eval("(lambda x,y: x{}y)".format(thing))
elif type(thing) == type((lambda x: x)):
result = thing
else:
raise TypeError("must be string or function.")
return result
"""
def accumulate(inputSeq, inputFun):
inputSeq = makeGen(inputSeq)
#inputFun = getAccumulatorFun(inputFun) #not used anymore now that itertools.accumulate is sometimes used.
result = next(inputSeq)
for item in inputSeq:
result = inputFun(result, item)
return result
def product(inputSeq):
return accumulate(inputSeq, (lambda x,y:x*y))
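#Usage sketch (added for illustration): accumulate folds a sequence with a two-argument function.
assert accumulate([1, 2, 3, 4], (lambda x, y: x + y)) == 10
assert product([2, 3, 4]) == 24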
def genChunksAsLists(inputGen, n=2, partialChunkHandling="warn partial"):
inputGen = makeGen(inputGen)
while True:
chunkAsList = arrTakeOnly(inputGen, n, onExhaustion="partial") #make list.
if len(chunkAsList) < n:
if "warn" in partialChunkHandling:
print("PyGenTools.genChunksAsLists: warning: partial chunk.")
if "fail" in partialChunkHandling:
raise IterationFailure("partial chunk.")
if not "partial" in partialChunkHandling:
if "discard" in partialChunkHandling:
return
else:
print("PyGenTools.genChunksAsLists: warning: partial chunk encountered, but the partialChunkHandling kwarg does not indicate what should be done (no \"partial\" and no \"discard\"). The chunk will be yielded.")
if len(chunkAsList) == 0:
return
yield chunkAsList
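#Usage sketch (added for illustration): with partialChunkHandling="partial", the short final chunk
#is yielded without printing a warning.
assert makeArr(genChunksAsLists(range(7), n=3, partialChunkHandling="partial")) == [[0, 1, 2], [3, 4, 5], [6]]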
def genRollingWindowsAsLists(inputGen, n=3, step=1, defaultValue=None, includePartialChunks=True, includeEmptyChunks=False): # @ could be faster by using an index wrapping list.
if step != 1:
raise NotImplementedError("step != 1")
if includeEmptyChunks and not includePartialChunks:
raise ValueError("can't include empty chunks without including partial chunks.")
currentWindowDeque = deque([defaultValue for i in range(n)])
registeredCount = 0
def register(itemToRegister):
currentWindowDeque.append(itemToRegister)
currentWindowDeque.popleft()
if includeEmptyChunks:
yield list(currentWindowDeque)
for index, item in enumerate(inputGen):
register(item)
registeredCount += 1
if registeredCount%step != 0:
continue
if (index+1 >= n) or includePartialChunks:
yield list(currentWindowDeque)
if includePartialChunks:
for i in range(n-1):
register(defaultValue)
registeredCount += 1
if registeredCount%step != 0:
continue
yield list(currentWindowDeque)
if includeEmptyChunks:
yield list(currentWindowDeque)
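#Usage sketch (added for illustration): rolling windows of length 3 over range(4).
assert makeArr(genRollingWindowsAsLists(range(4), n=3, includePartialChunks=False)) == [[0, 1, 2], [1, 2, 3]]
assert makeArr(genRollingWindowsAsLists(range(4), n=3))[0] == [None, None, 0]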
def allAreEqual(inputSeq):
inputSeq = makeGen(inputSeq)
sharedValue = next(inputSeq)
for item in inputSeq:
if item != sharedValue:
return False
return True
def seqsAreEqual(*args):
  zipLongest = getattr(itertools, "izip_longest", None) or itertools.zip_longest #izip_longest in python2, zip_longest in python3.
  return all(allAreEqual(item) for item in zipLongest(*args))
def countIn(inputSeq, testValue, includeDenominator=False):
return countTriggers(inputSeq,(lambda x: x==testValue),includeDenominator=includeDenominator)
def countTriggers(inputSeq, triggerFun, includeDenominator=False):
count, denominator = 0, 0
for item in inputSeq:
count, denominator = count+triggerFun(item), denominator+1
return (count, denominator) if includeDenominator else count
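#Usage sketch (added for illustration): countIn can also report the total number of items tested.
assert countIn("banana", "a") == 3
assert countIn("banana", "a", includeDenominator=True) == (3, 6)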
def genRunless(inputSeq, func=(lambda compA, compB: compA == compB)):
#this generator takes an input sequence and yields only the items that aren't the same as the previous item.
#this generator eats only as much as it yields.
previousItem = None
justStarted = True
for item in inputSeq:
if justStarted:
justStarted = False
previousItem = item
yield item
else:
if not func(item, previousItem):
previousItem = item
yield item
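#Usage sketch (added for illustration): genRunless drops immediate repeats but keeps later recurrences.
assert makeArr(genRunless("aabbbca")) == ["a", "b", "c", "a"]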
#not tested well.
def genTrackEnds(
inputSeq,
leftTag="left", middleTag="middle", rightTag="right", onlyTag="only",
useLookahead=True,
tagByExpanding=False,
supressOvereatingWarning=False):
if iter(inputSeq) is iter(inputSeq):
if useLookahead:
if not supressOvereatingWarning:
print("PyGenTools.genTrackEnds: over-eating warning: this function may take more items from inputSeq than it yields with the current args.")
if tagByExpanding:
def toTagged(workingItem, tagToApply):
return (type(workingItem))((tagToApply,)) + workingItem
else:
def toTagged(workingItem, tagToApply):
return (tagToApply, workingItem)
inputGen = makeGen(inputSeq)
if useLookahead:
previousItem = None
index = None
for index,currentItem in enumerate(inputGen):
#assert currentItem is not None, "can't handle Nones in inputSeq yet."
if index == 0:
previousItem = currentItem
elif index == 1:
yield toTagged(previousItem, leftTag)
else:
yield toTagged(previousItem, middleTag)
previousItem = currentItem
if index is None:
return
elif index == 0:
yield toTagged(currentItem, onlyTag)
else:
yield toTagged(currentItem, rightTag)
else:
for index,currentItem in enumerate(inputGen):
if index == 0:
yield toTagged(currentItem, leftTag)
else:
yield toTagged(currentItem, middleTag)
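#Usage sketch (added for illustration), using the default tags:
assert makeArr(genTrackEnds(["a", "b"])) == [("left", "a"), ("right", "b")]
assert makeArr(genTrackEnds(["a"])) == [("only", "a")]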
def enumerateFlatly(inputSeq, start=0):
commonLength = None
isProbablyTupleSeq = None
isProbablyListSeq = None
for index,value in enumerate(inputSeq, start=start):
    assert value != None, "enumerateFlatly can't handle None items."
if commonLength is None:
isProbablyTupleSeq = isinstance(value, tuple)
isProbablyListSeq = isinstance(value, list)
      if isProbablyTupleSeq or isProbablyListSeq:
commonLength = len(value)
isExpandableSeq = isProbablyTupleSeq or isProbablyListSeq
if not isExpandableSeq:
print("PyGenTools.enumerateFlatly: warning: this input sequence isn't of items that can be expanded to include enumeration (the first item is of type {}). New tuples will be created for each item... This is just like builtin enumerate, but with overhead.".format(repr(type(value))))
if isExpandableSeq:
if isinstance(value, tuple):
assert len(value) == commonLength
yield (index,) + value
elif isinstance(value, list):
assert len(value) == commonLength
yield [index,] + value
else:
assert False
else:
yield (index, value)
def genMonitored(inputSeq, text=""):
inputGen = makeGen(inputSeq)
i = 0
while True:
try:
item = next(inputGen)
print("PyGenTools.genMonitored: " + text + " i={}, item={}.".format(i, item))
yield item
i += 1
except StopIteration as se:
print("PyGenTools.genMonitored: " + text + " i={}, {}".format(i, repr(se)))
raise se
assert False, "Unreachable statement."
class GenBypass:
def __init__(self, useHistory=False):
#print("GenBypass init.")
self.inputGens = None
    self.latestOutputs = None
self.modifiedInputGens = None
self.useHistory = useHistory
if self.useHistory:
self.history = deque()
def _create_callback_method(self, indexToWriteTo):
def callbackMethod(value):
#print("callback method: value {} into index {}.".format(value, indexToWriteTo))
if self.latestOutputs[indexToWriteTo] is not None:
raise SynchronicityViolation("multiple writes to output tracker for generator at index {}.".format(indexToWriteTo))
self.latestOutputs[indexToWriteTo] = value
if self.useHistory:
assert value is not None, "this breaks things."
if self.latestOutputs.count(None) == 0:
self._handle_full_outputs()
#print("self.lastOutputs is now {}.".format(self.lastOutputs))
return callbackMethod
def _gen_modify(self, genToModify, callbackMethod):
#print("_gen_modify called on {} with callbackMethod {}.".format(genToModify, callbackMethod))
for item in genToModify:
callbackMethod(item)
yield item
def _handle_full_outputs(self):
self.history.append([item for item in self.latestOutputs])
for i in range(len(self.latestOutputs)):
self.latestOutputs[i] = None
def intercept(self, *inputGens):
    self.capture(*inputGens)
return self.share()
def capture(self, *inputGens):
genCount = len(inputGens)
self.inputGens = [item for item in inputGens]
self.latestOutputs = [None for i in range(genCount)]
self.callbackMethods = [self._create_callback_method(i) for i in range(genCount)]
self.modifiedInputGens = [self._gen_modify(genToModify, self.callbackMethods[i]) for i,genToModify in enumerate(self.inputGens)]
def share(self):
assert len(self.modifiedInputGens) != 0
return self.modifiedInputGens
def wrap(self, inputGen):
#"unspecified_at_{}".format(i)
#result = [None for i in range(len(self.lastOutputs)+1)]
isLastRun = False
if self.useHistory:
sentinel = object() #get unique sentinel. only compare using is.
for inputGenItem in sentinelize(inputGen, sentinel=sentinel):
assert self.latestOutputs == [None for i in range(len(self.latestOutputs))]
while len(self.history) > 1:
yield [item for item in self.history.popleft()] + [None]
if inputGenItem is sentinel:
isLastRun = True
inputGenItem = None #don't yield sentinel.
if not len(self.history) >= 1:
return #don't try.
yield [item for item in self.history.popleft()] + [inputGenItem]
if isLastRun:
return #history dumped after last item, nothing left to do.
else:
yield [item for item in self.latestOutputs] + [inputGenItem]
for i in range(len(self.latestOutputs)):
self.latestOutputs[i] = None
#assert i != 0
#assert self.lastOutputs.count(None) == len(self.lastOutputs)
class CC:
"""
CC stands for ConverterClass.
An instance of CC can convert an iterable object to a python list by right multiplication (<iterable> * <CC instance> is equivalent to PyGenTools.makeArr(<iterable>)) or convert a list (or other iterable object) to a generator by right division (<iterable> / <CC instance> is equivalent to PyGenTools.makeGen(<iterable>)).
"""
def __init__(self,*args):
self.count = None
if len(args) > 0:
self.count = args[0]
if not self.count > 0:
raise ValueError("Zero or negative CC counts are not allowed.")
print("PyGenTools.ConverterClass.__init__: initialized with " + ("count " + str(self.count) if self.count != None else "indefinite count") + ".")
def __rmul__(self,other):
if self.count != None:
return arrTakeOnly(other,self.count)
else:
return makeArr(other)
def __rdiv__(self,other):
if self.count != None:
return genTakeOnly(other,self.count)
else:
return makeGen(other)
def __rtruediv__(self,other):
#python3 compatibility.
return self.__rdiv__(other)
cc = CC()
#tests:
assert len([item for item in genTakeOnly(range(256),10)]) == 10
assert arrTakeOnly(range(10),5) == [0,1,2,3,4]
assert arrTakeOnly(range(5),10) == [0,1,2,3,4]
assert "".join(zipGens(["hello","123456789","ABC"])) == "h1Ae2Bl3Cl4o56789"
assert range(10) * cc == [0,1,2,3,4,5,6,7,8,9]
assert isGen([0,1,2,3,4] / cc)
| valueAtIndexInGen | identifier_name |
PyGenTools.py | """
PyGenTools.py by John Dorsey.
PyGenTools.py contains tools that work on python generators without handling their items (comparing items, etc.).
"""
import traceback
import itertools
from collections import deque
class ExhaustionError(Exception):
"""
ExhaustionError is raised when a function that eats items from an input generator runs out of items in that generator, but did not have a strict target count of items to eat.
"""
pass
class IterationFailure(Exception):
"""
IterationFailure is raised when an iterable runs out of items, in a situation where it never should have run out of items.
"""
pass
class SynchronicityViolation(Exception):
"""
SynchronicityViolation is raised when some code expected to perform exactly one operation per execution step of the code around it performs a different number of operations.
"""
pass
def isGen(thing):
#test whether something is a generator. Compatible with python2 and python3.
return type(thing) == type((i for i in range(1)))
def makeGen(thing):
#make anything iterable into a generator. This is useful for when certain functions are supposed to take only as many items as they need from the beginning of some data and leave the rest in a way that further iteration will begin where the first function stopped iterating, such as in parsing universal codes in Codes.py.
if isGen(thing):
return thing
return (item for item in thing)
def makeArr(thing):
if type(thing) == list:
return thing
try:
result = [item for item in thing]
except KeyboardInterrupt:
raise KeyboardInterrupt("PyGenTools.makeArr was stuck on " + str(thing) + ".")
return result
def handleOnExhaustion(methodName, yieldedCount, targetCount, onExhaustion):
if isinstance(onExhaustion,Exception):
raise onExhaustion
elif onExhaustion == "fail":
raise IterationFailure(methodName + " ran out of items, and its onExhaustion action is \"fail\".")
if onExhaustion == "ExhaustionError":
raise ExhaustionError(methodName + " ran out of items, and its onExhaustion action is \"ExhaustionError\".")
if "warn" in onExhaustion:
print("...\n" + "".join(traceback.format_list(traceback.extract_stack())) + ": " + methodName + " ran out of items. (yieldedCount,targetCount,onExhaustion)=" + str((yieldedCount, targetCount, onExhaustion)) + ".")
if "partial" in onExhaustion:
return
if "None" in onExhaustion:
raise ValueError(methodName + ": the onExhaustion action \"None\" is no longer supported.")
raise ValueError(methodName + ": the value of onExhaustion is invalid. (yieldedCount,targetCount,onExhaustion)=" + str((yieldedCount, targetCount, onExhaustion)) + ".")
def genTakeOnly(inputGen, targetCount, onExhaustion="partial"):
#take ONLY _count_ items from a generator _inputGen_ and yield them, so that if other functions call .next on the generator that was shared with this function, they will pick up exactly where this function's output left off (no missing items).
assert onExhaustion in ["fail","ExhaustionError","partial","warn"]
assert targetCount >= 0
if targetCount == 0:
return
i = 0
for item in inputGen:
if i < targetCount:
yield item
i += 1
if not i < targetCount:
return
handleOnExhaustion("PyGenTools.genTakeOnly", i, targetCount, onExhaustion)
def arrTakeOnly(inputGen, targetCount, onExhaustion="partial"):
#just like genTakeOnly, but bundle the taken items together into an array.
assert isinstance(onExhaustion,Exception) or onExhaustion in ["fail","ExhaustionError","warn+partial","partial"]
result = [item for item in genTakeOnly(inputGen,targetCount)]
if len(result) < targetCount:
handleOnExhaustion("PyGenTools.arrTakeOnly", len(result), targetCount, onExhaustion)
return result
def genSkipFirst(inputGen, count):
assert isGen(inputGen)
for i in range(count):
_ = next(inputGen)
return inputGen
def genTakeUntil(inputGen, stopFun, stopSignalsNeeded=1): #might be used in MarkovTools someday.
stopSignalCount = 0
for item in inputGen:
yield item
if stopFun(item):
stopSignalCount += 1
if stopSignalCount >= stopSignalsNeeded:
return
print("PyGenTools.genTakeUntil ran out of items.")
def arrTakeLast(inputGen, count):
if count == None:
raise ValueError("count can't be None.")
if type(inputGen) == list:
print("PyGenTools.arrTakeLast was called on a list. It will treat the list like a generator. This might be a waste of time.")
storage = [None for ii in range(count)]
i = -1
for item in inputGen:
i = (i+1)%count
storage[i] = item
splitPoint = i+1
return storage[splitPoint:]+storage[:splitPoint]
def getLast(inputGen):
if type(inputGen) == list:
print("PyGenTools.getLast was called on a list. It will treat the list like a generator. This might be a pointless waste of time.")
storage = None
loopRan = False
for item in inputGen:
loopRan = True
storage = item
assert loopRan
return storage
def indexOfValueInGen(testValue, testGen): #used in MarkovTools.
for i,item in enumerate(testGen):
if item == testValue:
return i
return None
def valueAtIndexInGen(inputIndex, inputGen): #used in MarkovTools.
if inputIndex == None:
raise ValueError("inputIndex can't be None.")
return arrTakeLast(genTakeOnly(inputGen, inputIndex+1), 1)[0]
def sentinelize(inputSeq, sentinel=None, loopSentinel=False, failFun=None):
"""
Signals the end of a generator by yielding an additional item after its end. Note that sentinelize(x) makes a generator from any type of input, so combining it with makeGen is redundant.
"""
for item in inputSeq:
yield item
yield sentinel
while loopSentinel:
yield loopSentinel
if failFun:
failFun()
def zipGens(inputGens):
"""
This function gives a generator whose items are taken one at a time from each generator provided in a circular order. It runs until all the provided generators are empty. Technically, it can be given arrays instead of generators and it will correct for this. The array of generators may also be a generator instead of an array.
"""
gensRunning = [True for i in range(len(inputGens))] #this map prevents needing to catch the same StopIteration many times for each generator that stops sooner than the last one to stop.
workingGenArr = [makeGen(item) for item in inputGens] #in case the inputGens contains things that aren't generators _or_ inputGens itself is a generator, this fixes that.
while not all(not genIsRunning for genIsRunning in gensRunning):
for genIndex in range(len(workingGenArr)):
if gensRunning[genIndex]:
try:
yield next(workingGenArr[genIndex])
except StopIteration:
gensRunning[genIndex] = False #don't check this generator for items again.
def genAddInt(inputSeq, inputInt): #used in CodecTools.Codec.
for item in inputSeq:
yield item+inputInt
def arrAddInt(inputArr, inputInt): #used in CodecTools.Codec.
assert type(inputArr) == list
return [item+inputInt for item in inputArr]
def genDeduped(inputSeq):
if isGen(inputSeq) or type(inputSeq) == list:
history = set()
for item in inputSeq:
if item not in history:
history.add(item)
yield item
#this version uses less memory but isn't as fast. if re-enabling, change first branch's condition.
"""
elif type(inputSeq) == list:
for i,item in enumerate(inputSeq):
if i == 0:
yield item
continue
if item not in inputSeq[:i]:
yield item
"""
else:
raise ValueError("unsupported type: " + str(type(inputSeq)) + ".")
"""
def getAccumulatorFun(thing):
if type(thing) == str:
result = eval("(lambda x,y: x{}y)".format(thing))
elif type(thing) == type((lambda x: x)):
result = thing
else:
raise TypeError("must be string or function.")
return result
"""
def accumulate(inputSeq, inputFun):
inputSeq = makeGen(inputSeq)
#inputFun = getAccumulatorFun(inputFun) #not used anymore now that itertools.accumulate is sometimes used.
result = next(inputSeq)
for item in inputSeq:
result = inputFun(result, item)
return result
def product(inputSeq):
return accumulate(inputSeq, (lambda x,y:x*y))
def genChunksAsLists(inputGen, n=2, partialChunkHandling="warn partial"):
inputGen = makeGen(inputGen)
while True:
chunkAsList = arrTakeOnly(inputGen, n, onExhaustion="partial") #make list.
if len(chunkAsList) < n:
if "warn" in partialChunkHandling:
print("PyGenTools.genChunksAsLists: warning: partial chunk.")
if "fail" in partialChunkHandling:
raise IterationFailure("partial chunk.")
if not "partial" in partialChunkHandling:
if "discard" in partialChunkHandling:
return
else:
print("PyGenTools.genChunksAsLists: warning: partial chunk encountered, but the partialChunkHandling kwarg does not indicate what should be done (no \"partial\" and no \"discard\"). The chunk will be yielded.")
if len(chunkAsList) == 0:
return
yield chunkAsList
def genRollingWindowsAsLists(inputGen, n=3, step=1, defaultValue=None, includePartialChunks=True, includeEmptyChunks=False): # @ could be faster by using an index wrapping list.
if step != 1:
raise NotImplementedError("step != 1")
if includeEmptyChunks and not includePartialChunks:
raise ValueError("can't include empty chunks without including partial chunks.")
currentWindowDeque = deque([defaultValue for i in range(n)])
registeredCount = 0
def register(itemToRegister):
currentWindowDeque.append(itemToRegister)
currentWindowDeque.popleft()
if includeEmptyChunks:
yield list(currentWindowDeque)
for index, item in enumerate(inputGen):
register(item)
registeredCount += 1
if registeredCount%step != 0:
continue
if (index+1 >= n) or includePartialChunks:
yield list(currentWindowDeque)
if includePartialChunks:
for i in range(n-1):
register(defaultValue)
registeredCount += 1
if registeredCount%step != 0:
continue
yield list(currentWindowDeque)
if includeEmptyChunks:
yield list(currentWindowDeque)
def allAreEqual(inputSeq):
inputSeq = makeGen(inputSeq)
sharedValue = next(inputSeq)
for item in inputSeq:
if item != sharedValue:
return False
return True
def seqsAreEqual(*args):
  zipLongest = itertools.zip_longest if hasattr(itertools, "zip_longest") else itertools.izip_longest #python2/3 compatibility.
  return all(allAreEqual(item) for item in zipLongest(*args))
def countIn(inputSeq, testValue, includeDenominator=False):
return countTriggers(inputSeq,(lambda x: x==testValue),includeDenominator=includeDenominator)
def countTriggers(inputSeq, triggerFun, includeDenominator=False):
count, denominator = 0, 0
for item in inputSeq:
count, denominator = count+triggerFun(item), denominator+1
return (count, denominator) if includeDenominator else count
def genRunless(inputSeq, func=(lambda compA, compB: compA == compB)):
#this generator takes an input sequence and yields only the items that aren't the same as the previous item.
#this generator eats only as much as it yields.
previousItem = None
justStarted = True
for item in inputSeq:
if justStarted:
justStarted = False
previousItem = item
yield item
else:
if not func(item, previousItem):
previousItem = item
yield item
#not tested well.
def genTrackEnds(
inputSeq,
leftTag="left", middleTag="middle", rightTag="right", onlyTag="only",
useLookahead=True,
tagByExpanding=False,
supressOvereatingWarning=False):
if iter(inputSeq) is iter(inputSeq):
if useLookahead:
if not supressOvereatingWarning:
print("PyGenTools.genTrackEnds: over-eating warning: this function may take more items from inputSeq than it yields with the current args.")
if tagByExpanding:
def toTagged(workingItem, tagToApply):
return (type(workingItem))((tagToApply,)) + workingItem
else:
def toTagged(workingItem, tagToApply):
|
inputGen = makeGen(inputSeq)
if useLookahead:
previousItem = None
index = None
for index,currentItem in enumerate(inputGen):
#assert currentItem is not None, "can't handle Nones in inputSeq yet."
if index == 0:
previousItem = currentItem
elif index == 1:
yield toTagged(previousItem, leftTag)
else:
yield toTagged(previousItem, middleTag)
previousItem = currentItem
if index is None:
return
elif index == 0:
yield toTagged(currentItem, onlyTag)
else:
yield toTagged(currentItem, rightTag)
else:
for index,currentItem in enumerate(inputGen):
if index == 0:
yield toTagged(currentItem, leftTag)
else:
yield toTagged(currentItem, middleTag)
def enumerateFlatly(inputSeq, start=0):
commonLength = None
isProbablyTupleSeq = None
isProbablyListSeq = None
for index,value in enumerate(inputSeq, start=start):
assert value != None, "can't."
if commonLength is None:
isProbablyTupleSeq = isinstance(value, tuple)
isProbablyListSeq = isinstance(value, list)
      if isProbablyTupleSeq or isProbablyListSeq:
commonLength = len(value)
isExpandableSeq = isProbablyTupleSeq or isProbablyListSeq
if not isExpandableSeq:
print("PyGenTools.enumerateFlatly: warning: this input sequence isn't of items that can be expanded to include enumeration (the first item is of type {}). New tuples will be created for each item... This is just like builtin enumerate, but with overhead.".format(repr(type(value))))
if isExpandableSeq:
if isinstance(value, tuple):
assert len(value) == commonLength
yield (index,) + value
elif isinstance(value, list):
assert len(value) == commonLength
yield [index,] + value
else:
assert False
else:
yield (index, value)
def genMonitored(inputSeq, text=""):
inputGen = makeGen(inputSeq)
i = 0
while True:
try:
item = next(inputGen)
print("PyGenTools.genMonitored: " + text + " i={}, item={}.".format(i, item))
yield item
i += 1
except StopIteration as se:
print("PyGenTools.genMonitored: " + text + " i={}, {}".format(i, repr(se)))
raise se
assert False, "Unreachable statement."
class GenBypass:
def __init__(self, useHistory=False):
#print("GenBypass init.")
self.inputGens = None
    self.latestOutputs = None
self.modifiedInputGens = None
self.useHistory = useHistory
if self.useHistory:
self.history = deque()
def _create_callback_method(self, indexToWriteTo):
def callbackMethod(value):
#print("callback method: value {} into index {}.".format(value, indexToWriteTo))
if self.latestOutputs[indexToWriteTo] is not None:
raise SynchronicityViolation("multiple writes to output tracker for generator at index {}.".format(indexToWriteTo))
self.latestOutputs[indexToWriteTo] = value
if self.useHistory:
assert value is not None, "this breaks things."
if self.latestOutputs.count(None) == 0:
self._handle_full_outputs()
#print("self.lastOutputs is now {}.".format(self.lastOutputs))
return callbackMethod
def _gen_modify(self, genToModify, callbackMethod):
#print("_gen_modify called on {} with callbackMethod {}.".format(genToModify, callbackMethod))
for item in genToModify:
callbackMethod(item)
yield item
def _handle_full_outputs(self):
self.history.append([item for item in self.latestOutputs])
for i in range(len(self.latestOutputs)):
self.latestOutputs[i] = None
def intercept(self, *inputGens):
self.capture(inputGens)
return self.share()
def capture(self, *inputGens):
genCount = len(inputGens)
self.inputGens = [item for item in inputGens]
self.latestOutputs = [None for i in range(genCount)]
self.callbackMethods = [self._create_callback_method(i) for i in range(genCount)]
self.modifiedInputGens = [self._gen_modify(genToModify, self.callbackMethods[i]) for i,genToModify in enumerate(self.inputGens)]
def share(self):
assert len(self.modifiedInputGens) != 0
return self.modifiedInputGens
def wrap(self, inputGen):
#"unspecified_at_{}".format(i)
#result = [None for i in range(len(self.lastOutputs)+1)]
isLastRun = False
if self.useHistory:
sentinel = object() #get unique sentinel. only compare using is.
for inputGenItem in sentinelize(inputGen, sentinel=sentinel):
assert self.latestOutputs == [None for i in range(len(self.latestOutputs))]
while len(self.history) > 1:
yield [item for item in self.history.popleft()] + [None]
if inputGenItem is sentinel:
isLastRun = True
inputGenItem = None #don't yield sentinel.
if not len(self.history) >= 1:
return #don't try.
yield [item for item in self.history.popleft()] + [inputGenItem]
if isLastRun:
return #history dumped after last item, nothing left to do.
else:
yield [item for item in self.latestOutputs] + [inputGenItem]
for i in range(len(self.latestOutputs)):
self.latestOutputs[i] = None
#assert i != 0
#assert self.lastOutputs.count(None) == len(self.lastOutputs)
class CC:
"""
CC stands for ConverterClass.
An instance of CC can convert an iterable object to a python list by right multiplication (<iterable> * <CC instance> is equivalent to PyGenTools.makeArr(<iterable>)) or convert a list (or other iterable object) to a generator by right division (<iterable> / <CC instance> is equivalent to PyGenTools.makeGen(<iterable>)).
"""
def __init__(self,*args):
self.count = None
if len(args) > 0:
self.count = args[0]
if not self.count > 0:
raise ValueError("Zero or negative CC counts are not allowed.")
print("PyGenTools.ConverterClass.__init__: initialized with " + ("count " + str(self.count) if self.count != None else "indefinite count") + ".")
def __rmul__(self,other):
if self.count != None:
return arrTakeOnly(other,self.count)
else:
return makeArr(other)
def __rdiv__(self,other):
if self.count != None:
return genTakeOnly(other,self.count)
else:
return makeGen(other)
def __rtruediv__(self,other):
#python3 compatibility.
return self.__rdiv__(other)
cc = CC()
#tests:
assert len([item for item in genTakeOnly(range(256),10)]) == 10
assert arrTakeOnly(range(10),5) == [0,1,2,3,4]
assert arrTakeOnly(range(5),10) == [0,1,2,3,4]
assert "".join(zipGens(["hello","123456789","ABC"])) == "h1Ae2Bl3Cl4o56789"
assert range(10) * cc == [0,1,2,3,4,5,6,7,8,9]
assert isGen([0,1,2,3,4] / cc)
| return (tagToApply, workingItem) | identifier_body |
PyGenTools.py | """
PyGenTools.py by John Dorsey.
PyGenTools.py contains tools that work on python generators without handling their items (comparing items, etc.).
"""
import traceback
import itertools
from collections import deque
class ExhaustionError(Exception):
"""
ExhaustionError is raised when a function that eats items from an input generator runs out of items in that generator, but did not have a strict target count of items to eat.
"""
pass
class IterationFailure(Exception):
"""
IterationFailure is raised when an iterable runs out of items, in a situation where it never should have run out of items.
"""
pass
class SynchronicityViolation(Exception):
"""
SynchronicityViolation is raised when some code expected to perform exactly one operation per execution step of the code around it performs a different number of operations.
"""
pass
def isGen(thing):
#test whether something is a generator. Compatible with python2 and python3.
return type(thing) == type((i for i in range(1)))
def makeGen(thing):
#make anything iterable into a generator. This is useful for when certain functions are supposed to take only as many items as they need from the beginning of some data and leave the rest in a way that further iteration will begin where the first function stopped iterating, such as in parsing universal codes in Codes.py.
if isGen(thing):
return thing
return (item for item in thing) | try:
result = [item for item in thing]
except KeyboardInterrupt:
raise KeyboardInterrupt("PyGenTools.makeArr was stuck on " + str(thing) + ".")
return result
def handleOnExhaustion(methodName, yieldedCount, targetCount, onExhaustion):
if isinstance(onExhaustion,Exception):
raise onExhaustion
elif onExhaustion == "fail":
raise IterationFailure(methodName + " ran out of items, and its onExhaustion action is \"fail\".")
if onExhaustion == "ExhaustionError":
raise ExhaustionError(methodName + " ran out of items, and its onExhaustion action is \"ExhaustionError\".")
if "warn" in onExhaustion:
print("...\n" + "".join(traceback.format_list(traceback.extract_stack())) + ": " + methodName + " ran out of items. (yieldedCount,targetCount,onExhaustion)=" + str((yieldedCount, targetCount, onExhaustion)) + ".")
if "partial" in onExhaustion:
return
if "None" in onExhaustion:
raise ValueError(methodName + ": the onExhaustion action \"None\" is no longer supported.")
raise ValueError(methodName + ": the value of onExhaustion is invalid. (yieldedCount,targetCount,onExhaustion)=" + str((yieldedCount, targetCount, onExhaustion)) + ".")
def genTakeOnly(inputGen, targetCount, onExhaustion="partial"):
#take ONLY _count_ items from a generator _inputGen_ and yield them, so that if other functions call .next on the generator that was shared with this function, they will pick up exactly where this function's output left off (no missing items).
assert onExhaustion in ["fail","ExhaustionError","partial","warn"]
assert targetCount >= 0
if targetCount == 0:
return
i = 0
for item in inputGen:
if i < targetCount:
yield item
i += 1
if not i < targetCount:
return
handleOnExhaustion("PyGenTools.genTakeOnly", i, targetCount, onExhaustion)
def arrTakeOnly(inputGen, targetCount, onExhaustion="partial"):
#just like genTakeOnly, but bundle the taken items together into an array.
assert isinstance(onExhaustion,Exception) or onExhaustion in ["fail","ExhaustionError","warn+partial","partial"]
result = [item for item in genTakeOnly(inputGen,targetCount)]
if len(result) < targetCount:
handleOnExhaustion("PyGenTools.arrTakeOnly", len(result), targetCount, onExhaustion)
return result
def genSkipFirst(inputGen, count):
assert isGen(inputGen)
for i in range(count):
_ = next(inputGen)
return inputGen
def genTakeUntil(inputGen, stopFun, stopSignalsNeeded=1): #might be used in MarkovTools someday.
stopSignalCount = 0
for item in inputGen:
yield item
if stopFun(item):
stopSignalCount += 1
if stopSignalCount >= stopSignalsNeeded:
return
print("PyGenTools.genTakeUntil ran out of items.")
def arrTakeLast(inputGen, count):
if count == None:
raise ValueError("count can't be None.")
if type(inputGen) == list:
print("PyGenTools.arrTakeLast was called on a list. It will treat the list like a generator. This might be a waste of time.")
storage = [None for ii in range(count)]
i = -1
for item in inputGen:
i = (i+1)%count
storage[i] = item
splitPoint = i+1
return storage[splitPoint:]+storage[:splitPoint]
def getLast(inputGen):
if type(inputGen) == list:
print("PyGenTools.getLast was called on a list. It will treat the list like a generator. This might be a pointless waste of time.")
storage = None
loopRan = False
for item in inputGen:
loopRan = True
storage = item
assert loopRan
return storage
def indexOfValueInGen(testValue, testGen): #used in MarkovTools.
for i,item in enumerate(testGen):
if item == testValue:
return i
return None
def valueAtIndexInGen(inputIndex, inputGen): #used in MarkovTools.
if inputIndex == None:
raise ValueError("inputIndex can't be None.")
return arrTakeLast(genTakeOnly(inputGen, inputIndex+1), 1)[0]
def sentinelize(inputSeq, sentinel=None, loopSentinel=False, failFun=None):
"""
Signals the end of a generator by yielding an additional item after its end. Note that sentinelize(x) makes a generator from any type of input, so combining it with makeGen is redundant.
"""
for item in inputSeq:
yield item
yield sentinel
while loopSentinel:
yield loopSentinel
if failFun:
failFun()
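#illustrative check (assumed example values, same spirit as the tests at the end of this file):
assert [item for item in sentinelize([1,2])] == [1,2,None]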
def zipGens(inputGens):
"""
This function gives a generator whose items are taken one at a time from each generator provided in a circular order. It runs until all the provided generators are empty. Technically, it can be given arrays instead of generators and it will correct for this. The array of generators may also be a generator instead of an array.
"""
gensRunning = [True for i in range(len(inputGens))] #this map prevents needing to catch the same StopIteration many times for each generator that stops sooner than the last one to stop.
workingGenArr = [makeGen(item) for item in inputGens] #in case the inputGens contains things that aren't generators _or_ inputGens itself is a generator, this fixes that.
while not all(not genIsRunning for genIsRunning in gensRunning):
for genIndex in range(len(workingGenArr)):
if gensRunning[genIndex]:
try:
yield next(workingGenArr[genIndex])
except StopIteration:
gensRunning[genIndex] = False #don't check this generator for items again.
def genAddInt(inputSeq, inputInt): #used in CodecTools.Codec.
for item in inputSeq:
yield item+inputInt
def arrAddInt(inputArr, inputInt): #used in CodecTools.Codec.
assert type(inputArr) == list
return [item+inputInt for item in inputArr]
def genDeduped(inputSeq):
if isGen(inputSeq) or type(inputSeq) == list:
history = set()
for item in inputSeq:
if item not in history:
history.add(item)
yield item
#this version uses less memory but isn't as fast. if re-enabling, change first branch's condition.
"""
elif type(inputSeq) == list:
for i,item in enumerate(inputSeq):
if i == 0:
yield item
continue
if item not in inputSeq[:i]:
yield item
"""
else:
raise ValueError("unsupported type: " + str(type(inputSeq)) + ".")
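#illustrative check (assumed example values): order is preserved, repeats are dropped.
assert [item for item in genDeduped([1,2,1,3,2])] == [1,2,3]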
"""
def getAccumulatorFun(thing):
if type(thing) == str:
result = eval("(lambda x,y: x{}y)".format(thing))
elif type(thing) == type((lambda x: x)):
result = thing
else:
raise TypeError("must be string or function.")
return result
"""
def accumulate(inputSeq, inputFun):
inputSeq = makeGen(inputSeq)
#inputFun = getAccumulatorFun(inputFun) #not used anymore now that itertools.accumulate is sometimes used.
result = next(inputSeq)
for item in inputSeq:
result = inputFun(result, item)
return result
def product(inputSeq):
return accumulate(inputSeq, (lambda x,y:x*y))
def genChunksAsLists(inputGen, n=2, partialChunkHandling="warn partial"):
inputGen = makeGen(inputGen)
while True:
chunkAsList = arrTakeOnly(inputGen, n, onExhaustion="partial") #make list.
if len(chunkAsList) < n:
if "warn" in partialChunkHandling:
print("PyGenTools.genChunksAsLists: warning: partial chunk.")
if "fail" in partialChunkHandling:
raise IterationFailure("partial chunk.")
if not "partial" in partialChunkHandling:
if "discard" in partialChunkHandling:
return
else:
print("PyGenTools.genChunksAsLists: warning: partial chunk encountered, but the partialChunkHandling kwarg does not indicate what should be done (no \"partial\" and no \"discard\"). The chunk will be yielded.")
if len(chunkAsList) == 0:
return
yield chunkAsList
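#illustrative check (assumed example values): an even split, so no partial chunk is involved.
assert [chunk for chunk in genChunksAsLists(range(6), n=2, partialChunkHandling="partial")] == [[0,1],[2,3],[4,5]]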
def genRollingWindowsAsLists(inputGen, n=3, step=1, defaultValue=None, includePartialChunks=True, includeEmptyChunks=False): # @ could be faster by using an index wrapping list.
if step != 1:
raise NotImplementedError("step != 1")
if includeEmptyChunks and not includePartialChunks:
raise ValueError("can't include empty chunks without including partial chunks.")
currentWindowDeque = deque([defaultValue for i in range(n)])
registeredCount = 0
def register(itemToRegister):
currentWindowDeque.append(itemToRegister)
currentWindowDeque.popleft()
if includeEmptyChunks:
yield list(currentWindowDeque)
for index, item in enumerate(inputGen):
register(item)
registeredCount += 1
if registeredCount%step != 0:
continue
if (index+1 >= n) or includePartialChunks:
yield list(currentWindowDeque)
if includePartialChunks:
for i in range(n-1):
register(defaultValue)
registeredCount += 1
if registeredCount%step != 0:
continue
yield list(currentWindowDeque)
if includeEmptyChunks:
yield list(currentWindowDeque)
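#illustrative check (assumed example values): with includePartialChunks=False only full windows are yielded.
assert [window for window in genRollingWindowsAsLists(range(4), n=3, includePartialChunks=False)] == [[0,1,2],[1,2,3]]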
def allAreEqual(inputSeq):
inputSeq = makeGen(inputSeq)
sharedValue = next(inputSeq)
for item in inputSeq:
if item != sharedValue:
return False
return True
def seqsAreEqual(*args):
  zipLongest = itertools.zip_longest if hasattr(itertools, "zip_longest") else itertools.izip_longest #python2/3 compatibility.
  return all(allAreEqual(item) for item in zipLongest(*args))
def countIn(inputSeq, testValue, includeDenominator=False):
return countTriggers(inputSeq,(lambda x: x==testValue),includeDenominator=includeDenominator)
def countTriggers(inputSeq, triggerFun, includeDenominator=False):
count, denominator = 0, 0
for item in inputSeq:
count, denominator = count+triggerFun(item), denominator+1
return (count, denominator) if includeDenominator else count
def genRunless(inputSeq, func=(lambda compA, compB: compA == compB)):
#this generator takes an input sequence and yields only the items that aren't the same as the previous item.
#this generator eats only as much as it yields.
previousItem = None
justStarted = True
for item in inputSeq:
if justStarted:
justStarted = False
previousItem = item
yield item
else:
if not func(item, previousItem):
previousItem = item
yield item
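#illustrative check (assumed example values): consecutive duplicates are dropped, later repeats are kept.
assert [item for item in genRunless([1,1,2,2,2,3,1])] == [1,2,3,1]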
#not tested well.
def genTrackEnds(
inputSeq,
leftTag="left", middleTag="middle", rightTag="right", onlyTag="only",
useLookahead=True,
tagByExpanding=False,
supressOvereatingWarning=False):
if iter(inputSeq) is iter(inputSeq):
if useLookahead:
if not supressOvereatingWarning:
print("PyGenTools.genTrackEnds: over-eating warning: this function may take more items from inputSeq than it yields with the current args.")
if tagByExpanding:
def toTagged(workingItem, tagToApply):
return (type(workingItem))((tagToApply,)) + workingItem
else:
def toTagged(workingItem, tagToApply):
return (tagToApply, workingItem)
inputGen = makeGen(inputSeq)
if useLookahead:
previousItem = None
index = None
for index,currentItem in enumerate(inputGen):
#assert currentItem is not None, "can't handle Nones in inputSeq yet."
if index == 0:
previousItem = currentItem
elif index == 1:
yield toTagged(previousItem, leftTag)
else:
yield toTagged(previousItem, middleTag)
previousItem = currentItem
if index is None:
return
elif index == 0:
yield toTagged(currentItem, onlyTag)
else:
yield toTagged(currentItem, rightTag)
else:
for index,currentItem in enumerate(inputGen):
if index == 0:
yield toTagged(currentItem, leftTag)
else:
yield toTagged(currentItem, middleTag)
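#illustrative check (assumed example values): default tags with lookahead enabled.
assert [item for item in genTrackEnds("abc")] == [("left","a"),("middle","b"),("right","c")]
assert [item for item in genTrackEnds("a")] == [("only","a")]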
def enumerateFlatly(inputSeq, start=0):
commonLength = None
isProbablyTupleSeq = None
isProbablyListSeq = None
for index,value in enumerate(inputSeq, start=start):
assert value != None, "can't."
if commonLength is None:
isProbablyTupleSeq = isinstance(value, tuple)
isProbablyListSeq = isinstance(value, list)
      if isProbablyTupleSeq or isProbablyListSeq:
commonLength = len(value)
isExpandableSeq = isProbablyTupleSeq or isProbablyListSeq
if not isExpandableSeq:
print("PyGenTools.enumerateFlatly: warning: this input sequence isn't of items that can be expanded to include enumeration (the first item is of type {}). New tuples will be created for each item... This is just like builtin enumerate, but with overhead.".format(repr(type(value))))
if isExpandableSeq:
if isinstance(value, tuple):
assert len(value) == commonLength
yield (index,) + value
elif isinstance(value, list):
assert len(value) == commonLength
yield [index,] + value
else:
assert False
else:
yield (index, value)
def genMonitored(inputSeq, text=""):
inputGen = makeGen(inputSeq)
i = 0
while True:
try:
item = next(inputGen)
print("PyGenTools.genMonitored: " + text + " i={}, item={}.".format(i, item))
yield item
i += 1
except StopIteration as se:
print("PyGenTools.genMonitored: " + text + " i={}, {}".format(i, repr(se)))
raise se
assert False, "Unreachable statement."
class GenBypass:
def __init__(self, useHistory=False):
#print("GenBypass init.")
self.inputGens = None
    self.latestOutputs = None
self.modifiedInputGens = None
self.useHistory = useHistory
if self.useHistory:
self.history = deque()
def _create_callback_method(self, indexToWriteTo):
def callbackMethod(value):
#print("callback method: value {} into index {}.".format(value, indexToWriteTo))
if self.latestOutputs[indexToWriteTo] is not None:
raise SynchronicityViolation("multiple writes to output tracker for generator at index {}.".format(indexToWriteTo))
self.latestOutputs[indexToWriteTo] = value
if self.useHistory:
assert value is not None, "this breaks things."
if self.latestOutputs.count(None) == 0:
self._handle_full_outputs()
#print("self.lastOutputs is now {}.".format(self.lastOutputs))
return callbackMethod
def _gen_modify(self, genToModify, callbackMethod):
#print("_gen_modify called on {} with callbackMethod {}.".format(genToModify, callbackMethod))
for item in genToModify:
callbackMethod(item)
yield item
def _handle_full_outputs(self):
self.history.append([item for item in self.latestOutputs])
for i in range(len(self.latestOutputs)):
self.latestOutputs[i] = None
def intercept(self, *inputGens):
self.capture(inputGens)
return self.share()
def capture(self, *inputGens):
genCount = len(inputGens)
self.inputGens = [item for item in inputGens]
self.latestOutputs = [None for i in range(genCount)]
self.callbackMethods = [self._create_callback_method(i) for i in range(genCount)]
self.modifiedInputGens = [self._gen_modify(genToModify, self.callbackMethods[i]) for i,genToModify in enumerate(self.inputGens)]
def share(self):
assert len(self.modifiedInputGens) != 0
return self.modifiedInputGens
def wrap(self, inputGen):
#"unspecified_at_{}".format(i)
#result = [None for i in range(len(self.lastOutputs)+1)]
isLastRun = False
if self.useHistory:
sentinel = object() #get unique sentinel. only compare using is.
for inputGenItem in sentinelize(inputGen, sentinel=sentinel):
assert self.latestOutputs == [None for i in range(len(self.latestOutputs))]
while len(self.history) > 1:
yield [item for item in self.history.popleft()] + [None]
if inputGenItem is sentinel:
isLastRun = True
inputGenItem = None #don't yield sentinel.
if not len(self.history) >= 1:
return #don't try.
yield [item for item in self.history.popleft()] + [inputGenItem]
if isLastRun:
return #history dumped after last item, nothing left to do.
else:
yield [item for item in self.latestOutputs] + [inputGenItem]
for i in range(len(self.latestOutputs)):
self.latestOutputs[i] = None
#assert i != 0
#assert self.lastOutputs.count(None) == len(self.lastOutputs)
class CC:
"""
CC stands for ConverterClass.
An instance of CC can convert an iterable object to a python list by right multiplication (<iterable> * <CC instance> is equivalent to PyGenTools.makeArr(<iterable>)) or convert a list (or other iterable object) to a generator by right division (<iterable> / <CC instance> is equivalent to PyGenTools.makeGen(<iterable>)).
"""
def __init__(self,*args):
self.count = None
if len(args) > 0:
self.count = args[0]
if not self.count > 0:
raise ValueError("Zero or negative CC counts are not allowed.")
print("PyGenTools.ConverterClass.__init__: initialized with " + ("count " + str(self.count) if self.count != None else "indefinite count") + ".")
def __rmul__(self,other):
if self.count != None:
return arrTakeOnly(other,self.count)
else:
return makeArr(other)
def __rdiv__(self,other):
if self.count != None:
return genTakeOnly(other,self.count)
else:
return makeGen(other)
def __rtruediv__(self,other):
#python3 compatibility.
return self.__rdiv__(other)
cc = CC()
#tests:
assert len([item for item in genTakeOnly(range(256),10)]) == 10
assert arrTakeOnly(range(10),5) == [0,1,2,3,4]
assert arrTakeOnly(range(5),10) == [0,1,2,3,4]
assert "".join(zipGens(["hello","123456789","ABC"])) == "h1Ae2Bl3Cl4o56789"
assert range(10) * cc == [0,1,2,3,4,5,6,7,8,9]
assert isGen([0,1,2,3,4] / cc) |
def makeArr(thing):
if type(thing) == list:
return thing | random_line_split |
PyGenTools.py | """
PyGenTools.py by John Dorsey.
PyGenTools.py contains tools that work on python generators without handling their items (comparing items, etc.).
"""
import traceback
import itertools
from collections import deque
class ExhaustionError(Exception):
"""
ExhaustionError is raised when a function that eats items from an input generator runs out of items in that generator, but did not have a strict target count of items to eat.
"""
pass
class IterationFailure(Exception):
"""
IterationFailure is raised when an iterable runs out of items, in a situation where it never should have run out of items.
"""
pass
class SynchronicityViolation(Exception):
"""
SynchronicityViolation is raised when some code expected to perform exactly one operation per execution step of the code around it performs a different number of operations.
"""
pass
def isGen(thing):
#test whether something is a generator. Compatible with python2 and python3.
return type(thing) == type((i for i in range(1)))
def makeGen(thing):
#make anything iterable into a generator. This is useful for when certain functions are supposed to take only as many items as they need from the beginning of some data and leave the rest in a way that further iteration will begin where the first function stopped iterating, such as in parsing universal codes in Codes.py.
if isGen(thing):
return thing
return (item for item in thing)
def makeArr(thing):
if type(thing) == list:
return thing
try:
result = [item for item in thing]
except KeyboardInterrupt:
raise KeyboardInterrupt("PyGenTools.makeArr was stuck on " + str(thing) + ".")
return result
def handleOnExhaustion(methodName, yieldedCount, targetCount, onExhaustion):
if isinstance(onExhaustion,Exception):
raise onExhaustion
elif onExhaustion == "fail":
raise IterationFailure(methodName + " ran out of items, and its onExhaustion action is \"fail\".")
if onExhaustion == "ExhaustionError":
raise ExhaustionError(methodName + " ran out of items, and its onExhaustion action is \"ExhaustionError\".")
if "warn" in onExhaustion:
print("...\n" + "".join(traceback.format_list(traceback.extract_stack())) + ": " + methodName + " ran out of items. (yieldedCount,targetCount,onExhaustion)=" + str((yieldedCount, targetCount, onExhaustion)) + ".")
if "partial" in onExhaustion:
return
if "None" in onExhaustion:
raise ValueError(methodName + ": the onExhaustion action \"None\" is no longer supported.")
raise ValueError(methodName + ": the value of onExhaustion is invalid. (yieldedCount,targetCount,onExhaustion)=" + str((yieldedCount, targetCount, onExhaustion)) + ".")
def genTakeOnly(inputGen, targetCount, onExhaustion="partial"):
#take ONLY _count_ items from a generator _inputGen_ and yield them, so that if other functions call .next on the generator that was shared with this function, they will pick up exactly where this function's output left off (no missing items).
assert onExhaustion in ["fail","ExhaustionError","partial","warn"]
assert targetCount >= 0
if targetCount == 0:
return
i = 0
for item in inputGen:
if i < targetCount:
yield item
i += 1
if not i < targetCount:
return
handleOnExhaustion("PyGenTools.genTakeOnly", i, targetCount, onExhaustion)
def arrTakeOnly(inputGen, targetCount, onExhaustion="partial"):
#just like genTakeOnly, but bundle the taken items together into an array.
assert isinstance(onExhaustion,Exception) or onExhaustion in ["fail","ExhaustionError","warn+partial","partial"]
result = [item for item in genTakeOnly(inputGen,targetCount)]
if len(result) < targetCount:
handleOnExhaustion("PyGenTools.arrTakeOnly", len(result), targetCount, onExhaustion)
return result
def genSkipFirst(inputGen, count):
assert isGen(inputGen)
for i in range(count):
_ = next(inputGen)
return inputGen
def genTakeUntil(inputGen, stopFun, stopSignalsNeeded=1): #might be used in MarkovTools someday.
stopSignalCount = 0
for item in inputGen:
yield item
if stopFun(item):
stopSignalCount += 1
if stopSignalCount >= stopSignalsNeeded:
return
print("PyGenTools.genTakeUntil ran out of items.")
def arrTakeLast(inputGen, count):
if count == None:
raise ValueError("count can't be None.")
if type(inputGen) == list:
print("PyGenTools.arrTakeLast was called on a list. It will treat the list like a generator. This might be a waste of time.")
storage = [None for ii in range(count)]
i = -1
for item in inputGen:
i = (i+1)%count
storage[i] = item
splitPoint = i+1
return storage[splitPoint:]+storage[:splitPoint]
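#illustrative check (assumed example values): only the last _count_ items survive, in order.
assert arrTakeLast((item for item in range(10)), 3) == [7,8,9]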
def getLast(inputGen):
if type(inputGen) == list:
print("PyGenTools.getLast was called on a list. It will treat the list like a generator. This might be a pointless waste of time.")
storage = None
loopRan = False
for item in inputGen:
loopRan = True
storage = item
assert loopRan
return storage
def indexOfValueInGen(testValue, testGen): #used in MarkovTools.
for i,item in enumerate(testGen):
if item == testValue:
return i
return None
def valueAtIndexInGen(inputIndex, inputGen): #used in MarkovTools.
if inputIndex == None:
raise ValueError("inputIndex can't be None.")
return arrTakeLast(genTakeOnly(inputGen, inputIndex+1), 1)[0]
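#illustrative check (assumed example values):
assert indexOfValueInGen("c", (char for char in "abcdef")) == 2
assert valueAtIndexInGen(2, (char for char in "abcdef")) == "c"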
def sentinelize(inputSeq, sentinel=None, loopSentinel=False, failFun=None):
"""
Signals the end of a generator by yielding an additional item after its end. Note that sentinelize(x) makes a generator from any type of input, so combining it with makeGen is redundant.
"""
for item in inputSeq:
yield item
yield sentinel
while loopSentinel:
yield loopSentinel
if failFun:
failFun()
def zipGens(inputGens):
"""
This function gives a generator whose items are taken one at a time from each generator provided in a circular order. It runs until all the provided generators are empty. Technically, it can be given arrays instead of generators and it will correct for this. The array of generators may also be a generator instead of an array.
"""
gensRunning = [True for i in range(len(inputGens))] #this map prevents needing to catch the same StopIteration many times for each generator that stops sooner than the last one to stop.
workingGenArr = [makeGen(item) for item in inputGens] #in case the inputGens contains things that aren't generators _or_ inputGens itself is a generator, this fixes that.
while not all(not genIsRunning for genIsRunning in gensRunning):
for genIndex in range(len(workingGenArr)):
if gensRunning[genIndex]:
try:
yield next(workingGenArr[genIndex])
except StopIteration:
gensRunning[genIndex] = False #don't check this generator for items again.
def genAddInt(inputSeq, inputInt): #used in CodecTools.Codec.
for item in inputSeq:
yield item+inputInt
def arrAddInt(inputArr, inputInt): #used in CodecTools.Codec.
assert type(inputArr) == list
return [item+inputInt for item in inputArr]
def genDeduped(inputSeq):
if isGen(inputSeq) or type(inputSeq) == list:
history = set()
for item in inputSeq:
if item not in history:
history.add(item)
yield item
#this version uses less memory but isn't as fast. if re-enabling, change first branch's condition.
"""
elif type(inputSeq) == list:
for i,item in enumerate(inputSeq):
if i == 0:
yield item
continue
if item not in inputSeq[:i]:
yield item
"""
else:
|
"""
def getAccumulatorFun(thing):
if type(thing) == str:
result = eval("(lambda x,y: x{}y)".format(thing))
elif type(thing) == type((lambda x: x)):
result = thing
else:
raise TypeError("must be string or function.")
return result
"""
def accumulate(inputSeq, inputFun):
inputSeq = makeGen(inputSeq)
#inputFun = getAccumulatorFun(inputFun) #not used anymore now that itertools.accumulate is sometimes used.
result = next(inputSeq)
for item in inputSeq:
result = inputFun(result, item)
return result
def product(inputSeq):
return accumulate(inputSeq, (lambda x,y:x*y))
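#illustrative check (assumed example values): accumulate folds left to right over the sequence.
assert accumulate([1,2,3,4], (lambda x,y: x+y)) == 10
assert product([1,2,3,4]) == 24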
def genChunksAsLists(inputGen, n=2, partialChunkHandling="warn partial"):
inputGen = makeGen(inputGen)
while True:
chunkAsList = arrTakeOnly(inputGen, n, onExhaustion="partial") #make list.
if len(chunkAsList) < n:
if "warn" in partialChunkHandling:
print("PyGenTools.genChunksAsLists: warning: partial chunk.")
if "fail" in partialChunkHandling:
raise IterationFailure("partial chunk.")
if not "partial" in partialChunkHandling:
if "discard" in partialChunkHandling:
return
else:
print("PyGenTools.genChunksAsLists: warning: partial chunk encountered, but the partialChunkHandling kwarg does not indicate what should be done (no \"partial\" and no \"discard\"). The chunk will be yielded.")
if len(chunkAsList) == 0:
return
yield chunkAsList
def genRollingWindowsAsLists(inputGen, n=3, step=1, defaultValue=None, includePartialChunks=True, includeEmptyChunks=False): # @ could be faster by using an index wrapping list.
if step != 1:
raise NotImplementedError("step != 1")
if includeEmptyChunks and not includePartialChunks:
raise ValueError("can't include empty chunks without including partial chunks.")
currentWindowDeque = deque([defaultValue for i in range(n)])
registeredCount = 0
def register(itemToRegister):
currentWindowDeque.append(itemToRegister)
currentWindowDeque.popleft()
if includeEmptyChunks:
yield list(currentWindowDeque)
for index, item in enumerate(inputGen):
register(item)
registeredCount += 1
if registeredCount%step != 0:
continue
if (index+1 >= n) or includePartialChunks:
yield list(currentWindowDeque)
if includePartialChunks:
for i in range(n-1):
register(defaultValue)
registeredCount += 1
if registeredCount%step != 0:
continue
yield list(currentWindowDeque)
if includeEmptyChunks:
yield list(currentWindowDeque)
def allAreEqual(inputSeq):
inputSeq = makeGen(inputSeq)
sharedValue = next(inputSeq)
for item in inputSeq:
if item != sharedValue:
return False
return True
def seqsAreEqual(*args):
  zipLongest = itertools.zip_longest if hasattr(itertools, "zip_longest") else itertools.izip_longest #python2/3 compatibility.
  return all(allAreEqual(item) for item in zipLongest(*args))
def countIn(inputSeq, testValue, includeDenominator=False):
return countTriggers(inputSeq,(lambda x: x==testValue),includeDenominator=includeDenominator)
def countTriggers(inputSeq, triggerFun, includeDenominator=False):
count, denominator = 0, 0
for item in inputSeq:
count, denominator = count+triggerFun(item), denominator+1
return (count, denominator) if includeDenominator else count
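#illustrative check (assumed example values):
assert countIn("banana", "a") == 3
assert countIn("banana", "a", includeDenominator=True) == (3,6)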
def genRunless(inputSeq, func=(lambda compA, compB: compA == compB)):
#this generator takes an input sequence and yields only the items that aren't the same as the previous item.
#this generator eats only as much as it yields.
previousItem = None
justStarted = True
for item in inputSeq:
if justStarted:
justStarted = False
previousItem = item
yield item
else:
if not func(item, previousItem):
previousItem = item
yield item
#not tested well.
def genTrackEnds(
inputSeq,
leftTag="left", middleTag="middle", rightTag="right", onlyTag="only",
useLookahead=True,
tagByExpanding=False,
supressOvereatingWarning=False):
if iter(inputSeq) is iter(inputSeq):
if useLookahead:
if not supressOvereatingWarning:
print("PyGenTools.genTrackEnds: over-eating warning: this function may take more items from inputSeq than it yields with the current args.")
if tagByExpanding:
def toTagged(workingItem, tagToApply):
return (type(workingItem))((tagToApply,)) + workingItem
else:
def toTagged(workingItem, tagToApply):
return (tagToApply, workingItem)
inputGen = makeGen(inputSeq)
if useLookahead:
previousItem = None
index = None
for index,currentItem in enumerate(inputGen):
#assert currentItem is not None, "can't handle Nones in inputSeq yet."
if index == 0:
previousItem = currentItem
elif index == 1:
yield toTagged(previousItem, leftTag)
else:
yield toTagged(previousItem, middleTag)
previousItem = currentItem
if index is None:
return
elif index == 0:
yield toTagged(currentItem, onlyTag)
else:
yield toTagged(currentItem, rightTag)
else:
for index,currentItem in enumerate(inputGen):
if index == 0:
yield toTagged(currentItem, leftTag)
else:
yield toTagged(currentItem, middleTag)
def enumerateFlatly(inputSeq, start=0):
commonLength = None
isProbablyTupleSeq = None
isProbablyListSeq = None
for index,value in enumerate(inputSeq, start=start):
assert value != None, "can't."
if commonLength is None:
isProbablyTupleSeq = isinstance(value, tuple)
isProbablyListSeq = isinstance(value, list)
      if isProbablyTupleSeq or isProbablyListSeq:
commonLength = len(value)
isExpandableSeq = isProbablyTupleSeq or isProbablyListSeq
if not isExpandableSeq:
print("PyGenTools.enumerateFlatly: warning: this input sequence isn't of items that can be expanded to include enumeration (the first item is of type {}). New tuples will be created for each item... This is just like builtin enumerate, but with overhead.".format(repr(type(value))))
if isExpandableSeq:
if isinstance(value, tuple):
assert len(value) == commonLength
yield (index,) + value
elif isinstance(value, list):
assert len(value) == commonLength
yield [index,] + value
else:
assert False
else:
yield (index, value)
def genMonitored(inputSeq, text=""):
inputGen = makeGen(inputSeq)
i = 0
while True:
try:
item = next(inputGen)
print("PyGenTools.genMonitored: " + text + " i={}, item={}.".format(i, item))
yield item
i += 1
except StopIteration as se:
print("PyGenTools.genMonitored: " + text + " i={}, {}".format(i, repr(se)))
raise se
assert False, "Unreachable statement."
class GenBypass:
def __init__(self, useHistory=False):
#print("GenBypass init.")
self.inputGens = None
    self.latestOutputs = None
self.modifiedInputGens = None
self.useHistory = useHistory
if self.useHistory:
self.history = deque()
def _create_callback_method(self, indexToWriteTo):
def callbackMethod(value):
#print("callback method: value {} into index {}.".format(value, indexToWriteTo))
if self.latestOutputs[indexToWriteTo] is not None:
raise SynchronicityViolation("multiple writes to output tracker for generator at index {}.".format(indexToWriteTo))
self.latestOutputs[indexToWriteTo] = value
if self.useHistory:
assert value is not None, "this breaks things."
if self.latestOutputs.count(None) == 0:
self._handle_full_outputs()
#print("self.lastOutputs is now {}.".format(self.lastOutputs))
return callbackMethod
def _gen_modify(self, genToModify, callbackMethod):
#print("_gen_modify called on {} with callbackMethod {}.".format(genToModify, callbackMethod))
for item in genToModify:
callbackMethod(item)
yield item
def _handle_full_outputs(self):
self.history.append([item for item in self.latestOutputs])
for i in range(len(self.latestOutputs)):
self.latestOutputs[i] = None
def intercept(self, *inputGens):
self.capture(inputGens)
return self.share()
def capture(self, *inputGens):
genCount = len(inputGens)
self.inputGens = [item for item in inputGens]
self.latestOutputs = [None for i in range(genCount)]
self.callbackMethods = [self._create_callback_method(i) for i in range(genCount)]
self.modifiedInputGens = [self._gen_modify(genToModify, self.callbackMethods[i]) for i,genToModify in enumerate(self.inputGens)]
def share(self):
assert len(self.modifiedInputGens) != 0
return self.modifiedInputGens
def wrap(self, inputGen):
#"unspecified_at_{}".format(i)
#result = [None for i in range(len(self.lastOutputs)+1)]
isLastRun = False
if self.useHistory:
sentinel = object() #get unique sentinel. only compare using is.
for inputGenItem in sentinelize(inputGen, sentinel=sentinel):
assert self.latestOutputs == [None for i in range(len(self.latestOutputs))]
while len(self.history) > 1:
yield [item for item in self.history.popleft()] + [None]
if inputGenItem is sentinel:
isLastRun = True
inputGenItem = None #don't yield sentinel.
if not len(self.history) >= 1:
return #don't try.
yield [item for item in self.history.popleft()] + [inputGenItem]
if isLastRun:
return #history dumped after last item, nothing left to do.
else:
yield [item for item in self.latestOutputs] + [inputGenItem]
for i in range(len(self.latestOutputs)):
self.latestOutputs[i] = None
#assert i != 0
#assert self.lastOutputs.count(None) == len(self.lastOutputs)
class CC:
"""
CC stands for ConverterClass.
An instance of CC can convert an iterable object to a python list by right multiplication (<iterable> * <CC instance> is equivalent to PyGenTools.makeArr(<iterable>)) or convert a list (or other iterable object) to a generator by right division (<iterable> / <CC instance> is equivalent to PyGenTools.makeGen(<iterable>)).
"""
def __init__(self,*args):
self.count = None
if len(args) > 0:
self.count = args[0]
if not self.count > 0:
raise ValueError("Zero or negative CC counts are not allowed.")
print("PyGenTools.ConverterClass.__init__: initialized with " + ("count " + str(self.count) if self.count != None else "indefinite count") + ".")
def __rmul__(self,other):
if self.count != None:
return arrTakeOnly(other,self.count)
else:
return makeArr(other)
def __rdiv__(self,other):
if self.count != None:
return genTakeOnly(other,self.count)
else:
return makeGen(other)
def __rtruediv__(self,other):
#python3 compatibility.
return self.__rdiv__(other)
cc = CC()
#tests:
assert len([item for item in genTakeOnly(range(256),10)]) == 10
assert arrTakeOnly(range(10),5) == [0,1,2,3,4]
assert arrTakeOnly(range(5),10) == [0,1,2,3,4]
assert "".join(zipGens(["hello","123456789","ABC"])) == "h1Ae2Bl3Cl4o56789"
assert range(10) * cc == [0,1,2,3,4,5,6,7,8,9]
assert isGen([0,1,2,3,4] / cc)
| raise ValueError("unsupported type: " + str(type(inputSeq)) + ".") | conditional_block |
IPC.js | /* global Main, StatusLight */
/**
* ---------
* IPC class
* ---------
* @constructor
* @param {number} id
**/
function IPC(id) {
this._id = id;
this._numRetries = 0;
this._tokenPos = 0;
this._lastTime = new Date().getTime();
return this;
}
IPC.prototype.run = function() {
var lastClientProcess = new Date().getTime();
if (IPC.clients.length > 0) {
if (this.socketConnect()) {
this.postConnectClients();
}
} else {
if (lastClientProcess + IPC.CONNECTION_TIMEOUT < new Date().getTime()) {
this.cleanup();
}
}
}
IPC.prototype.cleanup = function(){
if (this._socket) {
this._socket.close();
this._socket = undefined;
}
IPC.nextClientId += 100;
IPC.connected = false;
IPC.clients = undefined;
IPC.currentClient = undefined;
IPC.master = undefined;
}
IPC.prototype.processClients = function() {
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
switch (client._state) {
case IPC_Client.STATE_START:
if (!IPC.connected)
break;
IPC.clientLogin(client);
break;
case IPC_Client.STATE_LOGOUT:
if (client._feed) {
IPC.clientLogout(client);
}
break;
}
}
}
/**
* @param {string} data
*/
IPC.prototype.processInputBuffer = function(data) {
if (typeof Main.getSession()._root._statusLights != 'undefined') {
Main.getSession()._root._statusLights.changeStatus(1);
}
if (data.length === 0)
return;
var newDate = new Date().getTime();
var b = (newDate - this._lastTime) > 500;
if (b) {
this._lastTime = newDate;
if (IPC.currentClient) {
if (IPC.currentClient._feed && IPC.currentClient._feed._name === "Chart") {
if (IPC.currentClient._feed._error !== -1) {
Main.getSession()._root._statusLights.changeStatus(StatusLight.SL_NODATA);
return;
}
}
IPC.currentClient.flush();
}
}
if (!this._sharp)
this._sharp = 0;
if (data === "#") {
this._sharp++;
} else
this._sharp = 0;
if (this._sharp > 15)
console.warn("Feed. Socket not receiving data, listening...");
var c0 = data[0];
if (c0 === '$') {
if (IPC.currentClient)
IPC.currentClient.flush();
IPC.currentClient = this.getClient(data.substring(1));
return;
}
if (!IPC.currentClient || !IPC.currentClient._feed)
return;
if (c0 === '{') {
// console.log("IPC. received data: " + data);
this._tokenPos = 1;
this.handleData(data);
return;
}
switch (c0) {
case '+':
/*
* possible incoming error codes STREAM_ERR_TIMEOUT 0 STREAM_ERR_WINDOW_LIMIT 1
* STREAM_ERR_DUPLICATE_PAGE 2 STREAM_ERR_CLIENT_VERSION 3 STREAM_ERR_INVALID_SID 4
* STREAM_ERR_NOT_AVAILABLE 5 STREAM_ERR_NOT_AUTHENTICATED 6
*
* do not die on a 5! all the others should die.
*/
var error = parseInt(data.substring(1, data.indexOf(":")), 10);
console.error(error);
IPC.currentClient._feed.onError(error);
break;
case 'i':
IPC.currentClient.add({'_id': -3});
break;
case 'p':
IPC.currentClient.add({'_id': -2});
break;
}
}
/**
* @param {string} data
*/
IPC.prototype.handleData = function(data) {
var farray = data.substring(1).split('~');
if (farray.length < 3)
return;
if (IPC.currentClient)
IPC.currentClient.add({'_id': parseInt(farray[0], 10), '_contents': farray[1], '_flags': parseInt(farray[2], 10)});
}
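/*
 * Illustrative payload (assumed shape, inferred from the parsing above rather than from any server docs):
 * a frame such as "{42~hello~0" splits on '~' into an id (42), contents ("hello") and flags (0),
 * which are then queued on the current client via add().
 */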
/**
* @param {string} tag
*/
IPC.prototype.getClient = function(tag) {
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._state === IPC_Client.STATE_LOGIN && client._tag === tag) {
return client;
}
}
}
IPC.prototype.socketConnect = function() {
if (IPC.master && IPC.master._socket && IPC.master._socket.readyState <= 1) {
IPC.connected = true;
this._numRetries = 0;
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._state !== IPC_Client.STATE_LOGOUT) {
client._state = IPC_Client.STATE_START;
}
}
return true;
}
var self = this;
this._numRetries++;
if (this._numRetries > IPC.MAX_CONNECTION_RETRIES) {
// todo: show alert for reconnect
this._numRetries = 0;
return false;
}
if (!IPC.master)
return;
IPC.master._socket = new WebSocket(Main.getWebSocketURL());
console.log("Feed. Socket connecting...");
IPC.master._socket.onopen = function() {
console.log("Feed. Socket opened.");
IPC.ipcTag = new Date().getTime();
this.send(JSON.stringify({'type': 'stream_request', 'ipc_tag': IPC.ipcTag}));
}
IPC.master._socket.onmessage = function(event) {
// console.log("IPC. Received data: " + event.data);
if (event.data === 'STREAM') {
IPC.connected = true;
self._numRetries = 0;
} else if (IPC.master) {
IPC.master.processInputBuffer(event.data);
}
if (IPC.master)
IPC.master.processClients();
}
IPC.master._socket.onclose = function(event) {
if (event.wasClean) {
console.log("Feed. Socket closed clean.");
} else {
var reason;
if (event.code == 1000)
reason = "Normal closure, meaning that the purpose for which the connection was established has been fulfilled.";
else if(event.code == 1001)
reason = "An endpoint is \"going away\", such as a server going down or a browser having navigated away from a page.";
else if(event.code == 1002)
reason = "An endpoint is terminating the connection due to a protocol error";
else if(event.code == 1003)
reason = "An endpoint is terminating the connection because it has received a type of data it cannot accept (e.g., an endpoint that understands only text data MAY send this if it receives a binary message).";
else if(event.code == 1004)
reason = "Reserved. The specific meaning might be defined in the future.";
else if(event.code == 1005)
reason = "No status code was actually present.";
else if(event.code == 1006)
reason = "The connection was closed abnormally, e.g., without sending or receiving a Close control frame";
else if(event.code == 1007)
reason = "An endpoint is terminating the connection because it has received data within a message that was not consistent with the type of the message (e.g., non-UTF-8 [http://tools.ietf.org/html/rfc3629] data within a text message).";
else if(event.code == 1008)
reason = "An endpoint is terminating the connection because it has received a message that \"violates its policy\". This reason is given either if there is no other sutible reason, or if there is a need to hide specific details about the policy.";
else if(event.code == 1009)
reason = "An endpoint is terminating the connection because it has received a message that is too big for it to process.";
else if(event.code == 1010) // Note that this status code is not used by the server, because it can fail the WebSocket handshake instead.
reason = "An endpoint (client) is terminating the connection because it has expected the server to negotiate one or more extension, but the server didn't return them in the response message of the WebSocket handshake. <br /> Specifically, the extensions that are needed are: " + event.reason;
else if(event.code == 1011)
reason = "A server is terminating the connection because it encountered an unexpected condition that prevented it from fulfilling the request.";
else if(event.code == 1015)
reason = "The connection was closed due to a failure to perform a TLS handshake (e.g., the server certificate can't be verified).";
else
reason = "Unknown reason";
console.log("Feed. Socket closed with error:", reason);
setTimeout(function() {
console.log("Feed. Socket restart.");
self.socketConnect();
}, 3000);
}
Main.getSession()._root._statusLights.changeStatus(StatusLight.SL_NODATA);
}
return true;
}
IPC.prototype._disconnect = function() {
if (IPC.master && IPC.master._socket) {
console.log("Feed. Socket disconnect.", IPC.master._socket.readyState);
IPC.master._socket.close(1000);
}
IPC.connected = false;
if (IPC.master)
IPC.master._socket = undefined;
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._state !== IPC_Client.STATE_LOGOUT) |
}
this.postDisconnectClients();
}
IPC.prototype.postConnectClients = function() {
if (IPC.connected)
return;
for (var i = 0; i < IPC.clients.length; i++) {
IPC.clients[i]._feed.handleConnect();
}
}
IPC.prototype.postDisconnectClients = function() {
if (!IPC.connected)
return;
for (var i = 0; i < IPC.clients.length; i++) {
IPC.clients[i]._feed.handleDisconnect();
}
}
/** @static */
IPC.MAX_CONNECTION_RETRIES = 4;
/** @static */
IPC.CONNECTION_TIMEOUT = 5000;
/** @static */
IPC.MAX_CLIENTS = 90;
/** @static */
IPC.masterId = 1;
/** @static */
IPC.nextClientId = 1;
/** @static */
IPC.connected = false;
/** @static */
IPC.clients = [];
/**
* @static
* @param {Feed} feed
*/
IPC.register = function(feed) {
if (!IPC.master) {
IPC.master = new IPC(IPC.masterId++);
}
// console.log("clients length", this.clients.length);
var client;
for (var i = 0; i < IPC.clients.length; i++) {
var c = IPC.clients[i];
if (c._feed && c._feed._name === feed._name) {
if (c._data === feed._dataBlock && c._state !== IPC_Client.STATE_LOGOUT) {
client = c;
} else {
c._feed.stop(c._data);
}
}
}
if (!client) {
// new one
if (IPC.clients.length === IPC.MAX_CLIENTS) {
console.warn("Feed. Max clients.")
return -1;
}
client = new IPC_Client(IPC.nextClientId++, feed);
IPC.clients.push(client);
console.log("Feed. Register client for " + client._feed._name + ":", client._id, client._feed._dataBlock);
}
IPC.master.run();
return client._id;
}
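// Usage sketch (hedged: the Feed shape below is inferred from this file only, not a public API).
// A feed registers itself to obtain a streaming client and later unregisters by name/data block:
//   var clientId = IPC.register(feed);            // feed must expose _name, _dataBlock,
//                                                 // add(), onError(), stop(),
//                                                 // handleConnect() and handleDisconnect()
//   IPC.unregister(feed._name, feed._dataBlock);  // flags the matching client for logout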
/**
* @static
* @param {string} feed_name
* @param {string} dataBlock
*/
IPC.unregister = function(feed_name, dataBlock) {
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._feed && client._feed._name === feed_name && client._data === dataBlock) {
if (client === IPC.currentClient) {
IPC.currentClient = undefined;
}
client._state = IPC_Client.STATE_LOGOUT;
}
}
}
/**
* @static
* @param {IPC_Client} client
*/
IPC.clientLogin = function(client) {
client._tag = IPC.ipcTag + "," + client._id;
var str = JSON.stringify({'type': 'subscribe', 'client_id': client._id, 'user': IPC.userName, 'sid': IPC.sid, 'page_key': Main.getParams()["page_key"], 'app': client._feed._name, 'ipc_tag': IPC.ipcTag, 'request': client._feed._dataBlock});
IPC.master._socket.send(str);
client._state = IPC_Client.STATE_LOGIN;
}
/**
* @static
* @param {IPC_Client} client
*/
IPC.clientLogout = function(client) {
if (IPC.currentClient && client._id === IPC.currentClient._id) {
IPC.currentClient = undefined;
}
var str = JSON.stringify({type: 'unsubscribe', client_id: client._id, ipc_tag: IPC.ipcTag});
if (IPC.master._socket.readyState === 1) {
IPC.master._socket.send(str);
console.log("Feed. Unregister client", client._id);
client._feed = undefined;
var idx;
for (idx = 0; idx < IPC.clients.length; idx++) {
var c = IPC.clients[idx];
if (c._id === client._id)
break;
}
if (idx < IPC.clients.length)
IPC.clients.splice(idx, 1);
}
}
/**
* ----------
* IPC_Client
* ----------
* @constructor
* @param {number} id
* @param {Feed} feed
*/
function IPC_Client(id, feed) {
this._id = id;
this._data = feed._dataBlock;
this._feed = feed;
this._feedItems = [];
this._state = IPC_Client.STATE_START;
}
/**
* @param {Array} fi
*/
IPC_Client.prototype.add = function(fi) {
this._feedItems.push(fi);
if (this._feedItems.length > 5000) {
console.warn("Feed client not flushing data.", this._feedItems.length);
this.flush();
}
}
IPC_Client.prototype.flush = function() {
this._feed.add(this._feedItems);
this._feedItems = [];
}
/** @static */
IPC_Client.STATE_START = 0;
/** @static */
IPC_Client.STATE_LOGIN = 1;
/** @static */
IPC_Client.STATE_LOGOUT = 2; | {
client._state = IPC_Client.STATE_START;
} | conditional_block |
IPC.js | /* global Main, StatusLight */
/**
* ---------
* IPC class
* ---------
* @constructor
* @param {number} id
**/
function | (id) {
this._id = id;
this._numRetries = 0;
this._tokenPos = 0;
this._lastTime = new Date().getTime();
return this;
}
IPC.prototype.run = function() {
var lastClientProcess = new Date().getTime();
if (IPC.clients.length > 0) {
if (this.socketConnect()) {
this.postConnectClients();
}
} else {
if (lastClientProcess + IPC.CONNECTION_TIMEOUT < new Date().getTime()) {
this.cleanup();
}
}
}
IPC.prototype.cleanup = function(){
if (this._socket) {
this._socket.close();
this._socket = undefined;
}
IPC.nextClientId += 100;
IPC.connected = false;
IPC.clients = undefined;
IPC.currentClient = undefined;
IPC.master = undefined;
}
IPC.prototype.processClients = function() {
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
switch (client._state) {
case IPC_Client.STATE_START:
if (!IPC.connected)
break;
IPC.clientLogin(client);
break;
case IPC_Client.STATE_LOGOUT:
if (client._feed) {
IPC.clientLogout(client);
}
break;
}
}
}
/**
* @param {string} data
*/
IPC.prototype.processInputBuffer = function(data) {
if (typeof Main.getSession()._root._statusLights != 'undefined') {
Main.getSession()._root._statusLights.changeStatus(1);
}
if (data.length === 0)
return;
var newDate = new Date().getTime();
var b = (newDate - this._lastTime) > 500;
if (b) {
this._lastTime = newDate;
if (IPC.currentClient) {
if (IPC.currentClient._feed && IPC.currentClient._feed._name === "Chart") {
if (IPC.currentClient._feed._error !== -1) {
Main.getSession()._root._statusLights.changeStatus(StatusLight.SL_NODATA);
return;
}
}
IPC.currentClient.flush();
}
}
if (!this._sharp)
this._sharp = 0;
if (data === "#") {
this._sharp++;
} else
this._sharp = 0;
if (this._sharp > 15)
console.warn("Feed. Socket not receiving data, listening...");
var c0 = data[0];
if (c0 === '$') {
if (IPC.currentClient)
IPC.currentClient.flush();
IPC.currentClient = this.getClient(data.substring(1));
return;
}
if (!IPC.currentClient || !IPC.currentClient._feed)
return;
if (c0 === '{') {
// console.log("IPC. received data: " + data);
this._tokenPos = 1;
this.handleData(data);
return;
}
switch (c0) {
case '+':
/*
* possible incoming error codes STREAM_ERR_TIMEOUT 0 STREAM_ERR_WINDOW_LIMIT 1
* STREAM_ERR_DUPLICATE_PAGE 2 STREAM_ERR_CLIENT_VERSION 3 STREAM_ERR_INVALID_SID 4
* STREAM_ERR_NOT_AVAILABLE 5 STREAM_ERR_NOT_AUTHENTICATED 6
*
* do not die on a 5! all the others should die.
*/
var error = parseInt(data.substring(1, data.indexOf(":")), 10);
console.error(error);
IPC.currentClient._feed.onError(error);
break;
case 'i':
IPC.currentClient.add({'_id': -3});
break;
case 'p':
IPC.currentClient.add({'_id': -2});
break;
}
}
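// Frame types handled above (summary inferred from the code, not a formal protocol spec):
//   "#"                   keep-alive; a long run of '#' frames triggers the idle warning
//   "$<tag>"              switches IPC.currentClient to the client whose _tag equals <tag>
//   "{id~contents~flags"  data row for the current client, parsed by handleData()
//   "+<code>:..."         stream error; the numeric code is passed to the feed's onError()
//   "i" / "p"             control markers queued as {_id: -3} / {_id: -2}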
/**
* @param {string} data
*/
IPC.prototype.handleData = function(data) {
var farray = data.substring(1).split('~');
if (farray.length < 3)
return;
if (IPC.currentClient)
IPC.currentClient.add({'_id': parseInt(farray[0], 10), '_contents': farray[1], '_flags': parseInt(farray[2], 10)});
}
/**
* @param {string} tag
*/
IPC.prototype.getClient = function(tag) {
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._state === IPC_Client.STATE_LOGIN && client._tag === tag) {
return client;
}
}
}
IPC.prototype.socketConnect = function() {
if (IPC.master && IPC.master._socket && IPC.master._socket.readyState <= 1) {
IPC.connected = true;
this._numRetries = 0;
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._state !== IPC_Client.STATE_LOGOUT) {
client._state = IPC_Client.STATE_START;
}
}
return true;
}
var self = this;
this._numRetries++;
if (this._numRetries > IPC.MAX_CONNECTION_RETRIES) {
// todo: show alert for reconnect
this._numRetries = 0;
return false;
}
if (!IPC.master)
return;
IPC.master._socket = new WebSocket(Main.getWebSocketURL());
console.log("Feed. Socket connecting...");
IPC.master._socket.onopen = function() {
console.log("Feed. Socket opened.");
IPC.ipcTag = new Date().getTime();
this.send(JSON.stringify({'type': 'stream_request', 'ipc_tag': IPC.ipcTag}));
}
IPC.master._socket.onmessage = function(event) {
// console.log("IPC. Received data: " + event.data);
if (event.data === 'STREAM') {
IPC.connected = true;
self._numRetries = 0;
} else if (IPC.master) {
IPC.master.processInputBuffer(event.data);
}
if (IPC.master)
IPC.master.processClients();
}
IPC.master._socket.onclose = function(event) {
if (event.wasClean) {
console.log("Feed. Socket closed clean.");
} else {
var reason;
if (event.code == 1000)
reason = "Normal closure, meaning that the purpose for which the connection was established has been fulfilled.";
else if(event.code == 1001)
reason = "An endpoint is \"going away\", such as a server going down or a browser having navigated away from a page.";
else if(event.code == 1002)
reason = "An endpoint is terminating the connection due to a protocol error";
else if(event.code == 1003)
reason = "An endpoint is terminating the connection because it has received a type of data it cannot accept (e.g., an endpoint that understands only text data MAY send this if it receives a binary message).";
else if(event.code == 1004)
reason = "Reserved. The specific meaning might be defined in the future.";
else if(event.code == 1005)
reason = "No status code was actually present.";
else if(event.code == 1006)
reason = "The connection was closed abnormally, e.g., without sending or receiving a Close control frame";
else if(event.code == 1007)
reason = "An endpoint is terminating the connection because it has received data within a message that was not consistent with the type of the message (e.g., non-UTF-8 [http://tools.ietf.org/html/rfc3629] data within a text message).";
else if(event.code == 1008)
reason = "An endpoint is terminating the connection because it has received a message that \"violates its policy\". This reason is given either if there is no other sutible reason, or if there is a need to hide specific details about the policy.";
else if(event.code == 1009)
reason = "An endpoint is terminating the connection because it has received a message that is too big for it to process.";
else if(event.code == 1010) // Note that this status code is not used by the server, because it can fail the WebSocket handshake instead.
reason = "An endpoint (client) is terminating the connection because it has expected the server to negotiate one or more extension, but the server didn't return them in the response message of the WebSocket handshake. <br /> Specifically, the extensions that are needed are: " + event.reason;
else if(event.code == 1011)
reason = "A server is terminating the connection because it encountered an unexpected condition that prevented it from fulfilling the request.";
else if(event.code == 1015)
reason = "The connection was closed due to a failure to perform a TLS handshake (e.g., the server certificate can't be verified).";
else
reason = "Unknown reason";
console.log("Feed. Socket closed with error:", reason);
setTimeout(function() {
console.log("Feed. Socket restart.");
self.socketConnect();
}, 3000);
}
Main.getSession()._root._statusLights.changeStatus(StatusLight.SL_NODATA);
}
return true;
}
IPC.prototype._disconnect = function() {
if (IPC.master && IPC.master._socket) {
console.log("Feed. Socket disconnect.", IPC.master._socket.readyState);
IPC.master._socket.close(1000);
}
IPC.connected = false;
if (IPC.master)
IPC.master._socket = undefined;
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._state !== IPC_Client.STATE_LOGOUT) {
client._state = IPC_Client.STATE_START;
}
}
this.postDisconnectClients();
}
IPC.prototype.postConnectClients = function() {
if (IPC.connected)
return;
for (var i = 0; i < IPC.clients.length; i++) {
IPC.clients[i]._feed.handleConnect();
}
}
IPC.prototype.postDisconnectClients = function() {
if (!IPC.connected)
return;
for (var i = 0; i < IPC.clients.length; i++) {
IPC.clients[i]._feed.handleDisconnect();
}
}
/** @static */
IPC.MAX_CONNECTION_RETRIES = 4;
/** @static */
IPC.CONNECTION_TIMEOUT = 5000;
/** @static */
IPC.MAX_CLIENTS = 90;
/** @static */
IPC.masterId = 1;
/** @static */
IPC.nextClientId = 1;
/** @static */
IPC.connected = false;
/** @static */
IPC.clients = [];
/**
* @static
* @param {Feed} feed
*/
IPC.register = function(feed) {
if (!IPC.master) {
IPC.master = new IPC(IPC.masterId++);
}
// console.log("clients length", this.clients.length);
var client;
for (var i = 0; i < IPC.clients.length; i++) {
var c = IPC.clients[i];
if (c._feed && c._feed._name === feed._name) {
if (c._data === feed._dataBlock && c._state !== IPC_Client.STATE_LOGOUT) {
client = c;
} else {
c._feed.stop(c._data);
}
}
}
if (!client) {
// new one
if (IPC.clients.length === IPC.MAX_CLIENTS) {
console.warn("Feed. Max clients.")
return -1;
}
client = new IPC_Client(IPC.nextClientId++, feed);
IPC.clients.push(client);
console.log("Feed. Register client for " + client._feed._name + ":", client._id, client._feed._dataBlock);
}
IPC.master.run();
return client._id;
}
/**
* @static
* @param {string} feed_name
* @param {string} dataBlock
*/
IPC.unregister = function(feed_name, dataBlock) {
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._feed && client._feed._name === feed_name && client._data === dataBlock) {
if (client === IPC.currentClient) {
IPC.currentClient = undefined;
}
client._state = IPC_Client.STATE_LOGOUT;
}
}
}
/**
* @static
* @param {IPC_Client} client
*/
IPC.clientLogin = function(client) {
client._tag = IPC.ipcTag + "," + client._id;
var str = JSON.stringify({'type': 'subscribe', 'client_id': client._id, 'user': IPC.userName, 'sid': IPC.sid, 'page_key': Main.getParams()["page_key"], 'app': client._feed._name, 'ipc_tag': IPC.ipcTag, 'request': client._feed._dataBlock});
IPC.master._socket.send(str);
client._state = IPC_Client.STATE_LOGIN;
}
/**
* @static
* @param {IPC_Client} client
*/
IPC.clientLogout = function(client) {
if (IPC.currentClient && client._id === IPC.currentClient._id) {
IPC.currentClient = undefined;
}
var str = JSON.stringify({type: 'unsubscribe', client_id: client._id, ipc_tag: IPC.ipcTag});
if (IPC.master._socket.readyState === 1) {
IPC.master._socket.send(str);
console.log("Feed. Unregister client", client._id);
client._feed = undefined;
var idx;
for (idx = 0; idx < IPC.clients.length; idx++) {
var c = IPC.clients[idx];
if (c._id === client._id)
break;
}
if (idx < IPC.clients.length)
IPC.clients.splice(idx, 1);
}
}
/**
* ----------
* IPC_Client
* ----------
* @constructor
* @param {number} id
* @param {Feed} feed
*/
function IPC_Client(id, feed) {
this._id = id;
this._data = feed._dataBlock;
this._feed = feed;
this._feedItems = [];
this._state = IPC_Client.STATE_START;
}
/**
* @param {Array} fi
*/
IPC_Client.prototype.add = function(fi) {
this._feedItems.push(fi);
if (this._feedItems.length > 5000) {
console.warn("Feed client not flushing data.", this._feedItems.length);
this.flush();
}
}
IPC_Client.prototype.flush = function() {
this._feed.add(this._feedItems);
this._feedItems = [];
}
/** @static */
IPC_Client.STATE_START = 0;
/** @static */
IPC_Client.STATE_LOGIN = 1;
/** @static */
IPC_Client.STATE_LOGOUT = 2; | IPC | identifier_name |
IPC.js | /* global Main, StatusLight */
/**
* ---------
* IPC class
* ---------
* @constructor
* @param {number} id
**/
function IPC(id) {
this._id = id;
this._numRetries = 0;
this._tokenPos = 0;
this._lastTime = new Date().getTime();
return this;
}
IPC.prototype.run = function() {
var lastClientProcess = new Date().getTime();
if (IPC.clients.length > 0) {
if (this.socketConnect()) {
this.postConnectClients();
}
} else {
if (lastClientProcess + IPC.CONNECTION_TIMEOUT < new Date().getTime()) {
this.cleanup();
}
}
}
IPC.prototype.cleanup = function(){
if (this._socket) {
this._socket.close();
this._socket = undefined;
}
IPC.nextClientId += 100;
IPC.connected = false;
IPC.clients = undefined;
IPC.currentClient = undefined;
IPC.master = undefined;
}
IPC.prototype.processClients = function() {
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
switch (client._state) {
case IPC_Client.STATE_START:
if (!IPC.connected)
break;
IPC.clientLogin(client);
break;
case IPC_Client.STATE_LOGOUT:
if (client._feed) {
IPC.clientLogout(client);
}
break;
}
}
}
/**
* @param {string} data
*/
IPC.prototype.processInputBuffer = function(data) {
if (typeof Main.getSession()._root._statusLights != 'undefined') {
Main.getSession()._root._statusLights.changeStatus(1);
}
if (data.length === 0)
return;
var newDate = new Date().getTime();
var b = (newDate - this._lastTime) > 500;
if (b) {
this._lastTime = newDate;
if (IPC.currentClient) {
if (IPC.currentClient._feed && IPC.currentClient._feed._name === "Chart") {
if (IPC.currentClient._feed._error !== -1) {
Main.getSession()._root._statusLights.changeStatus(StatusLight.SL_NODATA);
return;
}
}
IPC.currentClient.flush();
}
}
if (!this._sharp)
this._sharp = 0;
if (data === "#") {
this._sharp++;
} else
this._sharp = 0;
if (this._sharp > 15)
console.warn("Feed. Socket not receiving data, listening...");
var c0 = data[0];
if (c0 === '$') {
if (IPC.currentClient)
IPC.currentClient.flush();
IPC.currentClient = this.getClient(data.substring(1));
return;
}
if (!IPC.currentClient || !IPC.currentClient._feed)
return;
if (c0 === '{') {
// console.log("IPC. received data: " + data);
this._tokenPos = 1;
this.handleData(data);
return;
}
switch (c0) {
case '+':
/*
* possible incoming error codes STREAM_ERR_TIMEOUT 0 STREAM_ERR_WINDOW_LIMIT 1
* STREAM_ERR_DUPLICATE_PAGE 2 STREAM_ERR_CLIENT_VERSION 3 STREAM_ERR_INVALID_SID 4
* STREAM_ERR_NOT_AVAILABLE 5 STREAM_ERR_NOT_AUTHENTICATED 6
*
* do not die on a 5! all the others should die.
*/
var error = parseInt(data.substring(1, data.indexOf(":")), 10);
console.error(error);
IPC.currentClient._feed.onError(error);
break;
case 'i':
IPC.currentClient.add({'_id': -3});
break;
case 'p':
IPC.currentClient.add({'_id': -2});
break;
}
}
/**
* @param {string} data
*/
IPC.prototype.handleData = function(data) {
var farray = data.substring(1).split('~');
if (farray.length < 3)
return;
if (IPC.currentClient)
IPC.currentClient.add({'_id': parseInt(farray[0], 10), '_contents': farray[1], '_flags': parseInt(farray[2], 10)});
}
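// Example (illustrative values only): a frame such as "{42~<payload>~3" is split on '~'
// and queued on the current client as {_id: 42, _contents: "<payload>", _flags: 3};
// frames with fewer than three '~'-separated fields are silently dropped.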
/**
* @param {string} tag
*/
IPC.prototype.getClient = function(tag) {
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._state === IPC_Client.STATE_LOGIN && client._tag === tag) {
return client;
}
}
}
IPC.prototype.socketConnect = function() {
if (IPC.master && IPC.master._socket && IPC.master._socket.readyState <= 1) {
IPC.connected = true;
this._numRetries = 0;
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._state !== IPC_Client.STATE_LOGOUT) {
client._state = IPC_Client.STATE_START;
}
}
return true;
}
var self = this;
this._numRetries++;
if (this._numRetries > IPC.MAX_CONNECTION_RETRIES) {
// todo: show alert for reconnect
this._numRetries = 0;
return false;
}
if (!IPC.master)
return;
IPC.master._socket = new WebSocket(Main.getWebSocketURL());
console.log("Feed. Socket connecting...");
IPC.master._socket.onopen = function() {
console.log("Feed. Socket opened.");
IPC.ipcTag = new Date().getTime();
this.send(JSON.stringify({'type': 'stream_request', 'ipc_tag': IPC.ipcTag}));
}
IPC.master._socket.onmessage = function(event) {
// console.log("IPC. Received data: " + event.data);
if (event.data === 'STREAM') {
IPC.connected = true;
self._numRetries = 0;
} else if (IPC.master) {
IPC.master.processInputBuffer(event.data);
}
if (IPC.master)
IPC.master.processClients();
}
IPC.master._socket.onclose = function(event) {
if (event.wasClean) {
console.log("Feed. Socket closed clean.");
} else {
var reason;
if (event.code == 1000)
reason = "Normal closure, meaning that the purpose for which the connection was established has been fulfilled.";
else if(event.code == 1001)
reason = "An endpoint is \"going away\", such as a server going down or a browser having navigated away from a page.";
else if(event.code == 1002)
reason = "An endpoint is terminating the connection due to a protocol error";
else if(event.code == 1003)
reason = "An endpoint is terminating the connection because it has received a type of data it cannot accept (e.g., an endpoint that understands only text data MAY send this if it receives a binary message).";
else if(event.code == 1004)
reason = "Reserved. The specific meaning might be defined in the future.";
else if(event.code == 1005)
reason = "No status code was actually present.";
else if(event.code == 1006)
reason = "The connection was closed abnormally, e.g., without sending or receiving a Close control frame";
else if(event.code == 1007)
reason = "An endpoint is terminating the connection because it has received data within a message that was not consistent with the type of the message (e.g., non-UTF-8 [http://tools.ietf.org/html/rfc3629] data within a text message).";
else if(event.code == 1008)
reason = "An endpoint is terminating the connection because it has received a message that \"violates its policy\". This reason is given either if there is no other sutible reason, or if there is a need to hide specific details about the policy.";
else if(event.code == 1009)
reason = "An endpoint is terminating the connection because it has received a message that is too big for it to process.";
else if(event.code == 1010) // Note that this status code is not used by the server, because it can fail the WebSocket handshake instead.
reason = "An endpoint (client) is terminating the connection because it has expected the server to negotiate one or more extension, but the server didn't return them in the response message of the WebSocket handshake. <br /> Specifically, the extensions that are needed are: " + event.reason;
else if(event.code == 1011)
reason = "A server is terminating the connection because it encountered an unexpected condition that prevented it from fulfilling the request.";
else if(event.code == 1015)
reason = "The connection was closed due to a failure to perform a TLS handshake (e.g., the server certificate can't be verified).";
else
reason = "Unknown reason";
console.log("Feed. Socket closed with error:", reason);
setTimeout(function() {
console.log("Feed. Socket restart.");
self.socketConnect();
}, 3000);
}
Main.getSession()._root._statusLights.changeStatus(StatusLight.SL_NODATA);
}
return true;
}
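// Handshake summary (inferred from the handlers above): on open the socket sends
// {"type":"stream_request","ipc_tag":<ms timestamp>}; the server is expected to reply with
// the literal frame "STREAM", which marks the feed as connected before processClients()
// logs the waiting clients in. Every other frame is routed through processInputBuffer().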
IPC.prototype._disconnect = function() {
if (IPC.master && IPC.master._socket) {
console.log("Feed. Socket disconnect.", IPC.master._socket.readyState);
IPC.master._socket.close(1000);
}
IPC.connected = false;
if (IPC.master)
IPC.master._socket = undefined;
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._state !== IPC_Client.STATE_LOGOUT) {
client._state = IPC_Client.STATE_START;
}
}
this.postDisconnectClients();
}
IPC.prototype.postConnectClients = function() {
if (IPC.connected)
return;
for (var i = 0; i < IPC.clients.length; i++) {
IPC.clients[i]._feed.handleConnect();
}
}
IPC.prototype.postDisconnectClients = function() {
if (!IPC.connected)
return;
for (var i = 0; i < IPC.clients.length; i++) {
IPC.clients[i]._feed.handleDisconnect();
}
}
/** @static */
IPC.MAX_CONNECTION_RETRIES = 4;
/** @static */
IPC.CONNECTION_TIMEOUT = 5000;
/** @static */
IPC.MAX_CLIENTS = 90;
/** @static */
IPC.masterId = 1;
/** @static */
IPC.nextClientId = 1;
/** @static */
IPC.connected = false;
/** @static */
IPC.clients = [];
/**
* @static
* @param {Feed} feed
*/
IPC.register = function(feed) {
if (!IPC.master) {
IPC.master = new IPC(IPC.masterId++);
}
// console.log("clients length", this.clients.length);
var client;
for (var i = 0; i < IPC.clients.length; i++) {
var c = IPC.clients[i];
if (c._feed && c._feed._name === feed._name) {
if (c._data === feed._dataBlock && c._state !== IPC_Client.STATE_LOGOUT) {
client = c;
} else {
c._feed.stop(c._data);
}
}
}
if (!client) {
// new one
if (IPC.clients.length === IPC.MAX_CLIENTS) {
console.warn("Feed. Max clients.")
return -1;
}
client = new IPC_Client(IPC.nextClientId++, feed);
IPC.clients.push(client);
console.log("Feed. Register client for " + client._feed._name + ":", client._id, client._feed._dataBlock);
}
IPC.master.run();
return client._id;
}
/**
* @static
* @param {string} feed_name
* @param {string} dataBlock
*/
IPC.unregister = function(feed_name, dataBlock) {
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._feed && client._feed._name === feed_name && client._data === dataBlock) {
if (client === IPC.currentClient) {
IPC.currentClient = undefined;
}
client._state = IPC_Client.STATE_LOGOUT;
}
}
}
/**
* @static
* @param {IPC_Client} client
*/
IPC.clientLogin = function(client) {
client._tag = IPC.ipcTag + "," + client._id;
var str = JSON.stringify({'type': 'subscribe', 'client_id': client._id, 'user': IPC.userName, 'sid': IPC.sid, 'page_key': Main.getParams()["page_key"], 'app': client._feed._name, 'ipc_tag': IPC.ipcTag, 'request': client._feed._dataBlock});
IPC.master._socket.send(str);
client._state = IPC_Client.STATE_LOGIN;
}
/**
* @static
* @param {IPC_Client} client
*/
IPC.clientLogout = function(client) {
if (IPC.currentClient && client._id === IPC.currentClient._id) {
IPC.currentClient = undefined;
}
var str = JSON.stringify({type: 'unsubscribe', client_id: client._id, ipc_tag: IPC.ipcTag});
if (IPC.master._socket.readyState === 1) {
IPC.master._socket.send(str);
console.log("Feed. Unregister client", client._id);
client._feed = undefined;
var idx;
for (idx = 0; idx < IPC.clients.length; idx++) {
var c = IPC.clients[idx];
if (c._id === client._id)
break;
}
if (idx < IPC.clients.length)
IPC.clients.splice(idx, 1);
}
}
/**
* ----------
* IPC_Client
* ----------
* @constructor
* @param {number} id
* @param {Feed} feed
*/
function IPC_Client(id, feed) |
/**
* @param {Array} fi
*/
IPC_Client.prototype.add = function(fi) {
this._feedItems.push(fi);
if (this._feedItems.length > 5000) {
console.warn("Feed client not flushing data.", this._feedItems.length);
this.flush();
}
}
IPC_Client.prototype.flush = function() {
this._feed.add(this._feedItems);
this._feedItems = [];
}
/** @static */
IPC_Client.STATE_START = 0;
/** @static */
IPC_Client.STATE_LOGIN = 1;
/** @static */
IPC_Client.STATE_LOGOUT = 2; | {
this._id = id;
this._data = feed._dataBlock;
this._feed = feed;
this._feedItems = [];
this._state = IPC_Client.STATE_START;
} | identifier_body |
IPC.js | /* global Main, StatusLight */
/**
* ---------
* IPC class
* ---------
* @constructor
* @param {number} id
**/
function IPC(id) {
this._id = id;
this._numRetries = 0;
this._tokenPos = 0;
this._lastTime = new Date().getTime();
return this;
}
IPC.prototype.run = function() {
var lastClientProcess = new Date().getTime();
if (IPC.clients.length > 0) {
if (this.socketConnect()) {
this.postConnectClients();
}
} else {
if (lastClientProcess + IPC.CONNECTION_TIMEOUT < new Date().getTime()) {
this.cleanup();
}
}
}
IPC.prototype.cleanup = function(){
if (this._socket) {
this._socket.close();
this._socket = undefined;
}
IPC.nextClientId += 100;
IPC.connected = false;
IPC.clients = undefined;
IPC.currentClient = undefined;
IPC.master = undefined;
}
IPC.prototype.processClients = function() {
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
switch (client._state) {
case IPC_Client.STATE_START:
if (!IPC.connected)
break;
IPC.clientLogin(client);
break;
case IPC_Client.STATE_LOGOUT:
if (client._feed) {
IPC.clientLogout(client);
}
break;
}
}
}
/**
* @param {string} data
*/
IPC.prototype.processInputBuffer = function(data) {
if (typeof Main.getSession()._root._statusLights != 'undefined') {
Main.getSession()._root._statusLights.changeStatus(1);
}
if (data.length === 0)
return;
var newDate = new Date().getTime();
var b = (newDate - this._lastTime) > 500;
if (b) {
this._lastTime = newDate;
if (IPC.currentClient) {
if (IPC.currentClient._feed && IPC.currentClient._feed._name === "Chart") {
if (IPC.currentClient._feed._error !== -1) {
Main.getSession()._root._statusLights.changeStatus(StatusLight.SL_NODATA);
return;
}
}
IPC.currentClient.flush();
}
}
if (!this._sharp)
this._sharp = 0;
if (data === "#") {
this._sharp++;
} else
this._sharp = 0;
if (this._sharp > 15)
console.warn("Feed. Socket not receiving data, listening...");
var c0 = data[0];
if (c0 === '$') {
if (IPC.currentClient)
IPC.currentClient.flush();
IPC.currentClient = this.getClient(data.substring(1));
return;
}
if (!IPC.currentClient || !IPC.currentClient._feed)
return;
if (c0 === '{') {
// console.log("IPC. received data: " + data);
this._tokenPos = 1;
this.handleData(data);
return;
}
switch (c0) {
case '+':
/*
* possible incoming error codes STREAM_ERR_TIMEOUT 0 STREAM_ERR_WINDOW_LIMIT 1
* STREAM_ERR_DUPLICATE_PAGE 2 STREAM_ERR_CLIENT_VERSION 3 STREAM_ERR_INVALID_SID 4
* STREAM_ERR_NOT_AVAILABLE 5 STREAM_ERR_NOT_AUTHENTICATED 6
*
* do not die on a 5! all the others should die.
*/
var error = parseInt(data.substring(1, data.indexOf(":")), 10);
console.error(error);
IPC.currentClient._feed.onError(error);
break;
case 'i':
IPC.currentClient.add({'_id': -3});
break;
case 'p':
IPC.currentClient.add({'_id': -2});
break;
}
}
/**
* @param {string} data
*/
IPC.prototype.handleData = function(data) {
var farray = data.substring(1).split('~');
if (farray.length < 3)
return;
if (IPC.currentClient)
IPC.currentClient.add({'_id': parseInt(farray[0], 10), '_contents': farray[1], '_flags': parseInt(farray[2], 10)});
}
/**
* @param {string} tag
*/
IPC.prototype.getClient = function(tag) {
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._state === IPC_Client.STATE_LOGIN && client._tag === tag) {
return client;
}
}
}
IPC.prototype.socketConnect = function() {
if (IPC.master && IPC.master._socket && IPC.master._socket.readyState <= 1) {
IPC.connected = true;
this._numRetries = 0;
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._state !== IPC_Client.STATE_LOGOUT) {
client._state = IPC_Client.STATE_START;
}
}
return true;
}
var self = this;
this._numRetries++;
if (this._numRetries > IPC.MAX_CONNECTION_RETRIES) {
// todo: show alert for reconnect
this._numRetries = 0;
return false;
}
if (!IPC.master)
return;
IPC.master._socket = new WebSocket(Main.getWebSocketURL());
console.log("Feed. Socket connecting...");
IPC.master._socket.onopen = function() {
console.log("Feed. Socket opened.");
IPC.ipcTag = new Date().getTime();
this.send(JSON.stringify({'type': 'stream_request', 'ipc_tag': IPC.ipcTag}));
}
IPC.master._socket.onmessage = function(event) {
// console.log("IPC. Received data: " + event.data);
if (event.data === 'STREAM') {
IPC.connected = true;
self._numRetries = 0;
} else if (IPC.master) {
IPC.master.processInputBuffer(event.data);
}
if (IPC.master)
IPC.master.processClients();
}
IPC.master._socket.onclose = function(event) {
if (event.wasClean) {
console.log("Feed. Socket closed clean.");
} else {
var reason;
if (event.code == 1000)
reason = "Normal closure, meaning that the purpose for which the connection was established has been fulfilled.";
else if(event.code == 1001)
reason = "An endpoint is \"going away\", such as a server going down or a browser having navigated away from a page.";
else if(event.code == 1002)
reason = "An endpoint is terminating the connection due to a protocol error";
else if(event.code == 1003)
reason = "An endpoint is terminating the connection because it has received a type of data it cannot accept (e.g., an endpoint that understands only text data MAY send this if it receives a binary message).";
else if(event.code == 1004)
reason = "Reserved. The specific meaning might be defined in the future.";
else if(event.code == 1005)
reason = "No status code was actually present.";
else if(event.code == 1006)
reason = "The connection was closed abnormally, e.g., without sending or receiving a Close control frame";
else if(event.code == 1007)
reason = "An endpoint is terminating the connection because it has received data within a message that was not consistent with the type of the message (e.g., non-UTF-8 [http://tools.ietf.org/html/rfc3629] data within a text message).";
else if(event.code == 1008)
reason = "An endpoint is terminating the connection because it has received a message that \"violates its policy\". This reason is given either if there is no other sutible reason, or if there is a need to hide specific details about the policy.";
else if(event.code == 1009)
reason = "An endpoint is terminating the connection because it has received a message that is too big for it to process.";
else if(event.code == 1010) // Note that this status code is not used by the server, because it can fail the WebSocket handshake instead.
reason = "An endpoint (client) is terminating the connection because it has expected the server to negotiate one or more extension, but the server didn't return them in the response message of the WebSocket handshake. <br /> Specifically, the extensions that are needed are: " + event.reason;
else if(event.code == 1011)
reason = "A server is terminating the connection because it encountered an unexpected condition that prevented it from fulfilling the request.";
else if(event.code == 1015)
reason = "The connection was closed due to a failure to perform a TLS handshake (e.g., the server certificate can't be verified).";
else
reason = "Unknown reason";
console.log("Feed. Socket closed with error:", reason);
setTimeout(function() {
console.log("Feed. Socket restart.");
self.socketConnect();
}, 3000);
}
Main.getSession()._root._statusLights.changeStatus(StatusLight.SL_NODATA);
}
return true;
}
IPC.prototype._disconnect = function() {
if (IPC.master && IPC.master._socket) {
console.log("Feed. Socket disconnect.", IPC.master._socket.readyState);
IPC.master._socket.close(1000);
}
IPC.connected = false;
if (IPC.master)
IPC.master._socket = undefined;
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._state !== IPC_Client.STATE_LOGOUT) {
client._state = IPC_Client.STATE_START;
}
}
this.postDisconnectClients();
}
IPC.prototype.postConnectClients = function() {
if (IPC.connected)
return;
for (var i = 0; i < IPC.clients.length; i++) {
IPC.clients[i]._feed.handleConnect();
}
}
IPC.prototype.postDisconnectClients = function() {
if (!IPC.connected)
return;
for (var i = 0; i < IPC.clients.length; i++) {
IPC.clients[i]._feed.handleDisconnect();
}
}
/** @static */
IPC.MAX_CONNECTION_RETRIES = 4;
/** @static */
IPC.CONNECTION_TIMEOUT = 5000;
/** @static */
IPC.MAX_CLIENTS = 90;
/** @static */
IPC.masterId = 1;
/** @static */
IPC.nextClientId = 1;
/** @static */
IPC.connected = false;
/** @static */
IPC.clients = [];
/**
* @static
* @param {Feed} feed
*/
IPC.register = function(feed) {
if (!IPC.master) {
IPC.master = new IPC(IPC.masterId++);
}
// console.log("clients length", this.clients.length);
var client;
for (var i = 0; i < IPC.clients.length; i++) {
var c = IPC.clients[i];
if (c._feed && c._feed._name === feed._name) {
if (c._data === feed._dataBlock && c._state !== IPC_Client.STATE_LOGOUT) {
client = c;
} else {
c._feed.stop(c._data);
}
}
}
if (!client) {
// new one
if (IPC.clients.length === IPC.MAX_CLIENTS) {
console.warn("Feed. Max clients.")
return -1;
}
client = new IPC_Client(IPC.nextClientId++, feed);
IPC.clients.push(client);
console.log("Feed. Register client for " + client._feed._name + ":", client._id, client._feed._dataBlock);
}
IPC.master.run();
return client._id;
}
/**
* @static
* @param {string} feed_name
* @param {string} dataBlock
*/
IPC.unregister = function(feed_name, dataBlock) {
for (var i = 0; i < IPC.clients.length; i++) {
var client = IPC.clients[i];
if (client._feed && client._feed._name === feed_name && client._data === dataBlock) {
if (client === IPC.currentClient) {
IPC.currentClient = undefined;
}
client._state = IPC_Client.STATE_LOGOUT; | * @static
* @param {IPC_Client} client
*/
IPC.clientLogin = function(client) {
client._tag = IPC.ipcTag + "," + client._id;
var str = JSON.stringify({'type': 'subscribe', 'client_id': client._id, 'user': IPC.userName, 'sid': IPC.sid, 'page_key': Main.getParams()["page_key"], 'app': client._feed._name, 'ipc_tag': IPC.ipcTag, 'request': client._feed._dataBlock});
IPC.master._socket.send(str);
client._state = IPC_Client.STATE_LOGIN;
}
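// Subscribe frame sketch (values illustrative; IPC.userName and IPC.sid are assumed to be
// set elsewhere before login):
//   {"type":"subscribe","client_id":7,"user":"...","sid":"...","page_key":"...",
//    "app":"Chart","ipc_tag":1500000000000,"request":"<dataBlock>"}
// The client is tagged "<ipc_tag>,<client_id>" so that '$' frames can route rows back to it.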
/**
* @static
* @param {IPC_Client} client
*/
IPC.clientLogout = function(client) {
if (IPC.currentClient && client._id === IPC.currentClient._id) {
IPC.currentClient = undefined;
}
var str = JSON.stringify({type: 'unsubscribe', client_id: client._id, ipc_tag: IPC.ipcTag});
if (IPC.master._socket.readyState === 1) {
IPC.master._socket.send(str);
console.log("Feed. Unregister client", client._id);
client._feed = undefined;
var idx;
for (idx = 0; idx < IPC.clients.length; idx++) {
var c = IPC.clients[idx];
if (c._id === client._id)
break;
}
if (idx < IPC.clients.length)
IPC.clients.splice(idx, 1);
}
}
/**
* ----------
* IPC_Client
* ----------
* @constructor
* @param {number} id
* @param {Feed} feed
*/
function IPC_Client(id, feed) {
this._id = id;
this._data = feed._dataBlock;
this._feed = feed;
this._feedItems = [];
this._state = IPC_Client.STATE_START;
}
/**
* @param {Array} fi
*/
IPC_Client.prototype.add = function(fi) {
this._feedItems.push(fi);
if (this._feedItems.length > 5000) {
console.warn("Feed client not flushing data.", this._feedItems.length);
this.flush();
}
}
IPC_Client.prototype.flush = function() {
this._feed.add(this._feedItems);
this._feedItems = [];
}
/** @static */
IPC_Client.STATE_START = 0;
/** @static */
IPC_Client.STATE_LOGIN = 1;
/** @static */
IPC_Client.STATE_LOGOUT = 2; | }
}
}
/** | random_line_split |
ejob_soa.py | # -*- coding: utf-8 -*-
from odoo import fields, models, api, tools, _
from odoo.exceptions import UserError
#Cashier Management
class ejob_cashier(models.Model):
_name = 'ejob.cashier'
_description = 'Customer SOA'
_order = "id desc"
@api.depends('orders')
def _compute_charges(self):
if not self.ids:
#Update calculated fields
self.update({
'total_items_count': 0,
'total_services_fee': 0.0,
'total_discount': 0.0,
})
return True
for payment in self:
total_items_count = 0
total_services_fee = 0.0
total_discount = 0.0
#raise UserError('Debug: %s' % (list(table.pos_order_ids)))
if payment.orders:
for order in payment.orders:
for orderline in order.services:
#if chargeline.state == 'Done':
total_items_count += 1
total_services_fee += orderline.sub_total
total_discount += orderline.discount_amount
payment.update({
'total_items_count': total_items_count,
'total_services_fee': total_services_fee,
'total_discount': total_discount,
})
@api.depends('total_services_fee','total_paid')
def _compute_balance(self):
for payment in self:
if payment.payment_type == 'Down Payment':
payment.balance = 0
elif payment.payment_type == 'A/R Payment':
payment.balance = 0
else:
payment.balance = payment.total_services_fee - payment.total_paid
@api.model
def _default_currency(self):
return self.env.user.company_id.currency_id or None
@api.model
def _get_pricelist(self):
return self.partner_id.property_product_pricelist # and table.partner_id.property_product_pricelist.id or None
name = fields.Char('Payment Ref#')
partner_id = fields.Many2one('res.partner',string='Customer')
payment_date = fields.Date('Payment Date', default=fields.Date.today())
pricelist_id = fields.Many2one('product.pricelist', string='Pricelist', default=_get_pricelist)
currency_id = fields.Many2one('res.currency', string='Currency', default= _default_currency)
total_items_count = fields.Integer(compute='_compute_charges',string="Number of Charged Items")
orders = fields.One2many('e.job.orders','payment_id',string='Orders')
payments = fields.One2many('account.payment','payment_id',string='Payments')
invoices = fields.One2many('account.invoice', 'payment_id', string='Customer Invoices')
total_services_fee = fields.Float(compute='_compute_charges', string='Total Services')
total_discount = fields.Float(compute='_compute_charges', string='Total Discount')
total_paid = fields.Float(compute='_compute_amt_paid', string='Total Paid', digits=(12,2))
total_down_payments = fields.Float('Total Payments', default=0.0)
balance = fields.Float(compute='_compute_balance', string='Balance')
payment_type = fields.Selection([
('Cash','Cash'),
('Charge Slip','Charge Slip'),
('Down Payment','Down Payment'),
('A/R Payment','A/R Payment')],'Payment Type', readonly=True, default='Cash')
payment_due = fields.Date('Payment Due')
user_id = fields.Many2one('res.users', string='Cashier', required=True, readonly=True, default=lambda self: self.env.uid)
company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True, default=lambda self: self.env.user.company_id)
state = fields.Selection([
('new','New'),
('inv','Invoiced'),
('partial','Partial-Paid'),
('paid','Paid')], 'Status', default='new')
@api.depends('payments')
def _compute_amt_paid(self):
if not self.ids:
#Update calculated fields
self.update({
'total_paid': 0.0,
})
return True
for payment in self:
total_paid = 0.0
#raise UserError('Debug: %s' % (list(table.pos_order_ids)))
if payment.payments:
for pay in payment.payments:
total_paid += pay.amount
#Update calculated fields
payment.update({
'total_paid': total_paid,
})
@api.depends('orders')
def _compute_total_services(self):
for rec in self:
charge_total = 0.0
for line in rec.charge_line_ids:
charge_total += line.net
rec.charge_total = rec.currency_id.round(charge_total)
@api.multi
def create_invoice(self):
payment_id = self.id
payment_name = 'ORD ID: ' + self.name
invoice_name = self.env['ir.sequence'].next_by_code('ejob.invoice.id')
customer = self.partner_id.id
customer_name = self.partner_id.name
ar_account_id = self.partner_id.property_account_receivable_id and self.partner_id.property_account_receivable_id.id or False
if not ar_account_id:
raise UserError('Customer A/R account error! Please check the default receivable account of the customer.')
fiscal_position_id = self.partner_id.property_account_position_id and self.partner_id.property_account_position_id.id or False
invoice_obj = self.env['account.invoice']
journal_obj = self.env['account.journal']
ctr = 0
success = False
for orders in self.orders:
invoice_lines = []
invoice_discs = {}
#Change to appropriate journal
#journal = journal_obj.search([('charge_type','=',charges.charge_type)])
#if journal:
# journal_id = journal[0].id
#else:
# raise UserError('The Charge %s (%s) does not have a journal specified' % (charges.name,charges.charge_type))
journal_id = 1
#Process item invoices
for order_line in orders.services:
#if charge_line.state == 'draft':
if order_line.product_id:
account_id = order_line.product_id.property_account_income_id.id or order_line.product_id.categ_id.property_account_income_categ_id.id
if not account_id:
raise UserError(
_('There is no income account defined for this product: "%s". You may have to install a chart of account from Accounting app, settings menu.') % \
order_line.product_id.name)
unit_price = order_line.price_unit #- order_line.discount_amount
tax_ids = [(6, 0, [x.id for x in order_line.product_id.taxes_id])]
invoice_line = {
'name': order_line.name,
'origin': payment_name + ' [' + order_line.name + ']',
'account_id': account_id,
'price_unit': unit_price,
'quantity': order_line.qty,
'discount': order_line.discount_percentage,
'uom_id': order_line.product_id.uom_id.id,
'product_id': order_line.product_id.id,
'invoice_line_tax_ids': tax_ids,
'account_analytic_id': False,
}
invoice_lines.append(invoice_line)
#Generate Invoice
if len(invoice_lines) > 0:
invoice = {
'date_invoice': self.payment_date, #fields.Datetime.now(),
'name': invoice_name,
'origin': payment_name,
'type': 'out_invoice',
'reference': invoice_name + '-' + payment_name,
'account_id': ar_account_id,
'partner_id': customer,
'currency_id': self.currency_id.id,
'journal_id': journal_id,
'payment_term_id': False,
'fiscal_position_id': fiscal_position_id,
'comment': 'Charges for: ' + customer_name,
'company_id': self.env.user.company_id.id,
'user_id': self.env.user.id,
'payment_id': payment_id,
}
inv_lines = []
#Generate Invoice Lines
for invline in invoice_lines:
inv_lines.append((0, 0, invline))
invoice.update({'invoice_line_ids': inv_lines})
#Create the Invoice
invoice = invoice_obj.create(invoice)
invoice.compute_taxes()
success = True
#Update the Charge status to Invoiced
#charges.update({'state':'done'})
if success:
self.env.user.notify_info('Invoice created.',title='Invoice Generation', sticky=False)
self.update({'state':'inv'})
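# Assumption (inferred from the loop above): every order in `orders` exposes a `services`
# collection whose lines provide product_id, name, price_unit, qty, discount_percentage,
# sub_total and discount_amount, and each product must have an income account configured,
# otherwise create_invoice() raises UserError.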
@api.multi
def cancel_invoice(self):
for rec in self:
if rec.invoices:
|
else:
raise UserError('Error! There are no invoices generated for these charges.')
class custom_account_invoice(models.Model):
_inherit = "account.invoice"
payment_id = fields.Many2one('ejob.cashier', string="Payment Record")
date_invoice = fields.Date(string='Invoice Date',
readonly=True, states={'draft': [('readonly', False)],'open': [('readonly', False)]}, index=True,
help="Keep empty to use the current date", copy=False)
class custom_account_payment(models.Model):
_inherit = "account.payment"
payment_id = fields.Many2one('ejob.cashier', string="Payment Record")
invoice_ref = fields.Char (string='Invoice Reference')
#tax_amt = fields.Float(string='Tax Amount', readonly=True, digits=(10,2))
#vatable_amt = fields.Float(string='VATable Sales', readonly=True, digits=(10,2))
#chk_payment_date = fields.Date (string='Check Date')
#chk_payment_bank = fields.Many2one('estl.ref.banks',string='Bank')
#chk_payment_branch = fields.Many2one('estl.ref.bank.branches',string='Branch')
#chk_payment_checkno = fields.Char(string='Check/Card No.',size=30)
#curr_bal = fields.Monetary(string='Current Balance', readonly=True)
#balance = fields.Monetary(string='Balance', store=False, readonly=True, compute='_compute_balance')
| for invoice in rec.invoices:
invoice.unlink()
rec.update({'state':'new'})
#Reset all Charges to draft
#for order in rec.orders:
# order.update({'state':'draft'})
self.env.user.notify_info('All generated invoices are cancelled.',title='Invoice Cancellation', sticky=False) | conditional_block |
ejob_soa.py | # -*- coding: utf-8 -*-
from odoo import fields, models, api, tools, _
from odoo.exceptions import UserError
#Cashier Management
class | (models.Model):
_name = 'ejob.cashier'
_description = 'Customer SOA'
_order = "id desc"
@api.depends('orders')
def _compute_charges(self):
if not self.ids:
#Update calculated fields
self.update({
'total_items_count': 0,
'total_services_fee': 0.0,
'total_discount': 0.0,
})
return True
for payment in self:
total_items_count = 0
total_services_fee = 0.0
total_discount = 0.0
#raise UserError('Debug: %s' % (list(table.pos_order_ids)))
if payment.orders:
for order in payment.orders:
for orderline in order.services:
#if chargeline.state == 'Done':
total_items_count += 1
total_services_fee += orderline.sub_total
total_discount += orderline.discount_amount
payment.update({
'total_items_count': total_items_count,
'total_services_fee': total_services_fee,
'total_discount': total_discount,
})
@api.depends('total_services_fee','total_paid')
def _compute_balance(self):
for payment in self:
if payment.payment_type == 'Down Payment':
payment.balance = 0
elif payment.payment_type == 'A/R Payment':
payment.balance = 0
else:
payment.balance = payment.total_services_fee - payment.total_paid
@api.model
def _default_currency(self):
return self.env.user.company_id.currency_id or None
@api.model
def _get_pricelist(self):
return self.partner_id.property_product_pricelist # and table.partner_id.property_product_pricelist.id or None
name = fields.Char('Payment Ref#')
partner_id = fields.Many2one('res.partner',string='Customer')
payment_date = fields.Date('Payment Date', default=fields.Date.today())
pricelist_id = fields.Many2one('product.pricelist', string='Pricelist', default=_get_pricelist)
currency_id = fields.Many2one('res.currency', string='Currency', default= _default_currency)
total_items_count = fields.Integer(compute='_compute_charges',string="Number of Charged Items")
orders = fields.One2many('e.job.orders','payment_id',string='Orders')
payments = fields.One2many('account.payment','payment_id',string='Payments')
invoices = fields.One2many('account.invoice', 'payment_id', string='Customer Invoices')
total_services_fee = fields.Float(compute='_compute_charges', string='Total Services')
total_discount = fields.Float(compute='_compute_charges', string='Total Discount')
total_paid = fields.Float(compute='_compute_amt_paid', string='Total Paid', digits=(12,2))
total_down_payments = fields.Float('Total Payments', default=0.0)
balance = fields.Float(compute='_compute_balance', string='Balance')
payment_type = fields.Selection([
('Cash','Cash'),
('Charge Slip','Charge Slip'),
('Down Payment','Down Payment'),
('A/R Payment','A/R Payment')],'Payment Type', readonly=True, default='Cash')
payment_due = fields.Date('Payment Due')
user_id = fields.Many2one('res.users', string='Cashier', required=True, readonly=True, default=lambda self: self.env.uid)
company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True, default=lambda self: self.env.user.company_id)
state = fields.Selection([
('new','New'),
('inv','Invoiced'),
('partial','Partial-Paid'),
('paid','Paid')], 'Status', default='new')
@api.depends('payments')
def _compute_amt_paid(self):
if not self.ids:
#Update calculated fields
self.update({
'total_paid': 0.0,
})
return True
for payment in self:
total_paid = 0.0
#raise UserError('Debug: %s' % (list(table.pos_order_ids)))
if payment.payments:
for pay in payment.payments:
total_paid += pay.amount
#Update calculated fields
payment.update({
'total_paid': total_paid,
})
@api.depends('orders')
def _compute_total_services(self):
for rec in self:
charge_total = 0.0
for line in rec.charge_line_ids:
charge_total += line.net
rec.charge_total = rec.currency_id.round(charge_total)
@api.multi
def create_invoice(self):
payment_id = self.id
payment_name = 'ORD ID: ' + self.name
invoice_name = self.env['ir.sequence'].next_by_code('ejob.invoice.id')
customer = self.partner_id.id
customer_name = self.partner_id.name
ar_account_id = self.partner_id.property_account_receivable_id and self.partner_id.property_account_receivable_id.id or False
if not ar_account_id:
raise UserError('Customer A/R account error! Please check the default receivable account of the customer.')
fiscal_position_id = self.partner_id.property_account_position_id and self.partner_id.property_account_position_id.id or False
invoice_obj = self.env['account.invoice']
journal_obj = self.env['account.journal']
ctr = 0
success = False
for orders in self.orders:
invoice_lines = []
invoice_discs = {}
#Change to appropriate journal
#journal = journal_obj.search([('charge_type','=',charges.charge_type)])
#if journal:
# journal_id = journal[0].id
#else:
# raise UserError('The Charge %s (%s) does not have a journal specified' % (charges.name,charges.charge_type))
journal_id = 1
#Process item invoices
for order_line in orders.services:
#if charge_line.state == 'draft':
if order_line.product_id:
account_id = order_line.product_id.property_account_income_id.id or order_line.product_id.categ_id.property_account_income_categ_id.id
if not account_id:
raise UserError(
_('There is no income account defined for this product: "%s". You may have to install a chart of account from Accounting app, settings menu.') % \
order_line.product_id.name)
unit_price = order_line.price_unit #- order_line.discount_amount
tax_ids = [(6, 0, [x.id for x in order_line.product_id.taxes_id])]
invoice_line = {
'name': order_line.name,
'origin': payment_name + ' [' + order_line.name + ']',
'account_id': account_id,
'price_unit': unit_price,
'quantity': order_line.qty,
'discount': order_line.discount_percentage,
'uom_id': order_line.product_id.uom_id.id,
'product_id': order_line.product_id.id,
'invoice_line_tax_ids': tax_ids,
'account_analytic_id': False,
}
invoice_lines.append(invoice_line)
#Generate Invoice
if len(invoice_lines) > 0:
invoice = {
'date_invoice': self.payment_date, #fields.Datetime.now(),
'name': invoice_name,
'origin': payment_name,
'type': 'out_invoice',
'reference': invoice_name + '-' + payment_name,
'account_id': ar_account_id,
'partner_id': customer,
'currency_id': self.currency_id.id,
'journal_id': journal_id,
'payment_term_id': False,
'fiscal_position_id': fiscal_position_id,
'comment': 'Charges for: ' + customer_name,
'company_id': self.env.user.company_id.id,
'user_id': self.env.user.id,
'payment_id': payment_id,
}
inv_lines = []
#Generate Invoice Lines
for invline in invoice_lines:
inv_lines.append((0, 0, invline))
invoice.update({'invoice_line_ids': inv_lines})
#Create the Invoice
invoice = invoice_obj.create(invoice)
invoice.compute_taxes()
success = True
#Update the Charge status to Invoiced
#charges.update({'state':'done'})
if success:
self.env.user.notify_info('Invoice created.',title='Invoice Generation', sticky=False)
self.update({'state':'inv'})
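# Rough usage sketch (hedged: record values are illustrative, field names come from this
# model only):
#   soa = env['ejob.cashier'].create({'name': 'PAY/0001', 'partner_id': partner.id})
#   soa.create_invoice()   # one customer invoice per order; state moves to 'inv'
#   # payments created with payment_id = soa.id show up in soa.payments and reduce balance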
@api.multi
def cancel_invoice(self):
for rec in self:
if rec.invoices:
for invoice in rec.invoices:
invoice.unlink()
rec.update({'state':'new'})
#Reset all Charges to draft
#for order in rec.orders:
# order.update({'state':'draft'})
self.env.user.notify_info('All generated invoices are cancelled.',title='Invoice Cancellation', sticky=False)
else:
raise UserError('Error! There are no invoices generated for these charges.')
class custom_account_invoice(models.Model):
_inherit = "account.invoice"
payment_id = fields.Many2one('ejob.cashier', string="Payment Record")
date_invoice = fields.Date(string='Invoice Date',
readonly=True, states={'draft': [('readonly', False)],'open': [('readonly', False)]}, index=True,
help="Keep empty to use the current date", copy=False)
class custom_account_payment(models.Model):
_inherit = "account.payment"
payment_id = fields.Many2one('ejob.cashier', string="Payment Record")
invoice_ref = fields.Char (string='Invoice Reference')
#tax_amt = fields.Float(string='Tax Amount', readonly=True, digits=(10,2))
#vatable_amt = fields.Float(string='VATable Sales', readonly=True, digits=(10,2))
#chk_payment_date = fields.Date (string='Check Date')
#chk_payment_bank = fields.Many2one('estl.ref.banks',string='Bank')
#chk_payment_branch = fields.Many2one('estl.ref.bank.branches',string='Branch')
#chk_payment_checkno = fields.Char(string='Check/Card No.',size=30)
#curr_bal = fields.Monetary(string='Current Balance', readonly=True)
#balance = fields.Monetary(string='Balance', store=False, readonly=True, compute='_compute_balance')
| ejob_cashier | identifier_name |
ejob_soa.py | # -*- coding: utf-8 -*-
from odoo import fields, models, api, tools, _
from odoo.exceptions import UserError
#Cashier Management
class ejob_cashier(models.Model):
_name = 'ejob.cashier'
_description = 'Customer SOA'
_order = "id desc"
@api.depends('orders')
def _compute_charges(self):
if not self.ids:
#Update calculated fields
self.update({
'total_items_count': 0,
'total_services_fee': 0.0,
'total_discount': 0.0,
})
return True
for payment in self:
total_items_count = 0
total_services_fee = 0.0
total_discount = 0.0
#raise UserError('Debug: %s' % (list(table.pos_order_ids)))
if payment.orders:
for order in payment.orders:
for orderline in order.services:
#if chargeline.state == 'Done':
total_items_count += 1
total_services_fee += orderline.sub_total
total_discount += orderline.discount_amount
payment.update({
'total_items_count': total_items_count,
'total_services_fee': total_services_fee,
'total_discount': total_discount,
})
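#Down payments and A/R payments carry no outstanding balance; all other payment types owe total services less total paid.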
@api.depends('total_services_fee','total_paid')
def _compute_balance(self):
for payment in self:
if payment.payment_type == 'Down Payment':
payment.balance = 0
elif payment.payment_type == 'A/R Payment':
payment.balance = 0
else:
payment.balance = payment.total_services_fee - payment.total_paid
@api.model
def _default_currency(self):
return self.env.user.company_id.currency_id or None
@api.model
def _get_pricelist(self):
return self.partner_id.property_product_pricelist # and table.partner_id.property_product_pricelist.id or None
name = fields.Char('Payment Ref#')
partner_id = fields.Many2one('res.partner',string='Customer')
payment_date = fields.Date('Payment Date', default=fields.Date.today)
pricelist_id = fields.Many2one('product.pricelist', string='Pricelist', default=_get_pricelist)
currency_id = fields.Many2one('res.currency', string='Currency', default= _default_currency)
total_items_count = fields.Integer(compute='_compute_charges',string="Number of Charged Items")
orders = fields.One2many('e.job.orders','payment_id',string='Orders')
payments = fields.One2many('account.payment','payment_id',string='Payments')
invoices = fields.One2many('account.invoice', 'payment_id', string='Customer Invoices')
total_services_fee = fields.Float(compute='_compute_charges', string='Total Services')
total_discount = fields.Float(compute='_compute_charges', string='Total Discount')
total_paid = fields.Float(compute='_compute_amt_paid', string='Total Paid', digits=(12,2))
total_down_payments = fields.Float('Total Payments', default=0.0)
balance = fields.Float(compute='_compute_balance', string='Balance')
payment_type = fields.Selection([
('Cash','Cash'),
('Charge Slip','Charge Slip'),
('Down Payment','Down Payment'),
('A/R Payment','A/R Payment')],'Payment Type', readonly=True, default='Cash')
payment_due = fields.Date('Payment Due')
user_id = fields.Many2one('res.users', string='Cashier', required=True, readonly=True, default=lambda self: self.env.uid)
company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True, default=lambda self: self.env.user.company_id)
state = fields.Selection([
('new','New'),
('inv','Invoiced'),
('partial','Partial-Paid'),
('paid','Paid')], 'Status', default='new')
@api.depends('payments')
def _compute_amt_paid(self):
if not self.ids:
#Update calculated fields
self.update({
'total_paid': 0.0,
})
return True
for payment in self:
total_paid = 0.0
#raise UserError('Debug: %s' % (list(table.pos_order_ids)))
if payment.payments: | #Update calculated fields
payment.update({
'total_paid': total_paid,
})
@api.depends('orders')
def _compute_total_services(self):
for rec in self:
service_total = 0.0
for line in rec.charge_line_ids:
service_total += line.net
rec.charge_total = rec.currency_id.round(service_total)
@api.multi
def create_invoice(self):
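#Build one customer invoice per order: collect an invoice line for every service that has a product, create the account.invoice, then compute its taxes.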
payment_id = self.id
payment_name = 'ORD ID: ' + self.name
invoice_name = self.env['ir.sequence'].next_by_code('ejob.invoice.id')
customer = self.partner_id.id
customer_name = self.partner_id.name
ar_account_id = self.partner_id.property_account_receivable_id and self.partner_id.property_account_receivable_id.id or False
if not ar_account_id:
raise UserError('Customer A/R account error! Please check the default receivable account of the customer.')
fiscal_position_id = self.partner_id.property_account_position_id and self.partner_id.property_account_position_id.id or False
invoice_obj = self.env['account.invoice']
journal_obj = self.env['account.journal']
ctr = 0
success = False
for orders in self.orders:
invoice_lines = []
invoice_discs = {}
#Change to appropriate journal
#journal = journal_obj.search([('charge_type','=',charges.charge_type)])
#if journal:
# journal_id = journal[0].id
#else:
# raise UserError('The Charge %s (%s) does not have a journal specified' % (charges.name,charges.charge_type))
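#Fallback while the journal lookup above is commented out: use a hard-coded journal id.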
journal_id = 1
#Process item invoices
for order_line in orders.services:
#if charge_line.state == 'draft':
if order_line.product_id:
account_id = order_line.product_id.property_account_income_id.id or order_line.product_id.categ_id.property_account_income_categ_id.id
if not account_id:
raise UserError(
_('There is no income account defined for this product: "%s". You may have to install a chart of account from Accounting app, settings menu.') % \
order_line.product_id.name)
unit_price = order_line.price_unit #- order_line.discount_amount
tax_ids = [(6, 0, [x.id for x in order_line.product_id.taxes_id])]
invoice_line = {
'name': order_line.name,
'origin': payment_name + ' [' + order_line.name + ']',
'account_id': account_id,
'price_unit': unit_price,
'quantity': order_line.qty,
'discount': order_line.discount_percentage,
'uom_id': order_line.product_id.uom_id.id,
'product_id': order_line.product_id.id,
'invoice_line_tax_ids': tax_ids,
'account_analytic_id': False,
}
invoice_lines.append(invoice_line)
#Generate Invoice
if len(invoice_lines) > 0:
invoice = {
'date_invoice': self.payment_date, #fields.Datetime.now(),
'name': invoice_name,
'origin': payment_name,
'type': 'out_invoice',
'reference': invoice_name + '-' + payment_name,
'account_id': ar_account_id,
'partner_id': customer,
'currency_id': self.currency_id.id,
'journal_id': journal_id,
'payment_term_id': False,
'fiscal_position_id': fiscal_position_id,
'comment': 'Charges for: ' + customer_name,
'company_id': self.env.user.company_id.id,
'user_id': self.env.user.id,
'payment_id': payment_id,
}
inv_lines = []
#Generate Invoice Lines
for invline in invoice_lines:
inv_lines.append((0, 0, invline))
invoice.update({'invoice_line_ids': inv_lines})
#Create the Invoice
invoice = invoice_obj.create(invoice)
invoice.compute_taxes()
success = True
#Update the Charge status to Invoiced
#charges.update({'state':'done'})
if success:
self.env.user.notify_info('Invoice created.',title='Invoice Generation', sticky=False)
self.update({'state':'inv'})
@api.multi
def cancel_invoice(self):
for rec in self:
if rec.invoices:
for invoice in rec.invoices:
invoice.unlink()
rec.update({'state':'new'})
#Reset all Charges to draft
#for order in rec.orders:
# order.update({'state':'draft'})
self.env.user.notify_info('All generated invoices are cancelled.',title='Invoice Cancellation', sticky=False)
else:
raise UserError('Error! There are no invoices generated for these charges.')
class custom_account_invoice(models.Model):
_inherit = "account.invoice"
payment_id = fields.Many2one('e.job.orders', string="Payment Record")
date_invoice = fields.Date(string='Invoice Date',
readonly=True, states={'draft': [('readonly', False)],'open': [('readonly', False)]}, index=True,
help="Keep empty to use the current date", copy=False)
class custom_account_payment(models.Model):
_inherit = "account.payment"
payment_id = fields.Many2one('e.job.orders', string="Payment Record")
invoice_ref = fields.Char(string='Invoice Reference')
#tax_amt = fields.Float(string='Tax Amount', readonly=True, digits=(10,2))
#vatable_amt = fields.Float(string='VATable Sales', readonly=True, digits=(10,2))
#chk_payment_date = fields.Date (string='Check Date')
#chk_payment_bank = fields.Many2one('estl.ref.banks',string='Bank')
#chk_payment_branch = fields.Many2one('estl.ref.bank.branches',string='Branch')
#chk_payment_checkno = fields.Char(string='Check/Card No.',size=30)
#curr_bal = fields.Monetary(string='Current Balance', readonly=True)
#balance = fields.Monetary(string='Balance', store=False, readonly=True, compute='_compute_balance') | for pay in payment.payments:
total_paid += pay.amount
| random_line_split |
ejob_soa.py | # -*- coding: utf-8 -*-
from odoo import fields, models, api, tools, _
from odoo.exceptions import UserError
#Cashier Management
class ejob_cashier(models.Model):
_name = 'ejob.cashier'
_description = 'Customer SOA'
_order = "id desc"
@api.depends('orders')
def _compute_charges(self):
if not self.ids:
#Update calculated fields
self.update({
'total_items_count': 0,
'total_services_fee': 0.0,
'total_discount': 0.0,
})
return True
for payment in self:
total_items_count = 0
total_services_fee = 0.0
total_discount = 0.0
#raise UserError('Debug: %s' % (list(table.pos_order_ids)))
if payment.orders:
for order in payment.orders:
for orderline in order.services:
#if chargeline.state == 'Done':
total_items_count += 1
total_services_fee += orderline.sub_total
total_discount += orderline.discount_amount
payment.update({
'total_items_count': total_items_count,
'total_services_fee': total_services_fee,
'total_discount': total_discount,
})
@api.depends('total_services_fee','total_paid')
def _compute_balance(self):
|
@api.model
def _default_currency(self):
return self.env.user.company_id.currency_id or None
@api.model
def _get_pricelist(self):
return self.partner_id.property_product_pricelist # and table.partner_id.property_product_pricelist.id or None
name = fields.Char('Payment Ref#')
partner_id = fields.Many2one('res.partner',string='Customer')
payment_date = fields.Date('Payment Date', default=fields.Date.today)
pricelist_id = fields.Many2one('product.pricelist', string='Pricelist', default=_get_pricelist)
currency_id = fields.Many2one('res.currency', string='Currency', default= _default_currency)
total_items_count = fields.Integer(compute='_compute_charges',string="Number of Charged Items")
orders = fields.One2many('e.job.orders','payment_id',string='Orders')
payments = fields.One2many('account.payment','payment_id',string='Payments')
invoices = fields.One2many('account.invoice', 'payment_id', string='Customer Invoices')
total_services_fee = fields.Float(compute='_compute_charges', string='Total Services')
total_discount = fields.Float(compute='_compute_charges', string='Total Discount')
total_paid = fields.Float(compute='_compute_amt_paid', string='Total Paid', digits=(12,2))
total_down_payments = fields.Float('Total Payments', default=0.0)
balance = fields.Float(compute='_compute_balance', string='Balance')
payment_type = fields.Selection([
('Cash','Cash'),
('Charge Slip','Charge Slip'),
('Down Payment','Down Payment'),
('A/R Payment','A/R Payment')],'Payment Type', readonly=True, default='Cash')
payment_due = fields.Date('Payment Due')
user_id = fields.Many2one('res.users', string='Cashier', required=True, readonly=True, default=lambda self: self.env.uid)
company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True, default=lambda self: self.env.user.company_id)
state = fields.Selection([
('new','New'),
('inv','Invoiced'),
('partial','Partial-Paid'),
('paid','Paid')], 'Status', default='new')
@api.depends('payments')
def _compute_amt_paid(self):
if not self.ids:
#Update calculated fields
self.update({
'total_paid': 0.0,
})
return True
for payment in self:
total_paid = 0.0
#raise UserError('Debug: %s' % (list(table.pos_order_ids)))
if payment.payments:
for pay in payment.payments:
total_paid += pay.amount
#Update calculated fields
payment.update({
'total_paid': total_paid,
})
@api.depends('orders')
def _compute_total_services(self):
for rec in self:
service_total = 0.0
for line in rec.charge_line_ids:
service_total += line.net
rec.charge_total = rec.currency_id.round(service_total)
@api.multi
def create_invoice(self):
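#Build one customer invoice per order: collect an invoice line for every service that has a product, create the account.invoice, then compute its taxes.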
payment_id = self.id
payment_name = 'ORD ID: ' + self.name
invoice_name = self.env['ir.sequence'].next_by_code('ejob.invoice.id')
customer = self.partner_id.id
customer_name = self.partner_id.name
ar_account_id = self.partner_id.property_account_receivable_id and self.partner_id.property_account_receivable_id.id or False
if not ar_account_id:
raise UserError('Customer A/R account error! Please check the default receivable account of the customer.')
fiscal_position_id = self.partner_id.property_account_position_id and self.partner_id.property_account_position_id.id or False
invoice_obj = self.env['account.invoice']
journal_obj = self.env['account.journal']
ctr = 0
success = False
for orders in self.orders:
invoice_lines = []
invoice_discs = {}
#Change to appropriate journal
#journal = journal_obj.search([('charge_type','=',charges.charge_type)])
#if journal:
# journal_id = journal[0].id
#else:
# raise UserError('The Charge %s (%s) does not have a journal specified' % (charges.name,charges.charge_type))
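#Fallback while the journal lookup above is commented out: use a hard-coded journal id.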
journal_id = 1
#Process item invoices
for order_line in orders.services:
#if charge_line.state == 'draft':
if order_line.product_id:
account_id = order_line.product_id.property_account_income_id.id or order_line.product_id.categ_id.property_account_income_categ_id.id
if not account_id:
raise UserError(
_('There is no income account defined for this product: "%s". You may have to install a chart of account from Accounting app, settings menu.') % \
order_line.product_id.name)
unit_price = order_line.price_unit #- order_line.discount_amount
tax_ids = [(6, 0, [x.id for x in order_line.product_id.taxes_id])]
invoice_line = {
'name': order_line.name,
'origin': payment_name + ' [' + order_line.name + ']',
'account_id': account_id,
'price_unit': unit_price,
'quantity': order_line.qty,
'discount': order_line.discount_percentage,
'uom_id': order_line.product_id.uom_id.id,
'product_id': order_line.product_id.id,
'invoice_line_tax_ids': tax_ids,
'account_analytic_id': False,
}
invoice_lines.append(invoice_line)
#Generate Invoice
if len(invoice_lines) > 0:
invoice = {
'date_invoice': self.payment_date, #fields.Datetime.now(),
'name': invoice_name,
'origin': payment_name,
'type': 'out_invoice',
'reference': invoice_name + '-' + payment_name,
'account_id': ar_account_id,
'partner_id': customer,
'currency_id': self.currency_id.id,
'journal_id': journal_id,
'payment_term_id': False,
'fiscal_position_id': fiscal_position_id,
'comment': 'Charges for: ' + customer_name,
'company_id': self.env.user.company_id.id,
'user_id': self.env.user.id,
'payment_id': payment_id,
}
inv_lines = []
#Generate Invoice Lines
for invline in invoice_lines:
inv_lines.append((0, 0, invline))
invoice.update({'invoice_line_ids': inv_lines})
#Create the Invoice
invoice = invoice_obj.create(invoice)
invoice.compute_taxes()
success = True
#Update the Charge status to Invoiced
#charges.update({'state':'done'})
if success:
self.env.user.notify_info('Invoice created.',title='Invoice Generation', sticky=False)
self.update({'state':'inv'})
@api.multi
def cancel_invoice(self):
for rec in self:
if rec.invoices:
for invoice in rec.invoices:
invoice.unlink()
rec.update({'state':'new'})
#Reset all Charges to draft
#for order in rec.orders:
# order.update({'state':'draft'})
self.env.user.notify_info('All generated invoices are cancelled.',title='Invoice Cancellation', sticky=False)
else:
raise UserError('Error! There are no invoices generated for these charges.')
class custom_account_invoice(models.Model):
_inherit = "account.invoice"
payment_id = fields.Many2one('e.job.orders', string="Payment Record")
date_invoice = fields.Date(string='Invoice Date',
readonly=True, states={'draft': [('readonly', False)],'open': [('readonly', False)]}, index=True,
help="Keep empty to use the current date", copy=False)
class custom_account_payment(models.Model):
_inherit = "account.payment"
payment_id = fields.Many2one('e.job.orders', string="Payment Record")
invoice_ref = fields.Char(string='Invoice Reference')
#tax_amt = fields.Float(string='Tax Amount', readonly=True, digits=(10,2))
#vatable_amt = fields.Float(string='VATable Sales', readonly=True, digits=(10,2))
#chk_payment_date = fields.Date (string='Check Date')
#chk_payment_bank = fields.Many2one('estl.ref.banks',string='Bank')
#chk_payment_branch = fields.Many2one('estl.ref.bank.branches',string='Branch')
#chk_payment_checkno = fields.Char(string='Check/Card No.',size=30)
#curr_bal = fields.Monetary(string='Current Balance', readonly=True)
#balance = fields.Monetary(string='Balance', store=False, readonly=True, compute='_compute_balance')
| for payment in self:
if payment.payment_type == 'Down Payment':
payment.balance = 0
elif payment.payment_type == 'A/R Payment':
payment.balance = 0
else:
payment.balance = payment.total_services_fee - payment.total_paid | identifier_body |
extension.ts | // The module 'vscode' contains the VS Code extensibility API
// Import the module and reference it with the alias vscode in your code below
import * as vscode from 'vscode';
import { StructCommandManager } from './struct_command_manager'
import { EditCommandManager } from './edit_command_manager';
import { runTestCasesForC, runTestCasesForPy, test_function } from './tester'
import {runEditTests} from './edit_tester'
import { getUserSpecs } from './user_specs'
const {spawn} = require('child_process');
var code_segments = [""];
var cursor_pos = 0;
var count_lines= [0];
var count_speech = [0];
var manager: StructCommandManager;
var editManager: EditCommandManager;
var microphone = true;
var codeBuffer = "";
var errorFlag = false;
var language = "";
var cwd = "";
var ast_cwd = "";
var cred = "";
var datatypes = ["int", "float", "long", "double", "char"];
// this method is called when your extension is activated
// your extension is activated the very first time the command is executed
export function activate(context: vscode.ExtensionContext) {
// Use the console to output diagnostic information (console.log) and errors (console.error)
// This line of code will only be executed once when your extension is activated
console.log('Congratulations, your extension "talk-to-code" is now active!');
// The command has been defined in the package.json file
// Now provide the implementation of the command with registerCommand
// The commandId parameter must match the command field in package.json
let disposable = vscode.commands.registerCommand('extension.helloWorld', () => {
// The code you place here will be executed every time your command is executed
// Display a message box to the user
vscode.window.showInformationMessage('coding by dictation!');
initUser("lawrence"); /* change here to set new user */
initManager();
listen();
// runEditTests();
// test_function();
// runTestCasesForC();
// runTestCasesForPy();
});
context.subscriptions.push(disposable);
}
function initUser(user: string) {
var userSpecs = getUserSpecs(user);
cwd = userSpecs[0];
cred = userSpecs[1];
ast_cwd = userSpecs[2];
}
function initManager() {
language = "c";
manager = new StructCommandManager(language, true);
editManager = new EditCommandManager(manager,count_lines,count_speech);
}
function listen() {
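// Spawn the speech recognizer as a child process; each transcribed phrase either toggles the microphone, runs an edit command, or is parsed into struct commands.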
displayCode([""]);
// env: {GOOGLE_APPLICATION_CREDENTIALS: cred}
const child = spawn('node', ['speech_recognizer.js'], {shell:true, cwd: cwd});
child.stdout.on('data', (data: string)=>{
let transcribed_word = data.toString().trim();
console.log("TRANSCRIBED WORD: "+transcribed_word);
if (transcribed_word == 'Listening') vscode.window.showInformationMessage('Begin Speaking!');
else if (transcribed_word == "microphone off" || transcribed_word == "sleep" || transcribed_word == "go to sleep") {
microphone = false;
vscode.window.showInformationMessage("microphone asleep");
}
else if (transcribed_word == "microphone on" || transcribed_word == "wake up") {
microphone = true;
vscode.window.showInformationMessage("microphone active");
}
else if (microphone && editManager.check_if_edit_command(transcribed_word)) {
vscode.window.showInformationMessage("You just said the following edit command: " + transcribed_word);
console.log(transcribed_word)
editManager.checkAll(transcribed_word,count_lines);
displayCode(manager.struct_command_list);
console.log(manager.managerStatus());
}
else if (microphone) {
vscode.window.showInformationMessage("You just said: " + transcribed_word);
errorFlag = false;
codeBuffer = "";
manager.parse_speech(transcribed_word, count_lines);
displayCode(manager.struct_command_list);
}
});
}
function displayCode(struct_command_list: string[]) {
/* Set up commands to insert */
let commands = '#c_program SampleProgram #include "stdio.h";; ';
if (language == "c") commands = '#c_program SampleProgram #include "stdio.h";; ';
else if (language == "py") commands = '#p_program SampleProgram #include "sys";; ';
for (var i=0; i<struct_command_list.length; i++) commands += struct_command_list[i] + "\n"
commands += ' #program_end';
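// Pipe the struct commands to the Java AST parser; its stdout stream carries the generated source code.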
const other_child = spawn('java', ['ast/ASTParser 1'], {shell:true, cwd: ast_cwd});
other_child.stdin.setEncoding('utf8');
other_child.stdin.write(commands);
other_child.stdin.end();
other_child.stdout.setEncoding('utf8');
other_child.stdout.on('data', (data: string)=>{
codeBuffer += data;
if (data.includes("AST construction complete") && !errorFlag) {
var code = codeBuffer.split("ASTNode")[0].trimLeft();
codeBuffer = ""; // clear code stream
writeToEditor(code, struct_command_list);
}
else if (data.includes("Not Supported Syntax Format")) {
console.log("error");
codeBuffer = ""
errorFlag = true;
}
});
}
/* text2 - function prototype, text1 - actual function
Conditions for a function prototype and function:
- one ends with ";", the other ends with "{"
- both start with same data type value
- function name has to be the same
Only function declarations end with "{" and begin with a datatype value
statements that end with ";" and begin with datatype are declaration statements. However, they do not
include "(" in the second word.
*/
function checkIfFunctionPrototype(text1: string, text2: string){
if (!text2.endsWith(";")) return false;
if (!text1.endsWith("{")) return false;
/* Not needed because blank lines should already be caught before entering this function call.
Just as a precaution. */
if (text1.length < 2 || text2.length < 2) return false;
text2 = text2.substring(0,text2.length-1);
text1 = text1.substring(0,text1.length-1);
text2 = text2.replace(/ +/g, ' ');
text1 = text1.replace(/ +/g, ' ');
/* Convert text1 to function prototype for comparison */
var splitted_text1 = text1.split(" ");
var splitted_text2 = text2.split(" ");
if (splitted_text1.length < 2 || splitted_text2.length < 2) return false;
if (!datatypes.includes(splitted_text1[0]) || !datatypes.includes(splitted_text2[0])) return false;
if (!splitted_text1[1].includes("(") || !splitted_text2[1].includes("(")) return false;
if (splitted_text1[0] != splitted_text2[0]) return false;
if (splitted_text1[1] != splitted_text2[1]) return false;
else return true;
}
function map_lines_to_code(struct_command_list: string[]){
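// Map each struct command to its line number in the generated code, skipping blank lines, includes and function prototypes, and track where the cursor line ends up.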
console.log(JSON.stringify(code_segments));
cursor_pos = 0;
count_lines = [];
var count =0;
var j =0;
var includeStatement = false;
for (var i=0;i<code_segments.length;i++) {
console.log(JSON.stringify(code_segments[i]) + " " + i + " " + count);
includeStatement = false;
code_segments[i] = code_segments[i].trim();
if (code_segments[i].startsWith("#include") || code_segments[i].startsWith("import")) includeStatement = true;
if (includeStatement || code_segments[i] == "\r" || code_segments[i] == "" || code_segments[i] == "\t" || code_segments[i]=="*/"|| code_segments[i]=="/*") {
count++;
/* Because cursor position is a blank line in the code so this if-block to detect blank lines is used.
Blank line is a struct command "#string \"\";;", hence this blank line will be mapped to that
struct command as well. */
if (!includeStatement && j < struct_command_list.length && struct_command_list[j] == "#string \"\";;") {
count_lines[j] = count;
cursor_pos = i;
j++;
}
}
else if (i< code_segments.length-1 && checkIfFunctionPrototype(code_segments[i+1], code_segments[i])){
count++;
}
else {
if (struct_command_list[j].startsWith("#string")) cursor_pos = count;
count++;
count_lines[j] = count;
j++;
}
}
}
function | (){
count_speech = [];
var count =0;
var j =0;
for (var i=0;i<manager.struct_command_list.length;i++){
var line = manager.struct_command_list[i];
if (line.startsWith("#comment") || line.indexOf("cursor here") != -1 || line.startsWith("#if_branch_end;;") || line.startsWith("#else_branch_end") || line.startsWith("#function_end;;") || line.startsWith("#while_end;;") || line.startsWith("#for_end;;")){
count++;
}
else{
count_speech[j] = count++;
j++;
}
}
}
function writeToEditor(code: string, struct_command_list: string[]) {
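// Replace the entire editor buffer with the regenerated code, then restore the cursor to the tracked position.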
code_segments = code.split("\n");
map_lines_to_code(struct_command_list);
console.log("cursor pos: " + cursor_pos)
map_speech_to_struct_command();
console.log("LINE_COUNT: "+JSON.stringify(count_lines));
console.log("SPEECH_COUNT: "+JSON.stringify(count_speech));
let editor = vscode.window.activeTextEditor;
if (manager.holding) {
var line = code_segments[manager.heldline];
var numTabs = "";
for (var i = 0; i < line.length; i++) {
if (line[i] == "\t") numTabs += "\t";
}
var speech = manager.curr_speech.join(" ");
var temp = speech.split(" ");
if (speech.includes("spell") && speech.includes("end_spell")) {
var spellIdx = temp.indexOf("spell");
var spellEndIdx = temp.indexOf("end_spell");
speech = temp.slice(0, spellIdx).join(" ").trim() + " " +
temp.slice(spellIdx + 1, spellEndIdx).join("").trim() + " " +
temp.slice(spellEndIdx + 1).join(" ").trim();
}
code_segments.splice(manager.heldline - 1, 1, numTabs + speech + " *stay");
code = code_segments.join("\n");
cursor_pos = manager.heldline - 1;
}
if (editor) {
/* Get range to delete */
var lineCount = editor.document.lineCount;
var start_pos = new vscode.Position(0, 0);
var end_pos = new vscode.Position(lineCount, 0);
var range = new vscode.Range(start_pos, end_pos);
editor.edit(editBuilder => {
editBuilder.delete(range);
editBuilder.insert(start_pos, code);
}).then(() => {
/* Because editBuilder is a callback function, cursor position cannot be set (it will be outdated) without then().
then() is called when the callback function is done editing. */
if (editor) {
var lineAt = editor.document.lineAt(cursor_pos).text;
if (manager.isLeftRightCalled){
editor.selection = new vscode.Selection(new vscode.Position(cursor_pos, manager.len_cursor), new vscode.Position(cursor_pos, manager.len_cursor));
}
else editor.selection = new vscode.Selection(new vscode.Position(cursor_pos, lineAt.length), new vscode.Position(cursor_pos, lineAt.length));
}
})
}
}
// this method is called when your extension is deactivated
export function deactivate() {} | map_speech_to_struct_command | identifier_name |
extension.ts | // The module 'vscode' contains the VS Code extensibility API
// Import the module and reference it with the alias vscode in your code below
import * as vscode from 'vscode';
import { StructCommandManager } from './struct_command_manager'
import { EditCommandManager } from './edit_command_manager';
import { runTestCasesForC, runTestCasesForPy, test_function } from './tester'
import {runEditTests} from './edit_tester'
import { getUserSpecs } from './user_specs'
const {spawn} = require('child_process');
var code_segments = [""];
var cursor_pos = 0;
var count_lines= [0];
var count_speech = [0];
var manager: StructCommandManager;
var editManager: EditCommandManager;
var microphone = true;
var codeBuffer = "";
var errorFlag = false;
var language = "";
var cwd = "";
var ast_cwd = "";
var cred = "";
var datatypes = ["int", "float", "long", "double", "char"];
// this method is called when your extension is activated
// your extension is activated the very first time the command is executed
export function activate(context: vscode.ExtensionContext) {
// Use the console to output diagnostic information (console.log) and errors (console.error)
// This line of code will only be executed once when your extension is activated
console.log('Congratulations, your extension "talk-to-code" is now active!');
// The command has been defined in the package.json file
// Now provide the implementation of the command with registerCommand
// The commandId parameter must match the command field in package.json
let disposable = vscode.commands.registerCommand('extension.helloWorld', () => {
// The code you place here will be executed every time your command is executed
// Display a message box to the user
vscode.window.showInformationMessage('coding by dictation!');
initUser("lawrence"); /* change here to set new user */
initManager();
listen();
// runEditTests();
// test_function();
// runTestCasesForC();
// runTestCasesForPy();
});
context.subscriptions.push(disposable);
}
function initUser(user: string) |
function initManager() {
language = "c";
manager = new StructCommandManager(language, true);
editManager = new EditCommandManager(manager,count_lines,count_speech);
}
function listen() {
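// Spawn the speech recognizer as a child process; each transcribed phrase either toggles the microphone, runs an edit command, or is parsed into struct commands.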
displayCode([""]);
// env: {GOOGLE_APPLICATION_CREDENTIALS: cred}
const child = spawn('node', ['speech_recognizer.js'], {shell:true, cwd: cwd});
child.stdout.on('data', (data: string)=>{
let transcribed_word = data.toString().trim();
console.log("TRANSCRIBED WORD: "+transcribed_word);
if (transcribed_word == 'Listening') vscode.window.showInformationMessage('Begin Speaking!');
else if (transcribed_word == "microphone off" || transcribed_word == "sleep" || transcribed_word == "go to sleep") {
microphone = false;
vscode.window.showInformationMessage("microphone asleep");
}
else if (transcribed_word == "microphone on" || transcribed_word == "wake up") {
microphone = true;
vscode.window.showInformationMessage("microphone active");
}
else if (microphone && editManager.check_if_edit_command(transcribed_word)) {
vscode.window.showInformationMessage("You just said the following edit command: " + transcribed_word);
console.log(transcribed_word)
editManager.checkAll(transcribed_word,count_lines);
displayCode(manager.struct_command_list);
console.log(manager.managerStatus());
}
else if (microphone) {
vscode.window.showInformationMessage("You just said: " + transcribed_word);
errorFlag = false;
codeBuffer = "";
manager.parse_speech(transcribed_word, count_lines);
displayCode(manager.struct_command_list);
}
});
}
function displayCode(struct_command_list: string[]) {
/* Set up commands to insert */
let commands = '#c_program SampleProgram #include "stdio.h";; ';
if (language == "c") commands = '#c_program SampleProgram #include "stdio.h";; ';
else if (language == "py") commands = '#p_program SampleProgram #include "sys";; ';
for (var i=0; i<struct_command_list.length; i++) commands += struct_command_list[i] + "\n"
commands += ' #program_end';
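// Pipe the struct commands to the Java AST parser; its stdout stream carries the generated source code.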
const other_child = spawn('java', ['ast/ASTParser 1'], {shell:true, cwd: ast_cwd});
other_child.stdin.setEncoding('utf8');
other_child.stdin.write(commands);
other_child.stdin.end();
other_child.stdout.setEncoding('utf8');
other_child.stdout.on('data', (data: string)=>{
codeBuffer += data;
if (data.includes("AST construction complete") && !errorFlag) {
var code = codeBuffer.split("ASTNode")[0].trimLeft();
codeBuffer = ""; // clear code stream
writeToEditor(code, struct_command_list);
}
else if (data.includes("Not Supported Syntax Format")) {
console.log("error");
codeBuffer = ""
errorFlag = true;
}
});
}
/* text2 - function prototype, text1 - actual function
Conditions for a function prototype and function:
- one ends with ";", the other ends with "{"
- both start with same data type value
- function name has to be the same
Only function declarations end with "{" and begin with a datatype value
statements that end with ";" and begin with datatype are declaration statements. However, they do not
include "(" in the second word.
*/
function checkIfFunctionPrototype(text1: string, text2: string){
if (!text2.endsWith(";")) return false;
if (!text1.endsWith("{")) return false;
/* Not needed because blank lines should already be caught before entering this function call.
Just as a precaution. */
if (text1.length < 2 || text2.length < 2) return false;
text2 = text2.substring(0,text2.length-1);
text1 = text1.substring(0,text1.length-1);
text2 = text2.replace(/ +/g, ' ');
text1 = text1.replace(/ +/g, ' ');
/* Convert text1 to function prototype for comparison */
var splitted_text1 = text1.split(" ");
var splitted_text2 = text2.split(" ");
if (splitted_text1.length < 2 || splitted_text2.length < 2) return false;
if (!datatypes.includes(splitted_text1[0]) || !datatypes.includes(splitted_text2[0])) return false;
if (!splitted_text1[1].includes("(") || !splitted_text2[1].includes("(")) return false;
if (splitted_text1[0] != splitted_text2[0]) return false;
if (splitted_text1[1] != splitted_text2[1]) return false;
else return true;
}
function map_lines_to_code(struct_command_list: string[]){
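// Map each struct command to its line number in the generated code, skipping blank lines, includes and function prototypes, and track where the cursor line ends up.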
console.log(JSON.stringify(code_segments));
cursor_pos = 0;
count_lines = [];
var count =0;
var j =0;
var includeStatement = false;
for (var i=0;i<code_segments.length;i++) {
console.log(JSON.stringify(code_segments[i]) + " " + i + " " + count);
includeStatement = false;
code_segments[i] = code_segments[i].trim();
if (code_segments[i].startsWith("#include") || code_segments[i].startsWith("import")) includeStatement = true;
if (includeStatement || code_segments[i] == "\r" || code_segments[i] == "" || code_segments[i] == "\t" || code_segments[i]=="*/"|| code_segments[i]=="/*") {
count++;
/* Because cursor position is a blank line in the code so this if-block to detect blank lines is used.
Blank line is a struct command "#string \"\";;", hence this blank line will be mapped to that
struct command as well. */
if (!includeStatement && j < struct_command_list.length && struct_command_list[j] == "#string \"\";;") {
count_lines[j] = count;
cursor_pos = i;
j++;
}
}
else if (i< code_segments.length-1 && checkIfFunctionPrototype(code_segments[i+1], code_segments[i])){
count++;
}
else {
if (struct_command_list[j].startsWith("#string")) cursor_pos = count;
count++;
count_lines[j] = count;
j++;
}
}
}
function map_speech_to_struct_command(){
count_speech = [];
var count =0;
var j =0;
for (var i=0;i<manager.struct_command_list.length;i++){
var line = manager.struct_command_list[i];
if (line.startsWith("#comment") || line.indexOf("cursor here") != -1 || line.startsWith("#if_branch_end;;") || line.startsWith("#else_branch_end") || line.startsWith("#function_end;;") || line.startsWith("#while_end;;") || line.startsWith("#for_end;;")){
count++;
}
else{
count_speech[j] = count++;
j++;
}
}
}
function writeToEditor(code: string, struct_command_list: string[]) {
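// Replace the entire editor buffer with the regenerated code, then restore the cursor to the tracked position.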
code_segments = code.split("\n");
map_lines_to_code(struct_command_list);
console.log("cursor pos: " + cursor_pos)
map_speech_to_struct_command();
console.log("LINE_COUNT: "+JSON.stringify(count_lines));
console.log("SPEECH_COUNT: "+JSON.stringify(count_speech));
let editor = vscode.window.activeTextEditor;
if (manager.holding) {
var line = code_segments[manager.heldline];
var numTabs = "";
for (var i = 0; i < line.length; i++) {
if (line[i] == "\t") numTabs += "\t";
}
var speech = manager.curr_speech.join(" ");
var temp = speech.split(" ");
if (speech.includes("spell") && speech.includes("end_spell")) {
var spellIdx = temp.indexOf("spell");
var spellEndIdx = temp.indexOf("end_spell");
speech = temp.slice(0, spellIdx).join(" ").trim() + " " +
temp.slice(spellIdx + 1, spellEndIdx).join("").trim() + " " +
temp.slice(spellEndIdx + 1).join(" ").trim();
}
code_segments.splice(manager.heldline - 1, 1, numTabs + speech + " *stay");
code = code_segments.join("\n");
cursor_pos = manager.heldline - 1;
}
if (editor) {
/* Get range to delete */
var lineCount = editor.document.lineCount;
var start_pos = new vscode.Position(0, 0);
var end_pos = new vscode.Position(lineCount, 0);
var range = new vscode.Range(start_pos, end_pos);
editor.edit(editBuilder => {
editBuilder.delete(range);
editBuilder.insert(start_pos, code);
}).then(() => {
/* Because editBuilder is a callback function, cursor position cannot be set (it will be outdated) without then().
then() is called when the callback function is done editing. */
if (editor) {
var lineAt = editor.document.lineAt(cursor_pos).text;
if (manager.isLeftRightCalled){
editor.selection = new vscode.Selection(new vscode.Position(cursor_pos, manager.len_cursor), new vscode.Position(cursor_pos, manager.len_cursor));
}
else editor.selection = new vscode.Selection(new vscode.Position(cursor_pos, lineAt.length), new vscode.Position(cursor_pos, lineAt.length));
}
})
}
}
// this method is called when your extension is deactivated
export function deactivate() {} | {
var userSpecs = getUserSpecs(user);
cwd = userSpecs[0];
cred = userSpecs[1];
ast_cwd = userSpecs[2];
} | identifier_body |